Magellan Linux

Annotation of /trunk/kernel26-alx/patches-3.10/0125-3.10.26-all-fixes.patch

Revision 2672
Tue Jul 21 16:46:35 2015 UTC by niro
File size: 130538 bytes
-3.10.84-alx-r1
1 niro 2672 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2     index 2fe6e767b3d6..1311a48a7367 100644
3     --- a/Documentation/kernel-parameters.txt
4     +++ b/Documentation/kernel-parameters.txt
5     @@ -1456,6 +1456,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6    
7     * dump_id: dump IDENTIFY data.
8    
9     + * atapi_dmadir: Enable ATAPI DMADIR bridge support
10     +
11     + * disable: Disable this device.
12     +
13     If there are multiple matching configurations changing
14     the same attribute, the last one is used.
15    
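
For reference, a usage sketch of the two new keywords, following the existing libata.force=[ID:]VAL syntax documented earlier in this file (the port number below is illustrative):

	libata.force=atapi_dmadir	(apply ATAPI DMADIR handling to all devices)
	libata.force=2:disable		(disable the device on ATA port 2)
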
16     diff --git a/Makefile b/Makefile
17     index cd97e9a25410..ac07707a2f9e 100644
18     --- a/Makefile
19     +++ b/Makefile
20     @@ -1,6 +1,6 @@
21     VERSION = 3
22     PATCHLEVEL = 10
23     -SUBLEVEL = 25
24     +SUBLEVEL = 26
25     EXTRAVERSION =
26     NAME = TOSSUG Baby Fish
27    
28     diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
29     index 7c1bfc0aea0c..accefe099182 100644
30     --- a/arch/arm/include/asm/arch_timer.h
31     +++ b/arch/arm/include/asm/arch_timer.h
32     @@ -80,15 +80,6 @@ static inline u32 arch_timer_get_cntfrq(void)
33     return val;
34     }
35    
36     -static inline u64 arch_counter_get_cntpct(void)
37     -{
38     - u64 cval;
39     -
40     - isb();
41     - asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
42     - return cval;
43     -}
44     -
45     static inline u64 arch_counter_get_cntvct(void)
46     {
47     u64 cval;
48     diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
49     index 1315c4ccfa56..dbe21107945a 100644
50     --- a/arch/arm/kernel/hyp-stub.S
51     +++ b/arch/arm/kernel/hyp-stub.S
52     @@ -153,6 +153,8 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
53     mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
54     orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
55     mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
56     + mov r7, #0
57     + mcrr p15, 4, r7, r7, c14 @ CNTVOFF
58     1:
59     #endif
60    
61     diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
62     index 2b44b95a86dd..6f18695a09cb 100644
63     --- a/arch/arm/kvm/interrupts_head.S
64     +++ b/arch/arm/kvm/interrupts_head.S
65     @@ -503,6 +503,10 @@ vcpu .req r0 @ vcpu pointer always in r0
66     add r5, vcpu, r4
67     strd r2, r3, [r5]
68    
69     + @ Ensure host CNTVCT == CNTPCT
70     + mov r2, #0
71     + mcrr p15, 4, r2, r2, c14 @ CNTVOFF
72     +
73     1:
74     #endif
75     @ Allow physical timer/counter access for the host
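
Both the hyp-stub hunk and this one rely on the same architectural relation between the generic-timer counters; a one-line C sketch of it:

	/* ARM generic timer: the virtual counter is the physical counter
	 * offset by CNTVOFF, so zeroing CNTVOFF makes them read the same */
	u64 cntvct = cntpct - cntvoff;	/* CNTVOFF == 0  =>  CNTVCT == CNTPCT */
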
76     diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
77     index d05fc7b54567..83735b72895d 100644
78     --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
79     +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
80     @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
81    
82     /* gpmc */
83     static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
84     - { .irq = 20 },
85     + { .irq = 20 + OMAP_INTC_START, },
86     { .irq = -1 }
87     };
88    
89     @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
90     };
91    
92     static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
93     - { .irq = 52 },
94     + { .irq = 52 + OMAP_INTC_START, },
95     { .irq = -1 }
96     };
97    
98     diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
99     index 02b1b10537bc..9f6238c9dfc9 100644
100     --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
101     +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
102     @@ -2152,7 +2152,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
103     };
104    
105     static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
106     - { .irq = 20 },
107     + { .irq = 20 + OMAP_INTC_START, },
108     { .irq = -1 }
109     };
110    
111     @@ -2986,7 +2986,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
112    
113     static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
114     static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
115     - { .irq = 24 },
116     + { .irq = 24 + OMAP_INTC_START, },
117     { .irq = -1 }
118     };
119    
120     @@ -3028,7 +3028,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
121    
122     static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
123     static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
124     - { .irq = 28 },
125     + { .irq = 28 + OMAP_INTC_START, },
126     { .irq = -1 }
127     };
128    
129     diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
130     index 84fcc5018284..519c4b2c0687 100644
131     --- a/arch/arm64/boot/dts/foundation-v8.dts
132     +++ b/arch/arm64/boot/dts/foundation-v8.dts
133     @@ -6,6 +6,8 @@
134    
135     /dts-v1/;
136    
137     +/memreserve/ 0x80000000 0x00010000;
138     +
139     / {
140     model = "Foundation-v8A";
141     compatible = "arm,foundation-aarch64", "arm,vexpress";
142     diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
143     index bf6ab242f047..d56ed11ba9a3 100644
144     --- a/arch/arm64/include/asm/arch_timer.h
145     +++ b/arch/arm64/include/asm/arch_timer.h
146     @@ -110,16 +110,6 @@ static inline void __cpuinit arch_counter_set_user_access(void)
147     asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
148     }
149    
150     -static inline u64 arch_counter_get_cntpct(void)
151     -{
152     - u64 cval;
153     -
154     - isb();
155     - asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
156     -
157     - return cval;
158     -}
159     -
160     static inline u64 arch_counter_get_cntvct(void)
161     {
162     u64 cval;
163     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
164     index e333a243bfcc..e9a1a1d81892 100644
165     --- a/arch/arm64/include/asm/pgtable.h
166     +++ b/arch/arm64/include/asm/pgtable.h
167     @@ -184,7 +184,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
168     #define pgprot_noncached(prot) \
169     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
170     #define pgprot_writecombine(prot) \
171     - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
172     + __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
173     #define pgprot_dmacoherent(prot) \
174     __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
175     #define __HAVE_PHYS_MEM_ACCESS_PROT
176     diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
177     index 7065e920149d..0defa0728a9b 100644
178     --- a/arch/arm64/include/asm/spinlock.h
179     +++ b/arch/arm64/include/asm/spinlock.h
180     @@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
181     unsigned int tmp;
182    
183     asm volatile(
184     - " ldaxr %w0, %1\n"
185     + "2: ldaxr %w0, %1\n"
186     " cbnz %w0, 1f\n"
187     " stxr %w0, %w2, %1\n"
188     + " cbnz %w0, 2b\n"
189     "1:\n"
190     : "=&r" (tmp), "+Q" (lock->lock)
191     : "r" (1)
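
The added "cbnz %w0, 2b" matters because a store-exclusive (stxr) can fail spuriously, e.g. when exclusivity is lost to an interrupt, even though the lock was observed free; without the retry, trylock could report failure on an uncontended lock. A hedged C sketch of the intended semantics, using a GCC builtin rather than the kernel's actual primitives:

	#include <stdbool.h>

	static inline bool trylock_sketch(unsigned int *lock)
	{
		unsigned int expected = 0;
		/* a strong compare-exchange retries internally on spurious
		 * store-exclusive failure, like the new branch back to 2b */
		return __atomic_compare_exchange_n(lock, &expected, 1, false,
						   __ATOMIC_ACQUIRE,
						   __ATOMIC_RELAXED);
	}
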
192     diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
193     index 89c047f9a971..70ba9d4ee978 100644
194     --- a/arch/arm64/include/asm/syscall.h
195     +++ b/arch/arm64/include/asm/syscall.h
196     @@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
197     unsigned int i, unsigned int n,
198     unsigned long *args)
199     {
200     + if (n == 0)
201     + return;
202     +
203     if (i + n > SYSCALL_MAX_ARGS) {
204     unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
205     unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
206     @@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
207     unsigned int i, unsigned int n,
208     const unsigned long *args)
209     {
210     + if (n == 0)
211     + return;
212     +
213     if (i + n > SYSCALL_MAX_ARGS) {
214     pr_warning("%s called with max args %d, handling only %d\n",
215     __func__, i + n, SYSCALL_MAX_ARGS);
216     diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
217     index 3659e460071d..23a3c4791d86 100644
218     --- a/arch/arm64/include/asm/thread_info.h
219     +++ b/arch/arm64/include/asm/thread_info.h
220     @@ -24,10 +24,10 @@
221     #include <linux/compiler.h>
222    
223     #ifndef CONFIG_ARM64_64K_PAGES
224     -#define THREAD_SIZE_ORDER 1
225     +#define THREAD_SIZE_ORDER 2
226     #endif
227    
228     -#define THREAD_SIZE 8192
229     +#define THREAD_SIZE 16384
230     #define THREAD_START_SP (THREAD_SIZE - 16)
231    
232     #ifndef __ASSEMBLY__
233     diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
234     index 439827271e3d..26e310c54344 100644
235     --- a/arch/arm64/include/asm/virt.h
236     +++ b/arch/arm64/include/asm/virt.h
237     @@ -21,6 +21,7 @@
238     #define BOOT_CPU_MODE_EL2 (0x0e12b007)
239    
240     #ifndef __ASSEMBLY__
241     +#include <asm/cacheflush.h>
242    
243     /*
244     * __boot_cpu_mode records what mode CPUs were booted in.
245     @@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
246     void __hyp_set_vectors(phys_addr_t phys_vector_base);
247     phys_addr_t __hyp_get_vectors(void);
248    
249     +static inline void sync_boot_mode(void)
250     +{
251     + /*
252     + * As secondaries write to __boot_cpu_mode with caches disabled, we
253     + * must flush the corresponding cache entries to ensure the visibility
254     + * of their writes.
255     + */
256     + __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
257     +}
258     +
259     /* Reports the availability of HYP mode */
260     static inline bool is_hyp_mode_available(void)
261     {
262     + sync_boot_mode();
263     return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
264     __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
265     }
266     @@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
267     /* Check if the bootloader has booted CPUs in different modes */
268     static inline bool is_hyp_mode_mismatched(void)
269     {
270     + sync_boot_mode();
271     return __boot_cpu_mode[0] != __boot_cpu_mode[1];
272     }
273    
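
The helper encodes a general rule: data written while the writer's caches were disabled is only reliably visible after the reader flushes its own cached copy. A hedged sketch of the reader-side pattern (shared_flag is an illustrative variable):

	/* secondaries wrote shared_flag with the MMU/caches off; clean and
	 * invalidate our cached copy before trusting what we read */
	__flush_dcache_area(&shared_flag, sizeof(shared_flag));
	val = shared_flag;
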
274     diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
275     index 1d1314280a03..6ad781b21c08 100644
276     --- a/arch/arm64/kernel/entry.S
277     +++ b/arch/arm64/kernel/entry.S
278     @@ -121,7 +121,7 @@
279    
280     .macro get_thread_info, rd
281     mov \rd, sp
282     - and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
283     + and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
284     .endm
285    
286     /*
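
The macro works because arm64 kernel stacks are THREAD_SIZE bytes and THREAD_SIZE-aligned, so masking any in-stack pointer yields the stack base where thread_info lives; the hard-coded 8K mask would have silently broken with the 16K THREAD_SIZE introduced above. A hedged C rendering:

	/* sp may point anywhere within the current kernel stack */
	struct thread_info *ti =
		(struct thread_info *)(sp & ~(THREAD_SIZE - 1));
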
287     diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
288     index e8b8357aedb4..2fa308e4a1fa 100644
289     --- a/arch/arm64/kernel/fpsimd.c
290     +++ b/arch/arm64/kernel/fpsimd.c
291     @@ -79,8 +79,10 @@ void fpsimd_thread_switch(struct task_struct *next)
292    
293     void fpsimd_flush_thread(void)
294     {
295     + preempt_disable();
296     memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
297     fpsimd_load_state(&current->thread.fpsimd_state);
298     + preempt_enable();
299     }
300    
301     /*
302     diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
303     index 6e1e77f1831c..5341534b6d04 100644
304     --- a/arch/arm64/kernel/ptrace.c
305     +++ b/arch/arm64/kernel/ptrace.c
306     @@ -236,31 +236,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
307     {
308     int err, len, type, disabled = !ctrl.enabled;
309    
310     - if (disabled) {
311     - len = 0;
312     - type = HW_BREAKPOINT_EMPTY;
313     - } else {
314     - err = arch_bp_generic_fields(ctrl, &len, &type);
315     - if (err)
316     - return err;
317     -
318     - switch (note_type) {
319     - case NT_ARM_HW_BREAK:
320     - if ((type & HW_BREAKPOINT_X) != type)
321     - return -EINVAL;
322     - break;
323     - case NT_ARM_HW_WATCH:
324     - if ((type & HW_BREAKPOINT_RW) != type)
325     - return -EINVAL;
326     - break;
327     - default:
328     + attr->disabled = disabled;
329     + if (disabled)
330     + return 0;
331     +
332     + err = arch_bp_generic_fields(ctrl, &len, &type);
333     + if (err)
334     + return err;
335     +
336     + switch (note_type) {
337     + case NT_ARM_HW_BREAK:
338     + if ((type & HW_BREAKPOINT_X) != type)
339     return -EINVAL;
340     - }
341     + break;
342     + case NT_ARM_HW_WATCH:
343     + if ((type & HW_BREAKPOINT_RW) != type)
344     + return -EINVAL;
345     + break;
346     + default:
347     + return -EINVAL;
348     }
349    
350     attr->bp_len = len;
351     attr->bp_type = type;
352     - attr->disabled = disabled;
353    
354     return 0;
355     }
356     diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
357     index 5d54e3717bf8..9c93e126328c 100644
358     --- a/arch/arm64/kernel/smp.c
359     +++ b/arch/arm64/kernel/smp.c
360     @@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
361     raw_spin_unlock(&boot_lock);
362    
363     /*
364     - * Enable local interrupts.
365     - */
366     - notify_cpu_starting(cpu);
367     - local_irq_enable();
368     - local_fiq_enable();
369     -
370     - /*
371     * OK, now it's safe to let the boot CPU continue. Wait for
372     * the CPU migration code to notice that the CPU is online
373     * before we continue.
374     @@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
375     complete(&cpu_running);
376    
377     /*
378     + * Enable GIC and timers.
379     + */
380     + notify_cpu_starting(cpu);
381     +
382     + local_irq_enable();
383     + local_fiq_enable();
384     +
385     + /*
386     * OK, it's off to the idle thread for us
387     */
388     cpu_startup_entry(CPUHP_ONLINE);
389     diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
390     index 88611c3a421a..7c716634a671 100644
391     --- a/arch/arm64/mm/flush.c
392     +++ b/arch/arm64/mm/flush.c
393     @@ -77,14 +77,12 @@ void __flush_dcache_page(struct page *page)
394    
395     void __sync_icache_dcache(pte_t pte, unsigned long addr)
396     {
397     - unsigned long pfn;
398     - struct page *page;
399     + struct page *page = pte_page(pte);
400    
401     - pfn = pte_pfn(pte);
402     - if (!pfn_valid(pfn))
403     + /* no flushing needed for anonymous pages */
404     + if (!page_mapping(page))
405     return;
406    
407     - page = pfn_to_page(pfn);
408     if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
409     __flush_dcache_page(page);
410     __flush_icache_all();
411     @@ -94,28 +92,14 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
412     }
413    
414     /*
415     - * Ensure cache coherency between kernel mapping and userspace mapping of this
416     - * page.
417     + * This function is called when a page has been modified by the kernel. Mark
418     + * it as dirty for later flushing when mapped in user space (if executable,
419     + * see __sync_icache_dcache).
420     */
421     void flush_dcache_page(struct page *page)
422     {
423     - struct address_space *mapping;
424     -
425     - /*
426     - * The zero page is never written to, so never has any dirty cache
427     - * lines, and therefore never needs to be flushed.
428     - */
429     - if (page == ZERO_PAGE(0))
430     - return;
431     -
432     - mapping = page_mapping(page);
433     - if (mapping && mapping_mapped(mapping)) {
434     - __flush_dcache_page(page);
435     - __flush_icache_all();
436     - set_bit(PG_dcache_clean, &page->flags);
437     - } else {
438     + if (test_bit(PG_dcache_clean, &page->flags))
439     clear_bit(PG_dcache_clean, &page->flags);
440     - }
441     }
442     EXPORT_SYMBOL(flush_dcache_page);
443    
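
Taken together, the rewritten pair implements a lazy coherency protocol keyed on PG_dcache_clean: dirtying is recorded cheaply, and the flush is paid at most once, when the page is next mapped into userspace. A hedged summary of the two halves (function names are illustrative):

	/* kernel modified the page: only record the fact */
	void writer_side(struct page *page)
	{
		clear_bit(PG_dcache_clean, &page->flags);
	}

	/* page being mapped to userspace (cf. __sync_icache_dcache):
	 * flush once per dirtying, then remember it is clean */
	void mapper_side(struct page *page)
	{
		if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
			__flush_dcache_page(page);
			__flush_icache_all();
		}
	}
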
444     diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
445     index eeecc9c8ed68..80a369eab637 100644
446     --- a/arch/arm64/mm/mmu.c
447     +++ b/arch/arm64/mm/mmu.c
448     @@ -339,7 +339,6 @@ void __init paging_init(void)
449     bootmem_init();
450    
451     empty_zero_page = virt_to_page(zero_page);
452     - __flush_dcache_page(empty_zero_page);
453    
454     /*
455     * TTBR0 is only used for the identity mapping at this stage. Make it
456     diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
457     index a82ae8868077..f84fcf71f129 100644
458     --- a/arch/arm64/mm/proc.S
459     +++ b/arch/arm64/mm/proc.S
460     @@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
461     ret
462     ENDPROC(cpu_do_switch_mm)
463    
464     -cpu_name:
465     - .ascii "AArch64 Processor"
466     - .align
467     -
468     .section ".text.init", #alloc, #execinstr
469    
470     /*
471     diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
472     index 07ca627e52c0..e17d94d429a8 100644
473     --- a/arch/powerpc/include/asm/exception-64s.h
474     +++ b/arch/powerpc/include/asm/exception-64s.h
475     @@ -264,7 +264,7 @@ do_kvm_##n: \
476     subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
477     beq- 1f; \
478     ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
479     -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
480     +1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
481     blt+ cr1,3f; /* abort if it is */ \
482     li r1,(n); /* will be reloaded later */ \
483     sth r1,PACA_TRAP_SAVE(r13); \
484     diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
485     index b61363d557b5..192a3f562bdb 100644
486     --- a/arch/powerpc/kernel/head_64.S
487     +++ b/arch/powerpc/kernel/head_64.S
488     @@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
489     mtctr r8
490     bctr
491    
492     +.balign 8
493     p_end: .llong _end - _stext
494    
495     4: /* Now copy the rest of the kernel up to _end */
496     diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
497     index 5880dfb31074..b616e364dbe9 100644
498     --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
499     +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
500     @@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
501     slb_v = vcpu->kvm->arch.vrma_slb_v;
502     }
503    
504     + preempt_disable();
505     /* Find the HPTE in the hash table */
506     index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
507     HPTE_V_VALID | HPTE_V_ABSENT);
508     - if (index < 0)
509     + if (index < 0) {
510     + preempt_enable();
511     return -ENOENT;
512     + }
513     hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
514     v = hptep[0] & ~HPTE_V_HVLOCK;
515     gr = kvm->arch.revmap[index].guest_rpte;
516     @@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
517     /* Unlock the HPTE */
518     asm volatile("lwsync" : : : "memory");
519     hptep[0] = v;
520     + preempt_enable();
521    
522     gpte->eaddr = eaddr;
523     gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
524     diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
525     index 6dcbb49105a4..049b899e40e4 100644
526     --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
527     +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
528     @@ -724,6 +724,10 @@ static int slb_base_page_shift[4] = {
529     20, /* 1M, unsupported */
530     };
531    
532     +/* When called from virtmode, this func should be protected by
533     + * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
534     + * can trigger deadlock issue.
535     + */
536     long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
537     unsigned long valid)
538     {
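
The comment's requirement is exactly what the book3s_64_mmu_hv.c hunk above adds; a hedged sketch of the caller-side bracketing from virtual mode:

	preempt_disable();	/* must not be scheduled out while the
				 * returned HPTE is HPTE_V_HVLOCK-locked */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index >= 0) {
		/* ... read the HPTE, then release HPTE_V_HVLOCK ... */
	}
	preempt_enable();
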
539     diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
540     index 2a0a596ebf67..d77f2f6c7ff0 100644
541     --- a/arch/sh/kernel/sh_ksyms_32.c
542     +++ b/arch/sh/kernel/sh_ksyms_32.c
543     @@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
544     EXPORT_SYMBOL(copy_page);
545     EXPORT_SYMBOL(__clear_user);
546     EXPORT_SYMBOL(empty_zero_page);
547     +#ifdef CONFIG_FLATMEM
548     +/* need in pfn_valid macro */
549     +EXPORT_SYMBOL(min_low_pfn);
550     +EXPORT_SYMBOL(max_low_pfn);
551     +#endif
552    
553     #define DECLARE_EXPORT(name) \
554     extern void name(void);EXPORT_SYMBOL(name)
555     diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
556     index 7b95f29e3174..3baff31e58cf 100644
557     --- a/arch/sh/lib/Makefile
558     +++ b/arch/sh/lib/Makefile
559     @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
560     checksum.o strlen.o div64.o div64-generic.o
561    
562     # Extracted from libgcc
563     -lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
564     +obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
565     ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
566     udiv_qrnnd.o
567    
568     diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
569     index 7619f2f792af..dfb0019bf05b 100644
570     --- a/arch/sparc/include/asm/pgtable_64.h
571     +++ b/arch/sparc/include/asm/pgtable_64.h
572     @@ -616,7 +616,7 @@ static inline unsigned long pte_present(pte_t pte)
573     }
574    
575     #define pte_accessible pte_accessible
576     -static inline unsigned long pte_accessible(pte_t a)
577     +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
578     {
579     return pte_val(a) & _PAGE_VALID;
580     }
581     @@ -806,7 +806,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
582     * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
583     * and SUN4V pte layout, so this inline test is fine.
584     */
585     - if (likely(mm != &init_mm) && pte_accessible(orig))
586     + if (likely(mm != &init_mm) && pte_accessible(mm, orig))
587     tlb_batch_add(mm, addr, ptep, orig, fullmm);
588     }
589    
590     diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
591     index 1e672234c4ff..5460bf923e16 100644
592     --- a/arch/x86/include/asm/pgtable.h
593     +++ b/arch/x86/include/asm/pgtable.h
594     @@ -415,9 +415,16 @@ static inline int pte_present(pte_t a)
595     }
596    
597     #define pte_accessible pte_accessible
598     -static inline int pte_accessible(pte_t a)
599     +static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
600     {
601     - return pte_flags(a) & _PAGE_PRESENT;
602     + if (pte_flags(a) & _PAGE_PRESENT)
603     + return true;
604     +
605     + if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
606     + mm_tlb_flush_pending(mm))
607     + return true;
608     +
609     + return false;
610     }
611    
612     static inline int pte_hidden(pte_t pte)
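
The new mm_tlb_flush_pending() clause matters at call sites that use pte_accessible() to decide whether a remote TLB shootdown can be skipped; a hedged sketch of such a caller:

	/* a PROTNONE/NUMA pte with a flush still pending may live in a
	 * remote CPU's TLB, so treat it as accessible and flush it */
	if (pte_accessible(mm, old_pte))
		flush_tlb_page(vma, address);
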
613     diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
614     index 33692eaabab5..e3ddd7db723f 100644
615     --- a/arch/x86/include/asm/spinlock.h
616     +++ b/arch/x86/include/asm/spinlock.h
617     @@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
618     #define arch_read_relax(lock) cpu_relax()
619     #define arch_write_relax(lock) cpu_relax()
620    
621     -/* The {read|write|spin}_lock() on x86 are full memory barriers. */
622     -static inline void smp_mb__after_lock(void) { }
623     -#define ARCH_HAS_SMP_MB_AFTER_LOCK
624     -
625     #endif /* _ASM_X86_SPINLOCK_H */
626     diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
627     index 9b0c441c03f5..32d37d9a7787 100644
628     --- a/arch/x86/kernel/cpu/intel.c
629     +++ b/arch/x86/kernel/cpu/intel.c
630     @@ -387,7 +387,8 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
631     set_cpu_cap(c, X86_FEATURE_PEBS);
632     }
633    
634     - if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
635     + if (c->x86 == 6 && cpu_has_clflush &&
636     + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
637     set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
638    
639     #ifdef CONFIG_X86_64
640     diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
641     index 0987c638db11..d0a97e808828 100644
642     --- a/arch/x86/kvm/lapic.c
643     +++ b/arch/x86/kvm/lapic.c
644     @@ -1364,6 +1364,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
645     return;
646     }
647    
648     + if (!kvm_vcpu_is_bsp(apic->vcpu))
649     + value &= ~MSR_IA32_APICBASE_BSP;
650     + vcpu->arch.apic_base = value;
651     +
652     /* update jump label if enable bit changes */
653     if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
654     if (value & MSR_IA32_APICBASE_ENABLE)
655     @@ -1373,10 +1377,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
656     recalculate_apic_map(vcpu->kvm);
657     }
658    
659     - if (!kvm_vcpu_is_bsp(apic->vcpu))
660     - value &= ~MSR_IA32_APICBASE_BSP;
661     -
662     - vcpu->arch.apic_base = value;
663     if ((old_value ^ value) & X2APIC_ENABLE) {
664     if (value & X2APIC_ENABLE) {
665     u32 id = kvm_apic_id(apic);
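
The hunk is purely about ordering: vcpu->arch.apic_base must already hold the new value when the enable/x2apic transitions below act on state derived from it. A hedged outline (the helper name is illustrative):

	vcpu->arch.apic_base = value;		/* commit the new base first */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		react_to_enable_change();	/* later checks now see it */
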
666     diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
667     index cd6aa28a8143..8cacd1693f03 100644
668     --- a/drivers/ata/libata-core.c
669     +++ b/drivers/ata/libata-core.c
670     @@ -2401,7 +2401,7 @@ int ata_dev_configure(struct ata_device *dev)
671     cdb_intr_string = ", CDB intr";
672     }
673    
674     - if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
675     + if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) {
676     dev->flags |= ATA_DFLAG_DMADIR;
677     dma_dir_string = ", DMADIR";
678     }
679     @@ -4140,6 +4140,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
680     { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
681     ATA_HORKAGE_FIRMWARE_WARN },
682    
683     + /* Seagate Momentus SpinPoint M8 seem to have FPDMA_AA issues */
684     + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
685     +
686     /* Blacklist entries taken from Silicon Image 3124/3132
687     Windows driver .inf file - also several Linux problem reports */
688     { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
689     @@ -6503,6 +6506,8 @@ static int __init ata_parse_force_one(char **cur,
690     { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
691     { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
692     { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
693     + { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
694     + { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
695     };
696     char *start = *cur, *p = *cur;
697     char *id, *val, *endp;
698     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
699     index 4abdbdff6943..81a353590b8a 100644
700     --- a/drivers/ata/libata-scsi.c
701     +++ b/drivers/ata/libata-scsi.c
702     @@ -3864,6 +3864,27 @@ void ata_scsi_hotplug(struct work_struct *work)
703     return;
704     }
705    
706     + /*
707     + * XXX - UGLY HACK
708     + *
709     + * The block layer suspend/resume path is fundamentally broken due
710     + * to freezable kthreads and workqueue and may deadlock if a block
711     + * device gets removed while resume is in progress. I don't know
712     + * what the solution is short of removing freezable kthreads and
713     + * workqueues altogether.
714     + *
715     + * The following is an ugly hack to avoid kicking off device
716     + * removal while freezer is active. This is a joke but does avoid
717     + * this particular deadlock scenario.
718     + *
719     + * https://bugzilla.kernel.org/show_bug.cgi?id=62801
720     + * http://marc.info/?l=linux-kernel&m=138695698516487
721     + */
722     +#ifdef CONFIG_FREEZER
723     + while (pm_freezing)
724     + msleep(10);
725     +#endif
726     +
727     DPRINTK("ENTER\n");
728     mutex_lock(&ap->scsi_scan_mutex);
729    
730     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
731     index 8c7421af8f15..c421fa528518 100644
732     --- a/drivers/block/rbd.c
733     +++ b/drivers/block/rbd.c
734     @@ -937,12 +937,14 @@ static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
735     u64 snap_id)
736     {
737     u32 which;
738     + const char *snap_name;
739    
740     which = rbd_dev_snap_index(rbd_dev, snap_id);
741     if (which == BAD_SNAP_INDEX)
742     - return NULL;
743     + return ERR_PTR(-ENOENT);
744    
745     - return _rbd_dev_v1_snap_name(rbd_dev, which);
746     + snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
747     + return snap_name ? snap_name : ERR_PTR(-ENOMEM);
748     }
749    
750     static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
751     @@ -1126,6 +1128,7 @@ static void zero_bio_chain(struct bio *chain, int start_ofs)
752     buf = bvec_kmap_irq(bv, &flags);
753     memset(buf + remainder, 0,
754     bv->bv_len - remainder);
755     + flush_dcache_page(bv->bv_page);
756     bvec_kunmap_irq(buf, &flags);
757     }
758     pos += bv->bv_len;
759     @@ -1158,6 +1161,7 @@ static void zero_pages(struct page **pages, u64 offset, u64 end)
760     local_irq_save(flags);
761     kaddr = kmap_atomic(*page);
762     memset(kaddr + page_offset, 0, length);
763     + flush_dcache_page(*page);
764     kunmap_atomic(kaddr);
765     local_irq_restore(flags);
766    
767     @@ -2171,9 +2175,9 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
768     struct rbd_obj_request *obj_request = NULL;
769     struct rbd_obj_request *next_obj_request;
770     bool write_request = img_request_write_test(img_request);
771     - struct bio *bio_list;
772     + struct bio *bio_list = 0;
773     unsigned int bio_offset = 0;
774     - struct page **pages;
775     + struct page **pages = 0;
776     u64 img_offset;
777     u64 resid;
778     u16 opcode;
779     @@ -2211,6 +2215,11 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
780     rbd_segment_name_free(object_name);
781     if (!obj_request)
782     goto out_unwind;
783     + /*
784     + * set obj_request->img_request before creating the
785     + * osd_request so that it gets the right snapc
786     + */
787     + rbd_img_obj_request_add(img_request, obj_request);
788    
789     if (type == OBJ_REQUEST_BIO) {
790     unsigned int clone_size;
791     @@ -2252,11 +2261,6 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
792     obj_request->pages, length,
793     offset & ~PAGE_MASK, false, false);
794    
795     - /*
796     - * set obj_request->img_request before formatting
797     - * the osd_request so that it gets the right snapc
798     - */
799     - rbd_img_obj_request_add(img_request, obj_request);
800     if (write_request)
801     rbd_osd_req_format_write(obj_request);
802     else
803     @@ -2817,7 +2821,7 @@ out_err:
804     obj_request_done_set(obj_request);
805     }
806    
807     -static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
808     +static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
809     {
810     struct rbd_obj_request *obj_request;
811     struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
812     @@ -2832,16 +2836,17 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
813     obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
814     if (!obj_request->osd_req)
815     goto out;
816     - obj_request->callback = rbd_obj_request_put;
817    
818     osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
819     notify_id, 0, 0);
820     rbd_osd_req_format_read(obj_request);
821    
822     ret = rbd_obj_request_submit(osdc, obj_request);
823     -out:
824     if (ret)
825     - rbd_obj_request_put(obj_request);
826     + goto out;
827     + ret = rbd_obj_request_wait(obj_request);
828     +out:
829     + rbd_obj_request_put(obj_request);
830    
831     return ret;
832     }
833     @@ -2861,7 +2866,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
834     if (ret)
835     rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
836    
837     - rbd_obj_notify_ack(rbd_dev, notify_id);
838     + rbd_obj_notify_ack_sync(rbd_dev, notify_id);
839     }
840    
841     /*
842     @@ -3333,6 +3338,31 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
843     clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
844     }
845    
846     +static void rbd_dev_update_size(struct rbd_device *rbd_dev)
847     +{
848     + sector_t size;
849     + bool removing;
850     +
851     + /*
852     + * Don't hold the lock while doing disk operations,
853     + * or lock ordering will conflict with the bdev mutex via:
854     + * rbd_add() -> blkdev_get() -> rbd_open()
855     + */
856     + spin_lock_irq(&rbd_dev->lock);
857     + removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
858     + spin_unlock_irq(&rbd_dev->lock);
859     + /*
860     + * If the device is being removed, rbd_dev->disk has
861     + * been destroyed, so don't try to update its size
862     + */
863     + if (!removing) {
864     + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
865     + dout("setting size to %llu sectors", (unsigned long long)size);
866     + set_capacity(rbd_dev->disk, size);
867     + revalidate_disk(rbd_dev->disk);
868     + }
869     +}
870     +
871     static int rbd_dev_refresh(struct rbd_device *rbd_dev)
872     {
873     u64 mapping_size;
874     @@ -3351,12 +3381,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
875     rbd_exists_validate(rbd_dev);
876     mutex_unlock(&ctl_mutex);
877     if (mapping_size != rbd_dev->mapping.size) {
878     - sector_t size;
879     -
880     - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
881     - dout("setting size to %llu sectors", (unsigned long long)size);
882     - set_capacity(rbd_dev->disk, size);
883     - revalidate_disk(rbd_dev->disk);
884     + rbd_dev_update_size(rbd_dev);
885     }
886    
887     return ret;
888     @@ -3710,12 +3735,14 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
889     if (ret < sizeof (size_buf))
890     return -ERANGE;
891    
892     - if (order)
893     + if (order) {
894     *order = size_buf.order;
895     + dout(" order %u", (unsigned int)*order);
896     + }
897     *snap_size = le64_to_cpu(size_buf.size);
898    
899     - dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
900     - (unsigned long long)snap_id, (unsigned int)*order,
901     + dout(" snap_id 0x%016llx snap_size = %llu\n",
902     + (unsigned long long)snap_id,
903     (unsigned long long)*snap_size);
904    
905     return 0;
906     @@ -4030,8 +4057,13 @@ static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
907    
908     snap_id = snapc->snaps[which];
909     snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
910     - if (IS_ERR(snap_name))
911     - break;
912     + if (IS_ERR(snap_name)) {
913     + /* ignore no-longer existing snapshots */
914     + if (PTR_ERR(snap_name) == -ENOENT)
915     + continue;
916     + else
917     + break;
918     + }
919     found = !strcmp(name, snap_name);
920     kfree(snap_name);
921     }
922     @@ -4110,8 +4142,8 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
923     /* Look up the snapshot name, and make a copy */
924    
925     snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
926     - if (!snap_name) {
927     - ret = -ENOMEM;
928     + if (IS_ERR(snap_name)) {
929     + ret = PTR_ERR(snap_name);
930     goto out_err;
931     }
932    
933     @@ -5059,23 +5091,6 @@ err_out_module:
934     return (ssize_t)rc;
935     }
936    
937     -static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
938     -{
939     - struct list_head *tmp;
940     - struct rbd_device *rbd_dev;
941     -
942     - spin_lock(&rbd_dev_list_lock);
943     - list_for_each(tmp, &rbd_dev_list) {
944     - rbd_dev = list_entry(tmp, struct rbd_device, node);
945     - if (rbd_dev->dev_id == dev_id) {
946     - spin_unlock(&rbd_dev_list_lock);
947     - return rbd_dev;
948     - }
949     - }
950     - spin_unlock(&rbd_dev_list_lock);
951     - return NULL;
952     -}
953     -
954     static void rbd_dev_device_release(struct device *dev)
955     {
956     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
957     @@ -5120,8 +5135,10 @@ static ssize_t rbd_remove(struct bus_type *bus,
958     size_t count)
959     {
960     struct rbd_device *rbd_dev = NULL;
961     - int target_id;
962     + struct list_head *tmp;
963     + int dev_id;
964     unsigned long ul;
965     + bool already = false;
966     int ret;
967    
968     ret = strict_strtoul(buf, 10, &ul);
969     @@ -5129,30 +5146,51 @@ static ssize_t rbd_remove(struct bus_type *bus,
970     return ret;
971    
972     /* convert to int; abort if we lost anything in the conversion */
973     - target_id = (int) ul;
974     - if (target_id != ul)
975     + dev_id = (int)ul;
976     + if (dev_id != ul)
977     return -EINVAL;
978    
979     mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
980    
981     - rbd_dev = __rbd_get_dev(target_id);
982     - if (!rbd_dev) {
983     - ret = -ENOENT;
984     - goto done;
985     + ret = -ENOENT;
986     + spin_lock(&rbd_dev_list_lock);
987     + list_for_each(tmp, &rbd_dev_list) {
988     + rbd_dev = list_entry(tmp, struct rbd_device, node);
989     + if (rbd_dev->dev_id == dev_id) {
990     + ret = 0;
991     + break;
992     + }
993     }
994     -
995     - spin_lock_irq(&rbd_dev->lock);
996     - if (rbd_dev->open_count)
997     - ret = -EBUSY;
998     - else
999     - set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
1000     - spin_unlock_irq(&rbd_dev->lock);
1001     - if (ret < 0)
1002     + if (!ret) {
1003     + spin_lock_irq(&rbd_dev->lock);
1004     + if (rbd_dev->open_count)
1005     + ret = -EBUSY;
1006     + else
1007     + already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
1008     + &rbd_dev->flags);
1009     + spin_unlock_irq(&rbd_dev->lock);
1010     + }
1011     + spin_unlock(&rbd_dev_list_lock);
1012     + if (ret < 0 || already)
1013     goto done;
1014     - rbd_bus_del_dev(rbd_dev);
1015     +
1016     ret = rbd_dev_header_watch_sync(rbd_dev, false);
1017     if (ret)
1018     rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
1019     +
1020     + /*
1021     + * flush remaining watch callbacks - these must be complete
1022     + * before the osd_client is shutdown
1023     + */
1024     + dout("%s: flushing notifies", __func__);
1025     + ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
1026     + /*
1027     + * Don't free anything from rbd_dev->disk until after all
1028     + * notifies are completely processed. Otherwise
1029     + * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
1030     + * in a potential use after free of rbd_dev->disk or rbd_dev.
1031     + */
1032     + rbd_bus_del_dev(rbd_dev);
1033     rbd_dev_image_release(rbd_dev);
1034     module_put(THIS_MODULE);
1035     ret = count;
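
Condensed, the ordering the new removal path enforces is a strict pipeline; a hedged summary (osdc abbreviates the osd_client pointer used above):

	set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);  /* fail new opens    */
	rbd_dev_header_watch_sync(rbd_dev, false);        /* stop new notifies */
	ceph_osdc_flush_notifies(osdc);                   /* drain in-flight   */
	rbd_bus_del_dev(rbd_dev);                         /* now safe to free  */
	rbd_dev_image_release(rbd_dev);
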
1036     diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
1037     index a2b254189782..053d846ab5b1 100644
1038     --- a/drivers/clocksource/arm_arch_timer.c
1039     +++ b/drivers/clocksource/arm_arch_timer.c
1040     @@ -186,27 +186,19 @@ u32 arch_timer_get_rate(void)
1041     return arch_timer_rate;
1042     }
1043    
1044     -/*
1045     - * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
1046     - * call it before it has been initialised. Rather than incur a performance
1047     - * penalty checking for initialisation, provide a default implementation that
1048     - * won't lead to time appearing to jump backwards.
1049     - */
1050     -static u64 arch_timer_read_zero(void)
1051     +u64 arch_timer_read_counter(void)
1052     {
1053     - return 0;
1054     + return arch_counter_get_cntvct();
1055     }
1056    
1057     -u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
1058     -
1059     static cycle_t arch_counter_read(struct clocksource *cs)
1060     {
1061     - return arch_timer_read_counter();
1062     + return arch_counter_get_cntvct();
1063     }
1064    
1065     static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
1066     {
1067     - return arch_timer_read_counter();
1068     + return arch_counter_get_cntvct();
1069     }
1070    
1071     static struct clocksource clocksource_counter = {
1072     @@ -287,7 +279,7 @@ static int __init arch_timer_register(void)
1073     cyclecounter.mult = clocksource_counter.mult;
1074     cyclecounter.shift = clocksource_counter.shift;
1075     timecounter_init(&timecounter, &cyclecounter,
1076     - arch_counter_get_cntpct());
1077     + arch_counter_get_cntvct());
1078    
1079     if (arch_timer_use_virtual) {
1080     ppi = arch_timer_ppi[VIRT_PPI];
1081     @@ -376,11 +368,6 @@ static void __init arch_timer_init(struct device_node *np)
1082     }
1083     }
1084    
1085     - if (arch_timer_use_virtual)
1086     - arch_timer_read_counter = arch_counter_get_cntvct;
1087     - else
1088     - arch_timer_read_counter = arch_counter_get_cntpct;
1089     -
1090     arch_timer_register();
1091     arch_timer_arch_init();
1092     }
1093     diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
1094     index 6b02eddc5f50..f22417cb0969 100644
1095     --- a/drivers/clocksource/dw_apb_timer_of.c
1096     +++ b/drivers/clocksource/dw_apb_timer_of.c
1097     @@ -77,7 +77,7 @@ static void __iomem *sched_io_base;
1098    
1099     static u32 read_sched_clock(void)
1100     {
1101     - return __raw_readl(sched_io_base);
1102     + return ~__raw_readl(sched_io_base);
1103     }
1104    
1105     static const struct of_device_id sptimer_ids[] __initconst = {
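
The timer behind sched_clock here is a down-counter, while sched_clock needs a value that increases; bitwise inversion converts one into the other with no extra arithmetic. A hedged sketch:

	/* for a 32-bit down-counter, ~raw equals 0xffffffff - raw, which
	 * grows monotonically as the hardware counts toward zero */
	static u32 up_count(u32 raw_down)
	{
		return ~raw_down;
	}
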
1106     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
1107     index 9520e3b90bde..7054c579d451 100644
1108     --- a/drivers/cpufreq/intel_pstate.c
1109     +++ b/drivers/cpufreq/intel_pstate.c
1110     @@ -543,6 +543,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
1111     cpu = all_cpu_data[cpunum];
1112    
1113     intel_pstate_get_cpu_pstates(cpu);
1114     + if (!cpu->pstate.current_pstate) {
1115     + all_cpu_data[cpunum] = NULL;
1116     + kfree(cpu);
1117     + return -ENODATA;
1118     + }
1119    
1120     cpu->cpu = cpunum;
1121     cpu->pstate_policy =
1122     diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
1123     index e9924898043a..0ba5a95199d3 100644
1124     --- a/drivers/dma/Kconfig
1125     +++ b/drivers/dma/Kconfig
1126     @@ -333,6 +333,7 @@ config NET_DMA
1127     bool "Network: TCP receive copy offload"
1128     depends on DMA_ENGINE && NET
1129     default (INTEL_IOATDMA || FSL_DMA)
1130     + depends on BROKEN
1131     help
1132     This enables the use of DMA engines in the network stack to
1133     offload receive copy-to-user operations, freeing CPU cycles.
1134     diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
1135     index c8e3b2c48369..47674b913843 100644
1136     --- a/drivers/firewire/sbp2.c
1137     +++ b/drivers/firewire/sbp2.c
1138     @@ -1626,7 +1626,6 @@ static struct scsi_host_template scsi_driver_template = {
1139     .cmd_per_lun = 1,
1140     .can_queue = 1,
1141     .sdev_attrs = sbp2_scsi_sysfs_attrs,
1142     - .no_write_same = 1,
1143     };
1144    
1145     MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
1146     diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
1147     index dd2eddeb1e0c..500c4d19322b 100644
1148     --- a/drivers/gpio/gpio-msm-v2.c
1149     +++ b/drivers/gpio/gpio-msm-v2.c
1150     @@ -248,7 +248,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
1151    
1152     spin_lock_irqsave(&tlmm_lock, irq_flags);
1153     writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
1154     - clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
1155     + clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
1156     __clear_bit(gpio, msm_gpio.enabled_irqs);
1157     spin_unlock_irqrestore(&tlmm_lock, irq_flags);
1158     }
1159     @@ -260,7 +260,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
1160    
1161     spin_lock_irqsave(&tlmm_lock, irq_flags);
1162     __set_bit(gpio, msm_gpio.enabled_irqs);
1163     - set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
1164     + set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
1165     writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
1166     spin_unlock_irqrestore(&tlmm_lock, irq_flags);
1167     }
1168     diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
1169     index ed1d10ae1999..28d987661146 100644
1170     --- a/drivers/gpio/gpio-twl4030.c
1171     +++ b/drivers/gpio/gpio-twl4030.c
1172     @@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
1173     if (offset < TWL4030_GPIO_MAX)
1174     ret = twl4030_set_gpio_direction(offset, 1);
1175     else
1176     - ret = -EINVAL;
1177     + ret = -EINVAL; /* LED outputs can't be set as input */
1178    
1179     if (!ret)
1180     priv->direction &= ~BIT(offset);
1181     @@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
1182     static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
1183     {
1184     struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
1185     - int ret = -EINVAL;
1186     + int ret = 0;
1187    
1188     mutex_lock(&priv->mutex);
1189     - if (offset < TWL4030_GPIO_MAX)
1190     + if (offset < TWL4030_GPIO_MAX) {
1191     ret = twl4030_set_gpio_direction(offset, 0);
1192     + if (ret) {
1193     + mutex_unlock(&priv->mutex);
1194     + return ret;
1195     + }
1196     + }
1197     +
1198     + /*
1199     + * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
1200     + */
1201    
1202     priv->direction |= BIT(offset);
1203     mutex_unlock(&priv->mutex);
1204     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1205     index 0cb9b5d8e30a..83f0ba5859c0 100644
1206     --- a/drivers/gpu/drm/drm_edid.c
1207     +++ b/drivers/gpu/drm/drm_edid.c
1208     @@ -68,6 +68,8 @@
1209     #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
1210     /* Force reduced-blanking timings for detailed modes */
1211     #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
1212     +/* Force 8bpc */
1213     +#define EDID_QUIRK_FORCE_8BPC (1 << 8)
1214    
1215     struct detailed_mode_closure {
1216     struct drm_connector *connector;
1217     @@ -128,6 +130,9 @@ static struct edid_quirk {
1218    
1219     /* Medion MD 30217 PG */
1220     { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
1221     +
1222     + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
1223     + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
1224     };
1225    
1226     /*
1227     @@ -2955,6 +2960,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
1228    
1229     drm_add_display_info(edid, &connector->display_info);
1230    
1231     + if (quirks & EDID_QUIRK_FORCE_8BPC)
1232     + connector->display_info.bpc = 8;
1233     +
1234     return num_modes;
1235     }
1236     EXPORT_SYMBOL(drm_add_edid_modes);
1237     diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
1238     index 17d9b0b6afc5..d9d7d675246a 100644
1239     --- a/drivers/gpu/drm/i915/i915_dma.c
1240     +++ b/drivers/gpu/drm/i915/i915_dma.c
1241     @@ -84,6 +84,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
1242     drm_i915_private_t *dev_priv = dev->dev_private;
1243     struct drm_i915_master_private *master_priv;
1244    
1245     + /*
1246     + * The dri breadcrumb update races against the drm master disappearing.
1247     + * Instead of trying to fix this (this is by far not the only ums issue)
1248     + * just don't do the update in kms mode.
1249     + */
1250     + if (drm_core_check_feature(dev, DRIVER_MODESET))
1251     + return;
1252     +
1253     if (dev->primary->master) {
1254     master_priv = dev->primary->master->driver_priv;
1255     if (master_priv->sarea_priv)
1256     @@ -1847,8 +1855,10 @@ void i915_driver_lastclose(struct drm_device * dev)
1257    
1258     void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
1259     {
1260     + mutex_lock(&dev->struct_mutex);
1261     i915_gem_context_close(dev, file_priv);
1262     i915_gem_release(dev, file_priv);
1263     + mutex_unlock(&dev->struct_mutex);
1264     }
1265    
1266     void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1267     diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
1268     index 3bc8a58a8d5f..b10b1b1b4873 100644
1269     --- a/drivers/gpu/drm/i915/i915_gem_context.c
1270     +++ b/drivers/gpu/drm/i915/i915_gem_context.c
1271     @@ -291,10 +291,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
1272     {
1273     struct drm_i915_file_private *file_priv = file->driver_priv;
1274    
1275     - mutex_lock(&dev->struct_mutex);
1276     idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
1277     idr_destroy(&file_priv->context_idr);
1278     - mutex_unlock(&dev->struct_mutex);
1279     }
1280    
1281     static struct i915_hw_context *
1282     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1283     index f1e600490bb2..15358add4f70 100644
1284     --- a/drivers/gpu/drm/i915/intel_display.c
1285     +++ b/drivers/gpu/drm/i915/intel_display.c
1286     @@ -9456,7 +9456,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
1287    
1288     intel_setup_overlay(dev);
1289    
1290     + drm_modeset_lock_all(dev);
1291     intel_modeset_setup_hw_state(dev, false);
1292     + drm_modeset_unlock_all(dev);
1293     }
1294    
1295     void intel_modeset_cleanup(struct drm_device *dev)
1296     @@ -9530,14 +9532,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
1297     int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
1298     {
1299     struct drm_i915_private *dev_priv = dev->dev_private;
1300     + unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
1301     u16 gmch_ctrl;
1302    
1303     - pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
1304     + pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
1305     if (state)
1306     gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
1307     else
1308     gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
1309     - pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
1310     + pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
1311     return 0;
1312     }
1313    
1314     diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1315     index 22f06c8d8e0b..5285a90e9f59 100644
1316     --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1317     +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1318     @@ -1176,7 +1176,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1319     if ((rdev->family == CHIP_TAHITI) ||
1320     (rdev->family == CHIP_PITCAIRN))
1321     fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1322     - else if (rdev->family == CHIP_VERDE)
1323     + else if ((rdev->family == CHIP_VERDE) ||
1324     + (rdev->family == CHIP_OLAND) ||
1325     + (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1326     fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1327    
1328     switch (radeon_crtc->crtc_id) {
1329     diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1330     index 3bf43a16adcc..4438d1b09325 100644
1331     --- a/drivers/gpu/drm/radeon/ni.c
1332     +++ b/drivers/gpu/drm/radeon/ni.c
1333     @@ -753,6 +753,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1334     (rdev->pdev->device == 0x999C)) {
1335     rdev->config.cayman.max_simds_per_se = 6;
1336     rdev->config.cayman.max_backends_per_se = 2;
1337     + rdev->config.cayman.max_hw_contexts = 8;
1338     + rdev->config.cayman.sx_max_export_size = 256;
1339     + rdev->config.cayman.sx_max_export_pos_size = 64;
1340     + rdev->config.cayman.sx_max_export_smx_size = 192;
1341     } else if ((rdev->pdev->device == 0x9903) ||
1342     (rdev->pdev->device == 0x9904) ||
1343     (rdev->pdev->device == 0x990A) ||
1344     @@ -763,6 +767,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1345     (rdev->pdev->device == 0x999D)) {
1346     rdev->config.cayman.max_simds_per_se = 4;
1347     rdev->config.cayman.max_backends_per_se = 2;
1348     + rdev->config.cayman.max_hw_contexts = 8;
1349     + rdev->config.cayman.sx_max_export_size = 256;
1350     + rdev->config.cayman.sx_max_export_pos_size = 64;
1351     + rdev->config.cayman.sx_max_export_smx_size = 192;
1352     } else if ((rdev->pdev->device == 0x9919) ||
1353     (rdev->pdev->device == 0x9990) ||
1354     (rdev->pdev->device == 0x9991) ||
1355     @@ -773,9 +781,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1356     (rdev->pdev->device == 0x99A0)) {
1357     rdev->config.cayman.max_simds_per_se = 3;
1358     rdev->config.cayman.max_backends_per_se = 1;
1359     + rdev->config.cayman.max_hw_contexts = 4;
1360     + rdev->config.cayman.sx_max_export_size = 128;
1361     + rdev->config.cayman.sx_max_export_pos_size = 32;
1362     + rdev->config.cayman.sx_max_export_smx_size = 96;
1363     } else {
1364     rdev->config.cayman.max_simds_per_se = 2;
1365     rdev->config.cayman.max_backends_per_se = 1;
1366     + rdev->config.cayman.max_hw_contexts = 4;
1367     + rdev->config.cayman.sx_max_export_size = 128;
1368     + rdev->config.cayman.sx_max_export_pos_size = 32;
1369     + rdev->config.cayman.sx_max_export_smx_size = 96;
1370     }
1371     rdev->config.cayman.max_texture_channel_caches = 2;
1372     rdev->config.cayman.max_gprs = 256;
1373     @@ -783,10 +799,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1374     rdev->config.cayman.max_gs_threads = 32;
1375     rdev->config.cayman.max_stack_entries = 512;
1376     rdev->config.cayman.sx_num_of_sets = 8;
1377     - rdev->config.cayman.sx_max_export_size = 256;
1378     - rdev->config.cayman.sx_max_export_pos_size = 64;
1379     - rdev->config.cayman.sx_max_export_smx_size = 192;
1380     - rdev->config.cayman.max_hw_contexts = 8;
1381     rdev->config.cayman.sq_num_cf_insts = 2;
1382    
1383     rdev->config.cayman.sc_prim_fifo_size = 0x40;
1384     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1385     index a809b1733e7b..21d2d5280fc1 100644
1386     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1387     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1388     @@ -460,7 +460,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
1389     return -EINVAL;
1390     }
1391    
1392     - if ((start >> 28) != (end >> 28)) {
1393     + if ((start >> 28) != ((end - 1) >> 28)) {
1394     DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
1395     start, end);
1396     return -EINVAL;
1397     diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
1398     index 55880d5962c3..ea28ecbd5c79 100644
1399     --- a/drivers/gpu/drm/radeon/rs690.c
1400     +++ b/drivers/gpu/drm/radeon/rs690.c
1401     @@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
1402     base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
1403     base = G_000100_MC_FB_START(base) << 16;
1404     rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1405     + /* Some boards seem to be configured for 128MB of sideport memory,
1406     + * but really only have 64MB. Just skip the sideport and use
1407     + * UMA memory.
1408     + */
1409     + if (rdev->mc.igp_sideport_enabled &&
1410     + (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
1411     + base += 128 * 1024 * 1024;
1412     + rdev->mc.real_vram_size -= 128 * 1024 * 1024;
1413     + rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1414     + }
1415    
1416     /* Use K8 direct mapping for fast fb access. */
1417     rdev->fastfb_working = false;
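
The quirk itself is plain address arithmetic: on the affected boards the MC reports 384MB, the first 128MB of which is sideport memory that is not fully there, so the driver advances the base past that range and keeps only the remaining 256MB of UMA. A toy rerun of the adjustment, with an invented framebuffer base:

#include <stdio.h>

#define MB (1024ULL * 1024ULL)

int main(void)
{
        unsigned long long base = 0x20000000ULL; /* invented FB start */
        unsigned long long real_vram = 384 * MB; /* the quirked case */
        int sideport_enabled = 1;

        if (sideport_enabled && real_vram == 384 * MB) {
                base += 128 * MB;      /* skip the phantom sideport */
                real_vram -= 128 * MB; /* 256MB of UMA remains */
        }
        printf("base=0x%llx vram=%lluMB\n", base, real_vram / MB);
        return 0;
}
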
1418     diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
1419     index f116d664b473..d47bb0f267f7 100644
1420     --- a/drivers/idle/intel_idle.c
1421     +++ b/drivers/idle/intel_idle.c
1422     @@ -361,6 +361,9 @@ static int intel_idle(struct cpuidle_device *dev,
1423    
1424     if (!current_set_polling_and_test()) {
1425    
1426     + if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
1427     + clflush((void *)&current_thread_info()->flags);
1428     +
1429     __monitor((void *)&current_thread_info()->flags, 0, 0);
1430     smp_mb();
1431     if (!need_resched())
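
On CPUs flagged with X86_FEATURE_CLFLUSH_MONITOR (a MONITOR/MWAIT erratum), the monitored cache line must be flushed before the monitor is armed, otherwise a store to thread_info->flags can fail to wake the core out of MWAIT. A rough user-space model of the resulting sequence; clflush_line(), arm_monitor() and mwait_idle() are hypothetical stand-ins for the kernel's clflush()/__monitor()/__mwait():

#include <stdio.h>

static volatile unsigned long thread_flags; /* the monitored word */

static void clflush_line(const volatile void *p) { (void)p; puts("clflush"); }
static void arm_monitor(const volatile void *p)  { (void)p; puts("monitor"); }
static void mwait_idle(void)                     { puts("mwait"); }

static void idle_enter(int has_clflush_monitor_erratum)
{
        /* the erratum: without the flush, the write that should
         * break MWAIT may not be observed by the monitor */
        if (has_clflush_monitor_erratum)
                clflush_line(&thread_flags);

        arm_monitor(&thread_flags);
        /* barrier + need_resched() check go here in the real code */
        mwait_idle();
}

int main(void)
{
        idle_enter(1);
        return 0;
}
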
1432     diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
1433     index dd15a5b0f701..3394ebd410c7 100644
1434     --- a/drivers/iio/adc/ad7887.c
1435     +++ b/drivers/iio/adc/ad7887.c
1436     @@ -211,7 +211,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
1437     .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1438     .address = 1,
1439     .scan_index = 1,
1440     - .scan_type = IIO_ST('u', 12, 16, 0),
1441     + .scan_type = {
1442     + .sign = 'u',
1443     + .realbits = 12,
1444     + .storagebits = 16,
1445     + .shift = 0,
1446     + .endianness = IIO_BE,
1447     + },
1448     },
1449     .channel[1] = {
1450     .type = IIO_VOLTAGE,
1451     @@ -221,7 +227,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
1452     .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1453     .address = 0,
1454     .scan_index = 0,
1455     - .scan_type = IIO_ST('u', 12, 16, 0),
1456     + .scan_type = {
1457     + .sign = 'u',
1458     + .realbits = 12,
1459     + .storagebits = 16,
1460     + .shift = 0,
1461     + .endianness = IIO_BE,
1462     + },
1463     },
1464     .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
1465     .int_vref_mv = 2500,
1466     diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
1467     index f60591f0b925..e58c529aacdf 100644
1468     --- a/drivers/iio/imu/adis16400_core.c
1469     +++ b/drivers/iio/imu/adis16400_core.c
1470     @@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
1471     .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1472     .address = ADIS16448_BARO_OUT,
1473     .scan_index = ADIS16400_SCAN_BARO,
1474     - .scan_type = IIO_ST('s', 16, 16, 0),
1475     + .scan_type = {
1476     + .sign = 's',
1477     + .realbits = 16,
1478     + .storagebits = 16,
1479     + .endianness = IIO_BE,
1480     + },
1481     },
1482     ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
1483     IIO_CHAN_SOFT_TIMESTAMP(11)
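
Both scan_type conversions are more than cosmetic: the retired IIO_ST(sign, realbits, storagebits, shift) helper had no endianness argument, so these SPI devices' big-endian samples were implicitly described as CPU-endian. Writing the struct out lets .endianness = IIO_BE be stated. A compilable sketch of the difference, using a simplified local mirror of the kernel's scan_type:

#include <stdio.h>

enum endianness { ENDIAN_CPU = 0, ENDIAN_BE, ENDIAN_LE };

struct scan_type {                     /* simplified stand-in */
        char sign;
        int realbits;
        int storagebits;
        int shift;
        enum endianness endianness;
};

/* what IIO_ST('u', 12, 16, 0) amounted to: endianness silently
 * left at the zero default, i.e. CPU order */
static const struct scan_type old_style = { 'u', 12, 16, 0, ENDIAN_CPU };

/* the designated-initializer form used by the fix */
static const struct scan_type new_style = {
        .sign        = 'u',
        .realbits    = 12,
        .storagebits = 16,
        .shift       = 0,
        .endianness  = ENDIAN_BE, /* samples arrive big-endian */
};

int main(void)
{
        printf("old=%d new=%d\n", old_style.endianness, new_style.endianness);
        return 0;
}
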
1484     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1485     index 5849dc0726b9..6fc283a041d6 100644
1486     --- a/drivers/infiniband/ulp/isert/ib_isert.c
1487     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1488     @@ -242,21 +242,29 @@ isert_create_device_ib_res(struct isert_device *device)
1489     isert_cq_event_callback,
1490     (void *)&cq_desc[i],
1491     ISER_MAX_RX_CQ_LEN, i);
1492     - if (IS_ERR(device->dev_rx_cq[i]))
1493     + if (IS_ERR(device->dev_rx_cq[i])) {
1494     + ret = PTR_ERR(device->dev_rx_cq[i]);
1495     + device->dev_rx_cq[i] = NULL;
1496     goto out_cq;
1497     + }
1498    
1499     device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
1500     isert_cq_tx_callback,
1501     isert_cq_event_callback,
1502     (void *)&cq_desc[i],
1503     ISER_MAX_TX_CQ_LEN, i);
1504     - if (IS_ERR(device->dev_tx_cq[i]))
1505     + if (IS_ERR(device->dev_tx_cq[i])) {
1506     + ret = PTR_ERR(device->dev_tx_cq[i]);
1507     + device->dev_tx_cq[i] = NULL;
1508     goto out_cq;
1509     + }
1510    
1511     - if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
1512     + ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
1513     + if (ret)
1514     goto out_cq;
1515    
1516     - if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
1517     + ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
1518     + if (ret)
1519     goto out_cq;
1520     }
1521    
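
This is the standard ERR_PTR unwind pattern: ib_create_cq() encodes its error in the returned pointer, and jumping to out_cq with that value still stored would let the cleanup loop hand a non-pointer to ib_destroy_cq(). Capturing PTR_ERR() and NULLing the slot first keeps the unwind safe, and the notify calls now propagate a real errno instead of falling through silently. A self-contained model, with user-space stand-ins for the ERR_PTR helpers and an invented resource type:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct cq { int id; };                  /* invented resource */

static struct cq *create_cq(int i)      /* made to fail at i == 1 */
{
        struct cq *c;

        if (i == 1)
                return ERR_PTR(-ENOMEM);
        c = malloc(sizeof(*c));
        if (!c)
                return ERR_PTR(-ENOMEM);
        c->id = i;
        return c;
}

int main(void)
{
        struct cq *cqs[4] = { 0 };
        int i, ret = 0;

        for (i = 0; i < 4; i++) {
                cqs[i] = create_cq(i);
                if (IS_ERR(cqs[i])) {
                        ret = PTR_ERR(cqs[i]);
                        cqs[i] = NULL; /* unwind must only ever see
                                        * real pointers or NULL */
                        goto out;
                }
        }
out:
        for (i = 0; i < 4; i++)
                free(cqs[i]);           /* free(NULL) is a no-op */
        printf("ret=%d\n", ret);
        return 0;
}
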
1522     diff --git a/drivers/input/input.c b/drivers/input/input.c
1523     index c04469928925..66984e272c45 100644
1524     --- a/drivers/input/input.c
1525     +++ b/drivers/input/input.c
1526     @@ -1866,6 +1866,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
1527     break;
1528    
1529     case EV_ABS:
1530     + input_alloc_absinfo(dev);
1531     + if (!dev->absinfo)
1532     + return;
1533     +
1534     __set_bit(code, dev->absbit);
1535     break;
1536    
1537     diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1538     index 8ee9d1556e6e..263dd921edc4 100644
1539     --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1540     +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1541     @@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
1542     /* set LED in default state (end of init phase) */
1543     pcan_usb_pro_set_led(dev, 0, 1);
1544    
1545     + kfree(bi);
1546     + kfree(fi);
1547     +
1548     return 0;
1549    
1550     err_out:
1551     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1552     index b51560d56590..36a0b438e65e 100644
1553     --- a/drivers/net/ethernet/broadcom/tg3.c
1554     +++ b/drivers/net/ethernet/broadcom/tg3.c
1555     @@ -7482,7 +7482,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
1556     {
1557     u32 base = (u32) mapping & 0xffffffff;
1558    
1559     - return (base > 0xffffdcc0) && (base + len + 8 < base);
1560     + return base + len + 8 < base;
1561     }
1562    
1563     /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
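
The simplified test reads naturally once you note the sum is 32-bit unsigned: base + len + 8 comes out smaller than base exactly when the buffer plus 8 bytes of slack wraps past the 4GB line, and the dropped base > 0xffffdcc0 pre-filter could only hide wraps that start below that magic value. Worked numbers, all invented:

#include <stdint.h>
#include <stdio.h>

static int overflows_4g(uint32_t base, uint32_t len)
{
        return base + len + 8 < base; /* true iff the sum wrapped */
}

int main(void)
{
        printf("%d\n", overflows_4g(0xffffff00u, 0x100));       /* 1: wraps */
        printf("%d\n", overflows_4g(0xffff0000u, 0x100));       /* 0: fits */
        /* a case the old pre-filter missed: a large buffer wrapping
         * from a base below 0xffffdcc0 */
        printf("%d\n", overflows_4g(0xf0000000u, 0x10000000u)); /* 1 */
        return 0;
}
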
1564     diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
1565     index c6867f926cff..c0bfc818c701 100644
1566     --- a/drivers/net/usb/dm9601.c
1567     +++ b/drivers/net/usb/dm9601.c
1568     @@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
1569     dev->net->ethtool_ops = &dm9601_ethtool_ops;
1570     dev->net->hard_header_len += DM_TX_OVERHEAD;
1571     dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1572     - dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
1573     +
1574     + /* dm9620/21a require room for 4 byte padding, even in dm9601
1575     + * mode, so we need +1 to be able to receive full size
1576     + * ethernet frames.
1577     + */
1578     + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
1579    
1580     dev->mii.dev = dev->net;
1581     dev->mii.mdio_read = dm9601_mdio_read;
1582     @@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1583     static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1584     gfp_t flags)
1585     {
1586     - int len;
1587     + int len, pad;
1588    
1589     /* format:
1590     b1: packet length low
1591     @@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1592     b3..n: packet data
1593     */
1594    
1595     - len = skb->len;
1596     + len = skb->len + DM_TX_OVERHEAD;
1597    
1598     - if (skb_headroom(skb) < DM_TX_OVERHEAD) {
1599     + /* workaround for dm962x errata with tx fifo getting out of
1600     + * sync if a USB bulk transfer retry happens right after a
1601     + * packet with odd / maxpacket length by adding up to 3 bytes
1602     + * padding.
1603     + */
1604     + while ((len & 1) || !(len % dev->maxpacket))
1605     + len++;
1606     +
1607     + len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
1608     + pad = len - skb->len;
1609     +
1610     + if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
1611     struct sk_buff *skb2;
1612    
1613     - skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
1614     + skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
1615     dev_kfree_skb_any(skb);
1616     skb = skb2;
1617     if (!skb)
1618     @@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1619    
1620     __skb_push(skb, DM_TX_OVERHEAD);
1621    
1622     - /* usbnet adds padding if length is a multiple of packet size
1623     - if so, adjust length value in header */
1624     - if ((skb->len % dev->maxpacket) == 0)
1625     - len++;
1626     + if (pad) {
1627     + memset(skb->data + skb->len, 0, pad);
1628     + __skb_put(skb, pad);
1629     + }
1630    
1631     skb->data[0] = len;
1632     skb->data[1] = len >> 8;
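
The padding rule is easiest to see with numbers: the full USB transfer (payload plus the 2-byte length header) is bumped until it is both even and not an exact multiple of the endpoint's max packet size, and whatever was added comes back as zero padding (at most 3 bytes, matching the comment above). A standalone rerun of the computation with invented frame sizes and maxpacket = 64:

#include <stdio.h>

#define DM_TX_OVERHEAD 2 /* the driver's 2-byte length header */

static int tx_pad_bytes(int skb_len, int maxpacket)
{
        int len = skb_len + DM_TX_OVERHEAD;

        /* dm962x erratum: avoid odd totals and exact multiples of
         * maxpacket, or the tx fifo can desync on a bulk retry */
        while ((len & 1) || !(len % maxpacket))
                len++;

        return len - DM_TX_OVERHEAD - skb_len;
}

int main(void)
{
        /* 62-byte frame: 64 is a multiple -> 65 is odd -> 66, pad 2 */
        printf("pad=%d\n", tx_pad_bytes(62, 64));
        /* 61-byte frame: 63 odd -> 64 multiple -> 65 odd -> 66, pad 3 */
        printf("pad=%d\n", tx_pad_bytes(61, 64));
        return 0;
}
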
1633     diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1634     index 8d78253c26ce..a366d6b4626f 100644
1635     --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1636     +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1637     @@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1638     mask2 |= ATH9K_INT_CST;
1639     if (isr2 & AR_ISR_S2_TSFOOR)
1640     mask2 |= ATH9K_INT_TSFOOR;
1641     +
1642     + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1643     + REG_WRITE(ah, AR_ISR_S2, isr2);
1644     + isr &= ~AR_ISR_BCNMISC;
1645     + }
1646     }
1647    
1648     - isr = REG_READ(ah, AR_ISR_RAC);
1649     + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
1650     + isr = REG_READ(ah, AR_ISR_RAC);
1651     +
1652     if (isr == 0xffffffff) {
1653     *masked = 0;
1654     return false;
1655     @@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1656    
1657     *masked |= ATH9K_INT_TX;
1658    
1659     - s0_s = REG_READ(ah, AR_ISR_S0_S);
1660     + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
1661     + s0_s = REG_READ(ah, AR_ISR_S0_S);
1662     + s1_s = REG_READ(ah, AR_ISR_S1_S);
1663     + } else {
1664     + s0_s = REG_READ(ah, AR_ISR_S0);
1665     + REG_WRITE(ah, AR_ISR_S0, s0_s);
1666     + s1_s = REG_READ(ah, AR_ISR_S1);
1667     + REG_WRITE(ah, AR_ISR_S1, s1_s);
1668     +
1669     + isr &= ~(AR_ISR_TXOK |
1670     + AR_ISR_TXDESC |
1671     + AR_ISR_TXERR |
1672     + AR_ISR_TXEOL);
1673     + }
1674     +
1675     ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
1676     ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
1677     -
1678     - s1_s = REG_READ(ah, AR_ISR_S1_S);
1679     ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
1680     ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
1681     }
1682     @@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1683     *masked |= mask2;
1684     }
1685    
1686     - if (AR_SREV_9100(ah))
1687     - return true;
1688     -
1689     - if (isr & AR_ISR_GENTMR) {
1690     + if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
1691     u32 s5_s;
1692    
1693     - s5_s = REG_READ(ah, AR_ISR_S5_S);
1694     + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
1695     + s5_s = REG_READ(ah, AR_ISR_S5_S);
1696     + } else {
1697     + s5_s = REG_READ(ah, AR_ISR_S5);
1698     + }
1699     +
1700     ah->intr_gen_timer_trigger =
1701     MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
1702    
1703     @@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1704     if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
1705     !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1706     *masked |= ATH9K_INT_TIM_TIMER;
1707     +
1708     + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1709     + REG_WRITE(ah, AR_ISR_S5, s5_s);
1710     + isr &= ~AR_ISR_GENTMR;
1711     + }
1712     }
1713    
1714     + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1715     + REG_WRITE(ah, AR_ISR, isr);
1716     + REG_READ(ah, AR_ISR);
1717     + }
1718     +
1719     + if (AR_SREV_9100(ah))
1720     + return true;
1721     +
1722     if (sync_cause) {
1723     ath9k_debug_sync_cause(common, sync_cause);
1724     fatal_int =
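
The two branches differ in how status bits are acknowledged: with ATH9K_HW_CAP_RAC_SUPPORTED, reading the *_S shadow registers clears the live status as a hardware side effect; without it (the AR9100 case) the handler must read the primary registers and write the observed bits back, write-one-to-clear style, acking exactly what it saw. A toy model of the two schemes, with the register simulated as a variable:

#include <stdio.h>

static unsigned int isr_reg = 0x5a; /* invented pending bits */

static unsigned int read_rac_shadow(void) /* REG_READ(ah, AR_ISR_RAC) */
{
        unsigned int v = isr_reg;
        isr_reg = 0; /* hardware clears on shadow read */
        return v;
}

static unsigned int read_and_ack(void) /* non-RAC path */
{
        unsigned int v = isr_reg;  /* REG_READ(ah, AR_ISR) */
        isr_reg &= ~v;             /* REG_WRITE(ah, AR_ISR, v): W1C */
        return v;
}

int main(void)
{
        int rac_supported = 0; /* e.g. AR9100 */
        unsigned int isr = rac_supported ? read_rac_shadow()
                                         : read_and_ack();
        printf("handled 0x%x, now pending 0x%x\n", isr, isr_reg);
        return 0;
}
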
1725     diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1726     index 62f1b7636c92..21e7edc7207c 100644
1727     --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1728     +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1729     @@ -145,21 +145,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1730     struct ath9k_vif_iter_data *iter_data = data;
1731     int i;
1732    
1733     - for (i = 0; i < ETH_ALEN; i++)
1734     - iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
1735     + if (iter_data->hw_macaddr != NULL) {
1736     + for (i = 0; i < ETH_ALEN; i++)
1737     + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
1738     + } else {
1739     + iter_data->hw_macaddr = mac;
1740     + }
1741     }
1742    
1743     -static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
1744     +static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
1745     struct ieee80211_vif *vif)
1746     {
1747     struct ath_common *common = ath9k_hw_common(priv->ah);
1748     struct ath9k_vif_iter_data iter_data;
1749    
1750     /*
1751     - * Use the hardware MAC address as reference, the hardware uses it
1752     - * together with the BSSID mask when matching addresses.
1753     + * Pick the MAC address of the first interface as the new hardware
1754     + * MAC address. The hardware will use it together with the BSSID mask
1755     + * when matching addresses.
1756     */
1757     - iter_data.hw_macaddr = common->macaddr;
1758     + iter_data.hw_macaddr = NULL;
1759     memset(&iter_data.mask, 0xff, ETH_ALEN);
1760    
1761     if (vif)
1762     @@ -171,6 +176,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
1763     ath9k_htc_bssid_iter, &iter_data);
1764    
1765     memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1766     +
1767     + if (iter_data.hw_macaddr)
1768     + memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
1769     +
1770     ath_hw_setbssidmask(common);
1771     }
1772    
1773     @@ -1076,7 +1085,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1774     goto out;
1775     }
1776    
1777     - ath9k_htc_set_bssid_mask(priv, vif);
1778     + ath9k_htc_set_mac_bssid_mask(priv, vif);
1779    
1780     priv->vif_slot |= (1 << avp->index);
1781     priv->nvifs++;
1782     @@ -1139,7 +1148,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1783    
1784     ath9k_htc_set_opmode(priv);
1785    
1786     - ath9k_htc_set_bssid_mask(priv, vif);
1787     + ath9k_htc_set_mac_bssid_mask(priv, vif);
1788    
1789     /*
1790     * Stop ANI only if there are no associated station interfaces.
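
The mask logic both of these call sites rely on: the hardware accepts a frame when (addr & bssidmask) == (macaddr & bssidmask), so the iterator clears exactly the mask bits in which each additional interface's MAC differs from the reference address. Worked example with two invented MACs that differ in a single bit:

#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
        unsigned char ref[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char vif2[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x75 };
        unsigned char mask[ETH_ALEN];
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                mask[i] = 0xff;                 /* start fully strict */
        for (i = 0; i < ETH_ALEN; i++)
                mask[i] &= ~(ref[i] ^ vif2[i]); /* ignore differing bits */

        /* 0x55 ^ 0x75 = 0x20, so the result is ff:ff:ff:ff:ff:df:
         * bit 5 of the last byte becomes "don't care" */
        for (i = 0; i < ETH_ALEN; i++)
                printf("%02x%c", mask[i], i == ETH_ALEN - 1 ? '\n' : ':');
        return 0;
}
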
1791     diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1792     index a8fee08479ef..82a1b5b16b62 100644
1793     --- a/drivers/net/wireless/ath/ath9k/main.c
1794     +++ b/drivers/net/wireless/ath/ath9k/main.c
1795     @@ -889,8 +889,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
1796     struct ath_common *common = ath9k_hw_common(ah);
1797    
1798     /*
1799     - * Use the hardware MAC address as reference, the hardware uses it
1800     - * together with the BSSID mask when matching addresses.
1801     + * Pick the MAC address of the first interface as the new hardware
1802     + * MAC address. The hardware will use it together with the BSSID mask
1803     + * when matching addresses.
1804     */
1805     memset(iter_data, 0, sizeof(*iter_data));
1806     memset(&iter_data->mask, 0xff, ETH_ALEN);
1807     diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
1808     index e70b4ffaf97f..ee6a62616c72 100644
1809     --- a/drivers/net/wireless/rtlwifi/pci.c
1810     +++ b/drivers/net/wireless/rtlwifi/pci.c
1811     @@ -734,6 +734,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
1812     };
1813     int index = rtlpci->rx_ring[rx_queue_idx].idx;
1814    
1815     + if (rtlpci->driver_is_goingto_unload)
1816     + return;
1817     /*RX NORMAL PKT */
1818     while (count--) {
1819     /*rx descriptor */
1820     @@ -1630,6 +1632,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1821     */
1822     set_hal_stop(rtlhal);
1823    
1824     + rtlpci->driver_is_goingto_unload = true;
1825     rtlpriv->cfg->ops->disable_interrupt(hw);
1826     cancel_work_sync(&rtlpriv->works.lps_change_work);
1827    
1828     @@ -1647,7 +1650,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1829     ppsc->rfchange_inprogress = true;
1830     spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1831    
1832     - rtlpci->driver_is_goingto_unload = true;
1833     rtlpriv->cfg->ops->hw_disable(hw);
1834     /* some things are not needed if firmware not available */
1835     if (!rtlpriv->max_fw_size)
1836     diff --git a/drivers/of/address.c b/drivers/of/address.c
1837     index 7c8221d36329..9e69b4f0d204 100644
1838     --- a/drivers/of/address.c
1839     +++ b/drivers/of/address.c
1840     @@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
1841     (unsigned long long)cp, (unsigned long long)s,
1842     (unsigned long long)da);
1843    
1844     - /*
1845     - * If the number of address cells is larger than 2 we assume the
1846     - * mapping doesn't specify a physical address. Rather, the address
1847     - * specifies an identifier that must match exactly.
1848     - */
1849     - if (na > 2 && memcmp(range, addr, na * 4) != 0)
1850     - return OF_BAD_ADDR;
1851     -
1852     if (da < cp || da >= (cp + s))
1853     return OF_BAD_ADDR;
1854     return da - cp;
1855     diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
1856     index cee69dac3e18..4dd71ca0269c 100644
1857     --- a/drivers/s390/char/tty3270.c
1858     +++ b/drivers/s390/char/tty3270.c
1859     @@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
1860     return rc;
1861     }
1862    
1863     - tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
1864     + tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
1865     if (IS_ERR(tp->screen)) {
1866     rc = PTR_ERR(tp->screen);
1867     raw3270_put_view(&tp->view);
1868     diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
1869     index 76dec96aeb2a..05bcf0dffb8c 100644
1870     --- a/drivers/staging/comedi/drivers/8255_pci.c
1871     +++ b/drivers/staging/comedi/drivers/8255_pci.c
1872     @@ -66,7 +66,8 @@ enum pci_8255_boardid {
1873     BOARD_ADLINK_PCI7296,
1874     BOARD_CB_PCIDIO24,
1875     BOARD_CB_PCIDIO24H,
1876     - BOARD_CB_PCIDIO48H,
1877     + BOARD_CB_PCIDIO48H_OLD,
1878     + BOARD_CB_PCIDIO48H_NEW,
1879     BOARD_CB_PCIDIO96H,
1880     BOARD_NI_PCIDIO96,
1881     BOARD_NI_PCIDIO96B,
1882     @@ -109,11 +110,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
1883     .dio_badr = 2,
1884     .n_8255 = 1,
1885     },
1886     - [BOARD_CB_PCIDIO48H] = {
1887     + [BOARD_CB_PCIDIO48H_OLD] = {
1888     .name = "cb_pci-dio48h",
1889     .dio_badr = 1,
1890     .n_8255 = 2,
1891     },
1892     + [BOARD_CB_PCIDIO48H_NEW] = {
1893     + .name = "cb_pci-dio48h",
1894     + .dio_badr = 2,
1895     + .n_8255 = 2,
1896     + },
1897     [BOARD_CB_PCIDIO96H] = {
1898     .name = "cb_pci-dio96h",
1899     .dio_badr = 2,
1900     @@ -270,7 +276,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
1901     { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
1902     { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
1903     { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
1904     - { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
1905     + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
1906     + .driver_data = BOARD_CB_PCIDIO48H_OLD },
1907     + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
1908     + .driver_data = BOARD_CB_PCIDIO48H_NEW },
1909     { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
1910     { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
1911     { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
1912     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1913     index 4c1b8dbdcb36..b71a69750607 100644
1914     --- a/drivers/target/iscsi/iscsi_target.c
1915     +++ b/drivers/target/iscsi/iscsi_target.c
1916     @@ -838,24 +838,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
1917     if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
1918     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
1919     /*
1920     - * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
1921     - * that adds support for RESERVE/RELEASE. There is a bug
1922     - * add with this new functionality that sets R/W bits when
1923     - * neither CDB carries any READ or WRITE datapayloads.
1924     + * From RFC-3720 Section 10.3.1:
1925     + *
1926     + * "Either or both of R and W MAY be 1 when either the
1927     + * Expected Data Transfer Length and/or Bidirectional Read
1928     + * Expected Data Transfer Length are 0"
1929     + *
1930     + * For this case, go ahead and clear the unnecessary bits
1931     + * to avoid any confusion with ->data_direction.
1932     */
1933     - if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
1934     - hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1935     - hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1936     - goto done;
1937     - }
1938     + hdr->flags &= ~ISCSI_FLAG_CMD_READ;
1939     + hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1940    
1941     - pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1942     + pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1943     " set when Expected Data Transfer Length is 0 for"
1944     - " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
1945     - return iscsit_add_reject_cmd(cmd,
1946     - ISCSI_REASON_BOOKMARK_INVALID, buf);
1947     + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1948     }
1949     -done:
1950    
1951     if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
1952     !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
1953     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1954     index 4630481b6043..660b109487ae 100644
1955     --- a/drivers/target/target_core_device.c
1956     +++ b/drivers/target/target_core_device.c
1957     @@ -1078,6 +1078,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
1958     dev->dev_attrib.block_size = block_size;
1959     pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1960     dev, block_size);
1961     +
1962     + if (dev->dev_attrib.max_bytes_per_io)
1963     + dev->dev_attrib.hw_max_sectors =
1964     + dev->dev_attrib.max_bytes_per_io / block_size;
1965     +
1966     return 0;
1967     }
1968    
1969     diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
1970     index b11890d85120..3b2879316b87 100644
1971     --- a/drivers/target/target_core_file.c
1972     +++ b/drivers/target/target_core_file.c
1973     @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
1974     pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
1975     " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
1976     TARGET_CORE_MOD_VERSION);
1977     - pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
1978     - " MaxSectors: %u\n",
1979     - hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
1980     + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
1981     + hba->hba_id, fd_host->fd_host_id);
1982    
1983     return 0;
1984     }
1985     @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
1986     }
1987    
1988     dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
1989     - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
1990     + dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
1991     + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
1992     dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
1993    
1994     if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
1995     diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
1996     index 37ffc5bd2399..d7772c167685 100644
1997     --- a/drivers/target/target_core_file.h
1998     +++ b/drivers/target/target_core_file.h
1999     @@ -7,7 +7,10 @@
2000     #define FD_DEVICE_QUEUE_DEPTH 32
2001     #define FD_MAX_DEVICE_QUEUE_DEPTH 128
2002     #define FD_BLOCKSIZE 512
2003     -#define FD_MAX_SECTORS 2048
2004     +/*
2005     + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
2006     + */
2007     +#define FD_MAX_BYTES 8388608
2008    
2009     #define RRF_EMULATE_CDB 0x01
2010     #define RRF_GOT_LBA 0x02
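
The arithmetic behind the new limit: vfs_writev()/vfs_readv() accept at most 2048 iovecs, and at one 4096-byte page per iovec that caps a single backend call at 2048 * 4096 = 8388608 bytes. hw_max_sectors is then derived from that byte ceiling and the active block size (and re-derived in se_dev_set_block_size() above), instead of the old fixed FD_MAX_SECTORS = 2048:

#include <stdio.h>

#define FD_MAX_BYTES 8388608 /* 2048 iovecs * 4096-byte pages */

int main(void)
{
        unsigned int block_sizes[] = { 512, 4096 };
        unsigned int i;

        /* the byte ceiling stays constant, the sector count scales:
         * 512 -> 16384 sectors, 4096 -> 2048 sectors, both 8 MiB */
        for (i = 0; i < 2; i++)
                printf("block_size=%u -> hw_max_sectors=%u\n",
                       block_sizes[i], FD_MAX_BYTES / block_sizes[i]);
        return 0;
}
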
2011     diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
2012     index d07b6af3a937..5d880917850f 100644
2013     --- a/drivers/tty/serial/8250/8250_dw.c
2014     +++ b/drivers/tty/serial/8250/8250_dw.c
2015     @@ -369,6 +369,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
2016     static const struct acpi_device_id dw8250_acpi_match[] = {
2017     { "INT33C4", 0 },
2018     { "INT33C5", 0 },
2019     + { "INT3434", 0 },
2020     + { "INT3435", 0 },
2021     { "80860F0A", 0 },
2022     { },
2023     };
2024     diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
2025     index b1785f58b6e3..7735bbdccbc9 100644
2026     --- a/drivers/tty/serial/pmac_zilog.c
2027     +++ b/drivers/tty/serial/pmac_zilog.c
2028     @@ -2051,6 +2051,9 @@ static int __init pmz_console_init(void)
2029     /* Probe ports */
2030     pmz_probe();
2031    
2032     + if (pmz_ports_count == 0)
2033     + return -ENODEV;
2034     +
2035     /* TODO: Autoprobe console based on OF */
2036     /* pmz_console.index = i; */
2037     register_console(&pmz_console);
2038     diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2039     index d3318a0df8ee..6463ca3bcfba 100644
2040     --- a/drivers/usb/class/cdc-wdm.c
2041     +++ b/drivers/usb/class/cdc-wdm.c
2042     @@ -820,13 +820,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
2043     {
2044     /* need autopm_get/put here to ensure the usbcore sees the new value */
2045     int rv = usb_autopm_get_interface(intf);
2046     - if (rv < 0)
2047     - goto err;
2048    
2049     intf->needs_remote_wakeup = on;
2050     - usb_autopm_put_interface(intf);
2051     -err:
2052     - return rv;
2053     + if (!rv)
2054     + usb_autopm_put_interface(intf);
2055     + return 0;
2056     }
2057    
2058     static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
2059     diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
2060     index ba45170c78e5..8335b484f14e 100644
2061     --- a/drivers/usb/serial/generic.c
2062     +++ b/drivers/usb/serial/generic.c
2063     @@ -176,14 +176,7 @@ retry:
2064     return result;
2065     }
2066    
2067     - /* Try sending off another urb, unless in irq context (in which case
2068     - * there will be no free urb). */
2069     - if (!in_irq())
2070     - goto retry;
2071     -
2072     - clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
2073     -
2074     - return 0;
2075     + goto retry; /* try sending off another urb */
2076     }
2077    
2078     /**
2079     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2080     index 496b7e39d5be..cc7a24154490 100644
2081     --- a/drivers/usb/serial/option.c
2082     +++ b/drivers/usb/serial/option.c
2083     @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
2084     #define ZTE_PRODUCT_MF628 0x0015
2085     #define ZTE_PRODUCT_MF626 0x0031
2086     #define ZTE_PRODUCT_MC2718 0xffe8
2087     +#define ZTE_PRODUCT_AC2726 0xfff1
2088    
2089     #define BENQ_VENDOR_ID 0x04a5
2090     #define BENQ_PRODUCT_H10 0x4068
2091     @@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
2092     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
2093     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
2094     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
2095     + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
2096    
2097     { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
2098     { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
2099     diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
2100     index fca4c752a4ed..eae2c873b39f 100644
2101     --- a/drivers/usb/serial/zte_ev.c
2102     +++ b/drivers/usb/serial/zte_ev.c
2103     @@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
2104     { USB_DEVICE(0x19d2, 0xfffd) },
2105     { USB_DEVICE(0x19d2, 0xfffc) },
2106     { USB_DEVICE(0x19d2, 0xfffb) },
2107     - /* AC2726, AC8710_V3 */
2108     - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
2109     + /* AC8710_V3 */
2110     { USB_DEVICE(0x19d2, 0xfff6) },
2111     { USB_DEVICE(0x19d2, 0xfff7) },
2112     { USB_DEVICE(0x19d2, 0xfff8) },
2113     diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
2114     index 3e68ac101040..5da06f020986 100644
2115     --- a/fs/ceph/addr.c
2116     +++ b/fs/ceph/addr.c
2117     @@ -213,9 +213,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
2118     if (err < 0) {
2119     SetPageError(page);
2120     goto out;
2121     - } else if (err < PAGE_CACHE_SIZE) {
2122     + } else {
2123     + if (err < PAGE_CACHE_SIZE) {
2124     /* zero fill remainder of page */
2125     - zero_user_segment(page, err, PAGE_CACHE_SIZE);
2126     + zero_user_segment(page, err, PAGE_CACHE_SIZE);
2127     + } else {
2128     + flush_dcache_page(page);
2129     + }
2130     }
2131     SetPageUptodate(page);
2132    
2133     diff --git a/fs/ceph/file.c b/fs/ceph/file.c
2134     index 656e16907430..5de16f5ac7e9 100644
2135     --- a/fs/ceph/file.c
2136     +++ b/fs/ceph/file.c
2137     @@ -313,9 +313,9 @@ static int striped_read(struct inode *inode,
2138     {
2139     struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2140     struct ceph_inode_info *ci = ceph_inode(inode);
2141     - u64 pos, this_len;
2142     + u64 pos, this_len, left;
2143     int io_align, page_align;
2144     - int left, pages_left;
2145     + int pages_left;
2146     int read;
2147     struct page **page_pos;
2148     int ret;
2149     @@ -346,47 +346,40 @@ more:
2150     ret = 0;
2151     hit_stripe = this_len < left;
2152     was_short = ret >= 0 && ret < this_len;
2153     - dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
2154     + dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
2155     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
2156    
2157     - if (ret > 0) {
2158     - int didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
2159     -
2160     - if (read < pos - off) {
2161     - dout(" zero gap %llu to %llu\n", off + read, pos);
2162     - ceph_zero_page_vector_range(page_align + read,
2163     - pos - off - read, pages);
2164     + if (ret >= 0) {
2165     + int didpages;
2166     + if (was_short && (pos + ret < inode->i_size)) {
2167     + u64 tmp = min(this_len - ret,
2168     + inode->i_size - pos - ret);
2169     + dout(" zero gap %llu to %llu\n",
2170     + pos + ret, pos + ret + tmp);
2171     + ceph_zero_page_vector_range(page_align + read + ret,
2172     + tmp, pages);
2173     + ret += tmp;
2174     }
2175     +
2176     + didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
2177     pos += ret;
2178     read = pos - off;
2179     left -= ret;
2180     page_pos += didpages;
2181     pages_left -= didpages;
2182    
2183     - /* hit stripe? */
2184     - if (left && hit_stripe)
2185     + /* hit stripe and need to continue */
2186     + if (left && hit_stripe && pos < inode->i_size)
2187     goto more;
2188     }
2189    
2190     - if (was_short) {
2191     + if (read > 0) {
2192     + ret = read;
2193     /* did we bounce off eof? */
2194     if (pos + left > inode->i_size)
2195     *checkeof = 1;
2196     -
2197     - /* zero trailing bytes (inside i_size) */
2198     - if (left > 0 && pos < inode->i_size) {
2199     - if (pos + left > inode->i_size)
2200     - left = inode->i_size - pos;
2201     -
2202     - dout("zero tail %d\n", left);
2203     - ceph_zero_page_vector_range(page_align + read, left,
2204     - pages);
2205     - read += left;
2206     - }
2207     }
2208    
2209     - if (ret >= 0)
2210     - ret = read;
2211     dout("striped_read returns %d\n", ret);
2212     return ret;
2213     }
2214     @@ -618,6 +611,8 @@ out:
2215     if (check_caps)
2216     ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
2217     NULL);
2218     + } else if (ret != -EOLDSNAPC && written > 0) {
2219     + ret = written;
2220     }
2221     return ret;
2222     }
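
The reworked striped_read() zero-fills only the readable hole: when a stripe returns fewer bytes than requested but the file extends past pos + ret, the gap up to min(rest of this request, bytes to EOF) must read back as zeroes. A worked example of that min() with invented numbers:

#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* a 4096-byte stripe read at pos returns only 1000 bytes
         * while the file extends to i_size; all values invented */
        unsigned long long pos = 8192, this_len = 4096, i_size = 10000;
        unsigned long long ret = 1000;

        if (pos + ret < i_size) {
                unsigned long long tmp = min_u64(this_len - ret,
                                                 i_size - pos - ret);
                printf("zero-fill %llu bytes at %llu\n", tmp, pos + ret);
                ret += tmp; /* 808 zeroed bytes, ret becomes 1808 */
        }
        return 0;
}
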
2223     diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
2224     index a5ce62eb7806..669622fd1ae3 100644
2225     --- a/fs/ceph/ioctl.c
2226     +++ b/fs/ceph/ioctl.c
2227     @@ -211,8 +211,12 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
2228     snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
2229     ceph_ino(inode), dl.object_no);
2230    
2231     - ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
2232     - ceph_file_layout_pg_pool(ci->i_layout));
2233     + r = ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap,
2234     + ceph_file_layout_pg_pool(ci->i_layout));
2235     + if (r < 0) {
2236     + up_read(&osdc->map_sem);
2237     + return r;
2238     + }
2239    
2240     dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
2241     if (dl.osd >= 0) {
2242     diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2243     index 4d2920304be8..d6a536886472 100644
2244     --- a/fs/ceph/mds_client.c
2245     +++ b/fs/ceph/mds_client.c
2246     @@ -414,6 +414,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
2247     {
2248     struct ceph_mds_session *s;
2249    
2250     + if (mds >= mdsc->mdsmap->m_max_mds)
2251     + return ERR_PTR(-EINVAL);
2252     +
2253     s = kzalloc(sizeof(*s), GFP_NOFS);
2254     if (!s)
2255     return ERR_PTR(-ENOMEM);
2256     @@ -639,6 +642,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
2257     req->r_unsafe_dir = NULL;
2258     }
2259    
2260     + complete_all(&req->r_safe_completion);
2261     +
2262     ceph_mdsc_put_request(req);
2263     }
2264    
2265     @@ -1840,8 +1845,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
2266     int mds = -1;
2267     int err = -EAGAIN;
2268    
2269     - if (req->r_err || req->r_got_result)
2270     + if (req->r_err || req->r_got_result) {
2271     + if (req->r_aborted)
2272     + __unregister_request(mdsc, req);
2273     goto out;
2274     + }
2275    
2276     if (req->r_timeout &&
2277     time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2278     @@ -2151,7 +2159,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2279     if (head->safe) {
2280     req->r_got_safe = true;
2281     __unregister_request(mdsc, req);
2282     - complete_all(&req->r_safe_completion);
2283    
2284     if (req->r_got_unsafe) {
2285     /*
2286     @@ -3040,8 +3047,10 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
2287     fsc->mdsc = mdsc;
2288     mutex_init(&mdsc->mutex);
2289     mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2290     - if (mdsc->mdsmap == NULL)
2291     + if (mdsc->mdsmap == NULL) {
2292     + kfree(mdsc);
2293     return -ENOMEM;
2294     + }
2295    
2296     init_completion(&mdsc->safe_umount_waiters);
2297     init_waitqueue_head(&mdsc->session_close_wq);
2298     diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
2299     index 9278dec9e940..d4d38977dcbb 100644
2300     --- a/fs/ceph/mdsmap.c
2301     +++ b/fs/ceph/mdsmap.c
2302     @@ -138,6 +138,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
2303     m->m_info[mds].export_targets =
2304     kcalloc(num_export_targets, sizeof(u32),
2305     GFP_NOFS);
2306     + if (m->m_info[mds].export_targets == NULL)
2307     + goto badmem;
2308     for (j = 0; j < num_export_targets; j++)
2309     m->m_info[mds].export_targets[j] =
2310     ceph_decode_32(&pexport_targets);
2311     @@ -170,7 +172,7 @@ bad:
2312     DUMP_PREFIX_OFFSET, 16, 1,
2313     start, end - start, true);
2314     ceph_mdsmap_destroy(m);
2315     - return ERR_PTR(-EINVAL);
2316     + return ERR_PTR(err);
2317     }
2318    
2319     void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
2320     diff --git a/fs/ceph/super.c b/fs/ceph/super.c
2321     index 7d377c9a5e35..6627b26a800c 100644
2322     --- a/fs/ceph/super.c
2323     +++ b/fs/ceph/super.c
2324     @@ -357,7 +357,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
2325     }
2326     err = -EINVAL;
2327     dev_name_end--; /* back up to ':' separator */
2328     - if (*dev_name_end != ':') {
2329     + if (dev_name_end < dev_name || *dev_name_end != ':') {
2330     pr_err("device name is missing path (no : separator in %s)\n",
2331     dev_name);
2332     goto out;
2333     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2334     index 5aae3d12d400..7bb2e2e55123 100644
2335     --- a/fs/ext4/ext4.h
2336     +++ b/fs/ext4/ext4.h
2337     @@ -280,6 +280,16 @@ struct ext4_io_submit {
2338     /* Translate # of blks to # of clusters */
2339     #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
2340     (sbi)->s_cluster_bits)
2341     +/* Mask out the low bits to get the starting block of the cluster */
2342     +#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
2343     + ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
2344     +#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
2345     + ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
2346     +/* Get the cluster offset */
2347     +#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
2348     + ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
2349     +#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
2350     + ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
2351    
2352     /*
2353     * Structure of a blocks group descriptor
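
With bigalloc, s_cluster_ratio is a power-of-two count of blocks per cluster, so the new helpers are the usual mask tricks: CMASK rounds a block number down to the start of its cluster, COFF keeps the offset inside it; the later hunks replace the open-coded "& ~(ratio - 1)" sites with these. A standalone demonstration, with the sbi dereference swapped for a plain ratio parameter:

#include <stdio.h>

typedef unsigned long long ext4_fsblk_t; /* as in the kernel */

#define PBLK_CMASK(ratio, pblk) ((pblk) & ~((ext4_fsblk_t)(ratio) - 1))
#define PBLK_COFF(ratio, pblk)  ((pblk) &  ((ext4_fsblk_t)(ratio) - 1))

int main(void)
{
        ext4_fsblk_t pblk = 0x12345; /* arbitrary block number */
        int ratio = 16;              /* 16 blocks per cluster */

        /* mask clears the low log2(16) = 4 bits, coff keeps them:
         * start=0x12340 offset=5 */
        printf("start=0x%llx offset=%llu\n",
               PBLK_CMASK(ratio, pblk), PBLK_COFF(ratio, pblk));
        return 0;
}
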
2354     diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
2355     index 1c88061da526..1be3996b5942 100644
2356     --- a/fs/ext4/ext4_jbd2.c
2357     +++ b/fs/ext4/ext4_jbd2.c
2358     @@ -223,6 +223,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
2359     if (WARN_ON_ONCE(err)) {
2360     ext4_journal_abort_handle(where, line, __func__, bh,
2361     handle, err);
2362     + ext4_error_inode(inode, where, line,
2363     + bh->b_blocknr,
2364     + "journal_dirty_metadata failed: "
2365     + "handle type %u started at line %u, "
2366     + "credits %u/%u, errcode %d",
2367     + handle->h_type,
2368     + handle->h_line_no,
2369     + handle->h_requested_credits,
2370     + handle->h_buffer_credits, err);
2371     }
2372     } else {
2373     if (inode)
2374     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
2375     index dc1e03047226..a2b625e279db 100644
2376     --- a/fs/ext4/extents.c
2377     +++ b/fs/ext4/extents.c
2378     @@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
2379     {
2380     ext4_fsblk_t block = ext4_ext_pblock(ext);
2381     int len = ext4_ext_get_actual_len(ext);
2382     + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
2383     + ext4_lblk_t last = lblock + len - 1;
2384    
2385     - if (len == 0)
2386     + if (lblock > last)
2387     return 0;
2388     return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
2389     }
2390     @@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
2391     if (depth == 0) {
2392     /* leaf entries */
2393     struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
2394     + struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
2395     + ext4_fsblk_t pblock = 0;
2396     + ext4_lblk_t lblock = 0;
2397     + ext4_lblk_t prev = 0;
2398     + int len = 0;
2399     while (entries) {
2400     if (!ext4_valid_extent(inode, ext))
2401     return 0;
2402     +
2403     + /* Check for overlapping extents */
2404     + lblock = le32_to_cpu(ext->ee_block);
2405     + len = ext4_ext_get_actual_len(ext);
2406     + if ((lblock <= prev) && prev) {
2407     + pblock = ext4_ext_pblock(ext);
2408     + es->s_last_error_block = cpu_to_le64(pblock);
2409     + return 0;
2410     + }
2411     ext++;
2412     entries--;
2413     + prev = lblock + len - 1;
2414     }
2415     } else {
2416     struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
2417     @@ -1755,8 +1772,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
2418     depth = ext_depth(inode);
2419     if (!path[depth].p_ext)
2420     goto out;
2421     - b2 = le32_to_cpu(path[depth].p_ext->ee_block);
2422     - b2 &= ~(sbi->s_cluster_ratio - 1);
2423     + b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
2424    
2425     /*
2426     * get the next allocated block if the extent in the path
2427     @@ -1766,7 +1782,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
2428     b2 = ext4_ext_next_allocated_block(path);
2429     if (b2 == EXT_MAX_BLOCKS)
2430     goto out;
2431     - b2 &= ~(sbi->s_cluster_ratio - 1);
2432     + b2 = EXT4_LBLK_CMASK(sbi, b2);
2433     }
2434    
2435     /* check for wrap through zero on extent logical start block*/
2436     @@ -2427,7 +2443,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2437     * truncate operation has removed all of the blocks in
2438     * the cluster.
2439     */
2440     - if (pblk & (sbi->s_cluster_ratio - 1) &&
2441     + if (EXT4_PBLK_COFF(sbi, pblk) &&
2442     (ee_len == num))
2443     *partial_cluster = EXT4_B2C(sbi, pblk);
2444     else
2445     @@ -3658,7 +3674,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
2446     {
2447     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2448     ext4_lblk_t lblk_start, lblk_end;
2449     - lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
2450     + lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
2451     lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
2452    
2453     return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
2454     @@ -3717,9 +3733,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
2455     trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
2456    
2457     /* Check towards left side */
2458     - c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
2459     + c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
2460     if (c_offset) {
2461     - lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
2462     + lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
2463     lblk_to = lblk_from + c_offset - 1;
2464    
2465     if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
2466     @@ -3727,7 +3743,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
2467     }
2468    
2469     /* Now check towards right. */
2470     - c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
2471     + c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
2472     if (allocated_clusters && c_offset) {
2473     lblk_from = lblk_start + num_blks;
2474     lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
2475     @@ -3935,7 +3951,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
2476     struct ext4_ext_path *path)
2477     {
2478     struct ext4_sb_info *sbi = EXT4_SB(sb);
2479     - ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
2480     + ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
2481     ext4_lblk_t ex_cluster_start, ex_cluster_end;
2482     ext4_lblk_t rr_cluster_start;
2483     ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
2484     @@ -3953,8 +3969,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
2485     (rr_cluster_start == ex_cluster_start)) {
2486     if (rr_cluster_start == ex_cluster_end)
2487     ee_start += ee_len - 1;
2488     - map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
2489     - c_offset;
2490     + map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
2491     map->m_len = min(map->m_len,
2492     (unsigned) sbi->s_cluster_ratio - c_offset);
2493     /*
2494     @@ -4108,7 +4123,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
2495     */
2496     map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
2497     newex.ee_block = cpu_to_le32(map->m_lblk);
2498     - cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
2499     + cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
2500    
2501     /*
2502     * If we are doing bigalloc, check to see if the extent returned
2503     @@ -4176,7 +4191,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
2504     * needed so that future calls to get_implied_cluster_alloc()
2505     * work correctly.
2506     */
2507     - offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
2508     + offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
2509     ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
2510     ar.goal -= offset;
2511     ar.logical -= offset;
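
The tightened ext4_valid_extent() folds two failure modes into one comparison: last = lblock + len - 1 is computed in 32-bit logical-block arithmetic, so lblock > last holds both when len == 0 (the old check) and when the extent's range wraps past 2^32. A sketch with invented extents:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ext4_lblk_t; /* 32-bit logical block, as in ext4 */

static int extent_range_ok(ext4_lblk_t lblock, unsigned int len)
{
        ext4_lblk_t last = lblock + len - 1;

        return !(lblock > last); /* 0 means invalid */
}

int main(void)
{
        printf("%d\n", extent_range_ok(100, 10));         /* 1: sane */
        printf("%d\n", extent_range_ok(100, 0));          /* 0: empty */
        printf("%d\n", extent_range_ok(0xfffffff0u, 32)); /* 0: wraps */
        return 0;
}
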
2512     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2513     index 904ca1a21dce..cb2bdc7ccb05 100644
2514     --- a/fs/ext4/inode.c
2515     +++ b/fs/ext4/inode.c
2516     @@ -1263,7 +1263,6 @@ static int ext4_journalled_write_end(struct file *file,
2517     */
2518     static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
2519     {
2520     - int retries = 0;
2521     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2522     struct ext4_inode_info *ei = EXT4_I(inode);
2523     unsigned int md_needed;
2524     @@ -1275,7 +1274,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
2525     * in order to allocate nrblocks
2526     * worse case is one extent per block
2527     */
2528     -repeat:
2529     spin_lock(&ei->i_block_reservation_lock);
2530     /*
2531     * ext4_calc_metadata_amount() has side effects, which we have
2532     @@ -1295,10 +1293,6 @@ repeat:
2533     ei->i_da_metadata_calc_len = save_len;
2534     ei->i_da_metadata_calc_last_lblock = save_last_lblock;
2535     spin_unlock(&ei->i_block_reservation_lock);
2536     - if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
2537     - cond_resched();
2538     - goto repeat;
2539     - }
2540     return -ENOSPC;
2541     }
2542     ei->i_reserved_meta_blocks += md_needed;
2543     @@ -1312,7 +1306,6 @@ repeat:
2544     */
2545     static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
2546     {
2547     - int retries = 0;
2548     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2549     struct ext4_inode_info *ei = EXT4_I(inode);
2550     unsigned int md_needed;
2551     @@ -1334,7 +1327,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
2552     * in order to allocate nrblocks
2553     * worse case is one extent per block
2554     */
2555     -repeat:
2556     spin_lock(&ei->i_block_reservation_lock);
2557     /*
2558     * ext4_calc_metadata_amount() has side effects, which we have
2559     @@ -1354,10 +1346,6 @@ repeat:
2560     ei->i_da_metadata_calc_len = save_len;
2561     ei->i_da_metadata_calc_last_lblock = save_last_lblock;
2562     spin_unlock(&ei->i_block_reservation_lock);
2563     - if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
2564     - cond_resched();
2565     - goto repeat;
2566     - }
2567     dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
2568     return -ENOSPC;
2569     }
2570     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2571     index 59c6750b894f..fba960ee26de 100644
2572     --- a/fs/ext4/mballoc.c
2573     +++ b/fs/ext4/mballoc.c
2574     @@ -3423,6 +3423,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
2575     {
2576     struct ext4_prealloc_space *pa;
2577     pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
2578     +
2579     + BUG_ON(atomic_read(&pa->pa_count));
2580     + BUG_ON(pa->pa_deleted == 0);
2581     kmem_cache_free(ext4_pspace_cachep, pa);
2582     }
2583    
2584     @@ -3436,11 +3439,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
2585     ext4_group_t grp;
2586     ext4_fsblk_t grp_blk;
2587    
2588     - if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
2589     - return;
2590     -
2591     /* in this short window concurrent discard can set pa_deleted */
2592     spin_lock(&pa->pa_lock);
2593     + if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
2594     + spin_unlock(&pa->pa_lock);
2595     + return;
2596     + }
2597     +
2598     if (pa->pa_deleted == 1) {
2599     spin_unlock(&pa->pa_lock);
2600     return;
2601     @@ -4102,7 +4107,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
2602     ext4_get_group_no_and_offset(sb, goal, &group, &block);
2603    
2604     /* set up allocation goals */
2605     - ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
2606     + ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
2607     ac->ac_status = AC_STATUS_CONTINUE;
2608     ac->ac_sb = sb;
2609     ac->ac_inode = ar->inode;
2610     @@ -4639,7 +4644,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
2611     * blocks at the beginning or the end unless we are explicitly
2612     * requested to avoid doing so.
2613     */
2614     - overflow = block & (sbi->s_cluster_ratio - 1);
2615     + overflow = EXT4_PBLK_COFF(sbi, block);
2616     if (overflow) {
2617     if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
2618     overflow = sbi->s_cluster_ratio - overflow;
2619     @@ -4653,7 +4658,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
2620     count += overflow;
2621     }
2622     }
2623     - overflow = count & (sbi->s_cluster_ratio - 1);
2624     + overflow = EXT4_LBLK_COFF(sbi, count);
2625     if (overflow) {
2626     if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
2627     if (count > overflow)
2628     @@ -4766,8 +4771,8 @@ do_more:
2629     " group:%d block:%d count:%lu failed"
2630     " with %d", block_group, bit, count,
2631     err);
2632     - }
2633     -
2634     + } else
2635     + EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
2636    
2637     ext4_lock_group(sb, block_group);
2638     mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
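
The mballoc hunk closes a classic drop-last-reference race: the old code ran atomic_dec_and_test() before taking pa_lock, leaving a window in which a concurrent discard could mark the preallocation deleted and free it first (the new BUG_ONs in the RCU callback would now catch exactly that). Decrementing under the lock serializes "last reference dropped" with "deleted observed". A minimal user-space model of the corrected put path; the struct and mutex are stand-ins for ext4's spinlock and atomic_t:

#include <pthread.h>
#include <stdio.h>

struct prealloc {
        pthread_mutex_t pa_lock;
        int pa_count;   /* atomic_t in the kernel */
        int pa_deleted;
        int pa_free;
};

static void put_pa(struct prealloc *pa)
{
        pthread_mutex_lock(&pa->pa_lock);
        if (--pa->pa_count != 0 || pa->pa_free != 0) {
                pthread_mutex_unlock(&pa->pa_lock);
                return;                 /* not the last user */
        }
        if (pa->pa_deleted) {           /* discard got here first */
                pthread_mutex_unlock(&pa->pa_lock);
                return;
        }
        pa->pa_deleted = 1;             /* we win; ext4 frees it from here */
        pthread_mutex_unlock(&pa->pa_lock);
        printf("freeing preallocation\n");
}

int main(void)
{
        struct prealloc pa = { PTHREAD_MUTEX_INITIALIZER, 1, 0, 0 };

        put_pa(&pa);
        return 0;
}
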
2639     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2640     index 3f7c39e6d097..e4923b6a9e39 100644
2641     --- a/fs/ext4/super.c
2642     +++ b/fs/ext4/super.c
2643     @@ -3213,11 +3213,19 @@ int ext4_calculate_overhead(struct super_block *sb)
2644     }
2645    
2646    
2647     -static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
2648     +static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
2649     {
2650     ext4_fsblk_t resv_clusters;
2651    
2652     /*
2653     + * There's no need to reserve anything when we aren't using extents.
2654     + * The space estimates are exact, there are no unwritten extents,
2655     + * hole punching doesn't need new metadata... This is needed especially
2656     + * to keep ext2/3 backward compatibility.
2657     + */
2658     + if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2659     + return 0;
2660     + /*
2661     * By default we reserve 2% or 4096 clusters, whichever is smaller.
2662     * This should cover the situations where we can not afford to run
2663     * out of space like for example punch hole, or converting
2664     @@ -3225,7 +3233,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
2665     * allocation would require 1, or 2 blocks, higher numbers are
2666     * very rare.
2667     */
2668     - resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
2669     + resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
2670     + EXT4_SB(sb)->s_cluster_bits;
2671    
2672     do_div(resv_clusters, 50);
2673     resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
2674     @@ -3969,10 +3978,10 @@ no_journal:
2675     "available");
2676     }
2677    
2678     - err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
2679     + err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
2680     if (err) {
2681     ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
2682     - "reserved pool", ext4_calculate_resv_clusters(sbi));
2683     + "reserved pool", ext4_calculate_resv_clusters(sb));
2684     goto failed_mount4a;
2685     }
2686    
2687     diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
2688     index 0bad69ed6336..76251600cbea 100644
2689     --- a/fs/gfs2/aops.c
2690     +++ b/fs/gfs2/aops.c
2691     @@ -999,6 +999,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
2692     {
2693     struct file *file = iocb->ki_filp;
2694     struct inode *inode = file->f_mapping->host;
2695     + struct address_space *mapping = inode->i_mapping;
2696     struct gfs2_inode *ip = GFS2_I(inode);
2697     struct gfs2_holder gh;
2698     int rv;
2699     @@ -1019,6 +1020,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
2700     if (rv != 1)
2701     goto out; /* dio not valid, fall back to buffered i/o */
2702    
2703     + /*
2704     + * Now since we are holding a deferred (CW) lock at this point, you
2705     + * might be wondering why this is ever needed. There is a case however
2706     + * where we've granted a deferred local lock against a cached exclusive
2707     + * glock. That is ok provided all granted local locks are deferred, but
2708     + * it also means that it is possible to encounter pages which are
2709     + * cached and possibly also mapped. So here we check for that and sort
2710     + * them out ahead of the dio. The glock state machine will take care of
2711     + * everything else.
2712     + *
2713     + * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
2714     + * the first place, mapping->nrpages will always be zero.
2715     + */
2716     + if (mapping->nrpages) {
2717     + loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
2718     + loff_t len = iov_length(iov, nr_segs);
2719     + loff_t end = PAGE_ALIGN(offset + len) - 1;
2720     +
2721     + rv = 0;
2722     + if (len == 0)
2723     + goto out;
2724     + if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
2725     + unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
2726     + rv = filemap_write_and_wait_range(mapping, lstart, end);
2727     + if (rv)
2728     + return rv;
2729     + truncate_inode_pages_range(mapping, lstart, end);
2730     + }
2731     +
2732     rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2733     offset, nr_segs, gfs2_get_block_direct,
2734     NULL, NULL, 0);
2735     diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
2736     index 60ede2a0f43f..f7dd3b4f8ab0 100644
2737     --- a/fs/gfs2/ops_fstype.c
2738     +++ b/fs/gfs2/ops_fstype.c
2739     @@ -1317,8 +1317,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
2740     if (IS_ERR(s))
2741     goto error_bdev;
2742    
2743     - if (s->s_root)
2744     + if (s->s_root) {
2745     + /*
2746     + * s_umount nests inside bd_mutex during
2747     + * __invalidate_device(). blkdev_put() acquires
2748     + * bd_mutex and can't be called under s_umount. Drop
2749     + * s_umount temporarily. This is safe as we're
2750     + * holding an active reference.
2751     + */
2752     + up_write(&s->s_umount);
2753     blkdev_put(bdev, mode);
2754     + down_write(&s->s_umount);
2755     + }
2756    
2757     memset(&args, 0, sizeof(args));
2758     args.ar_quota = GFS2_QUOTA_DEFAULT;
2759     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
2760     index e0c0bc275924..a6917125f215 100644
2761     --- a/fs/jbd2/transaction.c
2762     +++ b/fs/jbd2/transaction.c
2763     @@ -1151,7 +1151,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
2764     * once a transaction -bzzz
2765     */
2766     jh->b_modified = 1;
2767     - J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
2768     + if (handle->h_buffer_credits <= 0) {
2769     + ret = -ENOSPC;
2770     + goto out_unlock_bh;
2771     + }
2772     handle->h_buffer_credits--;
2773     }
2774    
2775     @@ -1234,7 +1237,6 @@ out_unlock_bh:
2776     jbd2_journal_put_journal_head(jh);
2777     out:
2778     JBUFFER_TRACE(jh, "exit");
2779     - WARN_ON(ret); /* All errors are bugs, so dump the stack */
2780     return ret;
2781     }
2782    
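The jbd2 hunk above demotes a "can't happen" assertion into a recoverable -ENOSPC return, and drops the WARN_ON once errors become expected. A minimal sketch of that pattern, with hypothetical names rather than jbd2's real API:

#include <errno.h>
#include <stdio.h>

/* Report the exhausted-credits case instead of asserting on it. */
static int consume_credit(int *credits)
{
	if (*credits <= 0)
		return -ENOSPC;   /* caller decides how to recover */
	(*credits)--;
	return 0;
}

int main(void)
{
	int credits = 1;
	printf("%d\n", consume_credit(&credits)); /* 0 */
	printf("%d\n", consume_credit(&credits)); /* -ENOSPC */
	return 0;
}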
2783     diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
2784     index a59ff51b0166..b58268a5ddd4 100644
2785     --- a/include/asm-generic/pgtable.h
2786     +++ b/include/asm-generic/pgtable.h
2787     @@ -220,7 +220,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
2788     #endif
2789    
2790     #ifndef pte_accessible
2791     -# define pte_accessible(pte) ((void)(pte),1)
2792     +# define pte_accessible(mm, pte) ((void)(pte), 1)
2793     #endif
2794    
2795     #ifndef flush_tlb_fix_spurious_fault
2796     diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
2797     index e6c9c4cc9b23..c463ce990c48 100644
2798     --- a/include/clocksource/arm_arch_timer.h
2799     +++ b/include/clocksource/arm_arch_timer.h
2800     @@ -32,7 +32,7 @@
2801     #ifdef CONFIG_ARM_ARCH_TIMER
2802    
2803     extern u32 arch_timer_get_rate(void);
2804     -extern u64 (*arch_timer_read_counter)(void);
2805     +extern u64 arch_timer_read_counter(void);
2806     extern struct timecounter *arch_timer_get_timecounter(void);
2807    
2808     #else
2809     diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2810     index bb1bc485390b..ecaef57f9f6c 100644
2811     --- a/include/drm/drm_pciids.h
2812     +++ b/include/drm/drm_pciids.h
2813     @@ -559,7 +559,7 @@
2814     {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2815     {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2816     {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2817     - {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2818     + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2819     {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2820     {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2821     {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2822     diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
2823     index 669fef5c745a..3e0fbe441763 100644
2824     --- a/include/linux/auxvec.h
2825     +++ b/include/linux/auxvec.h
2826     @@ -3,6 +3,6 @@
2827    
2828     #include <uapi/linux/auxvec.h>
2829    
2830     -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
2831     +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
2832     /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
2833     #endif /* _LINUX_AUXVEC_H */
2834     diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
2835     index 186db0bf4951..8f47625a0661 100644
2836     --- a/include/linux/ceph/osd_client.h
2837     +++ b/include/linux/ceph/osd_client.h
2838     @@ -145,7 +145,6 @@ struct ceph_osd_request {
2839     s32 r_reply_op_result[CEPH_OSD_MAX_OP];
2840     int r_got_reply;
2841     int r_linger;
2842     - int r_completed;
2843    
2844     struct ceph_osd_client *r_osdc;
2845     struct kref r_kref;
2846     @@ -336,6 +335,8 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
2847     struct ceph_osd_request *req);
2848     extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
2849    
2850     +extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
2851     +
2852     extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
2853     struct ceph_vino vino,
2854     struct ceph_file_layout *layout,
2855     diff --git a/include/linux/libata.h b/include/linux/libata.h
2856     index eae7a053dc51..9a4c194ebc8a 100644
2857     --- a/include/linux/libata.h
2858     +++ b/include/linux/libata.h
2859     @@ -399,6 +399,7 @@ enum {
2860     ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
2861     ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
2862     ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
2863     + ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
2864    
2865     /* DMA mask for user DMA control: User visible values; DO NOT
2866     renumber */
2867     diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2868     index 4a189ba6b128..10a9a17342fc 100644
2869     --- a/include/linux/mm_types.h
2870     +++ b/include/linux/mm_types.h
2871     @@ -437,6 +437,14 @@ struct mm_struct {
2872     */
2873     int first_nid;
2874     #endif
2875     +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
2876     + /*
2877     + * An operation with batched TLB flushing is going on. Anything that
2878     + * can move process memory needs to flush the TLB when moving a
2879     + * PROT_NONE or PROT_NUMA mapped page.
2880     + */
2881     + bool tlb_flush_pending;
2882     +#endif
2883     struct uprobes_state uprobes_state;
2884     };
2885    
2886     @@ -457,4 +465,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
2887     return mm->cpu_vm_mask_var;
2888     }
2889    
2890     +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
2891     +/*
2892     + * Memory barriers to keep this state in sync are graciously provided by
2893     + * the page table locks, outside of which no page table modifications happen.
2894     + * The barriers below prevent the compiler from re-ordering the instructions
2895     + * around the memory barriers that are already present in the code.
2896     + */
2897     +static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
2898     +{
2899     + barrier();
2900     + return mm->tlb_flush_pending;
2901     +}
2902     +static inline void set_tlb_flush_pending(struct mm_struct *mm)
2903     +{
2904     + mm->tlb_flush_pending = true;
2905     +
2906     + /*
2907     + * Guarantee that the tlb_flush_pending store does not leak into the
2908     + * critical section updating the page tables
2909     + */
2910     + smp_mb__before_spinlock();
2911     +}
2912     +/* Clearing is done after a TLB flush, which also provides a barrier. */
2913     +static inline void clear_tlb_flush_pending(struct mm_struct *mm)
2914     +{
2915     + barrier();
2916     + mm->tlb_flush_pending = false;
2917     +}
2918     +#else
2919     +static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
2920     +{
2921     + return false;
2922     +}
2923     +static inline void set_tlb_flush_pending(struct mm_struct *mm)
2924     +{
2925     +}
2926     +static inline void clear_tlb_flush_pending(struct mm_struct *mm)
2927     +{
2928     +}
2929     +#endif
2930     +
2931     #endif /* _LINUX_MM_TYPES_H */
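A userspace sketch of the protocol these helpers implement, with C11 seq_cst atomics standing in for the kernel's page-table locks and barriers (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy mm with just the flag the hunk adds. */
struct mm_sketch { atomic_bool tlb_flush_pending; };

/* Writer (mprotect-like path): publish the flag before touching PTEs. */
static void set_pending(struct mm_sketch *mm)
{
	/* seq_cst store: the flag becomes visible before any later PTE
	 * update; it stands in for smp_mb__before_spinlock() plus the
	 * page table lock in the kernel version. */
	atomic_store(&mm->tlb_flush_pending, true);
	/* ... change protections, then flush the TLB ... */
}

/* Cleared only after the flush has actually happened. */
static void clear_pending(struct mm_sketch *mm)
{
	atomic_store(&mm->tlb_flush_pending, false);
}

/* Reader (migration-like path): a pending flush means a stale TLB entry
 * may still permit writes, so flush before trusting the page contents. */
static bool must_flush_first(struct mm_sketch *mm)
{
	return atomic_load(&mm->tlb_flush_pending);
}

int main(void)
{
	struct mm_sketch mm = { .tlb_flush_pending = false };
	set_pending(&mm);
	printf("flush first? %d\n", must_flush_first(&mm));  /* 1 */
	clear_pending(&mm);
	printf("flush first? %d\n", must_flush_first(&mm));  /* 0 */
	return 0;
}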
2932     diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
2933     index 7d537ced949a..75f34949d9ab 100644
2934     --- a/include/linux/spinlock.h
2935     +++ b/include/linux/spinlock.h
2936     @@ -117,9 +117,17 @@ do { \
2937     #endif /*arch_spin_is_contended*/
2938     #endif
2939    
2940     -/* The lock does not imply full memory barrier. */
2941     -#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
2942     -static inline void smp_mb__after_lock(void) { smp_mb(); }
2943     +/*
2944     + * Despite its name it doesn't necessarily have to be a full barrier.
2945     + * It should only guarantee that a STORE before the critical section
2946     + * can not be reordered with a LOAD inside this section.
2947     + * spin_lock() is a one-way barrier, so this LOAD can not escape out
2948     + * of the region. So the default implementation simply ensures that
2949     + * a STORE can not move into the critical section; smp_wmb() should
2950     + * serialize it with another STORE done by spin_lock().
2951     + */
2952     +#ifndef smp_mb__before_spinlock
2953     +#define smp_mb__before_spinlock() smp_wmb()
2954     #endif
2955    
2956     /**
2957     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
2958     index 4ea4f985f394..7d99c0b5b789 100644
2959     --- a/include/target/target_core_base.h
2960     +++ b/include/target/target_core_base.h
2961     @@ -614,6 +614,7 @@ struct se_dev_attrib {
2962     u32 unmap_granularity;
2963     u32 unmap_granularity_alignment;
2964     u32 max_write_same_len;
2965     + u32 max_bytes_per_io;
2966     struct se_device *da_dev;
2967     struct config_group da_group;
2968     };
2969     diff --git a/kernel/fork.c b/kernel/fork.c
2970     index 80d92e987f21..ff7be9dac4c1 100644
2971     --- a/kernel/fork.c
2972     +++ b/kernel/fork.c
2973     @@ -544,6 +544,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
2974     mm->cached_hole_size = ~0UL;
2975     mm_init_aio(mm);
2976     mm_init_owner(mm, p);
2977     + clear_tlb_flush_pending(mm);
2978    
2979     if (likely(!mm_alloc_pgd(mm))) {
2980     mm->def_flags = 0;
2981     diff --git a/kernel/freezer.c b/kernel/freezer.c
2982     index c38893b0efba..78758512b1e1 100644
2983     --- a/kernel/freezer.c
2984     +++ b/kernel/freezer.c
2985     @@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
2986     bool pm_freezing;
2987     bool pm_nosig_freezing;
2988    
2989     +/*
2990     + * Temporary export for the deadlock workaround in ata_scsi_hotplug().
2991     + * Remove once the hack becomes unnecessary.
2992     + */
2993     +EXPORT_SYMBOL_GPL(pm_freezing);
2994     +
2995     /* protects freezing and frozen transitions */
2996     static DEFINE_SPINLOCK(freezer_lock);
2997    
2998     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2999     index e8b335016c52..f9e35b1e7713 100644
3000     --- a/kernel/sched/core.c
3001     +++ b/kernel/sched/core.c
3002     @@ -1487,7 +1487,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
3003     unsigned long flags;
3004     int cpu, success = 0;
3005    
3006     - smp_wmb();
3007     + /*
3008     + * If we are going to wake up a thread waiting for CONDITION we
3009     + * need to ensure that CONDITION=1 done by the caller can not be
3010     + * reordered with p->state check below. This pairs with mb() in
3011     + * set_current_state() the waiting thread does.
3012     + */
3013     + smp_mb__before_spinlock();
3014     raw_spin_lock_irqsave(&p->pi_lock, flags);
3015     if (!(p->state & state))
3016     goto out;
3017     @@ -2966,6 +2972,12 @@ need_resched:
3018     if (sched_feat(HRTICK))
3019     hrtick_clear(rq);
3020    
3021     + /*
3022     + * Make sure that signal_pending_state()->signal_pending() below
3023     + * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
3024     + * done by the caller to avoid the race with signal_wake_up().
3025     + */
3026     + smp_mb__before_spinlock();
3027     raw_spin_lock_irq(&rq->lock);
3028    
3029     switch_count = &prev->nivcsw;
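The pairing these scheduler comments describe is the classic store-buffering pattern: each side stores its flag, then loads the other's. A hedged pthread sketch with seq_cst atomics playing the role of smp_mb__before_spinlock() plus the lock; under that ordering at least one side must observe the other's store, so the wakeup can never be lost:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int condition;   /* "CONDITION" from the comment above */
static atomic_int task_state;  /* 0 = running, 1 = about to sleep */

static void *waker(void *arg)
{
	(void)arg;
	atomic_store(&condition, 1);        /* CONDITION = 1 */
	/* seq_cst: the store above cannot be reordered after this load. */
	if (atomic_load(&task_state) == 1)
		puts("waker: sees the sleeper, wakes it");
	return NULL;
}

static void *sleeper(void *arg)
{
	(void)arg;
	atomic_store(&task_state, 1);       /* set_current_state() */
	/* Pairs with the waker: at least one side sees the other's store. */
	if (atomic_load(&condition) == 0)
		puts("sleeper: no condition yet, would schedule()");
	else
		puts("sleeper: condition already set, keeps running");
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, sleeper, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}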
3030     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3031     index da7eebb22b06..ce60006132b1 100644
3032     --- a/kernel/sched/fair.c
3033     +++ b/kernel/sched/fair.c
3034     @@ -936,6 +936,13 @@ void task_numa_work(struct callback_head *work)
3035     if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
3036     continue;
3037    
3038     + /*
3039     + * Skip inaccessible VMAs to avoid any confusion between
3040     + * PROT_NONE and NUMA hinting ptes
3041     + */
3042     + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
3043     + continue;
3044     +
3045     do {
3046     start = max(start, vma->vm_start);
3047     end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
3048     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
3049     index 127a2c4cf4ab..15334e6de832 100644
3050     --- a/kernel/sched/rt.c
3051     +++ b/kernel/sched/rt.c
3052     @@ -964,6 +964,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
3053     {
3054     struct rq *rq = rq_of_rt_rq(rt_rq);
3055    
3056     +#ifdef CONFIG_RT_GROUP_SCHED
3057     + /*
3058     + * Change rq's cpupri only if rt_rq is the top queue.
3059     + */
3060     + if (&rq->rt != rt_rq)
3061     + return;
3062     +#endif
3063     if (rq->online && prio < prev_prio)
3064     cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
3065     }
3066     @@ -973,6 +980,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
3067     {
3068     struct rq *rq = rq_of_rt_rq(rt_rq);
3069    
3070     +#ifdef CONFIG_RT_GROUP_SCHED
3071     + /*
3072     + * Change rq's cpupri only if rt_rq is the top queue.
3073     + */
3074     + if (&rq->rt != rt_rq)
3075     + return;
3076     +#endif
3077     if (rq->online && rt_rq->highest_prio.curr != prev_prio)
3078     cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
3079     }
3080     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3081     index 331c102177a6..d0c5c3f0d939 100644
3082     --- a/kernel/trace/ftrace.c
3083     +++ b/kernel/trace/ftrace.c
3084     @@ -750,7 +750,7 @@ static int ftrace_profile_init(void)
3085     int cpu;
3086     int ret = 0;
3087    
3088     - for_each_online_cpu(cpu) {
3089     + for_each_possible_cpu(cpu) {
3090     ret = ftrace_profile_init_cpu(cpu);
3091     if (ret)
3092     break;
3093     diff --git a/mm/compaction.c b/mm/compaction.c
3094     index 05ccb4cc0bdb..9a3e351da29b 100644
3095     --- a/mm/compaction.c
3096     +++ b/mm/compaction.c
3097     @@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
3098     bool migrate_scanner)
3099     {
3100     struct zone *zone = cc->zone;
3101     +
3102     + if (cc->ignore_skip_hint)
3103     + return;
3104     +
3105     if (!page)
3106     return;
3107    
3108     diff --git a/mm/fremap.c b/mm/fremap.c
3109     index 87da3590c61e..1fb6bfe39d8c 100644
3110     --- a/mm/fremap.c
3111     +++ b/mm/fremap.c
3112     @@ -203,9 +203,10 @@ get_write_lock:
3113     if (mapping_cap_account_dirty(mapping)) {
3114     unsigned long addr;
3115     struct file *file = get_file(vma->vm_file);
3116     + /* mmap_region may free vma; grab the info now */
3117     + vm_flags = vma->vm_flags;
3118    
3119     - addr = mmap_region(file, start, size,
3120     - vma->vm_flags, pgoff);
3121     + addr = mmap_region(file, start, size, vm_flags, pgoff);
3122     fput(file);
3123     if (IS_ERR_VALUE(addr)) {
3124     err = addr;
3125     @@ -213,7 +214,7 @@ get_write_lock:
3126     BUG_ON(addr != start);
3127     err = 0;
3128     }
3129     - goto out;
3130     + goto out_freed;
3131     }
3132     mutex_lock(&mapping->i_mmap_mutex);
3133     flush_dcache_mmap_lock(mapping);
3134     @@ -248,6 +249,7 @@ get_write_lock:
3135     out:
3136     if (vma)
3137     vm_flags = vma->vm_flags;
3138     +out_freed:
3139     if (likely(!has_write_lock))
3140     up_read(&mm->mmap_sem);
3141     else
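The fremap fix above is a use-after-free repair: mmap_region() may free the vma, so anything needed afterwards has to be copied out first. A generic sketch of that save-before-call pattern, with hypothetical types rather than mm code:

#include <stdio.h>
#include <stdlib.h>

struct obj { unsigned long flags; };

/* Stand-in for mmap_region(): consumes and may free its argument. */
static long consume(struct obj *o)
{
	long ret = (long)o->flags;
	free(o);                     /* the object is gone after this */
	return ret;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	if (!o)
		return 1;
	o->flags = 0x73;

	/* Copy out everything needed later BEFORE the call that may free
	 * the object; reading o->flags after consume() would be the very
	 * use-after-free the hunk fixes. */
	unsigned long flags = o->flags;
	long ret = consume(o);

	printf("flags=%#lx ret=%ld\n", flags, ret);
	return 0;
}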
3142     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3143     index c403a74e4bee..6bd22902d289 100644
3144     --- a/mm/huge_memory.c
3145     +++ b/mm/huge_memory.c
3146     @@ -1344,6 +1344,20 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
3147     goto out_unlock;
3148     }
3149    
3150     + /* Bail if we fail to protect against THP splits for any reason */
3151     + if (unlikely(!anon_vma)) {
3152     + put_page(page);
3153     + page_nid = -1;
3154     + goto clear_pmdnuma;
3155     + }
3156     +
3157     + /*
3158     + * The page_table_lock above provides a memory barrier
3159     + * with change_protection_range.
3160     + */
3161     + if (mm_tlb_flush_pending(mm))
3162     + flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
3163     +
3164     /*
3165     * Migrate the THP to the requested node, returns with page unlocked
3166     * and pmd_numa cleared.
3167     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3168     index 905ce72c8c4e..134e2106f467 100644
3169     --- a/mm/memcontrol.c
3170     +++ b/mm/memcontrol.c
3171     @@ -379,7 +379,7 @@ struct mem_cgroup {
3172     static size_t memcg_size(void)
3173     {
3174     return sizeof(struct mem_cgroup) +
3175     - nr_node_ids * sizeof(struct mem_cgroup_per_node);
3176     + nr_node_ids * sizeof(struct mem_cgroup_per_node *);
3177     }
3178    
3179     /* internal only representation about the status of kmem accounting. */
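The memcontrol fix is the classic sizeof(element) bug: the trailing array stores pointers to per-node structs, so the allocation must be sized by the pointer type, not the pointee. A minimal sketch with made-up names:

#include <stdio.h>
#include <stdlib.h>

struct per_node { long counters[32]; };

struct cgroup_sketch {
	int id;
	struct per_node *nodeinfo[];   /* flexible array of POINTERS */
};

int main(void)
{
	int nr_node_ids = 4;
	/* Correct: size by the pointer. Using sizeof(struct per_node)
	 * here would over-allocate by roughly 32x per node. */
	size_t sz = sizeof(struct cgroup_sketch) +
		    nr_node_ids * sizeof(struct per_node *);
	struct cgroup_sketch *cg = calloc(1, sz);
	if (!cg)
		return 1;
	for (int i = 0; i < nr_node_ids; i++)
		cg->nodeinfo[i] = calloc(1, sizeof(struct per_node));
	printf("header + %d pointers = %zu bytes\n", nr_node_ids, sz);
	for (int i = 0; i < nr_node_ids; i++)
		free(cg->nodeinfo[i]);
	free(cg);
	return 0;
}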
3180     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3181     index ceb0c7f1932f..6a7f9cab4ddb 100644
3182     --- a/mm/memory-failure.c
3183     +++ b/mm/memory-failure.c
3184     @@ -936,6 +936,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
3185     BUG_ON(!PageHWPoison(p));
3186     return SWAP_FAIL;
3187     }
3188     + /*
3189     + * We pinned the head page for hwpoison handling,
3190     + * now we split the thp and we are interested in
3191     + * the hwpoisoned raw page, so move the refcount
3192     + * to it.
3193     + */
3194     + if (hpage != p) {
3195     + put_page(hpage);
3196     + get_page(p);
3197     + }
3198     /* THP is split, so ppage should be the real poisoned page. */
3199     ppage = p;
3200     }
3201     diff --git a/mm/migrate.c b/mm/migrate.c
3202     index bf436c15f055..a88c12f2235d 100644
3203     --- a/mm/migrate.c
3204     +++ b/mm/migrate.c
3205     @@ -1715,7 +1715,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
3206     putback_lru_page(page);
3207     mod_zone_page_state(page_zone(page),
3208     NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
3209     - goto out_fail;
3210     +
3211     + goto out_unlock;
3212     }
3213    
3214     /*
3215     @@ -1765,6 +1766,7 @@ out_dropref:
3216     set_pmd_at(mm, haddr, pmd, entry);
3217     update_mmu_cache_pmd(vma, address, &entry);
3218    
3219     +out_unlock:
3220     unlock_page(page);
3221     put_page(page);
3222     return 0;
3223     diff --git a/mm/mprotect.c b/mm/mprotect.c
3224     index d4d5399c7aba..e9f65aaa3182 100644
3225     --- a/mm/mprotect.c
3226     +++ b/mm/mprotect.c
3227     @@ -206,6 +206,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
3228     BUG_ON(addr >= end);
3229     pgd = pgd_offset(mm, addr);
3230     flush_cache_range(vma, addr, end);
3231     + set_tlb_flush_pending(mm);
3232     do {
3233     next = pgd_addr_end(addr, end);
3234     if (pgd_none_or_clear_bad(pgd))
3235     @@ -217,6 +218,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
3236     /* Only flush the TLB if we actually modified any entries: */
3237     if (pages)
3238     flush_tlb_range(vma, start, end);
3239     + clear_tlb_flush_pending(mm);
3240    
3241     return pages;
3242     }
3243     diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
3244     index 0c8323fe6c8f..4b62a16fc3c1 100644
3245     --- a/mm/pgtable-generic.c
3246     +++ b/mm/pgtable-generic.c
3247     @@ -86,9 +86,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
3248     pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
3249     pte_t *ptep)
3250     {
3251     + struct mm_struct *mm = (vma)->vm_mm;
3252     pte_t pte;
3253     - pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
3254     - if (pte_accessible(pte))
3255     + pte = ptep_get_and_clear(mm, address, ptep);
3256     + if (pte_accessible(mm, pte))
3257     flush_tlb_page(vma, address);
3258     return pte;
3259     }
3260     @@ -166,6 +167,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
3261     void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
3262     pmd_t *pmdp)
3263     {
3264     + pmd_t entry = *pmdp;
3265     + if (pmd_numa(entry))
3266     + entry = pmd_mknonnuma(entry);
3267     - set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
3267     + set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
3268     flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
3269     }
3270     diff --git a/mm/rmap.c b/mm/rmap.c
3271     index 6280da86b5d6..3f6077461aea 100644
3272     --- a/mm/rmap.c
3273     +++ b/mm/rmap.c
3274     @@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
3275     spinlock_t *ptl;
3276    
3277     if (unlikely(PageHuge(page))) {
3278     + /* when pud is not present, pte will be NULL */
3279     pte = huge_pte_offset(mm, address);
3280     + if (!pte)
3281     + return NULL;
3282     +
3283     ptl = &mm->page_table_lock;
3284     goto check;
3285     }
3286     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
3287     index 46ec7672cccc..bc0016e3e5ac 100644
3288     --- a/net/ceph/osd_client.c
3289     +++ b/net/ceph/osd_client.c
3290     @@ -733,12 +733,14 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
3291    
3292     object_size = le32_to_cpu(layout->fl_object_size);
3293     object_base = off - objoff;
3294     - if (truncate_size <= object_base) {
3295     - truncate_size = 0;
3296     - } else {
3297     - truncate_size -= object_base;
3298     - if (truncate_size > object_size)
3299     - truncate_size = object_size;
3300     + if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
3301     + if (truncate_size <= object_base) {
3302     + truncate_size = 0;
3303     + } else {
3304     + truncate_size -= object_base;
3305     + if (truncate_size > object_size)
3306     + truncate_size = object_size;
3307     + }
3308     }
3309    
3310     osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
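The truncate_size handling above maps one file-global truncate point into each object's local range; the new truncate_seq == 1 && truncate_size == -1ULL test skips the clamp when no truncate is in flight. A small sketch of the clamp arithmetic, assuming hypothetical 4 MiB objects:

#include <stdio.h>

typedef unsigned long long u64;

/* Clamp a file-global truncate point into one object's [0, object_size]
 * range, given where that object starts in the file (object_base). */
static u64 object_truncate_size(u64 truncate_size, u64 object_base,
				u64 object_size)
{
	if (truncate_size <= object_base)
		return 0;                      /* object lies past the cut */
	truncate_size -= object_base;
	if (truncate_size > object_size)
		truncate_size = object_size;   /* object wholly before it */
	return truncate_size;
}

int main(void)
{
	/* 4 MiB objects, truncate at 6 MiB: object 0 keeps 4 MiB,
	 * object 1 keeps 2 MiB, object 2 keeps nothing. */
	u64 osz = 4ULL << 20, ts = 6ULL << 20;
	for (int i = 0; i < 3; i++)
		printf("object %d: %llu\n", i,
		       object_truncate_size(ts, i * osz, osz));
	return 0;
}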
3311     @@ -1174,6 +1176,7 @@ static void __register_linger_request(struct ceph_osd_client *osdc,
3312     struct ceph_osd_request *req)
3313     {
3314     dout("__register_linger_request %p\n", req);
3315     + ceph_osdc_get_request(req);
3316     list_add_tail(&req->r_linger_item, &osdc->req_linger);
3317     if (req->r_osd)
3318     list_add_tail(&req->r_linger_osd,
3319     @@ -1196,6 +1199,7 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
3320     if (list_empty(&req->r_osd_item))
3321     req->r_osd = NULL;
3322     }
3323     + ceph_osdc_put_request(req);
3324     }
3325    
3326     void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
3327     @@ -1203,9 +1207,8 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
3328     {
3329     mutex_lock(&osdc->request_mutex);
3330     if (req->r_linger) {
3331     - __unregister_linger_request(osdc, req);
3332     req->r_linger = 0;
3333     - ceph_osdc_put_request(req);
3334     + __unregister_linger_request(osdc, req);
3335     }
3336     mutex_unlock(&osdc->request_mutex);
3337     }
3338     @@ -1217,11 +1220,6 @@ void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
3339     if (!req->r_linger) {
3340     dout("set_request_linger %p\n", req);
3341     req->r_linger = 1;
3342     - /*
3343     - * caller is now responsible for calling
3344     - * unregister_linger_request
3345     - */
3346     - ceph_osdc_get_request(req);
3347     }
3348     }
3349     EXPORT_SYMBOL(ceph_osdc_set_request_linger);
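The libceph changes above move the linger reference into the register/unregister pair itself, so ownership is symmetric and no caller can forget (or double-drop) it. A toy sketch of that get-on-register / put-on-unregister pattern, with a simplified non-atomic refcount:

#include <stdio.h>

struct req_sketch { int refcount; };

static void get(struct req_sketch *r) { r->refcount++; }
static void put(struct req_sketch *r)
{
	if (--r->refcount == 0)
		printf("freed\n");
}

/* The list's reference is taken and dropped by the pair itself. */
static void register_linger(struct req_sketch *r)
{
	get(r);              /* reference owned by the linger list */
	/* list_add_tail(...) */
}

static void unregister_linger(struct req_sketch *r)
{
	/* list_del_init(...) */
	put(r);              /* drop the list's reference */
}

int main(void)
{
	struct req_sketch r = { .refcount = 1 };   /* caller's ref */
	register_linger(&r);
	unregister_linger(&r);
	put(&r);                                   /* caller drops its ref */
	return 0;
}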
3350     @@ -1339,10 +1337,6 @@ static void __send_request(struct ceph_osd_client *osdc,
3351    
3352     ceph_msg_get(req->r_request); /* send consumes a ref */
3353    
3354     - /* Mark the request unsafe if this is the first timet's being sent. */
3355     -
3356     - if (!req->r_sent && req->r_unsafe_callback)
3357     - req->r_unsafe_callback(req, true);
3358     req->r_sent = req->r_osd->o_incarnation;
3359    
3360     ceph_con_send(&req->r_osd->o_con, req->r_request);
3361     @@ -1433,8 +1427,6 @@ static void handle_osds_timeout(struct work_struct *work)
3362    
3363     static void complete_request(struct ceph_osd_request *req)
3364     {
3365     - if (req->r_unsafe_callback)
3366     - req->r_unsafe_callback(req, false);
3367     complete_all(&req->r_safe_completion); /* fsync waiter */
3368     }
3369    
3370     @@ -1496,14 +1488,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
3371     dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
3372     req, result);
3373    
3374     - ceph_decode_need(&p, end, 4, bad);
3375     + ceph_decode_need(&p, end, 4, bad_put);
3376     numops = ceph_decode_32(&p);
3377     if (numops > CEPH_OSD_MAX_OP)
3378     goto bad_put;
3379     if (numops != req->r_num_ops)
3380     goto bad_put;
3381     payload_len = 0;
3382     - ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad);
3383     + ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
3384     for (i = 0; i < numops; i++) {
3385     struct ceph_osd_op *op = p;
3386     int len;
3387     @@ -1521,11 +1513,13 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
3388     goto bad_put;
3389     }
3390    
3391     - ceph_decode_need(&p, end, 4 + numops * 4, bad);
3392     + ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
3393     retry_attempt = ceph_decode_32(&p);
3394     for (i = 0; i < numops; i++)
3395     req->r_reply_op_result[i] = ceph_decode_32(&p);
3396    
3397     + already_completed = req->r_got_reply;
3398     +
3399     if (!req->r_got_reply) {
3400    
3401     req->r_result = result;
3402     @@ -1556,19 +1550,23 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
3403     ((flags & CEPH_OSD_FLAG_WRITE) == 0))
3404     __unregister_request(osdc, req);
3405    
3406     - already_completed = req->r_completed;
3407     - req->r_completed = 1;
3408     mutex_unlock(&osdc->request_mutex);
3409     - if (already_completed)
3410     - goto done;
3411    
3412     - if (req->r_callback)
3413     - req->r_callback(req, msg);
3414     - else
3415     - complete_all(&req->r_completion);
3416     + if (!already_completed) {
3417     + if (req->r_unsafe_callback &&
3418     + result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
3419     + req->r_unsafe_callback(req, true);
3420     + if (req->r_callback)
3421     + req->r_callback(req, msg);
3422     + else
3423     + complete_all(&req->r_completion);
3424     + }
3425    
3426     - if (flags & CEPH_OSD_FLAG_ONDISK)
3427     + if (flags & CEPH_OSD_FLAG_ONDISK) {
3428     + if (req->r_unsafe_callback && already_completed)
3429     + req->r_unsafe_callback(req, false);
3430     complete_request(req);
3431     + }
3432    
3433     done:
3434     dout("req=%p req->r_linger=%d\n", req, req->r_linger);
3435     @@ -1633,8 +1631,10 @@ static void kick_requests(struct ceph_osd_client *osdc, int force_resend)
3436     dout("%p tid %llu restart on osd%d\n",
3437     req, req->r_tid,
3438     req->r_osd ? req->r_osd->o_osd : -1);
3439     + ceph_osdc_get_request(req);
3440     __unregister_request(osdc, req);
3441     __register_linger_request(osdc, req);
3442     + ceph_osdc_put_request(req);
3443     continue;
3444     }
3445    
3446     @@ -1786,6 +1786,8 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3447     nr_maps--;
3448     }
3449    
3450     + if (!osdc->osdmap)
3451     + goto bad;
3452     done:
3453     downgrade_write(&osdc->map_sem);
3454     ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
3455     @@ -2123,7 +2125,6 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3456     __register_request(osdc, req);
3457     req->r_sent = 0;
3458     req->r_got_reply = 0;
3459     - req->r_completed = 0;
3460     rc = __map_request(osdc, req, 0);
3461     if (rc < 0) {
3462     if (nofail) {
3463     @@ -2208,6 +2209,17 @@ void ceph_osdc_sync(struct ceph_osd_client *osdc)
3464     EXPORT_SYMBOL(ceph_osdc_sync);
3465    
3466     /*
3467     + * Call all pending notify callbacks - for use after a watch is
3468     + * unregistered, to make sure no more callbacks for it will be invoked
3469     + */
3470     +extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
3471     +{
3472     + flush_workqueue(osdc->notify_wq);
3473     +}
3474     +EXPORT_SYMBOL(ceph_osdc_flush_notifies);
3475     +
3476     +
3477     +/*
3478     * init, shutdown
3479     */
3480     int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
3481     @@ -2256,12 +2268,10 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
3482     if (err < 0)
3483     goto out_msgpool;
3484    
3485     + err = -ENOMEM;
3486     osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
3487     - if (IS_ERR(osdc->notify_wq)) {
3488     - err = PTR_ERR(osdc->notify_wq);
3489     - osdc->notify_wq = NULL;
3490     + if (!osdc->notify_wq)
3491     goto out_msgpool;
3492     - }
3493     return 0;
3494    
3495     out_msgpool:
3496     diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
3497     index a271c27fac77..722da616438c 100644
3498     --- a/net/wireless/radiotap.c
3499     +++ b/net/wireless/radiotap.c
3500     @@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
3501     /* find payload start allowing for extended bitmap(s) */
3502    
3503     if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
3504     + if ((unsigned long)iterator->_arg -
3505     + (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
3506     + (unsigned long)iterator->_max_length)
3507     + return -EINVAL;
3508     while (get_unaligned_le32(iterator->_arg) &
3509     (1 << IEEE80211_RADIOTAP_EXT)) {
3510     iterator->_arg += sizeof(uint32_t);
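The radiotap fix bounds-checks the very first read of the extended-bitmap chain before dereferencing it. A sketch of the same pointer arithmetic as a reusable predicate; the names are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 if reading sizeof(uint32_t) bytes at *arg stays within
 * [hdr, hdr + max_length); mirrors the comparison added above. */
static int read_in_bounds(const uint8_t *arg, const uint8_t *hdr,
			  size_t max_length)
{
	return (size_t)(arg - hdr) + sizeof(uint32_t) <= max_length;
}

int main(void)
{
	uint8_t buf[8] = { 0 };
	printf("%d\n", read_in_bounds(buf + 4, buf, sizeof(buf))); /* 1: fits */
	printf("%d\n", read_in_bounds(buf + 6, buf, sizeof(buf))); /* 0: short */
	return 0;
}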
3511     diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
3512     index 5e58d7dd7b69..57f14185cf18 100644
3513     --- a/security/selinux/hooks.c
3514     +++ b/security/selinux/hooks.c
3515     @@ -3723,7 +3723,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
3516     u32 nlbl_sid;
3517     u32 nlbl_type;
3518    
3519     - selinux_skb_xfrm_sid(skb, &xfrm_sid);
3520     + selinux_xfrm_skb_sid(skb, &xfrm_sid);
3521     selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
3522    
3523     err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
3524     @@ -4228,8 +4228,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
3525     }
3526     err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
3527     PEER__RECV, &ad);
3528     - if (err)
3529     + if (err) {
3530     selinux_netlbl_err(skb, err, 0);
3531     + return err;
3532     + }
3533     }
3534    
3535     if (secmark_active) {
3536     @@ -4740,22 +4742,32 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
3537     * as fast and as clean as possible. */
3538     if (!selinux_policycap_netpeer)
3539     return selinux_ip_postroute_compat(skb, ifindex, family);
3540     +
3541     + secmark_active = selinux_secmark_enabled();
3542     + peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
3543     + if (!secmark_active && !peerlbl_active)
3544     + return NF_ACCEPT;
3545     +
3546     + sk = skb->sk;
3547     +
3548     #ifdef CONFIG_XFRM
3549     /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
3550     * packet transformation so allow the packet to pass without any checks
3551     * since we'll have another chance to perform access control checks
3552     * when the packet is on it's final way out.
3553     * NOTE: there appear to be some IPv6 multicast cases where skb->dst
3554     - * is NULL, in this case go ahead and apply access control. */
3555     - if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
3556     + * is NULL, in this case go ahead and apply access control.
3558     + * NOTE: if this is a local socket (skb->sk != NULL) that is in the
3559     + * TCP listening state we cannot wait until the XFRM processing
3560     + * is done as we will miss out on the SA label if we do;
3561     + * unfortunately, this means more work, but it is only once per
3562     + * connection. */
3563     + if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
3564     + !(sk != NULL && sk->sk_state == TCP_LISTEN))
3565     return NF_ACCEPT;
3566     #endif
3567     - secmark_active = selinux_secmark_enabled();
3568     - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
3569     - if (!secmark_active && !peerlbl_active)
3570     - return NF_ACCEPT;
3571    
3572     - sk = skb->sk;
3573     if (sk == NULL) {
3574     /* Without an associated socket the packet is either coming
3575     * from the kernel or it is being forwarded; check the packet
3576     @@ -4783,6 +4795,25 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
3577     struct sk_security_struct *sksec = sk->sk_security;
3578     if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
3579     return NF_DROP;
3580     + /* At this point, if the returned skb peerlbl is SECSID_NULL
3581     + * and the packet has been through at least one XFRM
3582     + * transformation then we must be dealing with the "final"
3583     + * form of labeled IPsec packet; since we've already applied
3584     + * all of our access controls on this packet we can safely
3585     + * pass the packet. */
3586     + if (skb_sid == SECSID_NULL) {
3587     + switch (family) {
3588     + case PF_INET:
3589     + if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
3590     + return NF_ACCEPT;
3591     + break;
3592     + case PF_INET6:
3593     + if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
3594     + return NF_ACCEPT;
3594     + break;
3595     + default:
3596     + return NF_DROP_ERR(-ECONNREFUSED);
3597     + }
3598     + }
3599     if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
3600     return NF_DROP;
3601     secmark_perm = PACKET__SEND;
3602     @@ -5452,11 +5483,11 @@ static int selinux_setprocattr(struct task_struct *p,
3603     /* Check for ptracing, and update the task SID if ok.
3604     Otherwise, leave SID unchanged and fail. */
3605     ptsid = 0;
3606     - task_lock(p);
3607     + rcu_read_lock();
3608     tracer = ptrace_parent(p);
3609     if (tracer)
3610     ptsid = task_sid(tracer);
3611     - task_unlock(p);
3612     + rcu_read_unlock();
3613    
3614     if (tracer) {
3615     error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
3616     diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
3617     index 65f67cb0aefb..3ffdadc9960f 100644
3618     --- a/security/selinux/include/xfrm.h
3619     +++ b/security/selinux/include/xfrm.h
3620     @@ -47,6 +47,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
3621     int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
3622     struct common_audit_data *ad, u8 proto);
3623     int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
3624     +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
3625    
3626     static inline void selinux_xfrm_notify_policyload(void)
3627     {
3628     @@ -80,12 +81,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
3629     static inline void selinux_xfrm_notify_policyload(void)
3630     {
3631     }
3632     -#endif
3633    
3634     -static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
3635     +static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
3636     {
3637     - int err = selinux_xfrm_decode_session(skb, sid, 0);
3638     - BUG_ON(err);
3639     + *sid = SECSID_NULL;
3640     + return 0;
3641     }
3642     +#endif
3643    
3644     #endif /* _SELINUX_XFRM_H_ */
3645     diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
3646     index d03081886214..78504a18958a 100644
3647     --- a/security/selinux/xfrm.c
3648     +++ b/security/selinux/xfrm.c
3649     @@ -152,21 +152,13 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
3650     return rc;
3651     }
3652    
3653     -/*
3654     - * LSM hook implementation that checks and/or returns the xfrm sid for the
3655     - * incoming packet.
3656     - */
3657     -
3658     -int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
3659     +static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
3660     + u32 *sid, int ckall)
3661     {
3662     - struct sec_path *sp;
3663     + struct sec_path *sp = skb->sp;
3664    
3665     *sid = SECSID_NULL;
3666    
3667     - if (skb == NULL)
3668     - return 0;
3669     -
3670     - sp = skb->sp;
3671     if (sp) {
3672     int i, sid_set = 0;
3673    
3674     @@ -190,6 +182,45 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
3675     return 0;
3676     }
3677    
3678     +static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
3679     +{
3680     + struct dst_entry *dst = skb_dst(skb);
3681     + struct xfrm_state *x;
3682     +
3683     + if (dst == NULL)
3684     + return SECSID_NULL;
3685     + x = dst->xfrm;
3686     + if (x == NULL || !selinux_authorizable_xfrm(x))
3687     + return SECSID_NULL;
3688     +
3689     + return x->security->ctx_sid;
3690     +}
3691     +
3692     +/*
3693     + * LSM hook implementation that checks and/or returns the xfrm sid for the
3694     + * incoming packet.
3695     + */
3696     +
3697     +int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
3698     +{
3699     + if (skb == NULL) {
3700     + *sid = SECSID_NULL;
3701     + return 0;
3702     + }
3703     + return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
3704     +}
3705     +
3706     +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
3707     +{
3708     + int rc;
3709     +
3710     + rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
3711     + if (rc == 0 && *sid == SECSID_NULL)
3712     + *sid = selinux_xfrm_skb_sid_egress(skb);
3713     +
3714     + return rc;
3715     +}
3716     +
3717     /*
3718     * Security blob allocation for xfrm_policy and xfrm_state
3719     * CTX does not have a meaningful value on input
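selinux_xfrm_skb_sid() above composes the two lookups: prefer the ingress (sec_path) SID and fall back to the egress (dst->xfrm) SID only when none was found. A toy sketch of that fallback shape, with hypothetical stand-in lookups in place of the real skb walks:

#include <stdio.h>

#define SECSID_NULL 0u

/* Stand-ins: ingress label from received transforms, egress label from
 * the outbound transform attached to the route. */
static unsigned int ingress_sid(int has_sp)  { return has_sp ? 42u : SECSID_NULL; }
static unsigned int egress_sid(int has_xfrm) { return has_xfrm ? 99u : SECSID_NULL; }

/* Prefer the ingress SID; fall back to egress only if none was found. */
static unsigned int skb_sid(int has_sp, int has_xfrm)
{
	unsigned int sid = ingress_sid(has_sp);
	if (sid == SECSID_NULL)
		sid = egress_sid(has_xfrm);
	return sid;
}

int main(void)
{
	/* prints: 42 99 0 */
	printf("%u %u %u\n", skb_sid(1, 1), skb_sid(0, 1), skb_sid(0, 0));
	return 0;
}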
3720     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
3721     index 41b3dfe68698..3284940a4af2 100644
3722     --- a/sound/core/pcm_lib.c
3723     +++ b/sound/core/pcm_lib.c
3724     @@ -1936,6 +1936,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
3725     case SNDRV_PCM_STATE_DISCONNECTED:
3726     err = -EBADFD;
3727     goto _endloop;
3728     + case SNDRV_PCM_STATE_PAUSED:
3729     + continue;
3730     }
3731     if (!tout) {
3732     snd_printd("%s write error (DMA or IRQ trouble?)\n",
3733     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
3734     index 624e6c044a44..5f055d7ee85b 100644
3735     --- a/sound/pci/hda/hda_intel.c
3736     +++ b/sound/pci/hda/hda_intel.c
3737     @@ -3332,6 +3332,10 @@ static void check_probe_mask(struct azx *chip, int dev)
3738     * white/black-list for enable_msi
3739     */
3740     static struct snd_pci_quirk msi_black_list[] = {
3741     + SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
3742     + SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
3743     + SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
3744     + SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
3745     SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
3746     SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
3747     SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
3748     diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
3749     index cd6ed88409d7..866b9c657e15 100644
3750     --- a/sound/soc/codecs/wm5110.c
3751     +++ b/sound/soc/codecs/wm5110.c
3752     @@ -897,7 +897,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
3753     { "HPOUT2R", NULL, "OUT2R" },
3754    
3755     { "HPOUT3L", NULL, "OUT3L" },
3756     - { "HPOUT3R", NULL, "OUT3L" },
3757     + { "HPOUT3R", NULL, "OUT3R" },
3758    
3759     { "SPKOUTLN", NULL, "OUT4L" },
3760     { "SPKOUTLP", NULL, "OUT4L" },
3761     diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
3762     index 3ff195c541db..af62f843a691 100644
3763     --- a/sound/soc/codecs/wm8904.c
3764     +++ b/sound/soc/codecs/wm8904.c
3765     @@ -1449,7 +1449,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
3766    
3767     switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
3768     case SND_SOC_DAIFMT_DSP_B:
3769     - aif1 |= WM8904_AIF_LRCLK_INV;
3770     + aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
3771     case SND_SOC_DAIFMT_DSP_A:
3772     aif1 |= 0x3;
3773     break;
3774     diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
3775     index 3470b649c0b2..6dbb17d050c9 100644
3776     --- a/sound/soc/codecs/wm_adsp.c
3777     +++ b/sound/soc/codecs/wm_adsp.c
3778     @@ -1073,13 +1073,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
3779     return ret;
3780    
3781     /* Wait for the RAM to start, should be near instantaneous */
3782     - count = 0;
3783     - do {
3784     + for (count = 0; count < 10; ++count) {
3785     ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
3786     &val);
3787     if (ret != 0)
3788     return ret;
3789     - } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
3790     +
3791     + if (val & ADSP2_RAM_RDY)
3792     + break;
3793     +
3794     + msleep(1);
3795     + }
3796    
3797     if (!(val & ADSP2_RAM_RDY)) {
3798     adsp_err(dsp, "Failed to start DSP RAM\n");
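The wm_adsp change turns an awkward do/while into a bounded poll loop that sleeps between attempts and never sleeps after success. A generic userspace sketch of that shape; read_status() is a stand-in for regmap_read(), not the driver's API:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for regmap_read(): reports "ready" on the third poll. */
static int read_status(unsigned int *val)
{
	static int calls;
	*val = (++calls >= 3) ? 1u : 0u;   /* bit 0 = RAM ready */
	return 0;
}

int main(void)
{
	unsigned int val = 0;
	int count, ret;

	/* Check first, then sleep: unlike the old do/while, the final
	 * iteration's result is actually tested, and we never sleep
	 * after the ready bit comes up. */
	for (count = 0; count < 10; ++count) {
		ret = read_status(&val);
		if (ret != 0)
			return ret;
		if (val & 1)
			break;
		usleep(1000);              /* msleep(1) stand-in */
	}
	if (!(val & 1)) {
		fprintf(stderr, "device never became ready\n");
		return 1;
	}
	printf("ready after %d polls\n", count + 1);
	return 0;
}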
3799     diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
3800     index 52af7f6fb37f..540832e9e684 100644
3801     --- a/sound/soc/tegra/tegra20_i2s.c
3802     +++ b/sound/soc/tegra/tegra20_i2s.c
3803     @@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
3804     unsigned int fmt)
3805     {
3806     struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
3807     - unsigned int mask, val;
3808     + unsigned int mask = 0, val = 0;
3809    
3810     switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
3811     case SND_SOC_DAIFMT_NB_NF:
3812     @@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
3813     return -EINVAL;
3814     }
3815    
3816     - mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
3817     + mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
3818     switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
3819     case SND_SOC_DAIFMT_CBS_CFS:
3820     - val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
3821     + val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
3822     break;
3823     case SND_SOC_DAIFMT_CBM_CFM:
3824     break;
3825     diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
3826     index 551b3c93ce93..2e7d4aca3d7d 100644
3827     --- a/sound/soc/tegra/tegra20_spdif.c
3828     +++ b/sound/soc/tegra/tegra20_spdif.c
3829     @@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
3830     {
3831     struct device *dev = dai->dev;
3832     struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
3833     - unsigned int mask, val;
3834     + unsigned int mask = 0, val = 0;
3835     int ret, spdifclock;
3836    
3837     - mask = TEGRA20_SPDIF_CTRL_PACK |
3838     - TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
3839     + mask |= TEGRA20_SPDIF_CTRL_PACK |
3840     + TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
3841     switch (params_format(params)) {
3842     case SNDRV_PCM_FORMAT_S16_LE:
3843     - val = TEGRA20_SPDIF_CTRL_PACK |
3844     - TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
3845     + val |= TEGRA20_SPDIF_CTRL_PACK |
3846     + TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
3847     break;
3848     default:
3849     return -EINVAL;
3850     diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
3851     index a5432b10eaca..5c6520b8ec0e 100644
3852     --- a/sound/soc/tegra/tegra30_i2s.c
3853     +++ b/sound/soc/tegra/tegra30_i2s.c
3854     @@ -117,7 +117,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
3855     unsigned int fmt)
3856     {
3857     struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
3858     - unsigned int mask, val;
3859     + unsigned int mask = 0, val = 0;
3860    
3861     switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
3862     case SND_SOC_DAIFMT_NB_NF:
3863     @@ -126,10 +126,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
3864     return -EINVAL;
3865     }
3866    
3867     - mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
3868     + mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
3869     switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
3870     case SND_SOC_DAIFMT_CBS_CFS:
3871     - val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
3872     + val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
3873     break;
3874     case SND_SOC_DAIFMT_CBM_CFM:
3875     break;
3876     diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
3877     index dc4de3762111..bcf1d2f0b791 100644
3878     --- a/tools/power/cpupower/utils/cpupower-set.c
3879     +++ b/tools/power/cpupower/utils/cpupower-set.c
3880     @@ -18,9 +18,9 @@
3881     #include "helpers/bitmask.h"
3882    
3883     static struct option set_opts[] = {
3884     - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
3885     - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
3886     - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
3887     + { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
3888     + { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
3889     + { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
3890     { },
3891     };
3892
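The cpupower fix swaps optional_argument for required_argument because, with getopt_long(), an optional argument is only recognized in the --option=value spelling; "cpupower set --perf-bias 15" would silently drop the value. A minimal sketch of the required_argument behavior (only the perf-bias option is reproduced here):

#include <getopt.h>
#include <stdio.h>

int main(int argc, char **argv)
{
	/* required_argument: both "--perf-bias 5" and "--perf-bias=5"
	 * work; with optional_argument only the "=" form would. */
	static struct option opts[] = {
		{ .name = "perf-bias", .has_arg = required_argument,
		  .flag = NULL, .val = 'b' },
		{ 0 }
	};
	int c;

	while ((c = getopt_long(argc, argv, "b:", opts, NULL)) != -1) {
		if (c == 'b')
			printf("perf-bias = %s\n", optarg);
		else
			return 1;
	}
	return 0;
}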