Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.14/0109-3.14.10-all-fixes.patch

Revision 2506
Fri Oct 17 07:55:45 2014 UTC by niro
File size: 148272 bytes
-patches for 3.14
1 niro 2506 diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
2     index 550068466605..6ae89a9edf2a 100644
3     --- a/Documentation/vm/hwpoison.txt
4     +++ b/Documentation/vm/hwpoison.txt
5     @@ -84,6 +84,11 @@ PR_MCE_KILL
6     PR_MCE_KILL_EARLY: Early kill
7     PR_MCE_KILL_LATE: Late kill
8     PR_MCE_KILL_DEFAULT: Use system global default
9     + Note that if you want to have a dedicated thread which handles
10     + the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
11     + call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
12     + the SIGBUS is sent to the main thread.
13     +
14     PR_MCE_KILL_GET
15     return current mode
16    
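[ A minimal userspace sketch of the dedicated-thread setup the hwpoison.txt hunk above describes -- illustrative only, not part of the patch. handle_poisoned_page() is a hypothetical application callback; error handling is omitted. ]

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>

/* Hypothetical application-specific recovery callback. */
static void handle_poisoned_page(void *addr)
{
	/* e.g. discard or remap the page at 'addr' */
}

static void *mce_sigbus_thread(void *unused)
{
	sigset_t set;
	siginfo_t info;

	/* Opt this thread in to early kill so SIGBUS(BUS_MCEERR_AO)
	 * is routed here instead of to the main thread. */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);

	sigemptyset(&set);
	sigaddset(&set, SIGBUS);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	for (;;) {
		if (sigwaitinfo(&set, &info) == SIGBUS &&
		    info.si_code == BUS_MCEERR_AO)
			/* info.si_addr points into the poisoned page */
			handle_poisoned_page(info.si_addr);
	}
	return NULL;
}
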
17     diff --git a/Makefile b/Makefile
18     index ee247656432f..bd5d673ab57f 100644
19     --- a/Makefile
20     +++ b/Makefile
21     @@ -1,6 +1,6 @@
22     VERSION = 3
23     PATCHLEVEL = 14
24     -SUBLEVEL = 9
25     +SUBLEVEL = 10
26     EXTRAVERSION =
27     NAME = Remembering Coco
28    
29     diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
30     index b33e10ea2ea0..1c6bd83bde5e 100644
31     --- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
32     +++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
33     @@ -23,7 +23,7 @@
34    
35     memory {
36     device_type = "memory";
37     - reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
38     + reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
39     };
40    
41     soc {
42     diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
43     index af4e8c8a5422..6582c4adc182 100644
44     --- a/arch/arm/kernel/stacktrace.c
45     +++ b/arch/arm/kernel/stacktrace.c
46     @@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
47     return trace->nr_entries >= trace->max_entries;
48     }
49    
50     -void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
51     +/* This must be noinline to so that our skip calculation works correctly */
52     +static noinline void __save_stack_trace(struct task_struct *tsk,
53     + struct stack_trace *trace, unsigned int nosched)
54     {
55     struct stack_trace_data data;
56     struct stackframe frame;
57    
58     data.trace = trace;
59     data.skip = trace->skip;
60     + data.no_sched_functions = nosched;
61    
62     if (tsk != current) {
63     #ifdef CONFIG_SMP
64     @@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
65     trace->entries[trace->nr_entries++] = ULONG_MAX;
66     return;
67     #else
68     - data.no_sched_functions = 1;
69     frame.fp = thread_saved_fp(tsk);
70     frame.sp = thread_saved_sp(tsk);
71     frame.lr = 0; /* recovered from the stack */
72     @@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
73     } else {
74     register unsigned long current_sp asm ("sp");
75    
76     - data.no_sched_functions = 0;
77     + /* We don't want this function nor the caller */
78     + data.skip += 2;
79     frame.fp = (unsigned long)__builtin_frame_address(0);
80     frame.sp = current_sp;
81     frame.lr = (unsigned long)__builtin_return_address(0);
82     - frame.pc = (unsigned long)save_stack_trace_tsk;
83     + frame.pc = (unsigned long)__save_stack_trace;
84     }
85    
86     walk_stackframe(&frame, save_trace, &data);
87     @@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
88     trace->entries[trace->nr_entries++] = ULONG_MAX;
89     }
90    
91     +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
92     +{
93     + __save_stack_trace(tsk, trace, 1);
94     +}
95     +
96     void save_stack_trace(struct stack_trace *trace)
97     {
98     - save_stack_trace_tsk(current, trace);
99     + __save_stack_trace(current, trace, 0);
100     }
101     EXPORT_SYMBOL_GPL(save_stack_trace);
102     #endif
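[ For reference, illustrative only and not part of the patch: the skip arithmetic above is what in-kernel callers of the 3.14-era stacktrace API rely on. A minimal sketch: ]

#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= 16,
		.entries	= entries,
		/* The patched __save_stack_trace() adds 2 to 'skip' to
		 * hide itself and its caller, so with 0 the trace begins
		 * at this function's frame. */
		.skip		= 0,
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}
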
103     diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
104     index fd90cafc2e36..db57072aeed3 100644
105     --- a/arch/arm/mach-omap1/board-h2.c
106     +++ b/arch/arm/mach-omap1/board-h2.c
107     @@ -343,7 +343,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
108     /* usb1 has a Mini-AB port and external isp1301 transceiver */
109     .otg = 2,
110    
111     -#ifdef CONFIG_USB_GADGET_OMAP
112     +#if IS_ENABLED(CONFIG_USB_OMAP)
113     .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
114     /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
115     #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
116     diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
117     index 816ecd13f81e..bfed4f928663 100644
118     --- a/arch/arm/mach-omap1/board-h3.c
119     +++ b/arch/arm/mach-omap1/board-h3.c
120     @@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
121     /* usb1 has a Mini-AB port and external isp1301 transceiver */
122     .otg = 2,
123    
124     -#ifdef CONFIG_USB_GADGET_OMAP
125     +#if IS_ENABLED(CONFIG_USB_OMAP)
126     .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
127     #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
128     /* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
129     diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
130     index bd5f02e9c354..c49ce83cc1eb 100644
131     --- a/arch/arm/mach-omap1/board-innovator.c
132     +++ b/arch/arm/mach-omap1/board-innovator.c
133     @@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
134     /* usb1 has a Mini-AB port and external isp1301 transceiver */
135     .otg = 2,
136    
137     -#ifdef CONFIG_USB_GADGET_OMAP
138     +#if IS_ENABLED(CONFIG_USB_OMAP)
139     .hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
140     /* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
141     #elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
142     diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
143     index d68909b095f1..55b10877f4ff 100644
144     --- a/arch/arm/mach-omap1/board-osk.c
145     +++ b/arch/arm/mach-omap1/board-osk.c
146     @@ -280,7 +280,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
147     * be used, with a NONSTANDARD gender-bending cable/dongle, as
148     * a peripheral.
149     */
150     -#ifdef CONFIG_USB_GADGET_OMAP
151     +#if IS_ENABLED(CONFIG_USB_OMAP)
152     .register_dev = 1,
153     .hmc_mode = 0,
154     #else
155     diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
156     index 54ee6163c181..66781bf34077 100644
157     --- a/arch/arm/mm/hugetlbpage.c
158     +++ b/arch/arm/mm/hugetlbpage.c
159     @@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
160     {
161     return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
162     }
163     -
164     -int pmd_huge_support(void)
165     -{
166     - return 1;
167     -}
168     diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
169     index 01a719e18bb0..22e3ad63500c 100644
170     --- a/arch/arm/mm/proc-v7-3level.S
171     +++ b/arch/arm/mm/proc-v7-3level.S
172     @@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
173     mov pc, lr
174     ENDPROC(cpu_v7_switch_mm)
175    
176     +#ifdef __ARMEB__
177     +#define rl r3
178     +#define rh r2
179     +#else
180     +#define rl r2
181     +#define rh r3
182     +#endif
183     +
184     /*
185     * cpu_v7_set_pte_ext(ptep, pte)
186     *
187     @@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
188     */
189     ENTRY(cpu_v7_set_pte_ext)
190     #ifdef CONFIG_MMU
191     - tst r2, #L_PTE_VALID
192     + tst rl, #L_PTE_VALID
193     beq 1f
194     - tst r3, #1 << (57 - 32) @ L_PTE_NONE
195     - bicne r2, #L_PTE_VALID
196     + tst rh, #1 << (57 - 32) @ L_PTE_NONE
197     + bicne rl, #L_PTE_VALID
198     bne 1f
199     - tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
200     - orreq r2, #L_PTE_RDONLY
201     + tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
202     + orreq rl, #L_PTE_RDONLY
203     1: strd r2, r3, [r0]
204     ALT_SMP(W(nop))
205     ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
206     diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
207     index fd0c0c0e447a..064d3525f260 100644
208     --- a/arch/arm64/include/asm/dma-mapping.h
209     +++ b/arch/arm64/include/asm/dma-mapping.h
210     @@ -26,8 +26,6 @@
211     #include <xen/xen.h>
212     #include <asm/xen/hypervisor.h>
213    
214     -#define ARCH_HAS_DMA_GET_REQUIRED_MASK
215     -
216     #define DMA_ERROR_CODE (~(dma_addr_t)0)
217     extern struct dma_map_ops *dma_ops;
218    
219     diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
220     index 6a8928bba03c..7a50b86464cc 100644
221     --- a/arch/arm64/kernel/ptrace.c
222     +++ b/arch/arm64/kernel/ptrace.c
223     @@ -650,11 +650,16 @@ static int compat_gpr_get(struct task_struct *target,
224     reg = task_pt_regs(target)->regs[idx];
225     }
226    
227     - ret = copy_to_user(ubuf, &reg, sizeof(reg));
228     - if (ret)
229     - break;
230     -
231     - ubuf += sizeof(reg);
232     + if (kbuf) {
233     + memcpy(kbuf, &reg, sizeof(reg));
234     + kbuf += sizeof(reg);
235     + } else {
236     + ret = copy_to_user(ubuf, &reg, sizeof(reg));
237     + if (ret)
238     + break;
239     +
240     + ubuf += sizeof(reg);
241     + }
242     }
243    
244     return ret;
245     @@ -684,11 +689,16 @@ static int compat_gpr_set(struct task_struct *target,
246     unsigned int idx = start + i;
247     compat_ulong_t reg;
248    
249     - ret = copy_from_user(&reg, ubuf, sizeof(reg));
250     - if (ret)
251     - return ret;
252     + if (kbuf) {
253     + memcpy(&reg, kbuf, sizeof(reg));
254     + kbuf += sizeof(reg);
255     + } else {
256     + ret = copy_from_user(&reg, ubuf, sizeof(reg));
257     + if (ret)
258     + return ret;
259    
260     - ubuf += sizeof(reg);
261     + ubuf += sizeof(reg);
262     + }
263    
264     switch (idx) {
265     case 15:
266     @@ -821,6 +831,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
267     compat_ulong_t val)
268     {
269     int ret;
270     + mm_segment_t old_fs = get_fs();
271    
272     if (off & 3 || off >= COMPAT_USER_SZ)
273     return -EIO;
274     @@ -828,10 +839,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
275     if (off >= sizeof(compat_elf_gregset_t))
276     return 0;
277    
278     + set_fs(KERNEL_DS);
279     ret = copy_regset_from_user(tsk, &user_aarch32_view,
280     REGSET_COMPAT_GPR, off,
281     sizeof(compat_ulong_t),
282     &val);
283     + set_fs(old_fs);
284     +
285     return ret;
286     }
287    
288     diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
289     index 31eb959e9aa8..023747bf4dd7 100644
290     --- a/arch/arm64/mm/hugetlbpage.c
291     +++ b/arch/arm64/mm/hugetlbpage.c
292     @@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
293     #endif
294     }
295    
296     -int pmd_huge_support(void)
297     -{
298     - return 1;
299     -}
300     -
301     static __init int setup_hugepagesz(char *opt)
302     {
303     unsigned long ps = memparse(opt, &opt);
304     diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
305     index 68232db98baa..76069c18ee42 100644
306     --- a/arch/ia64/mm/hugetlbpage.c
307     +++ b/arch/ia64/mm/hugetlbpage.c
308     @@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
309     return 0;
310     }
311    
312     -int pmd_huge_support(void)
313     -{
314     - return 0;
315     -}
316     -
317     struct page *
318     follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
319     {
320     diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
321     index 042431509b56..3c52fa6d0f8e 100644
322     --- a/arch/metag/mm/hugetlbpage.c
323     +++ b/arch/metag/mm/hugetlbpage.c
324     @@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
325     return 0;
326     }
327    
328     -int pmd_huge_support(void)
329     -{
330     - return 1;
331     -}
332     -
333     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
334     pmd_t *pmd, int write)
335     {
336     diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
337     index 77e0ae036e7c..4ec8ee10d371 100644
338     --- a/arch/mips/mm/hugetlbpage.c
339     +++ b/arch/mips/mm/hugetlbpage.c
340     @@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
341     return (pud_val(pud) & _PAGE_HUGE) != 0;
342     }
343    
344     -int pmd_huge_support(void)
345     -{
346     - return 1;
347     -}
348     -
349     struct page *
350     follow_huge_pmd(struct mm_struct *mm, unsigned long address,
351     pmd_t *pmd, int write)
352     diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
353     index eb923654ba80..7e70ae968e5f 100644
354     --- a/arch/powerpc/mm/hugetlbpage.c
355     +++ b/arch/powerpc/mm/hugetlbpage.c
356     @@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
357     */
358     return ((pgd_val(pgd) & 0x3) != 0x0);
359     }
360     -
361     -int pmd_huge_support(void)
362     -{
363     - return 1;
364     -}
365     #else
366     int pmd_huge(pmd_t pmd)
367     {
368     @@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
369     {
370     return 0;
371     }
372     -
373     -int pmd_huge_support(void)
374     -{
375     - return 0;
376     -}
377     #endif
378    
379     pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
380     diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
381     index bbf8141408cd..2bed4f02a558 100644
382     --- a/arch/s390/include/asm/lowcore.h
383     +++ b/arch/s390/include/asm/lowcore.h
384     @@ -142,9 +142,9 @@ struct _lowcore {
385     __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
386    
387     /* Interrupt response block */
388     - __u8 irb[64]; /* 0x0300 */
389     + __u8 irb[96]; /* 0x0300 */
390    
391     - __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
392     + __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */
393    
394     /*
395     * 0xe00 contains the address of the IPL Parameter Information
396     @@ -288,12 +288,13 @@ struct _lowcore {
397     __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
398    
399     /* Interrupt response block. */
400     - __u8 irb[64]; /* 0x0400 */
401     + __u8 irb[96]; /* 0x0400 */
402     + __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */
403    
404     /* Per cpu primary space access list */
405     - __u32 paste[16]; /* 0x0440 */
406     + __u32 paste[16]; /* 0x0480 */
407    
408     - __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
409     + __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */
410    
411     /*
412     * 0xe00 contains the address of the IPL Parameter Information
413     diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
414     index dd95f1631621..a80190be6e83 100644
415     --- a/arch/s390/kernel/time.c
416     +++ b/arch/s390/kernel/time.c
417     @@ -226,7 +226,7 @@ void update_vsyscall(struct timekeeper *tk)
418     vdso_data->wtom_clock_sec =
419     tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
420     vdso_data->wtom_clock_nsec = tk->xtime_nsec +
421     - + (tk->wall_to_monotonic.tv_nsec << tk->shift);
422     + + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
423     nsecps = (u64) NSEC_PER_SEC << tk->shift;
424     while (vdso_data->wtom_clock_nsec >= nsecps) {
425     vdso_data->wtom_clock_nsec -= nsecps;
426     diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
427     index d261c62e40a6..248445f92604 100644
428     --- a/arch/s390/mm/hugetlbpage.c
429     +++ b/arch/s390/mm/hugetlbpage.c
430     @@ -223,11 +223,6 @@ int pud_huge(pud_t pud)
431     return 0;
432     }
433    
434     -int pmd_huge_support(void)
435     -{
436     - return 1;
437     -}
438     -
439     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
440     pmd_t *pmdp, int write)
441     {
442     diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
443     index 0d676a41081e..d7762349ea48 100644
444     --- a/arch/sh/mm/hugetlbpage.c
445     +++ b/arch/sh/mm/hugetlbpage.c
446     @@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
447     return 0;
448     }
449    
450     -int pmd_huge_support(void)
451     -{
452     - return 0;
453     -}
454     -
455     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
456     pmd_t *pmd, int write)
457     {
458     diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
459     index 9bd9ce80bf77..d329537739c6 100644
460     --- a/arch/sparc/mm/hugetlbpage.c
461     +++ b/arch/sparc/mm/hugetlbpage.c
462     @@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
463     return 0;
464     }
465    
466     -int pmd_huge_support(void)
467     -{
468     - return 0;
469     -}
470     -
471     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
472     pmd_t *pmd, int write)
473     {
474     diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
475     index 0cb3bbaa580c..e514899e1100 100644
476     --- a/arch/tile/mm/hugetlbpage.c
477     +++ b/arch/tile/mm/hugetlbpage.c
478     @@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
479     return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
480     }
481    
482     -int pmd_huge_support(void)
483     -{
484     - return 1;
485     -}
486     -
487     struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
488     pmd_t *pmd, int write)
489     {
490     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
491     index 0af5250d914f..1981dd9b8a11 100644
492     --- a/arch/x86/Kconfig
493     +++ b/arch/x86/Kconfig
494     @@ -1909,6 +1909,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
495     def_bool y
496     depends on X86_64 || X86_PAE
497    
498     +config ARCH_ENABLE_HUGEPAGE_MIGRATION
499     + def_bool y
500     + depends on X86_64 && HUGETLB_PAGE && MIGRATION
501     +
502     menu "Power management and ACPI options"
503    
504     config ARCH_HIBERNATION_HEADER
505     diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
506     index a2a4f4697889..6491353cc9aa 100644
507     --- a/arch/x86/kernel/entry_32.S
508     +++ b/arch/x86/kernel/entry_32.S
509     @@ -431,9 +431,10 @@ sysenter_past_esp:
510     jnz sysenter_audit
511     sysenter_do_call:
512     cmpl $(NR_syscalls), %eax
513     - jae syscall_badsys
514     + jae sysenter_badsys
515     call *sys_call_table(,%eax,4)
516     movl %eax,PT_EAX(%esp)
517     +sysenter_after_call:
518     LOCKDEP_SYS_EXIT
519     DISABLE_INTERRUPTS(CLBR_ANY)
520     TRACE_IRQS_OFF
521     @@ -551,11 +552,6 @@ ENTRY(iret_exc)
522    
523     CFI_RESTORE_STATE
524     ldt_ss:
525     - larl PT_OLDSS(%esp), %eax
526     - jnz restore_nocheck
527     - testl $0x00400000, %eax # returning to 32bit stack?
528     - jnz restore_nocheck # allright, normal return
529     -
530     #ifdef CONFIG_PARAVIRT
531     /*
532     * The kernel can't run on a non-flat stack if paravirt mode
533     @@ -688,7 +684,12 @@ END(syscall_fault)
534    
535     syscall_badsys:
536     movl $-ENOSYS,PT_EAX(%esp)
537     - jmp resume_userspace
538     + jmp syscall_exit
539     +END(syscall_badsys)
540     +
541     +sysenter_badsys:
542     + movl $-ENOSYS,PT_EAX(%esp)
543     + jmp sysenter_after_call
544     END(syscall_badsys)
545     CFI_ENDPROC
546     /*
547     diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
548     index 8c9f647ff9e1..8b977ebf9388 100644
549     --- a/arch/x86/mm/hugetlbpage.c
550     +++ b/arch/x86/mm/hugetlbpage.c
551     @@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
552     {
553     return NULL;
554     }
555     -
556     -int pmd_huge_support(void)
557     -{
558     - return 0;
559     -}
560     #else
561    
562     struct page *
563     @@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
564     {
565     return !!(pud_val(pud) & _PAGE_PSE);
566     }
567     -
568     -int pmd_huge_support(void)
569     -{
570     - return 1;
571     -}
572     #endif
573    
574     #ifdef CONFIG_HUGETLB_PAGE
575     diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
576     index a12bddc7ccea..7f517ca19d32 100644
577     --- a/arch/x86/syscalls/syscall_64.tbl
578     +++ b/arch/x86/syscalls/syscall_64.tbl
579     @@ -212,10 +212,10 @@
580     203 common sched_setaffinity sys_sched_setaffinity
581     204 common sched_getaffinity sys_sched_getaffinity
582     205 64 set_thread_area
583     -206 common io_setup sys_io_setup
584     +206 64 io_setup sys_io_setup
585     207 common io_destroy sys_io_destroy
586     208 common io_getevents sys_io_getevents
587     -209 common io_submit sys_io_submit
588     +209 64 io_submit sys_io_submit
589     210 common io_cancel sys_io_cancel
590     211 64 get_thread_area
591     212 common lookup_dcookie sys_lookup_dcookie
592     @@ -358,3 +358,5 @@
593     540 x32 process_vm_writev compat_sys_process_vm_writev
594     541 x32 setsockopt compat_sys_setsockopt
595     542 x32 getsockopt compat_sys_getsockopt
596     +543 x32 io_setup compat_sys_io_setup
597     +544 x32 io_submit compat_sys_io_submit
598     diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
599     index 45c0eb26b33d..cd2047af96db 100644
600     --- a/drivers/acpi/acpica/utstring.c
601     +++ b/drivers/acpi/acpica/utstring.c
602     @@ -353,7 +353,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
603     }
604    
605     acpi_os_printf("\"");
606     - for (i = 0; string[i] && (i < max_length); i++) {
607     + for (i = 0; (i < max_length) && string[i]; i++) {
608    
609     /* Escape sequences */
610    
611     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
612     index fcb59c21c68d..b48aefab57e8 100644
613     --- a/drivers/acpi/bus.c
614     +++ b/drivers/acpi/bus.c
615     @@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir;
616     EXPORT_SYMBOL(acpi_root_dir);
617    
618     #ifdef CONFIG_X86
619     +#ifdef CONFIG_ACPI_CUSTOM_DSDT
620     +static inline int set_copy_dsdt(const struct dmi_system_id *id)
621     +{
622     + return 0;
623     +}
624     +#else
625     static int set_copy_dsdt(const struct dmi_system_id *id)
626     {
627     printk(KERN_NOTICE "%s detected - "
628     @@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
629     acpi_gbl_copy_dsdt_locally = 1;
630     return 0;
631     }
632     +#endif
633    
634     static struct dmi_system_id dsdt_dmi_table[] __initdata = {
635     /*
636     diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
637     index 85e3b612bdc0..81a756c765dc 100644
638     --- a/drivers/acpi/utils.c
639     +++ b/drivers/acpi/utils.c
640     @@ -30,6 +30,7 @@
641     #include <linux/types.h>
642     #include <linux/hardirq.h>
643     #include <linux/acpi.h>
644     +#include <linux/dynamic_debug.h>
645    
646     #include "internal.h"
647    
648     @@ -464,6 +465,24 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
649     EXPORT_SYMBOL(acpi_evaluate_hotplug_ost);
650    
651     /**
652     + * acpi_handle_path: Return the object path of handle
653     + *
654     + * Caller must free the returned buffer
655     + */
656     +static char *acpi_handle_path(acpi_handle handle)
657     +{
658     + struct acpi_buffer buffer = {
659     + .length = ACPI_ALLOCATE_BUFFER,
660     + .pointer = NULL
661     + };
662     +
663     + if (in_interrupt() ||
664     + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
665     + return NULL;
666     + return buffer.pointer;
667     +}
668     +
669     +/**
670     * acpi_handle_printk: Print message with ACPI prefix and object path
671     *
672     * This function is called through acpi_handle_<level> macros and prints
673     @@ -476,29 +495,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
674     {
675     struct va_format vaf;
676     va_list args;
677     - struct acpi_buffer buffer = {
678     - .length = ACPI_ALLOCATE_BUFFER,
679     - .pointer = NULL
680     - };
681     const char *path;
682    
683     va_start(args, fmt);
684     vaf.fmt = fmt;
685     vaf.va = &args;
686    
687     - if (in_interrupt() ||
688     - acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
689     - path = "<n/a>";
690     - else
691     - path = buffer.pointer;
692     -
693     - printk("%sACPI: %s: %pV", level, path, &vaf);
694     + path = acpi_handle_path(handle);
695     + printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf);
696    
697     va_end(args);
698     - kfree(buffer.pointer);
699     + kfree(path);
700     }
701     EXPORT_SYMBOL(acpi_handle_printk);
702    
703     +#if defined(CONFIG_DYNAMIC_DEBUG)
704     +/**
705     + * __acpi_handle_debug: pr_debug with ACPI prefix and object path
706     + *
707     + * This function is called through acpi_handle_debug macro and debug
708     + * prints a message with ACPI prefix and object path. This function
709     + * acquires the global namespace mutex to obtain an object path. In
710     + * interrupt context, it shows the object path as <n/a>.
711     + */
712     +void
713     +__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
714     + const char *fmt, ...)
715     +{
716     + struct va_format vaf;
717     + va_list args;
718     + const char *path;
719     +
720     + va_start(args, fmt);
721     + vaf.fmt = fmt;
722     + vaf.va = &args;
723     +
724     + path = acpi_handle_path(handle);
725     + __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
726     +
727     + va_end(args);
728     + kfree(path);
729     +}
730     +EXPORT_SYMBOL(__acpi_handle_debug);
731     +#endif
732     +
733     /**
734     * acpi_has_method: Check whether @handle has a method named @name
735     * @handle: ACPI device handle
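[ Caller usage is unchanged by the hunk above -- an illustrative sketch, not part of the patch, where 'adev' is any valid struct acpi_device. With CONFIG_DYNAMIC_DEBUG the message becomes a runtime-toggleable dynamic-debug site, prefixed with the object path resolved by the new acpi_handle_path(). ]

#include <linux/acpi.h>

static void example_handle_event(struct acpi_device *adev, u32 event)
{
	/* Emits e.g. "ACPI: \_SB_.PCI0.LPCB: event 0x02" when this
	 * dynamic-debug site is enabled; the path prints as "<n/a>"
	 * in interrupt context. */
	acpi_handle_debug(adev->handle, "event 0x%02x\n", event);
}
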
736     diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
737     index fa4187418440..08d99ef60b70 100644
738     --- a/drivers/base/power/opp.c
739     +++ b/drivers/base/power/opp.c
740     @@ -735,11 +735,9 @@ int of_init_opp_table(struct device *dev)
741     unsigned long freq = be32_to_cpup(val++) * 1000;
742     unsigned long volt = be32_to_cpup(val++);
743    
744     - if (dev_pm_opp_add(dev, freq, volt)) {
745     + if (dev_pm_opp_add(dev, freq, volt))
746     dev_warn(dev, "%s: Failed to add OPP %ld\n",
747     __func__, freq);
748     - continue;
749     - }
750     nr -= 2;
751     }
752    
753     diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
754     index 6e06f6f69152..77af52f0e3b1 100644
755     --- a/drivers/bluetooth/hci_ldisc.c
756     +++ b/drivers/bluetooth/hci_ldisc.c
757     @@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
758    
759     int hci_uart_tx_wakeup(struct hci_uart *hu)
760     {
761     - struct tty_struct *tty = hu->tty;
762     - struct hci_dev *hdev = hu->hdev;
763     - struct sk_buff *skb;
764     -
765     if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
766     set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
767     return 0;
768     @@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
769    
770     BT_DBG("");
771    
772     + schedule_work(&hu->write_work);
773     +
774     + return 0;
775     +}
776     +
777     +static void hci_uart_write_work(struct work_struct *work)
778     +{
779     + struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
780     + struct tty_struct *tty = hu->tty;
781     + struct hci_dev *hdev = hu->hdev;
782     + struct sk_buff *skb;
783     +
784     + /* REVISIT: should we cope with bad skbs or ->write() returning
785     + * and error value ?
786     + */
787     +
788     restart:
789     clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
790    
791     @@ -153,7 +165,6 @@ restart:
792     goto restart;
793    
794     clear_bit(HCI_UART_SENDING, &hu->tx_state);
795     - return 0;
796     }
797    
798     static void hci_uart_init_work(struct work_struct *work)
799     @@ -281,6 +292,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
800     tty->receive_room = 65536;
801    
802     INIT_WORK(&hu->init_ready, hci_uart_init_work);
803     + INIT_WORK(&hu->write_work, hci_uart_write_work);
804    
805     spin_lock_init(&hu->rx_lock);
806    
807     @@ -318,6 +330,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
808     if (hdev)
809     hci_uart_close(hdev);
810    
811     + cancel_work_sync(&hu->write_work);
812     +
813     if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
814     if (hdev) {
815     if (test_bit(HCI_UART_REGISTERED, &hu->flags))
816     diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
817     index fffa61ff5cb1..12df101ca942 100644
818     --- a/drivers/bluetooth/hci_uart.h
819     +++ b/drivers/bluetooth/hci_uart.h
820     @@ -68,6 +68,7 @@ struct hci_uart {
821     unsigned long hdev_flags;
822    
823     struct work_struct init_ready;
824     + struct work_struct write_work;
825    
826     struct hci_uart_proto *proto;
827     void *priv;
828     diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
829     index 974321a2508d..14790304b84b 100644
830     --- a/drivers/char/applicom.c
831     +++ b/drivers/char/applicom.c
832     @@ -345,7 +345,6 @@ out:
833     free_irq(apbs[i].irq, &dummy);
834     iounmap(apbs[i].RamIO);
835     }
836     - pci_disable_device(dev);
837     return ret;
838     }
839    
840     diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
841     index 3846941801b8..5c948c9625d2 100644
842     --- a/drivers/extcon/extcon-max14577.c
843     +++ b/drivers/extcon/extcon-max14577.c
844     @@ -650,7 +650,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
845     unsigned int virq = 0;
846    
847     virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
848     - if (!virq)
849     + if (virq <= 0)
850     return -EINVAL;
851     muic_irq->virq = virq;
852    
853     @@ -710,13 +710,8 @@ static int max14577_muic_probe(struct platform_device *pdev)
854     * driver should notify cable state to upper layer.
855     */
856     INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
857     - ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
858     + queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
859     delay_jiffies);
860     - if (ret < 0) {
861     - dev_err(&pdev->dev,
862     - "failed to schedule delayed work for cable detect\n");
863     - goto err_extcon;
864     - }
865    
866     return ret;
867    
868     diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
869     index da268fbc901b..4657a91acf56 100644
870     --- a/drivers/extcon/extcon-max77693.c
871     +++ b/drivers/extcon/extcon-max77693.c
872     @@ -1193,7 +1193,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
873    
874    
875     /* Initialize MUIC register by using platform data or default data */
876     - if (pdata->muic_data) {
877     + if (pdata && pdata->muic_data) {
878     init_data = pdata->muic_data->init_data;
879     num_init_data = pdata->muic_data->num_init_data;
880     } else {
881     @@ -1226,7 +1226,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
882     = init_data[i].data;
883     }
884    
885     - if (pdata->muic_data) {
886     + if (pdata && pdata->muic_data) {
887     struct max77693_muic_platform_data *muic_pdata
888     = pdata->muic_data;
889    
890     diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
891     index 6a00464658c5..5e1b88cecb76 100644
892     --- a/drivers/extcon/extcon-max8997.c
893     +++ b/drivers/extcon/extcon-max8997.c
894     @@ -715,7 +715,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
895     goto err_irq;
896     }
897    
898     - if (pdata->muic_pdata) {
899     + if (pdata && pdata->muic_pdata) {
900     struct max8997_muic_platform_data *muic_pdata
901     = pdata->muic_pdata;
902    
903     diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
904     index 4b9dc836dcf9..e992abc5ef26 100644
905     --- a/drivers/firmware/efi/efi-pstore.c
906     +++ b/drivers/firmware/efi/efi-pstore.c
907     @@ -40,7 +40,7 @@ struct pstore_read_data {
908     static inline u64 generic_id(unsigned long timestamp,
909     unsigned int part, int count)
910     {
911     - return (timestamp * 100 + part) * 1000 + count;
912     + return ((u64) timestamp * 100 + part) * 1000 + count;
913     }
914    
915     static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
916     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
917     index 8a5384ce0352..7cd42ea30d6d 100644
918     --- a/drivers/hid/hid-core.c
919     +++ b/drivers/hid/hid-core.c
920     @@ -842,7 +842,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
921     * ->numbered being checked, which may not always be the case when
922     * drivers go to access report values.
923     */
924     - report = hid->report_enum[type].report_id_hash[id];
925     + if (id == 0) {
926     + /*
927     + * Validating on id 0 means we should examine the first
928     + * report in the list.
929     + */
930     + report = list_entry(
931     + hid->report_enum[type].report_list.next,
932     + struct hid_report, list);
933     + } else {
934     + report = hid->report_enum[type].report_id_hash[id];
935     + }
936     if (!report) {
937     hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
938     return NULL;
939     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
940     index 091169152f77..156205a81523 100644
941     --- a/drivers/infiniband/ulp/isert/ib_isert.c
942     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
943     @@ -705,14 +705,12 @@ isert_disconnect_work(struct work_struct *work)
944     isert_put_conn(isert_conn);
945     return;
946     }
947     - if (!isert_conn->logout_posted) {
948     - pr_debug("Calling rdma_disconnect for !logout_posted from"
949     - " isert_disconnect_work\n");
950     +
951     + if (isert_conn->disconnect) {
952     + /* Send DREQ/DREP towards our initiator */
953     rdma_disconnect(isert_conn->conn_cm_id);
954     - mutex_unlock(&isert_conn->conn_mutex);
955     - iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
956     - goto wake_up;
957     }
958     +
959     mutex_unlock(&isert_conn->conn_mutex);
960    
961     wake_up:
962     @@ -721,10 +719,11 @@ wake_up:
963     }
964    
965     static void
966     -isert_disconnected_handler(struct rdma_cm_id *cma_id)
967     +isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
968     {
969     struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
970    
971     + isert_conn->disconnect = disconnect;
972     INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
973     schedule_work(&isert_conn->conn_logout_work);
974     }
975     @@ -733,29 +732,28 @@ static int
976     isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
977     {
978     int ret = 0;
979     + bool disconnect = false;
980    
981     pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
982     event->event, event->status, cma_id->context, cma_id);
983    
984     switch (event->event) {
985     case RDMA_CM_EVENT_CONNECT_REQUEST:
986     - pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
987     ret = isert_connect_request(cma_id, event);
988     break;
989     case RDMA_CM_EVENT_ESTABLISHED:
990     - pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
991     isert_connected_handler(cma_id);
992     break;
993     - case RDMA_CM_EVENT_DISCONNECTED:
994     - pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
995     - isert_disconnected_handler(cma_id);
996     - break;
997     - case RDMA_CM_EVENT_DEVICE_REMOVAL:
998     - case RDMA_CM_EVENT_ADDR_CHANGE:
999     + case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
1000     + case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
1001     + case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
1002     + disconnect = true;
1003     + case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
1004     + isert_disconnected_handler(cma_id, disconnect);
1005     break;
1006     case RDMA_CM_EVENT_CONNECT_ERROR:
1007     default:
1008     - pr_err("Unknown RDMA CMA event: %d\n", event->event);
1009     + pr_err("Unhandled RDMA CMA event: %d\n", event->event);
1010     break;
1011     }
1012    
1013     @@ -1617,11 +1615,8 @@ isert_do_control_comp(struct work_struct *work)
1014     break;
1015     case ISTATE_SEND_LOGOUTRSP:
1016     pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
1017     - /*
1018     - * Call atomic_dec(&isert_conn->post_send_buf_count)
1019     - * from isert_wait_conn()
1020     - */
1021     - isert_conn->logout_posted = true;
1022     +
1023     + atomic_dec(&isert_conn->post_send_buf_count);
1024     iscsit_logout_post_handler(cmd, cmd->conn);
1025     break;
1026     case ISTATE_SEND_TEXTRSP:
1027     @@ -1791,6 +1786,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
1028     isert_conn->state = ISER_CONN_DOWN;
1029     mutex_unlock(&isert_conn->conn_mutex);
1030    
1031     + iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
1032     +
1033     complete(&isert_conn->conn_wait_comp_err);
1034     }
1035    
1036     @@ -2047,7 +2044,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
1037     int rc;
1038    
1039     isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
1040     - rc = iscsit_build_text_rsp(cmd, conn, hdr);
1041     + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
1042     if (rc < 0)
1043     return rc;
1044    
1045     @@ -2725,9 +2722,14 @@ accept_wait:
1046     return -ENODEV;
1047    
1048     spin_lock_bh(&np->np_thread_lock);
1049     - if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1050     + if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
1051     spin_unlock_bh(&np->np_thread_lock);
1052     - pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
1053     + pr_debug("np_thread_state %d for isert_accept_np\n",
1054     + np->np_thread_state);
1055     + /**
1056     + * No point in stalling here when np_thread
1057     + * is in state RESET/SHUTDOWN/EXIT - bail
1058     + **/
1059     return -ENODEV;
1060     }
1061     spin_unlock_bh(&np->np_thread_lock);
1062     @@ -2777,15 +2779,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1063     struct isert_conn *isert_conn = conn->context;
1064    
1065     pr_debug("isert_wait_conn: Starting \n");
1066     - /*
1067     - * Decrement post_send_buf_count for special case when called
1068     - * from isert_do_control_comp() -> iscsit_logout_post_handler()
1069     - */
1070     - mutex_lock(&isert_conn->conn_mutex);
1071     - if (isert_conn->logout_posted)
1072     - atomic_dec(&isert_conn->post_send_buf_count);
1073    
1074     - if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
1075     + mutex_lock(&isert_conn->conn_mutex);
1076     + if (isert_conn->conn_cm_id) {
1077     pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
1078     rdma_disconnect(isert_conn->conn_cm_id);
1079     }
1080     @@ -2860,6 +2856,7 @@ destroy_rx_wq:
1081    
1082     static void __exit isert_exit(void)
1083     {
1084     + flush_scheduled_work();
1085     destroy_workqueue(isert_comp_wq);
1086     destroy_workqueue(isert_rx_wq);
1087     iscsit_unregister_transport(&iser_target_transport);
1088     diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
1089     index c3578f6091d2..cbecaabe90b9 100644
1090     --- a/drivers/infiniband/ulp/isert/ib_isert.h
1091     +++ b/drivers/infiniband/ulp/isert/ib_isert.h
1092     @@ -93,7 +93,6 @@ struct isert_device;
1093    
1094     struct isert_conn {
1095     enum iser_conn_state state;
1096     - bool logout_posted;
1097     int post_recv_buf_count;
1098     atomic_t post_send_buf_count;
1099     u32 responder_resources;
1100     @@ -128,6 +127,7 @@ struct isert_conn {
1101     #define ISERT_COMP_BATCH_COUNT 8
1102     int conn_comp_batch;
1103     struct llist_head conn_comp_llist;
1104     + bool disconnect;
1105     };
1106    
1107     #define ISERT_MAX_CQ 64
1108     diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
1109     index e1863dbf4edc..7a9b98bc208b 100644
1110     --- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
1111     +++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
1112     @@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
1113    
1114     /* Instruct the CX2341[56] to start sending packets */
1115     snd_ivtv_lock(itvsc);
1116     +
1117     + if (ivtv_init_on_first_open(itv)) {
1118     + snd_ivtv_unlock(itvsc);
1119     + return -ENXIO;
1120     + }
1121     +
1122     s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
1123    
1124     v4l2_fh_init(&item.fh, s->vdev);
1125     diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
1126     index eb472b5b26a0..40396e8b16a8 100644
1127     --- a/drivers/media/pci/saa7134/saa7134-video.c
1128     +++ b/drivers/media/pci/saa7134/saa7134-video.c
1129     @@ -1243,6 +1243,7 @@ static int video_release(struct file *file)
1130     videobuf_streamoff(&dev->cap);
1131     res_free(dev, fh, RESOURCE_VIDEO);
1132     videobuf_mmap_free(&dev->cap);
1133     + INIT_LIST_HEAD(&dev->cap.stream);
1134     }
1135     if (dev->cap.read_buf) {
1136     buffer_release(&dev->cap, dev->cap.read_buf);
1137     @@ -1254,6 +1255,7 @@ static int video_release(struct file *file)
1138     videobuf_stop(&dev->vbi);
1139     res_free(dev, fh, RESOURCE_VBI);
1140     videobuf_mmap_free(&dev->vbi);
1141     + INIT_LIST_HEAD(&dev->vbi.stream);
1142     }
1143    
1144     /* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
1145     @@ -1987,17 +1989,12 @@ int saa7134_streamoff(struct file *file, void *priv,
1146     enum v4l2_buf_type type)
1147     {
1148     struct saa7134_dev *dev = video_drvdata(file);
1149     - int err;
1150     int res = saa7134_resource(file);
1151    
1152     if (res != RESOURCE_EMPRESS)
1153     pm_qos_remove_request(&dev->qos_request);
1154    
1155     - err = videobuf_streamoff(saa7134_queue(file));
1156     - if (err < 0)
1157     - return err;
1158     - res_free(dev, priv, res);
1159     - return 0;
1160     + return videobuf_streamoff(saa7134_queue(file));
1161     }
1162     EXPORT_SYMBOL_GPL(saa7134_streamoff);
1163    
1164     diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
1165     index 34a26e0cfe77..03504dcf3c52 100644
1166     --- a/drivers/media/usb/stk1160/stk1160-core.c
1167     +++ b/drivers/media/usb/stk1160/stk1160-core.c
1168     @@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
1169     {
1170     int ret;
1171     int pipe = usb_rcvctrlpipe(dev->udev, 0);
1172     + u8 *buf;
1173    
1174     *value = 0;
1175     +
1176     + buf = kmalloc(sizeof(u8), GFP_KERNEL);
1177     + if (!buf)
1178     + return -ENOMEM;
1179     ret = usb_control_msg(dev->udev, pipe, 0x00,
1180     USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1181     - 0x00, reg, value, sizeof(u8), HZ);
1182     + 0x00, reg, buf, sizeof(u8), HZ);
1183     if (ret < 0) {
1184     stk1160_err("read failed on reg 0x%x (%d)\n",
1185     reg, ret);
1186     + kfree(buf);
1187     return ret;
1188     }
1189    
1190     + *value = *buf;
1191     + kfree(buf);
1192     return 0;
1193     }
1194    
1195     diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
1196     index 05b05b160e1e..abdea484c998 100644
1197     --- a/drivers/media/usb/stk1160/stk1160.h
1198     +++ b/drivers/media/usb/stk1160/stk1160.h
1199     @@ -143,7 +143,6 @@ struct stk1160 {
1200     int num_alt;
1201    
1202     struct stk1160_isoc_ctl isoc_ctl;
1203     - char urb_buf[255]; /* urb control msg buffer */
1204    
1205     /* frame properties */
1206     int width; /* current frame width */
1207     diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
1208     index e32d6a59eaca..2ef5b0f8ef77 100644
1209     --- a/drivers/media/usb/uvc/uvc_video.c
1210     +++ b/drivers/media/usb/uvc/uvc_video.c
1211     @@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream,
1212     * Clocks and timestamps
1213     */
1214    
1215     +static inline void uvc_video_get_ts(struct timespec *ts)
1216     +{
1217     + if (uvc_clock_param == CLOCK_MONOTONIC)
1218     + ktime_get_ts(ts);
1219     + else
1220     + ktime_get_real_ts(ts);
1221     +}
1222     +
1223     static void
1224     uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
1225     const __u8 *data, int len)
1226     @@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
1227     stream->clock.last_sof = dev_sof;
1228    
1229     host_sof = usb_get_current_frame_number(stream->dev->udev);
1230     - ktime_get_ts(&ts);
1231     + uvc_video_get_ts(&ts);
1232    
1233     /* The UVC specification allows device implementations that can't obtain
1234     * the USB frame number to keep their own frame counters as long as they
1235     @@ -1011,10 +1019,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
1236     return -ENODATA;
1237     }
1238    
1239     - if (uvc_clock_param == CLOCK_MONOTONIC)
1240     - ktime_get_ts(&ts);
1241     - else
1242     - ktime_get_real_ts(&ts);
1243     + uvc_video_get_ts(&ts);
1244    
1245     buf->buf.v4l2_buf.sequence = stream->sequence;
1246     buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
1247     diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
1248     index 065ca49eb45e..546a848add37 100644
1249     --- a/drivers/net/can/sja1000/peak_pci.c
1250     +++ b/drivers/net/can/sja1000/peak_pci.c
1251     @@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1252     {
1253     struct sja1000_priv *priv;
1254     struct peak_pci_chan *chan;
1255     - struct net_device *dev;
1256     + struct net_device *dev, *prev_dev;
1257     void __iomem *cfg_base, *reg_base;
1258     u16 sub_sys_id, icr;
1259     int i, err, channels;
1260     @@ -687,11 +687,13 @@ failure_remove_channels:
1261     writew(0x0, cfg_base + PITA_ICR + 2);
1262    
1263     chan = NULL;
1264     - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
1265     - unregister_sja1000dev(dev);
1266     - free_sja1000dev(dev);
1267     + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
1268     priv = netdev_priv(dev);
1269     chan = priv->priv;
1270     + prev_dev = chan->prev_dev;
1271     +
1272     + unregister_sja1000dev(dev);
1273     + free_sja1000dev(dev);
1274     }
1275    
1276     /* free any PCIeC resources too */
1277     @@ -725,10 +727,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
1278    
1279     /* Loop over all registered devices */
1280     while (1) {
1281     + struct net_device *prev_dev = chan->prev_dev;
1282     +
1283     dev_info(&pdev->dev, "removing device %s\n", dev->name);
1284     unregister_sja1000dev(dev);
1285     free_sja1000dev(dev);
1286     - dev = chan->prev_dev;
1287     + dev = prev_dev;
1288    
1289     if (!dev) {
1290     /* do that only for first channel */
1291     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
1292     index 7d6d8ec676c8..921b9df2faca 100644
1293     --- a/drivers/net/ethernet/ti/cpsw.c
1294     +++ b/drivers/net/ethernet/ti/cpsw.c
1295     @@ -1884,18 +1884,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1296     mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
1297     phyid = be32_to_cpup(parp+1);
1298     mdio = of_find_device_by_node(mdio_node);
1299     -
1300     - if (strncmp(mdio->name, "gpio", 4) == 0) {
1301     - /* GPIO bitbang MDIO driver attached */
1302     - struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
1303     -
1304     - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1305     - PHY_ID_FMT, bus->id, phyid);
1306     - } else {
1307     - /* davinci MDIO driver attached */
1308     - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1309     - PHY_ID_FMT, mdio->name, phyid);
1310     + if (!mdio) {
1311     + pr_err("Missing mdio platform device\n");
1312     + return -EINVAL;
1313     }
1314     + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
1315     + PHY_ID_FMT, mdio->name, phyid);
1316    
1317     mac_addr = of_get_mac_address(slave_node);
1318     if (mac_addr)
1319     diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
1320     index 7f139326a642..ff026689358c 100644
1321     --- a/drivers/phy/phy-exynos-mipi-video.c
1322     +++ b/drivers/phy/phy-exynos-mipi-video.c
1323     @@ -101,7 +101,7 @@ static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
1324     {
1325     struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
1326    
1327     - if (WARN_ON(args->args[0] > EXYNOS_MIPI_PHYS_NUM))
1328     + if (WARN_ON(args->args[0] >= EXYNOS_MIPI_PHYS_NUM))
1329     return ERR_PTR(-ENODEV);
1330    
1331     return state->phys[args->args[0]].phy;
1332     diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
1333     index 78b004da2885..ef29636d4c9b 100644
1334     --- a/drivers/scsi/scsi_error.c
1335     +++ b/drivers/scsi/scsi_error.c
1336     @@ -1157,6 +1157,15 @@ int scsi_eh_get_sense(struct list_head *work_q,
1337     __func__));
1338     break;
1339     }
1340     + if (status_byte(scmd->result) != CHECK_CONDITION)
1341     + /*
1342     + * don't request sense if there's no check condition
1343     + * status because the error we're processing isn't one
1344     + * that has a sense code (and some devices get
1345     + * confused by sense requests out of the blue)
1346     + */
1347     + continue;
1348     +
1349     SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1350     "%s: requesting sense\n",
1351     current->comm));
1352     diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
1353     index b2cd3a85166d..bbf236e842a9 100644
1354     --- a/drivers/staging/media/bcm2048/radio-bcm2048.c
1355     +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
1356     @@ -737,7 +737,7 @@ static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
1357     int err;
1358     u32 new_frequency = 0;
1359    
1360     - if (region > ARRAY_SIZE(region_configs))
1361     + if (region >= ARRAY_SIZE(region_configs))
1362     return -EINVAL;
1363    
1364     mutex_lock(&bdev->mutex);
1365     diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
1366     index 51dbc13e757f..5a40925680ac 100644
1367     --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
1368     +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
1369     @@ -924,6 +924,7 @@ static int spinand_remove(struct spi_device *spi)
1370    
1371     static const struct of_device_id spinand_dt[] = {
1372     { .compatible = "spinand,mt29f", },
1373     + {}
1374     };
1375    
1376     /*
1377     diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
1378     index 96df62f95b6b..9b4678c73c0d 100644
1379     --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
1380     +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
1381     @@ -1601,13 +1601,18 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
1382     pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
1383     if (pIE == NULL)
1384     return _FAIL;
1385     + if (ie_len > NDIS_802_11_LENGTH_RATES_EX)
1386     + return _FAIL;
1387    
1388     memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
1389     supportRateNum = ie_len;
1390    
1391     pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
1392     - if (pIE)
1393     + if (pIE) {
1394     + if (supportRateNum + ie_len > NDIS_802_11_LENGTH_RATES_EX)
1395     + return _FAIL;
1396     memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
1397     + }
1398    
1399     return _SUCCESS;
1400     }
1401     diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
1402     index 2f084e181d39..a1aca4416ca7 100644
1403     --- a/drivers/staging/tidspbridge/core/dsp-clock.c
1404     +++ b/drivers/staging/tidspbridge/core/dsp-clock.c
1405     @@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
1406     case GPT_CLK:
1407     status = omap_dm_timer_start(timer[clk_id - 1]);
1408     break;
1409     -#ifdef CONFIG_OMAP_MCBSP
1410     +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
1411     case MCBSP_CLK:
1412     omap_mcbsp_request(MCBSP_ID(clk_id));
1413     omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
1414     @@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
1415     case GPT_CLK:
1416     status = omap_dm_timer_stop(timer[clk_id - 1]);
1417     break;
1418     -#ifdef CONFIG_OMAP_MCBSP
1419     +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
1420     case MCBSP_CLK:
1421     omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
1422     omap_mcbsp_free(MCBSP_ID(clk_id));
1423     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
1424     index 39a34da5260f..22601c718b07 100644
1425     --- a/drivers/target/iscsi/iscsi_target.c
1426     +++ b/drivers/target/iscsi/iscsi_target.c
1427     @@ -3361,7 +3361,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
1428    
1429     #define SENDTARGETS_BUF_LIMIT 32768U
1430    
1431     -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
1432     +static int
1433     +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
1434     + enum iscsit_transport_type network_transport)
1435     {
1436     char *payload = NULL;
1437     struct iscsi_conn *conn = cmd->conn;
1438     @@ -3438,6 +3440,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
1439     struct iscsi_np *np = tpg_np->tpg_np;
1440     bool inaddr_any = iscsit_check_inaddr_any(np);
1441    
1442     + if (np->np_network_transport != network_transport)
1443     + continue;
1444     +
1445     if (!target_name_printed) {
1446     len = sprintf(buf, "TargetName=%s",
1447     tiqn->tiqn);
1448     @@ -3491,11 +3496,12 @@ eob:
1449    
1450     int
1451     iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
1452     - struct iscsi_text_rsp *hdr)
1453     + struct iscsi_text_rsp *hdr,
1454     + enum iscsit_transport_type network_transport)
1455     {
1456     int text_length, padding;
1457    
1458     - text_length = iscsit_build_sendtargets_response(cmd);
1459     + text_length = iscsit_build_sendtargets_response(cmd, network_transport);
1460     if (text_length < 0)
1461     return text_length;
1462    
1463     @@ -3533,7 +3539,7 @@ static int iscsit_send_text_rsp(
1464     u32 tx_size = 0;
1465     int text_length, iov_count = 0, rc;
1466    
1467     - rc = iscsit_build_text_rsp(cmd, conn, hdr);
1468     + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
1469     if (rc < 0)
1470     return rc;
1471    
1472     @@ -4205,8 +4211,6 @@ int iscsit_close_connection(
1473     if (conn->conn_transport->iscsit_wait_conn)
1474     conn->conn_transport->iscsit_wait_conn(conn);
1475    
1476     - iscsit_free_queue_reqs_for_conn(conn);
1477     -
1478     /*
1479     * During Connection recovery drop unacknowledged out of order
1480     * commands for this connection, and prepare the other commands
1481     @@ -4223,6 +4227,7 @@ int iscsit_close_connection(
1482     iscsit_clear_ooo_cmdsns_for_conn(conn);
1483     iscsit_release_commands_from_conn(conn);
1484     }
1485     + iscsit_free_queue_reqs_for_conn(conn);
1486    
1487     /*
1488     * Handle decrementing session or connection usage count if
1489     diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
1490     index b920db3388cd..7b331b95a3ef 100644
1491     --- a/drivers/target/target_core_rd.c
1492     +++ b/drivers/target/target_core_rd.c
1493     @@ -158,7 +158,7 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
1494     - 1;
1495    
1496     for (j = 0; j < sg_per_table; j++) {
1497     - pg = alloc_pages(GFP_KERNEL, 0);
1498     + pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
1499     if (!pg) {
1500     pr_err("Unable to allocate scatterlist"
1501     " pages for struct rd_dev_sg_table\n");
1502     diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
1503     index aa064a7bb446..379033f5903b 100644
1504     --- a/drivers/target/target_core_sbc.c
1505     +++ b/drivers/target/target_core_sbc.c
1506     @@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
1507     transport_kunmap_data_sg(cmd);
1508     }
1509    
1510     - target_complete_cmd(cmd, GOOD);
1511     + target_complete_cmd_with_length(cmd, GOOD, 8);
1512     return 0;
1513     }
1514    
1515     @@ -134,7 +134,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
1516     transport_kunmap_data_sg(cmd);
1517     }
1518    
1519     - target_complete_cmd(cmd, GOOD);
1520     + target_complete_cmd_with_length(cmd, GOOD, 32);
1521     return 0;
1522     }
1523    
1524     diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
1525     index 3bebc71ea033..fcdf98fc947c 100644
1526     --- a/drivers/target/target_core_spc.c
1527     +++ b/drivers/target/target_core_spc.c
1528     @@ -714,6 +714,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
1529     unsigned char *buf;
1530     sense_reason_t ret;
1531     int p;
1532     + int len = 0;
1533    
1534     buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
1535     if (!buf) {
1536     @@ -735,6 +736,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
1537     }
1538    
1539     ret = spc_emulate_inquiry_std(cmd, buf);
1540     + len = buf[4] + 5;
1541     goto out;
1542     }
1543    
1544     @@ -742,6 +744,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
1545     if (cdb[2] == evpd_handlers[p].page) {
1546     buf[1] = cdb[2];
1547     ret = evpd_handlers[p].emulate(cmd, buf);
1548     + len = get_unaligned_be16(&buf[2]) + 4;
1549     goto out;
1550     }
1551     }
1552     @@ -758,7 +761,7 @@ out:
1553     kfree(buf);
1554    
1555     if (!ret)
1556     - target_complete_cmd(cmd, GOOD);
1557     + target_complete_cmd_with_length(cmd, GOOD, len);
1558     return ret;
1559     }
1560    
1561     @@ -1089,7 +1092,7 @@ set_length:
1562     transport_kunmap_data_sg(cmd);
1563     }
1564    
1565     - target_complete_cmd(cmd, GOOD);
1566     + target_complete_cmd_with_length(cmd, GOOD, length);
1567     return 0;
1568     }
1569    
1570     @@ -1266,7 +1269,7 @@ done:
1571     buf[3] = (lun_count & 0xff);
1572     transport_kunmap_data_sg(cmd);
1573    
1574     - target_complete_cmd(cmd, GOOD);
1575     + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
1576     return 0;
1577     }
1578     EXPORT_SYMBOL(spc_emulate_report_luns);
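
The lengths computed above follow the SCSI INQUIRY layout: a standard INQUIRY response is the ADDITIONAL LENGTH byte (offset 4) plus the 5 header bytes, and an EVPD page is its big-endian PAGE LENGTH field (offsets 2..3) plus the 4 header bytes. A minimal userspace sketch of the same arithmetic, with hypothetical sample buffers:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_unaligned_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t std[96]  = { [4] = 91 };                /* ADDITIONAL LENGTH = 91 */
	uint8_t evpd[64] = { [2] = 0x00, [3] = 0x3c };  /* PAGE LENGTH = 60 */

	printf("std INQUIRY payload: %d bytes\n", std[4] + 5);   /* 96 */
	printf("EVPD page payload:   %d bytes\n",
	       get_unaligned_be16(&evpd[2]) + 4);                /* 64 */
	return 0;
}
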
1579     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1580     index c39cf37800d3..24f527977ddb 100644
1581     --- a/drivers/target/target_core_transport.c
1582     +++ b/drivers/target/target_core_transport.c
1583     @@ -560,7 +560,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
1584    
1585     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1586    
1587     - complete(&cmd->t_transport_stop_comp);
1588     + complete_all(&cmd->t_transport_stop_comp);
1589     return 1;
1590     }
1591    
1592     @@ -676,7 +676,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
1593     if (cmd->transport_state & CMD_T_ABORTED &&
1594     cmd->transport_state & CMD_T_STOP) {
1595     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1596     - complete(&cmd->t_transport_stop_comp);
1597     + complete_all(&cmd->t_transport_stop_comp);
1598     return;
1599     } else if (!success) {
1600     INIT_WORK(&cmd->work, target_complete_failure_work);
1601     @@ -692,6 +692,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
1602     }
1603     EXPORT_SYMBOL(target_complete_cmd);
1604    
1605     +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
1606     +{
1607     + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
1608     + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1609     + cmd->residual_count += cmd->data_length - length;
1610     + } else {
1611     + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1612     + cmd->residual_count = cmd->data_length - length;
1613     + }
1614     +
1615     + cmd->data_length = length;
1616     + }
1617     +
1618     + target_complete_cmd(cmd, scsi_status);
1619     +}
1620     +EXPORT_SYMBOL(target_complete_cmd_with_length);
1621     +
1622     static void target_add_to_state_list(struct se_cmd *cmd)
1623     {
1624     struct se_device *dev = cmd->se_dev;
1625     @@ -1748,7 +1765,7 @@ void target_execute_cmd(struct se_cmd *cmd)
1626     cmd->se_tfo->get_task_tag(cmd));
1627    
1628     spin_unlock_irq(&cmd->t_state_lock);
1629     - complete(&cmd->t_transport_stop_comp);
1630     + complete_all(&cmd->t_transport_stop_comp);
1631     return;
1632     }
1633    
1634     @@ -2868,6 +2885,12 @@ static void target_tmr_work(struct work_struct *work)
1635     int transport_generic_handle_tmr(
1636     struct se_cmd *cmd)
1637     {
1638     + unsigned long flags;
1639     +
1640     + spin_lock_irqsave(&cmd->t_state_lock, flags);
1641     + cmd->transport_state |= CMD_T_ACTIVE;
1642     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1643     +
1644     INIT_WORK(&cmd->work, target_tmr_work);
1645     queue_work(cmd->se_dev->tmr_wq, &cmd->work);
1646     return 0;
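
The new target_complete_cmd_with_length() helper, used by the sbc and spc hunks above, trims the response when the device produced fewer valid bytes than the initiator's allocation length and records the difference as residual. A standalone sketch of that arithmetic (struct and field names here are stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct cmd {                     /* stand-in for struct se_cmd */
	unsigned data_length;    /* initiator allocation length */
	unsigned residual_count;
	bool underflow;          /* models SCF_UNDERFLOW_BIT */
};

static void complete_with_length(struct cmd *c, unsigned length)
{
	if (length < c->data_length) {
		if (c->underflow) {
			c->residual_count += c->data_length - length;
		} else {
			c->underflow = true;
			c->residual_count = c->data_length - length;
		}
		c->data_length = length;
	}
}

int main(void)
{
	/* e.g. READ CAPACITY(10): 8 valid bytes into a 32-byte buffer */
	struct cmd c = { .data_length = 32 };

	complete_with_length(&c, 8);
	printf("data_length=%u residual=%u\n",
	       c.data_length, c.residual_count);   /* 8, 24 */
	return 0;
}
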
1647     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
1648     index 2da0a5a2803a..09e9619ae381 100644
1649     --- a/drivers/usb/dwc3/gadget.c
1650     +++ b/drivers/usb/dwc3/gadget.c
1651     @@ -586,6 +586,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
1652    
1653     dwc3_remove_requests(dwc, dep);
1654    
1655     + /* make sure HW endpoint isn't stalled */
1656     + if (dep->flags & DWC3_EP_STALL)
1657     + __dwc3_gadget_ep_set_halt(dep, 0);
1658     +
1659     reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
1660     reg &= ~DWC3_DALEPENA_EP(dep->number);
1661     dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
1662     diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
1663     index b94c049ab0d0..4ac9e9928d67 100644
1664     --- a/drivers/usb/gadget/inode.c
1665     +++ b/drivers/usb/gadget/inode.c
1666     @@ -1504,7 +1504,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1667     }
1668     break;
1669    
1670     -#ifndef CONFIG_USB_GADGET_PXA25X
1671     +#ifndef CONFIG_USB_PXA25X
1672     /* PXA automagically handles this request too */
1673     case USB_REQ_GET_CONFIGURATION:
1674     if (ctrl->bRequestType != 0x80)
1675     diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
1676     index 4a6d3dd68572..2f3acebb577a 100644
1677     --- a/drivers/usb/host/pci-quirks.c
1678     +++ b/drivers/usb/host/pci-quirks.c
1679     @@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
1680     DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
1681     },
1682     },
1683     + {
1684     + /* HASEE E200 */
1685     + .matches = {
1686     + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
1687     + DMI_MATCH(DMI_BOARD_NAME, "E210"),
1688     + DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
1689     + },
1690     + },
1691     { }
1692     };
1693    
1694     @@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
1695     {
1696     int try_handoff = 1, tried_handoff = 0;
1697    
1698     - /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
1699     - * the handoff on its unused controller. Skip it. */
1700     - if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
1701     + /*
1702     + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
1703     + * the handoff on its unused controller. Skip it.
1704     + *
1705     + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
1706     + */
1707     + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
1708     + pdev->device == 0x27cc)) {
1709     if (dmi_check_system(ehci_dmi_nohandoff_table))
1710     try_handoff = 0;
1711     }
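
The quirk only fires when both the PCI ID and the DMI table agree, so other systems with the same Intel EHCI device keep the normal BIOS handoff. A userspace sketch of that two-level gate (only the HASEE entry is shown, and kernel DMI matching is substring-based):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct dmi_id {                    /* stand-in for struct dmi_system_id */
	const char *vendor, *board, *bios;
};

static bool dmi_matches(const struct dmi_id *id, const char *vendor,
			const char *board, const char *bios)
{
	/* DMI_MATCH semantics: table entry must appear as a substring */
	return strstr(vendor, id->vendor) && strstr(board, id->board) &&
	       strstr(bios, id->bios);
}

static bool skip_handoff(unsigned vid, unsigned did, const char *vendor,
			 const char *board, const char *bios)
{
	static const struct dmi_id nohandoff[] = {
		{ "HASEE", "E210", "6.00" },   /* HASEE E200 laptop */
		{ NULL, NULL, NULL }
	};
	const struct dmi_id *id;

	if (vid != 0x8086 || (did != 0x283a && did != 0x27cc))
		return false;                  /* PCI ID gate first */
	for (id = nohandoff; id->vendor; id++)
		if (dmi_matches(id, vendor, board, bios))
			return true;
	return false;
}

int main(void)
{
	printf("%d\n", skip_handoff(0x8086, 0x27cc, "HASEE", "E210", "6.00"));
	return 0;
}
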
1712     diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
1713     index f6568b5e9b06..71dcacbab398 100644
1714     --- a/drivers/usb/misc/usbtest.c
1715     +++ b/drivers/usb/misc/usbtest.c
1716     @@ -7,7 +7,7 @@
1717     #include <linux/moduleparam.h>
1718     #include <linux/scatterlist.h>
1719     #include <linux/mutex.h>
1720     -
1721     +#include <linux/timer.h>
1722     #include <linux/usb.h>
1723    
1724     #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
1725     @@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
1726     return sg;
1727     }
1728    
1729     +static void sg_timeout(unsigned long _req)
1730     +{
1731     + struct usb_sg_request *req = (struct usb_sg_request *) _req;
1732     +
1733     + req->status = -ETIMEDOUT;
1734     + usb_sg_cancel(req);
1735     +}
1736     +
1737     static int perform_sglist(
1738     struct usbtest_dev *tdev,
1739     unsigned iterations,
1740     @@ -495,6 +503,9 @@ static int perform_sglist(
1741     {
1742     struct usb_device *udev = testdev_to_usbdev(tdev);
1743     int retval = 0;
1744     + struct timer_list sg_timer;
1745     +
1746     + setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
1747    
1748     while (retval == 0 && iterations-- > 0) {
1749     retval = usb_sg_init(req, udev, pipe,
1750     @@ -505,7 +516,10 @@ static int perform_sglist(
1751    
1752     if (retval)
1753     break;
1754     + mod_timer(&sg_timer, jiffies +
1755     + msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
1756     usb_sg_wait(req);
1757     + del_timer_sync(&sg_timer);
1758     retval = req->status;
1759    
1760     /* FIXME check resulting data pattern */
1761     @@ -1320,6 +1334,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
1762     urb->context = &completion;
1763     urb->complete = unlink1_callback;
1764    
1765     + if (usb_pipeout(urb->pipe)) {
1766     + simple_fill_buf(urb);
1767     + urb->transfer_flags |= URB_ZERO_PACKET;
1768     + }
1769     +
1770     /* keep the endpoint busy. there are lots of hc/hcd-internal
1771     * states, and testing should get to all of them over time.
1772     *
1773     @@ -1450,6 +1469,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
1774     unlink_queued_callback, &ctx);
1775     ctx.urbs[i]->transfer_dma = buf_dma;
1776     ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
1777     +
1778     + if (usb_pipeout(ctx.urbs[i]->pipe)) {
1779     + simple_fill_buf(ctx.urbs[i]);
1780     + ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
1781     + }
1782     }
1783    
1784     /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
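
The timer added above bounds usb_sg_wait(), which otherwise blocks indefinitely if the device stops responding; the callback fails the request and cancels it, and the timer is always disarmed before the stack frame is reused. A condensed sketch of the pattern with the 3.14-era timer API (the demo_* names are placeholders, not kernel symbols):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_req;                          /* placeholder request type */
void demo_cancel(struct demo_req *req);   /* placeholder: fail + cancel */
void demo_wait(struct demo_req *req);     /* placeholder blocking wait */

static void demo_timeout(unsigned long data)
{
	/* timer callback runs in softirq context: flag and cancel only */
	demo_cancel((struct demo_req *)data);
}

static void demo_wait_with_timeout(struct demo_req *req, unsigned int ms)
{
	struct timer_list t;

	setup_timer_on_stack(&t, demo_timeout, (unsigned long)req);
	mod_timer(&t, jiffies + msecs_to_jiffies(ms));
	demo_wait(req);
	del_timer_sync(&t);    /* never return with an armed on-stack timer */
	destroy_timer_on_stack(&t);
}
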
1785     diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
1786     index 6e146d723b37..69e49be8866b 100644
1787     --- a/drivers/usb/phy/phy-isp1301-omap.c
1788     +++ b/drivers/usb/phy/phy-isp1301-omap.c
1789     @@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
1790     return isp1301_otg_enable(isp);
1791     return 0;
1792    
1793     -#elif !defined(CONFIG_USB_GADGET_OMAP)
1794     +#elif !IS_ENABLED(CONFIG_USB_OMAP)
1795     // FIXME update its refcount
1796     otg->host = host;
1797    
1798     diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
1799     index 35a2373cde67..9374bd2aba20 100644
1800     --- a/drivers/usb/serial/bus.c
1801     +++ b/drivers/usb/serial/bus.c
1802     @@ -97,13 +97,19 @@ static int usb_serial_device_remove(struct device *dev)
1803     struct usb_serial_port *port;
1804     int retval = 0;
1805     int minor;
1806     + int autopm_err;
1807    
1808     port = to_usb_serial_port(dev);
1809     if (!port)
1810     return -ENODEV;
1811    
1812     - /* make sure suspend/resume doesn't race against port_remove */
1813     - usb_autopm_get_interface(port->serial->interface);
1814     + /*
1815     + * Make sure suspend/resume doesn't race against port_remove.
1816     + *
1817     + * Note that no further runtime PM callbacks will be made if
1818     + * autopm_get fails.
1819     + */
1820     + autopm_err = usb_autopm_get_interface(port->serial->interface);
1821    
1822     minor = port->minor;
1823     tty_unregister_device(usb_serial_tty_driver, minor);
1824     @@ -117,7 +123,9 @@ static int usb_serial_device_remove(struct device *dev)
1825     dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
1826     driver->description, minor);
1827    
1828     - usb_autopm_put_interface(port->serial->interface);
1829     + if (!autopm_err)
1830     + usb_autopm_put_interface(port->serial->interface);
1831     +
1832     return retval;
1833     }
1834    
1835     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1836     index 948a19f0cdf7..70ede84f4f6b 100644
1837     --- a/drivers/usb/serial/option.c
1838     +++ b/drivers/usb/serial/option.c
1839     @@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port)
1840     struct option_private *priv = intfdata->private;
1841     struct usb_wwan_port_private *portdata;
1842     int val = 0;
1843     + int res;
1844    
1845     portdata = usb_get_serial_port_data(port);
1846    
1847     @@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port)
1848     if (portdata->rts_state)
1849     val |= 0x02;
1850    
1851     - return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
1852     + res = usb_autopm_get_interface(serial->interface);
1853     + if (res)
1854     + return res;
1855     +
1856     + res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
1857     0x22, 0x21, val, priv->bInterfaceNumber, NULL,
1858     0, USB_CTRL_SET_TIMEOUT);
1859     +
1860     + usb_autopm_put_interface(serial->interface);
1861     +
1862     + return res;
1863     }
1864    
1865     MODULE_AUTHOR(DRIVER_AUTHOR);
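
Both serial hunks above follow the same runtime-PM rule: every successful usb_autopm_get_interface() must be paired with exactly one put, and a failed get must not be "balanced" at all. The shape of the pattern, reduced to plain C with illustrative names:

#include <stdio.h>

static int resume_device(void)   { return 0; }   /* 0 on success, <0 on error */
static void suspend_device(void) { }

static int do_io_while_awake(void)
{
	int err = resume_device();   /* the "get": wakes the device */

	if (err)
		return err;          /* failed get: nothing to undo */

	/* ... issue the control message / tear down the port ... */

	suspend_device();            /* the "put": only after a good get */
	return 0;
}

int main(void)
{
	return do_io_while_awake() ? 1 : 0;
}
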
1866     diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1867     index 6c0a542e8ec1..43d93dbf7d71 100644
1868     --- a/drivers/usb/serial/qcserial.c
1869     +++ b/drivers/usb/serial/qcserial.c
1870     @@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = {
1871     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
1872     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
1873     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
1874     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */
1875     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */
1876     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */
1877     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
1878     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
1879     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
1880     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
1881     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
1882     {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
1883     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */
1884     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */
1885     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */
1886     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */
1887     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */
1888     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */
1889     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */
1890     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */
1891     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */
1892     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */
1893     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */
1894     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */
1895     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */
1896     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */
1897     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */
1898     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */
1899     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */
1900     + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */
1901     {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
1902     {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
1903     {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
1904     diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1905     index 6b192e602ce0..37480348e39b 100644
1906     --- a/drivers/usb/serial/sierra.c
1907     +++ b/drivers/usb/serial/sierra.c
1908     @@ -58,6 +58,7 @@ struct sierra_intf_private {
1909     spinlock_t susp_lock;
1910     unsigned int suspended:1;
1911     int in_flight;
1912     + unsigned int open_ports;
1913     };
1914    
1915     static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
1916     @@ -759,6 +760,7 @@ static void sierra_close(struct usb_serial_port *port)
1917     struct usb_serial *serial = port->serial;
1918     struct sierra_port_private *portdata;
1919     struct sierra_intf_private *intfdata = port->serial->private;
1920     + struct urb *urb;
1921    
1922     portdata = usb_get_serial_port_data(port);
1923    
1924     @@ -767,7 +769,6 @@ static void sierra_close(struct usb_serial_port *port)
1925    
1926     mutex_lock(&serial->disc_mutex);
1927     if (!serial->disconnected) {
1928     - serial->interface->needs_remote_wakeup = 0;
1929     /* odd error handling due to pm counters */
1930     if (!usb_autopm_get_interface(serial->interface))
1931     sierra_send_setup(port);
1932     @@ -778,8 +779,22 @@ static void sierra_close(struct usb_serial_port *port)
1933     mutex_unlock(&serial->disc_mutex);
1934     spin_lock_irq(&intfdata->susp_lock);
1935     portdata->opened = 0;
1936     + if (--intfdata->open_ports == 0)
1937     + serial->interface->needs_remote_wakeup = 0;
1938     spin_unlock_irq(&intfdata->susp_lock);
1939    
1940     + for (;;) {
1941     + urb = usb_get_from_anchor(&portdata->delayed);
1942     + if (!urb)
1943     + break;
1944     + kfree(urb->transfer_buffer);
1945     + usb_free_urb(urb);
1946     + usb_autopm_put_interface_async(serial->interface);
1947     + spin_lock(&portdata->lock);
1948     + portdata->outstanding_urbs--;
1949     + spin_unlock(&portdata->lock);
1950     + }
1951     +
1952     sierra_stop_rx_urbs(port);
1953     for (i = 0; i < portdata->num_in_urbs; i++) {
1954     sierra_release_urb(portdata->in_urbs[i]);
1955     @@ -816,23 +831,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
1956     usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
1957    
1958     err = sierra_submit_rx_urbs(port, GFP_KERNEL);
1959     - if (err) {
1960     - /* get rid of everything as in close */
1961     - sierra_close(port);
1962     - /* restore balance for autopm */
1963     - if (!serial->disconnected)
1964     - usb_autopm_put_interface(serial->interface);
1965     - return err;
1966     - }
1967     + if (err)
1968     + goto err_submit;
1969     +
1970     sierra_send_setup(port);
1971    
1972     - serial->interface->needs_remote_wakeup = 1;
1973     spin_lock_irq(&intfdata->susp_lock);
1974     portdata->opened = 1;
1975     + if (++intfdata->open_ports == 1)
1976     + serial->interface->needs_remote_wakeup = 1;
1977     spin_unlock_irq(&intfdata->susp_lock);
1978     usb_autopm_put_interface(serial->interface);
1979    
1980     return 0;
1981     +
1982     +err_submit:
1983     + sierra_stop_rx_urbs(port);
1984     +
1985     + for (i = 0; i < portdata->num_in_urbs; i++) {
1986     + sierra_release_urb(portdata->in_urbs[i]);
1987     + portdata->in_urbs[i] = NULL;
1988     + }
1989     +
1990     + return err;
1991     }
1992    
1993    
1994     @@ -928,6 +949,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
1995     struct sierra_port_private *portdata;
1996    
1997     portdata = usb_get_serial_port_data(port);
1998     + usb_set_serial_port_data(port, NULL);
1999     kfree(portdata);
2000    
2001     return 0;
2002     @@ -944,6 +966,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
2003     for (i = 0; i < serial->num_ports; ++i) {
2004     port = serial->port[i];
2005     portdata = usb_get_serial_port_data(port);
2006     + if (!portdata)
2007     + continue;
2008     sierra_stop_rx_urbs(port);
2009     usb_kill_anchored_urbs(&portdata->active);
2010     }
2011     @@ -986,6 +1010,9 @@ static int sierra_resume(struct usb_serial *serial)
2012     port = serial->port[i];
2013     portdata = usb_get_serial_port_data(port);
2014    
2015     + if (!portdata)
2016     + continue;
2017     +
2018     while ((urb = usb_get_from_anchor(&portdata->delayed))) {
2019     usb_anchor_urb(urb, &portdata->active);
2020     intfdata->in_flight++;
2021     @@ -993,8 +1020,12 @@ static int sierra_resume(struct usb_serial *serial)
2022     if (err < 0) {
2023     intfdata->in_flight--;
2024     usb_unanchor_urb(urb);
2025     - usb_scuttle_anchored_urbs(&portdata->delayed);
2026     - break;
2027     + kfree(urb->transfer_buffer);
2028     + usb_free_urb(urb);
2029     + spin_lock(&portdata->lock);
2030     + portdata->outstanding_urbs--;
2031     + spin_unlock(&portdata->lock);
2032     + continue;
2033     }
2034     }
2035    
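
needs_remote_wakeup is interface-wide while open and close are per-port, so sierra now keeps a count under susp_lock and toggles the flag only on the first open and the last close. The counting pattern in isolation, with a userspace mutex standing in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct intf {
	pthread_mutex_t lock;        /* stands in for susp_lock */
	unsigned open_ports;
	bool needs_remote_wakeup;
};

static void port_open(struct intf *i)
{
	pthread_mutex_lock(&i->lock);
	if (++i->open_ports == 1)    /* first open arms remote wakeup */
		i->needs_remote_wakeup = true;
	pthread_mutex_unlock(&i->lock);
}

static void port_close(struct intf *i)
{
	pthread_mutex_lock(&i->lock);
	if (--i->open_ports == 0)    /* last close disarms it */
		i->needs_remote_wakeup = false;
	pthread_mutex_unlock(&i->lock);
}

int main(void)
{
	struct intf i = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	port_open(&i); port_open(&i); port_close(&i);
	printf("wakeup=%d open=%u\n", i.needs_remote_wakeup, i.open_ports);
	port_close(&i);
	printf("wakeup=%d open=%u\n", i.needs_remote_wakeup, i.open_ports);
	return 0;
}
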
2036     diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
2037     index b078440e822f..d91a9883e869 100644
2038     --- a/drivers/usb/serial/usb_wwan.c
2039     +++ b/drivers/usb/serial/usb_wwan.c
2040     @@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
2041     usb_pipeendpoint(this_urb->pipe), i);
2042    
2043     err = usb_autopm_get_interface_async(port->serial->interface);
2044     - if (err < 0)
2045     + if (err < 0) {
2046     + clear_bit(i, &portdata->out_busy);
2047     break;
2048     + }
2049    
2050     /* send the data */
2051     memcpy(this_urb->transfer_buffer, buf, todo);
2052     @@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
2053     portdata = usb_get_serial_port_data(port);
2054     intfdata = serial->private;
2055    
2056     + if (port->interrupt_in_urb) {
2057     + err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
2058     + if (err) {
2059     + dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
2060     + __func__, err);
2061     + }
2062     + }
2063     +
2064     /* Start reading from the IN endpoint */
2065     for (i = 0; i < N_IN_URB; i++) {
2066     urb = portdata->in_urbs[i];
2067     @@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
2068     }
2069     EXPORT_SYMBOL(usb_wwan_open);
2070    
2071     +static void unbusy_queued_urb(struct urb *urb,
2072     + struct usb_wwan_port_private *portdata)
2073     +{
2074     + int i;
2075     +
2076     + for (i = 0; i < N_OUT_URB; i++) {
2077     + if (urb == portdata->out_urbs[i]) {
2078     + clear_bit(i, &portdata->out_busy);
2079     + break;
2080     + }
2081     + }
2082     +}
2083     +
2084     void usb_wwan_close(struct usb_serial_port *port)
2085     {
2086     int i;
2087     struct usb_serial *serial = port->serial;
2088     struct usb_wwan_port_private *portdata;
2089     struct usb_wwan_intf_private *intfdata = port->serial->private;
2090     + struct urb *urb;
2091    
2092     portdata = usb_get_serial_port_data(port);
2093    
2094     @@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
2095     portdata->opened = 0;
2096     spin_unlock_irq(&intfdata->susp_lock);
2097    
2098     + for (;;) {
2099     + urb = usb_get_from_anchor(&portdata->delayed);
2100     + if (!urb)
2101     + break;
2102     + unbusy_queued_urb(urb, portdata);
2103     + usb_autopm_put_interface_async(serial->interface);
2104     + }
2105     +
2106     for (i = 0; i < N_IN_URB; i++)
2107     usb_kill_urb(portdata->in_urbs[i]);
2108     for (i = 0; i < N_OUT_URB; i++)
2109     usb_kill_urb(portdata->out_urbs[i]);
2110     + usb_kill_urb(port->interrupt_in_urb);
2111    
2112     /* balancing - important as an error cannot be handled*/
2113     usb_autopm_get_interface_no_resume(serial->interface);
2114     @@ -463,7 +496,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
2115     struct usb_wwan_port_private *portdata;
2116     struct urb *urb;
2117     u8 *buffer;
2118     - int err;
2119     int i;
2120    
2121     if (!port->bulk_in_size || !port->bulk_out_size)
2122     @@ -503,13 +535,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
2123    
2124     usb_set_serial_port_data(port, portdata);
2125    
2126     - if (port->interrupt_in_urb) {
2127     - err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
2128     - if (err)
2129     - dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
2130     - __func__, err);
2131     - }
2132     -
2133     return 0;
2134    
2135     bail_out_error2:
2136     @@ -577,44 +602,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
2137     int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
2138     {
2139     struct usb_wwan_intf_private *intfdata = serial->private;
2140     - int b;
2141    
2142     + spin_lock_irq(&intfdata->susp_lock);
2143     if (PMSG_IS_AUTO(message)) {
2144     - spin_lock_irq(&intfdata->susp_lock);
2145     - b = intfdata->in_flight;
2146     - spin_unlock_irq(&intfdata->susp_lock);
2147     -
2148     - if (b)
2149     + if (intfdata->in_flight) {
2150     + spin_unlock_irq(&intfdata->susp_lock);
2151     return -EBUSY;
2152     + }
2153     }
2154     -
2155     - spin_lock_irq(&intfdata->susp_lock);
2156     intfdata->suspended = 1;
2157     spin_unlock_irq(&intfdata->susp_lock);
2158     +
2159     stop_read_write_urbs(serial);
2160    
2161     return 0;
2162     }
2163     EXPORT_SYMBOL(usb_wwan_suspend);
2164    
2165     -static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
2166     -{
2167     - int i;
2168     -
2169     - for (i = 0; i < N_OUT_URB; i++) {
2170     - if (urb == portdata->out_urbs[i]) {
2171     - clear_bit(i, &portdata->out_busy);
2172     - break;
2173     - }
2174     - }
2175     -}
2176     -
2177     -static void play_delayed(struct usb_serial_port *port)
2178     +static int play_delayed(struct usb_serial_port *port)
2179     {
2180     struct usb_wwan_intf_private *data;
2181     struct usb_wwan_port_private *portdata;
2182     struct urb *urb;
2183     - int err;
2184     + int err = 0;
2185    
2186     portdata = usb_get_serial_port_data(port);
2187     data = port->serial->private;
2188     @@ -631,6 +641,8 @@ static void play_delayed(struct usb_serial_port *port)
2189     break;
2190     }
2191     }
2192     +
2193     + return err;
2194     }
2195    
2196     int usb_wwan_resume(struct usb_serial *serial)
2197     @@ -640,54 +652,51 @@ int usb_wwan_resume(struct usb_serial *serial)
2198     struct usb_wwan_intf_private *intfdata = serial->private;
2199     struct usb_wwan_port_private *portdata;
2200     struct urb *urb;
2201     - int err = 0;
2202     -
2203     - /* get the interrupt URBs resubmitted unconditionally */
2204     - for (i = 0; i < serial->num_ports; i++) {
2205     - port = serial->port[i];
2206     - if (!port->interrupt_in_urb) {
2207     - dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
2208     - continue;
2209     - }
2210     - err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
2211     - dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
2212     - if (err < 0) {
2213     - dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
2214     - __func__, err);
2215     - goto err_out;
2216     - }
2217     - }
2218     + int err;
2219     + int err_count = 0;
2220    
2221     + spin_lock_irq(&intfdata->susp_lock);
2222     for (i = 0; i < serial->num_ports; i++) {
2223     /* walk all ports */
2224     port = serial->port[i];
2225     portdata = usb_get_serial_port_data(port);
2226    
2227     /* skip closed ports */
2228     - spin_lock_irq(&intfdata->susp_lock);
2229     - if (!portdata || !portdata->opened) {
2230     - spin_unlock_irq(&intfdata->susp_lock);
2231     + if (!portdata || !portdata->opened)
2232     continue;
2233     +
2234     + if (port->interrupt_in_urb) {
2235     + err = usb_submit_urb(port->interrupt_in_urb,
2236     + GFP_ATOMIC);
2237     + if (err) {
2238     + dev_err(&port->dev,
2239     + "%s: submit int urb failed: %d\n",
2240     + __func__, err);
2241     + err_count++;
2242     + }
2243     }
2244    
2245     + err = play_delayed(port);
2246     + if (err)
2247     + err_count++;
2248     +
2249     for (j = 0; j < N_IN_URB; j++) {
2250     urb = portdata->in_urbs[j];
2251     err = usb_submit_urb(urb, GFP_ATOMIC);
2252     if (err < 0) {
2253     dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
2254     __func__, err, i);
2255     - spin_unlock_irq(&intfdata->susp_lock);
2256     - goto err_out;
2257     + err_count++;
2258     }
2259     }
2260     - play_delayed(port);
2261     - spin_unlock_irq(&intfdata->susp_lock);
2262     }
2263     - spin_lock_irq(&intfdata->susp_lock);
2264     intfdata->suspended = 0;
2265     spin_unlock_irq(&intfdata->susp_lock);
2266     -err_out:
2267     - return err;
2268     +
2269     + if (err_count)
2270     + return -EIO;
2271     +
2272     + return 0;
2273     }
2274     EXPORT_SYMBOL(usb_wwan_resume);
2275     #endif
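
usb_wwan_resume() now keeps going after a failed submission instead of bailing out with only some ports restored: failures are counted and reported as a single -EIO at the end. The control flow, stripped to a skeleton with an invented submit():

#include <stdio.h>

#define EIO 5

static int submit(int port)          /* illustrative: <0 means failure */
{
	return port == 2 ? -1 : 0;   /* pretend port 2 is broken */
}

static int resume_all(int nports)
{
	int err_count = 0;

	for (int port = 0; port < nports; port++)
		if (submit(port) < 0)
			err_count++; /* note it, but keep restoring */

	return err_count ? -EIO : 0;
}

int main(void)
{
	printf("resume: %d\n", resume_all(4));   /* -5, yet 3 ports are live */
	return 0;
}
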
2276     diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h
2277     index 556d96ce40bf..89a8a89a5eb2 100644
2278     --- a/drivers/video/matrox/matroxfb_base.h
2279     +++ b/drivers/video/matrox/matroxfb_base.h
2280     @@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
2281    
2282     #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
2283    
2284     -#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
2285     +#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
2286    
2287     /* code speedup */
2288     #ifdef CONFIG_FB_MATROX_MILLENIUM
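
The reworked WaitTillIdle() discards one read of M_STATUS before polling; a plausible reading is that the first value can be stale relative to accelerator writes still posted toward the chip, though the patch carries no changelog here, so that rationale is an inference. As a sketch:

#include <stdint.h>

#define IDLE_BUSY 0x10000u   /* bit 16 of M_STATUS, per the macro above */

static inline uint32_t mmio_read32(const volatile uint32_t *reg)
{
	return *reg;         /* volatile: every call really hits the register */
}

static void wait_till_idle(const volatile uint32_t *m_status)
{
	(void)mmio_read32(m_status);   /* throw away one possibly stale read */
	while (mmio_read32(m_status) & IDLE_BUSY)
		;                      /* spin until the busy bit clears */
}
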
2289     diff --git a/fs/aio.c b/fs/aio.c
2290     index 04cd7686555d..19e7d9530dbe 100644
2291     --- a/fs/aio.c
2292     +++ b/fs/aio.c
2293     @@ -1007,6 +1007,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
2294    
2295     /* everything turned out well, dispose of the aiocb. */
2296     kiocb_free(iocb);
2297     + put_reqs_available(ctx, 1);
2298    
2299     /*
2300     * We have to order our ring_info tail store above and test
2301     @@ -1048,6 +1049,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
2302     if (head == tail)
2303     goto out;
2304    
2305     + head %= ctx->nr_events;
2306     + tail %= ctx->nr_events;
2307     +
2308     while (ret < nr) {
2309     long avail;
2310     struct io_event *ev;
2311     @@ -1086,8 +1090,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
2312     flush_dcache_page(ctx->ring_pages[0]);
2313    
2314     pr_debug("%li h%u t%u\n", ret, head, tail);
2315     -
2316     - put_reqs_available(ctx, ret);
2317     out:
2318     mutex_unlock(&ctx->ring_lock);
2319    
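
The aio fix normalizes head and tail into [0, nr_events) before walking the ring, then copies in contiguous runs that stop either at tail or at the physical end of the ring. The index arithmetic on its own:

#include <stdio.h>

int main(void)
{
	unsigned nr = 128;                  /* ring capacity (nr_events) */
	unsigned head = 130, tail = 131;    /* unnormalized indices */
	unsigned copied = 0;

	head %= nr;
	tail %= nr;

	while (head != tail) {
		/* longest contiguous run: up to tail, or to end of ring */
		unsigned avail = ((head <= tail) ? tail : nr) - head;

		copied += avail;            /* memcpy(ev + copied, ..., avail) */
		head = (head + avail) % nr;
	}
	printf("copied %u events, head now %u\n", copied, head);   /* 1, 3 */
	return 0;
}
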
2320     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
2321     index aded3ef3d3d4..14d29d02097d 100644
2322     --- a/fs/btrfs/backref.c
2323     +++ b/fs/btrfs/backref.c
2324     @@ -972,11 +972,12 @@ again:
2325     goto out;
2326     }
2327     if (ref->count && ref->parent) {
2328     - if (extent_item_pos && !ref->inode_list) {
2329     + if (extent_item_pos && !ref->inode_list &&
2330     + ref->level == 0) {
2331     u32 bsz;
2332     struct extent_buffer *eb;
2333     bsz = btrfs_level_size(fs_info->extent_root,
2334     - info_level);
2335     + ref->level);
2336     eb = read_tree_block(fs_info->extent_root,
2337     ref->parent, bsz, 0);
2338     if (!eb || !extent_buffer_uptodate(eb)) {
2339     @@ -1411,9 +1412,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2340     * returns <0 on error
2341     */
2342     static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
2343     - struct btrfs_extent_item *ei, u32 item_size,
2344     - struct btrfs_extent_inline_ref **out_eiref,
2345     - int *out_type)
2346     + struct btrfs_key *key,
2347     + struct btrfs_extent_item *ei, u32 item_size,
2348     + struct btrfs_extent_inline_ref **out_eiref,
2349     + int *out_type)
2350     {
2351     unsigned long end;
2352     u64 flags;
2353     @@ -1423,19 +1425,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
2354     /* first call */
2355     flags = btrfs_extent_flags(eb, ei);
2356     if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2357     - info = (struct btrfs_tree_block_info *)(ei + 1);
2358     - *out_eiref =
2359     - (struct btrfs_extent_inline_ref *)(info + 1);
2360     + if (key->type == BTRFS_METADATA_ITEM_KEY) {
2361     + /* a skinny metadata extent */
2362     + *out_eiref =
2363     + (struct btrfs_extent_inline_ref *)(ei + 1);
2364     + } else {
2365     + WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2366     + info = (struct btrfs_tree_block_info *)(ei + 1);
2367     + *out_eiref =
2368     + (struct btrfs_extent_inline_ref *)(info + 1);
2369     + }
2370     } else {
2371     *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2372     }
2373     *ptr = (unsigned long)*out_eiref;
2374     - if ((void *)*ptr >= (void *)ei + item_size)
2375     + if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2376     return -ENOENT;
2377     }
2378    
2379     end = (unsigned long)ei + item_size;
2380     - *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
2381     + *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2382     *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
2383    
2384     *ptr += btrfs_extent_inline_ref_size(*out_type);
2385     @@ -1454,8 +1463,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
2386     * <0 on error.
2387     */
2388     int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2389     - struct btrfs_extent_item *ei, u32 item_size,
2390     - u64 *out_root, u8 *out_level)
2391     + struct btrfs_key *key, struct btrfs_extent_item *ei,
2392     + u32 item_size, u64 *out_root, u8 *out_level)
2393     {
2394     int ret;
2395     int type;
2396     @@ -1466,8 +1475,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2397     return 1;
2398    
2399     while (1) {
2400     - ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
2401     - &eiref, &type);
2402     + ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
2403     + &eiref, &type);
2404     if (ret < 0)
2405     return ret;
2406    
2407     diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
2408     index a910b27a8ad9..519b49e51f57 100644
2409     --- a/fs/btrfs/backref.h
2410     +++ b/fs/btrfs/backref.h
2411     @@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2412     u64 *flags);
2413    
2414     int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2415     - struct btrfs_extent_item *ei, u32 item_size,
2416     - u64 *out_root, u8 *out_level);
2417     + struct btrfs_key *key, struct btrfs_extent_item *ei,
2418     + u32 item_size, u64 *out_root, u8 *out_level);
2419    
2420     int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
2421     u64 extent_item_objectid,
2422     diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
2423     index 2c1a42ca519f..d3511cc17091 100644
2424     --- a/fs/btrfs/ctree.h
2425     +++ b/fs/btrfs/ctree.h
2426     @@ -1104,6 +1104,12 @@ struct btrfs_qgroup_limit_item {
2427     __le64 rsv_excl;
2428     } __attribute__ ((__packed__));
2429    
2430     +/* For raid type sysfs entries */
2431     +struct raid_kobject {
2432     + int raid_type;
2433     + struct kobject kobj;
2434     +};
2435     +
2436     struct btrfs_space_info {
2437     spinlock_t lock;
2438    
2439     @@ -1154,7 +1160,7 @@ struct btrfs_space_info {
2440     wait_queue_head_t wait;
2441    
2442     struct kobject kobj;
2443     - struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
2444     + struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
2445     };
2446    
2447     #define BTRFS_BLOCK_RSV_GLOBAL 1
2448     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
2449     index 9a527a1826df..370ef7450157 100644
2450     --- a/fs/btrfs/disk-io.c
2451     +++ b/fs/btrfs/disk-io.c
2452     @@ -3598,6 +3598,11 @@ int close_ctree(struct btrfs_root *root)
2453    
2454     btrfs_free_block_groups(fs_info);
2455    
2456     + /*
2457     + * we must make sure there is not any read request to
2458     + * submit after we stopping all workers.
2459     + */
2460     + invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
2461     btrfs_stop_all_workers(fs_info);
2462    
2463     free_root_pointers(fs_info, 1);
2464     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2465     index 3c8e68da9ef8..3ff98e23f651 100644
2466     --- a/fs/btrfs/extent-tree.c
2467     +++ b/fs/btrfs/extent-tree.c
2468     @@ -3400,10 +3400,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2469     return ret;
2470     }
2471    
2472     - for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2473     + for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
2474     INIT_LIST_HEAD(&found->block_groups[i]);
2475     - kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
2476     - }
2477     init_rwsem(&found->groups_sem);
2478     spin_lock_init(&found->lock);
2479     found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
2480     @@ -8328,8 +8326,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
2481     list_del(&space_info->list);
2482     for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2483     struct kobject *kobj;
2484     - kobj = &space_info->block_group_kobjs[i];
2485     - if (kobj->parent) {
2486     + kobj = space_info->block_group_kobjs[i];
2487     + space_info->block_group_kobjs[i] = NULL;
2488     + if (kobj) {
2489     kobject_del(kobj);
2490     kobject_put(kobj);
2491     }
2492     @@ -8344,22 +8343,35 @@ static void __link_block_group(struct btrfs_space_info *space_info,
2493     struct btrfs_block_group_cache *cache)
2494     {
2495     int index = get_block_group_index(cache);
2496     + bool first = false;
2497    
2498     down_write(&space_info->groups_sem);
2499     - if (list_empty(&space_info->block_groups[index])) {
2500     - struct kobject *kobj = &space_info->block_group_kobjs[index];
2501     + if (list_empty(&space_info->block_groups[index]))
2502     + first = true;
2503     + list_add_tail(&cache->list, &space_info->block_groups[index]);
2504     + up_write(&space_info->groups_sem);
2505     +
2506     + if (first) {
2507     + struct raid_kobject *rkobj;
2508     int ret;
2509    
2510     - kobject_get(&space_info->kobj); /* put in release */
2511     - ret = kobject_add(kobj, &space_info->kobj, "%s",
2512     - get_raid_name(index));
2513     + rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
2514     + if (!rkobj)
2515     + goto out_err;
2516     + rkobj->raid_type = index;
2517     + kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
2518     + ret = kobject_add(&rkobj->kobj, &space_info->kobj,
2519     + "%s", get_raid_name(index));
2520     if (ret) {
2521     - pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
2522     - kobject_put(&space_info->kobj);
2523     + kobject_put(&rkobj->kobj);
2524     + goto out_err;
2525     }
2526     + space_info->block_group_kobjs[index] = &rkobj->kobj;
2527     }
2528     - list_add_tail(&cache->list, &space_info->block_groups[index]);
2529     - up_write(&space_info->groups_sem);
2530     +
2531     + return;
2532     +out_err:
2533     + pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
2534     }
2535    
2536     static struct btrfs_block_group_cache *
2537     @@ -8694,6 +8706,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2538     struct btrfs_root *tree_root = root->fs_info->tree_root;
2539     struct btrfs_key key;
2540     struct inode *inode;
2541     + struct kobject *kobj = NULL;
2542     int ret;
2543     int index;
2544     int factor;
2545     @@ -8793,11 +8806,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
2546     */
2547     list_del_init(&block_group->list);
2548     if (list_empty(&block_group->space_info->block_groups[index])) {
2549     - kobject_del(&block_group->space_info->block_group_kobjs[index]);
2550     - kobject_put(&block_group->space_info->block_group_kobjs[index]);
2551     + kobj = block_group->space_info->block_group_kobjs[index];
2552     + block_group->space_info->block_group_kobjs[index] = NULL;
2553     clear_avail_alloc_bits(root->fs_info, block_group->flags);
2554     }
2555     up_write(&block_group->space_info->groups_sem);
2556     + if (kobj) {
2557     + kobject_del(kobj);
2558     + kobject_put(kobj);
2559     + }
2560    
2561     if (block_group->cached == BTRFS_CACHE_STARTED)
2562     wait_block_group_cache_done(block_group);
2563     diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2564     index 85bbd01f1271..2eea43f5067c 100644
2565     --- a/fs/btrfs/extent_io.c
2566     +++ b/fs/btrfs/extent_io.c
2567     @@ -1686,6 +1686,7 @@ again:
2568     * shortening the size of the delalloc range we're searching
2569     */
2570     free_extent_state(cached_state);
2571     + cached_state = NULL;
2572     if (!loops) {
2573     max_bytes = PAGE_CACHE_SIZE;
2574     loops = 1;
2575     @@ -2346,7 +2347,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2576     {
2577     int uptodate = (err == 0);
2578     struct extent_io_tree *tree;
2579     - int ret;
2580     + int ret = 0;
2581    
2582     tree = &BTRFS_I(page->mapping->host)->io_tree;
2583    
2584     @@ -2360,6 +2361,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2585     if (!uptodate) {
2586     ClearPageUptodate(page);
2587     SetPageError(page);
2588     + ret = ret < 0 ? ret : -EIO;
2589     + mapping_set_error(page->mapping, ret);
2590     }
2591     return 0;
2592     }
2593     diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
2594     index 73f3de7a083c..a6bd654dcd47 100644
2595     --- a/fs/btrfs/free-space-cache.c
2596     +++ b/fs/btrfs/free-space-cache.c
2597     @@ -831,7 +831,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
2598    
2599     if (!matched) {
2600     __btrfs_remove_free_space_cache(ctl);
2601     - btrfs_err(fs_info, "block group %llu has wrong amount of free space",
2602     + btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
2603     block_group->key.objectid);
2604     ret = -1;
2605     }
2606     @@ -843,7 +843,7 @@ out:
2607     spin_unlock(&block_group->lock);
2608     ret = 0;
2609    
2610     - btrfs_err(fs_info, "failed to load free space cache for block group %llu",
2611     + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
2612     block_group->key.objectid);
2613     }
2614    
2615     diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
2616     index efba5d1282ee..f6666d9b41cf 100644
2617     --- a/fs/btrfs/scrub.c
2618     +++ b/fs/btrfs/scrub.c
2619     @@ -577,8 +577,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
2620    
2621     if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2622     do {
2623     - ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
2624     - &ref_root, &ref_level);
2625     + ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
2626     + item_size, &ref_root,
2627     + &ref_level);
2628     printk_in_rcu(KERN_WARNING
2629     "BTRFS: %s at logical %llu on dev %s, "
2630     "sector %llu: metadata %s (level %d) in tree "
2631     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
2632     index 9dde9717c1b9..a65ed4cb436b 100644
2633     --- a/fs/btrfs/send.c
2634     +++ b/fs/btrfs/send.c
2635     @@ -1589,6 +1589,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
2636     goto out;
2637     }
2638     btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
2639     + if (key.type == BTRFS_ROOT_ITEM_KEY) {
2640     + ret = -ENOENT;
2641     + goto out;
2642     + }
2643     *found_inode = key.objectid;
2644     *found_type = btrfs_dir_type(path->nodes[0], di);
2645    
2646     diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
2647     index 865f4cf9a769..ff286f372ff4 100644
2648     --- a/fs/btrfs/sysfs.c
2649     +++ b/fs/btrfs/sysfs.c
2650     @@ -253,6 +253,7 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
2651     BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
2652    
2653     #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
2654     +#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
2655    
2656     static ssize_t raid_bytes_show(struct kobject *kobj,
2657     struct kobj_attribute *attr, char *buf);
2658     @@ -265,7 +266,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
2659     {
2660     struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
2661     struct btrfs_block_group_cache *block_group;
2662     - int index = kobj - sinfo->block_group_kobjs;
2663     + int index = to_raid_kobj(kobj)->raid_type;
2664     u64 val = 0;
2665    
2666     down_read(&sinfo->groups_sem);
2667     @@ -287,7 +288,7 @@ static struct attribute *raid_attributes[] = {
2668    
2669     static void release_raid_kobj(struct kobject *kobj)
2670     {
2671     - kobject_put(kobj->parent);
2672     + kfree(to_raid_kobj(kobj));
2673     }
2674    
2675     struct kobj_type btrfs_raid_ktype = {
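
With the kobject now embedded in a heap-allocated raid_kobject, the release callback can recover the container with container_of() and free it, instead of dropping a reference on the parent space_info. The recovery step in plain C, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kobj { int refs; };          /* stand-in for struct kobject */

struct raid_kobject {
	int raid_type;
	struct kobj kobj;           /* embedded, not a pointer */
};

static void release_raid_kobj(struct kobj *k)
{
	/* walk back from the member to the allocation and free it all */
	free(container_of(k, struct raid_kobject, kobj));
}

int main(void)
{
	struct raid_kobject *r = calloc(1, sizeof(*r));

	r->raid_type = 3;
	printf("raid_type via member: %d\n",
	       container_of(&r->kobj, struct raid_kobject, kobj)->raid_type);
	release_raid_kobj(&r->kobj);
	return 0;
}
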
2676     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2677     index bab0b84d8f80..e46d3d72a9c9 100644
2678     --- a/fs/btrfs/volumes.c
2679     +++ b/fs/btrfs/volumes.c
2680     @@ -1438,6 +1438,22 @@ out:
2681     return ret;
2682     }
2683    
2684     +/*
2685     + * Function to update ctime/mtime for a given device path.
2686     + * Mainly used for ctime/mtime based probe like libblkid.
2687     + */
2688     +static void update_dev_time(char *path_name)
2689     +{
2690     + struct file *filp;
2691     +
2692     + filp = filp_open(path_name, O_RDWR, 0);
2693     + if (!filp)
2694     + return;
2695     + file_update_time(filp);
2696     + filp_close(filp, NULL);
2697     + return;
2698     +}
2699     +
2700     static int btrfs_rm_dev_item(struct btrfs_root *root,
2701     struct btrfs_device *device)
2702     {
2703     @@ -1660,11 +1676,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
2704     struct btrfs_fs_devices *fs_devices;
2705     fs_devices = root->fs_info->fs_devices;
2706     while (fs_devices) {
2707     - if (fs_devices->seed == cur_devices)
2708     + if (fs_devices->seed == cur_devices) {
2709     + fs_devices->seed = cur_devices->seed;
2710     break;
2711     + }
2712     fs_devices = fs_devices->seed;
2713     }
2714     - fs_devices->seed = cur_devices->seed;
2715     cur_devices->seed = NULL;
2716     lock_chunks(root);
2717     __btrfs_close_devices(cur_devices);
2718     @@ -1690,10 +1707,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
2719    
2720     ret = 0;
2721    
2722     - /* Notify udev that device has changed */
2723     - if (bdev)
2724     + if (bdev) {
2725     + /* Notify udev that device has changed */
2726     btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2727    
2728     + /* Update ctime/mtime for device path for libblkid */
2729     + update_dev_time(device_path);
2730     + }
2731     +
2732     error_brelse:
2733     brelse(bh);
2734     if (bdev)
2735     @@ -1869,7 +1890,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
2736     fs_devices->seeding = 0;
2737     fs_devices->num_devices = 0;
2738     fs_devices->open_devices = 0;
2739     - fs_devices->total_devices = 0;
2740     fs_devices->seed = seed_devices;
2741    
2742     generate_random_uuid(fs_devices->fsid);
2743     @@ -2132,6 +2152,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
2744     ret = btrfs_commit_transaction(trans, root);
2745     }
2746    
2747     + /* Update ctime/mtime for libblkid */
2748     + update_dev_time(device_path);
2749     return ret;
2750    
2751     error_trans:
2752     @@ -6035,10 +6057,14 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
2753     struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2754     struct btrfs_device *device;
2755    
2756     - mutex_lock(&fs_devices->device_list_mutex);
2757     - list_for_each_entry(device, &fs_devices->devices, dev_list)
2758     - device->dev_root = fs_info->dev_root;
2759     - mutex_unlock(&fs_devices->device_list_mutex);
2760     + while (fs_devices) {
2761     + mutex_lock(&fs_devices->device_list_mutex);
2762     + list_for_each_entry(device, &fs_devices->devices, dev_list)
2763     + device->dev_root = fs_info->dev_root;
2764     + mutex_unlock(&fs_devices->device_list_mutex);
2765     +
2766     + fs_devices = fs_devices->seed;
2767     + }
2768     }
2769    
2770     static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
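
update_dev_time() exists only to bump ctime/mtime on the device node so that timestamp-based probes such as libblkid notice the change after a device is added or removed. The userspace equivalent is a plain "touch" via futimens():

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Bump atime/mtime (and therefore ctime) on a device path to "now". */
static int touch_dev(const char *path)
{
	int fd = open(path, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = futimens(fd, NULL);   /* NULL times = current time */
	close(fd);
	return ret;
}
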
2771     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2772     index 860344701067..049a3f2693ba 100644
2773     --- a/fs/cifs/smb2pdu.c
2774     +++ b/fs/cifs/smb2pdu.c
2775     @@ -1089,6 +1089,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2776     int rc = 0;
2777     unsigned int num_iovecs = 2;
2778     __u32 file_attributes = 0;
2779     + char *dhc_buf = NULL, *lc_buf = NULL;
2780    
2781     cifs_dbg(FYI, "create/open\n");
2782    
2783     @@ -1155,6 +1156,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2784     kfree(copy_path);
2785     return rc;
2786     }
2787     + lc_buf = iov[num_iovecs-1].iov_base;
2788     }
2789    
2790     if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
2791     @@ -1169,9 +1171,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2792     if (rc) {
2793     cifs_small_buf_release(req);
2794     kfree(copy_path);
2795     - kfree(iov[num_iovecs-1].iov_base);
2796     + kfree(lc_buf);
2797     return rc;
2798     }
2799     + dhc_buf = iov[num_iovecs-1].iov_base;
2800     }
2801    
2802     rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
2803     @@ -1203,6 +1206,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2804     *oplock = rsp->OplockLevel;
2805     creat_exit:
2806     kfree(copy_path);
2807     + kfree(lc_buf);
2808     + kfree(dhc_buf);
2809     free_rsp_buf(resp_buftype, rsp);
2810     return rc;
2811     }
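
The SMB2_open() fix is the classic single-exit cleanup shape: initialize every conditionally-allocated pointer to NULL, record each allocation as it happens, and free them all unconditionally at the exit label, relying on kfree(NULL) being a no-op. Reduced to a userspace skeleton:

#include <stdlib.h>

static int open_like(int want_lease, int want_durable)
{
	char *lc_buf = NULL, *dhc_buf = NULL;   /* NULL until allocated */
	int rc = 0;

	if (want_lease) {
		lc_buf = malloc(32);            /* lease create context */
		if (!lc_buf) {
			rc = -1;
			goto out;
		}
	}
	if (want_durable) {
		dhc_buf = malloc(32);           /* durable-handle context */
		if (!dhc_buf) {
			rc = -1;
			goto out;
		}
	}

	/* ... marshal and send using whichever contexts exist ... */

out:
	free(lc_buf);                           /* free(NULL) is a no-op */
	free(dhc_buf);
	return rc;
}

int main(void)
{
	return open_like(1, 1) ? 1 : 0;
}
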
2812     diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2813     index af903128891c..ead00467282d 100644
2814     --- a/fs/eventpoll.c
2815     +++ b/fs/eventpoll.c
2816     @@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
2817     void eventpoll_release_file(struct file *file)
2818     {
2819     struct eventpoll *ep;
2820     - struct epitem *epi;
2821     + struct epitem *epi, *next;
2822    
2823     /*
2824     * We don't want to get "file->f_lock" because it is not
2825     @@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
2826     * Besides, ep_remove() acquires the lock, so we can't hold it here.
2827     */
2828     mutex_lock(&epmutex);
2829     - list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
2830     + list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
2831     ep = epi->ep;
2832     mutex_lock_nested(&ep->mtx, 0);
2833     ep_remove(ep, epi);
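
list_for_each_entry_safe() caches the next node before the loop body runs, so ep_remove() can unlink the current entry without derailing the walk; the _rcu iterator it replaces assumed entries stay put. The same idiom over a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

static void remove_matching(struct node **head, int v)
{
	struct node **link = head, *n, *next;

	for (n = *head; n; n = next) {
		next = n->next;           /* cache before any unlink */
		if (n->v == v) {
			*link = next;     /* unlink and free current */
			free(n);
		} else {
			link = &n->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 5; i++) {     /* builds 4,3,2,1,0 */
		struct node *n = malloc(sizeof(*n));
		n->v = i; n->next = head; head = n;
	}
	remove_matching(&head, 2);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->v);      /* 4 3 1 0 */
	printf("\n");
	return 0;
}
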
2834     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2835     index 3a603a8d9f96..62f024c051ce 100644
2836     --- a/fs/ext4/ext4.h
2837     +++ b/fs/ext4/ext4.h
2838     @@ -2764,7 +2764,8 @@ extern void ext4_io_submit(struct ext4_io_submit *io);
2839     extern int ext4_bio_write_page(struct ext4_io_submit *io,
2840     struct page *page,
2841     int len,
2842     - struct writeback_control *wbc);
2843     + struct writeback_control *wbc,
2844     + bool keep_towrite);
2845    
2846     /* mmp.c */
2847     extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
2848     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2849     index 4e8903d0432e..a7029f481b7b 100644
2850     --- a/fs/ext4/inode.c
2851     +++ b/fs/ext4/inode.c
2852     @@ -1835,6 +1835,7 @@ static int ext4_writepage(struct page *page,
2853     struct buffer_head *page_bufs = NULL;
2854     struct inode *inode = page->mapping->host;
2855     struct ext4_io_submit io_submit;
2856     + bool keep_towrite = false;
2857    
2858     trace_ext4_writepage(page);
2859     size = i_size_read(inode);
2860     @@ -1865,6 +1866,7 @@ static int ext4_writepage(struct page *page,
2861     unlock_page(page);
2862     return 0;
2863     }
2864     + keep_towrite = true;
2865     }
2866    
2867     if (PageChecked(page) && ext4_should_journal_data(inode))
2868     @@ -1881,7 +1883,7 @@ static int ext4_writepage(struct page *page,
2869     unlock_page(page);
2870     return -ENOMEM;
2871     }
2872     - ret = ext4_bio_write_page(&io_submit, page, len, wbc);
2873     + ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2874     ext4_io_submit(&io_submit);
2875     /* Drop io_end reference we got from init */
2876     ext4_put_io_end_defer(io_submit.io_end);
2877     @@ -1900,7 +1902,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2878     else
2879     len = PAGE_CACHE_SIZE;
2880     clear_page_dirty_for_io(page);
2881     - err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
2882     + err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2883     if (!err)
2884     mpd->wbc->nr_to_write--;
2885     mpd->first_page++;
2886     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
2887     index 04a5c7504be9..08ddfdac955c 100644
2888     --- a/fs/ext4/mballoc.c
2889     +++ b/fs/ext4/mballoc.c
2890     @@ -3135,7 +3135,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2891     }
2892     BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
2893     start > ac->ac_o_ex.fe_logical);
2894     - BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2895     + BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
2896    
2897     /* now prepare goal request */
2898    
2899     diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
2900     index c18d95b50540..b6a3804a9855 100644
2901     --- a/fs/ext4/page-io.c
2902     +++ b/fs/ext4/page-io.c
2903     @@ -401,7 +401,8 @@ submit_and_retry:
2904     int ext4_bio_write_page(struct ext4_io_submit *io,
2905     struct page *page,
2906     int len,
2907     - struct writeback_control *wbc)
2908     + struct writeback_control *wbc,
2909     + bool keep_towrite)
2910     {
2911     struct inode *inode = page->mapping->host;
2912     unsigned block_start, blocksize;
2913     @@ -414,10 +415,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
2914     BUG_ON(!PageLocked(page));
2915     BUG_ON(PageWriteback(page));
2916    
2917     - set_page_writeback(page);
2918     + if (keep_towrite)
2919     + set_page_writeback_keepwrite(page);
2920     + else
2921     + set_page_writeback(page);
2922     ClearPageError(page);
2923    
2924     /*
2925     + * Comments copied from block_write_full_page_endio:
2926     + *
2927     + * The page straddles i_size. It must be zeroed out on each and every
2928     + * writepage invocation because it may be mmapped. "A file is mapped
2929     + * in multiples of the page size. For a file that is not a multiple of
2930     + * the page size, the remaining memory is zeroed when mapped, and
2931     + * writes to that region are not written out to the file."
2932     + */
2933     + if (len < PAGE_CACHE_SIZE)
2934     + zero_user_segment(page, len, PAGE_CACHE_SIZE);
2935     + /*
2936     * In the first loop we prepare and mark buffers to submit. We have to
2937     * mark all buffers in the page before submitting so that
2938     * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
2939     @@ -428,19 +443,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
2940     do {
2941     block_start = bh_offset(bh);
2942     if (block_start >= len) {
2943     - /*
2944     - * Comments copied from block_write_full_page_endio:
2945     - *
2946     - * The page straddles i_size. It must be zeroed out on
2947     - * each and every writepage invocation because it may
2948     - * be mmapped. "A file is mapped in multiples of the
2949     - * page size. For a file that is not a multiple of
2950     - * the page size, the remaining memory is zeroed when
2951     - * mapped, and writes to that region are not written
2952     - * out to the file."
2953     - */
2954     - zero_user_segment(page, block_start,
2955     - block_start + blocksize);
2956     clear_buffer_dirty(bh);
2957     set_buffer_uptodate(bh);
2958     continue;
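The page-io.c hunk hoists the tail-zeroing out of the per-buffer loop: once len < PAGE_CACHE_SIZE, everything past len must be cleared on every writepage, because a writer through mmap() may have dirtied it. A minimal userspace sketch of that invariant, with a hypothetical zero_tail() standing in for zero_user_segment():

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Stand-in for zero_user_segment(page, len, PAGE_CACHE_SIZE). */
static void zero_tail(unsigned char *page, size_t len)
{
        if (len < PAGE_SIZE)
                memset(page + len, 0, PAGE_SIZE - len);
}

int main(void)
{
        unsigned char page[PAGE_SIZE];

        memset(page, 0xaa, sizeof(page));       /* stale data past EOF */
        zero_tail(page, 100);                   /* i_size ends at byte 100 */
        printf("byte 99 = %#x, byte 100 = %#x\n", page[99], page[100]);
        return 0;
}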
2959     diff --git a/include/linux/acpi.h b/include/linux/acpi.h
2960     index 1151a1dcfe41..cd80aa80d03e 100644
2961     --- a/include/linux/acpi.h
2962     +++ b/include/linux/acpi.h
2963     @@ -37,6 +37,7 @@
2964    
2965     #include <linux/list.h>
2966     #include <linux/mod_devicetable.h>
2967     +#include <linux/dynamic_debug.h>
2968    
2969     #include <acpi/acpi.h>
2970     #include <acpi/acpi_bus.h>
2971     @@ -590,6 +591,14 @@ static inline __printf(3, 4) void
2972     acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
2973     #endif /* !CONFIG_ACPI */
2974    
2975     +#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
2976     +__printf(3, 4)
2977     +void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
2978     +#else
2979     +#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
2980     + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
2981     +#endif
2982     +
2983     /*
2984     * acpi_handle_<level>: Print message with ACPI prefix and object path
2985     *
2986     @@ -611,11 +620,19 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
2987     #define acpi_handle_info(handle, fmt, ...) \
2988     acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
2989    
2990     -/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */
2991     -#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
2992     +#if defined(DEBUG)
2993     #define acpi_handle_debug(handle, fmt, ...) \
2994     acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
2995     #else
2996     +#if defined(CONFIG_DYNAMIC_DEBUG)
2997     +#define acpi_handle_debug(handle, fmt, ...) \
2998     +do { \
2999     + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3000     + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3001     + __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
3002     + ##__VA_ARGS__); \
3003     +} while (0)
3004     +#else
3005     #define acpi_handle_debug(handle, fmt, ...) \
3006     ({ \
3007     if (0) \
3008     @@ -623,5 +640,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
3009     0; \
3010     })
3011     #endif
3012     +#endif
3013    
3014     #endif /*_LINUX_ACPI_H*/
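The reworked acpi_handle_debug() follows the standard dynamic-debug shape: static per-callsite metadata, a cheap flag test, and formatting only when the site is enabled. A toy model of that shape, assuming a hypothetical ddebug_desc type rather than the real struct _ddebug:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical stand-in for struct _ddebug. */
struct ddebug_desc {
        unsigned int flags;
};
#define DPRINTK_FLAGS_PRINT 0x1u

static void ddebug_printf(struct ddebug_desc *d, const char *fmt, ...)
{
        va_list ap;

        (void)d;                /* the real helper prints the ACPI path too */
        va_start(ap, fmt);
        vprintf(fmt, ap);
        va_end(ap);
}

/* Same shape as the patched macro: per-callsite metadata, flag test,
 * and formatting only when the site is switched on. */
#define my_debug(desc, fmt, ...)                                \
do {                                                            \
        if ((desc)->flags & DPRINTK_FLAGS_PRINT)                \
                ddebug_printf(desc, fmt, ##__VA_ARGS__);        \
} while (0)

int main(void)
{
        static struct ddebug_desc site = { .flags = DPRINTK_FLAGS_PRINT };

        my_debug(&site, "handle %p: probed\n", (void *)0x1000);
        site.flags = 0;         /* disabled: no formatting cost at all */
        my_debug(&site, "never printed\n");
        return 0;
}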
3015     diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3016     index 8c43cc469d78..bd1e9bcec547 100644
3017     --- a/include/linux/hugetlb.h
3018     +++ b/include/linux/hugetlb.h
3019     @@ -382,15 +382,13 @@ static inline pgoff_t basepage_index(struct page *page)
3020    
3021     extern void dissolve_free_huge_pages(unsigned long start_pfn,
3022     unsigned long end_pfn);
3023     -int pmd_huge_support(void);
3024     -/*
3025     - * Currently hugepage migration is enabled only for pmd-based hugepage.
3026     - * This function will be updated when hugepage migration is more widely
3027     - * supported.
3028     - */
3029     static inline int hugepage_migration_support(struct hstate *h)
3030     {
3031     - return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
3032     +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
3033     + return huge_page_shift(h) == PMD_SHIFT;
3034     +#else
3035     + return 0;
3036     +#endif
3037     }
3038    
3039     static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
3040     @@ -430,7 +428,6 @@ static inline pgoff_t basepage_index(struct page *page)
3041     return page->index;
3042     }
3043     #define dissolve_free_huge_pages(s, e) do {} while (0)
3044     -#define pmd_huge_support() 0
3045     #define hugepage_migration_support(h) 0
3046    
3047     static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
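With the per-arch pmd_huge_support() hook gone, the capability check collapses into one inline gated by CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION. A compilable sketch of the same shape, assuming 4 KiB base pages so PMD_SHIFT is 21; build with -DCONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION to flip the result:

#include <stdio.h>

#define PMD_SHIFT 21            /* assumed: x86-64 2 MiB huge pages */

struct hstate { unsigned int order; };

static inline unsigned int huge_page_shift(const struct hstate *h)
{
        return h->order + 12;   /* order relative to 4 KiB base pages */
}

static inline int hugepage_migration_support(const struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return 0;
#endif
}

int main(void)
{
        struct hstate h2m = { .order = 9 };     /* 2 MiB hstate */

        printf("2MiB migratable: %d\n", hugepage_migration_support(&h2m));
        return 0;
}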
3048     diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
3049     index 26e2661d3935..472c021a2d4f 100644
3050     --- a/include/linux/irqdesc.h
3051     +++ b/include/linux/irqdesc.h
3052     @@ -27,6 +27,8 @@ struct irq_desc;
3053     * @irq_count: stats field to detect stalled irqs
3054     * @last_unhandled: aging timer for unhandled count
3055     * @irqs_unhandled: stats field for spurious unhandled interrupts
3056     + * @threads_handled: stats field for deferred spurious detection of threaded handlers
3057     + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
3058     * @lock: locking for SMP
3059     * @affinity_hint: hint to user space for preferred irq affinity
3060     * @affinity_notify: context for notification of affinity changes
3061     @@ -52,6 +54,8 @@ struct irq_desc {
3062     unsigned int irq_count; /* For detecting broken IRQs */
3063     unsigned long last_unhandled; /* Aging timer for unhandled count */
3064     unsigned int irqs_unhandled;
3065     + atomic_t threads_handled;
3066     + int threads_handled_last;
3067     raw_spinlock_t lock;
3068     struct cpumask *percpu_enabled;
3069     #ifdef CONFIG_SMP
3070     diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
3071     index 5f1ea756aace..5bba088bd239 100644
3072     --- a/include/linux/mempolicy.h
3073     +++ b/include/linux/mempolicy.h
3074     @@ -176,6 +176,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
3075     {
3076     if (vma->vm_flags & (VM_IO | VM_PFNMAP))
3077     return 0;
3078     +
3079     +#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
3080     + if (vma->vm_flags & VM_HUGETLB)
3081     + return 0;
3082     +#endif
3083     +
3084     /*
3085     * Migration allocates pages in the highest zone. If we cannot
3086     * do so then migration (at least from node to node) is not
3087     diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
3088     index 9b61b9bf81ac..e6800f0c0d7b 100644
3089     --- a/include/linux/mmzone.h
3090     +++ b/include/linux/mmzone.h
3091     @@ -75,9 +75,13 @@ enum {
3092    
3093     extern int page_group_by_mobility_disabled;
3094    
3095     +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
3096     +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
3097     +
3098     static inline int get_pageblock_migratetype(struct page *page)
3099     {
3100     - return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
3101     + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
3102     + return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
3103     }
3104    
3105     struct free_area {
3106     diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
3107     index d1fe1a761047..ca71a1d347a0 100644
3108     --- a/include/linux/page-flags.h
3109     +++ b/include/linux/page-flags.h
3110     @@ -317,13 +317,23 @@ CLEARPAGEFLAG(Uptodate, uptodate)
3111     extern void cancel_dirty_page(struct page *page, unsigned int account_size);
3112    
3113     int test_clear_page_writeback(struct page *page);
3114     -int test_set_page_writeback(struct page *page);
3115     +int __test_set_page_writeback(struct page *page, bool keep_write);
3116     +
3117     +#define test_set_page_writeback(page) \
3118     + __test_set_page_writeback(page, false)
3119     +#define test_set_page_writeback_keepwrite(page) \
3120     + __test_set_page_writeback(page, true)
3121    
3122     static inline void set_page_writeback(struct page *page)
3123     {
3124     test_set_page_writeback(page);
3125     }
3126    
3127     +static inline void set_page_writeback_keepwrite(struct page *page)
3128     +{
3129     + test_set_page_writeback_keepwrite(page);
3130     +}
3131     +
3132     #ifdef CONFIG_PAGEFLAGS_EXTENDED
3133     /*
3134     * System with lots of page flags available. This allows separate
3135     diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
3136     index 2ee8cd2466b5..c08730c10c7a 100644
3137     --- a/include/linux/pageblock-flags.h
3138     +++ b/include/linux/pageblock-flags.h
3139     @@ -30,9 +30,12 @@ enum pageblock_bits {
3140     PB_migrate,
3141     PB_migrate_end = PB_migrate + 3 - 1,
3142     /* 3 bits required for migrate types */
3143     -#ifdef CONFIG_COMPACTION
3144     PB_migrate_skip,/* If set the block is skipped by compaction */
3145     -#endif /* CONFIG_COMPACTION */
3146     +
3147     + /*
3148     + * Assume the bits will always align on a word. If this assumption
3149     + * changes then get/set pageblock needs updating.
3150     + */
3151     NR_PAGEBLOCK_BITS
3152     };
3153    
3154     @@ -62,11 +65,33 @@ extern int pageblock_order;
3155     /* Forward declaration */
3156     struct page;
3157    
3158     +unsigned long get_pageblock_flags_mask(struct page *page,
3159     + unsigned long end_bitidx,
3160     + unsigned long mask);
3161     +void set_pageblock_flags_mask(struct page *page,
3162     + unsigned long flags,
3163     + unsigned long end_bitidx,
3164     + unsigned long mask);
3165     +
3166     /* Declarations for getting and setting flags. See mm/page_alloc.c */
3167     -unsigned long get_pageblock_flags_group(struct page *page,
3168     - int start_bitidx, int end_bitidx);
3169     -void set_pageblock_flags_group(struct page *page, unsigned long flags,
3170     - int start_bitidx, int end_bitidx);
3171     +static inline unsigned long get_pageblock_flags_group(struct page *page,
3172     + int start_bitidx, int end_bitidx)
3173     +{
3174     + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
3175     + unsigned long mask = (1 << nr_flag_bits) - 1;
3176     +
3177     + return get_pageblock_flags_mask(page, end_bitidx, mask);
3178     +}
3179     +
3180     +static inline void set_pageblock_flags_group(struct page *page,
3181     + unsigned long flags,
3182     + int start_bitidx, int end_bitidx)
3183     +{
3184     + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
3185     + unsigned long mask = (1 << nr_flag_bits) - 1;
3186     +
3187     + set_pageblock_flags_mask(page, flags, end_bitidx, mask);
3188     +}
3189    
3190     #ifdef CONFIG_COMPACTION
3191     #define get_pageblock_skip(page) \
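The compatibility wrappers above derive the new mask argument from the old (start, end) bit pair: nr_flag_bits = end - start + 1 and mask = (1 << nr_flag_bits) - 1. A small check of that arithmetic:

#include <stdio.h>

static unsigned long bits_to_mask(int start_bitidx, int end_bitidx)
{
        unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;

        return (1UL << nr_flag_bits) - 1;
}

int main(void)
{
        /* PB_migrate..PB_migrate_end spans 3 bits, so the mask is 0x7. */
        printf("mask = %#lx\n", bits_to_mask(0, 2));
        return 0;
}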
3192     diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
3193     index 07d0df6bf768..077904c8b70d 100644
3194     --- a/include/linux/ptrace.h
3195     +++ b/include/linux/ptrace.h
3196     @@ -5,6 +5,7 @@
3197     #include <linux/sched.h> /* For struct task_struct. */
3198     #include <linux/err.h> /* for IS_ERR_VALUE */
3199     #include <linux/bug.h> /* For BUG_ON. */
3200     +#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
3201     #include <uapi/linux/ptrace.h>
3202    
3203     /*
3204     @@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message)
3205     }
3206    
3207     /**
3208     + * ptrace_event_pid - possibly stop for a ptrace event notification
3209     + * @event: %PTRACE_EVENT_* value to report
3210     + * @pid: process identifier for %PTRACE_GETEVENTMSG to return
3211     + *
3212     + * Check whether @event is enabled and, if so, report @event and @pid
3213     + * to the ptrace parent. @pid is reported as the pid_t seen from the
3214     + * ptrace parent's pid namespace.
3215     + *
3216     + * Called without locks.
3217     + */
3218     +static inline void ptrace_event_pid(int event, struct pid *pid)
3219     +{
3220     + /*
3221     + * FIXME: There's a potential race if a ptracer in a different pid
3222     + * namespace than parent attaches between computing message below and
3223     + * when we acquire tasklist_lock in ptrace_stop(). If this happens,
3224     + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
3225     + */
3226     + unsigned long message = 0;
3227     + struct pid_namespace *ns;
3228     +
3229     + rcu_read_lock();
3230     + ns = task_active_pid_ns(rcu_dereference(current->parent));
3231     + if (ns)
3232     + message = pid_nr_ns(pid, ns);
3233     + rcu_read_unlock();
3234     +
3235     + ptrace_event(event, message);
3236     +}
3237     +
3238     +/**
3239     * ptrace_init_task - initialize ptrace state for a new child
3240     * @child: new child task
3241     * @ptrace: true if child should be ptrace'd by parent's tracer
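ptrace_event_pid() reports the pid as seen from the tracer's pid namespace rather than the child's own vnr. A toy model of that per-namespace numbering, with made-up values and a simplified pid_nr_ns(); the real struct pid stores one struct upid per namespace level:

#include <stdio.h>

struct pid_ns { int level; };

struct pid {
        int level;
        int numbers[3];         /* nr at each namespace level */
};

static int pid_nr_ns(const struct pid *pid, const struct pid_ns *ns)
{
        if (ns->level > pid->level)
                return 0;       /* pid not visible in that namespace */
        return pid->numbers[ns->level];
}

int main(void)
{
        struct pid child = { .level = 1, .numbers = { 1042, 7 } };
        struct pid_ns init_ns = { .level = 0 }, container_ns = { .level = 1 };

        printf("tracer in init ns sees %d\n", pid_nr_ns(&child, &init_ns));
        printf("tracer in container sees %d\n",
               pid_nr_ns(&child, &container_ns));
        return 0;
}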
3242     diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
3243     index 4483fadfa68d..d1fb912740f3 100644
3244     --- a/include/target/iscsi/iscsi_transport.h
3245     +++ b/include/target/iscsi/iscsi_transport.h
3246     @@ -68,7 +68,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
3247     extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
3248     struct iscsi_tm_rsp *);
3249     extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
3250     - struct iscsi_text_rsp *);
3251     + struct iscsi_text_rsp *,
3252     + enum iscsit_transport_type);
3253     extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
3254     struct iscsi_reject *);
3255     extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
3256     diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
3257     index 7020e33e742e..f5915b39386a 100644
3258     --- a/include/target/target_core_backend.h
3259     +++ b/include/target/target_core_backend.h
3260     @@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *);
3261     void transport_subsystem_release(struct se_subsystem_api *);
3262    
3263     void target_complete_cmd(struct se_cmd *, u8);
3264     +void target_complete_cmd_with_length(struct se_cmd *, u8, int);
3265    
3266     sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
3267     sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
3268     diff --git a/kernel/fork.c b/kernel/fork.c
3269     index a17621c6cd42..45da005c9961 100644
3270     --- a/kernel/fork.c
3271     +++ b/kernel/fork.c
3272     @@ -1604,10 +1604,12 @@ long do_fork(unsigned long clone_flags,
3273     */
3274     if (!IS_ERR(p)) {
3275     struct completion vfork;
3276     + struct pid *pid;
3277    
3278     trace_sched_process_fork(current, p);
3279    
3280     - nr = task_pid_vnr(p);
3281     + pid = get_task_pid(p, PIDTYPE_PID);
3282     + nr = pid_vnr(pid);
3283    
3284     if (clone_flags & CLONE_PARENT_SETTID)
3285     put_user(nr, parent_tidptr);
3286     @@ -1622,12 +1624,14 @@ long do_fork(unsigned long clone_flags,
3287    
3288     /* forking complete and child started to run, tell ptracer */
3289     if (unlikely(trace))
3290     - ptrace_event(trace, nr);
3291     + ptrace_event_pid(trace, pid);
3292    
3293     if (clone_flags & CLONE_VFORK) {
3294     if (!wait_for_vfork_done(p, &vfork))
3295     - ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
3296     + ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
3297     }
3298     +
3299     + put_pid(pid);
3300     } else {
3301     nr = PTR_ERR(p);
3302     }
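do_fork() now pins the struct pid before the child runs: the child may exit and drop its own reference before the ptrace notification fires, so the extra get/put pair keeps the pid valid across ptrace_event_pid(). A toy refcounted handle illustrating the lifetime, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct pid_handle {
        int nr;
        int refcount;
};

static struct pid_handle *get_pid_handle(struct pid_handle *p)
{
        p->refcount++;
        return p;
}

static void put_pid_handle(struct pid_handle *p)
{
        if (--p->refcount == 0) {
                printf("pid %d freed\n", p->nr);
                free(p);
        }
}

int main(void)
{
        struct pid_handle *pid = malloc(sizeof(*pid));

        pid->nr = 42;
        pid->refcount = 1;                /* the task's own reference */

        get_pid_handle(pid);              /* do_fork()-style pin */
        put_pid_handle(pid);              /* task exits early ... */
        printf("still valid: pid %d\n", pid->nr);  /* ... handle lives on */
        put_pid_handle(pid);              /* do_fork() drops its pin */
        return 0;
}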
3303     diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3304     index 66a1b46d5677..ebb8a9e937fa 100644
3305     --- a/kernel/irq/manage.c
3306     +++ b/kernel/irq/manage.c
3307     @@ -856,8 +856,8 @@ static int irq_thread(void *data)
3308     irq_thread_check_affinity(desc, action);
3309    
3310     action_ret = handler_fn(desc, action);
3311     - if (!noirqdebug)
3312     - note_interrupt(action->irq, desc, action_ret);
3313     + if (action_ret == IRQ_HANDLED)
3314     + atomic_inc(&desc->threads_handled);
3315    
3316     wake_threads_waitq(desc);
3317     }
3318     diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
3319     index a1d8cc63b56e..e2514b0e439e 100644
3320     --- a/kernel/irq/spurious.c
3321     +++ b/kernel/irq/spurious.c
3322     @@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
3323     return action && (action->flags & IRQF_IRQPOLL);
3324     }
3325    
3326     +#define SPURIOUS_DEFERRED 0x80000000
3327     +
3328     void note_interrupt(unsigned int irq, struct irq_desc *desc,
3329     irqreturn_t action_ret)
3330     {
3331     @@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
3332     irq_settings_is_polled(desc))
3333     return;
3334    
3335     - /* we get here again via the threaded handler */
3336     - if (action_ret == IRQ_WAKE_THREAD)
3337     - return;
3338     -
3339     if (bad_action_ret(action_ret)) {
3340     report_bad_irq(irq, desc, action_ret);
3341     return;
3342     }
3343    
3344     + /*
3345     + * We cannot call note_interrupt from the threaded handler
3346     + * because we need to look at the compound of all handlers
3347     + * (primary and threaded). Aside from that, in the threaded
3348     + * shared case we have no serialization against an incoming
3349     + * hardware interrupt while we are dealing with a threaded
3350     + * result.
3351     + *
3352     + * So in case a thread is woken, we just note the fact and
3353     + * defer the analysis to the next hardware interrupt.
3354     + *
3355     + * The threaded handlers store whether they successfully
3356     + * handled an interrupt and we check whether that number
3357     + * changed versus the last invocation.
3358     + *
3359     + * We could handle all interrupts with the delayed by one
3360     + * mechanism, but for the non forced threaded case we'd just
3361     + * add pointless overhead to the straight hardirq interrupts
3362     + * for the sake of a few lines less code.
3363     + */
3364     + if (action_ret & IRQ_WAKE_THREAD) {
3365     + /*
3366     + * There is a thread woken. Check whether one of the
3367     + * shared primary handlers returned IRQ_HANDLED. If
3368     + * not we defer the spurious detection to the next
3369     + * interrupt.
3370     + */
3371     + if (action_ret == IRQ_WAKE_THREAD) {
3372     + int handled;
3373     + /*
3374     + * We use bit 31 of threads_handled_last to
3375     + * denote the deferred spurious detection
3376     + * active. No locking necessary as
3377     + * threads_handled_last is only accessed here
3378     + * and we have the guarantee that hard
3379     + * interrupts are not reentrant.
3380     + */
3381     + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
3382     + desc->threads_handled_last |= SPURIOUS_DEFERRED;
3383     + return;
3384     + }
3385     + /*
3386     + * Check whether one of the threaded handlers
3387     + * returned IRQ_HANDLED since the last
3388     + * interrupt happened.
3389     + *
3390     + * For simplicity we just set bit 31, as it is
3391     + * set in threads_handled_last as well. So we
3392     + * avoid extra masking. And we really do not
3393     + * care about the high bits of the handled
3394     + * count. We just care about the count being
3395     + * different than the one we saw before.
3396     + */
3397     + handled = atomic_read(&desc->threads_handled);
3398     + handled |= SPURIOUS_DEFERRED;
3399     + if (handled != desc->threads_handled_last) {
3400     + action_ret = IRQ_HANDLED;
3401     + /*
3402     + * Note: We keep the SPURIOUS_DEFERRED
3403     + * bit set. We are handling the
3404     + * previous invocation right now.
3405     + * Keep it for the current one, so the
3406     + * next hardware interrupt will
3407     + * account for it.
3408     + */
3409     + desc->threads_handled_last = handled;
3410     + } else {
3411     + /*
3412     + * None of the threaded handlers felt
3413     + * responsible for the last interrupt
3414     + *
3415     + * We keep the SPURIOUS_DEFERRED bit
3416     + * set in threads_handled_last as we
3417     + * need to account for the current
3418     + * interrupt as well.
3419     + */
3420     + action_ret = IRQ_NONE;
3421     + }
3422     + } else {
3423     + /*
3424     + * One of the primary handlers returned
3425     + * IRQ_HANDLED. So we don't care about the
3426     + * threaded handlers on the same line. Clear
3427     + * the deferred detection bit.
3428     + *
3429     + * In theory we could/should check whether the
3430     + * deferred bit is set and take the result of
3431     + * the previous run into account here as
3432     + * well. But it's really not worth the
3433     + * trouble. If every other interrupt is
3434     + * handled we never trigger the spurious
3435     + * detector. And if this is just the one out
3436     + * of 100k unhandled ones which is handled
3437     + * then we merely delay the spurious detection
3438     + * by one hard interrupt. Not a real problem.
3439     + */
3440     + desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
3441     + }
3442     + }
3443     +
3444     if (unlikely(action_ret == IRQ_NONE)) {
3445     /*
3446     * If we are seeing only the odd spurious IRQ caused by
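The deferred logic above boils down to a tagged counter compare: the irq thread bumps threads_handled on IRQ_HANDLED, and the next hard interrupt compares that count (with bit 31 marking "deferral active") against the last snapshot. A single-threaded toy model of the state machine:

#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000u

static unsigned int threads_handled;
static unsigned int threads_handled_last;

static const char *check_deferred(void)
{
        unsigned int handled;

        if (!(threads_handled_last & SPURIOUS_DEFERRED)) {
                threads_handled_last |= SPURIOUS_DEFERRED;
                return "deferred";      /* first wake: decide next time */
        }

        /* Tag with bit 31 so no extra masking is needed, as above. */
        handled = threads_handled | SPURIOUS_DEFERRED;
        if (handled != threads_handled_last) {
                threads_handled_last = handled;
                return "handled";
        }
        return "unhandled";
}

int main(void)
{
        printf("%s\n", check_deferred());   /* deferred */
        threads_handled++;                  /* thread reports IRQ_HANDLED */
        printf("%s\n", check_deferred());   /* handled */
        printf("%s\n", check_deferred());   /* unhandled: count unchanged */
        return 0;
}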
3447     diff --git a/kernel/kthread.c b/kernel/kthread.c
3448     index b5ae3ee860a9..f6249f9ab33e 100644
3449     --- a/kernel/kthread.c
3450     +++ b/kernel/kthread.c
3451     @@ -262,7 +262,7 @@ static void create_kthread(struct kthread_create_info *create)
3452     * kthread_stop() has been called). The return value should be zero
3453     * or a negative error number; it will be passed to kthread_stop().
3454     *
3455     - * Returns a task_struct or ERR_PTR(-ENOMEM).
3456     + * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
3457     */
3458     struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
3459     void *data, int node,
3460     @@ -298,7 +298,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
3461     * that thread.
3462     */
3463     if (xchg(&create->done, NULL))
3464     - return ERR_PTR(-ENOMEM);
3465     + return ERR_PTR(-EINTR);
3466     /*
3467     * kthreadd (or new kernel thread) will call complete()
3468     * shortly.
3469     diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
3470     index 14193d596d78..ab29b6a22669 100644
3471     --- a/kernel/locking/rtmutex-debug.h
3472     +++ b/kernel/locking/rtmutex-debug.h
3473     @@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
3474     {
3475     return (waiter != NULL);
3476     }
3477     +
3478     +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
3479     +{
3480     + debug_rt_mutex_print_deadlock(w);
3481     +}
3482     diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
3483     index 0339f515531a..1ce0f6c6eb01 100644
3484     --- a/kernel/locking/rtmutex.c
3485     +++ b/kernel/locking/rtmutex.c
3486     @@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
3487     owner = *p;
3488     } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
3489     }
3490     +
3491     +/*
3492     + * Safe fastpath aware unlock:
3493     + * 1) Clear the waiters bit
3494     + * 2) Drop lock->wait_lock
3495     + * 3) Try to unlock the lock with cmpxchg
3496     + */
3497     +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
3498     + __releases(lock->wait_lock)
3499     +{
3500     + struct task_struct *owner = rt_mutex_owner(lock);
3501     +
3502     + clear_rt_mutex_waiters(lock);
3503     + raw_spin_unlock(&lock->wait_lock);
3504     + /*
3505     + * If a new waiter comes in between the unlock and the cmpxchg
3506     + * we have two situations:
3507     + *
3508     + * unlock(wait_lock);
3509     + * lock(wait_lock);
3510     + * cmpxchg(p, owner, 0) == owner
3511     + * mark_rt_mutex_waiters(lock);
3512     + * acquire(lock);
3513     + * or:
3514     + *
3515     + * unlock(wait_lock);
3516     + * lock(wait_lock);
3517     + * mark_rt_mutex_waiters(lock);
3518     + *
3519     + * cmpxchg(p, owner, 0) != owner
3520     + * enqueue_waiter();
3521     + * unlock(wait_lock);
3522     + * lock(wait_lock);
3523     + * wake waiter();
3524     + * unlock(wait_lock);
3525     + * lock(wait_lock);
3526     + * acquire(lock);
3527     + */
3528     + return rt_mutex_cmpxchg(lock, owner, NULL);
3529     +}
3530     +
3531     #else
3532     # define rt_mutex_cmpxchg(l,c,n) (0)
3533     static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
3534     @@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
3535     lock->owner = (struct task_struct *)
3536     ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
3537     }
3538     +
3539     +/*
3540     + * Simple slow path only version: lock->owner is protected by lock->wait_lock.
3541     + */
3542     +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
3543     + __releases(lock->wait_lock)
3544     +{
3545     + lock->owner = NULL;
3546     + raw_spin_unlock(&lock->wait_lock);
3547     + return true;
3548     +}
3549     #endif
3550    
3551     static inline int
3552     @@ -248,27 +300,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
3553     */
3554     int max_lock_depth = 1024;
3555    
3556     +static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
3557     +{
3558     + return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
3559     +}
3560     +
3561     /*
3562     * Adjust the priority chain. Also used for deadlock detection.
3563     * Decreases task's usage by one - may thus free the task.
3564     *
3565     - * @task: the task owning the mutex (owner) for which a chain walk is probably
3566     - * needed
3567     + * @task: the task owning the mutex (owner) for which a chain walk is
3568     + * probably needed
3569     * @deadlock_detect: do we have to carry out deadlock detection?
3570     - * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
3571     - * things for a task that has just got its priority adjusted, and
3572     - * is waiting on a mutex)
3573     + * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
3574     + * things for a task that has just got its priority adjusted, and
3575     + * is waiting on a mutex)
3576     + * @next_lock: the mutex on which the owner of @orig_lock was blocked before
3577     + * we dropped its pi_lock. Is never dereferenced, only used for
3578     + * comparison to detect lock chain changes.
3579     * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
3580     - * its priority to the mutex owner (can be NULL in the case
3581     - * depicted above or if the top waiter is gone away and we are
3582     - * actually deboosting the owner)
3583     - * @top_task: the current top waiter
3584     + * its priority to the mutex owner (can be NULL in the case
3585     + * depicted above or if the top waiter is gone away and we are
3586     + * actually deboosting the owner)
3587     + * @top_task: the current top waiter
3588     *
3589     * Returns 0 or -EDEADLK.
3590     */
3591     static int rt_mutex_adjust_prio_chain(struct task_struct *task,
3592     int deadlock_detect,
3593     struct rt_mutex *orig_lock,
3594     + struct rt_mutex *next_lock,
3595     struct rt_mutex_waiter *orig_waiter,
3596     struct task_struct *top_task)
3597     {
3598     @@ -302,7 +363,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
3599     }
3600     put_task_struct(task);
3601    
3602     - return deadlock_detect ? -EDEADLK : 0;
3603     + return -EDEADLK;
3604     }
3605     retry:
3606     /*
3607     @@ -327,6 +388,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
3608     goto out_unlock_pi;
3609    
3610     /*
3611     + * We dropped all locks after taking a refcount on @task, so
3612     + * the task might have moved on in the lock chain or even left
3613     + * the chain completely and blocks now on an unrelated lock or
3614     + * on @orig_lock.
3615     + *
3616     + * We stored the lock on which @task was blocked in @next_lock,
3617     + * so we can detect the chain change.
3618     + */
3619     + if (next_lock != waiter->lock)
3620     + goto out_unlock_pi;
3621     +
3622     + /*
3623     * Drop out, when the task has no waiters. Note,
3624     * top_waiter can be NULL, when we are in the deboosting
3625     * mode!
3626     @@ -365,7 +438,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
3627     if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
3628     debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
3629     raw_spin_unlock(&lock->wait_lock);
3630     - ret = deadlock_detect ? -EDEADLK : 0;
3631     + ret = -EDEADLK;
3632     goto out_unlock_pi;
3633     }
3634    
3635     @@ -410,11 +483,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
3636     __rt_mutex_adjust_prio(task);
3637     }
3638    
3639     + /*
3640     + * Check whether the task which owns the current lock is pi
3641     + * blocked itself. If yes we store a pointer to the lock for
3642     + * the lock chain change detection above. After we dropped
3643     + * task->pi_lock next_lock cannot be dereferenced anymore.
3644     + */
3645     + next_lock = task_blocked_on_lock(task);
3646     +
3647     raw_spin_unlock_irqrestore(&task->pi_lock, flags);
3648    
3649     top_waiter = rt_mutex_top_waiter(lock);
3650     raw_spin_unlock(&lock->wait_lock);
3651    
3652     + /*
3653     + * We reached the end of the lock chain. Stop right here. No
3654     + * point to go back just to figure that out.
3655     + */
3656     + if (!next_lock)
3657     + goto out_put_task;
3658     +
3659     if (!detect_deadlock && waiter != top_waiter)
3660     goto out_put_task;
3661    
3662     @@ -524,8 +612,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
3663     {
3664     struct task_struct *owner = rt_mutex_owner(lock);
3665     struct rt_mutex_waiter *top_waiter = waiter;
3666     - unsigned long flags;
3667     + struct rt_mutex *next_lock;
3668     int chain_walk = 0, res;
3669     + unsigned long flags;
3670    
3671     /*
3672     * Early deadlock detection. We really don't want the task to
3673     @@ -536,7 +625,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
3674     * which is wrong, as the other waiter is not in a deadlock
3675     * situation.
3676     */
3677     - if (detect_deadlock && owner == task)
3678     + if (owner == task)
3679     return -EDEADLK;
3680    
3681     raw_spin_lock_irqsave(&task->pi_lock, flags);
3682     @@ -557,20 +646,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
3683     if (!owner)
3684     return 0;
3685    
3686     + raw_spin_lock_irqsave(&owner->pi_lock, flags);
3687     if (waiter == rt_mutex_top_waiter(lock)) {
3688     - raw_spin_lock_irqsave(&owner->pi_lock, flags);
3689     rt_mutex_dequeue_pi(owner, top_waiter);
3690     rt_mutex_enqueue_pi(owner, waiter);
3691    
3692     __rt_mutex_adjust_prio(owner);
3693     if (owner->pi_blocked_on)
3694     chain_walk = 1;
3695     - raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
3696     - }
3697     - else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
3698     + } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
3699     chain_walk = 1;
3700     + }
3701    
3702     - if (!chain_walk)
3703     + /* Store the lock on which owner is blocked or NULL */
3704     + next_lock = task_blocked_on_lock(owner);
3705     +
3706     + raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
3707     + /*
3708     + * Even if full deadlock detection is on, if the owner is not
3709     + * blocked itself, we can avoid finding this out in the chain
3710     + * walk.
3711     + */
3712     + if (!chain_walk || !next_lock)
3713     return 0;
3714    
3715     /*
3716     @@ -582,8 +679,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
3717    
3718     raw_spin_unlock(&lock->wait_lock);
3719    
3720     - res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
3721     - task);
3722     + res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
3723     + next_lock, waiter, task);
3724    
3725     raw_spin_lock(&lock->wait_lock);
3726    
3727     @@ -593,7 +690,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
3728     /*
3729     * Wake up the next waiter on the lock.
3730     *
3731     - * Remove the top waiter from the current tasks waiter list and wake it up.
3732     + * Remove the top waiter from the current task's pi waiter list and
3733     + * wake it up.
3734     *
3735     * Called with lock->wait_lock held.
3736     */
3737     @@ -614,10 +712,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
3738     */
3739     rt_mutex_dequeue_pi(current, waiter);
3740    
3741     - rt_mutex_set_owner(lock, NULL);
3742     + /*
3743     + * As we are waking up the top waiter, and the waiter stays
3744     + * queued on the lock until it gets the lock, this lock
3745     + * obviously has waiters. Just set the bit here and this has
3746     + * the added benefit of forcing all new tasks into the
3747     + * slow path making sure no task of lower priority than
3748     + * the top waiter can steal this lock.
3749     + */
3750     + lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
3751    
3752     raw_spin_unlock_irqrestore(&current->pi_lock, flags);
3753    
3754     + /*
3755     + * It's safe to dereference waiter as it cannot go away as
3756     + * long as we hold lock->wait_lock. The waiter task needs to
3757     + * acquire it in order to dequeue the waiter.
3758     + */
3759     wake_up_process(waiter->task);
3760     }
3761    
3762     @@ -632,8 +743,8 @@ static void remove_waiter(struct rt_mutex *lock,
3763     {
3764     int first = (waiter == rt_mutex_top_waiter(lock));
3765     struct task_struct *owner = rt_mutex_owner(lock);
3766     + struct rt_mutex *next_lock = NULL;
3767     unsigned long flags;
3768     - int chain_walk = 0;
3769    
3770     raw_spin_lock_irqsave(&current->pi_lock, flags);
3771     rt_mutex_dequeue(lock, waiter);
3772     @@ -657,13 +768,13 @@ static void remove_waiter(struct rt_mutex *lock,
3773     }
3774     __rt_mutex_adjust_prio(owner);
3775    
3776     - if (owner->pi_blocked_on)
3777     - chain_walk = 1;
3778     + /* Store the lock on which owner is blocked or NULL */
3779     + next_lock = task_blocked_on_lock(owner);
3780    
3781     raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
3782     }
3783    
3784     - if (!chain_walk)
3785     + if (!next_lock)
3786     return;
3787    
3788     /* gets dropped in rt_mutex_adjust_prio_chain()! */
3789     @@ -671,7 +782,7 @@ static void remove_waiter(struct rt_mutex *lock,
3790    
3791     raw_spin_unlock(&lock->wait_lock);
3792    
3793     - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
3794     + rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
3795    
3796     raw_spin_lock(&lock->wait_lock);
3797     }
3798     @@ -684,6 +795,7 @@ static void remove_waiter(struct rt_mutex *lock,
3799     void rt_mutex_adjust_pi(struct task_struct *task)
3800     {
3801     struct rt_mutex_waiter *waiter;
3802     + struct rt_mutex *next_lock;
3803     unsigned long flags;
3804    
3805     raw_spin_lock_irqsave(&task->pi_lock, flags);
3806     @@ -694,12 +806,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
3807     raw_spin_unlock_irqrestore(&task->pi_lock, flags);
3808     return;
3809     }
3810     -
3811     + next_lock = waiter->lock;
3812     raw_spin_unlock_irqrestore(&task->pi_lock, flags);
3813    
3814     /* gets dropped in rt_mutex_adjust_prio_chain()! */
3815     get_task_struct(task);
3816     - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
3817     +
3818     + rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
3819     }
3820    
3821     /**
3822     @@ -751,6 +864,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
3823     return ret;
3824     }
3825    
3826     +static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
3827     + struct rt_mutex_waiter *w)
3828     +{
3829     + /*
3830     + * If the result is not -EDEADLOCK or the caller requested
3831     + * deadlock detection, nothing to do here.
3832     + */
3833     + if (res != -EDEADLOCK || detect_deadlock)
3834     + return;
3835     +
3836     + /*
3837     + * Yell loudly and stop the task right here.
3838     + */
3839     + rt_mutex_print_deadlock(w);
3840     + while (1) {
3841     + set_current_state(TASK_INTERRUPTIBLE);
3842     + schedule();
3843     + }
3844     +}
3845     +
3846     /*
3847     * Slow path lock function:
3848     */
3849     @@ -790,8 +923,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
3850    
3851     set_current_state(TASK_RUNNING);
3852    
3853     - if (unlikely(ret))
3854     + if (unlikely(ret)) {
3855     remove_waiter(lock, &waiter);
3856     + rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
3857     + }
3858    
3859     /*
3860     * try_to_take_rt_mutex() sets the waiter bit
3861     @@ -847,12 +982,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
3862    
3863     rt_mutex_deadlock_account_unlock(current);
3864    
3865     - if (!rt_mutex_has_waiters(lock)) {
3866     - lock->owner = NULL;
3867     - raw_spin_unlock(&lock->wait_lock);
3868     - return;
3869     + /*
3870     + * We must be careful here if the fast path is enabled. If we
3871     + * have no waiters queued we cannot set owner to NULL here
3872     + * because of:
3873     + *
3874     + * foo->lock->owner = NULL;
3875     + * rtmutex_lock(foo->lock); <- fast path
3876     + * free = atomic_dec_and_test(foo->refcnt);
3877     + * rtmutex_unlock(foo->lock); <- fast path
3878     + * if (free)
3879     + * kfree(foo);
3880     + * raw_spin_unlock(foo->lock->wait_lock);
3881     + *
3882     + * So for the fastpath enabled kernel:
3883     + *
3884     + * Nothing can set the waiters bit as long as we hold
3885     + * lock->wait_lock. So we do the following sequence:
3886     + *
3887     + * owner = rt_mutex_owner(lock);
3888     + * clear_rt_mutex_waiters(lock);
3889     + * raw_spin_unlock(&lock->wait_lock);
3890     + * if (cmpxchg(&lock->owner, owner, 0) == owner)
3891     + * return;
3892     + * goto retry;
3893     + *
3894     + * The fastpath disabled variant is simple as all access to
3895     + * lock->owner is serialized by lock->wait_lock:
3896     + *
3897     + * lock->owner = NULL;
3898     + * raw_spin_unlock(&lock->wait_lock);
3899     + */
3900     + while (!rt_mutex_has_waiters(lock)) {
3901     + /* Drops lock->wait_lock ! */
3902     + if (unlock_rt_mutex_safe(lock) == true)
3903     + return;
3904     + /* Relock the rtmutex and try again */
3905     + raw_spin_lock(&lock->wait_lock);
3906     }
3907    
3908     + /*
3909     + * The wakeup next waiter path does not suffer from the above
3910     + * race. See the comments there.
3911     + */
3912     wakeup_next_waiter(lock);
3913    
3914     raw_spin_unlock(&lock->wait_lock);
3915     @@ -1100,7 +1272,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
3916     return 1;
3917     }
3918    
3919     - ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
3920     + /* We enforce deadlock detection for futexes */
3921     + ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
3922    
3923     if (ret && !rt_mutex_owner(lock)) {
3924     /*
3925     diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
3926     index a1a1dd06421d..f6a1f3c133b1 100644
3927     --- a/kernel/locking/rtmutex.h
3928     +++ b/kernel/locking/rtmutex.h
3929     @@ -24,3 +24,8 @@
3930     #define debug_rt_mutex_print_deadlock(w) do { } while (0)
3931     #define debug_rt_mutex_detect_deadlock(w,d) (d)
3932     #define debug_rt_mutex_reset_waiter(w) do { } while (0)
3933     +
3934     +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
3935     +{
3936     + WARN(1, "rtmutex deadlock detected\n");
3937     +}
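unlock_rt_mutex_safe() resolves the fastpath race by dropping wait_lock first and then trying a cmpxchg from "owner, no waiters" to NULL; if a waiter slipped in, the cmpxchg fails and the slow path retries. A minimal C11-atomics sketch of that handoff, with the low bit of a hypothetical owner word as the waiters flag:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS 1UL

static _Atomic uintptr_t owner_word;

static int unlock_safe(uintptr_t me)
{
        uintptr_t expected = me;        /* owner with waiters bit clear */

        /* Succeeds only if nobody set HAS_WAITERS in the meantime. */
        return atomic_compare_exchange_strong(&owner_word, &expected, 0);
}

int main(void)
{
        uintptr_t me = 0x1000;

        atomic_store(&owner_word, me);
        printf("fast unlock: %d\n", unlock_safe(me));   /* 1: no waiters */

        atomic_store(&owner_word, me | HAS_WAITERS);
        printf("fast unlock: %d\n", unlock_safe(me));   /* 0: take slow path */
        return 0;
}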
3938     diff --git a/lib/idr.c b/lib/idr.c
3939     index bfe4db4e165f..674c30bc2ed0 100644
3940     --- a/lib/idr.c
3941     +++ b/lib/idr.c
3942     @@ -250,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
3943     id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
3944    
3945     /* if already at the top layer, we need to grow */
3946     - if (id >= 1 << (idp->layers * IDR_BITS)) {
3947     + if (id > idr_max(idp->layers)) {
3948     *starting_id = id;
3949     return -EAGAIN;
3950     }
3951     @@ -827,12 +827,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
3952     if (!p)
3953     return ERR_PTR(-EINVAL);
3954    
3955     - n = (p->layer+1) * IDR_BITS;
3956     -
3957     - if (id >= (1 << n))
3958     + if (id > idr_max(p->layer + 1))
3959     return ERR_PTR(-EINVAL);
3960    
3961     - n -= IDR_BITS;
3962     + n = p->layer * IDR_BITS;
3963     while ((n > 0) && p) {
3964     p = p->ary[(id >> n) & IDR_MASK];
3965     n -= IDR_BITS;
3966     diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
3967     index 99a03acb7d47..b74da447e81e 100644
3968     --- a/lib/lz4/lz4_decompress.c
3969     +++ b/lib/lz4/lz4_decompress.c
3970     @@ -108,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
3971     if (length == ML_MASK) {
3972     for (; *ip == 255; length += 255)
3973     ip++;
3974     + if (unlikely(length > (size_t)(length + *ip)))
3975     + goto _output_error;
3976     length += *ip++;
3977     }
3978    
3979     @@ -157,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
3980    
3981     /* write overflow error detected */
3982     _output_error:
3983     - return (int) (-(((char *)ip) - source));
3984     + return -1;
3985     }
3986    
3987     static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
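The added lz4 guard catches unsigned wraparound while the 255-terminated length bytes are summed: if length + *ip overflows, the result is smaller than length itself. A small demonstration of the same test:

#include <stddef.h>
#include <stdio.h>

static int accumulate(size_t length, unsigned char byte, size_t *out)
{
        if (length > (size_t)(length + byte))
                return -1;              /* wraparound detected */
        *out = length + byte;
        return 0;
}

int main(void)
{
        size_t out;

        printf("ok: %d\n", accumulate(1000, 200, &out));            /* 0 */
        printf("overflow: %d\n", accumulate((size_t)-100, 200, &out)); /* -1 */
        return 0;
}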
3988     diff --git a/mm/Kconfig b/mm/Kconfig
3989     index 2888024e0b0a..9b63c1584a42 100644
3990     --- a/mm/Kconfig
3991     +++ b/mm/Kconfig
3992     @@ -263,6 +263,9 @@ config MIGRATION
3993     pages as migration can relocate pages to satisfy a huge page
3994     allocation instead of reclaiming.
3995    
3996     +config ARCH_ENABLE_HUGEPAGE_MIGRATION
3997     + boolean
3998     +
3999     config PHYS_ADDR_T_64BIT
4000     def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
4001    
4002     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4003     index e346fa9d30ea..33365e9ce6a7 100644
4004     --- a/mm/memory-failure.c
4005     +++ b/mm/memory-failure.c
4006     @@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
4007     #endif
4008     si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
4009    
4010     - if ((flags & MF_ACTION_REQUIRED) && t == current) {
4011     + if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
4012     si.si_code = BUS_MCEERR_AR;
4013     - ret = force_sig_info(SIGBUS, &si, t);
4014     + ret = force_sig_info(SIGBUS, &si, current);
4015     } else {
4016     /*
4017     * Don't use force here, it's convenient if the signal
4018     @@ -384,20 +384,51 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
4019     }
4020     }
4021    
4022     -static int task_early_kill(struct task_struct *tsk)
4023     +/*
4024     + * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
4025     + * on behalf of the thread group. Return task_struct of the (first found)
4026     + * dedicated thread if found, and return NULL otherwise.
4027     + *
4028     + * We already hold read_lock(&tasklist_lock) in the caller, so we don't
4029     + * have to call rcu_read_lock/unlock() in this function.
4030     + */
4031     +static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
4032     {
4033     + struct task_struct *t;
4034     +
4035     + for_each_thread(tsk, t)
4036     + if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
4037     + return t;
4038     + return NULL;
4039     +}
4040     +
4041     +/*
4042     + * Determine whether a given process is an "early kill" process which expects
4043     + * to be signaled when some page under the process is hwpoisoned.
4044     + * Return task_struct of the dedicated thread (main thread unless explicitly
4045     + * specified) if the process is "early kill," and otherwise returns NULL.
4046     + */
4047     +static struct task_struct *task_early_kill(struct task_struct *tsk,
4048     + int force_early)
4049     +{
4050     + struct task_struct *t;
4051     if (!tsk->mm)
4052     - return 0;
4053     - if (tsk->flags & PF_MCE_PROCESS)
4054     - return !!(tsk->flags & PF_MCE_EARLY);
4055     - return sysctl_memory_failure_early_kill;
4056     + return NULL;
4057     + if (force_early)
4058     + return tsk;
4059     + t = find_early_kill_thread(tsk);
4060     + if (t)
4061     + return t;
4062     + if (sysctl_memory_failure_early_kill)
4063     + return tsk;
4064     + return NULL;
4065     }
4066    
4067     /*
4068     * Collect processes when the error hit an anonymous page.
4069     */
4070     static void collect_procs_anon(struct page *page, struct list_head *to_kill,
4071     - struct to_kill **tkc)
4072     + struct to_kill **tkc, int force_early)
4073     {
4074     struct vm_area_struct *vma;
4075     struct task_struct *tsk;
4076     @@ -412,16 +443,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
4077     read_lock(&tasklist_lock);
4078     for_each_process (tsk) {
4079     struct anon_vma_chain *vmac;
4080     + struct task_struct *t = task_early_kill(tsk, force_early);
4081    
4082     - if (!task_early_kill(tsk))
4083     + if (!t)
4084     continue;
4085     anon_vma_interval_tree_foreach(vmac, &av->rb_root,
4086     pgoff, pgoff) {
4087     vma = vmac->vma;
4088     if (!page_mapped_in_vma(page, vma))
4089     continue;
4090     - if (vma->vm_mm == tsk->mm)
4091     - add_to_kill(tsk, page, vma, to_kill, tkc);
4092     + if (vma->vm_mm == t->mm)
4093     + add_to_kill(t, page, vma, to_kill, tkc);
4094     }
4095     }
4096     read_unlock(&tasklist_lock);
4097     @@ -432,7 +464,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
4098     * Collect processes when the error hit a file mapped page.
4099     */
4100     static void collect_procs_file(struct page *page, struct list_head *to_kill,
4101     - struct to_kill **tkc)
4102     + struct to_kill **tkc, int force_early)
4103     {
4104     struct vm_area_struct *vma;
4105     struct task_struct *tsk;
4106     @@ -442,10 +474,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
4107     read_lock(&tasklist_lock);
4108     for_each_process(tsk) {
4109     pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
4110     + struct task_struct *t = task_early_kill(tsk, force_early);
4111    
4112     - if (!task_early_kill(tsk))
4113     + if (!t)
4114     continue;
4115     -
4116     vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
4117     pgoff) {
4118     /*
4119     @@ -455,8 +487,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
4120     * Assume applications who requested early kill want
4121     * to be informed of all such data corruptions.
4122     */
4123     - if (vma->vm_mm == tsk->mm)
4124     - add_to_kill(tsk, page, vma, to_kill, tkc);
4125     + if (vma->vm_mm == t->mm)
4126     + add_to_kill(t, page, vma, to_kill, tkc);
4127     }
4128     }
4129     read_unlock(&tasklist_lock);
4130     @@ -469,7 +501,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
4131     * First preallocate one tokill structure outside the spin locks,
4132     * so that we can kill at least one process reasonably reliable.
4133     */
4134     -static void collect_procs(struct page *page, struct list_head *tokill)
4135     +static void collect_procs(struct page *page, struct list_head *tokill,
4136     + int force_early)
4137     {
4138     struct to_kill *tk;
4139    
4140     @@ -480,9 +513,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
4141     if (!tk)
4142     return;
4143     if (PageAnon(page))
4144     - collect_procs_anon(page, tokill, &tk);
4145     + collect_procs_anon(page, tokill, &tk, force_early);
4146     else
4147     - collect_procs_file(page, tokill, &tk);
4148     + collect_procs_file(page, tokill, &tk, force_early);
4149     kfree(tk);
4150     }
4151    
4152     @@ -967,7 +1000,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4153     * there's nothing that can be done.
4154     */
4155     if (kill)
4156     - collect_procs(ppage, &tokill);
4157     + collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
4158    
4159     ret = try_to_unmap(ppage, ttu);
4160     if (ret != SWAP_SUCCESS)
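find_early_kill_thread() scans the thread group for the first thread that set both PF_MCE_PROCESS and PF_MCE_EARLY via prctl(PR_MCE_KILL_EARLY), and SIGBUS(BUS_MCEERR_AO) is routed there instead of to the main thread. A toy scan over an array standing in for for_each_thread():

#include <stdio.h>

#define PF_MCE_PROCESS 0x1
#define PF_MCE_EARLY   0x2

struct task {
        const char *comm;
        unsigned int flags;
};

static const struct task *find_early_kill_thread(const struct task *group,
                                                 int nr)
{
        for (int i = 0; i < nr; i++)
                if ((group[i].flags & PF_MCE_PROCESS) &&
                    (group[i].flags & PF_MCE_EARLY))
                        return &group[i];
        return NULL;
}

int main(void)
{
        const struct task group[] = {
                { "main",    0 },
                { "mce-hdl", PF_MCE_PROCESS | PF_MCE_EARLY },
        };
        const struct task *t = find_early_kill_thread(group, 2);

        printf("SIGBUS goes to: %s\n", t ? t->comm : "main (default)");
        return 0;
}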
4161     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4162     index 8f6daa62206d..d013dba21429 100644
4163     --- a/mm/page-writeback.c
4164     +++ b/mm/page-writeback.c
4165     @@ -2398,7 +2398,7 @@ int test_clear_page_writeback(struct page *page)
4166     return ret;
4167     }
4168    
4169     -int test_set_page_writeback(struct page *page)
4170     +int __test_set_page_writeback(struct page *page, bool keep_write)
4171     {
4172     struct address_space *mapping = page_mapping(page);
4173     int ret;
4174     @@ -2423,9 +2423,10 @@ int test_set_page_writeback(struct page *page)
4175     radix_tree_tag_clear(&mapping->page_tree,
4176     page_index(page),
4177     PAGECACHE_TAG_DIRTY);
4178     - radix_tree_tag_clear(&mapping->page_tree,
4179     - page_index(page),
4180     - PAGECACHE_TAG_TOWRITE);
4181     + if (!keep_write)
4182     + radix_tree_tag_clear(&mapping->page_tree,
4183     + page_index(page),
4184     + PAGECACHE_TAG_TOWRITE);
4185     spin_unlock_irqrestore(&mapping->tree_lock, flags);
4186     } else {
4187     ret = TestSetPageWriteback(page);
4188     @@ -2436,7 +2437,7 @@ int test_set_page_writeback(struct page *page)
4189     return ret;
4190    
4191     }
4192     -EXPORT_SYMBOL(test_set_page_writeback);
4193     +EXPORT_SYMBOL(__test_set_page_writeback);
4194    
4195     /*
4196     * Return true if any of the pages in the mapping are marked with the
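__test_set_page_writeback() always clears the DIRTY tag but now leaves TOWRITE in place when keep_write is set, so a partially written page stays queued for the ongoing sync pass. A toy model of the tag handling behind the two wrapper macros:

#include <stdbool.h>
#include <stdio.h>

#define TAG_DIRTY   0x1
#define TAG_TOWRITE 0x2

static unsigned int tags = TAG_DIRTY | TAG_TOWRITE;

/* DIRTY always goes; TOWRITE survives only when keep_write is set. */
static void set_writeback(bool keep_write)
{
        tags &= ~TAG_DIRTY;
        if (!keep_write)
                tags &= ~TAG_TOWRITE;
}

#define set_writeback_default()   set_writeback(false)
#define set_writeback_keepwrite() set_writeback(true)

int main(void)
{
        set_writeback_keepwrite();      /* partial ext4 writepage */
        printf("TOWRITE kept: %s\n", (tags & TAG_TOWRITE) ? "yes" : "no");
        return 0;
}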
4197     diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4198     index 7387a671234e..4b5d4f6360d2 100644
4199     --- a/mm/page_alloc.c
4200     +++ b/mm/page_alloc.c
4201     @@ -6007,53 +6007,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
4202     * @end_bitidx: The last bit of interest
4203     * returns pageblock_bits flags
4204     */
4205     -unsigned long get_pageblock_flags_group(struct page *page,
4206     - int start_bitidx, int end_bitidx)
4207     +unsigned long get_pageblock_flags_mask(struct page *page,
4208     + unsigned long end_bitidx,
4209     + unsigned long mask)
4210     {
4211     struct zone *zone;
4212     unsigned long *bitmap;
4213     - unsigned long pfn, bitidx;
4214     - unsigned long flags = 0;
4215     - unsigned long value = 1;
4216     + unsigned long pfn, bitidx, word_bitidx;
4217     + unsigned long word;
4218    
4219     zone = page_zone(page);
4220     pfn = page_to_pfn(page);
4221     bitmap = get_pageblock_bitmap(zone, pfn);
4222     bitidx = pfn_to_bitidx(zone, pfn);
4223     + word_bitidx = bitidx / BITS_PER_LONG;
4224     + bitidx &= (BITS_PER_LONG-1);
4225    
4226     - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4227     - if (test_bit(bitidx + start_bitidx, bitmap))
4228     - flags |= value;
4229     -
4230     - return flags;
4231     + word = bitmap[word_bitidx];
4232     + bitidx += end_bitidx;
4233     + return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
4234     }
4235    
4236     /**
4237     - * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
4238     + * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
4239     * @page: The page within the block of interest
4240     * @start_bitidx: The first bit of interest
4241     * @end_bitidx: The last bit of interest
4242     * @flags: The flags to set
4243     */
4244     -void set_pageblock_flags_group(struct page *page, unsigned long flags,
4245     - int start_bitidx, int end_bitidx)
4246     +void set_pageblock_flags_mask(struct page *page, unsigned long flags,
4247     + unsigned long end_bitidx,
4248     + unsigned long mask)
4249     {
4250     struct zone *zone;
4251     unsigned long *bitmap;
4252     - unsigned long pfn, bitidx;
4253     - unsigned long value = 1;
4254     + unsigned long pfn, bitidx, word_bitidx;
4255     + unsigned long old_word, word;
4256     +
4257     + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
4258    
4259     zone = page_zone(page);
4260     pfn = page_to_pfn(page);
4261     bitmap = get_pageblock_bitmap(zone, pfn);
4262     bitidx = pfn_to_bitidx(zone, pfn);
4263     + word_bitidx = bitidx / BITS_PER_LONG;
4264     + bitidx &= (BITS_PER_LONG-1);
4265     +
4266     VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
4267    
4268     - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
4269     - if (flags & value)
4270     - __set_bit(bitidx + start_bitidx, bitmap);
4271     - else
4272     - __clear_bit(bitidx + start_bitidx, bitmap);
4273     + bitidx += end_bitidx;
4274     + mask <<= (BITS_PER_LONG - bitidx - 1);
4275     + flags <<= (BITS_PER_LONG - bitidx - 1);
4276     +
4277     + word = ACCESS_ONCE(bitmap[word_bitidx]);
4278     + for (;;) {
4279     + old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
4280     + if (word == old_word)
4281     + break;
4282     + word = old_word;
4283     + }
4284     }
4285    
4286     /*
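set_pageblock_flags_mask() replaces per-bit __set_bit()/__clear_bit() with one lockless read-modify-write of the containing word, retried via cmpxchg when another CPU raced. The same loop in portable C11 atomics, on a single hypothetical bitmap word:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long bitmap_word;

/* Rewrite `mask` bits at `shift` to `flags`, retrying on contention
 * exactly like the patched cmpxchg loop. */
static void set_flags(unsigned long flags, unsigned long mask,
                      unsigned int shift)
{
        unsigned long word = atomic_load(&bitmap_word);

        mask <<= shift;
        flags <<= shift;

        /* On failure, `word` is refreshed with the value another CPU
         * wrote, and the new word is recomputed and retried. */
        while (!atomic_compare_exchange_weak(&bitmap_word, &word,
                                             (word & ~mask) | flags))
                ;
}

int main(void)
{
        atomic_store(&bitmap_word, 0xf0f0UL);
        set_flags(0x5, 0x7, 4);         /* 3 migratetype-style bits */
        printf("word = %#lx\n", atomic_load(&bitmap_word));  /* 0xf0d0 */
        return 0;
}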
4287     diff --git a/mm/rmap.c b/mm/rmap.c
4288     index 5d91bb71e751..cdbd31285cf6 100644
4289     --- a/mm/rmap.c
4290     +++ b/mm/rmap.c
4291     @@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
4292     * LOCK should suffice since the actual taking of the lock must
4293     * happen _before_ what follows.
4294     */
4295     + might_sleep();
4296     if (rwsem_is_locked(&anon_vma->root->rwsem)) {
4297     anon_vma_lock_write(anon_vma);
4298     anon_vma_unlock_write(anon_vma);
4299     @@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
4300     * above cannot corrupt).
4301     */
4302     if (!page_mapped(page)) {
4303     + rcu_read_unlock();
4304     put_anon_vma(anon_vma);
4305     - anon_vma = NULL;
4306     + return NULL;
4307     }
4308     out:
4309     rcu_read_unlock();
4310     @@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
4311     }
4312    
4313     if (!page_mapped(page)) {
4314     + rcu_read_unlock();
4315     put_anon_vma(anon_vma);
4316     - anon_vma = NULL;
4317     - goto out;
4318     + return NULL;
4319     }
4320    
4321     /* we pinned the anon_vma, it's safe to sleep */
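
The mm/rmap.c fixes enforce the rule that the new might_sleep() makes explicit:
put_anon_vma() can drop the last reference, and freeing the anon_vma may take
(and sleep on) the root rwsem, so it must not run inside an RCU read-side
critical section. Both !page_mapped() bail-out paths therefore drop the RCU
lock first and return NULL directly instead of falling through to a second
rcu_read_unlock(). A condensed sketch of the corrected ordering (simplified
from page_get_anon_vma(); the lookup details are elided):

    rcu_read_lock();
    anon_vma = ...;                         /* looked up under RCU */
    if (!atomic_inc_not_zero(&anon_vma->refcount))
            goto out;                       /* already being freed */

    if (!page_mapped(page)) {
            rcu_read_unlock();              /* leave the RCU section first */
            put_anon_vma(anon_vma);         /* may free, and may sleep */
            return NULL;
    }
    out:
    rcu_read_unlock();
    return anon_vma;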
4322     diff --git a/mm/vmscan.c b/mm/vmscan.c
4323     index a9c74b409681..6ef876cae8f1 100644
4324     --- a/mm/vmscan.c
4325     +++ b/mm/vmscan.c
4326     @@ -2502,10 +2502,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
4327    
4328     for (i = 0; i <= ZONE_NORMAL; i++) {
4329     zone = &pgdat->node_zones[i];
4330     + if (!populated_zone(zone))
4331     + continue;
4332     +
4333     pfmemalloc_reserve += min_wmark_pages(zone);
4334     free_pages += zone_page_state(zone, NR_FREE_PAGES);
4335     }
4336    
4337     + /* If there are no reserves (unexpected config) then do not throttle */
4338     + if (!pfmemalloc_reserve)
4339     + return true;
4340     +
4341     wmark_ok = free_pages > pfmemalloc_reserve / 2;
4342    
4343     /* kswapd must be awake if processes are being throttled */
4344     @@ -2530,9 +2537,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
4345     static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
4346     nodemask_t *nodemask)
4347     {
4348     + struct zoneref *z;
4349     struct zone *zone;
4350     - int high_zoneidx = gfp_zone(gfp_mask);
4351     - pg_data_t *pgdat;
4352     + pg_data_t *pgdat = NULL;
4353    
4354     /*
4355     * Kernel threads should not be throttled as they may be indirectly
4356     @@ -2551,10 +2558,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
4357     if (fatal_signal_pending(current))
4358     goto out;
4359    
4360     - /* Check if the pfmemalloc reserves are ok */
4361     - first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
4362     - pgdat = zone->zone_pgdat;
4363     - if (pfmemalloc_watermark_ok(pgdat))
4364     + /*
4365     + * Check if the pfmemalloc reserves are ok by finding the first node
4366     + * with a usable ZONE_NORMAL or lower zone. The expectation is that
4367     + * GFP_KERNEL will be required for allocating network buffers when
4368     + * swapping over the network so ZONE_HIGHMEM is unusable.
4369     + *
4370     + * Throttling is based on the first usable node and throttled processes
4371     + * wait on a queue until kswapd makes progress and wakes them. There
4372     + * is an affinity then between processes waking up and where reclaim
4373     + * progress has been made assuming the process wakes on the same node.
4374     + * More importantly, processes running on remote nodes will not compete
4375     + * for remote pfmemalloc reserves and processes on different nodes
4376     + * should make reasonable progress.
4377     + */
4378     + for_each_zone_zonelist_nodemask(zone, z, zonelist,
4379     + gfp_mask, nodemask) {
4380     + if (zone_idx(zone) > ZONE_NORMAL)
4381     + continue;
4382     +
4383     + /* Throttle based on the first usable node */
4384     + pgdat = zone->zone_pgdat;
4385     + if (pfmemalloc_watermark_ok(pgdat))
4386     + goto out;
4387     + break;
4388     + }
4389     +
4390     + /* If no zone was usable by the allocation flags then do not throttle */
4391     + if (!pgdat)
4392     goto out;
4393    
4394     /* Account for the throttling */
4395     @@ -3285,7 +3316,10 @@ static int kswapd(void *p)
4396     }
4397     }
4398    
4399     + tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
4400     current->reclaim_state = NULL;
4401     + lockdep_clear_current_reclaim_state();
4402     +
4403     return 0;
4404     }
4405    
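Three related mm/vmscan.c fixes above: pfmemalloc_watermark_ok() now skips
unpopulated zones and refuses to throttle when the summed min watermarks are
zero, so an unusual zone layout can no longer throttle everyone against an
empty reserve; throttle_direct_reclaim() picks the throttling node by walking
the zonelist for the first zone at or below ZONE_NORMAL, because the
pfmemalloc reserves exist for GFP_KERNEL allocations (e.g. network buffers for
swap over the network) that highmem cannot satisfy; and kswapd clears
PF_MEMALLOC/PF_SWAPWRITE/PF_KSWAPD plus the lockdep reclaim state on exit, so
nothing stale survives the thread. The node-selection logic, condensed from
the hunk with the intent spelled out in comments:

    pg_data_t *pgdat = NULL;

    for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_mask, nodemask) {
            if (zone_idx(zone) > ZONE_NORMAL)
                    continue;               /* highmem cannot hold reserves */

            pgdat = zone->zone_pgdat;       /* first usable node wins */
            if (pfmemalloc_watermark_ok(pgdat))
                    goto out;               /* reserves OK: do not throttle */
            break;                          /* throttle against this node */
    }

    if (!pgdat)                             /* nothing usable for these flags */
            goto out;                       /* then never throttle */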
4406     diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
4407     index adb3ea04adaa..d906016f3c6b 100644
4408     --- a/net/bluetooth/6lowpan.c
4409     +++ b/net/bluetooth/6lowpan.c
4410     @@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn,
4411     return 0;
4412     }
4413    
4414     -static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
4415     - bdaddr_t *addr, u8 *addr_type)
4416     +static u8 get_addr_type_from_eui64(u8 byte)
4417     {
4418     - u8 *eui64;
4419     + /* Is universal(0) or local(1) bit */
4420     + if (byte & 0x02)
4421     + return ADDR_LE_DEV_RANDOM;
4422    
4423     - eui64 = ip6_daddr->s6_addr + 8;
4424     + return ADDR_LE_DEV_PUBLIC;
4425     +}
4426     +
4427     +static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
4428     +{
4429     + u8 *eui64 = ip6_daddr->s6_addr + 8;
4430    
4431     addr->b[0] = eui64[7];
4432     addr->b[1] = eui64[6];
4433     @@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
4434     addr->b[3] = eui64[2];
4435     addr->b[4] = eui64[1];
4436     addr->b[5] = eui64[0];
4437     +}
4438    
4439     - addr->b[5] ^= 2;
4440     +static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
4441     + bdaddr_t *addr, u8 *addr_type)
4442     +{
4443     + copy_to_bdaddr(ip6_daddr, addr);
4444    
4445     - /* Set universal/local bit to 0 */
4446     - if (addr->b[5] & 1) {
4447     - addr->b[5] &= ~1;
4448     - *addr_type = ADDR_LE_DEV_PUBLIC;
4449     - } else {
4450     - *addr_type = ADDR_LE_DEV_RANDOM;
4451     - }
4452     + /* We need to toggle the U/L bit that we got from the IPv6 address
4453     + * so that we get the proper address and type of the BD address.
4454     + */
4455     + addr->b[5] ^= 0x02;
4456     +
4457     + *addr_type = get_addr_type_from_eui64(addr->b[5]);
4458     }
4459    
4460     static int header_create(struct sk_buff *skb, struct net_device *netdev,
4461     @@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
4462     /* Get destination BT device from skb.
4463     * If there is no such peer then discard the packet.
4464     */
4465     - get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
4466     + convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
4467    
4468     - BT_DBG("dest addr %pMR type %d", &addr, addr_type);
4469     + BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
4470     + addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
4471     + &hdr->daddr);
4472    
4473     read_lock_irqsave(&devices_lock, flags);
4474     peer = peer_lookup_ba(dev, &addr, addr_type);
4475     @@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
4476     } else {
4477     unsigned long flags;
4478    
4479     - get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
4480     + convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
4481     eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
4482     dev = lowpan_dev(netdev);
4483    
4484     @@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
4485     peer = peer_lookup_ba(dev, &addr, addr_type);
4486     read_unlock_irqrestore(&devices_lock, flags);
4487    
4488     - BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
4489     - &addr, &lowpan_cb(skb)->addr, peer);
4490     + BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
4491     + netdev->name, &addr,
4492     + addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
4493     + &lowpan_cb(skb)->addr, peer);
4494    
4495     if (peer && peer->conn)
4496     err = send_pkt(peer->conn, netdev->dev_addr,
4497     @@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
4498     eui[6] = addr[1];
4499     eui[7] = addr[0];
4500    
4501     - eui[0] ^= 2;
4502     -
4503     - /* Universal/local bit set, RFC 4291 */
4504     + /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
4505     if (addr_type == ADDR_LE_DEV_PUBLIC)
4506     - eui[0] |= 1;
4507     + eui[0] &= ~0x02;
4508     else
4509     - eui[0] &= ~1;
4510     + eui[0] |= 0x02;
4511     +
4512     + BT_DBG("type %d addr %*phC", addr_type, 8, eui);
4513     }
4514    
4515     static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
4516     @@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
4517     {
4518     netdev->addr_assign_type = NET_ADDR_PERM;
4519     set_addr(netdev->dev_addr, addr->b, addr_type);
4520     - netdev->dev_addr[0] ^= 2;
4521     }
4522    
4523     static void ifup(struct net_device *netdev)
4524     @@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
4525    
4526     memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
4527     EUI64_ADDR_LEN);
4528     - peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
4529     - * is done according RFC2464
4530     - */
4531     -
4532     - raw_dump_inline(__func__, "peer IPv6 address",
4533     - (unsigned char *)&peer->peer_addr, 16);
4534     - raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
4535    
4536     write_lock_irqsave(&devices_lock, flags);
4537     INIT_LIST_HEAD(&peer->list);
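
The 6lowpan.c rework above splits destination handling into copy_to_bdaddr(),
which byte-reverses the EUI-64 interface identifier into the little-endian
bdaddr_t (skipping the ff:fe filler in the middle), and convert_dest_bdaddr(),
which toggles the universal/local bit (0x02 of the most significant address
byte) that was inverted when the IPv6 IID was formed and then derives the LE
address type from it: bit set selects ADDR_LE_DEV_RANDOM, clear selects
ADDR_LE_DEV_PUBLIC. set_addr() now applies the same single-toggle convention
per the BT 6lowpan draft instead of the old RFC 4291 handling, which is also
why the redundant second bit-flip in add_peer_conn() is removed. A standalone
sketch of the byte order and bit handling, assuming a plain 8-byte IID buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse an EUI-64 IID into a 6-byte BD address (b[5] is the MSB,
     * as in bdaddr_t) and undo the IID's U/L-bit inversion, mirroring
     * copy_to_bdaddr() + convert_dest_bdaddr() from the patch.
     */
    static void iid_to_bdaddr(const uint8_t eui64[8], uint8_t b[6],
                              int *is_random)
    {
            b[0] = eui64[7];
            b[1] = eui64[6];
            b[2] = eui64[5];
            b[3] = eui64[2];    /* eui64[3..4] hold the ff:fe filler */
            b[4] = eui64[1];
            b[5] = eui64[0];

            b[5] ^= 0x02;       /* undo the U/L toggle from the IID */
            *is_random = (b[5] & 0x02) != 0;  /* get_addr_type_from_eui64() */
    }

    int main(void)
    {
            uint8_t iid[8] = { 0x02, 0x1b, 0xdc, 0xff, 0xfe, 0x07, 0x32, 0xa8 };
            uint8_t b[6];
            int is_random;

            iid_to_bdaddr(iid, b, &is_random);
            printf("%02x:%02x:%02x:%02x:%02x:%02x type=%s\n",
                   b[5], b[4], b[3], b[2], b[1], b[0],
                   is_random ? "RANDOM" : "PUBLIC");
            return 0;
    }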
4538     diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
4539     index d58f76bcebd1..d4b7702d900f 100644
4540     --- a/net/bluetooth/l2cap_sock.c
4541     +++ b/net/bluetooth/l2cap_sock.c
4542     @@ -1181,13 +1181,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
4543     /* Check for backlog size */
4544     if (sk_acceptq_is_full(parent)) {
4545     BT_DBG("backlog full %d", parent->sk_ack_backlog);
4546     + release_sock(parent);
4547     return NULL;
4548     }
4549    
4550     sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
4551     GFP_ATOMIC);
4552     - if (!sk)
4553     + if (!sk) {
4554     + release_sock(parent);
4555     return NULL;
4556     + }
4557    
4558     bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
4559    
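The two release_sock() additions above close a lock leak:
l2cap_sock_new_connection_cb() runs with the parent socket lock held (which is
why the added calls are needed to balance it), and both early failure paths, a
full accept backlog and a failed socket allocation, previously returned NULL
with the parent still locked, wedging further operations on the listening
socket. The corrected shape, condensed from the hunk:

    lock_sock(parent);                      /* held on entry */

    if (sk_acceptq_is_full(parent)) {
            release_sock(parent);           /* unlock on every early exit */
            return NULL;
    }

    sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
    if (!sk) {
            release_sock(parent);           /* this path too */
            return NULL;
    }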
4560     diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
4561     index c4b7218058b6..1465363a452b 100644
4562     --- a/net/iucv/af_iucv.c
4563     +++ b/net/iucv/af_iucv.c
4564     @@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
4565     spin_lock_irqsave(&list->lock, flags);
4566    
4567     while (list_skb != (struct sk_buff *)list) {
4568     - if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
4569     + if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
4570     this = list_skb;
4571     break;
4572     }
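
The af_iucv fix above is a one-character logic inversion: iucv_callback_txdone()
scans the send queue for the skb whose tag matches the just-completed message,
but the old '!=' test stopped at the first skb that did not match, so with more
than one message in flight the wrong buffer was treated as sent and freed. With
'==' the scan selects the skb the transmit-completion actually refers to; the
loop shape, with the list-advance step (outside the hunk) assumed:

    while (list_skb != (struct sk_buff *)list) {    /* until back at head */
            if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {   /* was: != */
                    this = list_skb;        /* the completed skb */
                    break;
            }
            list_skb = list_skb->next;      /* assumed advance step */
    }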
4573     diff --git a/scripts/package/builddeb b/scripts/package/builddeb
4574     index f46e4dd0558d..152d4d25ab7c 100644
4575     --- a/scripts/package/builddeb
4576     +++ b/scripts/package/builddeb
4577     @@ -155,11 +155,11 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
4578     for module in $(find lib/modules/ -name *.ko); do
4579     mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
4580     # only keep debug symbols in the debug file
4581     - objcopy --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
4582     + $OBJCOPY --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
4583     # strip original module from debug symbols
4584     - objcopy --strip-debug $module
4585     + $OBJCOPY --strip-debug $module
4586     # then add a link to those
4587     - objcopy --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
4588     + $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
4589     done
4590     )
4591     fi
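
Finally, the builddeb change matters for cross-compiled kernels: the script
post-processes every module's debug info, and the bare host objcopy may not
understand target-architecture objects. The top-level Makefile already defines
and exports OBJCOPY as $(CROSS_COMPILE)objcopy, so splitting out the debug
symbols, stripping the module, and adding the .gnu_debuglink back-reference
now all go through $OBJCOPY and pick up the cross toolchain automatically.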