Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.15/0102-3.15.3-all-fixes.patch



Revision 2449 - Tue Jul 1 13:57:07 2014 UTC by niro
File size: 187079 bytes
-linux-3.15.3
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 550068466605..6ae89a9edf2a 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -84,6 +84,11 @@ PR_MCE_KILL
PR_MCE_KILL_EARLY: Early kill
PR_MCE_KILL_LATE: Late kill
PR_MCE_KILL_DEFAULT: Use system global default
+ Note that if you want to have a dedicated thread which handles
+ the SIGBUS(BUS_MCEERR_AO) on behalf of the process, you should
+ call prctl(PR_MCE_KILL_EARLY) on the designated thread. Otherwise,
+ the SIGBUS is sent to the main thread.
+
PR_MCE_KILL_GET
return current mode

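The note added above is easiest to see from user space. As a minimal sketch (hypothetical illustration, not part of the patch; error handling and thread creation omitted), a dedicated thread installs the SIGBUS handler and marks itself early-kill with prctl(), so BUS_MCEERR_AO notifications are routed to it instead of the main thread:

#include <pthread.h>
#include <signal.h>
#include <sys/prctl.h>
#include <unistd.h>

static void sigbus_handler(int sig, siginfo_t *si, void *ctx)
{
	/* si->si_code == BUS_MCEERR_AO: the page at si->si_addr is
	 * poisoned but not yet consumed; recover or unmap it here. */
}

static void *hwpoison_thread(void *arg)
{
	struct sigaction sa = { .sa_flags = SA_SIGINFO };

	sa.sa_sigaction = sigbus_handler;
	sigaction(SIGBUS, &sa, NULL);
	/* Per-thread early-kill mode, as described in the hunk above. */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
	for (;;)
		pause();
	return NULL;
}
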
diff --git a/Makefile b/Makefile
index 475e0853a2f4..2e37d8b0bb96 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 15
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Shuffling Zombie Juror

diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 5d42feb31049..178382ca594f 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -25,7 +25,7 @@

memory {
device_type = "memory";
- reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
+ reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
};

soc {
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index af4e8c8a5422..6582c4adc182 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
return trace->nr_entries >= trace->max_entries;
}

-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+/* This must be noinline to so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;

data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;

if (tsk != current) {
#ifdef CONFIG_SMP
@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
} else {
register unsigned long current_sp asm ("sp");

- data.no_sched_functions = 0;
+ /* We don't want this function nor the caller */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}

walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}

+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 65d2acb31498..5b45d266d83e 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -346,7 +346,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,

-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 816ecd13f81e..bfed4f928663 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,

-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index bd5f02e9c354..c49ce83cc1eb 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
/* usb1 has a Mini-AB port and external isp1301 transceiver */
.otg = 2,

-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.hmc_mode = 19, /* 0:host(off) 1:dev|otg 2:disabled */
/* .hmc_mode = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index 3a0262156e93..7436d4cf6596 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -283,7 +283,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
* be used, with a NONSTANDARD gender-bending cable/dongle, as
* a peripheral.
*/
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
.register_dev = 1,
.hmc_mode = 0,
#else
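The four hunks above all make the same substitution, and the point of IS_ENABLED() is that it also covers the modular build. A short illustration (hypothetical, not from the patch):

#include <linux/kconfig.h>

/* For a tristate option built as a module (=m), the preprocessor sees
 * CONFIG_USB_OMAP_MODULE but not CONFIG_USB_OMAP, so the old test was
 * false exactly when the gadget driver was modular. */
#ifdef CONFIG_USB_OMAP
/* reached only for =y */
#endif

#if IS_ENABLED(CONFIG_USB_OMAP)
/* reached for both =y and =m */
#endif
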
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 4349e82debfe..17cd39360afe 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -46,7 +46,7 @@ static struct platform_device gpmc_nand_device = {
static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
{
/* platforms which support all ECC schemes */
- if (soc_is_am33xx() || cpu_is_omap44xx() ||
+ if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
soc_is_omap54xx() || soc_is_dra7xx())
return 1;

diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 54ee6163c181..66781bf34077 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -56,8 +56,3 @@ int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
-
-int pmd_huge_support(void)
-{
- return 1;
-}
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 01a719e18bb0..22e3ad63500c 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -64,6 +64,14 @@ ENTRY(cpu_v7_switch_mm)
mov pc, lr
ENDPROC(cpu_v7_switch_mm)

+#ifdef __ARMEB__
+#define rl r3
+#define rh r2
+#else
+#define rl r2
+#define rh r3
+#endif
+
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
@@ -73,13 +81,13 @@ ENDPROC(cpu_v7_switch_mm)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
- tst r2, #L_PTE_VALID
+ tst rl, #L_PTE_VALID
beq 1f
- tst r3, #1 << (57 - 32) @ L_PTE_NONE
- bicne r2, #L_PTE_VALID
+ tst rh, #1 << (57 - 32) @ L_PTE_NONE
+ bicne rl, #L_PTE_VALID
bne 1f
- tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
- orreq r2, #L_PTE_RDONLY
+ tst rh, #1 << (55 - 32) @ L_PTE_DIRTY
+ orreq rl, #L_PTE_RDONLY
1: strd r2, r3, [r0]
ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 83f71b3004a8..f06a9c2d399e 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -30,7 +30,6 @@ generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
generic-y += poll.h
-generic-y += posix_types.h
generic-y += preempt.h
generic-y += resource.h
generic-y += rwsem.h
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 3a4572ec3273..dc82e52acdb3 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -26,8 +26,6 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops coherent_swiotlb_dma_ops;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7b1c67a0b485..d123f0eea332 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -253,7 +253,7 @@ static inline pmd_t pte_pmd(pte_t pte)
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))

#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
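The one-character pmd_mknotpresent() change above matters because the macro body expands with the caller's variable. A hypothetical reduction of the difference (not from the patch):

/* "&=" assigns through the macro argument, so the caller's value was
 * silently cleared as a side effect even when only the return value
 * was wanted; "&" is pure and leaves the argument untouched. */
#define BAD_MKNOTPRESENT(x)  ((x) &= ~0x3UL) /* mutates x, then yields it */
#define GOOD_MKNOTPRESENT(x) ((x) & ~0x3UL)  /* yields new value, x intact */
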
diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
new file mode 100644
index 000000000000..7985ff60ca3f
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/posix_types.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_POSIX_TYPES_H
+#define __ASM_POSIX_TYPES_H
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /* __ASM_POSIX_TYPES_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6a8928bba03c..7a50b86464cc 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -650,11 +650,16 @@ static int compat_gpr_get(struct task_struct *target,
reg = task_pt_regs(target)->regs[idx];
}

- ret = copy_to_user(ubuf, &reg, sizeof(reg));
- if (ret)
- break;
-
- ubuf += sizeof(reg);
+ if (kbuf) {
+ memcpy(kbuf, &reg, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_to_user(ubuf, &reg, sizeof(reg));
+ if (ret)
+ break;
+
+ ubuf += sizeof(reg);
+ }
}

return ret;
@@ -684,11 +689,16 @@ static int compat_gpr_set(struct task_struct *target,
unsigned int idx = start + i;
compat_ulong_t reg;

- ret = copy_from_user(&reg, ubuf, sizeof(reg));
- if (ret)
- return ret;
+ if (kbuf) {
+ memcpy(&reg, kbuf, sizeof(reg));
+ kbuf += sizeof(reg);
+ } else {
+ ret = copy_from_user(&reg, ubuf, sizeof(reg));
+ if (ret)
+ return ret;

- ubuf += sizeof(reg);
+ ubuf += sizeof(reg);
+ }

switch (idx) {
case 15:
@@ -821,6 +831,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t val)
{
int ret;
+ mm_segment_t old_fs = get_fs();

if (off & 3 || off >= COMPAT_USER_SZ)
return -EIO;
@@ -828,10 +839,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
if (off >= sizeof(compat_elf_gregset_t))
return 0;

+ set_fs(KERNEL_DS);
ret = copy_regset_from_user(tsk, &user_aarch32_view,
REGSET_COMPAT_GPR, off,
sizeof(compat_ulong_t),
&val);
+ set_fs(old_fs);
+
return ret;
}

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 31eb959e9aa8..023747bf4dd7 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ int pud_huge(pud_t pud)
#endif
}

-int pmd_huge_support(void)
-{
- return 1;
-}
-
static __init int setup_hugepagesz(char *opt)
{
unsigned long ps = memparse(opt, &opt);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 1a871b78e570..344387a55406 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -242,7 +242,7 @@ struct ioc {
struct pci_dev *sac_only_dev;
};

-static struct ioc *ioc_list;
+static struct ioc *ioc_list, *ioc_found;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
@@ -1809,20 +1809,13 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ SX2000_IOC_ID, "sx2000", NULL },
};

-static struct ioc *
-ioc_init(unsigned long hpa, void *handle)
+static void ioc_init(unsigned long hpa, struct ioc *ioc)
{
- struct ioc *ioc;
struct ioc_iommu *info;

- ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc)
- return NULL;
-
ioc->next = ioc_list;
ioc_list = ioc;

- ioc->handle = handle;
ioc->ioc_hpa = ioremap(hpa, 0x1000);

ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
@@ -1863,8 +1856,6 @@ ioc_init(unsigned long hpa, void *handle)
"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
hpa, ioc->iov_size >> 20, ioc->ibase);
-
- return ioc;
}


@@ -2031,22 +2022,21 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
#endif
}

-static int
-acpi_sba_ioc_add(struct acpi_device *device,
- const struct acpi_device_id *not_used)
+static void acpi_sba_ioc_add(struct ioc *ioc)
{
- struct ioc *ioc;
+ acpi_handle handle = ioc->handle;
acpi_status status;
u64 hpa, length;
struct acpi_device_info *adi;

- status = hp_acpi_csr_space(device->handle, &hpa, &length);
+ ioc_found = ioc->next;
+ status = hp_acpi_csr_space(handle, &hpa, &length);
if (ACPI_FAILURE(status))
- return 1;
+ goto err;

- status = acpi_get_object_info(device->handle, &adi);
+ status = acpi_get_object_info(handle, &adi);
if (ACPI_FAILURE(status))
- return 1;
+ goto err;

/*
* For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
@@ -2067,13 +2057,13 @@ acpi_sba_ioc_add(struct acpi_device *device,
if (!iovp_shift)
iovp_shift = 12;

- ioc = ioc_init(hpa, device->handle);
- if (!ioc)
- return 1;
-
+ ioc_init(hpa, ioc);
/* setup NUMA node association */
- sba_map_ioc_to_node(ioc, device->handle);
- return 0;
+ sba_map_ioc_to_node(ioc, handle);
+ return;
+
+ err:
+ kfree(ioc);
}

static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
@@ -2081,9 +2071,26 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
{"HWP0004", 0},
{"", 0},
};
+
+static int acpi_sba_ioc_attach(struct acpi_device *device,
+ const struct acpi_device_id *not_used)
+{
+ struct ioc *ioc;
+
+ ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+ if (!ioc)
+ return -ENOMEM;
+
+ ioc->next = ioc_found;
+ ioc_found = ioc;
+ ioc->handle = device->handle;
+ return 1;
+}
+
+
static struct acpi_scan_handler acpi_sba_ioc_handler = {
.ids = hp_ioc_iommu_device_ids,
- .attach = acpi_sba_ioc_add,
+ .attach = acpi_sba_ioc_attach,
};

static int __init acpi_sba_ioc_init_acpi(void)
@@ -2118,9 +2125,12 @@ sba_init(void)
#endif

/*
- * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
+ * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
* routine, but that only happens if acpi_scan_init() has already run.
*/
+ while (ioc_found)
+ acpi_sba_ioc_add(ioc_found);
+
if (!ioc_list) {
#ifdef CONFIG_IA64_GENERIC
/*
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 68232db98baa..76069c18ee42 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -114,11 +114,6 @@ int pud_huge(pud_t pud)
return 0;
}

-int pmd_huge_support(void)
-{
- return 0;
-}
-
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
index 042431509b56..3c52fa6d0f8e 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -110,11 +110,6 @@ int pud_huge(pud_t pud)
return 0;
}

-int pmd_huge_support(void)
-{
- return 1;
-}
-
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
index 77e0ae036e7c..4ec8ee10d371 100644
--- a/arch/mips/mm/hugetlbpage.c
+++ b/arch/mips/mm/hugetlbpage.c
@@ -84,11 +84,6 @@ int pud_huge(pud_t pud)
return (pud_val(pud) & _PAGE_HUGE) != 0;
}

-int pmd_huge_support(void)
-{
- return 1;
-}
-
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index eb923654ba80..7e70ae968e5f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}
-
-int pmd_huge_support(void)
-{
- return 1;
-}
#else
int pmd_huge(pmd_t pmd)
{
@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
{
return 0;
}
-
-int pmd_huge_support(void)
-{
- return 0;
-}
#endif

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index bbf8141408cd..2bed4f02a558 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -142,9 +142,9 @@ struct _lowcore {
__u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */

/* Interrupt response block */
- __u8 irb[64]; /* 0x0300 */
+ __u8 irb[96]; /* 0x0300 */

- __u8 pad_0x0340[0x0e00-0x0340]; /* 0x0340 */
+ __u8 pad_0x0360[0x0e00-0x0360]; /* 0x0360 */

/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -288,12 +288,13 @@ struct _lowcore {
__u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */

/* Interrupt response block. */
- __u8 irb[64]; /* 0x0400 */
+ __u8 irb[96]; /* 0x0400 */
+ __u8 pad_0x0460[0x0480-0x0460]; /* 0x0460 */

/* Per cpu primary space access list */
- __u32 paste[16]; /* 0x0440 */
+ __u32 paste[16]; /* 0x0480 */

- __u8 pad_0x0480[0x0e00-0x0480]; /* 0x0480 */
+ __u8 pad_0x04c0[0x0e00-0x04c0]; /* 0x04c0 */

/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 386d37a228bb..0931b110c826 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -226,7 +226,7 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->wtom_clock_sec =
tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
vdso_data->wtom_clock_nsec = tk->xtime_nsec +
- + (tk->wall_to_monotonic.tv_nsec << tk->shift);
+ + ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
nsecps = (u64) NSEC_PER_SEC << tk->shift;
while (vdso_data->wtom_clock_nsec >= nsecps) {
vdso_data->wtom_clock_nsec -= nsecps;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 0727a55d87d9..0ff66a7e29bb 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -220,11 +220,6 @@ int pud_huge(pud_t pud)
return 0;
}

-int pmd_huge_support(void)
-{
- return 1;
-}
-
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmdp, int write)
{
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 0d676a41081e..d7762349ea48 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -83,11 +83,6 @@ int pud_huge(pud_t pud)
return 0;
}

-int pmd_huge_support(void)
-{
- return 0;
-}
-
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 9bd9ce80bf77..d329537739c6 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -231,11 +231,6 @@ int pud_huge(pud_t pud)
return 0;
}

-int pmd_huge_support(void)
-{
- return 0;
-}
-
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 0cb3bbaa580c..e514899e1100 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -166,11 +166,6 @@ int pud_huge(pud_t pud)
return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

-int pmd_huge_support(void)
-{
- return 1;
-}
-
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pmd_t *pmd, int write)
{
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 25d2c6f7325e..6b8b429c832f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1871,6 +1871,10 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
def_bool y
depends on X86_64 || X86_PAE

+config ARCH_ENABLE_HUGEPAGE_MIGRATION
+ def_bool y
+ depends on X86_64 && HUGETLB_PAGE && MIGRATION
+
menu "Power management and ACPI options"

config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index a2a4f4697889..6491353cc9aa 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -431,9 +431,10 @@ sysenter_past_esp:
jnz sysenter_audit
sysenter_do_call:
cmpl $(NR_syscalls), %eax
- jae syscall_badsys
+ jae sysenter_badsys
call *sys_call_table(,%eax,4)
movl %eax,PT_EAX(%esp)
+sysenter_after_call:
LOCKDEP_SYS_EXIT
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
@@ -551,11 +552,6 @@ ENTRY(iret_exc)

CFI_RESTORE_STATE
ldt_ss:
- larl PT_OLDSS(%esp), %eax
- jnz restore_nocheck
- testl $0x00400000, %eax # returning to 32bit stack?
- jnz restore_nocheck # allright, normal return
-
#ifdef CONFIG_PARAVIRT
/*
* The kernel can't run on a non-flat stack if paravirt mode
@@ -688,7 +684,12 @@ END(syscall_fault)

syscall_badsys:
movl $-ENOSYS,PT_EAX(%esp)
- jmp resume_userspace
+ jmp syscall_exit
+END(syscall_badsys)
+
+sysenter_badsys:
+ movl $-ENOSYS,PT_EAX(%esp)
+ jmp sysenter_after_call
END(syscall_badsys)
CFI_ENDPROC
/*
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 8c9f647ff9e1..8b977ebf9388 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -58,11 +58,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
{
return NULL;
}
-
-int pmd_huge_support(void)
-{
- return 0;
-}
#else

struct page *
@@ -80,11 +75,6 @@ int pud_huge(pud_t pud)
{
return !!(pud_val(pud) & _PAGE_PSE);
}
-
-int pmd_huge_support(void)
-{
- return 1;
-}
#endif

#ifdef CONFIG_HUGETLB_PAGE
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 04376ac3d9ef..ec255a1646d2 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -212,10 +212,10 @@
203 common sched_setaffinity sys_sched_setaffinity
204 common sched_getaffinity sys_sched_getaffinity
205 64 set_thread_area
-206 common io_setup sys_io_setup
+206 64 io_setup sys_io_setup
207 common io_destroy sys_io_destroy
208 common io_getevents sys_io_getevents
-209 common io_submit sys_io_submit
+209 64 io_submit sys_io_submit
210 common io_cancel sys_io_cancel
211 64 get_thread_area
212 common lookup_dcookie sys_lookup_dcookie
@@ -359,3 +359,5 @@
540 x32 process_vm_writev compat_sys_process_vm_writev
541 x32 setsockopt compat_sys_setsockopt
542 x32 getsockopt compat_sys_getsockopt
+543 x32 io_setup compat_sys_io_setup
+544 x32 io_submit compat_sys_io_submit
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index 77219336c7e0..6dc54b3c28b0 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -353,7 +353,7 @@ void acpi_ut_print_string(char *string, u16 max_length)
}

acpi_os_printf("\"");
- for (i = 0; string[i] && (i < max_length); i++) {
+ for (i = 0; (i < max_length) && string[i]; i++) {

/* Escape sequences */

diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf925c4f36b7..ed9fca0250fa 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -52,6 +52,12 @@ struct proc_dir_entry *acpi_root_dir;
EXPORT_SYMBOL(acpi_root_dir);

#ifdef CONFIG_X86
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static inline int set_copy_dsdt(const struct dmi_system_id *id)
+{
+ return 0;
+}
+#else
static int set_copy_dsdt(const struct dmi_system_id *id)
{
printk(KERN_NOTICE "%s detected - "
@@ -59,6 +65,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
acpi_gbl_copy_dsdt_locally = 1;
return 0;
}
+#endif

static struct dmi_system_id dsdt_dmi_table[] __initdata = {
/*
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index bba526148583..07c8c5a5ee95 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/acpi.h>
+#include <linux/dynamic_debug.h>

#include "internal.h"

@@ -457,6 +458,24 @@ acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
EXPORT_SYMBOL(acpi_evaluate_ost);

/**
+ * acpi_handle_path: Return the object path of handle
+ *
+ * Caller must free the returned buffer
+ */
+static char *acpi_handle_path(acpi_handle handle)
+{
+ struct acpi_buffer buffer = {
+ .length = ACPI_ALLOCATE_BUFFER,
+ .pointer = NULL
+ };
+
+ if (in_interrupt() ||
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
+ return NULL;
+ return buffer.pointer;
+}
+
+/**
* acpi_handle_printk: Print message with ACPI prefix and object path
*
* This function is called through acpi_handle_<level> macros and prints
@@ -469,29 +488,50 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
- struct acpi_buffer buffer = {
- .length = ACPI_ALLOCATE_BUFFER,
- .pointer = NULL
- };
const char *path;

va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;

- if (in_interrupt() ||
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer) != AE_OK)
- path = "<n/a>";
- else
- path = buffer.pointer;
-
- printk("%sACPI: %s: %pV", level, path, &vaf);
+ path = acpi_handle_path(handle);
+ printk("%sACPI: %s: %pV", level, path ? path : "<n/a>" , &vaf);

va_end(args);
- kfree(buffer.pointer);
+ kfree(path);
}
EXPORT_SYMBOL(acpi_handle_printk);

+#if defined(CONFIG_DYNAMIC_DEBUG)
+/**
+ * __acpi_handle_debug: pr_debug with ACPI prefix and object path
+ *
+ * This function is called through acpi_handle_debug macro and debug
+ * prints a message with ACPI prefix and object path. This function
+ * acquires the global namespace mutex to obtain an object path. In
+ * interrupt context, it shows the object path as <n/a>.
+ */
+void
+__acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ const char *path;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ path = acpi_handle_path(handle);
+ __dynamic_pr_debug(descriptor, "ACPI: %s: %pV", path ? path : "<n/a>", &vaf);
+
+ va_end(args);
+ kfree(path);
+}
+EXPORT_SYMBOL(__acpi_handle_debug);
+#endif
+
/**
* acpi_has_method: Check whether @handle has a method named @name
* @handle: ACPI device handle
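For context, __acpi_handle_debug() above is not called directly; drivers reach it through the acpi_handle_debug() macro declared alongside acpi_handle_printk(). A hypothetical usage sketch (not part of the patch):

#include <linux/acpi.h>

static void example_notify_handler(acpi_handle handle, u32 event, void *data)
{
	/* With CONFIG_DYNAMIC_DEBUG this routes through
	 * __acpi_handle_debug() and prints the message prefixed with the
	 * object path, e.g. "ACPI: \_SB_.PCI0.DOCK: event: 0x80". */
	acpi_handle_debug(handle, "event: 0x%02x\n", event);
}
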
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 25538675d59e..c539d70b97ab 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -734,11 +734,9 @@ int of_init_opp_table(struct device *dev)
unsigned long freq = be32_to_cpup(val++) * 1000;
unsigned long volt = be32_to_cpup(val++);

- if (dev_pm_opp_add(dev, freq, volt)) {
+ if (dev_pm_opp_add(dev, freq, volt))
dev_warn(dev, "%s: Failed to add OPP %ld\n",
__func__, freq);
- continue;
- }
nr -= 2;
}

diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index cb9b1f8326c3..31e5bc1351b4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -159,6 +159,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
unsigned int num;
const bool last = (req->cmd_flags & REQ_END) != 0;
int err;
+ bool notify = false;

BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

@@ -211,10 +212,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
return BLK_MQ_RQ_QUEUE_ERROR;
}

- if (last)
- virtqueue_kick(vblk->vq);
-
+ if (last && virtqueue_kick_prepare(vblk->vq))
+ notify = true;
spin_unlock_irqrestore(&vblk->vq_lock, flags);
+
+ if (notify)
+ virtqueue_notify(vblk->vq);
return BLK_MQ_RQ_QUEUE_OK;
}

diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9849b5233bf4..48eccb350180 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -572,10 +572,10 @@ static void zram_bio_discard(struct zram *zram, u32 index,
* skipping this logical block is appropriate here.
*/
if (offset) {
- if (n < offset)
+ if (n <= (PAGE_SIZE - offset))
return;

- n -= offset;
+ n -= (PAGE_SIZE - offset);
index++;
}

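The corrected zram guard above compares the request length against the distance to the next page boundary rather than against the offset itself. A stand-alone sketch of the fixed head-trimming arithmetic (hypothetical, not from the patch):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void trim_partial_head(unsigned long *n, unsigned long *index,
			      unsigned long offset)
{
	if (offset) {
		/* Freeing can only start at the next page boundary,
		 * which is PAGE_SIZE - offset bytes away. */
		if (*n <= (PAGE_SIZE - offset))
			return; /* request ends inside the first page */
		*n -= (PAGE_SIZE - offset);
		(*index)++;
	}
}

int main(void)
{
	unsigned long n = 8192, index = 5, offset = 512;

	trim_partial_head(&n, &index, offset);
	printf("n=%lu index=%lu\n", n, index); /* n=4608 index=6 */
	return 0;
}
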
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f1fbf4f1e5be..e00f8f5b5c8e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)

int hci_uart_tx_wakeup(struct hci_uart *hu)
{
- struct tty_struct *tty = hu->tty;
- struct hci_dev *hdev = hu->hdev;
- struct sk_buff *skb;
-
if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
return 0;
@@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)

BT_DBG("");

+ schedule_work(&hu->write_work);
+
+ return 0;
+}
+
+static void hci_uart_write_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
+ struct tty_struct *tty = hu->tty;
+ struct hci_dev *hdev = hu->hdev;
+ struct sk_buff *skb;
+
+ /* REVISIT: should we cope with bad skbs or ->write() returning
+ * and error value ?
+ */
+
restart:
clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);

@@ -153,7 +165,6 @@ restart:
goto restart;

clear_bit(HCI_UART_SENDING, &hu->tx_state);
- return 0;
}

static void hci_uart_init_work(struct work_struct *work)
@@ -282,6 +293,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
tty->receive_room = 65536;

INIT_WORK(&hu->init_ready, hci_uart_init_work);
+ INIT_WORK(&hu->write_work, hci_uart_write_work);

spin_lock_init(&hu->rx_lock);

@@ -319,6 +331,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
if (hdev)
hci_uart_close(hdev);

+ cancel_work_sync(&hu->write_work);
+
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
if (hdev) {
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index fffa61ff5cb1..12df101ca942 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -68,6 +68,7 @@ struct hci_uart {
unsigned long hdev_flags;

struct work_struct init_ready;
+ struct work_struct write_work;

struct hci_uart_proto *proto;
void *priv;
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 974321a2508d..14790304b84b 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -345,7 +345,6 @@ out:
free_irq(apbs[i].irq, &dummy);
iounmap(apbs[i].RamIO);
}
- pci_disable_device(dev);
return ret;
}

diff --git a/drivers/char/random.c b/drivers/char/random.c
index 102c50d38902..2b6e4cd8de8e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -979,7 +979,6 @@ static void push_to_pool(struct work_struct *work)
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
- int have_bytes;
int entropy_count, orig;
size_t ibytes;

@@ -988,17 +987,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
/* Can we pull enough? */
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
ibytes = nbytes;
/* If limited, never pull more than available */
- if (r->limit)
- ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+ if (r->limit) {
+ int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+
+ if ((have_bytes -= reserved) < 0)
+ have_bytes = 0;
+ ibytes = min_t(size_t, ibytes, have_bytes);
+ }
if (ibytes < min)
ibytes = 0;
- if (have_bytes >= ibytes + reserved)
- entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
- else
- entropy_count = reserved << (ENTROPY_SHIFT + 3);
+ if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0)
+ entropy_count = 0;

if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
goto retry;
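The account() rewrite above clamps two signed computations that could go negative. The first hazard, a negative int fed to min_t(size_t, ...), is easy to reproduce in isolation (hypothetical demo, not from the patch):

#include <stdio.h>
#include <stddef.h>

#define min_t(t, a, b) ((t)(a) < (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
	int have_bytes = 2, reserved = 8;
	size_t ibytes = 32;

	/* Pre-fix pattern: -6 converted to size_t wraps to a huge value,
	 * so the "limit" does not limit anything. */
	size_t bad = min_t(size_t, ibytes, have_bytes - reserved);

	/* Post-fix pattern: clamp at zero before taking the minimum. */
	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	size_t good = min_t(size_t, ibytes, have_bytes);

	printf("bad=%zu good=%zu\n", bad, good); /* bad=32 good=0 */
	return 0;
}
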
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index 3846941801b8..5c948c9625d2 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -650,7 +650,7 @@ static int max14577_muic_probe(struct platform_device *pdev)
unsigned int virq = 0;

virq = regmap_irq_get_virq(max14577->irq_data, muic_irq->irq);
- if (!virq)
+ if (virq <= 0)
return -EINVAL;
muic_irq->virq = virq;

@@ -710,13 +710,8 @@ static int max14577_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max14577_muic_detect_cable_wq);
- ret = queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
+ queue_delayed_work(system_power_efficient_wq, &info->wq_detcable,
delay_jiffies);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "failed to schedule delayed work for cable detect\n");
- goto err_extcon;
- }

return ret;

diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index da268fbc901b..4657a91acf56 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1193,7 +1193,7 @@ static int max77693_muic_probe(struct platform_device *pdev)


/* Initialize MUIC register by using platform data or default data */
- if (pdata->muic_data) {
+ if (pdata && pdata->muic_data) {
init_data = pdata->muic_data->init_data;
num_init_data = pdata->muic_data->num_init_data;
} else {
@@ -1226,7 +1226,7 @@ static int max77693_muic_probe(struct platform_device *pdev)
= init_data[i].data;
}

- if (pdata->muic_data) {
+ if (pdata && pdata->muic_data) {
struct max77693_muic_platform_data *muic_pdata
= pdata->muic_data;

diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 6a00464658c5..5e1b88cecb76 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -715,7 +715,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
goto err_irq;
}

- if (pdata->muic_pdata) {
+ if (pdata && pdata->muic_pdata) {
struct max8997_muic_platform_data *muic_pdata
= pdata->muic_pdata;

diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 4b9dc836dcf9..e992abc5ef26 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -40,7 +40,7 @@ struct pstore_read_data {
static inline u64 generic_id(unsigned long timestamp,
unsigned int part, int count)
{
- return (timestamp * 100 + part) * 1000 + count;
+ return ((u64) timestamp * 100 + part) * 1000 + count;
}

static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
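The cast in generic_id() above widens the arithmetic itself, not just the result; it matters on builds where unsigned long is 32 bits. A stand-alone demonstration of the difference (hypothetical, not from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t timestamp = 1404216000;	/* seconds since epoch, mid-2014 */
	unsigned int part = 2;
	int count = 3;

	/* Without the cast the multiplications happen in 32 bits and wrap
	 * long before the implicit conversion to 64 bits. */
	uint64_t wrapped = (timestamp * 100 + part) * 1000 + count;
	uint64_t correct = ((uint64_t) timestamp * 100 + part) * 1000 + count;

	printf("wrapped=%llu correct=%llu\n",
	       (unsigned long long) wrapped, (unsigned long long) correct);
	return 0;
}
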
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 2bdae61c0ac0..12c663e86ca1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -984,6 +984,8 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
if (enable) {
mutex_lock(&rdev->pm.mutex);
rdev->pm.dpm.uvd_active = true;
+ /* disable this for now */
+#if 0
if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
@@ -993,6 +995,7 @@ void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
else
+#endif
dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
rdev->pm.dpm.state = dpm_state;
mutex_unlock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 1b65ae2433cd..a4ad270e8261 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -812,7 +812,8 @@ void radeon_uvd_note_usage(struct radeon_device *rdev)
(rdev->pm.dpm.hd != hd)) {
rdev->pm.dpm.sd = sd;
rdev->pm.dpm.hd = hd;
- streams_changed = true;
+ /* disable this for now */
+ /*streams_changed = true;*/
}
}

diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index da52279de939..a5c7927c9bd2 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -842,7 +842,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
* ->numbered being checked, which may not always be the case when
* drivers go to access report values.
*/
- report = hid->report_enum[type].report_id_hash[id];
+ if (id == 0) {
+ /*
+ * Validating on id 0 means we should examine the first
+ * report in the list.
+ */
+ report = list_entry(
+ hid->report_enum[type].report_list.next,
+ struct hid_report, list);
+ } else {
+ report = hid->report_enum[type].report_id_hash[id];
+ }
if (!report) {
hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
return NULL;
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2e2d903db838..8d44a4060634 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -41,11 +41,11 @@
#include "iscsi_iser.h"

/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * iser_task->data[ISER_DIR_IN].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_IN].data_len, Protection size
+ * os stored in task->prot[ISER_DIR_IN].data_len
*/
-static int iser_prepare_read_cmd(struct iscsi_task *task,
- unsigned int edtl)
+static int iser_prepare_read_cmd(struct iscsi_task *task)

{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -73,14 +73,6 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
return err;
}

- if (edtl > iser_task->data[ISER_DIR_IN].data_len) {
- iser_err("Total data length: %ld, less than EDTL: "
- "%d, in READ cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_IN].data_len, edtl,
- task->itt, iser_task->ib_conn);
- return -EINVAL;
- }
-
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
@@ -100,8 +92,9 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
}

/* Register user buffer memory and initialize passive rdma
- * dto descriptor. Total data size is stored in
- * task->data[ISER_DIR_OUT].data_len
+ * dto descriptor. Data size is stored in
+ * task->data[ISER_DIR_OUT].data_len, Protection size
+ * is stored at task->prot[ISER_DIR_OUT].data_len
*/
static int
iser_prepare_write_cmd(struct iscsi_task *task,
@@ -135,14 +128,6 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return err;
}

- if (edtl > iser_task->data[ISER_DIR_OUT].data_len) {
- iser_err("Total data length: %ld, less than EDTL: %d, "
- "in WRITE cmd BHS itt: %d, conn: 0x%p\n",
- iser_task->data[ISER_DIR_OUT].data_len,
- edtl, task->itt, task->conn);
- return -EINVAL;
- }
-
err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
@@ -417,11 +402,12 @@ int iser_send_command(struct iscsi_conn *conn,
if (scsi_prot_sg_count(sc)) {
prot_buf->buf = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
- prot_buf->data_len = sc->prot_sdb->length;
+ prot_buf->data_len = data_buf->data_len >>
+ ilog2(sc->device->sector_size) * 8;
}

if (hdr->flags & ISCSI_FLAG_CMD_READ) {
- err = iser_prepare_read_cmd(task, edtl);
+ err = iser_prepare_read_cmd(task);
if (err)
goto send_command_error;
}
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b9d647468b99..d4c7928a0f36 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -663,8 +663,9 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)

pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
if (pi_support && !device->pi_capable) {
- pr_err("Protection information requested but not supported\n");
- ret = -EINVAL;
+ pr_err("Protection information requested but not supported, "
+ "rejecting connect request\n");
+ ret = rdma_reject(cma_id, NULL, 0);
goto out_mr;
}

@@ -787,14 +788,12 @@ isert_disconnect_work(struct work_struct *work)
isert_put_conn(isert_conn);
return;
}
- if (!isert_conn->logout_posted) {
- pr_debug("Calling rdma_disconnect for !logout_posted from"
- " isert_disconnect_work\n");
+
+ if (isert_conn->disconnect) {
+ /* Send DREQ/DREP towards our initiator */
rdma_disconnect(isert_conn->conn_cm_id);
- mutex_unlock(&isert_conn->conn_mutex);
- iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
- goto wake_up;
}
+
mutex_unlock(&isert_conn->conn_mutex);

wake_up:
@@ -803,10 +802,11 @@ wake_up:
}

static void
-isert_disconnected_handler(struct rdma_cm_id *cma_id)
+isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
{
struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;

+ isert_conn->disconnect = disconnect;
INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
schedule_work(&isert_conn->conn_logout_work);
}
@@ -815,29 +815,28 @@ static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
int ret = 0;
+ bool disconnect = false;

pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
event->event, event->status, cma_id->context, cma_id);

switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
- pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
ret = isert_connect_request(cma_id, event);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
isert_connected_handler(cma_id);
break;
- case RDMA_CM_EVENT_DISCONNECTED:
- pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
- isert_disconnected_handler(cma_id);
- break;
- case RDMA_CM_EVENT_DEVICE_REMOVAL:
- case RDMA_CM_EVENT_ADDR_CHANGE:
+ case RDMA_CM_EVENT_ADDR_CHANGE: /* FALLTHRU */
+ case RDMA_CM_EVENT_DISCONNECTED: /* FALLTHRU */
+ case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+ disconnect = true;
+ case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
+ isert_disconnected_handler(cma_id, disconnect);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
default:
- pr_err("Unknown RDMA CMA event: %d\n", event->event);
+ pr_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}

@@ -1054,7 +1053,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
}
if (!login->login_failed) {
if (login->login_complete) {
- if (isert_conn->conn_device->use_fastreg) {
+ if (!conn->sess->sess_ops->SessionType &&
+ isert_conn->conn_device->use_fastreg) {
+ /* Normal Session and fastreg is used */
u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;

ret = isert_conn_create_fastreg_pool(isert_conn,
@@ -1824,11 +1825,8 @@ isert_do_control_comp(struct work_struct *work)
break;
case ISTATE_SEND_LOGOUTRSP:
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
- /*
- * Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_wait_conn()
- */
- isert_conn->logout_posted = true;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
iscsit_logout_post_handler(cmd, cmd->conn);
break;
case ISTATE_SEND_TEXTRSP:
@@ -2034,6 +2032,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->conn_mutex);

+ iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+
complete(&isert_conn->conn_wait_comp_err);
}

@@ -2320,7 +2320,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
int rc;

isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
- rc = iscsit_build_text_rsp(cmd, conn, hdr);
+ rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
if (rc < 0)
return rc;

@@ -3156,9 +3156,14 @@ accept_wait:
return -ENODEV;

spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
- pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+ pr_debug("np_thread_state %d for isert_accept_np\n",
+ np->np_thread_state);
+ /**
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ **/
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
@@ -3208,15 +3213,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
struct isert_conn *isert_conn = conn->context;

pr_debug("isert_wait_conn: Starting \n");
- /*
- * Decrement post_send_buf_count for special case when called
- * from isert_do_control_comp() -> iscsit_logout_post_handler()
- */
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->logout_posted)
- atomic_dec(&isert_conn->post_send_buf_count);

- if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+ mutex_lock(&isert_conn->conn_mutex);
+ if (isert_conn->conn_cm_id) {
pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
@@ -3293,6 +3292,7 @@ destroy_rx_wq:

static void __exit isert_exit(void)
{
+ flush_scheduled_work();
destroy_workqueue(isert_comp_wq);
destroy_workqueue(isert_rx_wq);
iscsit_unregister_transport(&iser_target_transport);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index da6612e68000..04f51f7bf614 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -116,7 +116,6 @@ struct isert_device;

struct isert_conn {
enum iser_conn_state state;
- bool logout_posted;
int post_recv_buf_count;
atomic_t post_send_buf_count;
u32 responder_resources;
@@ -151,6 +150,7 @@ struct isert_conn {
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
+ bool disconnect;
};

#define ISERT_MAX_CQ 64
diff --git a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
index e1863dbf4edc..7a9b98bc208b 100644
--- a/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
+++ b/drivers/media/pci/ivtv/ivtv-alsa-pcm.c
@@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)

/* Instruct the CX2341[56] to start sending packets */
snd_ivtv_lock(itvsc);
+
+ if (ivtv_init_on_first_open(itv)) {
+ snd_ivtv_unlock(itvsc);
+ return -ENXIO;
+ }
+
s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];

v4l2_fh_init(&item.fh, s->vdev);
diff --git a/drivers/media/pci/saa7134/saa7134-video.c b/drivers/media/pci/saa7134/saa7134-video.c
index eb472b5b26a0..40396e8b16a8 100644
--- a/drivers/media/pci/saa7134/saa7134-video.c
+++ b/drivers/media/pci/saa7134/saa7134-video.c
@@ -1243,6 +1243,7 @@ static int video_release(struct file *file)
videobuf_streamoff(&dev->cap);
res_free(dev, fh, RESOURCE_VIDEO);
videobuf_mmap_free(&dev->cap);
+ INIT_LIST_HEAD(&dev->cap.stream);
}
if (dev->cap.read_buf) {
buffer_release(&dev->cap, dev->cap.read_buf);
@@ -1254,6 +1255,7 @@ static int video_release(struct file *file)
videobuf_stop(&dev->vbi);
res_free(dev, fh, RESOURCE_VBI);
videobuf_mmap_free(&dev->vbi);
+ INIT_LIST_HEAD(&dev->vbi.stream);
}

/* ts-capture will not work in planar mode, so turn it off Hac: 04.05*/
@@ -1987,17 +1989,12 @@ int saa7134_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct saa7134_dev *dev = video_drvdata(file);
- int err;
int res = saa7134_resource(file);

if (res != RESOURCE_EMPRESS)
pm_qos_remove_request(&dev->qos_request);

- err = videobuf_streamoff(saa7134_queue(file));
- if (err < 0)
- return err;
- res_free(dev, priv, res);
- return 0;
+ return videobuf_streamoff(saa7134_queue(file));
}
EXPORT_SYMBOL_GPL(saa7134_streamoff);

diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 128b73b6cce2..5476dce3ad29 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -367,6 +367,9 @@ static void fimc_is_free_cpu_memory(struct fimc_is *is)
{
struct device *dev = &is->pdev->dev;

+ if (is->memory.vaddr == NULL)
+ return;
+
dma_free_coherent(dev, is->memory.size, is->memory.vaddr,
is->memory.paddr);
}
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index e62211a80f0e..6e2d6042ade6 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -1520,7 +1520,7 @@ err:
}
#else
#define fimc_md_register_clk_provider(fmd) (0)
-#define fimc_md_unregister_clk_provider(fmd) (0)
+#define fimc_md_unregister_clk_provider(fmd)
#endif

static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
diff --git a/drivers/media/platform/exynos4-is/media-dev.h b/drivers/media/platform/exynos4-is/media-dev.h
index ee1e2519f728..58c49456b13f 100644
--- a/drivers/media/platform/exynos4-is/media-dev.h
+++ b/drivers/media/platform/exynos4-is/media-dev.h
@@ -94,7 +94,9 @@ struct fimc_sensor_info {
};

struct cam_clk {
+#ifdef CONFIG_COMMON_CLK
struct clk_hw hw;
+#endif
struct fimc_md *fmd;
};
#define to_cam_clk(_hw) container_of(_hw, struct cam_clk, hw)
@@ -142,7 +144,9 @@ struct fimc_md {

struct cam_clk_provider {
struct clk *clks[FIMC_MAX_CAMCLKS];
+#ifdef CONFIG_COMMON_CLK
struct clk_onecell_data clk_data;
+#endif
struct device_node *of_node;
struct cam_clk camclk[FIMC_MAX_CAMCLKS];
int num_clocks;
diff --git a/drivers/media/usb/stk1160/stk1160-core.c b/drivers/media/usb/stk1160/stk1160-core.c
index 34a26e0cfe77..03504dcf3c52 100644
--- a/drivers/media/usb/stk1160/stk1160-core.c
+++ b/drivers/media/usb/stk1160/stk1160-core.c
@@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
{
int ret;
int pipe = usb_rcvctrlpipe(dev->udev, 0);
+ u8 *buf;

*value = 0;
+
+ buf = kmalloc(sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
ret = usb_control_msg(dev->udev, pipe, 0x00,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, reg, value, sizeof(u8), HZ);
+ 0x00, reg, buf, sizeof(u8), HZ);
if (ret < 0) {
stk1160_err("read failed on reg 0x%x (%d)\n",
reg, ret);
+ kfree(buf);
return ret;
}

+ *value = *buf;
+ kfree(buf);
return 0;
}

diff --git a/drivers/media/usb/stk1160/stk1160.h b/drivers/media/usb/stk1160/stk1160.h
index 05b05b160e1e..abdea484c998 100644
--- a/drivers/media/usb/stk1160/stk1160.h
+++ b/drivers/media/usb/stk1160/stk1160.h
@@ -143,7 +143,6 @@ struct stk1160 {
int num_alt;

struct stk1160_isoc_ctl isoc_ctl;
- char urb_buf[255]; /* urb control msg buffer */

/* frame properties */
int width; /* current frame width */
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 8d52baf5952b..8496811fb7fa 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream,
* Clocks and timestamps
*/

+static inline void uvc_video_get_ts(struct timespec *ts)
+{
+ if (uvc_clock_param == CLOCK_MONOTONIC)
+ ktime_get_ts(ts);
+ else
+ ktime_get_real_ts(ts);
+}
+
static void
uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
const __u8 *data, int len)
@@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
stream->clock.last_sof = dev_sof;

host_sof = usb_get_current_frame_number(stream->dev->udev);
- ktime_get_ts(&ts);
+ uvc_video_get_ts(&ts);

/* The UVC specification allows device implementations that can't obtain
* the USB frame number to keep their own frame counters as long as they
@@ -1011,10 +1019,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -ENODATA;
}

- if (uvc_clock_param == CLOCK_MONOTONIC)
- ktime_get_ts(&ts);
- else
- ktime_get_real_ts(&ts);
+ uvc_video_get_ts(&ts);

buf->buf.v4l2_buf.sequence = stream->sequence;
buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
1707 diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
1708 index 2b859249303b..b0e61bf261a7 100644
1709 --- a/drivers/pci/hotplug/acpiphp.h
1710 +++ b/drivers/pci/hotplug/acpiphp.h
1711 @@ -142,6 +142,16 @@ static inline acpi_handle func_to_handle(struct acpiphp_func *func)
1712 return func_to_acpi_device(func)->handle;
1713 }
1714
1715 +struct acpiphp_root_context {
1716 + struct acpi_hotplug_context hp;
1717 + struct acpiphp_bridge *root_bridge;
1718 +};
1719 +
1720 +static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp)
1721 +{
1722 + return container_of(hp, struct acpiphp_root_context, hp);
1723 +}
1724 +
1725 /*
1726 * struct acpiphp_attention_info - device specific attention registration
1727 *
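
The new acpiphp_root_context follows the standard embed-and-recover idiom: the framework stores only a pointer to the generic member, and container_of() recovers the enclosing driver-specific structure from it. A self-contained sketch with a simplified container_of (the kernel's version adds type checking) and illustrative hp_context/root_context names:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Generic context handed around by framework code. */
    struct hp_context {
        int flags;
    };

    /* Driver-specific wrapper embedding the generic context. */
    struct root_context {
        struct hp_context hp;
        const char *root_name;
    };

    static struct root_context *to_root_context(struct hp_context *hp)
    {
        return container_of(hp, struct root_context, hp);
    }

    int main(void)
    {
        struct root_context rc = { .hp = { .flags = 1 }, .root_name = "root0" };
        struct hp_context *hp = &rc.hp;   /* what the framework stores */

        printf("%s\n", to_root_context(hp)->root_name);
        return 0;
    }
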
1728 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
1729 index bccc27ee1030..af53580cf4f5 100644
1730 --- a/drivers/pci/hotplug/acpiphp_glue.c
1731 +++ b/drivers/pci/hotplug/acpiphp_glue.c
1732 @@ -374,17 +374,13 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
1733
1734 static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
1735 {
1736 - struct acpiphp_context *context;
1737 struct acpiphp_bridge *bridge = NULL;
1738
1739 acpi_lock_hp_context();
1740 - context = acpiphp_get_context(adev);
1741 - if (context) {
1742 - bridge = context->bridge;
1743 + if (adev->hp) {
1744 + bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
1745 if (bridge)
1746 get_bridge(bridge);
1747 -
1748 - acpiphp_put_context(context);
1749 }
1750 acpi_unlock_hp_context();
1751 return bridge;
1752 @@ -883,7 +879,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
1753 */
1754 get_device(&bus->dev);
1755
1756 - if (!pci_is_root_bus(bridge->pci_bus)) {
1757 + acpi_lock_hp_context();
1758 + if (pci_is_root_bus(bridge->pci_bus)) {
1759 + struct acpiphp_root_context *root_context;
1760 +
1761 + root_context = kzalloc(sizeof(*root_context), GFP_KERNEL);
1762 + if (!root_context)
1763 + goto err;
1764 +
1765 + root_context->root_bridge = bridge;
1766 + acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
1767 + } else {
1768 struct acpiphp_context *context;
1769
1770 /*
1771 @@ -892,21 +898,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
1772 * parent is going to be handled by pciehp, in which case this
1773 * bridge is not interesting to us either.
1774 */
1775 - acpi_lock_hp_context();
1776 context = acpiphp_get_context(adev);
1777 - if (!context) {
1778 - acpi_unlock_hp_context();
1779 - put_device(&bus->dev);
1780 - pci_dev_put(bridge->pci_dev);
1781 - kfree(bridge);
1782 - return;
1783 - }
1784 + if (!context)
1785 + goto err;
1786 +
1787 bridge->context = context;
1788 context->bridge = bridge;
1789 /* Get a reference to the parent bridge. */
1790 get_bridge(context->func.parent);
1791 - acpi_unlock_hp_context();
1792 }
1793 + acpi_unlock_hp_context();
1794
1795 /* Must be added to the list prior to calling acpiphp_add_context(). */
1796 mutex_lock(&bridge_mutex);
1797 @@ -921,6 +922,30 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
1798 cleanup_bridge(bridge);
1799 put_bridge(bridge);
1800 }
1801 + return;
1802 +
1803 + err:
1804 + acpi_unlock_hp_context();
1805 + put_device(&bus->dev);
1806 + pci_dev_put(bridge->pci_dev);
1807 + kfree(bridge);
1808 +}
1809 +
1810 +void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
1811 +{
1812 + if (pci_is_root_bus(bridge->pci_bus)) {
1813 + struct acpiphp_root_context *root_context;
1814 + struct acpi_device *adev;
1815 +
1816 + acpi_lock_hp_context();
1817 + adev = ACPI_COMPANION(bridge->pci_bus->bridge);
1818 + root_context = to_acpiphp_root_context(adev->hp);
1819 + adev->hp = NULL;
1820 + acpi_unlock_hp_context();
1821 + kfree(root_context);
1822 + }
1823 + cleanup_bridge(bridge);
1824 + put_bridge(bridge);
1825 }
1826
1827 /**
1828 @@ -938,8 +963,7 @@ void acpiphp_remove_slots(struct pci_bus *bus)
1829 list_for_each_entry(bridge, &bridge_list, list)
1830 if (bridge->pci_bus == bus) {
1831 mutex_unlock(&bridge_mutex);
1832 - cleanup_bridge(bridge);
1833 - put_bridge(bridge);
1834 + acpiphp_drop_bridge(bridge);
1835 return;
1836 }
1837
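
The acpiphp_glue.c rework also shows why kernel code favors a single labeled error path: the old code duplicated the unlock/put/free sequence inline, while the new version funnels every failure through one err: block under the lock. The general unwind pattern, sketched with a hypothetical register_thing() step:

    #include <stdlib.h>

    /* Stand-in for a registration step that can fail. */
    static int register_thing(void *a, void *b)
    {
        (void)a; (void)b;
        return 0;
    }

    static int setup(void)
    {
        void *a, *b;

        a = malloc(32);
        if (!a)
            return -1;

        b = malloc(64);
        if (!b)
            goto err_free_a;

        if (register_thing(a, b) < 0)
            goto err_free_b;

        /* a and b stay live on success; ownership moves elsewhere. */
        return 0;

    err_free_b:
        free(b);
    err_free_a:
        free(a);
        return -1;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }

Each label undoes exactly the steps that succeeded before the jump, in reverse order of acquisition.
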
1838 diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
1839 index 7f139326a642..ff026689358c 100644
1840 --- a/drivers/phy/phy-exynos-mipi-video.c
1841 +++ b/drivers/phy/phy-exynos-mipi-video.c
1842 @@ -101,7 +101,7 @@ static struct phy *exynos_mipi_video_phy_xlate(struct device *dev,
1843 {
1844 struct exynos_mipi_video_phy *state = dev_get_drvdata(dev);
1845
1846 - if (WARN_ON(args->args[0] > EXYNOS_MIPI_PHYS_NUM))
1847 + if (WARN_ON(args->args[0] >= EXYNOS_MIPI_PHYS_NUM))
1848 return ERR_PTR(-ENODEV);
1849
1850 return state->phys[args->args[0]].phy;
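
The phy-exynos one-liner fixes the most common bounds-check mistake there is: for an array of N entries, index N is already out of range, so the rejection test must be >=, not >. The bcm2048 hunk further down fixes the identical pattern against ARRAY_SIZE(). A minimal demonstration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int phys[4] = { 10, 20, 30, 40 };

    static int lookup(unsigned int idx, int *out)
    {
        /* idx == ARRAY_SIZE(phys) is one past the end, so the
         * test must be >=, not >. */
        if (idx >= ARRAY_SIZE(phys))
            return -1;

        *out = phys[idx];
        return 0;
    }

    int main(void)
    {
        int v;

        if (lookup(4, &v) == 0)   /* rejected: one past the end */
            return 1;
        return lookup(3, &v);     /* last valid index */
    }
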
1851 diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
1852 index f19a30f0fb42..fdd68dd69049 100644
1853 --- a/drivers/regulator/s2mpa01.c
1854 +++ b/drivers/regulator/s2mpa01.c
1855 @@ -116,7 +116,6 @@ static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
1856 ramp_delay = s2mpa01->ramp_delay16;
1857
1858 ramp_shift = S2MPA01_BUCK16_RAMP_SHIFT;
1859 - ramp_reg = S2MPA01_REG_RAMP1;
1860 break;
1861 case S2MPA01_BUCK2:
1862 enable_shift = S2MPA01_BUCK2_RAMP_EN_SHIFT;
1863 @@ -192,11 +191,15 @@ static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
1864 if (!ramp_enable)
1865 goto ramp_disable;
1866
1867 - ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
1868 - 1 << enable_shift, 1 << enable_shift);
1869 - if (ret) {
1870 - dev_err(&rdev->dev, "failed to enable ramp rate\n");
1871 - return ret;
1872 + /* Ramp delay can be enabled/disabled only for buck[1234] */
1873 + if (rdev_get_id(rdev) >= S2MPA01_BUCK1 &&
1874 + rdev_get_id(rdev) <= S2MPA01_BUCK4) {
1875 + ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
1876 + 1 << enable_shift, 1 << enable_shift);
1877 + if (ret) {
1878 + dev_err(&rdev->dev, "failed to enable ramp rate\n");
1879 + return ret;
1880 + }
1881 }
1882
1883 ramp_val = get_ramp_delay(ramp_delay);
1884 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
1885 index e713c162fbd4..aaca37e1424f 100644
1886 --- a/drivers/regulator/s2mps11.c
1887 +++ b/drivers/regulator/s2mps11.c
1888 @@ -202,11 +202,16 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
1889 if (!ramp_enable)
1890 goto ramp_disable;
1891
1892 - ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
1893 - 1 << enable_shift, 1 << enable_shift);
1894 - if (ret) {
1895 - dev_err(&rdev->dev, "failed to enable ramp rate\n");
1896 - return ret;
1897 + /* Ramp delay can be enabled/disabled only for buck[2346] */
1898 + if ((rdev_get_id(rdev) >= S2MPS11_BUCK2 &&
1899 + rdev_get_id(rdev) <= S2MPS11_BUCK4) ||
1900 + rdev_get_id(rdev) == S2MPS11_BUCK6) {
1901 + ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
1902 + 1 << enable_shift, 1 << enable_shift);
1903 + if (ret) {
1904 + dev_err(&rdev->dev, "failed to enable ramp rate\n");
1905 + return ret;
1906 + }
1907 }
1908
1909 ramp_val = get_ramp_delay(ramp_delay);
1910 diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
1911 index 26dc005bb0f0..3f462349b16c 100644
1912 --- a/drivers/scsi/libiscsi.c
1913 +++ b/drivers/scsi/libiscsi.c
1914 @@ -338,7 +338,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1915 struct iscsi_session *session = conn->session;
1916 struct scsi_cmnd *sc = task->sc;
1917 struct iscsi_scsi_req *hdr;
1918 - unsigned hdrlength, cmd_len;
1919 + unsigned hdrlength, cmd_len, transfer_length;
1920 itt_t itt;
1921 int rc;
1922
1923 @@ -391,11 +391,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1924 if (scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
1925 task->protected = true;
1926
1927 + transfer_length = scsi_transfer_length(sc);
1928 + hdr->data_length = cpu_to_be32(transfer_length);
1929 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1930 - unsigned out_len = scsi_out(sc)->length;
1931 struct iscsi_r2t_info *r2t = &task->unsol_r2t;
1932
1933 - hdr->data_length = cpu_to_be32(out_len);
1934 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
1935 /*
1936 * Write counters:
1937 @@ -414,18 +414,19 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1938 memset(r2t, 0, sizeof(*r2t));
1939
1940 if (session->imm_data_en) {
1941 - if (out_len >= session->first_burst)
1942 + if (transfer_length >= session->first_burst)
1943 task->imm_count = min(session->first_burst,
1944 conn->max_xmit_dlength);
1945 else
1946 - task->imm_count = min(out_len,
1947 - conn->max_xmit_dlength);
1948 + task->imm_count = min(transfer_length,
1949 + conn->max_xmit_dlength);
1950 hton24(hdr->dlength, task->imm_count);
1951 } else
1952 zero_data(hdr->dlength);
1953
1954 if (!session->initial_r2t_en) {
1955 - r2t->data_length = min(session->first_burst, out_len) -
1956 + r2t->data_length = min(session->first_burst,
1957 + transfer_length) -
1958 task->imm_count;
1959 r2t->data_offset = task->imm_count;
1960 r2t->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1961 @@ -438,7 +439,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1962 } else {
1963 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1964 zero_data(hdr->dlength);
1965 - hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
1966
1967 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1968 hdr->flags |= ISCSI_FLAG_CMD_READ;
1969 @@ -466,7 +466,7 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
1970 scsi_bidi_cmnd(sc) ? "bidirectional" :
1971 sc->sc_data_direction == DMA_TO_DEVICE ?
1972 "write" : "read", conn->id, sc, sc->cmnd[0],
1973 - task->itt, scsi_bufflen(sc),
1974 + task->itt, transfer_length,
1975 scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
1976 session->cmdsn,
1977 session->max_cmdsn - session->exp_cmdsn + 1);
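
The libiscsi conversion to scsi_transfer_length(), and the matching target hunks below, all hinge on one piece of accounting: with T10 protection information on the wire, each logical block travels with an 8-byte integrity tuple, so the wire length exceeds the pure data length. A sketch of the arithmetic, assuming the usual 512-byte block and 8-byte DIF tuple:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE    512u  /* logical block payload */
    #define PI_TUPLE_SIZE   8u  /* DIF guard/app/ref tags per block */

    static uint32_t data_length(uint32_t sectors)
    {
        return sectors * BLOCK_SIZE;
    }

    /* What actually crosses the wire when PI is enabled. */
    static uint32_t transfer_length(uint32_t sectors, int pi_on_wire)
    {
        uint32_t len = data_length(sectors);

        if (pi_on_wire)
            len += sectors * PI_TUPLE_SIZE;
        return len;
    }

    int main(void)
    {
        printf("8 sectors, no PI: %u bytes\n", transfer_length(8, 0));
        printf("8 sectors, PI:    %u bytes\n", transfer_length(8, 1));
        return 0;
    }

For 8 sectors this prints 4096 bytes without PI and 4160 with it; using the wrong one of the two lengths is exactly the class of bug this series addresses.
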
1978 diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
1979 index d47dedd2cdb4..6f5efcc89880 100644
1980 --- a/drivers/staging/imx-drm/imx-hdmi.c
1981 +++ b/drivers/staging/imx-drm/imx-hdmi.c
1982 @@ -120,8 +120,6 @@ struct imx_hdmi {
1983 struct clk *isfr_clk;
1984 struct clk *iahb_clk;
1985
1986 - enum drm_connector_status connector_status;
1987 -
1988 struct hdmi_data_info hdmi_data;
1989 int vic;
1990
1991 @@ -1382,7 +1380,9 @@ static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector
1992 {
1993 struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi,
1994 connector);
1995 - return hdmi->connector_status;
1996 +
1997 + return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ?
1998 + connector_status_connected : connector_status_disconnected;
1999 }
2000
2001 static int imx_hdmi_connector_get_modes(struct drm_connector *connector)
2002 @@ -1524,7 +1524,6 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
2003
2004 hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0);
2005
2006 - hdmi->connector_status = connector_status_connected;
2007 imx_hdmi_poweron(hdmi);
2008 } else {
2009 dev_dbg(hdmi->dev, "EVENT=plugout\n");
2010 @@ -1532,7 +1531,6 @@ static irqreturn_t imx_hdmi_irq(int irq, void *dev_id)
2011 hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD,
2012 HDMI_PHY_POL0);
2013
2014 - hdmi->connector_status = connector_status_disconnected;
2015 imx_hdmi_poweroff(hdmi);
2016 }
2017 drm_helper_hpd_irq_event(hdmi->connector.dev);
2018 @@ -1606,7 +1604,6 @@ static int imx_hdmi_bind(struct device *dev, struct device *master, void *data)
2019 return -ENOMEM;
2020
2021 hdmi->dev = dev;
2022 - hdmi->connector_status = connector_status_disconnected;
2023 hdmi->sample_rate = 48000;
2024 hdmi->ratio = 100;
2025
2026 diff --git a/drivers/staging/media/bcm2048/radio-bcm2048.c b/drivers/staging/media/bcm2048/radio-bcm2048.c
2027 index b2cd3a85166d..bbf236e842a9 100644
2028 --- a/drivers/staging/media/bcm2048/radio-bcm2048.c
2029 +++ b/drivers/staging/media/bcm2048/radio-bcm2048.c
2030 @@ -737,7 +737,7 @@ static int bcm2048_set_region(struct bcm2048_device *bdev, u8 region)
2031 int err;
2032 u32 new_frequency = 0;
2033
2034 - if (region > ARRAY_SIZE(region_configs))
2035 + if (region >= ARRAY_SIZE(region_configs))
2036 return -EINVAL;
2037
2038 mutex_lock(&bdev->mutex);
2039 diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c
2040 index 51dbc13e757f..5a40925680ac 100644
2041 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c
2042 +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c
2043 @@ -924,6 +924,7 @@ static int spinand_remove(struct spi_device *spi)
2044
2045 static const struct of_device_id spinand_dt[] = {
2046 { .compatible = "spinand,mt29f", },
2047 + {}
2048 };
2049
2050 /*
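
The one-line spinand fix adds the terminating empty entry that every of_device_id table needs: the OF matching code walks the table until it hits an all-zero sentinel, and without one it reads past the end of the array. The convention in miniature, with a pared-down of_id type:

    #include <stdio.h>
    #include <string.h>

    struct of_id {
        const char *compatible;
    };

    /* The empty entry terminates the table; iterators stop there
     * instead of running off the end. */
    static const struct of_id ids[] = {
        { .compatible = "spinand,mt29f" },
        { }                              /* sentinel */
    };

    static const struct of_id *match(const char *compat)
    {
        const struct of_id *id;

        for (id = ids; id->compatible; id++)
            if (strcmp(id->compatible, compat) == 0)
                return id;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", match("spinand,mt29f") ? "found" : "missing");
        return 0;
    }
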
2051 diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
2052 index 3dd90599fd4b..6c9e9a16b2e9 100644
2053 --- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
2054 +++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
2055 @@ -1599,13 +1599,18 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
2056 pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
2057 if (pIE == NULL)
2058 return _FAIL;
2059 + if (ie_len > NDIS_802_11_LENGTH_RATES_EX)
2060 + return _FAIL;
2061
2062 memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
2063 supportRateNum = ie_len;
2064
2065 pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
2066 - if (pIE)
2067 + if (pIE) {
2068 + if (supportRateNum + ie_len > NDIS_802_11_LENGTH_RATES_EX)
2069 + return _FAIL;
2070 memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
2071 + }
2072
2073 return _SUCCESS;
2074 }
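
The rtl8188eu hunk is a remote-input validation fix: the rate IE lengths come from received frames, so both the first copy and the cumulative second copy must be checked against the fixed-size SupportedRates destination before memcpy(). The same guard in a self-contained form, with hypothetical sta_info/add_rates names:

    #include <stdint.h>
    #include <string.h>

    #define MAX_RATES 16

    struct sta_info {
        uint8_t rates[MAX_RATES];
    };

    /* Reject oversized or cumulatively overflowing copies up front:
     * IE lengths come from the peer and must not be trusted. */
    static int add_rates(struct sta_info *sta, size_t have,
                         const uint8_t *ie, size_t ie_len)
    {
        if (have > MAX_RATES || ie_len > MAX_RATES - have)
            return -1;

        memcpy(sta->rates + have, ie, ie_len);
        return 0;
    }

    int main(void)
    {
        struct sta_info sta = { { 0 } };
        const uint8_t basic[8] = { 0 }, ext[12] = { 0 };

        if (add_rates(&sta, 0, basic, sizeof(basic)))
            return 1;
        /* 8 + 12 > 16: rejected instead of overflowing. */
        return add_rates(&sta, sizeof(basic), ext, sizeof(ext)) ? 0 : 1;
    }

Note the second test is written as ie_len > MAX_RATES - have rather than have + ie_len > MAX_RATES, so the comparison itself cannot wrap after the first check has bounded have.
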
2075 diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c
2076 index 2f084e181d39..a1aca4416ca7 100644
2077 --- a/drivers/staging/tidspbridge/core/dsp-clock.c
2078 +++ b/drivers/staging/tidspbridge/core/dsp-clock.c
2079 @@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
2080 case GPT_CLK:
2081 status = omap_dm_timer_start(timer[clk_id - 1]);
2082 break;
2083 -#ifdef CONFIG_OMAP_MCBSP
2084 +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
2085 case MCBSP_CLK:
2086 omap_mcbsp_request(MCBSP_ID(clk_id));
2087 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
2088 @@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
2089 case GPT_CLK:
2090 status = omap_dm_timer_stop(timer[clk_id - 1]);
2091 break;
2092 -#ifdef CONFIG_OMAP_MCBSP
2093 +#ifdef CONFIG_SND_OMAP_SOC_MCBSP
2094 case MCBSP_CLK:
2095 omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
2096 omap_mcbsp_free(MCBSP_ID(clk_id));
2097 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2098 index 9189bc0a87ae..ca2bc348ef5b 100644
2099 --- a/drivers/target/iscsi/iscsi_target.c
2100 +++ b/drivers/target/iscsi/iscsi_target.c
2101 @@ -3390,7 +3390,9 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
2102
2103 #define SENDTARGETS_BUF_LIMIT 32768U
2104
2105 -static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
2106 +static int
2107 +iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
2108 + enum iscsit_transport_type network_transport)
2109 {
2110 char *payload = NULL;
2111 struct iscsi_conn *conn = cmd->conn;
2112 @@ -3467,6 +3469,9 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
2113 struct iscsi_np *np = tpg_np->tpg_np;
2114 bool inaddr_any = iscsit_check_inaddr_any(np);
2115
2116 + if (np->np_network_transport != network_transport)
2117 + continue;
2118 +
2119 if (!target_name_printed) {
2120 len = sprintf(buf, "TargetName=%s",
2121 tiqn->tiqn);
2122 @@ -3520,11 +3525,12 @@ eob:
2123
2124 int
2125 iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2126 - struct iscsi_text_rsp *hdr)
2127 + struct iscsi_text_rsp *hdr,
2128 + enum iscsit_transport_type network_transport)
2129 {
2130 int text_length, padding;
2131
2132 - text_length = iscsit_build_sendtargets_response(cmd);
2133 + text_length = iscsit_build_sendtargets_response(cmd, network_transport);
2134 if (text_length < 0)
2135 return text_length;
2136
2137 @@ -3562,7 +3568,7 @@ static int iscsit_send_text_rsp(
2138 u32 tx_size = 0;
2139 int text_length, iov_count = 0, rc;
2140
2141 - rc = iscsit_build_text_rsp(cmd, conn, hdr);
2142 + rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
2143 if (rc < 0)
2144 return rc;
2145
2146 @@ -4234,8 +4240,6 @@ int iscsit_close_connection(
2147 if (conn->conn_transport->iscsit_wait_conn)
2148 conn->conn_transport->iscsit_wait_conn(conn);
2149
2150 - iscsit_free_queue_reqs_for_conn(conn);
2151 -
2152 /*
2153 * During Connection recovery drop unacknowledged out of order
2154 * commands for this connection, and prepare the other commands
2155 @@ -4252,6 +4256,7 @@ int iscsit_close_connection(
2156 iscsit_clear_ooo_cmdsns_for_conn(conn);
2157 iscsit_release_commands_from_conn(conn);
2158 }
2159 + iscsit_free_queue_reqs_for_conn(conn);
2160
2161 /*
2162 * Handle decrementing session or connection usage count if
2163 diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
2164 index c886ad1c39fb..1f4c015e9078 100644
2165 --- a/drivers/target/loopback/tcm_loop.c
2166 +++ b/drivers/target/loopback/tcm_loop.c
2167 @@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
2168 struct tcm_loop_hba *tl_hba;
2169 struct tcm_loop_tpg *tl_tpg;
2170 struct scatterlist *sgl_bidi = NULL;
2171 - u32 sgl_bidi_count = 0;
2172 + u32 sgl_bidi_count = 0, transfer_length;
2173 int rc;
2174
2175 tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
2176 @@ -213,12 +213,21 @@ static void tcm_loop_submission_work(struct work_struct *work)
2177
2178 }
2179
2180 - if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
2181 + transfer_length = scsi_transfer_length(sc);
2182 + if (!scsi_prot_sg_count(sc) &&
2183 + scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
2184 se_cmd->prot_pto = true;
2185 + /*
2186 + * loopback transport doesn't support
2187 + * WRITE_GENERATE, READ_STRIP protection
2188 + * information operations, go ahead unprotected.
2189 + */
2190 + transfer_length = scsi_bufflen(sc);
2191 + }
2192
2193 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
2194 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
2195 - scsi_bufflen(sc), tcm_loop_sam_attr(sc),
2196 + transfer_length, tcm_loop_sam_attr(sc),
2197 sc->sc_data_direction, 0,
2198 scsi_sglist(sc), scsi_sg_count(sc),
2199 sgl_bidi, sgl_bidi_count,
2200 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
2201 index e0229592ec55..bcbc6810666d 100644
2202 --- a/drivers/target/target_core_sbc.c
2203 +++ b/drivers/target/target_core_sbc.c
2204 @@ -81,7 +81,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
2205 transport_kunmap_data_sg(cmd);
2206 }
2207
2208 - target_complete_cmd(cmd, GOOD);
2209 + target_complete_cmd_with_length(cmd, GOOD, 8);
2210 return 0;
2211 }
2212
2213 @@ -137,7 +137,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
2214 transport_kunmap_data_sg(cmd);
2215 }
2216
2217 - target_complete_cmd(cmd, GOOD);
2218 + target_complete_cmd_with_length(cmd, GOOD, 32);
2219 return 0;
2220 }
2221
2222 @@ -665,8 +665,19 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
2223
2224 cmd->prot_type = dev->dev_attrib.pi_prot_type;
2225 cmd->prot_length = dev->prot_length * sectors;
2226 - pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
2227 - __func__, cmd->prot_type, cmd->prot_length,
2228 +
2229 + /**
2230 + * In case protection information exists over the wire
2231 + * we modify command data length to describe pure data.
2232 + * The actual transfer length is data length + protection
2233 + * length
2234 + **/
2235 + if (protect)
2236 + cmd->data_length = sectors * dev->dev_attrib.block_size;
2237 +
2238 + pr_debug("%s: prot_type=%d, data_length=%d, prot_length=%d "
2239 + "prot_op=%d prot_checks=%d\n",
2240 + __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
2241 cmd->prot_op, cmd->prot_checks);
2242
2243 return true;
2244 diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
2245 index 8653666612a8..d24df1a6afc1 100644
2246 --- a/drivers/target/target_core_spc.c
2247 +++ b/drivers/target/target_core_spc.c
2248 @@ -721,6 +721,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
2249 unsigned char *buf;
2250 sense_reason_t ret;
2251 int p;
2252 + int len = 0;
2253
2254 buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
2255 if (!buf) {
2256 @@ -742,6 +743,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
2257 }
2258
2259 ret = spc_emulate_inquiry_std(cmd, buf);
2260 + len = buf[4] + 5;
2261 goto out;
2262 }
2263
2264 @@ -749,6 +751,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
2265 if (cdb[2] == evpd_handlers[p].page) {
2266 buf[1] = cdb[2];
2267 ret = evpd_handlers[p].emulate(cmd, buf);
2268 + len = get_unaligned_be16(&buf[2]) + 4;
2269 goto out;
2270 }
2271 }
2272 @@ -765,7 +768,7 @@ out:
2273 kfree(buf);
2274
2275 if (!ret)
2276 - target_complete_cmd(cmd, GOOD);
2277 + target_complete_cmd_with_length(cmd, GOOD, len);
2278 return ret;
2279 }
2280
2281 @@ -1103,7 +1106,7 @@ set_length:
2282 transport_kunmap_data_sg(cmd);
2283 }
2284
2285 - target_complete_cmd(cmd, GOOD);
2286 + target_complete_cmd_with_length(cmd, GOOD, length);
2287 return 0;
2288 }
2289
2290 @@ -1279,7 +1282,7 @@ done:
2291 buf[3] = (lun_count & 0xff);
2292 transport_kunmap_data_sg(cmd);
2293
2294 - target_complete_cmd(cmd, GOOD);
2295 + target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
2296 return 0;
2297 }
2298 EXPORT_SYMBOL(spc_emulate_report_luns);
2299 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2300 index a51dd4efc23b..14772e98d3d2 100644
2301 --- a/drivers/target/target_core_transport.c
2302 +++ b/drivers/target/target_core_transport.c
2303 @@ -562,7 +562,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
2304
2305 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2306
2307 - complete(&cmd->t_transport_stop_comp);
2308 + complete_all(&cmd->t_transport_stop_comp);
2309 return 1;
2310 }
2311
2312 @@ -687,7 +687,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
2313 if (cmd->transport_state & CMD_T_ABORTED &&
2314 cmd->transport_state & CMD_T_STOP) {
2315 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2316 - complete(&cmd->t_transport_stop_comp);
2317 + complete_all(&cmd->t_transport_stop_comp);
2318 return;
2319 } else if (!success) {
2320 INIT_WORK(&cmd->work, target_complete_failure_work);
2321 @@ -703,6 +703,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
2322 }
2323 EXPORT_SYMBOL(target_complete_cmd);
2324
2325 +void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
2326 +{
2327 + if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
2328 + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
2329 + cmd->residual_count += cmd->data_length - length;
2330 + } else {
2331 + cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
2332 + cmd->residual_count = cmd->data_length - length;
2333 + }
2334 +
2335 + cmd->data_length = length;
2336 + }
2337 +
2338 + target_complete_cmd(cmd, scsi_status);
2339 +}
2340 +EXPORT_SYMBOL(target_complete_cmd_with_length);
2341 +
2342 static void target_add_to_state_list(struct se_cmd *cmd)
2343 {
2344 struct se_device *dev = cmd->se_dev;
2345 @@ -1761,7 +1778,7 @@ void target_execute_cmd(struct se_cmd *cmd)
2346 cmd->se_tfo->get_task_tag(cmd));
2347
2348 spin_unlock_irq(&cmd->t_state_lock);
2349 - complete(&cmd->t_transport_stop_comp);
2350 + complete_all(&cmd->t_transport_stop_comp);
2351 return;
2352 }
2353
2354 @@ -2938,6 +2955,12 @@ static void target_tmr_work(struct work_struct *work)
2355 int transport_generic_handle_tmr(
2356 struct se_cmd *cmd)
2357 {
2358 + unsigned long flags;
2359 +
2360 + spin_lock_irqsave(&cmd->t_state_lock, flags);
2361 + cmd->transport_state |= CMD_T_ACTIVE;
2362 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2363 +
2364 INIT_WORK(&cmd->work, target_tmr_work);
2365 queue_work(cmd->se_dev->tmr_wq, &cmd->work);
2366 return 0;
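
Two themes run through this target_core_transport.c hunk. The complete() to complete_all() switches matter because more than one context may block on t_transport_stop_comp. The new target_complete_cmd_with_length() adds residual accounting: when an emulated command builds a payload shorter than the initiator's allocation length, the difference is reported as an underflow residual instead of being padded with stale buffer contents, which is what the spc/sbc callers above rely on. The arithmetic in isolation, with a pared-down cmd struct:

    #include <stdint.h>
    #include <stdio.h>

    struct cmd {
        uint32_t data_length;   /* what the initiator asked for */
        uint32_t residual;      /* how much less is actually returned */
        int      underflow;
    };

    /* Mirrors the new helper's arithmetic: if the payload built is
     * shorter than the allocation length, report the difference as a
     * residual and shrink the valid data length. */
    static void complete_with_length(struct cmd *cmd, uint32_t length)
    {
        if (length < cmd->data_length) {
            cmd->underflow = 1;
            cmd->residual = cmd->data_length - length;
            cmd->data_length = length;
        }
    }

    int main(void)
    {
        struct cmd c = { .data_length = 256 };

        complete_with_length(&c, 36);   /* e.g. a short INQUIRY payload */
        printf("valid=%u residual=%u\n", c.data_length, c.residual);
        return 0;
    }
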
2367 diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
2368 index 99246606a256..27981e2b9430 100644
2369 --- a/drivers/tty/serial/of_serial.c
2370 +++ b/drivers/tty/serial/of_serial.c
2371 @@ -173,6 +173,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
2372 {
2373 struct uart_8250_port port8250;
2374 memset(&port8250, 0, sizeof(port8250));
2375 + port.type = port_type;
2376 port8250.port = port;
2377
2378 if (port.fifosize)
2379 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2380 index 70715eeededd..85f398d3184d 100644
2381 --- a/drivers/usb/dwc3/gadget.c
2382 +++ b/drivers/usb/dwc3/gadget.c
2383 @@ -604,6 +604,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
2384
2385 dwc3_remove_requests(dwc, dep);
2386
2387 + /* make sure HW endpoint isn't stalled */
2388 + if (dep->flags & DWC3_EP_STALL)
2389 + __dwc3_gadget_ep_set_halt(dep, 0);
2390 +
2391 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
2392 reg &= ~DWC3_DALEPENA_EP(dep->number);
2393 dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
2394 diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
2395 index a925d0cbcd41..a0863a2d2142 100644
2396 --- a/drivers/usb/gadget/inode.c
2397 +++ b/drivers/usb/gadget/inode.c
2398 @@ -1501,7 +1501,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
2399 }
2400 break;
2401
2402 -#ifndef CONFIG_USB_GADGET_PXA25X
2403 +#ifndef CONFIG_USB_PXA25X
2404 /* PXA automagically handles this request too */
2405 case USB_REQ_GET_CONFIGURATION:
2406 if (ctrl->bRequestType != 0x80)
2407 diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
2408 index 4a6d3dd68572..2f3acebb577a 100644
2409 --- a/drivers/usb/host/pci-quirks.c
2410 +++ b/drivers/usb/host/pci-quirks.c
2411 @@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
2412 DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
2413 },
2414 },
2415 + {
2416 + /* HASEE E200 */
2417 + .matches = {
2418 + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
2419 + DMI_MATCH(DMI_BOARD_NAME, "E210"),
2420 + DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
2421 + },
2422 + },
2423 { }
2424 };
2425
2426 @@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
2427 {
2428 int try_handoff = 1, tried_handoff = 0;
2429
2430 - /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
2431 - * the handoff on its unused controller. Skip it. */
2432 - if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
2433 + /*
2434 + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
2435 + * the handoff on its unused controller. Skip it.
2436 + *
2437 + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
2438 + */
2439 + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
2440 + pdev->device == 0x27cc)) {
2441 if (dmi_check_system(ehci_dmi_nohandoff_table))
2442 try_handoff = 0;
2443 }
2444 diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
2445 index f6568b5e9b06..71dcacbab398 100644
2446 --- a/drivers/usb/misc/usbtest.c
2447 +++ b/drivers/usb/misc/usbtest.c
2448 @@ -7,7 +7,7 @@
2449 #include <linux/moduleparam.h>
2450 #include <linux/scatterlist.h>
2451 #include <linux/mutex.h>
2452 -
2453 +#include <linux/timer.h>
2454 #include <linux/usb.h>
2455
2456 #define SIMPLE_IO_TIMEOUT 10000 /* in milliseconds */
2457 @@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
2458 return sg;
2459 }
2460
2461 +static void sg_timeout(unsigned long _req)
2462 +{
2463 + struct usb_sg_request *req = (struct usb_sg_request *) _req;
2464 +
2465 + req->status = -ETIMEDOUT;
2466 + usb_sg_cancel(req);
2467 +}
2468 +
2469 static int perform_sglist(
2470 struct usbtest_dev *tdev,
2471 unsigned iterations,
2472 @@ -495,6 +503,9 @@ static int perform_sglist(
2473 {
2474 struct usb_device *udev = testdev_to_usbdev(tdev);
2475 int retval = 0;
2476 + struct timer_list sg_timer;
2477 +
2478 + setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
2479
2480 while (retval == 0 && iterations-- > 0) {
2481 retval = usb_sg_init(req, udev, pipe,
2482 @@ -505,7 +516,10 @@ static int perform_sglist(
2483
2484 if (retval)
2485 break;
2486 + mod_timer(&sg_timer, jiffies +
2487 + msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
2488 usb_sg_wait(req);
2489 + del_timer_sync(&sg_timer);
2490 retval = req->status;
2491
2492 /* FIXME check resulting data pattern */
2493 @@ -1320,6 +1334,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
2494 urb->context = &completion;
2495 urb->complete = unlink1_callback;
2496
2497 + if (usb_pipeout(urb->pipe)) {
2498 + simple_fill_buf(urb);
2499 + urb->transfer_flags |= URB_ZERO_PACKET;
2500 + }
2501 +
2502 /* keep the endpoint busy. there are lots of hc/hcd-internal
2503 * states, and testing should get to all of them over time.
2504 *
2505 @@ -1450,6 +1469,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
2506 unlink_queued_callback, &ctx);
2507 ctx.urbs[i]->transfer_dma = buf_dma;
2508 ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
2509 +
2510 + if (usb_pipeout(ctx.urbs[i]->pipe)) {
2511 + simple_fill_buf(ctx.urbs[i]);
2512 + ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
2513 + }
2514 }
2515
2516 /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
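
The usbtest changes bound a potentially unbounded wait: usb_sg_wait() blocks until the request completes, so the patch arms a timer first (mod_timer), lets its handler cancel the request on expiry, and disarms it afterwards (del_timer_sync). A userspace analogue of the same arm/block/disarm pattern using alarm(), purely illustrative since the kernel APIs differ:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t timed_out;

    static void on_alarm(int sig)
    {
        (void)sig;
        timed_out = 1;   /* in the driver this cancels the request */
    }

    int main(void)
    {
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = on_alarm;   /* no SA_RESTART: the wait returns */
        sigaction(SIGALRM, &sa, NULL);

        alarm(2);        /* arm the watchdog before blocking */
        sleep(5);        /* stand-in for usb_sg_wait() */
        alarm(0);        /* disarm on the normal path */

        printf(timed_out ? "timed out\n" : "completed\n");
        return timed_out ? 1 : 0;
    }

Run as is, the 2-second alarm interrupts the 5-second stand-in wait and the program reports the timeout.
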
2517 diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
2518 index 6e146d723b37..69e49be8866b 100644
2519 --- a/drivers/usb/phy/phy-isp1301-omap.c
2520 +++ b/drivers/usb/phy/phy-isp1301-omap.c
2521 @@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
2522 return isp1301_otg_enable(isp);
2523 return 0;
2524
2525 -#elif !defined(CONFIG_USB_GADGET_OMAP)
2526 +#elif !IS_ENABLED(CONFIG_USB_OMAP)
2527 // FIXME update its refcount
2528 otg->host = host;
2529
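
This isp1301 fix corrects two things at once: the Kconfig symbol had been renamed (CONFIG_USB_GADGET_OMAP became CONFIG_USB_OMAP, as in the gadgetfs hunk earlier), and plain #ifdef misses drivers built as modules, where the preprocessor defines CONFIG_FOO_MODULE instead of CONFIG_FOO. IS_ENABLED() covers both cases. A simplified model of the semantics (the kernel implements the macro differently, but with the same truth table):

    #include <stdio.h>

    /* Simplified model of kconfig output: =y defines CONFIG_FOO,
     * =m defines CONFIG_FOO_MODULE instead. */
    #define CONFIG_FOO_MODULE 1

    #if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
    #define FOO_ENABLED 1
    #else
    #define FOO_ENABLED 0
    #endif

    int main(void)
    {
    #ifdef CONFIG_FOO
        puts("plain ifdef sees built-in only");
    #endif
        if (FOO_ENABLED)        /* IS_ENABLED-style: =y or =m */
            puts("enabled as built-in or module");
        return 0;
    }

With the driver configured as a module, only the IS_ENABLED-style test fires, which is precisely the case the old #ifdef got wrong.
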
2530 diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
2531 index 35a2373cde67..9374bd2aba20 100644
2532 --- a/drivers/usb/serial/bus.c
2533 +++ b/drivers/usb/serial/bus.c
2534 @@ -97,13 +97,19 @@ static int usb_serial_device_remove(struct device *dev)
2535 struct usb_serial_port *port;
2536 int retval = 0;
2537 int minor;
2538 + int autopm_err;
2539
2540 port = to_usb_serial_port(dev);
2541 if (!port)
2542 return -ENODEV;
2543
2544 - /* make sure suspend/resume doesn't race against port_remove */
2545 - usb_autopm_get_interface(port->serial->interface);
2546 + /*
2547 + * Make sure suspend/resume doesn't race against port_remove.
2548 + *
2549 + * Note that no further runtime PM callbacks will be made if
2550 + * autopm_get fails.
2551 + */
2552 + autopm_err = usb_autopm_get_interface(port->serial->interface);
2553
2554 minor = port->minor;
2555 tty_unregister_device(usb_serial_tty_driver, minor);
2556 @@ -117,7 +123,9 @@ static int usb_serial_device_remove(struct device *dev)
2557 dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
2558 driver->description, minor);
2559
2560 - usb_autopm_put_interface(port->serial->interface);
2561 + if (!autopm_err)
2562 + usb_autopm_put_interface(port->serial->interface);
2563 +
2564 return retval;
2565 }
2566
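
The usb-serial bus.c change encodes a general rule for balanced acquire/release APIs: remember whether the acquire actually succeeded, and only issue the matching release if it did. In outline:

    #include <stdio.h>

    static int acquire(void)  { return 0; /* 0 on success */ }
    static void release(void) { puts("released"); }

    static void do_remove(void)
    {
        int err = acquire();

        /* ... teardown work that must happen regardless ... */

        if (!err)            /* balance only a successful acquire */
            release();
    }

    int main(void)
    {
        do_remove();
        return 0;
    }

The option.c hunk that follows applies the same discipline to a get/put pair around a control message.
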
2567 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2568 index 948a19f0cdf7..70ede84f4f6b 100644
2569 --- a/drivers/usb/serial/option.c
2570 +++ b/drivers/usb/serial/option.c
2571 @@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port)
2572 struct option_private *priv = intfdata->private;
2573 struct usb_wwan_port_private *portdata;
2574 int val = 0;
2575 + int res;
2576
2577 portdata = usb_get_serial_port_data(port);
2578
2579 @@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port)
2580 if (portdata->rts_state)
2581 val |= 0x02;
2582
2583 - return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2584 + res = usb_autopm_get_interface(serial->interface);
2585 + if (res)
2586 + return res;
2587 +
2588 + res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
2589 0x22, 0x21, val, priv->bInterfaceNumber, NULL,
2590 0, USB_CTRL_SET_TIMEOUT);
2591 +
2592 + usb_autopm_put_interface(serial->interface);
2593 +
2594 + return res;
2595 }
2596
2597 MODULE_AUTHOR(DRIVER_AUTHOR);
2598 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
2599 index 6c0a542e8ec1..43d93dbf7d71 100644
2600 --- a/drivers/usb/serial/qcserial.c
2601 +++ b/drivers/usb/serial/qcserial.c
2602 @@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = {
2603 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)}, /* Sierra Wireless EM7355 Device Management */
2604 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)}, /* Sierra Wireless EM7355 NMEA */
2605 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)}, /* Sierra Wireless EM7355 Modem */
2606 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)}, /* Sierra Wireless Modem Device Management */
2607 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)}, /* Sierra Wireless Modem NMEA */
2608 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)}, /* Sierra Wireless Modem Modem */
2609 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)}, /* Sierra Wireless MC7305/MC7355 Device Management */
2610 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)}, /* Sierra Wireless MC7305/MC7355 NMEA */
2611 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)}, /* Sierra Wireless MC7305/MC7355 Modem */
2612 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
2613 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
2614 {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
2615 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)}, /* Sierra Wireless Modem Device Management */
2616 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)}, /* Sierra Wireless Modem NMEA */
2617 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)}, /* Sierra Wireless Modem Modem */
2618 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)}, /* Sierra Wireless Modem Device Management */
2619 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)}, /* Sierra Wireless Modem NMEA */
2620 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)}, /* Sierra Wireless Modem Modem */
2621 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)}, /* Netgear AirCard 341U Device Management */
2622 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)}, /* Netgear AirCard 341U NMEA */
2623 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)}, /* Netgear AirCard 341U Modem */
2624 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)}, /* Sierra Wireless Modem Device Management */
2625 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)}, /* Sierra Wireless Modem NMEA */
2626 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)}, /* Sierra Wireless Modem Modem */
2627 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)}, /* Sierra Wireless Modem Device Management */
2628 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)}, /* Sierra Wireless Modem NMEA */
2629 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)}, /* Sierra Wireless Modem Modem */
2630 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)}, /* Sierra Wireless Modem Device Management */
2631 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)}, /* Sierra Wireless Modem NMEA */
2632 + {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)}, /* Sierra Wireless Modem Modem */
2633 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
2634 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
2635 {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
2636 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
2637 index 6b192e602ce0..37480348e39b 100644
2638 --- a/drivers/usb/serial/sierra.c
2639 +++ b/drivers/usb/serial/sierra.c
2640 @@ -58,6 +58,7 @@ struct sierra_intf_private {
2641 spinlock_t susp_lock;
2642 unsigned int suspended:1;
2643 int in_flight;
2644 + unsigned int open_ports;
2645 };
2646
2647 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
2648 @@ -759,6 +760,7 @@ static void sierra_close(struct usb_serial_port *port)
2649 struct usb_serial *serial = port->serial;
2650 struct sierra_port_private *portdata;
2651 struct sierra_intf_private *intfdata = port->serial->private;
2652 + struct urb *urb;
2653
2654 portdata = usb_get_serial_port_data(port);
2655
2656 @@ -767,7 +769,6 @@ static void sierra_close(struct usb_serial_port *port)
2657
2658 mutex_lock(&serial->disc_mutex);
2659 if (!serial->disconnected) {
2660 - serial->interface->needs_remote_wakeup = 0;
2661 /* odd error handling due to pm counters */
2662 if (!usb_autopm_get_interface(serial->interface))
2663 sierra_send_setup(port);
2664 @@ -778,8 +779,22 @@ static void sierra_close(struct usb_serial_port *port)
2665 mutex_unlock(&serial->disc_mutex);
2666 spin_lock_irq(&intfdata->susp_lock);
2667 portdata->opened = 0;
2668 + if (--intfdata->open_ports == 0)
2669 + serial->interface->needs_remote_wakeup = 0;
2670 spin_unlock_irq(&intfdata->susp_lock);
2671
2672 + for (;;) {
2673 + urb = usb_get_from_anchor(&portdata->delayed);
2674 + if (!urb)
2675 + break;
2676 + kfree(urb->transfer_buffer);
2677 + usb_free_urb(urb);
2678 + usb_autopm_put_interface_async(serial->interface);
2679 + spin_lock(&portdata->lock);
2680 + portdata->outstanding_urbs--;
2681 + spin_unlock(&portdata->lock);
2682 + }
2683 +
2684 sierra_stop_rx_urbs(port);
2685 for (i = 0; i < portdata->num_in_urbs; i++) {
2686 sierra_release_urb(portdata->in_urbs[i]);
2687 @@ -816,23 +831,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
2688 usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
2689
2690 err = sierra_submit_rx_urbs(port, GFP_KERNEL);
2691 - if (err) {
2692 - /* get rid of everything as in close */
2693 - sierra_close(port);
2694 - /* restore balance for autopm */
2695 - if (!serial->disconnected)
2696 - usb_autopm_put_interface(serial->interface);
2697 - return err;
2698 - }
2699 + if (err)
2700 + goto err_submit;
2701 +
2702 sierra_send_setup(port);
2703
2704 - serial->interface->needs_remote_wakeup = 1;
2705 spin_lock_irq(&intfdata->susp_lock);
2706 portdata->opened = 1;
2707 + if (++intfdata->open_ports == 1)
2708 + serial->interface->needs_remote_wakeup = 1;
2709 spin_unlock_irq(&intfdata->susp_lock);
2710 usb_autopm_put_interface(serial->interface);
2711
2712 return 0;
2713 +
2714 +err_submit:
2715 + sierra_stop_rx_urbs(port);
2716 +
2717 + for (i = 0; i < portdata->num_in_urbs; i++) {
2718 + sierra_release_urb(portdata->in_urbs[i]);
2719 + portdata->in_urbs[i] = NULL;
2720 + }
2721 +
2722 + return err;
2723 }
2724
2725
2726 @@ -928,6 +949,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
2727 struct sierra_port_private *portdata;
2728
2729 portdata = usb_get_serial_port_data(port);
2730 + usb_set_serial_port_data(port, NULL);
2731 kfree(portdata);
2732
2733 return 0;
2734 @@ -944,6 +966,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
2735 for (i = 0; i < serial->num_ports; ++i) {
2736 port = serial->port[i];
2737 portdata = usb_get_serial_port_data(port);
2738 + if (!portdata)
2739 + continue;
2740 sierra_stop_rx_urbs(port);
2741 usb_kill_anchored_urbs(&portdata->active);
2742 }
2743 @@ -986,6 +1010,9 @@ static int sierra_resume(struct usb_serial *serial)
2744 port = serial->port[i];
2745 portdata = usb_get_serial_port_data(port);
2746
2747 + if (!portdata)
2748 + continue;
2749 +
2750 while ((urb = usb_get_from_anchor(&portdata->delayed))) {
2751 usb_anchor_urb(urb, &portdata->active);
2752 intfdata->in_flight++;
2753 @@ -993,8 +1020,12 @@ static int sierra_resume(struct usb_serial *serial)
2754 if (err < 0) {
2755 intfdata->in_flight--;
2756 usb_unanchor_urb(urb);
2757 - usb_scuttle_anchored_urbs(&portdata->delayed);
2758 - break;
2759 + kfree(urb->transfer_buffer);
2760 + usb_free_urb(urb);
2761 + spin_lock(&portdata->lock);
2762 + portdata->outstanding_urbs--;
2763 + spin_unlock(&portdata->lock);
2764 + continue;
2765 }
2766 }
2767
2768 diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
2769 index b078440e822f..d91a9883e869 100644
2770 --- a/drivers/usb/serial/usb_wwan.c
2771 +++ b/drivers/usb/serial/usb_wwan.c
2772 @@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
2773 usb_pipeendpoint(this_urb->pipe), i);
2774
2775 err = usb_autopm_get_interface_async(port->serial->interface);
2776 - if (err < 0)
2777 + if (err < 0) {
2778 + clear_bit(i, &portdata->out_busy);
2779 break;
2780 + }
2781
2782 /* send the data */
2783 memcpy(this_urb->transfer_buffer, buf, todo);
2784 @@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
2785 portdata = usb_get_serial_port_data(port);
2786 intfdata = serial->private;
2787
2788 + if (port->interrupt_in_urb) {
2789 + err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
2790 + if (err) {
2791 + dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
2792 + __func__, err);
2793 + }
2794 + }
2795 +
2796 /* Start reading from the IN endpoint */
2797 for (i = 0; i < N_IN_URB; i++) {
2798 urb = portdata->in_urbs[i];
2799 @@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
2800 }
2801 EXPORT_SYMBOL(usb_wwan_open);
2802
2803 +static void unbusy_queued_urb(struct urb *urb,
2804 + struct usb_wwan_port_private *portdata)
2805 +{
2806 + int i;
2807 +
2808 + for (i = 0; i < N_OUT_URB; i++) {
2809 + if (urb == portdata->out_urbs[i]) {
2810 + clear_bit(i, &portdata->out_busy);
2811 + break;
2812 + }
2813 + }
2814 +}
2815 +
2816 void usb_wwan_close(struct usb_serial_port *port)
2817 {
2818 int i;
2819 struct usb_serial *serial = port->serial;
2820 struct usb_wwan_port_private *portdata;
2821 struct usb_wwan_intf_private *intfdata = port->serial->private;
2822 + struct urb *urb;
2823
2824 portdata = usb_get_serial_port_data(port);
2825
2826 @@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
2827 portdata->opened = 0;
2828 spin_unlock_irq(&intfdata->susp_lock);
2829
2830 + for (;;) {
2831 + urb = usb_get_from_anchor(&portdata->delayed);
2832 + if (!urb)
2833 + break;
2834 + unbusy_queued_urb(urb, portdata);
2835 + usb_autopm_put_interface_async(serial->interface);
2836 + }
2837 +
2838 for (i = 0; i < N_IN_URB; i++)
2839 usb_kill_urb(portdata->in_urbs[i]);
2840 for (i = 0; i < N_OUT_URB; i++)
2841 usb_kill_urb(portdata->out_urbs[i]);
2842 + usb_kill_urb(port->interrupt_in_urb);
2843
2844 /* balancing - important as an error cannot be handled*/
2845 usb_autopm_get_interface_no_resume(serial->interface);
2846 @@ -463,7 +496,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
2847 struct usb_wwan_port_private *portdata;
2848 struct urb *urb;
2849 u8 *buffer;
2850 - int err;
2851 int i;
2852
2853 if (!port->bulk_in_size || !port->bulk_out_size)
2854 @@ -503,13 +535,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
2855
2856 usb_set_serial_port_data(port, portdata);
2857
2858 - if (port->interrupt_in_urb) {
2859 - err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
2860 - if (err)
2861 - dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
2862 - __func__, err);
2863 - }
2864 -
2865 return 0;
2866
2867 bail_out_error2:
2868 @@ -577,44 +602,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
2869 int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
2870 {
2871 struct usb_wwan_intf_private *intfdata = serial->private;
2872 - int b;
2873
2874 + spin_lock_irq(&intfdata->susp_lock);
2875 if (PMSG_IS_AUTO(message)) {
2876 - spin_lock_irq(&intfdata->susp_lock);
2877 - b = intfdata->in_flight;
2878 - spin_unlock_irq(&intfdata->susp_lock);
2879 -
2880 - if (b)
2881 + if (intfdata->in_flight) {
2882 + spin_unlock_irq(&intfdata->susp_lock);
2883 return -EBUSY;
2884 + }
2885 }
2886 -
2887 - spin_lock_irq(&intfdata->susp_lock);
2888 intfdata->suspended = 1;
2889 spin_unlock_irq(&intfdata->susp_lock);
2890 +
2891 stop_read_write_urbs(serial);
2892
2893 return 0;
2894 }
2895 EXPORT_SYMBOL(usb_wwan_suspend);
2896
2897 -static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
2898 -{
2899 - int i;
2900 -
2901 - for (i = 0; i < N_OUT_URB; i++) {
2902 - if (urb == portdata->out_urbs[i]) {
2903 - clear_bit(i, &portdata->out_busy);
2904 - break;
2905 - }
2906 - }
2907 -}
2908 -
2909 -static void play_delayed(struct usb_serial_port *port)
2910 +static int play_delayed(struct usb_serial_port *port)
2911 {
2912 struct usb_wwan_intf_private *data;
2913 struct usb_wwan_port_private *portdata;
2914 struct urb *urb;
2915 - int err;
2916 + int err = 0;
2917
2918 portdata = usb_get_serial_port_data(port);
2919 data = port->serial->private;
2920 @@ -631,6 +641,8 @@ static void play_delayed(struct usb_serial_port *port)
2921 break;
2922 }
2923 }
2924 +
2925 + return err;
2926 }
2927
2928 int usb_wwan_resume(struct usb_serial *serial)
2929 @@ -640,54 +652,51 @@ int usb_wwan_resume(struct usb_serial *serial)
2930 struct usb_wwan_intf_private *intfdata = serial->private;
2931 struct usb_wwan_port_private *portdata;
2932 struct urb *urb;
2933 - int err = 0;
2934 -
2935 - /* get the interrupt URBs resubmitted unconditionally */
2936 - for (i = 0; i < serial->num_ports; i++) {
2937 - port = serial->port[i];
2938 - if (!port->interrupt_in_urb) {
2939 - dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
2940 - continue;
2941 - }
2942 - err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
2943 - dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
2944 - if (err < 0) {
2945 - dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
2946 - __func__, err);
2947 - goto err_out;
2948 - }
2949 - }
2950 + int err;
2951 + int err_count = 0;
2952
2953 + spin_lock_irq(&intfdata->susp_lock);
2954 for (i = 0; i < serial->num_ports; i++) {
2955 /* walk all ports */
2956 port = serial->port[i];
2957 portdata = usb_get_serial_port_data(port);
2958
2959 /* skip closed ports */
2960 - spin_lock_irq(&intfdata->susp_lock);
2961 - if (!portdata || !portdata->opened) {
2962 - spin_unlock_irq(&intfdata->susp_lock);
2963 + if (!portdata || !portdata->opened)
2964 continue;
2965 +
2966 + if (port->interrupt_in_urb) {
2967 + err = usb_submit_urb(port->interrupt_in_urb,
2968 + GFP_ATOMIC);
2969 + if (err) {
2970 + dev_err(&port->dev,
2971 + "%s: submit int urb failed: %d\n",
2972 + __func__, err);
2973 + err_count++;
2974 + }
2975 }
2976
2977 + err = play_delayed(port);
2978 + if (err)
2979 + err_count++;
2980 +
2981 for (j = 0; j < N_IN_URB; j++) {
2982 urb = portdata->in_urbs[j];
2983 err = usb_submit_urb(urb, GFP_ATOMIC);
2984 if (err < 0) {
2985 dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
2986 __func__, err, i);
2987 - spin_unlock_irq(&intfdata->susp_lock);
2988 - goto err_out;
2989 + err_count++;
2990 }
2991 }
2992 - play_delayed(port);
2993 - spin_unlock_irq(&intfdata->susp_lock);
2994 }
2995 - spin_lock_irq(&intfdata->susp_lock);
2996 intfdata->suspended = 0;
2997 spin_unlock_irq(&intfdata->susp_lock);
2998 -err_out:
2999 - return err;
3000 +
3001 + if (err_count)
3002 + return -EIO;
3003 +
3004 + return 0;
3005 }
3006 EXPORT_SYMBOL(usb_wwan_resume);
3007 #endif
3008 diff --git a/drivers/video/fbdev/matrox/matroxfb_base.h b/drivers/video/fbdev/matrox/matroxfb_base.h
3009 index 556d96ce40bf..89a8a89a5eb2 100644
3010 --- a/drivers/video/fbdev/matrox/matroxfb_base.h
3011 +++ b/drivers/video/fbdev/matrox/matroxfb_base.h
3012 @@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
3013
3014 #define mga_fifo(n) do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
3015
3016 -#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
3017 +#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
3018
3019 /* code speedup */
3020 #ifdef CONFIG_FB_MATROX_MILLENIUM
3021 diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
3022 index 7d44d669d5b6..43a0a52fc527 100644
3023 --- a/drivers/video/fbdev/offb.c
3024 +++ b/drivers/video/fbdev/offb.c
3025 @@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
3026 #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
3027 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
3028
3029 -#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
3030 -
3031 -static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
3032 -{
3033 - u32 bpp = info->var.bits_per_pixel;
3034 -
3035 - return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
3036 -}
3037 -
3038 /*
3039 * Set a single color register. The values supplied are already
3040 * rounded down to the hardware's capabilities (according to the
3041 @@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
3042 mask <<= info->var.transp.offset;
3043 value |= mask;
3044 }
3045 - pal[regno] = offb_cmap_byteswap(info, value);
3046 + pal[regno] = value;
3047 return 0;
3048 }
3049
3050 diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
3051 index ff52618cafbe..5d7341520544 100644
3052 --- a/drivers/w1/w1.c
3053 +++ b/drivers/w1/w1.c
3054 @@ -1078,6 +1078,8 @@ static void w1_search_process(struct w1_master *dev, u8 search_type)
3055 * w1_process_callbacks() - execute each dev->async_list callback entry
3056 * @dev: w1_master device
3057 *
3058 + * The w1 master list_mutex must be held.
3059 + *
3060 * Return: 1 if there were commands to executed 0 otherwise
3061 */
3062 int w1_process_callbacks(struct w1_master *dev)
3063 diff --git a/drivers/w1/w1_int.c b/drivers/w1/w1_int.c
3064 index 9b084db739c7..728039d2efe1 100644
3065 --- a/drivers/w1/w1_int.c
3066 +++ b/drivers/w1/w1_int.c
3067 @@ -219,9 +219,13 @@ void __w1_remove_master_device(struct w1_master *dev)
3068
3069 if (msleep_interruptible(1000))
3070 flush_signals(current);
3071 + mutex_lock(&dev->list_mutex);
3072 w1_process_callbacks(dev);
3073 + mutex_unlock(&dev->list_mutex);
3074 }
3075 + mutex_lock(&dev->list_mutex);
3076 w1_process_callbacks(dev);
3077 + mutex_unlock(&dev->list_mutex);
3078
3079 memset(&msg, 0, sizeof(msg));
3080 msg.id.mst.id = dev->id;
3081 diff --git a/fs/aio.c b/fs/aio.c
3082 index a0ed6c7d2cd2..e609e15f36b9 100644
3083 --- a/fs/aio.c
3084 +++ b/fs/aio.c
3085 @@ -1021,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
3086
3087 /* everything turned out well, dispose of the aiocb. */
3088 kiocb_free(iocb);
3089 + put_reqs_available(ctx, 1);
3090
3091 /*
3092 * We have to order our ring_info tail store above and test
3093 @@ -1062,6 +1063,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
3094 if (head == tail)
3095 goto out;
3096
3097 + head %= ctx->nr_events;
3098 + tail %= ctx->nr_events;
3099 +
3100 while (ret < nr) {
3101 long avail;
3102 struct io_event *ev;
3103 @@ -1100,8 +1104,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
3104 flush_dcache_page(ctx->ring_pages[0]);
3105
3106 pr_debug("%li h%u t%u\n", ret, head, tail);
3107 -
3108 - put_reqs_available(ctx, ret);
3109 out:
3110 mutex_unlock(&ctx->ring_lock);
3111
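
Besides moving put_reqs_available() so completed events are credited back at completion time rather than only when userspace reaps them, the aio fix reduces head and tail modulo the ring size before using them: the values read back from the shared ring page are not trusted to be in range. The indexing discipline in miniature:

    #include <stdio.h>

    #define RING_SIZE 8   /* any size; kernel rings are often powers of two */

    static int ring[RING_SIZE];

    /* head/tail may be free-running counters; reduce them modulo the
     * ring size only when indexing, and do it consistently. */
    static int ring_get(unsigned int head)
    {
        return ring[head % RING_SIZE];
    }

    static void ring_put(unsigned int tail, int v)
    {
        ring[tail % RING_SIZE] = v;
    }

    int main(void)
    {
        unsigned int head = 0, tail = 0;

        ring_put(tail++, 42);
        printf("%d\n", ring_get(head++));
        printf("head=%u tail=%u\n", head, tail);
        return 0;
    }

Forgetting the reduction on just one of the two indices is how out-of-bounds reads like the one fixed here creep in.
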
3112 diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
3113 index 10db21fa0926..b2e9b2063572 100644
3114 --- a/fs/btrfs/backref.c
3115 +++ b/fs/btrfs/backref.c
3116 @@ -984,11 +984,12 @@ again:
3117 goto out;
3118 }
3119 if (ref->count && ref->parent) {
3120 - if (extent_item_pos && !ref->inode_list) {
3121 + if (extent_item_pos && !ref->inode_list &&
3122 + ref->level == 0) {
3123 u32 bsz;
3124 struct extent_buffer *eb;
3125 bsz = btrfs_level_size(fs_info->extent_root,
3126 - info_level);
3127 + ref->level);
3128 eb = read_tree_block(fs_info->extent_root,
3129 ref->parent, bsz, 0);
3130 if (!eb || !extent_buffer_uptodate(eb)) {
3131 @@ -1404,9 +1405,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
3132 * returns <0 on error
3133 */
3134 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
3135 - struct btrfs_extent_item *ei, u32 item_size,
3136 - struct btrfs_extent_inline_ref **out_eiref,
3137 - int *out_type)
3138 + struct btrfs_key *key,
3139 + struct btrfs_extent_item *ei, u32 item_size,
3140 + struct btrfs_extent_inline_ref **out_eiref,
3141 + int *out_type)
3142 {
3143 unsigned long end;
3144 u64 flags;
3145 @@ -1416,19 +1418,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
3146 /* first call */
3147 flags = btrfs_extent_flags(eb, ei);
3148 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3149 - info = (struct btrfs_tree_block_info *)(ei + 1);
3150 - *out_eiref =
3151 - (struct btrfs_extent_inline_ref *)(info + 1);
3152 + if (key->type == BTRFS_METADATA_ITEM_KEY) {
3153 + /* a skinny metadata extent */
3154 + *out_eiref =
3155 + (struct btrfs_extent_inline_ref *)(ei + 1);
3156 + } else {
3157 + WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
3158 + info = (struct btrfs_tree_block_info *)(ei + 1);
3159 + *out_eiref =
3160 + (struct btrfs_extent_inline_ref *)(info + 1);
3161 + }
3162 } else {
3163 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
3164 }
3165 *ptr = (unsigned long)*out_eiref;
3166 - if ((void *)*ptr >= (void *)ei + item_size)
3167 + if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
3168 return -ENOENT;
3169 }
3170
3171 end = (unsigned long)ei + item_size;
3172 - *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
3173 + *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
3174 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
3175
3176 *ptr += btrfs_extent_inline_ref_size(*out_type);
3177 @@ -1447,8 +1456,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
3178 * <0 on error.
3179 */
3180 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
3181 - struct btrfs_extent_item *ei, u32 item_size,
3182 - u64 *out_root, u8 *out_level)
3183 + struct btrfs_key *key, struct btrfs_extent_item *ei,
3184 + u32 item_size, u64 *out_root, u8 *out_level)
3185 {
3186 int ret;
3187 int type;
3188 @@ -1459,8 +1468,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
3189 return 1;
3190
3191 while (1) {
3192 - ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
3193 - &eiref, &type);
3194 + ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
3195 + &eiref, &type);
3196 if (ret < 0)
3197 return ret;
3198
3199 diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
3200 index a910b27a8ad9..519b49e51f57 100644
3201 --- a/fs/btrfs/backref.h
3202 +++ b/fs/btrfs/backref.h
3203 @@ -40,8 +40,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
3204 u64 *flags);
3205
3206 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
3207 - struct btrfs_extent_item *ei, u32 item_size,
3208 - u64 *out_root, u8 *out_level);
3209 + struct btrfs_key *key, struct btrfs_extent_item *ei,
3210 + u32 item_size, u64 *out_root, u8 *out_level);
3211
3212 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
3213 u64 extent_item_objectid,
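The extra btrfs_key parameter threaded through these hunks lets __get_extent_inline_ref() tell the two on-disk layouts apart: for BTRFS_EXTENT_ITEM_KEY a btrfs_tree_block_info sits between the extent item and the inline refs, while for a skinny BTRFS_METADATA_ITEM_KEY the refs follow the extent item directly, so the old unconditional (info + 1) stepped past the start of the refs. A minimal layout sketch (stand-in struct definitions, not the real on-disk headers):

#include <stddef.h>
#include <stdio.h>

/* Stand-ins only; the real definitions live in fs/btrfs/ctree.h. */
struct extent_item { unsigned long long refs, generation, flags; };
struct tree_block_info { unsigned char disk_key[17]; unsigned char level; };

int main(void)
{
	/* BTRFS_EXTENT_ITEM_KEY: item, tree_block_info, then inline refs */
	size_t classic = sizeof(struct extent_item) + sizeof(struct tree_block_info);
	/* BTRFS_METADATA_ITEM_KEY (skinny): inline refs follow the item */
	size_t skinny = sizeof(struct extent_item);

	printf("first inline ref: +%zu bytes (classic), +%zu bytes (skinny)\n",
	       classic, skinny);
	return 0;
}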
3214 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
3215 index ba6b88528dc7..9e80f527776a 100644
3216 --- a/fs/btrfs/ctree.h
3217 +++ b/fs/btrfs/ctree.h
3218 @@ -1113,6 +1113,12 @@ struct btrfs_qgroup_limit_item {
3219 __le64 rsv_excl;
3220 } __attribute__ ((__packed__));
3221
3222 +/* For raid type sysfs entries */
3223 +struct raid_kobject {
3224 + int raid_type;
3225 + struct kobject kobj;
3226 +};
3227 +
3228 struct btrfs_space_info {
3229 spinlock_t lock;
3230
3231 @@ -1163,7 +1169,7 @@ struct btrfs_space_info {
3232 wait_queue_head_t wait;
3233
3234 struct kobject kobj;
3235 - struct kobject block_group_kobjs[BTRFS_NR_RAID_TYPES];
3236 + struct kobject *block_group_kobjs[BTRFS_NR_RAID_TYPES];
3237 };
3238
3239 #define BTRFS_BLOCK_RSV_GLOBAL 1
3240 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
3241 index 983314932af3..a62a5bdc0502 100644
3242 --- a/fs/btrfs/disk-io.c
3243 +++ b/fs/btrfs/disk-io.c
3244 @@ -3633,6 +3633,11 @@ int close_ctree(struct btrfs_root *root)
3245
3246 btrfs_free_block_groups(fs_info);
3247
3248 + /*
3249 + * we must make sure there are no read requests to
3250 + * submit after we stop all workers.
3251 + */
3252 + invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
3253 btrfs_stop_all_workers(fs_info);
3254
3255 free_root_pointers(fs_info, 1);
3256 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3257 index 5590af92094b..5c343a9909cd 100644
3258 --- a/fs/btrfs/extent-tree.c
3259 +++ b/fs/btrfs/extent-tree.c
3260 @@ -3401,10 +3401,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3261 return ret;
3262 }
3263
3264 - for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3265 + for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3266 INIT_LIST_HEAD(&found->block_groups[i]);
3267 - kobject_init(&found->block_group_kobjs[i], &btrfs_raid_ktype);
3268 - }
3269 init_rwsem(&found->groups_sem);
3270 spin_lock_init(&found->lock);
3271 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3272 @@ -8327,8 +8325,9 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
3273 list_del(&space_info->list);
3274 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
3275 struct kobject *kobj;
3276 - kobj = &space_info->block_group_kobjs[i];
3277 - if (kobj->parent) {
3278 + kobj = space_info->block_group_kobjs[i];
3279 + space_info->block_group_kobjs[i] = NULL;
3280 + if (kobj) {
3281 kobject_del(kobj);
3282 kobject_put(kobj);
3283 }
3284 @@ -8352,17 +8351,26 @@ static void __link_block_group(struct btrfs_space_info *space_info,
3285 up_write(&space_info->groups_sem);
3286
3287 if (first) {
3288 - struct kobject *kobj = &space_info->block_group_kobjs[index];
3289 + struct raid_kobject *rkobj;
3290 int ret;
3291
3292 - kobject_get(&space_info->kobj); /* put in release */
3293 - ret = kobject_add(kobj, &space_info->kobj, "%s",
3294 - get_raid_name(index));
3295 + rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
3296 + if (!rkobj)
3297 + goto out_err;
3298 + rkobj->raid_type = index;
3299 + kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
3300 + ret = kobject_add(&rkobj->kobj, &space_info->kobj,
3301 + "%s", get_raid_name(index));
3302 if (ret) {
3303 - pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
3304 - kobject_put(&space_info->kobj);
3305 + kobject_put(&rkobj->kobj);
3306 + goto out_err;
3307 }
3308 + space_info->block_group_kobjs[index] = &rkobj->kobj;
3309 }
3310 +
3311 + return;
3312 +out_err:
3313 + pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
3314 }
3315
3316 static struct btrfs_block_group_cache *
3317 @@ -8697,6 +8705,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
3318 struct btrfs_root *tree_root = root->fs_info->tree_root;
3319 struct btrfs_key key;
3320 struct inode *inode;
3321 + struct kobject *kobj = NULL;
3322 int ret;
3323 int index;
3324 int factor;
3325 @@ -8796,11 +8805,15 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
3326 */
3327 list_del_init(&block_group->list);
3328 if (list_empty(&block_group->space_info->block_groups[index])) {
3329 - kobject_del(&block_group->space_info->block_group_kobjs[index]);
3330 - kobject_put(&block_group->space_info->block_group_kobjs[index]);
3331 + kobj = block_group->space_info->block_group_kobjs[index];
3332 + block_group->space_info->block_group_kobjs[index] = NULL;
3333 clear_avail_alloc_bits(root->fs_info, block_group->flags);
3334 }
3335 up_write(&block_group->space_info->groups_sem);
3336 + if (kobj) {
3337 + kobject_del(kobj);
3338 + kobject_put(kobj);
3339 + }
3340
3341 if (block_group->cached == BTRFS_CACHE_STARTED)
3342 wait_block_group_cache_done(block_group);
3343 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
3344 index 3955e475ceec..a2badb027ae6 100644
3345 --- a/fs/btrfs/extent_io.c
3346 +++ b/fs/btrfs/extent_io.c
3347 @@ -1693,6 +1693,7 @@ again:
3348 * shortening the size of the delalloc range we're searching
3349 */
3350 free_extent_state(cached_state);
3351 + cached_state = NULL;
3352 if (!loops) {
3353 max_bytes = PAGE_CACHE_SIZE;
3354 loops = 1;
3355 @@ -2353,7 +2354,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
3356 {
3357 int uptodate = (err == 0);
3358 struct extent_io_tree *tree;
3359 - int ret;
3360 + int ret = 0;
3361
3362 tree = &BTRFS_I(page->mapping->host)->io_tree;
3363
3364 @@ -2367,6 +2368,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
3365 if (!uptodate) {
3366 ClearPageUptodate(page);
3367 SetPageError(page);
3368 + ret = ret < 0 ? ret : -EIO;
3369 + mapping_set_error(page->mapping, ret);
3370 }
3371 return 0;
3372 }
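The end_extent_writepage() hunk does more than flag the page: mapping_set_error() latches the failure on the address_space so a later fsync()/msync() can report it to userspace. A simplified userspace model of that latching (hypothetical struct and helpers; the real kernel tracks separate AS_EIO/AS_ENOSPC bits):

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for the error state on a struct address_space. */
struct mapping { int error; };

static void mapping_set_error(struct mapping *m, int error)
{
	if (error && !m->error)	/* latch the first failure */
		m->error = error;
}

static int filemap_check_errors(struct mapping *m)
{
	int err = m->error;

	m->error = 0;	/* one-shot: consumed by this sync */
	return err;
}

int main(void)
{
	struct mapping m = { 0 };

	mapping_set_error(&m, -EIO);	/* a writepage failed            */
	mapping_set_error(&m, 0);	/* later success changes nothing */
	printf("fsync() would return %d\n", filemap_check_errors(&m));
	return 0;
}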
3373 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
3374 index ae6af072b635..3029925e96d7 100644
3375 --- a/fs/btrfs/file.c
3376 +++ b/fs/btrfs/file.c
3377 @@ -780,6 +780,18 @@ next_slot:
3378 extent_end = search_start;
3379 }
3380
3381 + /*
3382 + * Don't skip extent items representing 0 byte lengths. They
3383 + * used to be created (due to a bug) when punching holes hit an
3384 + * -ENOSPC condition. So if we find one here, just ensure we
3385 + * delete it, otherwise we would insert a new file extent item
3386 + * with the same key (offset) as that 0 bytes length file
3387 + * extent item in the call to setup_items_for_insert() later
3388 + * in this function.
3389 + */
3390 + if (extent_end == key.offset && extent_end >= search_start)
3391 + goto delete_extent_item;
3392 +
3393 if (extent_end <= search_start) {
3394 path->slots[0]++;
3395 goto next_slot;
3396 @@ -893,6 +905,7 @@ next_slot:
3397 * | ------ extent ------ |
3398 */
3399 if (start <= key.offset && end >= extent_end) {
3400 +delete_extent_item:
3401 if (del_nr == 0) {
3402 del_slot = path->slots[0];
3403 del_nr = 1;
3404 @@ -2187,13 +2200,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
3405 bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
3406 ((offset + len - 1) >> PAGE_CACHE_SHIFT));
3407 bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
3408 - u64 ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
3409 + u64 ino_size;
3410
3411 ret = btrfs_wait_ordered_range(inode, offset, len);
3412 if (ret)
3413 return ret;
3414
3415 mutex_lock(&inode->i_mutex);
3416 + ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
3417 /*
3418 * We needn't truncate any page which is beyond the end of the file
3419 * because we are sure there is no data there.
3420 @@ -2347,7 +2361,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
3421 }
3422
3423 trans->block_rsv = &root->fs_info->trans_block_rsv;
3424 - if (cur_offset < ino_size) {
3425 + /*
3426 + * Don't insert a file hole extent item if it's for a range beyond eof
3427 + * (because it's useless) or if it represents a 0-byte range (when
3428 + * cur_offset == drop_end).
3429 + */
3430 + if (cur_offset < ino_size && cur_offset < drop_end) {
3431 ret = fill_holes(trans, inode, path, cur_offset, drop_end);
3432 if (ret) {
3433 err = ret;
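The btrfs_punch_hole() hunk moves the ino_size computation below mutex_lock(&inode->i_mutex) because i_size may change before the lock is taken, and rounding a stale size could misjudge which pages still need truncating. For reference, the kernel's round_up() for a power-of-two alignment behaves like this small demo:

#include <stdio.h>

/* Power-of-two round up, equivalent to the kernel's round_up(). */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	printf("%lu\n", round_up(5000UL, 4096UL));	/* 8192 */
	printf("%lu\n", round_up(4096UL, 4096UL));	/* 4096 */
	return 0;
}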
3434 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
3435 index 73f3de7a083c..a6bd654dcd47 100644
3436 --- a/fs/btrfs/free-space-cache.c
3437 +++ b/fs/btrfs/free-space-cache.c
3438 @@ -831,7 +831,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
3439
3440 if (!matched) {
3441 __btrfs_remove_free_space_cache(ctl);
3442 - btrfs_err(fs_info, "block group %llu has wrong amount of free space",
3443 + btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
3444 block_group->key.objectid);
3445 ret = -1;
3446 }
3447 @@ -843,7 +843,7 @@ out:
3448 spin_unlock(&block_group->lock);
3449 ret = 0;
3450
3451 - btrfs_err(fs_info, "failed to load free space cache for block group %llu",
3452 + btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
3453 block_group->key.objectid);
3454 }
3455
3456 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
3457 index 0be77993378e..12afb0dd3734 100644
3458 --- a/fs/btrfs/scrub.c
3459 +++ b/fs/btrfs/scrub.c
3460 @@ -588,8 +588,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
3461
3462 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
3463 do {
3464 - ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
3465 - &ref_root, &ref_level);
3466 + ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
3467 + item_size, &ref_root,
3468 + &ref_level);
3469 printk_in_rcu(KERN_WARNING
3470 "BTRFS: %s at logical %llu on dev %s, "
3471 "sector %llu: metadata %s (level %d) in tree "
3472 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3473 index 484aacac2c89..6c9c084aa06a 100644
3474 --- a/fs/btrfs/send.c
3475 +++ b/fs/btrfs/send.c
3476 @@ -975,7 +975,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
3477 struct btrfs_dir_item *di;
3478 struct btrfs_key di_key;
3479 char *buf = NULL;
3480 - const int buf_len = PATH_MAX;
3481 + int buf_len;
3482 u32 name_len;
3483 u32 data_len;
3484 u32 cur;
3485 @@ -985,6 +985,11 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
3486 int num;
3487 u8 type;
3488
3489 + if (found_key->type == BTRFS_XATTR_ITEM_KEY)
3490 + buf_len = BTRFS_MAX_XATTR_SIZE(root);
3491 + else
3492 + buf_len = PATH_MAX;
3493 +
3494 buf = kmalloc(buf_len, GFP_NOFS);
3495 if (!buf) {
3496 ret = -ENOMEM;
3497 @@ -1006,12 +1011,23 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
3498 type = btrfs_dir_type(eb, di);
3499 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
3500
3501 - /*
3502 - * Path too long
3503 - */
3504 - if (name_len + data_len > buf_len) {
3505 - ret = -ENAMETOOLONG;
3506 - goto out;
3507 + if (type == BTRFS_FT_XATTR) {
3508 + if (name_len > XATTR_NAME_MAX) {
3509 + ret = -ENAMETOOLONG;
3510 + goto out;
3511 + }
3512 + if (name_len + data_len > buf_len) {
3513 + ret = -E2BIG;
3514 + goto out;
3515 + }
3516 + } else {
3517 + /*
3518 + * Path too long
3519 + */
3520 + if (name_len + data_len > buf_len) {
3521 + ret = -ENAMETOOLONG;
3522 + goto out;
3523 + }
3524 }
3525
3526 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
3527 @@ -1628,6 +1644,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
3528 goto out;
3529 }
3530 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
3531 + if (key.type == BTRFS_ROOT_ITEM_KEY) {
3532 + ret = -ENOENT;
3533 + goto out;
3534 + }
3535 *found_inode = key.objectid;
3536 *found_type = btrfs_dir_type(path->nodes[0], di);
3537
3538 @@ -3054,33 +3074,18 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3539 if (ret < 0)
3540 goto out;
3541
3542 - if (parent_ino == sctx->cur_ino) {
3543 - /* child only renamed, not moved */
3544 - ASSERT(parent_gen == sctx->cur_inode_gen);
3545 - ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3546 - from_path);
3547 - if (ret < 0)
3548 - goto out;
3549 - ret = fs_path_add_path(from_path, name);
3550 - if (ret < 0)
3551 - goto out;
3552 - } else {
3553 - /* child moved and maybe renamed too */
3554 - sctx->send_progress = pm->ino;
3555 - ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
3556 - if (ret < 0)
3557 - goto out;
3558 - }
3559 + ret = get_cur_path(sctx, parent_ino, parent_gen,
3560 + from_path);
3561 + if (ret < 0)
3562 + goto out;
3563 + ret = fs_path_add_path(from_path, name);
3564 + if (ret < 0)
3565 + goto out;
3566
3567 - fs_path_free(name);
3568 + fs_path_reset(name);
3569 + to_path = name;
3570 name = NULL;
3571
3572 - to_path = fs_path_alloc();
3573 - if (!to_path) {
3574 - ret = -ENOMEM;
3575 - goto out;
3576 - }
3577 -
3578 sctx->send_progress = sctx->cur_ino + 1;
3579 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3580 if (ret < 0)
3581 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
3582 index c5eb2143dc66..4825cd2b10c2 100644
3583 --- a/fs/btrfs/sysfs.c
3584 +++ b/fs/btrfs/sysfs.c
3585 @@ -254,6 +254,7 @@ static ssize_t global_rsv_reserved_show(struct kobject *kobj,
3586 BTRFS_ATTR(global_rsv_reserved, 0444, global_rsv_reserved_show);
3587
3588 #define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
3589 +#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)
3590
3591 static ssize_t raid_bytes_show(struct kobject *kobj,
3592 struct kobj_attribute *attr, char *buf);
3593 @@ -266,7 +267,7 @@ static ssize_t raid_bytes_show(struct kobject *kobj,
3594 {
3595 struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
3596 struct btrfs_block_group_cache *block_group;
3597 - int index = kobj - sinfo->block_group_kobjs;
3598 + int index = to_raid_kobj(kobj)->raid_type;
3599 u64 val = 0;
3600
3601 down_read(&sinfo->groups_sem);
3602 @@ -288,7 +289,7 @@ static struct attribute *raid_attributes[] = {
3603
3604 static void release_raid_kobj(struct kobject *kobj)
3605 {
3606 - kobject_put(kobj->parent);
3607 + kfree(to_raid_kobj(kobj));
3608 }
3609
3610 struct kobj_type btrfs_raid_ktype = {
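Together with the ctree.h and extent-tree.c hunks, this completes the kobject lifetime fix: each per-raid kobject is now embedded in its own heap-allocated raid_kobject, recovered via container_of() in both raid_bytes_show() and the release hook, so the final kobject_put() frees the whole allocation instead of poking at a kobject array inside btrfs_space_info. The pattern in isolation, as a userspace sketch with stand-in types:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for struct kobject and its ktype release callback. */
struct kobject { void (*release)(struct kobject *kobj); };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct raid_kobject {
	int raid_type;
	struct kobject kobj;	/* embedded; never freed on its own */
};

static void release_raid_kobj(struct kobject *kobj)
{
	/* Recover the containing allocation and free it as a whole. */
	free(container_of(kobj, struct raid_kobject, kobj));
}

int main(void)
{
	struct raid_kobject *rkobj = calloc(1, sizeof(*rkobj));

	if (!rkobj)
		return 1;
	rkobj->raid_type = 3;
	rkobj->kobj.release = release_raid_kobj;
	printf("raid_type via container_of: %d\n",
	       container_of(&rkobj->kobj, struct raid_kobject, kobj)->raid_type);
	rkobj->kobj.release(&rkobj->kobj);	/* what the final kobject_put() does */
	return 0;
}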
3611 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
3612 index 49d7fab73360..57b699410fb8 100644
3613 --- a/fs/btrfs/volumes.c
3614 +++ b/fs/btrfs/volumes.c
3615 @@ -1452,6 +1452,22 @@ out:
3616 return ret;
3617 }
3618
3619 +/*
3620 + * Function to update ctime/mtime for a given device path.
3621 + * Mainly used for ctime/mtime-based probes like libblkid.
3622 + */
3623 +static void update_dev_time(char *path_name)
3624 +{
3625 + struct file *filp;
3626 +
3627 + filp = filp_open(path_name, O_RDWR, 0);
3628 + if (!filp)
3629 + return;
3630 + file_update_time(filp);
3631 + filp_close(filp, NULL);
3632 + return;
3633 +}
3634 +
3635 static int btrfs_rm_dev_item(struct btrfs_root *root,
3636 struct btrfs_device *device)
3637 {
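One caveat in the update_dev_time() helper added above: filp_open() returns an ERR_PTR() on failure, never NULL, so the !filp test cannot catch a failed open; the robust check is IS_ERR(filp). A tiny demonstration of the ERR_PTR convention (userspace restatement of the kernel macros):

#include <stdio.h>

/* Userspace restatement of the kernel's ERR_PTR convention. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	void *filp = ERR_PTR(-2);	/* what a failed filp_open() hands back */

	printf("!filp sees the failure:  %s\n", !filp ? "yes" : "no");
	printf("IS_ERR sees the failure: %s\n", IS_ERR(filp) ? "yes" : "no");
	return 0;
}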
3638 @@ -1674,11 +1690,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
3639 struct btrfs_fs_devices *fs_devices;
3640 fs_devices = root->fs_info->fs_devices;
3641 while (fs_devices) {
3642 - if (fs_devices->seed == cur_devices)
3643 + if (fs_devices->seed == cur_devices) {
3644 + fs_devices->seed = cur_devices->seed;
3645 break;
3646 + }
3647 fs_devices = fs_devices->seed;
3648 }
3649 - fs_devices->seed = cur_devices->seed;
3650 cur_devices->seed = NULL;
3651 lock_chunks(root);
3652 __btrfs_close_devices(cur_devices);
3653 @@ -1704,10 +1721,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
3654
3655 ret = 0;
3656
3657 - /* Notify udev that device has changed */
3658 - if (bdev)
3659 + if (bdev) {
3660 + /* Notify udev that device has changed */
3661 btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
3662
3663 + /* Update ctime/mtime for device path for libblkid */
3664 + update_dev_time(device_path);
3665 + }
3666 +
3667 error_brelse:
3668 brelse(bh);
3669 if (bdev)
3670 @@ -1883,7 +1904,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
3671 fs_devices->seeding = 0;
3672 fs_devices->num_devices = 0;
3673 fs_devices->open_devices = 0;
3674 - fs_devices->total_devices = 0;
3675 fs_devices->seed = seed_devices;
3676
3677 generate_random_uuid(fs_devices->fsid);
3678 @@ -2146,6 +2166,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
3679 ret = btrfs_commit_transaction(trans, root);
3680 }
3681
3682 + /* Update ctime/mtime for libblkid */
3683 + update_dev_time(device_path);
3684 return ret;
3685
3686 error_trans:
3687 @@ -6058,10 +6080,14 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
3688 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
3689 struct btrfs_device *device;
3690
3691 - mutex_lock(&fs_devices->device_list_mutex);
3692 - list_for_each_entry(device, &fs_devices->devices, dev_list)
3693 - device->dev_root = fs_info->dev_root;
3694 - mutex_unlock(&fs_devices->device_list_mutex);
3695 + while (fs_devices) {
3696 + mutex_lock(&fs_devices->device_list_mutex);
3697 + list_for_each_entry(device, &fs_devices->devices, dev_list)
3698 + device->dev_root = fs_info->dev_root;
3699 + mutex_unlock(&fs_devices->device_list_mutex);
3700 +
3701 + fs_devices = fs_devices->seed;
3702 + }
3703 }
3704
3705 static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
3706 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3707 index 3802f8c94acc..1fb6ad2ac92d 100644
3708 --- a/fs/cifs/smb2pdu.c
3709 +++ b/fs/cifs/smb2pdu.c
3710 @@ -1089,6 +1089,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3711 int rc = 0;
3712 unsigned int num_iovecs = 2;
3713 __u32 file_attributes = 0;
3714 + char *dhc_buf = NULL, *lc_buf = NULL;
3715
3716 cifs_dbg(FYI, "create/open\n");
3717
3718 @@ -1155,6 +1156,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3719 kfree(copy_path);
3720 return rc;
3721 }
3722 + lc_buf = iov[num_iovecs-1].iov_base;
3723 }
3724
3725 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
3726 @@ -1169,9 +1171,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3727 if (rc) {
3728 cifs_small_buf_release(req);
3729 kfree(copy_path);
3730 - kfree(iov[num_iovecs-1].iov_base);
3731 + kfree(lc_buf);
3732 return rc;
3733 }
3734 + dhc_buf = iov[num_iovecs-1].iov_base;
3735 }
3736
3737 rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
3738 @@ -1203,6 +1206,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3739 *oplock = rsp->OplockLevel;
3740 creat_exit:
3741 kfree(copy_path);
3742 + kfree(lc_buf);
3743 + kfree(dhc_buf);
3744 free_rsp_buf(resp_buftype, rsp);
3745 return rc;
3746 }
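The SMB2_open() hunks fix error-path memory handling: each allocated create context is remembered in its own local (lc_buf, dhc_buf) instead of being freed through iov[num_iovecs-1], whose index may already point at a different buffer, and both are released once at the creat_exit label. The shape of that single-exit cleanup pattern, as a hypothetical sketch:

#include <stdlib.h>

/* Hypothetical sketch of the single-exit cleanup shape used by the fix. */
static int do_open(int want_lease, int want_durable)
{
	char *lc_buf = NULL, *dhc_buf = NULL;
	int rc = 0;

	if (want_lease) {
		lc_buf = malloc(32);	/* lease create context */
		if (!lc_buf) {
			rc = -1;
			goto out;
		}
	}
	if (want_durable) {
		dhc_buf = malloc(32);	/* durable-handle create context */
		if (!dhc_buf) {
			rc = -1;
			goto out;
		}
	}
	/* ... build and send the request ... */
out:
	free(lc_buf);	/* free(NULL) is a no-op, so both calls are safe */
	free(dhc_buf);
	return rc;
}

int main(void)
{
	return do_open(1, 1);
}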
3747 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
3748 index af903128891c..ead00467282d 100644
3749 --- a/fs/eventpoll.c
3750 +++ b/fs/eventpoll.c
3751 @@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
3752 void eventpoll_release_file(struct file *file)
3753 {
3754 struct eventpoll *ep;
3755 - struct epitem *epi;
3756 + struct epitem *epi, *next;
3757
3758 /*
3759 * We don't want to get "file->f_lock" because it is not
3760 @@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
3761 * Besides, ep_remove() acquires the lock, so we can't hold it here.
3762 */
3763 mutex_lock(&epmutex);
3764 - list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
3765 + list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
3766 ep = epi->ep;
3767 mutex_lock_nested(&ep->mtx, 0);
3768 ep_remove(ep, epi);
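The eventpoll hunk replaces list_for_each_entry_rcu() with list_for_each_entry_safe() because ep_remove() unlinks the entry the loop is standing on; the _safe variant caches the next element before running the body. A standalone illustration of why caching the successor matters when the body frees the cursor:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

int main(void)
{
	struct node *head = NULL, *n, *next;

	/* Build 1 -> 2 -> 3. */
	for (int v = 3; v >= 1; v--) {
		n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->v = v;
		n->next = head;
		head = n;
	}

	/*
	 * Cache the successor before the body frees the cursor; this is
	 * exactly the job of the second iteration variable that
	 * list_for_each_entry_safe() adds over the plain iterator.
	 */
	for (n = head; n; n = next) {
		next = n->next;	/* saved before n goes away */
		printf("removing %d\n", n->v);
		free(n);	/* reading n->next after this would be a UAF */
	}
	return 0;
}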
3769 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3770 index 66946aa62127..f542e486a4a4 100644
3771 --- a/fs/ext4/ext4.h
3772 +++ b/fs/ext4/ext4.h
3773 @@ -2771,7 +2771,8 @@ extern void ext4_io_submit(struct ext4_io_submit *io);
3774 extern int ext4_bio_write_page(struct ext4_io_submit *io,
3775 struct page *page,
3776 int len,
3777 - struct writeback_control *wbc);
3778 + struct writeback_control *wbc,
3779 + bool keep_towrite);
3780
3781 /* mmp.c */
3782 extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
3783 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3784 index 01b0c208f625..f312c47b7d18 100644
3785 --- a/fs/ext4/extents.c
3786 +++ b/fs/ext4/extents.c
3787 @@ -4744,6 +4744,13 @@ static long ext4_zero_range(struct file *file, loff_t offset,
3788 if (!S_ISREG(inode->i_mode))
3789 return -EINVAL;
3790
3791 + /* Call ext4_force_commit to flush all data in case of data=journal. */
3792 + if (ext4_should_journal_data(inode)) {
3793 + ret = ext4_force_commit(inode->i_sb);
3794 + if (ret)
3795 + return ret;
3796 + }
3797 +
3798 /*
3799 * Write out all dirty pages to avoid race conditions
3800 * Then release them.
3801 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3802 index d7b7462a0e13..5bc199445dc2 100644
3803 --- a/fs/ext4/inode.c
3804 +++ b/fs/ext4/inode.c
3805 @@ -1846,6 +1846,7 @@ static int ext4_writepage(struct page *page,
3806 struct buffer_head *page_bufs = NULL;
3807 struct inode *inode = page->mapping->host;
3808 struct ext4_io_submit io_submit;
3809 + bool keep_towrite = false;
3810
3811 trace_ext4_writepage(page);
3812 size = i_size_read(inode);
3813 @@ -1876,6 +1877,7 @@ static int ext4_writepage(struct page *page,
3814 unlock_page(page);
3815 return 0;
3816 }
3817 + keep_towrite = true;
3818 }
3819
3820 if (PageChecked(page) && ext4_should_journal_data(inode))
3821 @@ -1892,7 +1894,7 @@ static int ext4_writepage(struct page *page,
3822 unlock_page(page);
3823 return -ENOMEM;
3824 }
3825 - ret = ext4_bio_write_page(&io_submit, page, len, wbc);
3826 + ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
3827 ext4_io_submit(&io_submit);
3828 /* Drop io_end reference we got from init */
3829 ext4_put_io_end_defer(io_submit.io_end);
3830 @@ -1911,7 +1913,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
3831 else
3832 len = PAGE_CACHE_SIZE;
3833 clear_page_dirty_for_io(page);
3834 - err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
3835 + err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
3836 if (!err)
3837 mpd->wbc->nr_to_write--;
3838 mpd->first_page++;
3839 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3840 index c8238a26818c..fe4e668d3023 100644
3841 --- a/fs/ext4/mballoc.c
3842 +++ b/fs/ext4/mballoc.c
3843 @@ -3145,7 +3145,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3844 }
3845 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3846 start > ac->ac_o_ex.fe_logical);
3847 - BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
3848 + BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3849
3850 /* now prepare goal request */
3851
3852 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
3853 index c18d95b50540..b6a3804a9855 100644
3854 --- a/fs/ext4/page-io.c
3855 +++ b/fs/ext4/page-io.c
3856 @@ -401,7 +401,8 @@ submit_and_retry:
3857 int ext4_bio_write_page(struct ext4_io_submit *io,
3858 struct page *page,
3859 int len,
3860 - struct writeback_control *wbc)
3861 + struct writeback_control *wbc,
3862 + bool keep_towrite)
3863 {
3864 struct inode *inode = page->mapping->host;
3865 unsigned block_start, blocksize;
3866 @@ -414,10 +415,24 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
3867 BUG_ON(!PageLocked(page));
3868 BUG_ON(PageWriteback(page));
3869
3870 - set_page_writeback(page);
3871 + if (keep_towrite)
3872 + set_page_writeback_keepwrite(page);
3873 + else
3874 + set_page_writeback(page);
3875 ClearPageError(page);
3876
3877 /*
3878 + * Comments copied from block_write_full_page_endio:
3879 + *
3880 + * The page straddles i_size. It must be zeroed out on each and every
3881 + * writepage invocation because it may be mmapped. "A file is mapped
3882 + * in multiples of the page size. For a file that is not a multiple of
3883 + * the page size, the remaining memory is zeroed when mapped, and
3884 + * writes to that region are not written out to the file."
3885 + */
3886 + if (len < PAGE_CACHE_SIZE)
3887 + zero_user_segment(page, len, PAGE_CACHE_SIZE);
3888 + /*
3889 * In the first loop we prepare and mark buffers to submit. We have to
3890 * mark all buffers in the page before submitting so that
3891 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
3892 @@ -428,19 +443,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
3893 do {
3894 block_start = bh_offset(bh);
3895 if (block_start >= len) {
3896 - /*
3897 - * Comments copied from block_write_full_page_endio:
3898 - *
3899 - * The page straddles i_size. It must be zeroed out on
3900 - * each and every writepage invocation because it may
3901 - * be mmapped. "A file is mapped in multiples of the
3902 - * page size. For a file that is not a multiple of
3903 - * the page size, the remaining memory is zeroed when
3904 - * mapped, and writes to that region are not written
3905 - * out to the file."
3906 - */
3907 - zero_user_segment(page, block_start,
3908 - block_start + blocksize);
3909 clear_buffer_dirty(bh);
3910 set_buffer_uptodate(bh);
3911 continue;
3912 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
3913 index 45abd60e2bff..bc077f3c8868 100644
3914 --- a/fs/f2fs/data.c
3915 +++ b/fs/f2fs/data.c
3916 @@ -835,6 +835,8 @@ out:
3917 unlock_page(page);
3918 if (need_balance_fs)
3919 f2fs_balance_fs(sbi);
3920 + if (wbc->for_reclaim)
3921 + f2fs_submit_merged_bio(sbi, DATA, WRITE);
3922 return 0;
3923
3924 redirty_out:
3925 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
3926 index 7a8f2cd66c8b..0e2569031a6f 100644
3927 --- a/include/linux/acpi.h
3928 +++ b/include/linux/acpi.h
3929 @@ -37,6 +37,7 @@
3930
3931 #include <linux/list.h>
3932 #include <linux/mod_devicetable.h>
3933 +#include <linux/dynamic_debug.h>
3934
3935 #include <acpi/acpi.h>
3936 #include <acpi/acpi_bus.h>
3937 @@ -589,6 +590,14 @@ static inline __printf(3, 4) void
3938 acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
3939 #endif /* !CONFIG_ACPI */
3940
3941 +#if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG)
3942 +__printf(3, 4)
3943 +void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...);
3944 +#else
3945 +#define __acpi_handle_debug(descriptor, handle, fmt, ...) \
3946 + acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__);
3947 +#endif
3948 +
3949 /*
3950 * acpi_handle_<level>: Print message with ACPI prefix and object path
3951 *
3952 @@ -610,11 +619,19 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
3953 #define acpi_handle_info(handle, fmt, ...) \
3954 acpi_handle_printk(KERN_INFO, handle, fmt, ##__VA_ARGS__)
3955
3956 -/* REVISIT: Support CONFIG_DYNAMIC_DEBUG when necessary */
3957 -#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
3958 +#if defined(DEBUG)
3959 #define acpi_handle_debug(handle, fmt, ...) \
3960 acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__)
3961 #else
3962 +#if defined(CONFIG_DYNAMIC_DEBUG)
3963 +#define acpi_handle_debug(handle, fmt, ...) \
3964 +do { \
3965 + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
3966 + if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
3967 + __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \
3968 + ##__VA_ARGS__); \
3969 +} while (0)
3970 +#else
3971 #define acpi_handle_debug(handle, fmt, ...) \
3972 ({ \
3973 if (0) \
3974 @@ -622,5 +639,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {}
3975 0; \
3976 })
3977 #endif
3978 +#endif
3979
3980 #endif /*_LINUX_ACPI_H*/
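The acpi_handle_debug() rework follows the standard dynamic-debug shape: DEFINE_DYNAMIC_DEBUG_METADATA() emits one static descriptor per call site, and the (possibly expensive) print only runs when that site's _DPRINTK_FLAGS_PRINT flag has been enabled at runtime. A stripped-down userspace model of the gating (hypothetical names; the flag is flipped in-process here purely to show both paths, whereas the kernel toggles it through debugfs):

#include <stdarg.h>
#include <stdio.h>

#define FLAG_PRINT 0x1

/* One static descriptor per call site, as DEFINE_DYNAMIC_DEBUG_METADATA does. */
struct ddebug { const char *fmt; unsigned int flags; };

static void real_print(struct ddebug *d, ...)
{
	va_list ap;

	va_start(ap, d);
	vprintf(d->fmt, ap);
	va_end(ap);
}

#define dyn_debug(fmt, ...)						\
do {									\
	static struct ddebug descriptor = { fmt, 0 };			\
	if (descriptor.flags & FLAG_PRINT)	/* the cheap gate */	\
		real_print(&descriptor, ##__VA_ARGS__);			\
	descriptor.flags |= FLAG_PRINT;	/* stand-in for enabling via debugfs */ \
} while (0)

int main(void)
{
	for (int i = 0; i < 2; i++)	/* same call site, toggled at runtime */
		dyn_debug("pass %d reached the printk\n", i);
	return 0;
}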
3981 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3982 index b65166de1d9d..d0bad1a8b0bd 100644
3983 --- a/include/linux/hugetlb.h
3984 +++ b/include/linux/hugetlb.h
3985 @@ -392,15 +392,13 @@ static inline pgoff_t basepage_index(struct page *page)
3986
3987 extern void dissolve_free_huge_pages(unsigned long start_pfn,
3988 unsigned long end_pfn);
3989 -int pmd_huge_support(void);
3990 -/*
3991 - * Currently hugepage migration is enabled only for pmd-based hugepage.
3992 - * This function will be updated when hugepage migration is more widely
3993 - * supported.
3994 - */
3995 static inline int hugepage_migration_support(struct hstate *h)
3996 {
3997 - return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
3998 +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
3999 + return huge_page_shift(h) == PMD_SHIFT;
4000 +#else
4001 + return 0;
4002 +#endif
4003 }
4004
4005 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
4006 @@ -450,7 +448,6 @@ static inline pgoff_t basepage_index(struct page *page)
4007 return page->index;
4008 }
4009 #define dissolve_free_huge_pages(s, e) do {} while (0)
4010 -#define pmd_huge_support() 0
4011 #define hugepage_migration_support(h) 0
4012
4013 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
4014 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
4015 index 26e2661d3935..472c021a2d4f 100644
4016 --- a/include/linux/irqdesc.h
4017 +++ b/include/linux/irqdesc.h
4018 @@ -27,6 +27,8 @@ struct irq_desc;
4019 * @irq_count: stats field to detect stalled irqs
4020 * @last_unhandled: aging timer for unhandled count
4021 * @irqs_unhandled: stats field for spurious unhandled interrupts
4022 + * @threads_handled: stats field for deferred spurious detection of threaded handlers
4023 + * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
4024 * @lock: locking for SMP
4025 * @affinity_hint: hint to user space for preferred irq affinity
4026 * @affinity_notify: context for notification of affinity changes
4027 @@ -52,6 +54,8 @@ struct irq_desc {
4028 unsigned int irq_count; /* For detecting broken IRQs */
4029 unsigned long last_unhandled; /* Aging timer for unhandled count */
4030 unsigned int irqs_unhandled;
4031 + atomic_t threads_handled;
4032 + int threads_handled_last;
4033 raw_spinlock_t lock;
4034 struct cpumask *percpu_enabled;
4035 #ifdef CONFIG_SMP
4036 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
4037 index 3c1b968da0ca..f230a978e6ba 100644
4038 --- a/include/linux/mempolicy.h
4039 +++ b/include/linux/mempolicy.h
4040 @@ -175,6 +175,12 @@ static inline int vma_migratable(struct vm_area_struct *vma)
4041 {
4042 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
4043 return 0;
4044 +
4045 +#ifndef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
4046 + if (vma->vm_flags & VM_HUGETLB)
4047 + return 0;
4048 +#endif
4049 +
4050 /*
4051 * Migration allocates pages in the highest zone. If we cannot
4052 * do so then migration (at least from node to node) is not
4053 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
4054 index fac5509c18f0..835aa3d36719 100644
4055 --- a/include/linux/mmzone.h
4056 +++ b/include/linux/mmzone.h
4057 @@ -75,9 +75,13 @@ enum {
4058
4059 extern int page_group_by_mobility_disabled;
4060
4061 +#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
4062 +#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
4063 +
4064 static inline int get_pageblock_migratetype(struct page *page)
4065 {
4066 - return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
4067 + BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
4068 + return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
4069 }
4070
4071 struct free_area {
4072 diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
4073 index d1fe1a761047..ca71a1d347a0 100644
4074 --- a/include/linux/page-flags.h
4075 +++ b/include/linux/page-flags.h
4076 @@ -317,13 +317,23 @@ CLEARPAGEFLAG(Uptodate, uptodate)
4077 extern void cancel_dirty_page(struct page *page, unsigned int account_size);
4078
4079 int test_clear_page_writeback(struct page *page);
4080 -int test_set_page_writeback(struct page *page);
4081 +int __test_set_page_writeback(struct page *page, bool keep_write);
4082 +
4083 +#define test_set_page_writeback(page) \
4084 + __test_set_page_writeback(page, false)
4085 +#define test_set_page_writeback_keepwrite(page) \
4086 + __test_set_page_writeback(page, true)
4087
4088 static inline void set_page_writeback(struct page *page)
4089 {
4090 test_set_page_writeback(page);
4091 }
4092
4093 +static inline void set_page_writeback_keepwrite(struct page *page)
4094 +{
4095 + test_set_page_writeback_keepwrite(page);
4096 +}
4097 +
4098 #ifdef CONFIG_PAGEFLAGS_EXTENDED
4099 /*
4100 * System with lots of page flags available. This allows separate
4101 diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
4102 index 2ee8cd2466b5..c08730c10c7a 100644
4103 --- a/include/linux/pageblock-flags.h
4104 +++ b/include/linux/pageblock-flags.h
4105 @@ -30,9 +30,12 @@ enum pageblock_bits {
4106 PB_migrate,
4107 PB_migrate_end = PB_migrate + 3 - 1,
4108 /* 3 bits required for migrate types */
4109 -#ifdef CONFIG_COMPACTION
4110 PB_migrate_skip,/* If set the block is skipped by compaction */
4111 -#endif /* CONFIG_COMPACTION */
4112 +
4113 + /*
4114 + * Assume the bits will always align on a word. If this assumption
4115 + * changes then get/set pageblock needs updating.
4116 + */
4117 NR_PAGEBLOCK_BITS
4118 };
4119
4120 @@ -62,11 +65,33 @@ extern int pageblock_order;
4121 /* Forward declaration */
4122 struct page;
4123
4124 +unsigned long get_pageblock_flags_mask(struct page *page,
4125 + unsigned long end_bitidx,
4126 + unsigned long mask);
4127 +void set_pageblock_flags_mask(struct page *page,
4128 + unsigned long flags,
4129 + unsigned long end_bitidx,
4130 + unsigned long mask);
4131 +
4132 /* Declarations for getting and setting flags. See mm/page_alloc.c */
4133 -unsigned long get_pageblock_flags_group(struct page *page,
4134 - int start_bitidx, int end_bitidx);
4135 -void set_pageblock_flags_group(struct page *page, unsigned long flags,
4136 - int start_bitidx, int end_bitidx);
4137 +static inline unsigned long get_pageblock_flags_group(struct page *page,
4138 + int start_bitidx, int end_bitidx)
4139 +{
4140 + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
4141 + unsigned long mask = (1 << nr_flag_bits) - 1;
4142 +
4143 + return get_pageblock_flags_mask(page, end_bitidx, mask);
4144 +}
4145 +
4146 +static inline void set_pageblock_flags_group(struct page *page,
4147 + unsigned long flags,
4148 + int start_bitidx, int end_bitidx)
4149 +{
4150 + unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
4151 + unsigned long mask = (1 << nr_flag_bits) - 1;
4152 +
4153 + set_pageblock_flags_mask(page, flags, end_bitidx, mask);
4154 +}
4155
4156 #ifdef CONFIG_COMPACTION
4157 #define get_pageblock_skip(page) \
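The inline get/set_pageblock_flags_group() wrappers above preserve the old interface while funnelling into the new mask-based primitives. For migratetypes, start_bitidx = PB_migrate (0) and end_bitidx = PB_migrate_end (2), so the computed mask matches MIGRATETYPE_MASK from the mmzone.h hunk, as the BUILD_BUG_ON there asserts. Worked out:

#include <stdio.h>

int main(void)
{
	int start_bitidx = 0, end_bitidx = 2;	/* PB_migrate .. PB_migrate_end */
	unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
	unsigned long mask = (1UL << nr_flag_bits) - 1;

	printf("nr_flag_bits=%lu mask=0x%lx\n", nr_flag_bits, mask);	/* 3, 0x7 */
	return 0;
}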
4158 diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
4159 index 07d0df6bf768..077904c8b70d 100644
4160 --- a/include/linux/ptrace.h
4161 +++ b/include/linux/ptrace.h
4162 @@ -5,6 +5,7 @@
4163 #include <linux/sched.h> /* For struct task_struct. */
4164 #include <linux/err.h> /* for IS_ERR_VALUE */
4165 #include <linux/bug.h> /* For BUG_ON. */
4166 +#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
4167 #include <uapi/linux/ptrace.h>
4168
4169 /*
4170 @@ -129,6 +130,37 @@ static inline void ptrace_event(int event, unsigned long message)
4171 }
4172
4173 /**
4174 + * ptrace_event_pid - possibly stop for a ptrace event notification
4175 + * @event: %PTRACE_EVENT_* value to report
4176 + * @pid: process identifier for %PTRACE_GETEVENTMSG to return
4177 + *
4178 + * Check whether @event is enabled and, if so, report @event and @pid
4179 + * to the ptrace parent. @pid is reported as the pid_t seen from
4180 + * the ptrace parent's pid namespace.
4181 + *
4182 + * Called without locks.
4183 + */
4184 +static inline void ptrace_event_pid(int event, struct pid *pid)
4185 +{
4186 + /*
4187 + * FIXME: There's a potential race if a ptracer in a different pid
4188 + * namespace than parent attaches between computing message below and
4189 + * when we acquire tasklist_lock in ptrace_stop(). If this happens,
4190 + * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
4191 + */
4192 + unsigned long message = 0;
4193 + struct pid_namespace *ns;
4194 +
4195 + rcu_read_lock();
4196 + ns = task_active_pid_ns(rcu_dereference(current->parent));
4197 + if (ns)
4198 + message = pid_nr_ns(pid, ns);
4199 + rcu_read_unlock();
4200 +
4201 + ptrace_event(event, message);
4202 +}
4203 +
4204 +/**
4205 * ptrace_init_task - initialize ptrace state for a new child
4206 * @child: new child task
4207 * @ptrace: true if child should be ptrace'd by parent's tracer
4208 diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
4209 index d4b571c2f9fd..b0b381b0cb07 100644
4210 --- a/include/net/bluetooth/mgmt.h
4211 +++ b/include/net/bluetooth/mgmt.h
4212 @@ -181,6 +181,9 @@ struct mgmt_cp_load_link_keys {
4213 } __packed;
4214 #define MGMT_LOAD_LINK_KEYS_SIZE 3
4215
4216 +#define MGMT_LTK_UNAUTHENTICATED 0x00
4217 +#define MGMT_LTK_AUTHENTICATED 0x01
4218 +
4219 struct mgmt_ltk_info {
4220 struct mgmt_addr_info addr;
4221 __u8 type;
4222 diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
4223 index dd7c998221b3..a100c6e266c7 100644
4224 --- a/include/scsi/scsi_cmnd.h
4225 +++ b/include/scsi/scsi_cmnd.h
4226 @@ -7,6 +7,7 @@
4227 #include <linux/types.h>
4228 #include <linux/timer.h>
4229 #include <linux/scatterlist.h>
4230 +#include <scsi/scsi_device.h>
4231
4232 struct Scsi_Host;
4233 struct scsi_device;
4234 @@ -306,4 +307,20 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
4235 cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
4236 }
4237
4238 +static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
4239 +{
4240 + unsigned int xfer_len = blk_rq_bytes(scmd->request);
4241 + unsigned int prot_op = scsi_get_prot_op(scmd);
4242 + unsigned int sector_size = scmd->device->sector_size;
4243 +
4244 + switch (prot_op) {
4245 + case SCSI_PROT_NORMAL:
4246 + case SCSI_PROT_WRITE_STRIP:
4247 + case SCSI_PROT_READ_INSERT:
4248 + return xfer_len;
4249 + }
4250 +
4251 + return xfer_len + (xfer_len >> ilog2(sector_size)) * 8;
4252 +}
4253 +
4254 #endif /* _SCSI_SCSI_CMND_H */
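scsi_transfer_length() inflates the block-layer byte count by 8 bytes of protection information per logical sector whenever PI travels with the data, i.e. for every protection op except NORMAL, WRITE_STRIP and READ_INSERT. With 512-byte sectors, a 4 KiB transfer becomes 4096 + (4096 >> 9) * 8 = 4160 bytes on the wire:

#include <stdio.h>

/* Integer log2 for power-of-two values, standing in for the kernel's ilog2(). */
static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int xfer_len = 4096, sector_size = 512;
	unsigned int with_pi = xfer_len + (xfer_len >> ilog2(sector_size)) * 8;

	printf("%u data bytes -> %u bytes including PI\n", xfer_len, with_pi);
	return 0;
}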
4255 diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
4256 index 33b487b5da92..daef9daa500c 100644
4257 --- a/include/target/iscsi/iscsi_transport.h
4258 +++ b/include/target/iscsi/iscsi_transport.h
4259 @@ -70,7 +70,8 @@ extern void iscsit_build_nopin_rsp(struct iscsi_cmd *, struct iscsi_conn *,
4260 extern void iscsit_build_task_mgt_rsp(struct iscsi_cmd *, struct iscsi_conn *,
4261 struct iscsi_tm_rsp *);
4262 extern int iscsit_build_text_rsp(struct iscsi_cmd *, struct iscsi_conn *,
4263 - struct iscsi_text_rsp *);
4264 + struct iscsi_text_rsp *,
4265 + enum iscsit_transport_type);
4266 extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
4267 struct iscsi_reject *);
4268 extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
4269 diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
4270 index 3a1c1eea1fff..9adc1bca1178 100644
4271 --- a/include/target/target_core_backend.h
4272 +++ b/include/target/target_core_backend.h
4273 @@ -59,6 +59,7 @@ int transport_subsystem_register(struct se_subsystem_api *);
4274 void transport_subsystem_release(struct se_subsystem_api *);
4275
4276 void target_complete_cmd(struct se_cmd *, u8);
4277 +void target_complete_cmd_with_length(struct se_cmd *, u8, int);
4278
4279 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
4280 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
4281 diff --git a/kernel/fork.c b/kernel/fork.c
4282 index 54a8d26f612f..142904349fb5 100644
4283 --- a/kernel/fork.c
4284 +++ b/kernel/fork.c
4285 @@ -1606,10 +1606,12 @@ long do_fork(unsigned long clone_flags,
4286 */
4287 if (!IS_ERR(p)) {
4288 struct completion vfork;
4289 + struct pid *pid;
4290
4291 trace_sched_process_fork(current, p);
4292
4293 - nr = task_pid_vnr(p);
4294 + pid = get_task_pid(p, PIDTYPE_PID);
4295 + nr = pid_vnr(pid);
4296
4297 if (clone_flags & CLONE_PARENT_SETTID)
4298 put_user(nr, parent_tidptr);
4299 @@ -1624,12 +1626,14 @@ long do_fork(unsigned long clone_flags,
4300
4301 /* forking complete and child started to run, tell ptracer */
4302 if (unlikely(trace))
4303 - ptrace_event(trace, nr);
4304 + ptrace_event_pid(trace, pid);
4305
4306 if (clone_flags & CLONE_VFORK) {
4307 if (!wait_for_vfork_done(p, &vfork))
4308 - ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
4309 + ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
4310 }
4311 +
4312 + put_pid(pid);
4313 } else {
4314 nr = PTR_ERR(p);
4315 }
4316 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4317 index d34131ca372b..3dc6a61bf06a 100644
4318 --- a/kernel/irq/manage.c
4319 +++ b/kernel/irq/manage.c
4320 @@ -886,8 +886,8 @@ static int irq_thread(void *data)
4321 irq_thread_check_affinity(desc, action);
4322
4323 action_ret = handler_fn(desc, action);
4324 - if (!noirqdebug)
4325 - note_interrupt(action->irq, desc, action_ret);
4326 + if (action_ret == IRQ_HANDLED)
4327 + atomic_inc(&desc->threads_handled);
4328
4329 wake_threads_waitq(desc);
4330 }
4331 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
4332 index a1d8cc63b56e..e2514b0e439e 100644
4333 --- a/kernel/irq/spurious.c
4334 +++ b/kernel/irq/spurious.c
4335 @@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
4336 return action && (action->flags & IRQF_IRQPOLL);
4337 }
4338
4339 +#define SPURIOUS_DEFERRED 0x80000000
4340 +
4341 void note_interrupt(unsigned int irq, struct irq_desc *desc,
4342 irqreturn_t action_ret)
4343 {
4344 @@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
4345 irq_settings_is_polled(desc))
4346 return;
4347
4348 - /* we get here again via the threaded handler */
4349 - if (action_ret == IRQ_WAKE_THREAD)
4350 - return;
4351 -
4352 if (bad_action_ret(action_ret)) {
4353 report_bad_irq(irq, desc, action_ret);
4354 return;
4355 }
4356
4357 + /*
4358 + * We cannot call note_interrupt from the threaded handler
4359 + * because we need to look at the compound of all handlers
4360 + * (primary and threaded). Aside from that, in the threaded
4361 + * shared case we have no serialization against an incoming
4362 + * hardware interrupt while we are dealing with a threaded
4363 + * result.
4364 + *
4365 + * So in case a thread is woken, we just note the fact and
4366 + * defer the analysis to the next hardware interrupt.
4367 + *
4368 + * The threaded handlers store whether they successfully
4369 + * handled an interrupt and we check whether that number
4370 + * changed versus the last invocation.
4371 + *
4372 + * We could handle all interrupts with the delayed by one
4373 + * mechanism, but for the non-forced threaded case we'd just
4374 + * add pointless overhead to the straight hardirq interrupts
4375 + * for the sake of a few lines less code.
4376 + */
4377 + if (action_ret & IRQ_WAKE_THREAD) {
4378 + /*
4379 + * There is a thread woken. Check whether one of the
4380 + * shared primary handlers returned IRQ_HANDLED. If
4381 + * not we defer the spurious detection to the next
4382 + * interrupt.
4383 + */
4384 + if (action_ret == IRQ_WAKE_THREAD) {
4385 + int handled;
4386 + /*
4387 + * We use bit 31 of threads_handled_last to
4388 + * denote that the deferred spurious detection
4389 + * is active. No locking necessary as
4390 + * threads_handled_last is only accessed here
4391 + * and we have the guarantee that hard
4392 + * interrupts are not reentrant.
4393 + */
4394 + if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
4395 + desc->threads_handled_last |= SPURIOUS_DEFERRED;
4396 + return;
4397 + }
4398 + /*
4399 + * Check whether one of the threaded handlers
4400 + * returned IRQ_HANDLED since the last
4401 + * interrupt happened.
4402 + *
4403 + * For simplicity we just set bit 31, as it is
4404 + * set in threads_handled_last as well. So we
4405 + * avoid extra masking. And we really do not
4406 + * care about the high bits of the handled
4407 + * count. We just care about the count being
4408 + * different than the one we saw before.
4409 + */
4410 + handled = atomic_read(&desc->threads_handled);
4411 + handled |= SPURIOUS_DEFERRED;
4412 + if (handled != desc->threads_handled_last) {
4413 + action_ret = IRQ_HANDLED;
4414 + /*
4415 + * Note: We keep the SPURIOUS_DEFERRED
4416 + * bit set. We are handling the
4417 + * previous invocation right now.
4418 + * Keep it for the current one, so the
4419 + * next hardware interrupt will
4420 + * account for it.
4421 + */
4422 + desc->threads_handled_last = handled;
4423 + } else {
4424 + /*
4425 + * None of the threaded handlers felt
4426 + * responsible for the last interrupt
4427 + *
4428 + * We keep the SPURIOUS_DEFERRED bit
4429 + * set in threads_handled_last as we
4430 + * need to account for the current
4431 + * interrupt as well.
4432 + */
4433 + action_ret = IRQ_NONE;
4434 + }
4435 + } else {
4436 + /*
4437 + * One of the primary handlers returned
4438 + * IRQ_HANDLED. So we don't care about the
4439 + * threaded handlers on the same line. Clear
4440 + * the deferred detection bit.
4441 + *
4442 + * In theory we could/should check whether the
4443 + * deferred bit is set and take the result of
4444 + * the previous run into account here as
4445 + * well. But it's really not worth the
4446 + * trouble. If every other interrupt is
4447 + * handled we never trigger the spurious
4448 + * detector. And if this is just the one out
4449 + * of 100k unhandled ones which is handled
4450 + * then we merely delay the spurious detection
4451 + * by one hard interrupt. Not a real problem.
4452 + */
4453 + desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
4454 + }
4455 + }
4456 +
4457 if (unlikely(action_ret == IRQ_NONE)) {
4458 /*
4459 * If we are seeing only the odd spurious IRQ caused by
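The core trick in note_interrupt() above is that bit 31 does double duty: it marks deferred detection as armed in threads_handled_last and is also OR-ed into each snapshot of the handled counter, so the comparison only reacts to the low 31 bits of the count changing and no extra masking is needed. A compact model of one defer/check cycle (unsigned here for portability; the kernel field is an int):

#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000U

int main(void)
{
	unsigned int threads_handled = 0;	/* atomic_t in the kernel */
	unsigned int threads_handled_last = 0;
	unsigned int handled;

	/* First IRQ_WAKE_THREAD: arm deferred detection, decide nothing yet. */
	threads_handled_last |= SPURIOUS_DEFERRED;

	/* The irq thread ran and claimed the interrupt. */
	threads_handled++;

	/* Next hard interrupt: tagged snapshot vs. the stored value. */
	handled = threads_handled | SPURIOUS_DEFERRED;
	printf("%s\n", handled != threads_handled_last ?
	       "IRQ_HANDLED (count moved)" : "IRQ_NONE (count unchanged)");
	threads_handled_last = handled;
	return 0;
}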
4460 diff --git a/kernel/kthread.c b/kernel/kthread.c
4461 index 9a130ec06f7a..c2390f41307b 100644
4462 --- a/kernel/kthread.c
4463 +++ b/kernel/kthread.c
4464 @@ -262,7 +262,7 @@ static void create_kthread(struct kthread_create_info *create)
4465 * kthread_stop() has been called). The return value should be zero
4466 * or a negative error number; it will be passed to kthread_stop().
4467 *
4468 - * Returns a task_struct or ERR_PTR(-ENOMEM).
4469 + * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
4470 */
4471 struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
4472 void *data, int node,
4473 @@ -298,7 +298,7 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
4474 * that thread.
4475 */
4476 if (xchg(&create->done, NULL))
4477 - return ERR_PTR(-ENOMEM);
4478 + return ERR_PTR(-EINTR);
4479 /*
4480 * kthreadd (or new kernel thread) will call complete()
4481 * shortly.
4482 diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
4483 index 14193d596d78..ab29b6a22669 100644
4484 --- a/kernel/locking/rtmutex-debug.h
4485 +++ b/kernel/locking/rtmutex-debug.h
4486 @@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
4487 {
4488 return (waiter != NULL);
4489 }
4490 +
4491 +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
4492 +{
4493 + debug_rt_mutex_print_deadlock(w);
4494 +}
4495 diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
4496 index a620d4d08ca6..fc605941b9b8 100644
4497 --- a/kernel/locking/rtmutex.c
4498 +++ b/kernel/locking/rtmutex.c
4499 @@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
4500 owner = *p;
4501 } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
4502 }
4503 +
4504 +/*
4505 + * Safe fastpath aware unlock:
4506 + * 1) Clear the waiters bit
4507 + * 2) Drop lock->wait_lock
4508 + * 3) Try to unlock the lock with cmpxchg
4509 + */
4510 +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
4511 + __releases(lock->wait_lock)
4512 +{
4513 + struct task_struct *owner = rt_mutex_owner(lock);
4514 +
4515 + clear_rt_mutex_waiters(lock);
4516 + raw_spin_unlock(&lock->wait_lock);
4517 + /*
4518 + * If a new waiter comes in between the unlock and the cmpxchg
4519 + * we have two situations:
4520 + *
4521 + * unlock(wait_lock);
4522 + * lock(wait_lock);
4523 + * cmpxchg(p, owner, 0) == owner
4524 + * mark_rt_mutex_waiters(lock);
4525 + * acquire(lock);
4526 + * or:
4527 + *
4528 + * unlock(wait_lock);
4529 + * lock(wait_lock);
4530 + * mark_rt_mutex_waiters(lock);
4531 + *
4532 + * cmpxchg(p, owner, 0) != owner
4533 + * enqueue_waiter();
4534 + * unlock(wait_lock);
4535 + * lock(wait_lock);
4536 + * wake waiter();
4537 + * unlock(wait_lock);
4538 + * lock(wait_lock);
4539 + * acquire(lock);
4540 + */
4541 + return rt_mutex_cmpxchg(lock, owner, NULL);
4542 +}
4543 +
4544 #else
4545 # define rt_mutex_cmpxchg(l,c,n) (0)
4546 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
4547 @@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
4548 lock->owner = (struct task_struct *)
4549 ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
4550 }
4551 +
4552 +/*
4553 + * Simple slow path only version: lock->owner is protected by lock->wait_lock.
4554 + */
4555 +static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
4556 + __releases(lock->wait_lock)
4557 +{
4558 + lock->owner = NULL;
4559 + raw_spin_unlock(&lock->wait_lock);
4560 + return true;
4561 +}
4562 #endif
4563
4564 static inline int
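unlock_rt_mutex_safe() clears the waiters bit, drops wait_lock, and only then attempts the cmpxchg from "owner, no waiters" to NULL; if a waiter slipped in between, the owner word no longer matches, the cmpxchg fails, and the caller falls back to the slow path. The same check-then-confirm shape with C11 atomics (illustrative only, not the kernel code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long owner;	/* 0 == unlocked, bit 0 == has waiters */

/* Fast unlock: succeeds only if no waiter queued itself meanwhile. */
static int unlock_fast(unsigned long self)
{
	unsigned long expected = self;	/* owner with the waiter bit clear */

	return atomic_compare_exchange_strong(&owner, &expected, 0UL);
}

int main(void)
{
	atomic_store(&owner, 42UL);	/* task 42 holds the lock, no waiters */
	printf("fast unlock: %s\n", unlock_fast(42UL) ? "ok" : "slow path");

	atomic_store(&owner, 42UL | 1UL);	/* a waiter set the bit */
	printf("fast unlock: %s\n", unlock_fast(42UL) ? "ok" : "slow path");
	return 0;
}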
4565 @@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
4566 */
4567 int max_lock_depth = 1024;
4568
4569 +static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
4570 +{
4571 + return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
4572 +}
4573 +
4574 /*
4575 * Adjust the priority chain. Also used for deadlock detection.
4576 * Decreases task's usage by one - may thus free the task.
4577 *
4578 - * @task: the task owning the mutex (owner) for which a chain walk is probably
4579 - * needed
4580 + * @task: the task owning the mutex (owner) for which a chain walk is
4581 + * probably needed
4582 * @deadlock_detect: do we have to carry out deadlock detection?
4583 - * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
4584 - * things for a task that has just got its priority adjusted, and
4585 - * is waiting on a mutex)
4586 + * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
4587 + * things for a task that has just got its priority adjusted, and
4588 + * is waiting on a mutex)
4589 + * @next_lock: the mutex on which the owner of @orig_lock was blocked before
4590 + * we dropped its pi_lock. Is never dereferenced, only used for
4591 + * comparison to detect lock chain changes.
4592 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
4593 - * its priority to the mutex owner (can be NULL in the case
4594 - * depicted above or if the top waiter is gone away and we are
4595 - * actually deboosting the owner)
4596 - * @top_task: the current top waiter
4597 + * its priority to the mutex owner (can be NULL in the case
4598 + * depicted above or if the top waiter is gone away and we are
4599 + * actually deboosting the owner)
4600 + * @top_task: the current top waiter
4601 *
4602 * Returns 0 or -EDEADLK.
4603 */
4604 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
4605 int deadlock_detect,
4606 struct rt_mutex *orig_lock,
4607 + struct rt_mutex *next_lock,
4608 struct rt_mutex_waiter *orig_waiter,
4609 struct task_struct *top_task)
4610 {
4611 @@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
4612 }
4613 put_task_struct(task);
4614
4615 - return deadlock_detect ? -EDEADLK : 0;
4616 + return -EDEADLK;
4617 }
4618 retry:
4619 /*
4620 @@ -339,6 +400,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
4621 goto out_unlock_pi;
4622
4623 /*
4624 + * We dropped all locks after taking a refcount on @task, so
4625 + * the task might have moved on in the lock chain or even left
4626 + * the chain completely and blocks now on an unrelated lock or
4627 + * on @orig_lock.
4628 + *
4629 + * We stored the lock on which @task was blocked in @next_lock,
4630 + * so we can detect the chain change.
4631 + */
4632 + if (next_lock != waiter->lock)
4633 + goto out_unlock_pi;
4634 +
4635 + /*
4636 * Drop out, when the task has no waiters. Note,
4637 * top_waiter can be NULL, when we are in the deboosting
4638 * mode!
4639 @@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
4640 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
4641 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
4642 raw_spin_unlock(&lock->wait_lock);
4643 - ret = deadlock_detect ? -EDEADLK : 0;
4644 + ret = -EDEADLK;
4645 goto out_unlock_pi;
4646 }
4647
4648 @@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
4649 __rt_mutex_adjust_prio(task);
4650 }
4651
4652 + /*
4653 + * Check whether the task which owns the current lock is pi
4654 + * blocked itself. If yes we store a pointer to the lock for
4655 + * the lock chain change detection above. After we dropped
4656 + * task->pi_lock next_lock cannot be dereferenced anymore.
4657 + */
4658 + next_lock = task_blocked_on_lock(task);
4659 +
4660 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
4661
4662 top_waiter = rt_mutex_top_waiter(lock);
4663 raw_spin_unlock(&lock->wait_lock);
4664
4665 + /*
4666 + * We reached the end of the lock chain. Stop right here. No
4667 + * point to go back just to figure that out.
4668 + */
4669 + if (!next_lock)
4670 + goto out_put_task;
4671 +
4672 if (!detect_deadlock && waiter != top_waiter)
4673 goto out_put_task;
4674
4675 @@ -536,8 +624,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
4676 {
4677 struct task_struct *owner = rt_mutex_owner(lock);
4678 struct rt_mutex_waiter *top_waiter = waiter;
4679 - unsigned long flags;
4680 + struct rt_mutex *next_lock;
4681 int chain_walk = 0, res;
4682 + unsigned long flags;
4683
4684 /*
4685 * Early deadlock detection. We really don't want the task to
4686 @@ -548,7 +637,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
4687 * which is wrong, as the other waiter is not in a deadlock
4688 * situation.
4689 */
4690 - if (detect_deadlock && owner == task)
4691 + if (owner == task)
4692 return -EDEADLK;
4693
4694 raw_spin_lock_irqsave(&task->pi_lock, flags);
4695 @@ -569,20 +658,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
4696 if (!owner)
4697 return 0;
4698
4699 + raw_spin_lock_irqsave(&owner->pi_lock, flags);
4700 if (waiter == rt_mutex_top_waiter(lock)) {
4701 - raw_spin_lock_irqsave(&owner->pi_lock, flags);
4702 rt_mutex_dequeue_pi(owner, top_waiter);
4703 rt_mutex_enqueue_pi(owner, waiter);
4704
4705 __rt_mutex_adjust_prio(owner);
4706 if (owner->pi_blocked_on)
4707 chain_walk = 1;
4708 - raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
4709 - }
4710 - else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
4711 + } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
4712 chain_walk = 1;
4713 + }
4714
4715 - if (!chain_walk)
4716 + /* Store the lock on which owner is blocked or NULL */
4717 + next_lock = task_blocked_on_lock(owner);
4718 +
4719 + raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
4720 + /*
4721 + * Even if full deadlock detection is on, if the owner is not
4722 + * blocked itself, we can avoid finding this out in the chain
4723 + * walk.
4724 + */
4725 + if (!chain_walk || !next_lock)
4726 return 0;
4727
4728 /*
4729 @@ -594,8 +691,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
4730
4731 raw_spin_unlock(&lock->wait_lock);
4732
4733 - res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
4734 - task);
4735 + res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
4736 + next_lock, waiter, task);
4737
4738 raw_spin_lock(&lock->wait_lock);
4739
4740 @@ -605,7 +702,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
4741 /*
4742 * Wake up the next waiter on the lock.
4743 *
4744 - * Remove the top waiter from the current tasks waiter list and wake it up.
4745 + * Remove the top waiter from the current tasks pi waiter list and
4746 + * wake it up.
4747 *
4748 * Called with lock->wait_lock held.
4749 */
4750 @@ -626,10 +724,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
4751 */
4752 rt_mutex_dequeue_pi(current, waiter);
4753
4754 - rt_mutex_set_owner(lock, NULL);
4755 + /*
4756 + * As we are waking up the top waiter, and the waiter stays
4757 + * queued on the lock until it gets the lock, this lock
4758 + * obviously has waiters. Just set the bit here and this has
4759 + * the added benefit of forcing all new tasks into the
4760 + * slow path making sure no task of lower priority than
4761 + * the top waiter can steal this lock.
4762 + */
4763 + lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
4764
4765 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
4766
4767 + /*
4768 + * It's safe to dereference waiter as it cannot go away as
4769 + * long as we hold lock->wait_lock. The waiter task needs to
4770 + * acquire it in order to dequeue the waiter.
4771 + */
4772 wake_up_process(waiter->task);
4773 }
4774
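
The wakeup path above leaves lock->owner set to RT_MUTEX_HAS_WAITERS rather than NULL. That works because the owner field doubles as a flag word: the low bit of the pointer is the "has waiters" marker, so a non-NULL value defeats the fast-path cmpxchg and funnels lockers into the slow path. A minimal userspace sketch of the tagging, assuming RT_MUTEX_HAS_WAITERS is 1UL as in rtmutex_common.h (struct task and fake_lock are stand-ins, not kernel types):

#include <stdio.h>
#include <stdint.h>

#define RT_MUTEX_HAS_WAITERS 1UL

struct task;                         /* stand-in for struct task_struct */
struct fake_lock { struct task *owner; };

static struct task *lock_owner(const struct fake_lock *l)
{
	return (struct task *)((uintptr_t)l->owner & ~RT_MUTEX_HAS_WAITERS);
}

static int lock_has_waiters(const struct fake_lock *l)
{
	return ((uintptr_t)l->owner & RT_MUTEX_HAS_WAITERS) != 0;
}

int main(void)
{
	/* No owner, but the waiters bit is set: a fast path that
	 * cmpxchg()s owner from NULL cannot succeed, so every locker
	 * must take the slow path, as the comment above intends. */
	struct fake_lock l = { .owner = (struct task *)RT_MUTEX_HAS_WAITERS };

	printf("owner=%p has_waiters=%d\n",
	       (void *)lock_owner(&l), lock_has_waiters(&l));
	return 0;
}
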
4775 @@ -644,8 +755,8 @@ static void remove_waiter(struct rt_mutex *lock,
4776 {
4777 int first = (waiter == rt_mutex_top_waiter(lock));
4778 struct task_struct *owner = rt_mutex_owner(lock);
4779 + struct rt_mutex *next_lock = NULL;
4780 unsigned long flags;
4781 - int chain_walk = 0;
4782
4783 raw_spin_lock_irqsave(&current->pi_lock, flags);
4784 rt_mutex_dequeue(lock, waiter);
4785 @@ -669,13 +780,13 @@ static void remove_waiter(struct rt_mutex *lock,
4786 }
4787 __rt_mutex_adjust_prio(owner);
4788
4789 - if (owner->pi_blocked_on)
4790 - chain_walk = 1;
4791 + /* Store the lock on which owner is blocked or NULL */
4792 + next_lock = task_blocked_on_lock(owner);
4793
4794 raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
4795 }
4796
4797 - if (!chain_walk)
4798 + if (!next_lock)
4799 return;
4800
4801 /* gets dropped in rt_mutex_adjust_prio_chain()! */
4802 @@ -683,7 +794,7 @@ static void remove_waiter(struct rt_mutex *lock,
4803
4804 raw_spin_unlock(&lock->wait_lock);
4805
4806 - rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
4807 + rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
4808
4809 raw_spin_lock(&lock->wait_lock);
4810 }
4811 @@ -696,6 +807,7 @@ static void remove_waiter(struct rt_mutex *lock,
4812 void rt_mutex_adjust_pi(struct task_struct *task)
4813 {
4814 struct rt_mutex_waiter *waiter;
4815 + struct rt_mutex *next_lock;
4816 unsigned long flags;
4817
4818 raw_spin_lock_irqsave(&task->pi_lock, flags);
4819 @@ -706,12 +818,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
4820 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
4821 return;
4822 }
4823 -
4824 + next_lock = waiter->lock;
4825 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
4826
4827 /* gets dropped in rt_mutex_adjust_prio_chain()! */
4828 get_task_struct(task);
4829 - rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
4830 +
4831 + rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
4832 }
4833
4834 /**
4835 @@ -763,6 +876,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
4836 return ret;
4837 }
4838
4839 +static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
4840 + struct rt_mutex_waiter *w)
4841 +{
4842 + /*
4843 + * If the result is not -EDEADLOCK or the caller requested
4844 + * deadlock detection, nothing to do here.
4845 + */
4846 + if (res != -EDEADLOCK || detect_deadlock)
4847 + return;
4848 +
4849 + /*
4850 + * Yell loudly and stop the task right here.
4851 + */
4852 + rt_mutex_print_deadlock(w);
4853 + while (1) {
4854 + set_current_state(TASK_INTERRUPTIBLE);
4855 + schedule();
4856 + }
4857 +}
4858 +
4859 /*
4860 * Slow path lock function:
4861 */
4862 @@ -802,8 +935,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
4863
4864 set_current_state(TASK_RUNNING);
4865
4866 - if (unlikely(ret))
4867 + if (unlikely(ret)) {
4868 remove_waiter(lock, &waiter);
4869 + rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
4870 + }
4871
4872 /*
4873 * try_to_take_rt_mutex() sets the waiter bit
4874 @@ -859,12 +994,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
4875
4876 rt_mutex_deadlock_account_unlock(current);
4877
4878 - if (!rt_mutex_has_waiters(lock)) {
4879 - lock->owner = NULL;
4880 - raw_spin_unlock(&lock->wait_lock);
4881 - return;
4882 + /*
4883 + * We must be careful here if the fast path is enabled. If we
4884 + * have no waiters queued we cannot set owner to NULL here
4885 + * because of:
4886 + *
4887 + * foo->lock->owner = NULL;
4888 + * rtmutex_lock(foo->lock); <- fast path
4889 + * free = atomic_dec_and_test(foo->refcnt);
4890 + * rtmutex_unlock(foo->lock); <- fast path
4891 + * if (free)
4892 + * kfree(foo);
4893 + * raw_spin_unlock(foo->lock->wait_lock);
4894 + *
4895 + * So for the fastpath enabled kernel:
4896 + *
4897 + * Nothing can set the waiters bit as long as we hold
4898 + * lock->wait_lock. So we do the following sequence:
4899 + *
4900 + * owner = rt_mutex_owner(lock);
4901 + * clear_rt_mutex_waiters(lock);
4902 + * raw_spin_unlock(&lock->wait_lock);
4903 + * if (cmpxchg(&lock->owner, owner, 0) == owner)
4904 + * return;
4905 + * goto retry;
4906 + *
4907 + * The fastpath disabled variant is simple as all access to
4908 + * lock->owner is serialized by lock->wait_lock:
4909 + *
4910 + * lock->owner = NULL;
4911 + * raw_spin_unlock(&lock->wait_lock);
4912 + */
4913 + while (!rt_mutex_has_waiters(lock)) {
4914 + /* Drops lock->wait_lock ! */
4915 + if (unlock_rt_mutex_safe(lock) == true)
4916 + return;
4917 + /* Relock the rtmutex and try again */
4918 + raw_spin_lock(&lock->wait_lock);
4919 }
4920
4921 + /*
4922 + * The wakeup next waiter path does not suffer from the above
4923 + * race. See the comments there.
4924 + */
4925 wakeup_next_waiter(lock);
4926
4927 raw_spin_unlock(&lock->wait_lock);
4928 @@ -1112,7 +1284,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
4929 return 1;
4930 }
4931
4932 - ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
4933 + /* We enforce deadlock detection for futexes */
4934 + ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
4935
4936 if (ret && !rt_mutex_owner(lock)) {
4937 /*
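
The big comment added to rt_mutex_slowunlock() above spells out the sequence that unlock_rt_mutex_safe() (not shown in this hunk) implements: clear the waiters bit under wait_lock, drop wait_lock, then release ownership only if nobody queued a waiter in the window. A compilable userspace sketch of that sequence, with illustrative names and C11 atomics standing in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdbool.h>
#include <pthread.h>

struct fake_rtmutex {
	_Atomic unsigned long owner;	/* low bit = "has waiters" */
	pthread_mutex_t wait_lock;	/* stands in for lock->wait_lock */
};

/* Returns false when a waiter showed up in between; the caller then
 * retakes wait_lock and retries, like the while loop in
 * rt_mutex_slowunlock(). */
static bool unlock_safe(struct fake_rtmutex *l)
{
	unsigned long owner = atomic_load(&l->owner) & ~1UL;

	atomic_store(&l->owner, owner);		/* clear_rt_mutex_waiters() */
	pthread_mutex_unlock(&l->wait_lock);	/* drops lock->wait_lock */

	/* Release only if owner is still unchanged with no waiters bit. */
	return atomic_compare_exchange_strong(&l->owner, &owner, 0UL);
}

int main(void)
{
	struct fake_rtmutex l = {
		.owner = 0x1000UL | 1UL,	/* fake owner, waiters bit set */
		.wait_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	pthread_mutex_lock(&l.wait_lock);
	return unlock_safe(&l) ? 0 : 1;
}
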
4938 diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
4939 index a1a1dd06421d..f6a1f3c133b1 100644
4940 --- a/kernel/locking/rtmutex.h
4941 +++ b/kernel/locking/rtmutex.h
4942 @@ -24,3 +24,8 @@
4943 #define debug_rt_mutex_print_deadlock(w) do { } while (0)
4944 #define debug_rt_mutex_detect_deadlock(w,d) (d)
4945 #define debug_rt_mutex_reset_waiter(w) do { } while (0)
4946 +
4947 +static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
4948 +{
4949 + WARN(1, "rtmutex deadlock detected\n");
4950 +}
4951 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
4952 index 7228258b85ec..221229cf0190 100644
4953 --- a/kernel/printk/printk.c
4954 +++ b/kernel/printk/printk.c
4955 @@ -2413,6 +2413,7 @@ int unregister_console(struct console *console)
4956 if (console_drivers != NULL && console->flags & CON_CONSDEV)
4957 console_drivers->flags |= CON_CONSDEV;
4958
4959 + console->flags &= ~CON_ENABLED;
4960 console_unlock();
4961 console_sysfs_notify();
4962 return res;
4963 diff --git a/lib/idr.c b/lib/idr.c
4964 index 2642fa8e424d..4df67928816e 100644
4965 --- a/lib/idr.c
4966 +++ b/lib/idr.c
4967 @@ -249,7 +249,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
4968 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
4969
4970 /* if already at the top layer, we need to grow */
4971 - if (id >= 1 << (idp->layers * IDR_BITS)) {
4972 + if (id > idr_max(idp->layers)) {
4973 *starting_id = id;
4974 return -EAGAIN;
4975 }
4976 @@ -811,12 +811,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
4977 if (!p)
4978 return ERR_PTR(-EINVAL);
4979
4980 - n = (p->layer+1) * IDR_BITS;
4981 -
4982 - if (id >= (1 << n))
4983 + if (id > idr_max(p->layer + 1))
4984 return ERR_PTR(-EINVAL);
4985
4986 - n -= IDR_BITS;
4987 + n = p->layer * IDR_BITS;
4988 while ((n > 0) && p) {
4989 p = p->ary[(id >> n) & IDR_MASK];
4990 n -= IDR_BITS;
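
The idr change replaces the open-coded `1 << (layers * IDR_BITS)` with idr_max(): once layers * IDR_BITS reaches the width of int, the shift is undefined and the comparison misbehaves. A standalone sketch of the clamped form; idr_max_sketch() is re-derived from its use above (assuming IDR_BITS == 8 and a clamp at the top bit), not copied from lib/idr.c:

#include <stdio.h>

#define IDR_BITS	8
#define MAX_IDR_SHIFT	(sizeof(int) * 8 - 1)

static int idr_max_sketch(int layers)
{
	unsigned int bits = layers * IDR_BITS;

	if (bits > MAX_IDR_SHIFT)
		bits = MAX_IDR_SHIFT;	/* clamp: 1 << 40 would be undefined */
	return (int)((1U << bits) - 1);
}

int main(void)
{
	printf("max id, 3 layers: %d\n", idr_max_sketch(3));	/* 16777215 */
	printf("max id, 5 layers: %d\n", idr_max_sketch(5));	/* INT_MAX  */
	return 0;
}
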
4991 diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
4992 index 99a03acb7d47..b74da447e81e 100644
4993 --- a/lib/lz4/lz4_decompress.c
4994 +++ b/lib/lz4/lz4_decompress.c
4995 @@ -108,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
4996 if (length == ML_MASK) {
4997 for (; *ip == 255; length += 255)
4998 ip++;
4999 + if (unlikely(length > (size_t)(length + *ip)))
5000 + goto _output_error;
5001 length += *ip++;
5002 }
5003
5004 @@ -157,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
5005
5006 /* write overflow error detected */
5007 _output_error:
5008 - return (int) (-(((char *)ip) - source));
5009 + return -1;
5010 }
5011
5012 static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
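
The added lz4 check is the standard unsigned-wraparound test: if `length + *ip` overflows, the sum compares smaller than `length`. The same idiom, reduced to a runnable demonstration:

#include <stdio.h>
#include <stddef.h>

static int add_would_wrap(size_t length, unsigned char inc)
{
	/* If the addition wraps, the (unsigned) sum is smaller than
	 * either operand, which is exactly what the lz4 check tests. */
	return length > (size_t)(length + inc);
}

int main(void)
{
	printf("%d\n", add_would_wrap(100, 255));		/* 0: fits  */
	printf("%d\n", add_would_wrap((size_t)-1, 255));	/* 1: wraps */
	return 0;
}
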
5013 diff --git a/mm/Kconfig b/mm/Kconfig
5014 index 1b5a95f0fa01..2f42b9c2f345 100644
5015 --- a/mm/Kconfig
5016 +++ b/mm/Kconfig
5017 @@ -264,6 +264,9 @@ config MIGRATION
5018 pages as migration can relocate pages to satisfy a huge page
5019 allocation instead of reclaiming.
5020
5021 +config ARCH_ENABLE_HUGEPAGE_MIGRATION
5022 + boolean
5023 +
5024 config PHYS_ADDR_T_64BIT
5025 def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
5026
5027 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5028 index 5177c6d4a2dd..67c927a10add 100644
5029 --- a/mm/memcontrol.c
5030 +++ b/mm/memcontrol.c
5031 @@ -2684,7 +2684,8 @@ static int mem_cgroup_try_charge(struct mem_cgroup *memcg,
5032 * free their memory.
5033 */
5034 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
5035 - fatal_signal_pending(current)))
5036 + fatal_signal_pending(current) ||
5037 + current->flags & PF_EXITING))
5038 goto bypass;
5039
5040 if (unlikely(task_in_memcg_oom(current)))
5041 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
5042 index 9ccef39a9de2..eb8fb727bd67 100644
5043 --- a/mm/memory-failure.c
5044 +++ b/mm/memory-failure.c
5045 @@ -204,9 +204,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
5046 #endif
5047 si.si_addr_lsb = compound_order(compound_head(page)) + PAGE_SHIFT;
5048
5049 - if ((flags & MF_ACTION_REQUIRED) && t == current) {
5050 + if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
5051 si.si_code = BUS_MCEERR_AR;
5052 - ret = force_sig_info(SIGBUS, &si, t);
5053 + ret = force_sig_info(SIGBUS, &si, current);
5054 } else {
5055 /*
5056 * Don't use force here, it's convenient if the signal
5057 @@ -380,20 +380,51 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
5058 }
5059 }
5060
5061 -static int task_early_kill(struct task_struct *tsk)
5062 +/*
5063 + * Find a dedicated thread which is supposed to handle SIGBUS(BUS_MCEERR_AO)
5064 + * on behalf of the thread group. Return the task_struct of the first
5065 + * such dedicated thread, or NULL if there is none.
5066 + *
5067 + * We already hold read_lock(&tasklist_lock) in the caller, so we don't
5068 + * have to call rcu_read_lock/unlock() in this function.
5069 + */
5070 +static struct task_struct *find_early_kill_thread(struct task_struct *tsk)
5071 {
5072 + struct task_struct *t;
5073 +
5074 + for_each_thread(tsk, t)
5075 + if ((t->flags & PF_MCE_PROCESS) && (t->flags & PF_MCE_EARLY))
5076 + return t;
5077 + return NULL;
5078 +}
5079 +
5080 +/*
5081 + * Determine whether a given process is an "early kill" process which expects
5082 + * to be signaled when some page under the process is hwpoisoned.
5083 + * Return task_struct of the dedicated thread (main thread unless explicitly
5084 + * specified) if the process is "early kill", and NULL otherwise.
5085 + */
5086 +static struct task_struct *task_early_kill(struct task_struct *tsk,
5087 + int force_early)
5088 +{
5089 + struct task_struct *t;
5090 if (!tsk->mm)
5091 - return 0;
5092 - if (tsk->flags & PF_MCE_PROCESS)
5093 - return !!(tsk->flags & PF_MCE_EARLY);
5094 - return sysctl_memory_failure_early_kill;
5095 + return NULL;
5096 + if (force_early)
5097 + return tsk;
5098 + t = find_early_kill_thread(tsk);
5099 + if (t)
5100 + return t;
5101 + if (sysctl_memory_failure_early_kill)
5102 + return tsk;
5103 + return NULL;
5104 }
5105
5106 /*
5107 * Collect processes when the error hit an anonymous page.
5108 */
5109 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
5110 - struct to_kill **tkc)
5111 + struct to_kill **tkc, int force_early)
5112 {
5113 struct vm_area_struct *vma;
5114 struct task_struct *tsk;
5115 @@ -408,16 +439,17 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
5116 read_lock(&tasklist_lock);
5117 for_each_process (tsk) {
5118 struct anon_vma_chain *vmac;
5119 + struct task_struct *t = task_early_kill(tsk, force_early);
5120
5121 - if (!task_early_kill(tsk))
5122 + if (!t)
5123 continue;
5124 anon_vma_interval_tree_foreach(vmac, &av->rb_root,
5125 pgoff, pgoff) {
5126 vma = vmac->vma;
5127 if (!page_mapped_in_vma(page, vma))
5128 continue;
5129 - if (vma->vm_mm == tsk->mm)
5130 - add_to_kill(tsk, page, vma, to_kill, tkc);
5131 + if (vma->vm_mm == t->mm)
5132 + add_to_kill(t, page, vma, to_kill, tkc);
5133 }
5134 }
5135 read_unlock(&tasklist_lock);
5136 @@ -428,7 +460,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
5137 * Collect processes when the error hit a file mapped page.
5138 */
5139 static void collect_procs_file(struct page *page, struct list_head *to_kill,
5140 - struct to_kill **tkc)
5141 + struct to_kill **tkc, int force_early)
5142 {
5143 struct vm_area_struct *vma;
5144 struct task_struct *tsk;
5145 @@ -438,10 +470,10 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
5146 read_lock(&tasklist_lock);
5147 for_each_process(tsk) {
5148 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
5149 + struct task_struct *t = task_early_kill(tsk, force_early);
5150
5151 - if (!task_early_kill(tsk))
5152 + if (!t)
5153 continue;
5154 -
5155 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
5156 pgoff) {
5157 /*
5158 @@ -451,8 +483,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
5159 * Assume applications who requested early kill want
5160 * to be informed of all such data corruptions.
5161 */
5162 - if (vma->vm_mm == tsk->mm)
5163 - add_to_kill(tsk, page, vma, to_kill, tkc);
5164 + if (vma->vm_mm == t->mm)
5165 + add_to_kill(t, page, vma, to_kill, tkc);
5166 }
5167 }
5168 read_unlock(&tasklist_lock);
5169 @@ -465,7 +497,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
5170 * First preallocate one tokill structure outside the spin locks,
5171 * so that we can kill at least one process reasonably reliable.
5172 */
5173 -static void collect_procs(struct page *page, struct list_head *tokill)
5174 +static void collect_procs(struct page *page, struct list_head *tokill,
5175 + int force_early)
5176 {
5177 struct to_kill *tk;
5178
5179 @@ -476,9 +509,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
5180 if (!tk)
5181 return;
5182 if (PageAnon(page))
5183 - collect_procs_anon(page, tokill, &tk);
5184 + collect_procs_anon(page, tokill, &tk, force_early);
5185 else
5186 - collect_procs_file(page, tokill, &tk);
5187 + collect_procs_file(page, tokill, &tk, force_early);
5188 kfree(tk);
5189 }
5190
5191 @@ -963,7 +996,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
5192 * there's nothing that can be done.
5193 */
5194 if (kill)
5195 - collect_procs(ppage, &tokill);
5196 + collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
5197
5198 ret = try_to_unmap(ppage, ttu);
5199 if (ret != SWAP_SUCCESS)
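
With find_early_kill_thread() above, an action-optional SIGBUS(BUS_MCEERR_AO) is routed to the first thread that has PF_MCE_PROCESS and PF_MCE_EARLY set instead of the main thread. From userspace those flags are set per-thread with prctl(PR_MCE_KILL); a minimal sketch of a process dedicating one thread to hwpoison handling (error handling trimmed; dequeueing the signal with sigwaitinfo() is one possible design, not mandated by the patch):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <sys/prctl.h>

static void *mce_thread(void *arg)
{
	sigset_t set;
	siginfo_t info;

	(void)arg;
	/* Mark this thread as the early-kill target (PF_MCE_PROCESS +
	 * PF_MCE_EARLY in kernel terms). */
	prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);

	sigemptyset(&set);
	sigaddset(&set, SIGBUS);
	for (;;) {
		if (sigwaitinfo(&set, &info) == SIGBUS)
			fprintf(stderr, "hwpoison at %p\n", info.si_addr);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;
	sigset_t set;

	/* Block SIGBUS everywhere so it stays pending for the dedicated
	 * thread to dequeue with sigwaitinfo(). */
	sigemptyset(&set);
	sigaddset(&set, SIGBUS);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	pthread_create(&tid, NULL, mce_thread, NULL);
	pthread_join(tid, NULL);
	return 0;
}
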
5200 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
5201 index a4317da60532..154af210178b 100644
5202 --- a/mm/page-writeback.c
5203 +++ b/mm/page-writeback.c
5204 @@ -2398,7 +2398,7 @@ int test_clear_page_writeback(struct page *page)
5205 return ret;
5206 }
5207
5208 -int test_set_page_writeback(struct page *page)
5209 +int __test_set_page_writeback(struct page *page, bool keep_write)
5210 {
5211 struct address_space *mapping = page_mapping(page);
5212 int ret;
5213 @@ -2423,9 +2423,10 @@ int test_set_page_writeback(struct page *page)
5214 radix_tree_tag_clear(&mapping->page_tree,
5215 page_index(page),
5216 PAGECACHE_TAG_DIRTY);
5217 - radix_tree_tag_clear(&mapping->page_tree,
5218 - page_index(page),
5219 - PAGECACHE_TAG_TOWRITE);
5220 + if (!keep_write)
5221 + radix_tree_tag_clear(&mapping->page_tree,
5222 + page_index(page),
5223 + PAGECACHE_TAG_TOWRITE);
5224 spin_unlock_irqrestore(&mapping->tree_lock, flags);
5225 } else {
5226 ret = TestSetPageWriteback(page);
5227 @@ -2436,7 +2437,7 @@ int test_set_page_writeback(struct page *page)
5228 return ret;
5229
5230 }
5231 -EXPORT_SYMBOL(test_set_page_writeback);
5232 +EXPORT_SYMBOL(__test_set_page_writeback);
5233
5234 /*
5235 * Return true if any of the pages in the mapping are marked with the
5236 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5237 index 5dba2933c9c0..56eb0eb382b1 100644
5238 --- a/mm/page_alloc.c
5239 +++ b/mm/page_alloc.c
5240 @@ -6009,53 +6009,65 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
5241 * @end_bitidx: The last bit of interest
5242 * returns pageblock_bits flags
5243 */
5244 -unsigned long get_pageblock_flags_group(struct page *page,
5245 - int start_bitidx, int end_bitidx)
5246 +unsigned long get_pageblock_flags_mask(struct page *page,
5247 + unsigned long end_bitidx,
5248 + unsigned long mask)
5249 {
5250 struct zone *zone;
5251 unsigned long *bitmap;
5252 - unsigned long pfn, bitidx;
5253 - unsigned long flags = 0;
5254 - unsigned long value = 1;
5255 + unsigned long pfn, bitidx, word_bitidx;
5256 + unsigned long word;
5257
5258 zone = page_zone(page);
5259 pfn = page_to_pfn(page);
5260 bitmap = get_pageblock_bitmap(zone, pfn);
5261 bitidx = pfn_to_bitidx(zone, pfn);
5262 + word_bitidx = bitidx / BITS_PER_LONG;
5263 + bitidx &= (BITS_PER_LONG-1);
5264
5265 - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5266 - if (test_bit(bitidx + start_bitidx, bitmap))
5267 - flags |= value;
5268 -
5269 - return flags;
5270 + word = bitmap[word_bitidx];
5271 + bitidx += end_bitidx;
5272 + return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
5273 }
5274
5275 /**
5276 - * set_pageblock_flags_group - Set the requested group of flags for a pageblock_nr_pages block of pages
5277 + * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
5278 * @page: The page within the block of interest
5279 * @start_bitidx: The first bit of interest
5280 * @end_bitidx: The last bit of interest
5281 * @flags: The flags to set
5282 */
5283 -void set_pageblock_flags_group(struct page *page, unsigned long flags,
5284 - int start_bitidx, int end_bitidx)
5285 +void set_pageblock_flags_mask(struct page *page, unsigned long flags,
5286 + unsigned long end_bitidx,
5287 + unsigned long mask)
5288 {
5289 struct zone *zone;
5290 unsigned long *bitmap;
5291 - unsigned long pfn, bitidx;
5292 - unsigned long value = 1;
5293 + unsigned long pfn, bitidx, word_bitidx;
5294 + unsigned long old_word, word;
5295 +
5296 + BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
5297
5298 zone = page_zone(page);
5299 pfn = page_to_pfn(page);
5300 bitmap = get_pageblock_bitmap(zone, pfn);
5301 bitidx = pfn_to_bitidx(zone, pfn);
5302 + word_bitidx = bitidx / BITS_PER_LONG;
5303 + bitidx &= (BITS_PER_LONG-1);
5304 +
5305 VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
5306
5307 - for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
5308 - if (flags & value)
5309 - __set_bit(bitidx + start_bitidx, bitmap);
5310 - else
5311 - __clear_bit(bitidx + start_bitidx, bitmap);
5312 + bitidx += end_bitidx;
5313 + mask <<= (BITS_PER_LONG - bitidx - 1);
5314 + flags <<= (BITS_PER_LONG - bitidx - 1);
5315 +
5316 + word = ACCESS_ONCE(bitmap[word_bitidx]);
5317 + for (;;) {
5318 + old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
5319 + if (word == old_word)
5320 + break;
5321 + word = old_word;
5322 + }
5323 }
5324
5325 /*
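
The new set_pageblock_flags_mask() loop above is a standard lock-free read-modify-write: recompute the word from the last observed value until the compare-and-swap succeeds. The same shape in portable C11 atomics (a sketch; the kernel operates on the pageblock bitmap word with cmpxchg() directly):

#include <stdatomic.h>
#include <stdio.h>

static void set_bits_masked(_Atomic unsigned long *word,
			    unsigned long flags, unsigned long mask)
{
	unsigned long old = atomic_load(word);

	/* On failure compare_exchange_weak reloads 'old', so each retry
	 * recomputes the new word from the freshest value. */
	while (!atomic_compare_exchange_weak(word, &old,
					     (old & ~mask) | (flags & mask)))
		;
}

int main(void)
{
	_Atomic unsigned long w = 0xf0f0UL;

	set_bits_masked(&w, 0x000fUL, 0x00ffUL);
	printf("%#lx\n", atomic_load(&w));	/* 0xf00f */
	return 0;
}
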
5326 diff --git a/mm/rmap.c b/mm/rmap.c
5327 index 83bfafabb47b..14d1e28774e5 100644
5328 --- a/mm/rmap.c
5329 +++ b/mm/rmap.c
5330 @@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
5331 * LOCK should suffice since the actual taking of the lock must
5332 * happen _before_ what follows.
5333 */
5334 + might_sleep();
5335 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
5336 anon_vma_lock_write(anon_vma);
5337 anon_vma_unlock_write(anon_vma);
5338 @@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
5339 * above cannot corrupt).
5340 */
5341 if (!page_mapped(page)) {
5342 + rcu_read_unlock();
5343 put_anon_vma(anon_vma);
5344 - anon_vma = NULL;
5345 + return NULL;
5346 }
5347 out:
5348 rcu_read_unlock();
5349 @@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
5350 }
5351
5352 if (!page_mapped(page)) {
5353 + rcu_read_unlock();
5354 put_anon_vma(anon_vma);
5355 - anon_vma = NULL;
5356 - goto out;
5357 + return NULL;
5358 }
5359
5360 /* we pinned the anon_vma, its safe to sleep */
5361 diff --git a/mm/vmscan.c b/mm/vmscan.c
5362 index 32c661d66a45..a50bde6edbbc 100644
5363 --- a/mm/vmscan.c
5364 +++ b/mm/vmscan.c
5365 @@ -2525,10 +2525,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
5366
5367 for (i = 0; i <= ZONE_NORMAL; i++) {
5368 zone = &pgdat->node_zones[i];
5369 + if (!populated_zone(zone))
5370 + continue;
5371 +
5372 pfmemalloc_reserve += min_wmark_pages(zone);
5373 free_pages += zone_page_state(zone, NR_FREE_PAGES);
5374 }
5375
5376 + /* If there are no reserves (unexpected config) then do not throttle */
5377 + if (!pfmemalloc_reserve)
5378 + return true;
5379 +
5380 wmark_ok = free_pages > pfmemalloc_reserve / 2;
5381
5382 /* kswapd must be awake if processes are being throttled */
5383 @@ -2553,9 +2560,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
5384 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5385 nodemask_t *nodemask)
5386 {
5387 + struct zoneref *z;
5388 struct zone *zone;
5389 - int high_zoneidx = gfp_zone(gfp_mask);
5390 - pg_data_t *pgdat;
5391 + pg_data_t *pgdat = NULL;
5392
5393 /*
5394 * Kernel threads should not be throttled as they may be indirectly
5395 @@ -2574,10 +2581,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
5396 if (fatal_signal_pending(current))
5397 goto out;
5398
5399 - /* Check if the pfmemalloc reserves are ok */
5400 - first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
5401 - pgdat = zone->zone_pgdat;
5402 - if (pfmemalloc_watermark_ok(pgdat))
5403 + /*
5404 + * Check if the pfmemalloc reserves are ok by finding the first node
5405 + * with a usable ZONE_NORMAL or lower zone. The expectation is that
5406 + * GFP_KERNEL will be required for allocating network buffers when
5407 + * swapping over the network so ZONE_HIGHMEM is unusable.
5408 + *
5409 + * Throttling is based on the first usable node and throttled processes
5410 + * wait on a queue until kswapd makes progress and wakes them. There
5411 + * is an affinity then between processes waking up and where reclaim
5412 + * progress has been made assuming the process wakes on the same node.
5413 + * More importantly, processes running on remote nodes will not compete
5414 + * for remote pfmemalloc reserves and processes on different nodes
5415 + * should make reasonable progress.
5416 + */
5417 + for_each_zone_zonelist_nodemask(zone, z, zonelist,
5418 + gfp_mask, nodemask) {
5419 + if (zone_idx(zone) > ZONE_NORMAL)
5420 + continue;
5421 +
5422 + /* Throttle based on the first usable node */
5423 + pgdat = zone->zone_pgdat;
5424 + if (pfmemalloc_watermark_ok(pgdat))
5425 + goto out;
5426 + break;
5427 + }
5428 +
5429 + /* If no zone was usable by the allocation flags then do not throttle */
5430 + if (!pgdat)
5431 goto out;
5432
5433 /* Account for the throttling */
5434 @@ -3302,7 +3333,10 @@ static int kswapd(void *p)
5435 }
5436 }
5437
5438 + tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
5439 current->reclaim_state = NULL;
5440 + lockdep_clear_current_reclaim_state();
5441 +
5442 return 0;
5443 }
5444
5445 diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
5446 index 73492b91105a..8796ffa08b43 100644
5447 --- a/net/bluetooth/6lowpan.c
5448 +++ b/net/bluetooth/6lowpan.c
5449 @@ -420,12 +420,18 @@ static int conn_send(struct l2cap_conn *conn,
5450 return 0;
5451 }
5452
5453 -static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
5454 - bdaddr_t *addr, u8 *addr_type)
5455 +static u8 get_addr_type_from_eui64(u8 byte)
5456 {
5457 - u8 *eui64;
5458 + /* Check the universal (0) / local (1) bit */
5459 + if (byte & 0x02)
5460 + return ADDR_LE_DEV_RANDOM;
5461
5462 - eui64 = ip6_daddr->s6_addr + 8;
5463 + return ADDR_LE_DEV_PUBLIC;
5464 +}
5465 +
5466 +static void copy_to_bdaddr(struct in6_addr *ip6_daddr, bdaddr_t *addr)
5467 +{
5468 + u8 *eui64 = ip6_daddr->s6_addr + 8;
5469
5470 addr->b[0] = eui64[7];
5471 addr->b[1] = eui64[6];
5472 @@ -433,16 +439,19 @@ static void get_dest_bdaddr(struct in6_addr *ip6_daddr,
5473 addr->b[3] = eui64[2];
5474 addr->b[4] = eui64[1];
5475 addr->b[5] = eui64[0];
5476 +}
5477
5478 - addr->b[5] ^= 2;
5479 +static void convert_dest_bdaddr(struct in6_addr *ip6_daddr,
5480 + bdaddr_t *addr, u8 *addr_type)
5481 +{
5482 + copy_to_bdaddr(ip6_daddr, addr);
5483
5484 - /* Set universal/local bit to 0 */
5485 - if (addr->b[5] & 1) {
5486 - addr->b[5] &= ~1;
5487 - *addr_type = ADDR_LE_DEV_PUBLIC;
5488 - } else {
5489 - *addr_type = ADDR_LE_DEV_RANDOM;
5490 - }
5491 + /* We need to toggle the U/L bit that we got from the IPv6 address
5492 + * so that we get the proper BD address and its type.
5493 + */
5494 + addr->b[5] ^= 0x02;
5495 +
5496 + *addr_type = get_addr_type_from_eui64(addr->b[5]);
5497 }
5498
5499 static int header_create(struct sk_buff *skb, struct net_device *netdev,
5500 @@ -473,9 +482,11 @@ static int header_create(struct sk_buff *skb, struct net_device *netdev,
5501 /* Get destination BT device from skb.
5502 * If there is no such peer then discard the packet.
5503 */
5504 - get_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
5505 + convert_dest_bdaddr(&hdr->daddr, &addr, &addr_type);
5506
5507 - BT_DBG("dest addr %pMR type %d", &addr, addr_type);
5508 + BT_DBG("dest addr %pMR type %s IP %pI6c", &addr,
5509 + addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
5510 + &hdr->daddr);
5511
5512 read_lock_irqsave(&devices_lock, flags);
5513 peer = peer_lookup_ba(dev, &addr, addr_type);
5514 @@ -556,7 +567,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
5515 } else {
5516 unsigned long flags;
5517
5518 - get_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
5519 + convert_dest_bdaddr(&lowpan_cb(skb)->addr, &addr, &addr_type);
5520 eui64_addr = lowpan_cb(skb)->addr.s6_addr + 8;
5521 dev = lowpan_dev(netdev);
5522
5523 @@ -564,8 +575,10 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
5524 peer = peer_lookup_ba(dev, &addr, addr_type);
5525 read_unlock_irqrestore(&devices_lock, flags);
5526
5527 - BT_DBG("xmit from %s to %pMR (%pI6c) peer %p", netdev->name,
5528 - &addr, &lowpan_cb(skb)->addr, peer);
5529 + BT_DBG("xmit %s to %pMR type %s IP %pI6c peer %p",
5530 + netdev->name, &addr,
5531 + addr_type == ADDR_LE_DEV_PUBLIC ? "PUBLIC" : "RANDOM",
5532 + &lowpan_cb(skb)->addr, peer);
5533
5534 if (peer && peer->conn)
5535 err = send_pkt(peer->conn, netdev->dev_addr,
5536 @@ -620,13 +633,13 @@ static void set_addr(u8 *eui, u8 *addr, u8 addr_type)
5537 eui[6] = addr[1];
5538 eui[7] = addr[0];
5539
5540 - eui[0] ^= 2;
5541 -
5542 - /* Universal/local bit set, RFC 4291 */
5543 + /* Universal/local bit set, BT 6lowpan draft ch. 3.2.1 */
5544 if (addr_type == ADDR_LE_DEV_PUBLIC)
5545 - eui[0] |= 1;
5546 + eui[0] &= ~0x02;
5547 else
5548 - eui[0] &= ~1;
5549 + eui[0] |= 0x02;
5550 +
5551 + BT_DBG("type %d addr %*phC", addr_type, 8, eui);
5552 }
5553
5554 static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
5555 @@ -634,7 +647,6 @@ static void set_dev_addr(struct net_device *netdev, bdaddr_t *addr,
5556 {
5557 netdev->addr_assign_type = NET_ADDR_PERM;
5558 set_addr(netdev->dev_addr, addr->b, addr_type);
5559 - netdev->dev_addr[0] ^= 2;
5560 }
5561
5562 static void ifup(struct net_device *netdev)
5563 @@ -684,13 +696,6 @@ static int add_peer_conn(struct l2cap_conn *conn, struct lowpan_dev *dev)
5564
5565 memcpy(&peer->eui64_addr, (u8 *)&peer->peer_addr.s6_addr + 8,
5566 EUI64_ADDR_LEN);
5567 - peer->eui64_addr[0] ^= 2; /* second bit-flip (Universe/Local)
5568 - * is done according RFC2464
5569 - */
5570 -
5571 - raw_dump_inline(__func__, "peer IPv6 address",
5572 - (unsigned char *)&peer->peer_addr, 16);
5573 - raw_dump_inline(__func__, "peer EUI64 address", peer->eui64_addr, 8);
5574
5575 write_lock_irqsave(&devices_lock, flags);
5576 INIT_LIST_HEAD(&peer->list);
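
convert_dest_bdaddr() above recovers the BD address from the IPv6 interface identifier: reverse the EUI-64 bytes around the ff:fe filler, toggle the universal/local bit (0x02, which lands in b[5] after the swap), then classify the address type from that same bit. A standalone sketch; LE_PUBLIC/LE_RANDOM stand in for ADDR_LE_DEV_PUBLIC/ADDR_LE_DEV_RANDOM and the example address is made up:

#include <stdio.h>
#include <stdint.h>

enum { LE_PUBLIC, LE_RANDOM };

static void bdaddr_from_eui64(const uint8_t eui64[8], uint8_t b[6], int *type)
{
	/* Reverse the bytes, skipping the ff:fe filler at eui64[3..4]. */
	b[0] = eui64[7];
	b[1] = eui64[6];
	b[2] = eui64[5];
	b[3] = eui64[2];
	b[4] = eui64[1];
	b[5] = eui64[0];

	/* Toggle the U/L bit to undo the IPv6 interface-id encoding,
	 * then classify: local (1) means a random address. */
	b[5] ^= 0x02;
	*type = (b[5] & 0x02) ? LE_RANDOM : LE_PUBLIC;
}

int main(void)
{
	/* EUI-64 built from the (made-up) public address 00:1b:dc:01:02:03 */
	const uint8_t eui64[8] = { 0x02, 0x1b, 0xdc, 0xff, 0xfe, 0x01, 0x02, 0x03 };
	uint8_t b[6];
	int type;

	bdaddr_from_eui64(eui64, b, &type);
	printf("%02x:%02x:%02x:%02x:%02x:%02x type=%s\n",
	       b[5], b[4], b[3], b[2], b[1], b[0],
	       type == LE_PUBLIC ? "PUBLIC" : "RANDOM");
	return 0;
}
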
5577 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
5578 index 15010a230b6d..0381d55f995d 100644
5579 --- a/net/bluetooth/hci_event.c
5580 +++ b/net/bluetooth/hci_event.c
5581 @@ -1342,6 +1342,7 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
5582 * is requested.
5583 */
5584 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
5585 + conn->pending_sec_level != BT_SECURITY_FIPS &&
5586 conn->pending_sec_level != BT_SECURITY_HIGH &&
5587 conn->pending_sec_level != BT_SECURITY_MEDIUM)
5588 return 0;
5589 @@ -2957,7 +2958,8 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5590 }
5591
5592 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
5593 - conn->pending_sec_level == BT_SECURITY_HIGH) {
5594 + (conn->pending_sec_level == BT_SECURITY_HIGH ||
5595 + conn->pending_sec_level == BT_SECURITY_FIPS)) {
5596 BT_DBG("%s ignoring key unauthenticated for high security",
5597 hdev->name);
5598 goto not_found;
5599 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
5600 index ef5e5b04f34f..ade3fb4c23bc 100644
5601 --- a/net/bluetooth/l2cap_sock.c
5602 +++ b/net/bluetooth/l2cap_sock.c
5603 @@ -1180,13 +1180,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
5604 /* Check for backlog size */
5605 if (sk_acceptq_is_full(parent)) {
5606 BT_DBG("backlog full %d", parent->sk_ack_backlog);
5607 + release_sock(parent);
5608 return NULL;
5609 }
5610
5611 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
5612 GFP_ATOMIC);
5613 - if (!sk)
5614 + if (!sk) {
5615 + release_sock(parent);
5616 return NULL;
5617 + }
5618
5619 bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
5620
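
The l2cap_sock fix adds release_sock(parent) on both early-error paths because the callback runs with the parent socket locked, and returning with it held would leave the lock stuck. A generic sketch of the invariant (every exit of a function entered with a lock held must drop it), here expressed with a single unlock label; all names are hypothetical:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called with parent_lock held, mirroring the "parent socket locked"
 * convention of the callback; every exit path must drop it. */
static void *new_connection_sketch(int backlog_full)
{
	void *child = NULL;

	if (backlog_full)
		goto out;

	child = malloc(32);	/* stand-in for l2cap_sock_alloc() */
out:
	pthread_mutex_unlock(&parent_lock);
	return child;
}

int main(void)
{
	pthread_mutex_lock(&parent_lock);
	free(new_connection_sketch(0));

	pthread_mutex_lock(&parent_lock);
	free(new_connection_sketch(1));	/* NULL: backlog full, lock dropped */
	return 0;
}
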
5621 diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
5622 index d2d4e0d5aed0..c88b2b671849 100644
5623 --- a/net/bluetooth/mgmt.c
5624 +++ b/net/bluetooth/mgmt.c
5625 @@ -4530,7 +4530,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5626
5627 for (i = 0; i < key_count; i++) {
5628 struct mgmt_ltk_info *key = &cp->keys[i];
5629 - u8 type, addr_type;
5630 + u8 type, addr_type, authenticated;
5631
5632 if (key->addr.type == BDADDR_LE_PUBLIC)
5633 addr_type = ADDR_LE_DEV_PUBLIC;
5634 @@ -4542,8 +4542,19 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5635 else
5636 type = HCI_SMP_LTK_SLAVE;
5637
5638 + switch (key->type) {
5639 + case MGMT_LTK_UNAUTHENTICATED:
5640 + authenticated = 0x00;
5641 + break;
5642 + case MGMT_LTK_AUTHENTICATED:
5643 + authenticated = 0x01;
5644 + break;
5645 + default:
5646 + continue;
5647 + }
5648 +
5649 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
5650 - key->type, key->val, key->enc_size, key->ediv,
5651 + authenticated, key->val, key->enc_size, key->ediv,
5652 key->rand);
5653 }
5654
5655 @@ -5005,6 +5016,14 @@ void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
5656 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
5657 }
5658
5659 +static u8 mgmt_ltk_type(struct smp_ltk *ltk)
5660 +{
5661 + if (ltk->authenticated)
5662 + return MGMT_LTK_AUTHENTICATED;
5663 +
5664 + return MGMT_LTK_UNAUTHENTICATED;
5665 +}
5666 +
5667 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5668 {
5669 struct mgmt_ev_new_long_term_key ev;
5670 @@ -5030,7 +5049,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
5671
5672 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5673 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5674 - ev.key.type = key->authenticated;
5675 + ev.key.type = mgmt_ltk_type(key);
5676 ev.key.enc_size = key->enc_size;
5677 ev.key.ediv = key->ediv;
5678 ev.key.rand = key->rand;
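
load_long_term_keys() now whitelists the incoming key->type instead of storing the raw byte: unknown values make the loop skip the entry rather than feed it to hci_add_ltk(). The validate-then-map shape, standalone (the 0x00/0x01 values mirror the MGMT_LTK_* constants used above):

#include <stdio.h>
#include <stdint.h>

enum { MGMT_LTK_UNAUTHENTICATED = 0x00, MGMT_LTK_AUTHENTICATED = 0x01 };

/* Returns 0x00 or 0x01 for valid types; -1 tells the caller to skip
 * the entry, like the 'continue' in load_long_term_keys(). */
static int ltk_authenticated(uint8_t type)
{
	switch (type) {
	case MGMT_LTK_UNAUTHENTICATED:
		return 0x00;
	case MGMT_LTK_AUTHENTICATED:
		return 0x01;
	default:
		return -1;
	}
}

int main(void)
{
	const uint8_t types[] = { 0x00, 0x01, 0x7f };
	unsigned int i;

	for (i = 0; i < sizeof(types); i++)
		printf("type %#x -> %d\n", types[i], ltk_authenticated(types[i]));
	return 0;
}
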
5679 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
5680 index dfb4e1161c10..956d127528cb 100644
5681 --- a/net/bluetooth/smp.c
5682 +++ b/net/bluetooth/smp.c
5683 @@ -908,10 +908,11 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
5684
5685 authreq = seclevel_to_authreq(sec_level);
5686
5687 - /* hcon->auth_type is set by pair_device in mgmt.c. If the MITM
5688 - * flag is set we should also set it for the SMP request.
5689 + /* Require MITM if IO Capability allows or the security level
5690 + * requires it.
5691 */
5692 - if ((hcon->auth_type & 0x01))
5693 + if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
5694 + sec_level > BT_SECURITY_MEDIUM)
5695 authreq |= SMP_AUTH_MITM;
5696
5697 if (hcon->link_mode & HCI_LM_MASTER) {
5698 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
5699 index f46e4dd0558d..152d4d25ab7c 100644
5700 --- a/scripts/package/builddeb
5701 +++ b/scripts/package/builddeb
5702 @@ -155,11 +155,11 @@ if grep -q '^CONFIG_MODULES=y' $KCONFIG_CONFIG ; then
5703 for module in $(find lib/modules/ -name *.ko); do
5704 mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module)
5705 # only keep debug symbols in the debug file
5706 - objcopy --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
5707 + $OBJCOPY --only-keep-debug $module $dbg_dir/usr/lib/debug/$module
5708 # strip original module from debug symbols
5709 - objcopy --strip-debug $module
5710 + $OBJCOPY --strip-debug $module
5711 # then add a link to those
5712 - objcopy --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
5713 + $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $module
5714 done
5715 )
5716 fi
5717 diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
5718 index 05654f5e48d5..c4d6d2e20e0d 100644
5719 --- a/tools/vm/page-types.c
5720 +++ b/tools/vm/page-types.c
5721 @@ -32,6 +32,8 @@
5722 #include <assert.h>
5723 #include <ftw.h>
5724 #include <time.h>
5725 +#include <setjmp.h>
5726 +#include <signal.h>
5727 #include <sys/types.h>
5728 #include <sys/errno.h>
5729 #include <sys/fcntl.h>
5730 @@ -824,21 +826,38 @@ static void show_file(const char *name, const struct stat *st)
5731 atime, now - st->st_atime);
5732 }
5733
5734 +static sigjmp_buf sigbus_jmp;
5735 +
5736 +static void * volatile sigbus_addr;
5737 +
5738 +static void sigbus_handler(int sig, siginfo_t *info, void *ucontext)
5739 +{
5740 + (void)sig;
5741 + (void)ucontext;
5742 + sigbus_addr = info ? info->si_addr : NULL;
5743 + siglongjmp(sigbus_jmp, 1);
5744 +}
5745 +
5746 +static struct sigaction sigbus_action = {
5747 + .sa_sigaction = sigbus_handler,
5748 + .sa_flags = SA_SIGINFO,
5749 +};
5750 +
5751 static void walk_file(const char *name, const struct stat *st)
5752 {
5753 uint8_t vec[PAGEMAP_BATCH];
5754 uint64_t buf[PAGEMAP_BATCH], flags;
5755 unsigned long nr_pages, pfn, i;
5756 + off_t off, end = st->st_size;
5757 int fd;
5758 - off_t off;
5759 ssize_t len;
5760 void *ptr;
5761 int first = 1;
5762
5763 fd = checked_open(name, O_RDONLY|O_NOATIME|O_NOFOLLOW);
5764
5765 - for (off = 0; off < st->st_size; off += len) {
5766 - nr_pages = (st->st_size - off + page_size - 1) / page_size;
5767 + for (off = 0; off < end; off += len) {
5768 + nr_pages = (end - off + page_size - 1) / page_size;
5769 if (nr_pages > PAGEMAP_BATCH)
5770 nr_pages = PAGEMAP_BATCH;
5771 len = nr_pages * page_size;
5772 @@ -855,11 +874,19 @@ static void walk_file(const char *name, const struct stat *st)
5773 if (madvise(ptr, len, MADV_RANDOM))
5774 fatal("madvice failed: %s", name);
5775
5776 + if (sigsetjmp(sigbus_jmp, 1)) {
5777 + end = off + (sigbus_addr ? sigbus_addr - ptr : 0);
5778 + fprintf(stderr, "got sigbus at offset %lld: %s\n",
5779 + (long long)end, name);
5780 + goto got_sigbus;
5781 + }
5782 +
5783 /* populate ptes */
5784 for (i = 0; i < nr_pages ; i++) {
5785 if (vec[i] & 1)
5786 (void)*(volatile int *)(ptr + i * page_size);
5787 }
5788 +got_sigbus:
5789
5790 /* turn off harvesting reference bits */
5791 if (madvise(ptr, len, MADV_SEQUENTIAL))
5792 @@ -910,6 +937,7 @@ static void walk_page_cache(void)
5793
5794 kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY);
5795 pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY);
5796 + sigaction(SIGBUS, &sigbus_action, NULL);
5797
5798 if (stat(opt_file, &st))
5799 fatal("stat failed: %s\n", opt_file);
5800 @@ -925,6 +953,7 @@ static void walk_page_cache(void)
5801
5802 close(kpageflags_fd);
5803 close(pagemap_fd);
5804 + signal(SIGBUS, SIG_DFL);
5805 }
5806
5807 static void parse_file(const char *name)
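
The page-types.c changes wrap the pte-populating loads in sigsetjmp()/siglongjmp() so a SIGBUS from a truncated mapping aborts the file walk instead of killing the tool, and walk_page_cache() restores SIG_DFL afterwards. The same pattern reduced to a standalone probe (safe_read_byte is a hypothetical helper, not part of the tool):

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf sigbus_env;

static void on_sigbus(int sig)
{
	(void)sig;
	siglongjmp(sigbus_env, 1);
}

/* Reads one byte through p; returns -1 instead of dying if the access
 * raises SIGBUS (e.g. a mapping that extends past a truncated file). */
static int safe_read_byte(const volatile char *p, char *out)
{
	struct sigaction sa;
	int faulted = 0;

	sa.sa_handler = on_sigbus;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGBUS, &sa, NULL);

	if (sigsetjmp(sigbus_env, 1))
		faulted = 1;		/* longjmp'd out of the handler */
	else
		*out = *p;

	signal(SIGBUS, SIG_DFL);	/* restore, as walk_page_cache() does */
	return faulted ? -1 : 0;
}

int main(void)
{
	char c = 0, x = 'A';

	printf("ok=%d c=%c\n", safe_read_byte(&x, &c) == 0, c);
	return 0;
}
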