Magellan Linux

Contents of /trunk/kernel-alx/patches-3.12/0106-3.12.7-all-fixes.patch



Revision 2423
Tue Mar 25 12:29:50 2014 UTC by niro
File size: 176498 bytes
-added 3.12 branch
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index fcbb736d55fe..4f7c57cb6022 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -1515,6 +1515,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6
7 * atapi_dmadir: Enable ATAPI DMADIR bridge support
8
9 + * disable: Disable this device.
10 +
11 If there are multiple matching configurations changing
12 the same attribute, the last one is used.
13
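
For illustration (the port/device IDs here are hypothetical and system-specific), the new "disable" value slots into the existing libata.force syntax like any other per-device attribute, and the last-match rule documented above resolves conflicts:

    libata.force=1.00:disable            # disable device 00 on port 1
    libata.force=1.00:noncq,1.00:ncq     # same attribute twice: the last entry (ncq) wins
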
14 diff --git a/Makefile b/Makefile
15 index 2b23383311ff..c2f0b7985b41 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,6 +1,6 @@
19 VERSION = 3
20 PATCHLEVEL = 12
21 -SUBLEVEL = 6
22 +SUBLEVEL = 7
23 EXTRAVERSION =
24 NAME = One Giant Leap for Frogkind
25
26 diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
27 index 413b4c29e782..8a01060f421a 100644
28 --- a/arch/arm/boot/dts/r8a7790.dtsi
29 +++ b/arch/arm/boot/dts/r8a7790.dtsi
30 @@ -152,7 +152,7 @@
31
32 sdhi0: sdhi@ee100000 {
33 compatible = "renesas,sdhi-r8a7790";
34 - reg = <0 0xee100000 0 0x100>;
35 + reg = <0 0xee100000 0 0x200>;
36 interrupt-parent = <&gic>;
37 interrupts = <0 165 4>;
38 cap-sd-highspeed;
39 @@ -161,7 +161,7 @@
40
41 sdhi1: sdhi@ee120000 {
42 compatible = "renesas,sdhi-r8a7790";
43 - reg = <0 0xee120000 0 0x100>;
44 + reg = <0 0xee120000 0 0x200>;
45 interrupt-parent = <&gic>;
46 interrupts = <0 166 4>;
47 cap-sd-highspeed;
48 diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
49 index 80559cbdbc87..aad69ba7503f 100644
50 --- a/arch/arm/boot/dts/sun7i-a20.dtsi
51 +++ b/arch/arm/boot/dts/sun7i-a20.dtsi
52 @@ -170,7 +170,7 @@
53 emac: ethernet@01c0b000 {
54 compatible = "allwinner,sun4i-emac";
55 reg = <0x01c0b000 0x1000>;
56 - interrupts = <0 55 1>;
57 + interrupts = <0 55 4>;
58 clocks = <&ahb_gates 17>;
59 status = "disabled";
60 };
61 @@ -186,7 +186,7 @@
62 pio: pinctrl@01c20800 {
63 compatible = "allwinner,sun7i-a20-pinctrl";
64 reg = <0x01c20800 0x400>;
65 - interrupts = <0 28 1>;
66 + interrupts = <0 28 4>;
67 clocks = <&apb0_gates 5>;
68 gpio-controller;
69 interrupt-controller;
70 @@ -230,12 +230,12 @@
71 timer@01c20c00 {
72 compatible = "allwinner,sun4i-timer";
73 reg = <0x01c20c00 0x90>;
74 - interrupts = <0 22 1>,
75 - <0 23 1>,
76 - <0 24 1>,
77 - <0 25 1>,
78 - <0 67 1>,
79 - <0 68 1>;
80 + interrupts = <0 22 4>,
81 + <0 23 4>,
82 + <0 24 4>,
83 + <0 25 4>,
84 + <0 67 4>,
85 + <0 68 4>;
86 clocks = <&osc24M>;
87 };
88
89 @@ -247,7 +247,7 @@
90 uart0: serial@01c28000 {
91 compatible = "snps,dw-apb-uart";
92 reg = <0x01c28000 0x400>;
93 - interrupts = <0 1 1>;
94 + interrupts = <0 1 4>;
95 reg-shift = <2>;
96 reg-io-width = <4>;
97 clocks = <&apb1_gates 16>;
98 @@ -257,7 +257,7 @@
99 uart1: serial@01c28400 {
100 compatible = "snps,dw-apb-uart";
101 reg = <0x01c28400 0x400>;
102 - interrupts = <0 2 1>;
103 + interrupts = <0 2 4>;
104 reg-shift = <2>;
105 reg-io-width = <4>;
106 clocks = <&apb1_gates 17>;
107 @@ -267,7 +267,7 @@
108 uart2: serial@01c28800 {
109 compatible = "snps,dw-apb-uart";
110 reg = <0x01c28800 0x400>;
111 - interrupts = <0 3 1>;
112 + interrupts = <0 3 4>;
113 reg-shift = <2>;
114 reg-io-width = <4>;
115 clocks = <&apb1_gates 18>;
116 @@ -277,7 +277,7 @@
117 uart3: serial@01c28c00 {
118 compatible = "snps,dw-apb-uart";
119 reg = <0x01c28c00 0x400>;
120 - interrupts = <0 4 1>;
121 + interrupts = <0 4 4>;
122 reg-shift = <2>;
123 reg-io-width = <4>;
124 clocks = <&apb1_gates 19>;
125 @@ -287,7 +287,7 @@
126 uart4: serial@01c29000 {
127 compatible = "snps,dw-apb-uart";
128 reg = <0x01c29000 0x400>;
129 - interrupts = <0 17 1>;
130 + interrupts = <0 17 4>;
131 reg-shift = <2>;
132 reg-io-width = <4>;
133 clocks = <&apb1_gates 20>;
134 @@ -297,7 +297,7 @@
135 uart5: serial@01c29400 {
136 compatible = "snps,dw-apb-uart";
137 reg = <0x01c29400 0x400>;
138 - interrupts = <0 18 1>;
139 + interrupts = <0 18 4>;
140 reg-shift = <2>;
141 reg-io-width = <4>;
142 clocks = <&apb1_gates 21>;
143 @@ -307,7 +307,7 @@
144 uart6: serial@01c29800 {
145 compatible = "snps,dw-apb-uart";
146 reg = <0x01c29800 0x400>;
147 - interrupts = <0 19 1>;
148 + interrupts = <0 19 4>;
149 reg-shift = <2>;
150 reg-io-width = <4>;
151 clocks = <&apb1_gates 22>;
152 @@ -317,7 +317,7 @@
153 uart7: serial@01c29c00 {
154 compatible = "snps,dw-apb-uart";
155 reg = <0x01c29c00 0x400>;
156 - interrupts = <0 20 1>;
157 + interrupts = <0 20 4>;
158 reg-shift = <2>;
159 reg-io-width = <4>;
160 clocks = <&apb1_gates 23>;
161 diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
162 index dd8da2c5399f..ba1cba94c31d 100644
163 --- a/arch/arm/mach-omap2/board-ldp.c
164 +++ b/arch/arm/mach-omap2/board-ldp.c
165 @@ -243,12 +243,18 @@ static void __init ldp_display_init(void)
166
167 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
168 {
169 + int res;
170 +
171 /* LCD enable GPIO */
172 ldp_lcd_pdata.enable_gpio = gpio + 7;
173
174 /* Backlight enable GPIO */
175 ldp_lcd_pdata.backlight_gpio = gpio + 15;
176
177 + res = platform_device_register(&ldp_lcd_device);
178 + if (res)
179 + pr_err("Unable to register LCD: %d\n", res);
180 +
181 return 0;
182 }
183
184 @@ -347,7 +353,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
185
186 static struct platform_device *ldp_devices[] __initdata = {
187 &ldp_gpio_keys_device,
188 - &ldp_lcd_device,
189 };
190
191 #ifdef CONFIG_OMAP_MUX
192 diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
193 index 56cebb05509e..d23c77fadb31 100644
194 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
195 +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
196 @@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
197
198 /* gpmc */
199 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
200 - { .irq = 20 },
201 + { .irq = 20 + OMAP_INTC_START, },
202 { .irq = -1 }
203 };
204
205 @@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
206 };
207
208 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
209 - { .irq = 52 },
210 + { .irq = 52 + OMAP_INTC_START, },
211 { .irq = -1 }
212 };
213
214 diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
215 index f234cbec0cb9..60f23440082e 100644
216 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
217 +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
218 @@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
219 };
220
221 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
222 - { .irq = 20 },
223 + { .irq = 20 + OMAP_INTC_START, },
224 { .irq = -1 }
225 };
226
227 @@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
228
229 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
230 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
231 - { .irq = 24 },
232 + { .irq = 24 + OMAP_INTC_START, },
233 { .irq = -1 }
234 };
235
236 @@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
237
238 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
239 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
240 - { .irq = 28 },
241 + { .irq = 28 + OMAP_INTC_START, },
242 { .irq = -1 }
243 };
244
245 diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
246 index db32d5380b11..18f333c440db 100644
247 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
248 +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
249 @@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
250 .class = &dra7xx_uart_hwmod_class,
251 .clkdm_name = "l4per_clkdm",
252 .main_clk = "uart1_gfclk_mux",
253 - .flags = HWMOD_SWSUP_SIDLE_ACT,
254 + .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
255 .prcm = {
256 .omap4 = {
257 .clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
258 diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
259 index fecdbf7de82e..c484d5625ffb 100644
260 --- a/arch/arm64/kernel/ptrace.c
261 +++ b/arch/arm64/kernel/ptrace.c
262 @@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
263 {
264 int err, len, type, disabled = !ctrl.enabled;
265
266 - if (disabled) {
267 - len = 0;
268 - type = HW_BREAKPOINT_EMPTY;
269 - } else {
270 - err = arch_bp_generic_fields(ctrl, &len, &type);
271 - if (err)
272 - return err;
273 -
274 - switch (note_type) {
275 - case NT_ARM_HW_BREAK:
276 - if ((type & HW_BREAKPOINT_X) != type)
277 - return -EINVAL;
278 - break;
279 - case NT_ARM_HW_WATCH:
280 - if ((type & HW_BREAKPOINT_RW) != type)
281 - return -EINVAL;
282 - break;
283 - default:
284 + attr->disabled = disabled;
285 + if (disabled)
286 + return 0;
287 +
288 + err = arch_bp_generic_fields(ctrl, &len, &type);
289 + if (err)
290 + return err;
291 +
292 + switch (note_type) {
293 + case NT_ARM_HW_BREAK:
294 + if ((type & HW_BREAKPOINT_X) != type)
295 return -EINVAL;
296 - }
297 + break;
298 + case NT_ARM_HW_WATCH:
299 + if ((type & HW_BREAKPOINT_RW) != type)
300 + return -EINVAL;
301 + break;
302 + default:
303 + return -EINVAL;
304 }
305
306 attr->bp_len = len;
307 attr->bp_type = type;
308 - attr->disabled = disabled;
309
310 return 0;
311 }
312 diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
313 index cca12f084842..9cf9635e8f44 100644
314 --- a/arch/powerpc/include/asm/exception-64s.h
315 +++ b/arch/powerpc/include/asm/exception-64s.h
316 @@ -265,7 +265,7 @@ do_kvm_##n: \
317 subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
318 beq- 1f; \
319 ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
320 -1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
321 +1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
322 blt+ cr1,3f; /* abort if it is */ \
323 li r1,(n); /* will be reloaded later */ \
324 sth r1,PACA_TRAP_SAVE(r13); \
325 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
326 index 3d11d8038dee..9141e894c89b 100644
327 --- a/arch/powerpc/kernel/head_64.S
328 +++ b/arch/powerpc/kernel/head_64.S
329 @@ -467,6 +467,7 @@ _STATIC(__after_prom_start)
330 mtctr r8
331 bctr
332
333 +.balign 8
334 p_end: .llong _end - _stext
335
336 4: /* Now copy the rest of the kernel up to _end */
337 diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
338 index 043eec8461e7..46ff25149282 100644
339 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
340 +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
341 @@ -473,11 +473,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
342 slb_v = vcpu->kvm->arch.vrma_slb_v;
343 }
344
345 + preempt_disable();
346 /* Find the HPTE in the hash table */
347 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
348 HPTE_V_VALID | HPTE_V_ABSENT);
349 - if (index < 0)
350 + if (index < 0) {
351 + preempt_enable();
352 return -ENOENT;
353 + }
354 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
355 v = hptep[0] & ~HPTE_V_HVLOCK;
356 gr = kvm->arch.revmap[index].guest_rpte;
357 @@ -485,6 +488,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
358 /* Unlock the HPTE */
359 asm volatile("lwsync" : : : "memory");
360 hptep[0] = v;
361 + preempt_enable();
362
363 gpte->eaddr = eaddr;
364 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
365 diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
366 index 9c515440ad1a..ea17b3067233 100644
367 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
368 +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
369 @@ -749,6 +749,10 @@ static int slb_base_page_shift[4] = {
370 20, /* 1M, unsupported */
371 };
372
373 +/* When called from virtmode, this function must be protected by
374 + * preempt_disable(); otherwise, holding HPTE_V_HVLOCK
375 + * can trigger a deadlock.
376 + */
377 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
378 unsigned long valid)
379 {
380 diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
381 index 2a0a596ebf67..d77f2f6c7ff0 100644
382 --- a/arch/sh/kernel/sh_ksyms_32.c
383 +++ b/arch/sh/kernel/sh_ksyms_32.c
384 @@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
385 EXPORT_SYMBOL(copy_page);
386 EXPORT_SYMBOL(__clear_user);
387 EXPORT_SYMBOL(empty_zero_page);
388 +#ifdef CONFIG_FLATMEM
389 +/* needed by the pfn_valid macro */
390 +EXPORT_SYMBOL(min_low_pfn);
391 +EXPORT_SYMBOL(max_low_pfn);
392 +#endif
393
394 #define DECLARE_EXPORT(name) \
395 extern void name(void);EXPORT_SYMBOL(name)
396 diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
397 index 7b95f29e3174..3baff31e58cf 100644
398 --- a/arch/sh/lib/Makefile
399 +++ b/arch/sh/lib/Makefile
400 @@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
401 checksum.o strlen.o div64.o div64-generic.o
402
403 # Extracted from libgcc
404 -lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
405 +obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
406 ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
407 udiv_qrnnd.o
408
409 diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
410 index 36760317814f..90f289f0ec8e 100644
411 --- a/arch/sparc/include/asm/pgtable_64.h
412 +++ b/arch/sparc/include/asm/pgtable_64.h
413 @@ -616,7 +616,7 @@ static inline unsigned long pte_present(pte_t pte)
414 }
415
416 #define pte_accessible pte_accessible
417 -static inline unsigned long pte_accessible(pte_t a)
418 +static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
419 {
420 return pte_val(a) & _PAGE_VALID;
421 }
422 @@ -806,7 +806,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
423 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
424 * and SUN4V pte layout, so this inline test is fine.
425 */
426 - if (likely(mm != &init_mm) && pte_accessible(orig))
427 + if (likely(mm != &init_mm) && pte_accessible(mm, orig))
428 tlb_batch_add(mm, addr, ptep, orig, fullmm);
429 }
430
431 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
432 index 3d1999458709..bbc8b12fa443 100644
433 --- a/arch/x86/include/asm/pgtable.h
434 +++ b/arch/x86/include/asm/pgtable.h
435 @@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
436 }
437
438 #define pte_accessible pte_accessible
439 -static inline int pte_accessible(pte_t a)
440 +static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
441 {
442 - return pte_flags(a) & _PAGE_PRESENT;
443 + if (pte_flags(a) & _PAGE_PRESENT)
444 + return true;
445 +
446 + if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
447 + mm_tlb_flush_pending(mm))
448 + return true;
449 +
450 + return false;
451 }
452
453 static inline int pte_hidden(pte_t pte)
454 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
455 index ec7299566f79..a51efc90b534 100644
456 --- a/arch/x86/kernel/cpu/intel.c
457 +++ b/arch/x86/kernel/cpu/intel.c
458 @@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
459 set_cpu_cap(c, X86_FEATURE_PEBS);
460 }
461
462 - if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
463 + if (c->x86 == 6 && cpu_has_clflush &&
464 + (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
465 set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
466
467 #ifdef CONFIG_X86_64
468 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
469 index dec48bfaddb8..1673940cf9c3 100644
470 --- a/arch/x86/kvm/lapic.c
471 +++ b/arch/x86/kvm/lapic.c
472 @@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
473 return;
474 }
475
476 + if (!kvm_vcpu_is_bsp(apic->vcpu))
477 + value &= ~MSR_IA32_APICBASE_BSP;
478 + vcpu->arch.apic_base = value;
479 +
480 /* update jump label if enable bit changes */
481 if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
482 if (value & MSR_IA32_APICBASE_ENABLE)
483 @@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
484 recalculate_apic_map(vcpu->kvm);
485 }
486
487 - if (!kvm_vcpu_is_bsp(apic->vcpu))
488 - value &= ~MSR_IA32_APICBASE_BSP;
489 -
490 - vcpu->arch.apic_base = value;
491 if ((old_value ^ value) & X2APIC_ENABLE) {
492 if (value & X2APIC_ENABLE) {
493 u32 id = kvm_apic_id(apic);
494 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
495 index 2b2fce1b2009..6128914ee873 100644
496 --- a/arch/x86/kvm/vmx.c
497 +++ b/arch/x86/kvm/vmx.c
498 @@ -8218,8 +8218,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
499 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
500 kvm_set_cr4(vcpu, vmcs12->host_cr4);
501
502 - if (nested_cpu_has_ept(vmcs12))
503 - nested_ept_uninit_mmu_context(vcpu);
504 + nested_ept_uninit_mmu_context(vcpu);
505
506 kvm_set_cr3(vcpu, vmcs12->host_cr3);
507 kvm_mmu_reset_context(vcpu);
508 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
509 index dd74e46828c0..0596e8e0cc19 100644
510 --- a/arch/x86/mm/gup.c
511 +++ b/arch/x86/mm/gup.c
512 @@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
513 pte_t pte = gup_get_pte(ptep);
514 struct page *page;
515
516 + /* Similar to the PMD case, NUMA hinting must take slow path */
517 + if (pte_numa(pte)) {
518 + pte_unmap(ptep);
519 + return 0;
520 + }
521 +
522 if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
523 pte_unmap(ptep);
524 return 0;
525 @@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
526 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
527 return 0;
528 if (unlikely(pmd_large(pmd))) {
529 + /*
530 + * NUMA hinting faults need to be handled in the GUP
531 + * slowpath for accounting purposes and so that they
532 + * can be serialised against THP migration.
533 + */
534 + if (pmd_numa(pmd))
535 + return 0;
536 if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
537 return 0;
538 } else {
539 diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
540 index fb78bb9ad8f6..ab19263baf39 100644
541 --- a/drivers/acpi/acpi_lpss.c
542 +++ b/drivers/acpi/acpi_lpss.c
543 @@ -156,6 +156,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
544 { "80860F14", (unsigned long)&byt_sdio_dev_desc },
545 { "80860F41", (unsigned long)&byt_i2c_dev_desc },
546 { "INT33B2", },
547 + { "INT33FC", },
548
549 { }
550 };
551 diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
552 index b587ec8257b2..661a5b7f5104 100644
553 --- a/drivers/acpi/bus.c
554 +++ b/drivers/acpi/bus.c
555 @@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
556 }
557 EXPORT_SYMBOL(acpi_bus_get_private_data);
558
559 +void acpi_bus_no_hotplug(acpi_handle handle)
560 +{
561 + struct acpi_device *adev = NULL;
562 +
563 + acpi_bus_get_device(handle, &adev);
564 + if (adev)
565 + adev->flags.no_hotplug = true;
566 +}
567 +EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
568 +
569 static void acpi_print_osc_error(acpi_handle handle,
570 struct acpi_osc_context *context, char *error)
571 {
572 diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
573 index 58debb0acc3a..566cca4f9dc2 100644
574 --- a/drivers/ata/ahci_imx.c
575 +++ b/drivers/ata/ahci_imx.c
576 @@ -60,7 +60,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
577 /*
578 * set PHY Parameters, two steps to configure the GPR13,
579 * one write for rest of parameters, mask of first write
580 - * is 0x07fffffd, and the other one write for setting
581 + * is 0x07ffffff, and the other one write for setting
582 * the mpll_clk_en.
583 */
584 regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
585 @@ -71,6 +71,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
586 | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
587 | IMX6Q_GPR13_SATA_TX_BOOST_MASK
588 | IMX6Q_GPR13_SATA_TX_LVL_MASK
589 + | IMX6Q_GPR13_SATA_MPLL_CLK_EN
590 | IMX6Q_GPR13_SATA_TX_EDGE_RATE
591 , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
592 | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
593 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
594 index 81a94a3919db..2c2780a19609 100644
595 --- a/drivers/ata/libata-core.c
596 +++ b/drivers/ata/libata-core.c
597 @@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
598 "failed to get NCQ Send/Recv Log Emask 0x%x\n",
599 err_mask);
600 } else {
601 + u8 *cmds = dev->ncq_send_recv_cmds;
602 +
603 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
604 - memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
605 - ATA_LOG_NCQ_SEND_RECV_SIZE);
606 + memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
607 +
608 + if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
609 + ata_dev_dbg(dev, "disabling queued TRIM support\n");
610 + cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
611 + ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
612 + }
613 }
614 }
615
616 @@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
617 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
618 ATA_HORKAGE_FIRMWARE_WARN },
619
620 + /* Seagate Momentus SpinPoint M8 seems to have FPDMA_AA issues */
621 + { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
622 +
623 /* Blacklist entries taken from Silicon Image 3124/3132
624 Windows driver .inf file - also several Linux problem reports */
625 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
626 @@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
627 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
628 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
629
630 + /* devices that don't properly handle queued TRIM commands */
631 + { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
632 + { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
633 +
634 /* End Marker */
635 { }
636 };
637 @@ -6520,6 +6534,7 @@ static int __init ata_parse_force_one(char **cur,
638 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
639 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
640 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
641 + { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
642 };
643 char *start = *cur, *p = *cur;
644 char *id, *val, *endp;
645 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
646 index ab58556d347c..377eb889f555 100644
647 --- a/drivers/ata/libata-scsi.c
648 +++ b/drivers/ata/libata-scsi.c
649 @@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
650 return;
651 }
652
653 + /*
654 + * XXX - UGLY HACK
655 + *
656 + * The block layer suspend/resume path is fundamentally broken due
657 + * to freezable kthreads and workqueue and may deadlock if a block
658 + * device gets removed while resume is in progress. I don't know
659 + * what the solution is short of removing freezable kthreads and
660 + * workqueues altogether.
661 + *
662 + * The following is an ugly hack to avoid kicking off device
663 + * removal while freezer is active. This is a joke but does avoid
664 + * this particular deadlock scenario.
665 + *
666 + * https://bugzilla.kernel.org/show_bug.cgi?id=62801
667 + * http://marc.info/?l=linux-kernel&m=138695698516487
668 + */
669 +#ifdef CONFIG_FREEZER
670 + while (pm_freezing)
671 + msleep(10);
672 +#endif
673 +
674 DPRINTK("ENTER\n");
675 mutex_lock(&ap->scsi_scan_mutex);
676
677 diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
678 index 4cbae4f762b1..b386be6018d7 100644
679 --- a/drivers/clocksource/dw_apb_timer_of.c
680 +++ b/drivers/clocksource/dw_apb_timer_of.c
681 @@ -108,12 +108,11 @@ static void add_clocksource(struct device_node *source_timer)
682
683 static u32 read_sched_clock(void)
684 {
685 - return __raw_readl(sched_io_base);
686 + return ~__raw_readl(sched_io_base);
687 }
688
689 static const struct of_device_id sptimer_ids[] __initconst = {
690 { .compatible = "picochip,pc3x2-rtc" },
691 - { .compatible = "snps,dw-apb-timer-sp" },
692 { /* Sentinel */ },
693 };
694
695 @@ -153,4 +152,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
696 num_called++;
697 }
698 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
699 -CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
700 +CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
701 +CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
702 +CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
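
The read_sched_clock() change relies on the DW APB timer being a free-running down-counter: bitwise NOT converts its decreasing raw value into the monotonically increasing count sched_clock expects. A standalone sketch of that identity:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t counter = 0xffffffff;   /* free-running down-counter */

        for (int tick = 0; tick < 4; tick++) {
            printf("raw=%08x  ~raw=%08x\n", counter, ~counter);
            counter -= 100;              /* hardware decrements; ~raw grows */
        }
        return 0;
    }
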
703 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
704 index eb3fdc755000..99d8ab548a34 100644
705 --- a/drivers/cpufreq/intel_pstate.c
706 +++ b/drivers/cpufreq/intel_pstate.c
707 @@ -550,6 +550,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
708 cpu = all_cpu_data[cpunum];
709
710 intel_pstate_get_cpu_pstates(cpu);
711 + if (!cpu->pstate.current_pstate) {
712 + all_cpu_data[cpunum] = NULL;
713 + kfree(cpu);
714 + return -ENODATA;
715 + }
716
717 cpu->cpu = cpunum;
718 cpu->pstate_policy =
719 diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
720 index f238cfd33847..b61c5fc64dc3 100644
721 --- a/drivers/dma/Kconfig
722 +++ b/drivers/dma/Kconfig
723 @@ -339,6 +339,7 @@ config NET_DMA
724 bool "Network: TCP receive copy offload"
725 depends on DMA_ENGINE && NET
726 default (INTEL_IOATDMA || FSL_DMA)
727 + depends on BROKEN
728 help
729 This enables the use of DMA engines in the network stack to
730 offload receive copy-to-user operations, freeing CPU cycles.
731 diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
732 index b0bb056458a3..281029daf98c 100644
733 --- a/drivers/firewire/sbp2.c
734 +++ b/drivers/firewire/sbp2.c
735 @@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
736 .cmd_per_lun = 1,
737 .can_queue = 1,
738 .sdev_attrs = sbp2_scsi_sysfs_attrs,
739 - .no_write_same = 1,
740 };
741
742 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
743 diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
744 index 7b37300973db..2baf0ddf7e02 100644
745 --- a/drivers/gpio/gpio-msm-v2.c
746 +++ b/drivers/gpio/gpio-msm-v2.c
747 @@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
748
749 spin_lock_irqsave(&tlmm_lock, irq_flags);
750 writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
751 - clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
752 + clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
753 __clear_bit(gpio, msm_gpio.enabled_irqs);
754 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
755 }
756 @@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
757
758 spin_lock_irqsave(&tlmm_lock, irq_flags);
759 __set_bit(gpio, msm_gpio.enabled_irqs);
760 - set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
761 + set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
762 writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
763 spin_unlock_irqrestore(&tlmm_lock, irq_flags);
764 }
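
This hunk fixes a mask-versus-index bug: INTR_RAW_STATUS_EN and INTR_ENABLE are bit positions, so OR-ing them together addressed the wrong bits until each was wrapped in BIT(). A small demonstration, with hypothetical position values standing in for the driver's real ones:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Hypothetical bit positions, for illustration only. */
    #define INTR_ENABLE        0
    #define INTR_RAW_STATUS_EN 4

    int main(void)
    {
        unsigned wrong = INTR_RAW_STATUS_EN | INTR_ENABLE;           /* 0x4  */
        unsigned right = BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE); /* 0x11 */

        printf("without BIT(): %#x, with BIT(): %#x\n", wrong, right);
        return 0;
    }
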
765 diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
766 index db2de1a2dbcf..77d805a3321a 100644
767 --- a/drivers/gpio/gpio-twl4030.c
768 +++ b/drivers/gpio/gpio-twl4030.c
769 @@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
770 if (offset < TWL4030_GPIO_MAX)
771 ret = twl4030_set_gpio_direction(offset, 1);
772 else
773 - ret = -EINVAL;
774 + ret = -EINVAL; /* LED outputs can't be set as input */
775
776 if (!ret)
777 priv->direction &= ~BIT(offset);
778 @@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
779 static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
780 {
781 struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
782 - int ret = -EINVAL;
783 + int ret = 0;
784
785 mutex_lock(&priv->mutex);
786 - if (offset < TWL4030_GPIO_MAX)
787 + if (offset < TWL4030_GPIO_MAX) {
788 ret = twl4030_set_gpio_direction(offset, 0);
789 + if (ret) {
790 + mutex_unlock(&priv->mutex);
791 + return ret;
792 + }
793 + }
794 +
795 + /*
796 + * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
797 + */
798
799 priv->direction |= BIT(offset);
800 mutex_unlock(&priv->mutex);
801 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
802 index 830f7501cb4d..d0d3eae05a1a 100644
803 --- a/drivers/gpu/drm/drm_edid.c
804 +++ b/drivers/gpu/drm/drm_edid.c
805 @@ -68,6 +68,8 @@
806 #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
807 /* Force reduced-blanking timings for detailed modes */
808 #define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
809 +/* Force 8bpc */
810 +#define EDID_QUIRK_FORCE_8BPC (1 << 8)
811
812 struct detailed_mode_closure {
813 struct drm_connector *connector;
814 @@ -128,6 +130,9 @@ static struct edid_quirk {
815
816 /* Medion MD 30217 PG */
817 { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
818 +
819 + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
820 + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
821 };
822
823 /*
824 @@ -3236,6 +3241,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
825
826 drm_add_display_info(edid, &connector->display_info);
827
828 + if (quirks & EDID_QUIRK_FORCE_8BPC)
829 + connector->display_info.bpc = 8;
830 +
831 return num_modes;
832 }
833 EXPORT_SYMBOL(drm_add_edid_modes);
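
The EDID quirk list extended here is a plain vendor/product table whose flags are OR-able bits checked once the modes are parsed; EDID_QUIRK_FORCE_8BPC simply overrides the bpc a lying panel reports. A reduced sketch of that pattern (struct and helper names are illustrative; the sample entry mirrors the SEC/0xd033 panel added above):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QUIRK_FORCE_8BPC (1u << 8)

    struct edid_quirk {
        const char *vendor;   /* 3-letter PNP vendor ID */
        uint32_t product;
        uint32_t flags;
    };

    static const struct edid_quirk quirks[] = {
        { "SEC", 0xd033, QUIRK_FORCE_8BPC },  /* panel that reports 6bpc */
    };

    static uint32_t lookup_quirks(const char *vendor, uint32_t product)
    {
        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
            if (!strcmp(quirks[i].vendor, vendor) && quirks[i].product == product)
                return quirks[i].flags;
        return 0;
    }

    int main(void)
    {
        int bpc = 6;  /* bogus value parsed from the panel's EDID */
        if (lookup_quirks("SEC", 0xd033) & QUIRK_FORCE_8BPC)
            bpc = 8;  /* override, as drm_add_edid_modes() now does */
        printf("bpc=%d\n", bpc);
        return 0;
    }
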
834 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
835 index d5c784d48671..5a25f2476c3b 100644
836 --- a/drivers/gpu/drm/i915/i915_dma.c
837 +++ b/drivers/gpu/drm/i915/i915_dma.c
838 @@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
839 drm_i915_private_t *dev_priv = dev->dev_private;
840 struct drm_i915_master_private *master_priv;
841
842 + /*
843 + * The dri breadcrumb update races against the drm master disappearing.
844 + * Instead of trying to fix this (this is by far not the only ums issue)
845 + * just don't do the update in kms mode.
846 + */
847 + if (drm_core_check_feature(dev, DRIVER_MODESET))
848 + return;
849 +
850 if (dev->primary->master) {
851 master_priv = dev->primary->master->driver_priv;
852 if (master_priv->sarea_priv)
853 @@ -1848,8 +1856,10 @@ void i915_driver_lastclose(struct drm_device * dev)
854
855 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
856 {
857 + mutex_lock(&dev->struct_mutex);
858 i915_gem_context_close(dev, file_priv);
859 i915_gem_release(dev, file_priv);
860 + mutex_unlock(&dev->struct_mutex);
861 }
862
863 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
864 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
865 index cdfb9da0e4ce..b00b32c992b0 100644
866 --- a/drivers/gpu/drm/i915/i915_gem.c
867 +++ b/drivers/gpu/drm/i915/i915_gem.c
868 @@ -2278,15 +2278,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
869 kfree(request);
870 }
871
872 -static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
873 - struct intel_ring_buffer *ring)
874 +static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
875 + struct intel_ring_buffer *ring)
876 {
877 - u32 completed_seqno;
878 - u32 acthd;
879 + u32 completed_seqno = ring->get_seqno(ring, false);
880 + u32 acthd = intel_ring_get_active_head(ring);
881 + struct drm_i915_gem_request *request;
882 +
883 + list_for_each_entry(request, &ring->request_list, list) {
884 + if (i915_seqno_passed(completed_seqno, request->seqno))
885 + continue;
886
887 - acthd = intel_ring_get_active_head(ring);
888 - completed_seqno = ring->get_seqno(ring, false);
889 + i915_set_reset_status(ring, request, acthd);
890 + }
891 +}
892
893 +static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
894 + struct intel_ring_buffer *ring)
895 +{
896 while (!list_empty(&ring->request_list)) {
897 struct drm_i915_gem_request *request;
898
899 @@ -2294,9 +2303,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
900 struct drm_i915_gem_request,
901 list);
902
903 - if (request->seqno > completed_seqno)
904 - i915_set_reset_status(ring, request, acthd);
905 -
906 i915_gem_free_request(request);
907 }
908
909 @@ -2338,8 +2344,16 @@ void i915_gem_reset(struct drm_device *dev)
910 struct intel_ring_buffer *ring;
911 int i;
912
913 + /*
914 + * Before we free the objects from the requests, we need to inspect
915 + * them for finding the guilty party. As the requests only borrow
916 + * their reference to the objects, the inspection must be done first.
917 + */
918 + for_each_ring(ring, dev_priv, i)
919 + i915_gem_reset_ring_status(dev_priv, ring);
920 +
921 for_each_ring(ring, dev_priv, i)
922 - i915_gem_reset_ring_lists(dev_priv, ring);
923 + i915_gem_reset_ring_cleanup(dev_priv, ring);
924
925 i915_gem_restore_fences(dev);
926 }
927 diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
928 index 403309c2a7d6..bb6eecb3551c 100644
929 --- a/drivers/gpu/drm/i915/i915_gem_context.c
930 +++ b/drivers/gpu/drm/i915/i915_gem_context.c
931 @@ -328,10 +328,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
932 {
933 struct drm_i915_file_private *file_priv = file->driver_priv;
934
935 - mutex_lock(&dev->struct_mutex);
936 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
937 idr_destroy(&file_priv->context_idr);
938 - mutex_unlock(&dev->struct_mutex);
939 }
940
941 static struct i915_hw_context *
942 @@ -404,11 +402,21 @@ static int do_switch(struct i915_hw_context *to)
943 if (ret)
944 return ret;
945
946 - /* Clear this page out of any CPU caches for coherent swap-in/out. Note
947 + /*
948 + * Pin can switch back to the default context if we end up calling into
949 + * evict_everything - as a last ditch gtt defrag effort that also
950 + * switches to the default context. Hence we need to reload from here.
951 + */
952 + from = ring->last_context;
953 +
954 + /*
955 + * Clear this page out of any CPU caches for coherent swap-in/out. Note
956 * that thanks to write = false in this call and us not setting any gpu
957 * write domains when putting a context object onto the active list
958 * (when switching away from it), this won't block.
959 - * XXX: We need a real interface to do this instead of trickery. */
960 + *
961 + * XXX: We need a real interface to do this instead of trickery.
962 + */
963 ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
964 if (ret) {
965 i915_gem_object_unpin(to->obj);
966 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
967 index f535670b42d1..aad6f7bfc589 100644
968 --- a/drivers/gpu/drm/i915/intel_display.c
969 +++ b/drivers/gpu/drm/i915/intel_display.c
970 @@ -6009,7 +6009,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
971 uint32_t val;
972
973 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
974 - WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
975 + WARN(crtc->active, "CRTC for pipe %c enabled\n",
976 pipe_name(crtc->pipe));
977
978 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
979 @@ -10592,7 +10592,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
980
981 intel_setup_overlay(dev);
982
983 + drm_modeset_lock_all(dev);
984 intel_modeset_setup_hw_state(dev, false);
985 + drm_modeset_unlock_all(dev);
986 }
987
988 void intel_modeset_cleanup(struct drm_device *dev)
989 @@ -10666,14 +10668,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
990 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
991 {
992 struct drm_i915_private *dev_priv = dev->dev_private;
993 + unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
994 u16 gmch_ctrl;
995
996 - pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
997 + pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
998 if (state)
999 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
1000 else
1001 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
1002 - pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
1003 + pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
1004 return 0;
1005 }
1006
1007 diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1008 index dd7d2e182719..8160fbddbcfe 100644
1009 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
1010 +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1011 @@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
1012 bool dsm_detected;
1013 bool optimus_detected;
1014 acpi_handle dhandle;
1015 + acpi_handle other_handle;
1016 acpi_handle rom_handle;
1017 } nouveau_dsm_priv;
1018
1019 @@ -253,19 +254,17 @@ static struct vga_switcheroo_handler nouveau_dsm_handler = {
1020
1021 static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
1022 {
1023 - acpi_handle dhandle, nvidia_handle;
1024 - acpi_status status;
1025 + acpi_handle dhandle;
1026 int retval = 0;
1027
1028 dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
1029 if (!dhandle)
1030 return false;
1031
1032 - status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
1033 - if (ACPI_FAILURE(status)) {
1034 + if (!acpi_has_method(dhandle, "_DSM")) {
1035 + nouveau_dsm_priv.other_handle = dhandle;
1036 return false;
1037 }
1038 -
1039 if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
1040 retval |= NOUVEAU_DSM_HAS_MUX;
1041
1042 @@ -331,6 +330,16 @@ static bool nouveau_dsm_detect(void)
1043 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
1044 acpi_method_name);
1045 nouveau_dsm_priv.dsm_detected = true;
1046 + /*
1047 + * On some systems hotplug events are generated for the device
1048 + * being switched off when _DSM is executed. They cause ACPI
1049 + * hotplug to trigger and attempt to remove the device from
1050 + * the system, which causes it to break down. Prevent that from
1051 + * happening by setting the no_hotplug flag for the involved
1052 + * ACPI device objects.
1053 + */
1054 + acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
1055 + acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
1056 ret = true;
1057 }
1058
1059 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
1060 index e893c5362402..32c6b0a60fb3 100644
1061 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
1062 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
1063 @@ -879,6 +879,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
1064 if (nouveau_runtime_pm == 0)
1065 return -EINVAL;
1066
1067 + /* are we optimus enabled? */
1068 + if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
1069 + DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
1070 + return -EINVAL;
1071 + }
1072 +
1073 drm_kms_helper_poll_disable(drm_dev);
1074 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
1075 nouveau_switcheroo_optimus_dsm();
1076 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1077 index 86d9ee08b13f..368e1b84f429 100644
1078 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1079 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1080 @@ -1180,23 +1180,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1081 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1082
1083 if (rdev->family >= CHIP_BONAIRE) {
1084 - u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
1085 - u32 num_rb = rdev->config.cik.max_backends_per_se;
1086 - if (num_pipe_configs > 8)
1087 - num_pipe_configs = 8;
1088 - if (num_pipe_configs == 8)
1089 - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
1090 - else if (num_pipe_configs == 4) {
1091 - if (num_rb == 4)
1092 - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
1093 - else if (num_rb < 4)
1094 - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
1095 - } else if (num_pipe_configs == 2)
1096 - fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
1097 + /* Read the pipe config from the 2D TILED SCANOUT mode.
1098 + * It should be the same for the other modes too, but not all
1099 + * modes set the pipe config field. */
1100 + u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
1101 +
1102 + fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
1103 } else if ((rdev->family == CHIP_TAHITI) ||
1104 (rdev->family == CHIP_PITCAIRN))
1105 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1106 - else if (rdev->family == CHIP_VERDE)
1107 + else if ((rdev->family == CHIP_VERDE) ||
1108 + (rdev->family == CHIP_OLAND) ||
1109 + (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
1110 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1111
1112 switch (radeon_crtc->crtc_id) {
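
The new CHIP_BONAIRE branch boils down to extracting a five-bit field from bits 10:6 of tile-mode register 10. The extraction on its own, with an invented register value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t tile_mode_10 = 0x244;                /* hypothetical value */
        uint32_t pipe_config  = (tile_mode_10 >> 6) & 0x1f;
        printf("pipe_config=%u\n", pipe_config);      /* prints 9 */
        return 0;
    }
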
1113 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
1114 index 9cd2bc989ac7..31f5f0e88328 100644
1115 --- a/drivers/gpu/drm/radeon/cik.c
1116 +++ b/drivers/gpu/drm/radeon/cik.c
1117 @@ -2608,7 +2608,7 @@ static u32 cik_create_bitmask(u32 bit_width)
1118 * Returns the disabled RB bitmask.
1119 */
1120 static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1121 - u32 max_rb_num, u32 se_num,
1122 + u32 max_rb_num_per_se,
1123 u32 sh_per_se)
1124 {
1125 u32 data, mask;
1126 @@ -2622,7 +2622,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1127
1128 data >>= BACKEND_DISABLE_SHIFT;
1129
1130 - mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
1131 + mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
1132
1133 return data & mask;
1134 }
1135 @@ -2639,7 +2639,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
1136 */
1137 static void cik_setup_rb(struct radeon_device *rdev,
1138 u32 se_num, u32 sh_per_se,
1139 - u32 max_rb_num)
1140 + u32 max_rb_num_per_se)
1141 {
1142 int i, j;
1143 u32 data, mask;
1144 @@ -2649,19 +2649,21 @@ static void cik_setup_rb(struct radeon_device *rdev,
1145 for (i = 0; i < se_num; i++) {
1146 for (j = 0; j < sh_per_se; j++) {
1147 cik_select_se_sh(rdev, i, j);
1148 - data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1149 + data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
1150 disabled_rbs |= data << ((i * sh_per_se + j) * CIK_RB_BITMAP_WIDTH_PER_SH);
1151 }
1152 }
1153 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1154
1155 mask = 1;
1156 - for (i = 0; i < max_rb_num; i++) {
1157 + for (i = 0; i < max_rb_num_per_se * se_num; i++) {
1158 if (!(disabled_rbs & mask))
1159 enabled_rbs |= mask;
1160 mask <<= 1;
1161 }
1162
1163 + rdev->config.cik.backend_enable_mask = enabled_rbs;
1164 +
1165 for (i = 0; i < se_num; i++) {
1166 cik_select_se_sh(rdev, i, 0xffffffff);
1167 data = 0;
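
The rename in this hunk is the fix: the old code treated the RB count as a chip-wide total (dividing by se_num when building the mask), while callers actually pass a per-shader-engine count, so the enabled-RB loop must scan max_rb_num_per_se * se_num bits. A compact model of the corrected arithmetic (the disabled masks are invented inputs):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint32_t se_num = 2, sh_per_se = 1, max_rb_per_se = 2;
        const uint32_t bitmap_width = 2;    /* bits reserved per (SE, SH) */
        /* Hypothetical per-(SE,SH) disabled-RB masks read from hardware. */
        const uint32_t disabled_per_sh[2][1] = { { 0x0 }, { 0x2 } };

        uint32_t disabled_rbs = 0;
        for (uint32_t i = 0; i < se_num; i++)
            for (uint32_t j = 0; j < sh_per_se; j++)
                disabled_rbs |= disabled_per_sh[i][j]
                                << ((i * sh_per_se + j) * bitmap_width);

        /* Total RBs = per-SE count times number of SEs. */
        uint32_t enabled_rbs = 0, mask = 1;
        for (uint32_t i = 0; i < max_rb_per_se * se_num; i++, mask <<= 1)
            if (!(disabled_rbs & mask))
                enabled_rbs |= mask;

        printf("enabled mask = 0x%x\n", (unsigned)enabled_rbs);  /* 0x7 */
        return 0;
    }
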
1168 diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
1169 index b6286068e111..aaf7ffce8b5b 100644
1170 --- a/drivers/gpu/drm/radeon/cik_sdma.c
1171 +++ b/drivers/gpu/drm/radeon/cik_sdma.c
1172 @@ -468,7 +468,7 @@ int cik_copy_dma(struct radeon_device *rdev,
1173 radeon_ring_write(ring, 0); /* src/dst endian swap */
1174 radeon_ring_write(ring, src_offset & 0xffffffff);
1175 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
1176 - radeon_ring_write(ring, dst_offset & 0xfffffffc);
1177 + radeon_ring_write(ring, dst_offset & 0xffffffff);
1178 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
1179 src_offset += cur_size_in_bytes;
1180 dst_offset += cur_size_in_bytes;
1181 diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
1182 index 9fcd338c0fcf..86ee09783925 100644
1183 --- a/drivers/gpu/drm/radeon/dce6_afmt.c
1184 +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
1185 @@ -132,7 +132,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
1186 }
1187
1188 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
1189 - if (sad_count < 0) {
1190 + if (sad_count <= 0) {
1191 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1192 return;
1193 }
1194 @@ -193,7 +193,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
1195 }
1196
1197 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
1198 - if (sad_count < 0) {
1199 + if (sad_count <= 0) {
1200 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1201 return;
1202 }
1203 diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1204 index 57fcc4b16a52..b347fffa4519 100644
1205 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
1206 +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
1207 @@ -81,7 +81,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
1208 }
1209
1210 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
1211 - if (sad_count < 0) {
1212 + if (sad_count <= 0) {
1213 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1214 return;
1215 }
1216 @@ -134,7 +134,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
1217 }
1218
1219 sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
1220 - if (sad_count < 0) {
1221 + if (sad_count <= 0) {
1222 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1223 return;
1224 }
1225 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1226 index cac2866d79da..954eb9afbe71 100644
1227 --- a/drivers/gpu/drm/radeon/ni.c
1228 +++ b/drivers/gpu/drm/radeon/ni.c
1229 @@ -900,6 +900,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1230 (rdev->pdev->device == 0x999C)) {
1231 rdev->config.cayman.max_simds_per_se = 6;
1232 rdev->config.cayman.max_backends_per_se = 2;
1233 + rdev->config.cayman.max_hw_contexts = 8;
1234 + rdev->config.cayman.sx_max_export_size = 256;
1235 + rdev->config.cayman.sx_max_export_pos_size = 64;
1236 + rdev->config.cayman.sx_max_export_smx_size = 192;
1237 } else if ((rdev->pdev->device == 0x9903) ||
1238 (rdev->pdev->device == 0x9904) ||
1239 (rdev->pdev->device == 0x990A) ||
1240 @@ -910,6 +914,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1241 (rdev->pdev->device == 0x999D)) {
1242 rdev->config.cayman.max_simds_per_se = 4;
1243 rdev->config.cayman.max_backends_per_se = 2;
1244 + rdev->config.cayman.max_hw_contexts = 8;
1245 + rdev->config.cayman.sx_max_export_size = 256;
1246 + rdev->config.cayman.sx_max_export_pos_size = 64;
1247 + rdev->config.cayman.sx_max_export_smx_size = 192;
1248 } else if ((rdev->pdev->device == 0x9919) ||
1249 (rdev->pdev->device == 0x9990) ||
1250 (rdev->pdev->device == 0x9991) ||
1251 @@ -920,9 +928,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1252 (rdev->pdev->device == 0x99A0)) {
1253 rdev->config.cayman.max_simds_per_se = 3;
1254 rdev->config.cayman.max_backends_per_se = 1;
1255 + rdev->config.cayman.max_hw_contexts = 4;
1256 + rdev->config.cayman.sx_max_export_size = 128;
1257 + rdev->config.cayman.sx_max_export_pos_size = 32;
1258 + rdev->config.cayman.sx_max_export_smx_size = 96;
1259 } else {
1260 rdev->config.cayman.max_simds_per_se = 2;
1261 rdev->config.cayman.max_backends_per_se = 1;
1262 + rdev->config.cayman.max_hw_contexts = 4;
1263 + rdev->config.cayman.sx_max_export_size = 128;
1264 + rdev->config.cayman.sx_max_export_pos_size = 32;
1265 + rdev->config.cayman.sx_max_export_smx_size = 96;
1266 }
1267 rdev->config.cayman.max_texture_channel_caches = 2;
1268 rdev->config.cayman.max_gprs = 256;
1269 @@ -930,10 +946,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1270 rdev->config.cayman.max_gs_threads = 32;
1271 rdev->config.cayman.max_stack_entries = 512;
1272 rdev->config.cayman.sx_num_of_sets = 8;
1273 - rdev->config.cayman.sx_max_export_size = 256;
1274 - rdev->config.cayman.sx_max_export_pos_size = 64;
1275 - rdev->config.cayman.sx_max_export_smx_size = 192;
1276 - rdev->config.cayman.max_hw_contexts = 8;
1277 rdev->config.cayman.sq_num_cf_insts = 2;
1278
1279 rdev->config.cayman.sc_prim_fifo_size = 0x40;
1280 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
1281 index 24f4960f59ee..f44ca5853ff2 100644
1282 --- a/drivers/gpu/drm/radeon/radeon.h
1283 +++ b/drivers/gpu/drm/radeon/radeon.h
1284 @@ -1930,7 +1930,7 @@ struct si_asic {
1285 unsigned sc_earlyz_tile_fifo_size;
1286
1287 unsigned num_tile_pipes;
1288 - unsigned num_backends_per_se;
1289 + unsigned backend_enable_mask;
1290 unsigned backend_disable_mask_per_asic;
1291 unsigned backend_map;
1292 unsigned num_texture_channel_caches;
1293 @@ -1960,7 +1960,7 @@ struct cik_asic {
1294 unsigned sc_earlyz_tile_fifo_size;
1295
1296 unsigned num_tile_pipes;
1297 - unsigned num_backends_per_se;
1298 + unsigned backend_enable_mask;
1299 unsigned backend_disable_mask_per_asic;
1300 unsigned backend_map;
1301 unsigned num_texture_channel_caches;
1302 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1303 index d96070bf8388..d7e7c25feaaf 100644
1304 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1305 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1306 @@ -34,6 +34,7 @@ static struct radeon_atpx_priv {
1307 bool atpx_detected;
1308 /* handle for device - and atpx */
1309 acpi_handle dhandle;
1310 + acpi_handle other_handle;
1311 struct radeon_atpx atpx;
1312 } radeon_atpx_priv;
1313
1314 @@ -448,9 +449,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
1315 return false;
1316
1317 status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
1318 - if (ACPI_FAILURE(status))
1319 + if (ACPI_FAILURE(status)) {
1320 + radeon_atpx_priv.other_handle = dhandle;
1321 return false;
1322 -
1323 + }
1324 radeon_atpx_priv.dhandle = dhandle;
1325 radeon_atpx_priv.atpx.handle = atpx_handle;
1326 return true;
1327 @@ -527,6 +529,16 @@ static bool radeon_atpx_detect(void)
1328 printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
1329 acpi_method_name);
1330 radeon_atpx_priv.atpx_detected = true;
1331 + /*
1332 + * On some systems hotplug events are generated for the device
1333 + * being switched off when ATPX is executed. They cause ACPI
1334 + * hotplug to trigger and attempt to remove the device from
1335 + * the system, which causes it to break down. Prevent that from
1336 + * happening by setting the no_hotplug flag for the involved
1337 + * ACPI device objects.
1338 + */
1339 + acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
1340 + acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
1341 return true;
1342 }
1343 return false;
1344 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1345 index 61580ddc4eb2..4a3b3c55a568 100644
1346 --- a/drivers/gpu/drm/radeon/radeon_kms.c
1347 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
1348 @@ -436,6 +436,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
1349 case RADEON_INFO_SI_CP_DMA_COMPUTE:
1350 *value = 1;
1351 break;
1352 + case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
1353 + if (rdev->family >= CHIP_BONAIRE) {
1354 + *value = rdev->config.cik.backend_enable_mask;
1355 + } else if (rdev->family >= CHIP_TAHITI) {
1356 + *value = rdev->config.si.backend_enable_mask;
1357 + } else {
1358 + DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
1359 + }
1360 + break;
1361 default:
1362 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
1363 return -EINVAL;
1364 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1365 index ab0a17248d55..1d029ccf428b 100644
1366 --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1367 +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1368 @@ -472,7 +472,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
1369 return -EINVAL;
1370 }
1371
1372 - if ((start >> 28) != (end >> 28)) {
1373 + if ((start >> 28) != ((end - 1) >> 28)) {
1374 DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
1375 start, end);
1376 return -EINVAL;
1377 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
1378 index 1447d794c22a..3c38f0af78fb 100644
1379 --- a/drivers/gpu/drm/radeon/rs690.c
1380 +++ b/drivers/gpu/drm/radeon/rs690.c
1381 @@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
1382 base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
1383 base = G_000100_MC_FB_START(base) << 16;
1384 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
1385 + /* Some boards seem to be configured for 128MB of sideport memory,
1386 + * but really only have 64MB. Just skip the sideport and use
1387 + * UMA memory.
1388 + */
1389 + if (rdev->mc.igp_sideport_enabled &&
1390 + (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
1391 + base += 128 * 1024 * 1024;
1392 + rdev->mc.real_vram_size -= 128 * 1024 * 1024;
1393 + rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
1394 + }
1395
1396 /* Use K8 direct mapping for fast fb access. */
1397 rdev->fastfb_working = false;
1398 diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
1399 index 913b025ae9b3..374499db20c7 100644
1400 --- a/drivers/gpu/drm/radeon/rv770_dpm.c
1401 +++ b/drivers/gpu/drm/radeon/rv770_dpm.c
1402 @@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
1403 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
1404 ASIC_INTERNAL_MEMORY_SS, 0);
1405
1406 + /* disable ss, causes hangs on some cayman boards */
1407 + if (rdev->family == CHIP_CAYMAN) {
1408 + pi->sclk_ss = false;
1409 + pi->mclk_ss = false;
1410 + }
1411 +
1412 if (pi->sclk_ss || pi->mclk_ss)
1413 pi->dynamic_ss = true;
1414 else
1415 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1416 index fe0ec2cb2084..37acf938b779 100644
1417 --- a/drivers/gpu/drm/radeon/si.c
1418 +++ b/drivers/gpu/drm/radeon/si.c
1419 @@ -2816,7 +2816,7 @@ static void si_setup_spi(struct radeon_device *rdev,
1420 }
1421
1422 static u32 si_get_rb_disabled(struct radeon_device *rdev,
1423 - u32 max_rb_num, u32 se_num,
1424 + u32 max_rb_num_per_se,
1425 u32 sh_per_se)
1426 {
1427 u32 data, mask;
1428 @@ -2830,14 +2830,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
1429
1430 data >>= BACKEND_DISABLE_SHIFT;
1431
1432 - mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
1433 + mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
1434
1435 return data & mask;
1436 }
1437
1438 static void si_setup_rb(struct radeon_device *rdev,
1439 u32 se_num, u32 sh_per_se,
1440 - u32 max_rb_num)
1441 + u32 max_rb_num_per_se)
1442 {
1443 int i, j;
1444 u32 data, mask;
1445 @@ -2847,19 +2847,21 @@ static void si_setup_rb(struct radeon_device *rdev,
1446 for (i = 0; i < se_num; i++) {
1447 for (j = 0; j < sh_per_se; j++) {
1448 si_select_se_sh(rdev, i, j);
1449 - data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1450 + data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
1451 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
1452 }
1453 }
1454 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1455
1456 mask = 1;
1457 - for (i = 0; i < max_rb_num; i++) {
1458 + for (i = 0; i < max_rb_num_per_se * se_num; i++) {
1459 if (!(disabled_rbs & mask))
1460 enabled_rbs |= mask;
1461 mask <<= 1;
1462 }
1463
1464 + rdev->config.si.backend_enable_mask = enabled_rbs;
1465 +
1466 for (i = 0; i < se_num; i++) {
1467 si_select_se_sh(rdev, i, 0xffffffff);
1468 data = 0;
1469 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
1470 index 1006c15445e9..97f07aab1c36 100644
1471 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
1472 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
1473 @@ -116,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1474 }
1475
1476 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
1477 - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
1478 - page_last = vma_pages(vma) +
1479 - drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
1480 + vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
1481 + page_last = vma_pages(vma) + vma->vm_pgoff -
1482 + drm_vma_node_start(&bo->vma_node);
1483
1484 if (unlikely(page_offset >= bo->num_pages)) {
1485 retval = VM_FAULT_SIGBUS;
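
The sign of the vm_pgoff term is the entire TTM fix: the page index into the buffer object is the fault's offset within the mapping plus where the mapping begins relative to the BO's node, i.e. vm_pgoff - node_start, not the reverse. Plugging in sample numbers shows the old formula underflowing:

    #include <stdio.h>

    int main(void)
    {
        const unsigned long PAGE_SHIFT = 12;
        unsigned long node_start = 1000;  /* BO's base offset, in pages     */
        unsigned long vm_pgoff   = 1004;  /* mmap began 4 pages into the BO */
        unsigned long vm_start   = 0x10000000UL;
        unsigned long address    = vm_start + (3UL << PAGE_SHIFT);

        unsigned long rel = (address - vm_start) >> PAGE_SHIFT;   /* 3 */

        unsigned long old_off = rel + node_start - vm_pgoff;  /* 3 - 4: wraps */
        unsigned long new_off = rel + vm_pgoff - node_start;  /* 3 + 4 = 7   */

        printf("old=%lu new=%lu\n", old_off, new_off);
        return 0;
    }
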
1486 diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
1487 index f116d664b473..d47bb0f267f7 100644
1488 --- a/drivers/idle/intel_idle.c
1489 +++ b/drivers/idle/intel_idle.c
1490 @@ -361,6 +361,9 @@ static int intel_idle(struct cpuidle_device *dev,
1491
1492 if (!current_set_polling_and_test()) {
1493
1494 + if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
1495 + clflush((void *)&current_thread_info()->flags);
1496 +
1497 __monitor((void *)&current_thread_info()->flags, 0, 0);
1498 smp_mb();
1499 if (!need_resched())
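
Editorial note: the intel_idle hunk above inserts a CLFLUSH of the monitored thread-flags cache line before arming MONITOR, on CPUs that advertise the X86_FEATURE_CLFLUSH_MONITOR workaround (an erratum where MWAIT can otherwise miss a wakeup store). A stub sketch of the ordering only; MONITOR/MWAIT are privileged, so the primitives are no-ops here and the kernel's real clflush()/__monitor()/__mwait() helpers are assumed:

#include <stdbool.h>

static void clflush(volatile void *p)		{ (void)p; /* CLFLUSH line */ }
static void monitor_addr(volatile void *p)	{ (void)p; /* MONITOR */ }
static void mwait(unsigned hint, unsigned ext)	{ (void)hint; (void)ext; /* MWAIT */ }
static bool need_resched(void)			{ return false; }

static unsigned long thread_flags;	/* stands in for current_thread_info()->flags */

static void idle_enter(bool clflush_monitor_erratum, unsigned hint)
{
	if (clflush_monitor_erratum)
		clflush(&thread_flags);		/* evict the line first, per erratum */

	monitor_addr(&thread_flags);		/* arm monitoring on the flags word */
	/* the kernel issues smp_mb() here, then re-checks before sleeping */
	if (!need_resched())
		mwait(hint, 1);			/* sleep until the monitored line is written */
}

int main(void) { idle_enter(true, 0x10); return 0; }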
1500 diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
1501 index 9dd077b78759..df393b4f7bf2 100644
1502 --- a/drivers/iio/adc/ad7887.c
1503 +++ b/drivers/iio/adc/ad7887.c
1504 @@ -211,7 +211,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
1505 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1506 .address = 1,
1507 .scan_index = 1,
1508 - .scan_type = IIO_ST('u', 12, 16, 0),
1509 + .scan_type = {
1510 + .sign = 'u',
1511 + .realbits = 12,
1512 + .storagebits = 16,
1513 + .shift = 0,
1514 + .endianness = IIO_BE,
1515 + },
1516 },
1517 .channel[1] = {
1518 .type = IIO_VOLTAGE,
1519 @@ -221,7 +227,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
1520 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1521 .address = 0,
1522 .scan_index = 0,
1523 - .scan_type = IIO_ST('u', 12, 16, 0),
1524 + .scan_type = {
1525 + .sign = 'u',
1526 + .realbits = 12,
1527 + .storagebits = 16,
1528 + .shift = 0,
1529 + .endianness = IIO_BE,
1530 + },
1531 },
1532 .channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
1533 .int_vref_mv = 2500,
1534 diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
1535 index 3fb7757a1028..368660dfe135 100644
1536 --- a/drivers/iio/imu/adis16400_core.c
1537 +++ b/drivers/iio/imu/adis16400_core.c
1538 @@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
1539 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
1540 .address = ADIS16448_BARO_OUT,
1541 .scan_index = ADIS16400_SCAN_BARO,
1542 - .scan_type = IIO_ST('s', 16, 16, 0),
1543 + .scan_type = {
1544 + .sign = 's',
1545 + .realbits = 16,
1546 + .storagebits = 16,
1547 + .endianness = IIO_BE,
1548 + },
1549 },
1550 ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
1551 IIO_CHAN_SOFT_TIMESTAMP(11)
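
Editorial note: both IIO hunks above replace the positional IIO_ST() initializer with a designated struct initializer. The positional form had no slot for endianness, and these channels need to be declared big-endian (IIO_BE) so buffered samples are labeled correctly. A user-space model of why the switch was needed (field names mirrored from the hunks; the macro body is a guess at the old helper's shape):

#include <stdio.h>

enum endian { IIO_CPU, IIO_BE, IIO_LE };

struct scan_type {
	char sign;		/* 'u' or 's' */
	int realbits;		/* significant bits per sample */
	int storagebits;	/* bits each sample occupies in the buffer */
	int shift;		/* right shift to align the sample */
	enum endian endianness;
};

/* positional helper in the spirit of IIO_ST(): endianness cannot be set */
#define OLD_ST(si, rb, sb, sh) \
	{ .sign = (si), .realbits = (rb), .storagebits = (sb), .shift = (sh) }

int main(void)
{
	struct scan_type old = OLD_ST('u', 12, 16, 0);	/* endianness defaults to 0 */
	struct scan_type new = {
		.sign = 'u', .realbits = 12, .storagebits = 16,
		.shift = 0, .endianness = IIO_BE,	/* now explicit */
	};

	printf("old: %d, new: %d\n", old.endianness, new.endianness);
	return 0;
}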
1552 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1553 index db5d0a316d0b..ea7051ee1493 100644
1554 --- a/drivers/infiniband/ulp/isert/ib_isert.c
1555 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1556 @@ -206,7 +206,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
1557 isert_conn->conn_rx_descs = NULL;
1558 }
1559
1560 +static void isert_cq_tx_work(struct work_struct *);
1561 static void isert_cq_tx_callback(struct ib_cq *, void *);
1562 +static void isert_cq_rx_work(struct work_struct *);
1563 static void isert_cq_rx_callback(struct ib_cq *, void *);
1564
1565 static int
1566 @@ -258,26 +260,36 @@ isert_create_device_ib_res(struct isert_device *device)
1567 cq_desc[i].device = device;
1568 cq_desc[i].cq_index = i;
1569
1570 + INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
1571 device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
1572 isert_cq_rx_callback,
1573 isert_cq_event_callback,
1574 (void *)&cq_desc[i],
1575 ISER_MAX_RX_CQ_LEN, i);
1576 - if (IS_ERR(device->dev_rx_cq[i]))
1577 + if (IS_ERR(device->dev_rx_cq[i])) {
1578 + ret = PTR_ERR(device->dev_rx_cq[i]);
1579 + device->dev_rx_cq[i] = NULL;
1580 goto out_cq;
1581 + }
1582
1583 + INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
1584 device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
1585 isert_cq_tx_callback,
1586 isert_cq_event_callback,
1587 (void *)&cq_desc[i],
1588 ISER_MAX_TX_CQ_LEN, i);
1589 - if (IS_ERR(device->dev_tx_cq[i]))
1590 + if (IS_ERR(device->dev_tx_cq[i])) {
1591 + ret = PTR_ERR(device->dev_tx_cq[i]);
1592 + device->dev_tx_cq[i] = NULL;
1593 goto out_cq;
1594 + }
1595
1596 - if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
1597 + ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
1598 + if (ret)
1599 goto out_cq;
1600
1601 - if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
1602 + ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
1603 + if (ret)
1604 goto out_cq;
1605 }
1606
1607 @@ -1686,7 +1698,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
1608 {
1609 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1610
1611 - INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
1612 queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
1613 }
1614
1615 @@ -1730,7 +1741,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
1616 {
1617 struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
1618
1619 - INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
1620 queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
1621 }
1622
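
Editorial note: the isert hunks above move INIT_WORK() out of the completion callbacks and into isert_create_device_ib_res(). Re-initializing a work item from the callback could clobber one that is still queued or running, whereas queue_work() alone is safe to call repeatedly (it is a no-op while the item is already pending). The hunks also capture ib_create_cq() errors and null the CQ pointers so the out_cq unwind sees valid state. A kernel-flavored sketch of the init-once pattern (descriptor layout simplified; system_wq stands in for isert's private workqueues):

#include <linux/workqueue.h>

struct cq_desc {
	struct work_struct rx_work;
};

static void rx_work_fn(struct work_struct *w)
{
	/* drain receive completions here */
}

/* setup path: initialize exactly once, before the callback can fire */
static void cq_setup(struct cq_desc *d)
{
	INIT_WORK(&d->rx_work, rx_work_fn);
}

/* completion callback: queue only; never INIT_WORK here, since the item
 * may already be pending and re-init would corrupt workqueue state */
static void rx_callback(struct cq_desc *d)
{
	queue_work(system_wq, &d->rx_work);
}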
1623 diff --git a/drivers/input/input.c b/drivers/input/input.c
1624 index e75d015024a1..74f47980117b 100644
1625 --- a/drivers/input/input.c
1626 +++ b/drivers/input/input.c
1627 @@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
1628 break;
1629
1630 case EV_ABS:
1631 + input_alloc_absinfo(dev);
1632 + if (!dev->absinfo)
1633 + return;
1634 +
1635 __set_bit(code, dev->absbit);
1636 break;
1637
1638 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1639 index 8ee9d1556e6e..263dd921edc4 100644
1640 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1641 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
1642 @@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
1643 /* set LED in default state (end of init phase) */
1644 pcan_usb_pro_set_led(dev, 0, 1);
1645
1646 + kfree(bi);
1647 + kfree(fi);
1648 +
1649 return 0;
1650
1651 err_out:
1652 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1653 index cd76d2a6e014..a82229fe1c7f 100644
1654 --- a/drivers/net/ethernet/broadcom/tg3.c
1655 +++ b/drivers/net/ethernet/broadcom/tg3.c
1656 @@ -7608,7 +7608,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
1657 {
1658 u32 base = (u32) mapping & 0xffffffff;
1659
1660 - return (base > 0xffffdcc0) && (base + len + 8 < base);
1661 + return base + len + 8 < base;
1662 }
1663
1664 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
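
Editorial note: the tg3 one-liner above relies on 32-bit unsigned wraparound: base + len + 8 is less than base exactly when the buffer (plus tg3's 8 bytes of slack) crosses a 4 GiB boundary. The dropped base > 0xffffdcc0 pre-filter only admitted buffers starting in the last ~9 KiB below the boundary, so longer buffers that wrapped were missed. A worked check (addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

/* model of the fixed tg3_4g_overflow_test() */
static int crosses_4g(uint32_t base, uint32_t len)
{
	return (uint32_t)(base + len + 8) < base;
}

int main(void)
{
	/* 16 KiB buffer ending past a 4 GiB boundary; the old pre-filter
	 * (base > 0xffffdcc0) rejected this base and missed the wrap */
	printf("%d\n", crosses_4g(0xffffc000u, 0x4000));	/* 1: wraps */
	printf("%d\n", crosses_4g(0x00010000u, 0x4000));	/* 0: fits */
	return 0;
}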
1665 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
1666 index c6867f926cff..c0bfc818c701 100644
1667 --- a/drivers/net/usb/dm9601.c
1668 +++ b/drivers/net/usb/dm9601.c
1669 @@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
1670 dev->net->ethtool_ops = &dm9601_ethtool_ops;
1671 dev->net->hard_header_len += DM_TX_OVERHEAD;
1672 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
1673 - dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
1674 +
1675 + /* dm9620/21a require room for 4 byte padding, even in dm9601
1676 + * mode, so we need +1 to be able to receive full size
1677 + * ethernet frames.
1678 + */
1679 + dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
1680
1681 dev->mii.dev = dev->net;
1682 dev->mii.mdio_read = dm9601_mdio_read;
1683 @@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1684 static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1685 gfp_t flags)
1686 {
1687 - int len;
1688 + int len, pad;
1689
1690 /* format:
1691 b1: packet length low
1692 @@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1693 b3..n: packet data
1694 */
1695
1696 - len = skb->len;
1697 + len = skb->len + DM_TX_OVERHEAD;
1698
1699 - if (skb_headroom(skb) < DM_TX_OVERHEAD) {
1700 + /* workaround for dm962x errata with tx fifo getting out of
1701 + * sync if a USB bulk transfer retry happens right after a
1702 + * packet with odd / maxpacket length by adding up to 3 bytes
1703 + * padding.
1704 + */
1705 + while ((len & 1) || !(len % dev->maxpacket))
1706 + len++;
1707 +
1708 + len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
1709 + pad = len - skb->len;
1710 +
1711 + if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
1712 struct sk_buff *skb2;
1713
1714 - skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
1715 + skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
1716 dev_kfree_skb_any(skb);
1717 skb = skb2;
1718 if (!skb)
1719 @@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
1720
1721 __skb_push(skb, DM_TX_OVERHEAD);
1722
1723 - /* usbnet adds padding if length is a multiple of packet size
1724 - if so, adjust length value in header */
1725 - if ((skb->len % dev->maxpacket) == 0)
1726 - len++;
1727 + if (pad) {
1728 + memset(skb->data + skb->len, 0, pad);
1729 + __skb_put(skb, pad);
1730 + }
1731
1732 skb->data[0] = len;
1733 skb->data[1] = len >> 8;
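
Editorial note: the dm9601 hunks above compute the tx padding up front. The length including the 2-byte hardware header is bumped until it is even and not an exact multiple of the endpoint's max packet size, the tail is zero-filled, and the header's length field counts the padded payload but not the header itself. A user-space model of the rule (a 64-byte max packet is assumed):

#include <stdio.h>

#define DM_TX_OVERHEAD 2	/* 2-byte hardware header */

/* bytes of zero padding dm9601_tx_fixup() would append */
static int pad_for(int skb_len, int maxpacket)
{
	int len = skb_len + DM_TX_OVERHEAD;

	/* must end up even and not a multiple of maxpacket */
	while ((len & 1) || !(len % maxpacket))
		len++;

	return (len - DM_TX_OVERHEAD) - skb_len;
}

int main(void)
{
	printf("%d\n", pad_for(62, 64));	/* total 64 is a multiple, 65 is odd -> pad 2 */
	printf("%d\n", pad_for(60, 64));	/* total 62 already fine -> pad 0 */
	printf("%d\n", pad_for(61, 64));	/* 63 odd, 64 multiple, 65 odd -> pad 3 */
	return 0;
}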
1734 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1735 index 8d78253c26ce..a366d6b4626f 100644
1736 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1737 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
1738 @@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1739 mask2 |= ATH9K_INT_CST;
1740 if (isr2 & AR_ISR_S2_TSFOOR)
1741 mask2 |= ATH9K_INT_TSFOOR;
1742 +
1743 + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1744 + REG_WRITE(ah, AR_ISR_S2, isr2);
1745 + isr &= ~AR_ISR_BCNMISC;
1746 + }
1747 }
1748
1749 - isr = REG_READ(ah, AR_ISR_RAC);
1750 + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
1751 + isr = REG_READ(ah, AR_ISR_RAC);
1752 +
1753 if (isr == 0xffffffff) {
1754 *masked = 0;
1755 return false;
1756 @@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1757
1758 *masked |= ATH9K_INT_TX;
1759
1760 - s0_s = REG_READ(ah, AR_ISR_S0_S);
1761 + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
1762 + s0_s = REG_READ(ah, AR_ISR_S0_S);
1763 + s1_s = REG_READ(ah, AR_ISR_S1_S);
1764 + } else {
1765 + s0_s = REG_READ(ah, AR_ISR_S0);
1766 + REG_WRITE(ah, AR_ISR_S0, s0_s);
1767 + s1_s = REG_READ(ah, AR_ISR_S1);
1768 + REG_WRITE(ah, AR_ISR_S1, s1_s);
1769 +
1770 + isr &= ~(AR_ISR_TXOK |
1771 + AR_ISR_TXDESC |
1772 + AR_ISR_TXERR |
1773 + AR_ISR_TXEOL);
1774 + }
1775 +
1776 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
1777 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
1778 -
1779 - s1_s = REG_READ(ah, AR_ISR_S1_S);
1780 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
1781 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
1782 }
1783 @@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1784 *masked |= mask2;
1785 }
1786
1787 - if (AR_SREV_9100(ah))
1788 - return true;
1789 -
1790 - if (isr & AR_ISR_GENTMR) {
1791 + if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
1792 u32 s5_s;
1793
1794 - s5_s = REG_READ(ah, AR_ISR_S5_S);
1795 + if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
1796 + s5_s = REG_READ(ah, AR_ISR_S5_S);
1797 + } else {
1798 + s5_s = REG_READ(ah, AR_ISR_S5);
1799 + }
1800 +
1801 ah->intr_gen_timer_trigger =
1802 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
1803
1804 @@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
1805 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
1806 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1807 *masked |= ATH9K_INT_TIM_TIMER;
1808 +
1809 + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1810 + REG_WRITE(ah, AR_ISR_S5, s5_s);
1811 + isr &= ~AR_ISR_GENTMR;
1812 + }
1813 }
1814
1815 + if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
1816 + REG_WRITE(ah, AR_ISR, isr);
1817 + REG_READ(ah, AR_ISR);
1818 + }
1819 +
1820 + if (AR_SREV_9100(ah))
1821 + return true;
1822 +
1823 if (sync_cause) {
1824 ath9k_debug_sync_cause(common, sync_cause);
1825 fatal_int =
1826 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1827 index d44258172c0f..79d67c35299b 100644
1828 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1829 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1830 @@ -147,21 +147,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1831 struct ath9k_vif_iter_data *iter_data = data;
1832 int i;
1833
1834 - for (i = 0; i < ETH_ALEN; i++)
1835 - iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
1836 + if (iter_data->hw_macaddr != NULL) {
1837 + for (i = 0; i < ETH_ALEN; i++)
1838 + iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
1839 + } else {
1840 + iter_data->hw_macaddr = mac;
1841 + }
1842 }
1843
1844 -static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
1845 +static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
1846 struct ieee80211_vif *vif)
1847 {
1848 struct ath_common *common = ath9k_hw_common(priv->ah);
1849 struct ath9k_vif_iter_data iter_data;
1850
1851 /*
1852 - * Use the hardware MAC address as reference, the hardware uses it
1853 - * together with the BSSID mask when matching addresses.
1854 + * Pick the MAC address of the first interface as the new hardware
1855 + * MAC address. The hardware will use it together with the BSSID mask
1856 + * when matching addresses.
1857 */
1858 - iter_data.hw_macaddr = common->macaddr;
1859 + iter_data.hw_macaddr = NULL;
1860 memset(&iter_data.mask, 0xff, ETH_ALEN);
1861
1862 if (vif)
1863 @@ -173,6 +178,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
1864 ath9k_htc_bssid_iter, &iter_data);
1865
1866 memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
1867 +
1868 + if (iter_data.hw_macaddr)
1869 + memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
1870 +
1871 ath_hw_setbssidmask(common);
1872 }
1873
1874 @@ -1083,7 +1092,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1875 goto out;
1876 }
1877
1878 - ath9k_htc_set_bssid_mask(priv, vif);
1879 + ath9k_htc_set_mac_bssid_mask(priv, vif);
1880
1881 priv->vif_slot |= (1 << avp->index);
1882 priv->nvifs++;
1883 @@ -1148,7 +1157,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1884
1885 ath9k_htc_set_opmode(priv);
1886
1887 - ath9k_htc_set_bssid_mask(priv, vif);
1888 + ath9k_htc_set_mac_bssid_mask(priv, vif);
1889
1890 /*
1891 * Stop ANI only if there are no associated station interfaces.
1892 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1893 index 709301f88dcd..5ba0da9d1959 100644
1894 --- a/drivers/net/wireless/ath/ath9k/main.c
1895 +++ b/drivers/net/wireless/ath/ath9k/main.c
1896 @@ -885,8 +885,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
1897 struct ath_common *common = ath9k_hw_common(ah);
1898
1899 /*
1900 - * Use the hardware MAC address as reference, the hardware uses it
1901 - * together with the BSSID mask when matching addresses.
1902 + * Pick the MAC address of the first interface as the new hardware
1903 + * MAC address. The hardware will use it together with the BSSID mask
1904 + * when matching addresses.
1905 */
1906 memset(iter_data, 0, sizeof(*iter_data));
1907 memset(&iter_data->mask, 0xff, ETH_ALEN);
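
Editorial note: the two ath9k hunks above change how the reference MAC for address matching is chosen: instead of the EEPROM MAC, the first interface's address becomes the hardware MAC, and each further interface clears the BSSID-mask bits in which its address differs from that reference. This keeps the mask maximal when interface addresses differ from the burned-in one. A user-space model of the iterator (addresses invented):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ETH_ALEN 6

static void build_mask(const uint8_t addrs[][ETH_ALEN], int n,
		       uint8_t *hw_mac, uint8_t *mask)
{
	memset(mask, 0xff, ETH_ALEN);
	memcpy(hw_mac, addrs[0], ETH_ALEN);	/* "first interface" rule */

	for (int i = 1; i < n; i++)
		for (int j = 0; j < ETH_ALEN; j++)
			mask[j] &= ~(hw_mac[j] ^ addrs[i][j]);	/* clear differing bits */
}

int main(void)
{
	const uint8_t addrs[2][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x10 },
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x30 },
	};
	uint8_t mac[ETH_ALEN], mask[ETH_ALEN];

	build_mask(addrs, 2, mac, mask);
	printf("mask[5] = 0x%02x\n", (unsigned)mask[5]);	/* 0xdf: only bit 5 differs */
	return 0;
}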
1908 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
1909 index 703f839af6ca..bb3b72ebf667 100644
1910 --- a/drivers/net/wireless/rtlwifi/pci.c
1911 +++ b/drivers/net/wireless/rtlwifi/pci.c
1912 @@ -741,6 +741,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
1913 };
1914 int index = rtlpci->rx_ring[rx_queue_idx].idx;
1915
1916 + if (rtlpci->driver_is_goingto_unload)
1917 + return;
1918 /*RX NORMAL PKT */
1919 while (count--) {
1920 /*rx descriptor */
1921 @@ -1637,6 +1639,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1922 */
1923 set_hal_stop(rtlhal);
1924
1925 + rtlpci->driver_is_goingto_unload = true;
1926 rtlpriv->cfg->ops->disable_interrupt(hw);
1927 cancel_work_sync(&rtlpriv->works.lps_change_work);
1928
1929 @@ -1654,7 +1657,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
1930 ppsc->rfchange_inprogress = true;
1931 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1932
1933 - rtlpci->driver_is_goingto_unload = true;
1934 rtlpriv->cfg->ops->hw_disable(hw);
1935 /* some things are not needed if firmware not available */
1936 if (!rtlpriv->max_fw_size)
1937 diff --git a/drivers/of/address.c b/drivers/of/address.c
1938 index b55c21890760..3c4b2af51611 100644
1939 --- a/drivers/of/address.c
1940 +++ b/drivers/of/address.c
1941 @@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
1942 (unsigned long long)cp, (unsigned long long)s,
1943 (unsigned long long)da);
1944
1945 - /*
1946 - * If the number of address cells is larger than 2 we assume the
1947 - * mapping doesn't specify a physical address. Rather, the address
1948 - * specifies an identifier that must match exactly.
1949 - */
1950 - if (na > 2 && memcmp(range, addr, na * 4) != 0)
1951 - return OF_BAD_ADDR;
1952 -
1953 if (da < cp || da >= (cp + s))
1954 return OF_BAD_ADDR;
1955 return da - cp;
1956 diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
1957 index 1ea75236a15f..be5cba52a09c 100644
1958 --- a/drivers/pci/hotplug/acpiphp_glue.c
1959 +++ b/drivers/pci/hotplug/acpiphp_glue.c
1960 @@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
1961
1962 status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
1963 if (ACPI_FAILURE(status)) {
1964 - acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
1965 + if (status != AE_NOT_FOUND)
1966 + acpi_handle_warn(handle,
1967 + "can't evaluate _ADR (%#x)\n", status);
1968 return AE_OK;
1969 }
1970
1971 @@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
1972 slot->flags &= (~SLOT_ENABLED);
1973 }
1974
1975 +static bool acpiphp_no_hotplug(acpi_handle handle)
1976 +{
1977 + struct acpi_device *adev = NULL;
1978 +
1979 + acpi_bus_get_device(handle, &adev);
1980 + return adev && adev->flags.no_hotplug;
1981 +}
1982 +
1983 +static bool slot_no_hotplug(struct acpiphp_slot *slot)
1984 +{
1985 + struct acpiphp_func *func;
1986 +
1987 + list_for_each_entry(func, &slot->funcs, sibling)
1988 + if (acpiphp_no_hotplug(func_to_handle(func)))
1989 + return true;
1990 +
1991 + return false;
1992 +}
1993
1994 /**
1995 * get_slot_status - get ACPI slot status
1996 @@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
1997 unsigned long long sta;
1998
1999 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
2000 - alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
2001 + alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
2002 + || acpiphp_no_hotplug(handle);
2003 }
2004 if (!alive) {
2005 u32 v;
2006 @@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
2007 struct pci_dev *dev, *tmp;
2008
2009 mutex_lock(&slot->crit_sect);
2010 - /* wake up all functions */
2011 - if (get_slot_status(slot) == ACPI_STA_ALL) {
2012 + if (slot_no_hotplug(slot)) {
2013 + ; /* do nothing */
2014 + } else if (get_slot_status(slot) == ACPI_STA_ALL) {
2015 /* remove stale devices if any */
2016 list_for_each_entry_safe(dev, tmp, &bus->devices,
2017 bus_list)
2018 diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
2019 index 2832576d8b12..114f5ef4b73a 100644
2020 --- a/drivers/pinctrl/pinctrl-baytrail.c
2021 +++ b/drivers/pinctrl/pinctrl-baytrail.c
2022 @@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
2023
2024 static const struct acpi_device_id byt_gpio_acpi_match[] = {
2025 { "INT33B2", 0 },
2026 + { "INT33FC", 0 },
2027 { }
2028 };
2029 MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
2030 diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
2031 index 11bd0d970a52..e2142956a8e5 100644
2032 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h
2033 +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
2034 @@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
2035 #define PINMUX_GPIO(_pin) \
2036 [GPIO_##_pin] = { \
2037 .pin = (u16)-1, \
2038 - .name = __stringify(name), \
2039 + .name = __stringify(GPIO_##_pin), \
2040 .enum_id = _pin##_DATA, \
2041 }
2042
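
Editorial note: the sh_pfc one-liner above fixes a classic stringification bug. __stringify(name) expands to the literal string "name" because name is not a parameter of PINMUX_GPIO(); stringizing the pasted token GPIO_##_pin yields the intended per-pin name. A user-space demonstration:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x) __stringify_1(x)

#define BROKEN_NAME(_pin) __stringify(name)		/* always "name" */
#define FIXED_NAME(_pin) __stringify(GPIO_##_pin)	/* "GPIO_<pin>" */

int main(void)
{
	printf("%s\n", BROKEN_NAME(PORT12));	/* name */
	printf("%s\n", FIXED_NAME(PORT12));	/* GPIO_PORT12 */
	return 0;
}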
2043 diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
2044 index 00e667296360..557af943b2f5 100644
2045 --- a/drivers/power/power_supply_core.c
2046 +++ b/drivers/power/power_supply_core.c
2047 @@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
2048 dev_set_drvdata(dev, psy);
2049 psy->dev = dev;
2050
2051 + rc = dev_set_name(dev, "%s", psy->name);
2052 + if (rc)
2053 + goto dev_set_name_failed;
2054 +
2055 INIT_WORK(&psy->changed_work, power_supply_changed_work);
2056
2057 rc = power_supply_check_supplies(psy);
2058 @@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
2059 if (rc)
2060 goto wakeup_init_failed;
2061
2062 - rc = kobject_set_name(&dev->kobj, "%s", psy->name);
2063 - if (rc)
2064 - goto kobject_set_name_failed;
2065 -
2066 rc = device_add(dev);
2067 if (rc)
2068 goto device_add_failed;
2069 @@ -553,11 +553,11 @@ create_triggers_failed:
2070 register_cooler_failed:
2071 psy_unregister_thermal(psy);
2072 register_thermal_failed:
2073 -wakeup_init_failed:
2074 device_del(dev);
2075 -kobject_set_name_failed:
2076 device_add_failed:
2077 +wakeup_init_failed:
2078 check_supplies_failed:
2079 +dev_set_name_failed:
2080 put_device(dev);
2081 success:
2082 return rc;
2083 diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
2084 index 3f4ca4e09a4c..34629ea913d4 100644
2085 --- a/drivers/s390/char/tty3270.c
2086 +++ b/drivers/s390/char/tty3270.c
2087 @@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
2088 return rc;
2089 }
2090
2091 - tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
2092 + tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
2093 if (IS_ERR(tp->screen)) {
2094 rc = PTR_ERR(tp->screen);
2095 raw3270_put_view(&tp->view);
2096 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2097 index 596480022b0a..3bb0a1d1622a 100644
2098 --- a/drivers/scsi/qla2xxx/qla_target.c
2099 +++ b/drivers/scsi/qla2xxx/qla_target.c
2100 @@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
2101 schedule_delayed_work(&tgt->sess_del_work, 0);
2102 else
2103 schedule_delayed_work(&tgt->sess_del_work,
2104 - jiffies - sess->expires);
2105 + sess->expires - jiffies);
2106 }
2107
2108 /* ha->hardware_lock supposed to be held on entry */
2109 @@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
2110 struct scsi_qla_host *vha = tgt->vha;
2111 struct qla_hw_data *ha = vha->hw;
2112 struct qla_tgt_sess *sess;
2113 - unsigned long flags;
2114 + unsigned long flags, elapsed;
2115
2116 spin_lock_irqsave(&ha->hardware_lock, flags);
2117 while (!list_empty(&tgt->del_sess_list)) {
2118 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
2119 del_list_entry);
2120 - if (time_after_eq(jiffies, sess->expires)) {
2121 + elapsed = jiffies;
2122 + if (time_after_eq(elapsed, sess->expires)) {
2123 qlt_undelete_sess(sess);
2124
2125 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
2126 @@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
2127 ha->tgt.tgt_ops->put_sess(sess);
2128 } else {
2129 schedule_delayed_work(&tgt->sess_del_work,
2130 - jiffies - sess->expires);
2131 + sess->expires - elapsed);
2132 break;
2133 }
2134 }
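
Editorial note: the qla2xxx hunks above fix inverted jiffies arithmetic. For a deadline in the future, the relative delay handed to schedule_delayed_work() is expires - now; jiffies - expires underflows to a huge unsigned value, pushing the rescheduled work essentially forever out. Sampling jiffies once into elapsed also keeps the time_after_eq() test and the delay consistent. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned long jiffies = 1000;	/* "now" */
	unsigned long expires = 1250;	/* deadline 250 ticks ahead */

	unsigned long wrong = jiffies - expires;	/* underflows: huge delay */
	unsigned long right = expires - jiffies;	/* 250 ticks */

	printf("wrong = %lu\nright = %lu\n", wrong, right);
	return 0;
}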
2135 diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
2136 index 317a821b7906..316c44401372 100644
2137 --- a/drivers/staging/comedi/drivers.c
2138 +++ b/drivers/staging/comedi/drivers.c
2139 @@ -417,7 +417,7 @@ int comedi_load_firmware(struct comedi_device *dev,
2140 release_firmware(fw);
2141 }
2142
2143 - return ret;
2144 + return ret < 0 ? ret : 0;
2145 }
2146 EXPORT_SYMBOL_GPL(comedi_load_firmware);
2147
2148 diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
2149 index 432e3f9c3301..c55f234b29e6 100644
2150 --- a/drivers/staging/comedi/drivers/8255_pci.c
2151 +++ b/drivers/staging/comedi/drivers/8255_pci.c
2152 @@ -63,7 +63,8 @@ enum pci_8255_boardid {
2153 BOARD_ADLINK_PCI7296,
2154 BOARD_CB_PCIDIO24,
2155 BOARD_CB_PCIDIO24H,
2156 - BOARD_CB_PCIDIO48H,
2157 + BOARD_CB_PCIDIO48H_OLD,
2158 + BOARD_CB_PCIDIO48H_NEW,
2159 BOARD_CB_PCIDIO96H,
2160 BOARD_NI_PCIDIO96,
2161 BOARD_NI_PCIDIO96B,
2162 @@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
2163 .dio_badr = 2,
2164 .n_8255 = 1,
2165 },
2166 - [BOARD_CB_PCIDIO48H] = {
2167 + [BOARD_CB_PCIDIO48H_OLD] = {
2168 .name = "cb_pci-dio48h",
2169 .dio_badr = 1,
2170 .n_8255 = 2,
2171 },
2172 + [BOARD_CB_PCIDIO48H_NEW] = {
2173 + .name = "cb_pci-dio48h",
2174 + .dio_badr = 2,
2175 + .n_8255 = 2,
2176 + },
2177 [BOARD_CB_PCIDIO96H] = {
2178 .name = "cb_pci-dio96h",
2179 .dio_badr = 2,
2180 @@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
2181 { PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
2182 { PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
2183 { PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
2184 - { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
2185 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
2186 + .driver_data = BOARD_CB_PCIDIO48H_OLD },
2187 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
2188 + .driver_data = BOARD_CB_PCIDIO48H_NEW },
2189 { PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
2190 { PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
2191 { PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
2192 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2193 index 38e44b9abf0f..d5c724b317aa 100644
2194 --- a/drivers/target/iscsi/iscsi_target.c
2195 +++ b/drivers/target/iscsi/iscsi_target.c
2196 @@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
2197 */
2198 send_sig(SIGINT, np->np_thread, 1);
2199 kthread_stop(np->np_thread);
2200 + np->np_thread = NULL;
2201 }
2202
2203 np->np_transport->iscsit_free_np(np);
2204 @@ -830,24 +831,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2205 if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
2206 (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
2207 /*
2208 - * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
2209 - * that adds support for RESERVE/RELEASE. There is a bug
2210 - * add with this new functionality that sets R/W bits when
2211 - * neither CDB carries any READ or WRITE datapayloads.
2212 + * From RFC-3720 Section 10.3.1:
2213 + *
2214 + * "Either or both of R and W MAY be 1 when either the
2215 + * Expected Data Transfer Length and/or Bidirectional Read
2216 + * Expected Data Transfer Length are 0"
2217 + *
2218 + * For this case, go ahead and clear the unnecessary bits
2219 + * to avoid any confusion with ->data_direction.
2220 */
2221 - if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
2222 - hdr->flags &= ~ISCSI_FLAG_CMD_READ;
2223 - hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
2224 - goto done;
2225 - }
2226 + hdr->flags &= ~ISCSI_FLAG_CMD_READ;
2227 + hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
2228
2229 - pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
2230 + pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
2231 " set when Expected Data Transfer Length is 0 for"
2232 - " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
2233 - return iscsit_add_reject_cmd(cmd,
2234 - ISCSI_REASON_BOOKMARK_INVALID, buf);
2235 + " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
2236 }
2237 -done:
2238
2239 if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
2240 !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
2241 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
2242 index 1794c753954a..f442a9c93403 100644
2243 --- a/drivers/target/iscsi/iscsi_target_login.c
2244 +++ b/drivers/target/iscsi/iscsi_target_login.c
2245 @@ -1404,11 +1404,6 @@ old_sess_out:
2246
2247 out:
2248 stop = kthread_should_stop();
2249 - if (!stop && signal_pending(current)) {
2250 - spin_lock_bh(&np->np_thread_lock);
2251 - stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
2252 - spin_unlock_bh(&np->np_thread_lock);
2253 - }
2254 /* Wait for another socket.. */
2255 if (!stop)
2256 return 1;
2257 @@ -1416,7 +1411,6 @@ exit:
2258 iscsi_stop_login_thread_timer(np);
2259 spin_lock_bh(&np->np_thread_lock);
2260 np->np_thread_state = ISCSI_NP_THREAD_EXIT;
2261 - np->np_thread = NULL;
2262 spin_unlock_bh(&np->np_thread_lock);
2263
2264 return 0;
2265 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
2266 index d90dbb0f1a69..e5e39658034c 100644
2267 --- a/drivers/target/target_core_device.c
2268 +++ b/drivers/target/target_core_device.c
2269 @@ -1107,6 +1107,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
2270 dev->dev_attrib.block_size = block_size;
2271 pr_debug("dev[%p]: SE Device block_size changed to %u\n",
2272 dev, block_size);
2273 +
2274 + if (dev->dev_attrib.max_bytes_per_io)
2275 + dev->dev_attrib.hw_max_sectors =
2276 + dev->dev_attrib.max_bytes_per_io / block_size;
2277 +
2278 return 0;
2279 }
2280
2281 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
2282 index b662f89dedac..55725f5f56a2 100644
2283 --- a/drivers/target/target_core_file.c
2284 +++ b/drivers/target/target_core_file.c
2285 @@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
2286 pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
2287 " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
2288 TARGET_CORE_MOD_VERSION);
2289 - pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
2290 - " MaxSectors: %u\n",
2291 - hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
2292 + pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
2293 + hba->hba_id, fd_host->fd_host_id);
2294
2295 return 0;
2296 }
2297 @@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
2298 }
2299
2300 dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
2301 - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
2302 + dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
2303 + dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
2304 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
2305
2306 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
2307 diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
2308 index 37ffc5bd2399..d7772c167685 100644
2309 --- a/drivers/target/target_core_file.h
2310 +++ b/drivers/target/target_core_file.h
2311 @@ -7,7 +7,10 @@
2312 #define FD_DEVICE_QUEUE_DEPTH 32
2313 #define FD_MAX_DEVICE_QUEUE_DEPTH 128
2314 #define FD_BLOCKSIZE 512
2315 -#define FD_MAX_SECTORS 2048
2316 +/*
2317 + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
2318 + */
2319 +#define FD_MAX_BYTES 8388608
2320
2321 #define RRF_EMULATE_CDB 0x01
2322 #define RRF_GOT_LBA 0x02
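
Editorial note: the target_core_file hunks above replace the fixed 2048-sector cap with a byte cap. FD_MAX_BYTES (8 MiB, i.e. the 2048-iovec limit of vfs_writev()/vfs_readv() times a 4 KiB page) is divided by the block size to get hw_max_sectors, and se_dev_set_block_size() recomputes it whenever the block size changes. The resulting limits:

#include <stdio.h>

#define FD_MAX_BYTES 8388608u	/* 2048 iovecs * 4096-byte pages */

int main(void)
{
	unsigned block_sizes[] = { 512, 1024, 2048, 4096 };

	for (int i = 0; i < 4; i++)
		printf("block_size %4u -> hw_max_sectors %5u\n",
		       block_sizes[i], FD_MAX_BYTES / block_sizes[i]);
	/* 16384, 8192, 4096, 2048 */
	return 0;
}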
2323 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2324 index 4d6f430087d0..d4a89db511b5 100644
2325 --- a/drivers/tty/n_tty.c
2326 +++ b/drivers/tty/n_tty.c
2327 @@ -93,6 +93,7 @@ struct n_tty_data {
2328 size_t canon_head;
2329 size_t echo_head;
2330 size_t echo_commit;
2331 + size_t echo_mark;
2332 DECLARE_BITMAP(char_map, 256);
2333
2334 /* private to n_tty_receive_overrun (single-threaded) */
2335 @@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
2336 {
2337 ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
2338 ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
2339 + ldata->echo_mark = 0;
2340 ldata->line_start = 0;
2341
2342 ldata->erasing = 0;
2343 @@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
2344 size_t head;
2345
2346 head = ldata->echo_head;
2347 + ldata->echo_mark = head;
2348 old = ldata->echo_commit - ldata->echo_tail;
2349
2350 /* Process committed echoes if the accumulated # of bytes
2351 @@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
2352 size_t echoed;
2353
2354 if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
2355 - ldata->echo_commit == ldata->echo_tail)
2356 + ldata->echo_mark == ldata->echo_tail)
2357 return;
2358
2359 mutex_lock(&ldata->output_lock);
2360 + ldata->echo_commit = ldata->echo_mark;
2361 echoed = __process_echoes(tty);
2362 mutex_unlock(&ldata->output_lock);
2363
2364 @@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
2365 tty->ops->flush_chars(tty);
2366 }
2367
2368 +/* NB: echo_mark and echo_head should be equivalent here */
2369 static void flush_echoes(struct tty_struct *tty)
2370 {
2371 struct n_tty_data *ldata = tty->disc_data;
2372 diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
2373 index daf710f5c3fc..8b2accbad3d1 100644
2374 --- a/drivers/tty/serial/8250/8250_dw.c
2375 +++ b/drivers/tty/serial/8250/8250_dw.c
2376 @@ -417,6 +417,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
2377 static const struct acpi_device_id dw8250_acpi_match[] = {
2378 { "INT33C4", 0 },
2379 { "INT33C5", 0 },
2380 + { "INT3434", 0 },
2381 + { "INT3435", 0 },
2382 { "80860F0A", 0 },
2383 { },
2384 };
2385 diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
2386 index f87f1a0c8c6e..5ba30e078236 100644
2387 --- a/drivers/tty/serial/pmac_zilog.c
2388 +++ b/drivers/tty/serial/pmac_zilog.c
2389 @@ -2050,6 +2050,9 @@ static int __init pmz_console_init(void)
2390 /* Probe ports */
2391 pmz_probe();
2392
2393 + if (pmz_ports_count == 0)
2394 + return -ENODEV;
2395 +
2396 /* TODO: Autoprobe console based on OF */
2397 /* pmz_console.index = i; */
2398 register_console(&pmz_console);
2399 diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
2400 index 22fad8ad5ac2..d8a55e87877f 100644
2401 --- a/drivers/tty/tty_ldsem.c
2402 +++ b/drivers/tty/tty_ldsem.c
2403 @@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
2404 return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
2405 }
2406
2407 +/*
2408 + * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
2409 + * Returns 1 if count was successfully changed; @*old will have @new value.
2410 + * Returns 0 if count was not changed; @*old will have most recent sem->count
2411 + */
2412 static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
2413 {
2414 - long tmp = *old;
2415 - *old = atomic_long_cmpxchg(&sem->count, *old, new);
2416 - return *old == tmp;
2417 + long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
2418 + if (tmp == *old) {
2419 + *old = new;
2420 + return 1;
2421 + } else {
2422 + *old = tmp;
2423 + return 0;
2424 + }
2425 }
2426
2427 /*
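
Editorial note: the tty_ldsem hunk above turns ldsem_cmpxchg() into a conventional compare-and-swap step with the contract the added comment documents: on success *old holds the new value, and on failure *old is refreshed with the count actually observed, so the caller's retry loop can recompute from the latest state without an extra read. C11's compare-exchange has exactly this refresh-on-failure behavior; a user-space analogue:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_long count = 5;
	long old = 4;	/* stale expectation */

	/* fails and refreshes old to the observed value, like the fixed helper */
	if (!atomic_compare_exchange_strong(&count, &old, 10))
		printf("failed, old refreshed to %ld\n", old);	/* 5 */

	/* retry with the refreshed expectation now succeeds */
	if (atomic_compare_exchange_strong(&count, &old, 10))
		printf("succeeded, count now %ld\n", (long)atomic_load(&count));
	return 0;
}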
2428 diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
2429 index 23763dcec069..d6a50b7bb4ca 100644
2430 --- a/drivers/usb/chipidea/core.c
2431 +++ b/drivers/usb/chipidea/core.c
2432 @@ -575,6 +575,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
2433 : CI_ROLE_GADGET;
2434 }
2435
2436 + /* only update vbus status for peripheral */
2437 + if (ci->role == CI_ROLE_GADGET)
2438 + ci_handle_vbus_change(ci);
2439 +
2440 ret = ci_role_start(ci, ci->role);
2441 if (ret) {
2442 dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
2443 diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
2444 index 64d7a6d9a1ad..5d874d5cf500 100644
2445 --- a/drivers/usb/chipidea/host.c
2446 +++ b/drivers/usb/chipidea/host.c
2447 @@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
2448 return ret;
2449
2450 disable_reg:
2451 - regulator_disable(ci->platdata->reg_vbus);
2452 + if (ci->platdata->reg_vbus)
2453 + regulator_disable(ci->platdata->reg_vbus);
2454
2455 put_hcd:
2456 usb_put_hcd(hcd);
2457 diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2458 index 9333083dd111..d98fa254eaaf 100644
2459 --- a/drivers/usb/chipidea/udc.c
2460 +++ b/drivers/usb/chipidea/udc.c
2461 @@ -1818,9 +1818,6 @@ static int udc_start(struct ci_hdrc *ci)
2462 pm_runtime_no_callbacks(&ci->gadget.dev);
2463 pm_runtime_enable(&ci->gadget.dev);
2464
2465 - /* Update ci->vbus_active */
2466 - ci_handle_vbus_change(ci);
2467 -
2468 return retval;
2469
2470 remove_trans:
2471 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2472 index d3318a0df8ee..6463ca3bcfba 100644
2473 --- a/drivers/usb/class/cdc-wdm.c
2474 +++ b/drivers/usb/class/cdc-wdm.c
2475 @@ -820,13 +820,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
2476 {
2477 /* need autopm_get/put here to ensure the usbcore sees the new value */
2478 int rv = usb_autopm_get_interface(intf);
2479 - if (rv < 0)
2480 - goto err;
2481
2482 intf->needs_remote_wakeup = on;
2483 - usb_autopm_put_interface(intf);
2484 -err:
2485 - return rv;
2486 + if (!rv)
2487 + usb_autopm_put_interface(intf);
2488 + return 0;
2489 }
2490
2491 static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
2492 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2493 index b8dffd59eb25..73f5208714a4 100644
2494 --- a/drivers/usb/host/xhci-pci.c
2495 +++ b/drivers/usb/host/xhci-pci.c
2496 @@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2497 * any other sleep) on Haswell machines with LPT and LPT-LP
2498 * with the new Intel BIOS
2499 */
2500 - xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
2501 + /* Limit the quirk to only known vendors, as this triggers
2502 + * yet another BIOS bug on some other machines
2503 + * https://bugzilla.kernel.org/show_bug.cgi?id=66171
2504 + */
2505 + if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
2506 + xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
2507 }
2508 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
2509 pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
2510 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
2511 index 0d0d11880968..f7dca0b92bfb 100644
2512 --- a/drivers/usb/musb/musb_core.c
2513 +++ b/drivers/usb/musb/musb_core.c
2514 @@ -1809,9 +1809,6 @@ static void musb_free(struct musb *musb)
2515 disable_irq_wake(musb->nIrq);
2516 free_irq(musb->nIrq, musb);
2517 }
2518 - cancel_work_sync(&musb->irq_work);
2519 - if (musb->dma_controller)
2520 - dma_controller_destroy(musb->dma_controller);
2521
2522 musb_host_free(musb);
2523 }
2524 @@ -1893,6 +1890,9 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2525 musb_platform_disable(musb);
2526 musb_generic_disable(musb);
2527
2528 + /* Init IRQ workqueue before request_irq */
2529 + INIT_WORK(&musb->irq_work, musb_irq_work);
2530 +
2531 /* setup musb parts of the core (especially endpoints) */
2532 status = musb_core_init(plat->config->multipoint
2533 ? MUSB_CONTROLLER_MHDRC
2534 @@ -1902,9 +1902,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
2535
2536 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
2537
2538 - /* Init IRQ workqueue before request_irq */
2539 - INIT_WORK(&musb->irq_work, musb_irq_work);
2540 -
2541 /* attach to the IRQ */
2542 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
2543 dev_err(dev, "request_irq %d failed!\n", nIrq);
2544 @@ -1978,6 +1975,7 @@ fail4:
2545 musb_host_cleanup(musb);
2546
2547 fail3:
2548 + cancel_work_sync(&musb->irq_work);
2549 if (musb->dma_controller)
2550 dma_controller_destroy(musb->dma_controller);
2551 pm_runtime_put_sync(musb->controller);
2552 @@ -2036,6 +2034,10 @@ static int musb_remove(struct platform_device *pdev)
2553 musb_exit_debugfs(musb);
2554 musb_shutdown(pdev);
2555
2556 + if (musb->dma_controller)
2557 + dma_controller_destroy(musb->dma_controller);
2558 +
2559 + cancel_work_sync(&musb->irq_work);
2560 musb_free(musb);
2561 device_init_wakeup(dev, 0);
2562 return 0;
2563 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
2564 index 1f31e6b4c251..dc97744489b0 100644
2565 --- a/drivers/usb/serial/generic.c
2566 +++ b/drivers/usb/serial/generic.c
2567 @@ -176,14 +176,7 @@ retry:
2568 return result;
2569 }
2570
2571 - /* Try sending off another urb, unless in irq context (in which case
2572 - * there will be no free urb). */
2573 - if (!in_irq())
2574 - goto retry;
2575 -
2576 - clear_bit_unlock(USB_SERIAL_WRITE_BUSY, &port->flags);
2577 -
2578 - return 0;
2579 + goto retry; /* try sending off another urb */
2580 }
2581
2582 /**
2583 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2584 index 496b7e39d5be..cc7a24154490 100644
2585 --- a/drivers/usb/serial/option.c
2586 +++ b/drivers/usb/serial/option.c
2587 @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
2588 #define ZTE_PRODUCT_MF628 0x0015
2589 #define ZTE_PRODUCT_MF626 0x0031
2590 #define ZTE_PRODUCT_MC2718 0xffe8
2591 +#define ZTE_PRODUCT_AC2726 0xfff1
2592
2593 #define BENQ_VENDOR_ID 0x04a5
2594 #define BENQ_PRODUCT_H10 0x4068
2595 @@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
2596 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
2597 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
2598 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
2599 + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
2600
2601 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
2602 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
2603 diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
2604 index fca4c752a4ed..eae2c873b39f 100644
2605 --- a/drivers/usb/serial/zte_ev.c
2606 +++ b/drivers/usb/serial/zte_ev.c
2607 @@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
2608 { USB_DEVICE(0x19d2, 0xfffd) },
2609 { USB_DEVICE(0x19d2, 0xfffc) },
2610 { USB_DEVICE(0x19d2, 0xfffb) },
2611 - /* AC2726, AC8710_V3 */
2612 - { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
2613 + /* AC8710_V3 */
2614 { USB_DEVICE(0x19d2, 0xfff6) },
2615 { USB_DEVICE(0x19d2, 0xfff7) },
2616 { USB_DEVICE(0x19d2, 0xfff8) },
2617 diff --git a/fs/aio.c b/fs/aio.c
2618 index 6efb7f6cb22e..062a5f6a1448 100644
2619 --- a/fs/aio.c
2620 +++ b/fs/aio.c
2621 @@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
2622 int i;
2623
2624 for (i = 0; i < ctx->nr_pages; i++) {
2625 + struct page *page;
2626 pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
2627 page_count(ctx->ring_pages[i]));
2628 - put_page(ctx->ring_pages[i]);
2629 + page = ctx->ring_pages[i];
2630 + if (!page)
2631 + continue;
2632 + ctx->ring_pages[i] = NULL;
2633 + put_page(page);
2634 }
2635
2636 put_aio_ring_file(ctx);
2637 @@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
2638 unsigned long flags;
2639 int rc;
2640
2641 + rc = 0;
2642 +
2643 + /* Make sure the old page hasn't already been changed */
2644 + spin_lock(&mapping->private_lock);
2645 + ctx = mapping->private_data;
2646 + if (ctx) {
2647 + pgoff_t idx;
2648 + spin_lock_irqsave(&ctx->completion_lock, flags);
2649 + idx = old->index;
2650 + if (idx < (pgoff_t)ctx->nr_pages) {
2651 + if (ctx->ring_pages[idx] != old)
2652 + rc = -EAGAIN;
2653 + } else
2654 + rc = -EINVAL;
2655 + spin_unlock_irqrestore(&ctx->completion_lock, flags);
2656 + } else
2657 + rc = -EINVAL;
2658 + spin_unlock(&mapping->private_lock);
2659 +
2660 + if (rc != 0)
2661 + return rc;
2662 +
2663 /* Writeback must be complete */
2664 BUG_ON(PageWriteback(old));
2665 - put_page(old);
2666 + get_page(new);
2667
2668 - rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
2669 + rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
2670 if (rc != MIGRATEPAGE_SUCCESS) {
2671 - get_page(old);
2672 + put_page(new);
2673 return rc;
2674 }
2675
2676 - get_page(new);
2677 -
2678 /* We can potentially race against kioctx teardown here. Use the
2679 * address_space's private data lock to protect the mapping's
2680 * private_data.
2681 @@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
2682 spin_lock_irqsave(&ctx->completion_lock, flags);
2683 migrate_page_copy(new, old);
2684 idx = old->index;
2685 - if (idx < (pgoff_t)ctx->nr_pages)
2686 - ctx->ring_pages[idx] = new;
2687 + if (idx < (pgoff_t)ctx->nr_pages) {
2688 + /* And only do the move if things haven't changed */
2689 + if (ctx->ring_pages[idx] == old)
2690 + ctx->ring_pages[idx] = new;
2691 + else
2692 + rc = -EAGAIN;
2693 + } else
2694 + rc = -EINVAL;
2695 spin_unlock_irqrestore(&ctx->completion_lock, flags);
2696 } else
2697 rc = -EBUSY;
2698 spin_unlock(&mapping->private_lock);
2699
2700 + if (rc == MIGRATEPAGE_SUCCESS)
2701 + put_page(old);
2702 + else
2703 + put_page(new);
2704 +
2705 return rc;
2706 }
2707 #endif
2708 @@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
2709 struct aio_ring *ring;
2710 unsigned nr_events = ctx->max_reqs;
2711 struct mm_struct *mm = current->mm;
2712 - unsigned long size, populate;
2713 + unsigned long size, unused;
2714 int nr_pages;
2715 int i;
2716 struct file *file;
2717 @@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
2718 return -EAGAIN;
2719 }
2720
2721 + ctx->aio_ring_file = file;
2722 + nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
2723 + / sizeof(struct io_event);
2724 +
2725 + ctx->ring_pages = ctx->internal_pages;
2726 + if (nr_pages > AIO_RING_PAGES) {
2727 + ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
2728 + GFP_KERNEL);
2729 + if (!ctx->ring_pages) {
2730 + put_aio_ring_file(ctx);
2731 + return -ENOMEM;
2732 + }
2733 + }
2734 +
2735 for (i = 0; i < nr_pages; i++) {
2736 struct page *page;
2737 page = find_or_create_page(file->f_inode->i_mapping,
2738 @@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
2739 SetPageUptodate(page);
2740 SetPageDirty(page);
2741 unlock_page(page);
2742 +
2743 + ctx->ring_pages[i] = page;
2744 }
2745 - ctx->aio_ring_file = file;
2746 - nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
2747 - / sizeof(struct io_event);
2748 + ctx->nr_pages = i;
2749
2750 - ctx->ring_pages = ctx->internal_pages;
2751 - if (nr_pages > AIO_RING_PAGES) {
2752 - ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
2753 - GFP_KERNEL);
2754 - if (!ctx->ring_pages) {
2755 - put_aio_ring_file(ctx);
2756 - return -ENOMEM;
2757 - }
2758 + if (unlikely(i != nr_pages)) {
2759 + aio_free_ring(ctx);
2760 + return -EAGAIN;
2761 }
2762
2763 ctx->mmap_size = nr_pages * PAGE_SIZE;
2764 @@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
2765 down_write(&mm->mmap_sem);
2766 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
2767 PROT_READ | PROT_WRITE,
2768 - MAP_SHARED | MAP_POPULATE, 0, &populate);
2769 + MAP_SHARED, 0, &unused);
2770 + up_write(&mm->mmap_sem);
2771 if (IS_ERR((void *)ctx->mmap_base)) {
2772 - up_write(&mm->mmap_sem);
2773 ctx->mmap_size = 0;
2774 aio_free_ring(ctx);
2775 return -EAGAIN;
2776 @@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
2777
2778 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
2779
2780 - /* We must do this while still holding mmap_sem for write, as we
2781 - * need to be protected against userspace attempting to mremap()
2782 - * or munmap() the ring buffer.
2783 - */
2784 - ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
2785 - 1, 0, ctx->ring_pages, NULL);
2786 -
2787 - /* Dropping the reference here is safe as the page cache will hold
2788 - * onto the pages for us. It is also required so that page migration
2789 - * can unmap the pages and get the right reference count.
2790 - */
2791 - for (i = 0; i < ctx->nr_pages; i++)
2792 - put_page(ctx->ring_pages[i]);
2793 -
2794 - up_write(&mm->mmap_sem);
2795 -
2796 - if (unlikely(ctx->nr_pages != nr_pages)) {
2797 - aio_free_ring(ctx);
2798 - return -EAGAIN;
2799 - }
2800 -
2801 ctx->user_id = ctx->mmap_base;
2802 ctx->nr_events = nr_events; /* trusted copy */
2803
2804 @@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
2805 aio_nr += ctx->max_reqs;
2806 spin_unlock(&aio_nr_lock);
2807
2808 - percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
2809 + percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
2810 + percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
2811
2812 err = ioctx_add_table(ctx, mm);
2813 if (err)
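
Editorial note: the aio hunks above rework ring setup and migration: the ring pages are tracked in ring_pages[] directly from find_or_create_page() (the get_user_pages() pass is gone), and aio_migratepage() now verifies, under mapping->private_lock and completion_lock, that the slot still holds the old page both before and after migrate_page_move_mapping(), taking its reference on the new page up front and releasing exactly one page depending on the outcome. A toy model of that reference choreography only (refcounts; all locking elided):

#include <stdio.h>

struct page { int refcount; };

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { p->refcount--; }

/* pin the replacement before the move; release old only on success */
static int migrate(struct page *old, struct page *new, int move_ok)
{
	get_page(new);

	if (!move_ok) {
		put_page(new);	/* move failed: replacement goes unused */
		return -1;
	}

	put_page(old);		/* success: the ring now references new */
	return 0;
}

int main(void)
{
	struct page old = { .refcount = 1 }, new = { .refcount = 1 };

	migrate(&old, &new, 1);
	printf("old=%d new=%d\n", old.refcount, new.refcount);	/* old=0 new=2 */
	return 0;
}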
2814 diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
2815 index 6df8bd481425..ec3ba43b9faa 100644
2816 --- a/fs/ceph/addr.c
2817 +++ b/fs/ceph/addr.c
2818 @@ -210,13 +210,17 @@ static int readpage_nounlock(struct file *filp, struct page *page)
2819 if (err < 0) {
2820 SetPageError(page);
2821 goto out;
2822 - } else if (err < PAGE_CACHE_SIZE) {
2823 + } else {
2824 + if (err < PAGE_CACHE_SIZE) {
2825 /* zero fill remainder of page */
2826 - zero_user_segment(page, err, PAGE_CACHE_SIZE);
2827 + zero_user_segment(page, err, PAGE_CACHE_SIZE);
2828 + } else {
2829 + flush_dcache_page(page);
2830 + }
2831 }
2832 SetPageUptodate(page);
2833
2834 - if (err == 0)
2835 + if (err >= 0)
2836 ceph_readpage_to_fscache(inode, page);
2837
2838 out:
2839 diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
2840 index 6bfe65e0b038..360b622b0be0 100644
2841 --- a/fs/ceph/cache.c
2842 +++ b/fs/ceph/cache.c
2843 @@ -324,6 +324,9 @@ void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
2844 {
2845 struct ceph_inode_info *ci = ceph_inode(inode);
2846
2847 + if (!PageFsCache(page))
2848 + return;
2849 +
2850 fscache_wait_on_page_write(ci->fscache, page);
2851 fscache_uncache_page(ci->fscache, page);
2852 }
2853 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2854 index b7bda5d9611d..788901552eb1 100644
2855 --- a/fs/ceph/mds_client.c
2856 +++ b/fs/ceph/mds_client.c
2857 @@ -642,6 +642,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
2858 req->r_unsafe_dir = NULL;
2859 }
2860
2861 + complete_all(&req->r_safe_completion);
2862 +
2863 ceph_mdsc_put_request(req);
2864 }
2865
2866 @@ -1875,8 +1877,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
2867 int mds = -1;
2868 int err = -EAGAIN;
2869
2870 - if (req->r_err || req->r_got_result)
2871 + if (req->r_err || req->r_got_result) {
2872 + if (req->r_aborted)
2873 + __unregister_request(mdsc, req);
2874 goto out;
2875 + }
2876
2877 if (req->r_timeout &&
2878 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2879 @@ -2186,7 +2191,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2880 if (head->safe) {
2881 req->r_got_safe = true;
2882 __unregister_request(mdsc, req);
2883 - complete_all(&req->r_safe_completion);
2884
2885 if (req->r_got_unsafe) {
2886 /*
2887 diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
2888 index b5ec2a268f56..45ccfbd8ea5f 100644
2889 --- a/fs/cifs/cifsproto.h
2890 +++ b/fs/cifs/cifsproto.h
2891 @@ -475,9 +475,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
2892 const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
2893 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
2894 extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
2895 -extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
2896 - const unsigned char *path,
2897 - struct cifs_sb_info *cifs_sb, unsigned int xid);
2898 +extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
2899 + struct cifs_sb_info *cifs_sb,
2900 + struct cifs_fattr *fattr,
2901 + const unsigned char *path);
2902 extern int mdfour(unsigned char *, unsigned char *, int);
2903 extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
2904 const struct nls_table *codepage);
2905 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2906 index 5384c2a640ca..f039c23d003d 100644
2907 --- a/fs/cifs/dir.c
2908 +++ b/fs/cifs/dir.c
2909 @@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
2910 static int
2911 cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
2912 struct tcon_link *tlink, unsigned oflags, umode_t mode,
2913 - __u32 *oplock, struct cifs_fid *fid, int *created)
2914 + __u32 *oplock, struct cifs_fid *fid)
2915 {
2916 int rc = -ENOENT;
2917 int create_options = CREATE_NOT_DIR;
2918 @@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
2919 .device = 0,
2920 };
2921
2922 - *created |= FILE_CREATED;
2923 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
2924 args.uid = current_fsuid();
2925 if (inode->i_mode & S_ISGID)
2926 @@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
2927 cifs_add_pending_open(&fid, tlink, &open);
2928
2929 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
2930 - &oplock, &fid, opened);
2931 + &oplock, &fid);
2932
2933 if (rc) {
2934 cifs_del_pending_open(&open);
2935 goto out;
2936 }
2937
2938 + if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
2939 + *opened |= FILE_CREATED;
2940 +
2941 rc = finish_open(file, direntry, generic_file_open, opened);
2942 if (rc) {
2943 if (server->ops->close)
2944 @@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
2945 struct TCP_Server_Info *server;
2946 struct cifs_fid fid;
2947 __u32 oplock;
2948 - int created = FILE_CREATED;
2949
2950 cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
2951 inode, direntry->d_name.name, direntry);
2952 @@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
2953 server->ops->new_lease_key(&fid);
2954
2955 rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
2956 - &oplock, &fid, &created);
2957 + &oplock, &fid);
2958 if (!rc && server->ops->close)
2959 server->ops->close(xid, tcon, &fid);
2960
2961 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2962 index 36f9ebb93ceb..49719b8228e5 100644
2963 --- a/fs/cifs/inode.c
2964 +++ b/fs/cifs/inode.c
2965 @@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
2966
2967 /* check for Minshall+French symlinks */
2968 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
2969 - int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
2970 + int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
2971 + full_path);
2972 if (tmprc)
2973 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
2974 }
2975 @@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
2976
2977 /* check for Minshall+French symlinks */
2978 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
2979 - tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
2980 + tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
2981 + full_path);
2982 if (tmprc)
2983 cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
2984 }
2985 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
2986 index 7e36ceba0c7a..477e53bad551 100644
2987 --- a/fs/cifs/link.c
2988 +++ b/fs/cifs/link.c
2989 @@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
2990
2991
2992 int
2993 -CIFSCheckMFSymlink(struct cifs_fattr *fattr,
2994 - const unsigned char *path,
2995 - struct cifs_sb_info *cifs_sb, unsigned int xid)
2996 +CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
2997 + struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
2998 + const unsigned char *path)
2999 {
3000 - int rc = 0;
3001 + int rc;
3002 u8 *buf = NULL;
3003 unsigned int link_len = 0;
3004 unsigned int bytes_read = 0;
3005 - struct cifs_tcon *ptcon;
3006
3007 if (!CIFSCouldBeMFSymlink(fattr))
3008 /* it's not a symlink */
3009 return 0;
3010
3011 buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
3012 - if (!buf) {
3013 - rc = -ENOMEM;
3014 - goto out;
3015 - }
3016 + if (!buf)
3017 + return -ENOMEM;
3018
3019 - ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
3020 - if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
3021 - rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
3022 - &bytes_read, cifs_sb, xid);
3023 + if (tcon->ses->server->ops->query_mf_symlink)
3024 + rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
3025 + &bytes_read, cifs_sb, xid);
3026 else
3027 - goto out;
3028 + rc = -ENOSYS;
3029
3030 - if (rc != 0)
3031 + if (rc)
3032 goto out;
3033
3034 if (bytes_read == 0) /* not a symlink */
3035 diff --git a/fs/ext2/super.c b/fs/ext2/super.c
3036 index 288534920fe5..20d6697bd638 100644
3037 --- a/fs/ext2/super.c
3038 +++ b/fs/ext2/super.c
3039 @@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
3040 sb->s_blocksize - offset : towrite;
3041
3042 tmp_bh.b_state = 0;
3043 + tmp_bh.b_size = sb->s_blocksize;
3044 err = ext2_get_block(inode, blk, &tmp_bh, 1);
3045 if (err < 0)
3046 goto out;
3047 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3048 index af815ea9d7cc..745faaa7ef95 100644
3049 --- a/fs/ext4/ext4.h
3050 +++ b/fs/ext4/ext4.h
3051 @@ -267,6 +267,16 @@ struct ext4_io_submit {
3052 /* Translate # of blks to # of clusters */
3053 #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
3054 (sbi)->s_cluster_bits)
3055 +/* Mask out the low bits to get the starting block of the cluster */
3056 +#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
3057 + ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
3058 +#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
3059 + ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
3060 +/* Get the cluster offset */
3061 +#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
3062 + ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
3063 +#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
3064 + ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
3065
3066 /*
3067 * Structure of a blocks group descriptor
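
The four macros above only require the cluster ratio to be a power of two, which bigalloc guarantees; masking with (ratio - 1) then splits any block number into a cluster start and an in-cluster offset. A standalone sketch of the arithmetic (simplified types and names, not the kernel macros):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t fsblk_t;

/* first block of the cluster containing pblk (mirrors EXT4_PBLK_CMASK) */
static fsblk_t cluster_start(fsblk_t pblk, unsigned int ratio)
{
	return pblk & ~((fsblk_t)ratio - 1);
}

/* position of pblk inside its cluster (mirrors EXT4_PBLK_COFF) */
static fsblk_t cluster_offset(fsblk_t pblk, unsigned int ratio)
{
	return pblk & ((fsblk_t)ratio - 1);
}

int main(void)
{
	fsblk_t pblk = 1000003;		/* 62500 * 16 + 3 */

	printf("start=%llu offset=%llu\n",
	       (unsigned long long)cluster_start(pblk, 16),
	       (unsigned long long)cluster_offset(pblk, 16));
	/* prints start=1000000 offset=3 */
	return 0;
}
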
3068 diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
3069 index 17ac112ab101..3fe29de832c8 100644
3070 --- a/fs/ext4/ext4_jbd2.c
3071 +++ b/fs/ext4/ext4_jbd2.c
3072 @@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
3073 if (WARN_ON_ONCE(err)) {
3074 ext4_journal_abort_handle(where, line, __func__, bh,
3075 handle, err);
3076 + ext4_error_inode(inode, where, line,
3077 + bh->b_blocknr,
3078 + "journal_dirty_metadata failed: "
3079 + "handle type %u started at line %u, "
3080 + "credits %u/%u, errcode %d",
3081 + handle->h_type,
3082 + handle->h_line_no,
3083 + handle->h_requested_credits,
3084 + handle->h_buffer_credits, err);
3085 }
3086 } else {
3087 if (inode)
3088 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3089 index 54d52afcdb19..f76027fe58ae 100644
3090 --- a/fs/ext4/extents.c
3091 +++ b/fs/ext4/extents.c
3092 @@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
3093 {
3094 ext4_fsblk_t block = ext4_ext_pblock(ext);
3095 int len = ext4_ext_get_actual_len(ext);
3096 + ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
3097 + ext4_lblk_t last = lblock + len - 1;
3098
3099 - if (len == 0)
3100 + if (lblock > last)
3101 return 0;
3102 return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
3103 }
3104 @@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
3105 if (depth == 0) {
3106 /* leaf entries */
3107 struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
3108 + struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
3109 + ext4_fsblk_t pblock = 0;
3110 + ext4_lblk_t lblock = 0;
3111 + ext4_lblk_t prev = 0;
3112 + int len = 0;
3113 while (entries) {
3114 if (!ext4_valid_extent(inode, ext))
3115 return 0;
3116 +
3117 + /* Check for overlapping extents */
3118 + lblock = le32_to_cpu(ext->ee_block);
3119 + len = ext4_ext_get_actual_len(ext);
3120 + if ((lblock <= prev) && prev) {
3121 + pblock = ext4_ext_pblock(ext);
3122 + es->s_last_error_block = cpu_to_le64(pblock);
3123 + return 0;
3124 + }
3125 ext++;
3126 entries--;
3127 + prev = lblock + len - 1;
3128 }
3129 } else {
3130 struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
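
The leaf check above rejects any extent whose logical start is not strictly past the previous extent's last block. The same test in a self-contained userspace form (struct and names invented for the sketch):

#include <stdbool.h>
#include <stdint.h>

struct ext { uint32_t lblock; uint16_t len; };	/* logical start, length */

static bool leaf_entries_valid(const struct ext *e, int n)
{
	uint32_t prev = 0;	/* last block of the previous extent */

	for (int i = 0; i < n; i++) {
		if (e[i].len == 0)
			return false;	/* zero-length extent */
		/* overlap or misordering, as in the kernel's (lblock <= prev) && prev */
		if (prev && e[i].lblock <= prev)
			return false;
		prev = e[i].lblock + e[i].len - 1;
	}
	return true;
}
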
3131 @@ -1844,8 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
3132 depth = ext_depth(inode);
3133 if (!path[depth].p_ext)
3134 goto out;
3135 - b2 = le32_to_cpu(path[depth].p_ext->ee_block);
3136 - b2 &= ~(sbi->s_cluster_ratio - 1);
3137 + b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
3138
3139 /*
3140 * get the next allocated block if the extent in the path
3141 @@ -1855,7 +1871,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
3142 b2 = ext4_ext_next_allocated_block(path);
3143 if (b2 == EXT_MAX_BLOCKS)
3144 goto out;
3145 - b2 &= ~(sbi->s_cluster_ratio - 1);
3146 + b2 = EXT4_LBLK_CMASK(sbi, b2);
3147 }
3148
3149 /* check for wrap through zero on extent logical start block*/
3150 @@ -2535,7 +2551,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
3151 * extent, we have to mark the cluster as used (store negative
3152 * cluster number in partial_cluster).
3153 */
3154 - unaligned = pblk & (sbi->s_cluster_ratio - 1);
3155 + unaligned = EXT4_PBLK_COFF(sbi, pblk);
3156 if (unaligned && (ee_len == num) &&
3157 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
3158 *partial_cluster = EXT4_B2C(sbi, pblk);
3159 @@ -2629,7 +2645,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
3160 * accidentally freeing it later on
3161 */
3162 pblk = ext4_ext_pblock(ex);
3163 - if (pblk & (sbi->s_cluster_ratio - 1))
3164 + if (EXT4_PBLK_COFF(sbi, pblk))
3165 *partial_cluster =
3166 -((long long)EXT4_B2C(sbi, pblk));
3167 ex--;
3168 @@ -3784,7 +3800,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3169 {
3170 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3171 ext4_lblk_t lblk_start, lblk_end;
3172 - lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3173 + lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
3174 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3175
3176 return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3177 @@ -3843,9 +3859,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3178 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3179
3180 /* Check towards left side */
3181 - c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3182 + c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
3183 if (c_offset) {
3184 - lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3185 + lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
3186 lblk_to = lblk_from + c_offset - 1;
3187
3188 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3189 @@ -3853,7 +3869,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3190 }
3191
3192 /* Now check towards right. */
3193 - c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3194 + c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
3195 if (allocated_clusters && c_offset) {
3196 lblk_from = lblk_start + num_blks;
3197 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3198 @@ -4061,7 +4077,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
3199 struct ext4_ext_path *path)
3200 {
3201 struct ext4_sb_info *sbi = EXT4_SB(sb);
3202 - ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3203 + ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3204 ext4_lblk_t ex_cluster_start, ex_cluster_end;
3205 ext4_lblk_t rr_cluster_start;
3206 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3207 @@ -4079,8 +4095,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
3208 (rr_cluster_start == ex_cluster_start)) {
3209 if (rr_cluster_start == ex_cluster_end)
3210 ee_start += ee_len - 1;
3211 - map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3212 - c_offset;
3213 + map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
3214 map->m_len = min(map->m_len,
3215 (unsigned) sbi->s_cluster_ratio - c_offset);
3216 /*
3217 @@ -4234,7 +4249,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3218 */
3219 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
3220 newex.ee_block = cpu_to_le32(map->m_lblk);
3221 - cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3222 + cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3223
3224 /*
3225 * If we are doing bigalloc, check to see if the extent returned
3226 @@ -4302,7 +4317,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3227 * needed so that future calls to get_implied_cluster_alloc()
3228 * work correctly.
3229 */
3230 - offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
3231 + offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
3232 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
3233 ar.goal -= offset;
3234 ar.logical -= offset;
3235 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3236 index e274e9c1171f..1ddee3dfabe3 100644
3237 --- a/fs/ext4/inode.c
3238 +++ b/fs/ext4/inode.c
3239 @@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
3240 */
3241 static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
3242 {
3243 - int retries = 0;
3244 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3245 struct ext4_inode_info *ei = EXT4_I(inode);
3246 unsigned int md_needed;
3247 @@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
3248 * in order to allocate nrblocks
3249 * worse case is one extent per block
3250 */
3251 -repeat:
3252 spin_lock(&ei->i_block_reservation_lock);
3253 /*
3254 * ext4_calc_metadata_amount() has side effects, which we have
3255 @@ -1238,10 +1236,6 @@ repeat:
3256 ei->i_da_metadata_calc_len = save_len;
3257 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
3258 spin_unlock(&ei->i_block_reservation_lock);
3259 - if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
3260 - cond_resched();
3261 - goto repeat;
3262 - }
3263 return -ENOSPC;
3264 }
3265 ei->i_reserved_meta_blocks += md_needed;
3266 @@ -1255,7 +1249,6 @@ repeat:
3267 */
3268 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
3269 {
3270 - int retries = 0;
3271 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3272 struct ext4_inode_info *ei = EXT4_I(inode);
3273 unsigned int md_needed;
3274 @@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
3275 * in order to allocate nrblocks
3276 * worse case is one extent per block
3277 */
3278 -repeat:
3279 spin_lock(&ei->i_block_reservation_lock);
3280 /*
3281 * ext4_calc_metadata_amount() has side effects, which we have
3282 @@ -1297,10 +1289,6 @@ repeat:
3283 ei->i_da_metadata_calc_len = save_len;
3284 ei->i_da_metadata_calc_last_lblock = save_last_lblock;
3285 spin_unlock(&ei->i_block_reservation_lock);
3286 - if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
3287 - cond_resched();
3288 - goto repeat;
3289 - }
3290 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
3291 return -ENOSPC;
3292 }
3293 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
3294 index a41e3ba8cfaa..04a5c7504be9 100644
3295 --- a/fs/ext4/mballoc.c
3296 +++ b/fs/ext4/mballoc.c
3297 @@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
3298 {
3299 struct ext4_prealloc_space *pa;
3300 pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3301 +
3302 + BUG_ON(atomic_read(&pa->pa_count));
3303 + BUG_ON(pa->pa_deleted == 0);
3304 kmem_cache_free(ext4_pspace_cachep, pa);
3305 }
3306
3307 @@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3308 ext4_group_t grp;
3309 ext4_fsblk_t grp_blk;
3310
3311 - if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3312 - return;
3313 -
3314 /* in this short window concurrent discard can set pa_deleted */
3315 spin_lock(&pa->pa_lock);
3316 + if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
3317 + spin_unlock(&pa->pa_lock);
3318 + return;
3319 + }
3320 +
3321 if (pa->pa_deleted == 1) {
3322 spin_unlock(&pa->pa_lock);
3323 return;
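
The pa_count fix follows a general pattern: a reference drop whose outcome is compared against a flag written by another path must happen under the same lock as the flag, otherwise the drop and the check can interleave with a concurrent discard. A minimal pthread model of the pattern (not the mballoc code itself):

#include <pthread.h>
#include <stdbool.h>

struct pa {
	pthread_mutex_t lock;
	int count;		/* reference count */
	int deleted;		/* set by a concurrent discard path */
};

/* returns true when the caller is the one who may free pa */
static bool put_pa(struct pa *pa)
{
	bool last;

	pthread_mutex_lock(&pa->lock);
	last = (--pa->count == 0) && !pa->deleted;
	if (last)
		pa->deleted = 1;	/* claim it before unlocking */
	pthread_mutex_unlock(&pa->lock);
	return last;
}
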
3324 @@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
3325 ext4_get_group_no_and_offset(sb, goal, &group, &block);
3326
3327 /* set up allocation goals */
3328 - ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
3329 + ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
3330 ac->ac_status = AC_STATUS_CONTINUE;
3331 ac->ac_sb = sb;
3332 ac->ac_inode = ar->inode;
3333 @@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
3334 * blocks at the beginning or the end unless we are explicitly
3335 * requested to avoid doing so.
3336 */
3337 - overflow = block & (sbi->s_cluster_ratio - 1);
3338 + overflow = EXT4_PBLK_COFF(sbi, block);
3339 if (overflow) {
3340 if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
3341 overflow = sbi->s_cluster_ratio - overflow;
3342 @@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
3343 count += overflow;
3344 }
3345 }
3346 - overflow = count & (sbi->s_cluster_ratio - 1);
3347 + overflow = EXT4_LBLK_COFF(sbi, count);
3348 if (overflow) {
3349 if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
3350 if (count > overflow)
3351 @@ -4794,8 +4799,8 @@ do_more:
3352 " group:%d block:%d count:%lu failed"
3353 " with %d", block_group, bit, count,
3354 err);
3355 - }
3356 -
3357 + } else
3358 + EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info);
3359
3360 ext4_lock_group(sb, block_group);
3361 mb_clear_bits(bitmap_bh->b_data, bit, count_clusters);
3362 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3363 index 2c2e6cbc6bed..b947e0af9956 100644
3364 --- a/fs/ext4/super.c
3365 +++ b/fs/ext4/super.c
3366 @@ -773,7 +773,7 @@ static void ext4_put_super(struct super_block *sb)
3367 }
3368
3369 ext4_es_unregister_shrinker(sbi);
3370 - del_timer(&sbi->s_err_report);
3371 + del_timer_sync(&sbi->s_err_report);
3372 ext4_release_system_zone(sb);
3373 ext4_mb_release(sb);
3374 ext4_ext_release(sb);
3375 @@ -3288,11 +3288,19 @@ int ext4_calculate_overhead(struct super_block *sb)
3376 }
3377
3378
3379 -static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
3380 +static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
3381 {
3382 ext4_fsblk_t resv_clusters;
3383
3384 /*
3385 + * There's no need to reserve anything when we aren't using extents.
3386 + * The space estimates are exact, there are no unwritten extents,
3387 + * hole punching doesn't need new metadata... This is needed especially
3388 + * to keep ext2/3 backward compatibility.
3389 + */
3390 + if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
3391 + return 0;
3392 + /*
3393 * By default we reserve 2% or 4096 clusters, whichever is smaller.
3394 * This should cover the situations where we can not afford to run
3395 * out of space like for example punch hole, or converting
3396 @@ -3300,7 +3308,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
3397 * allocation would require 1, or 2 blocks, higher numbers are
3398 * very rare.
3399 */
3400 - resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
3401 + resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
3402 + EXT4_SB(sb)->s_cluster_bits;
3403
3404 do_div(resv_clusters, 50);
3405 resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
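
Spelled out, the sizing above is min(2% of the cluster count, 4096), with the new early return making it zero on non-extent filesystems. A hedged standalone model of the calculation:

#include <stdint.h>

static uint64_t resv_clusters(uint64_t total_blocks, unsigned int cluster_bits,
			      int has_extents)
{
	uint64_t resv;

	if (!has_extents)
		return 0;	/* estimates are exact, nothing to reserve */

	resv = (total_blocks >> cluster_bits) / 50;	/* 2% of clusters */
	return resv < 4096 ? resv : 4096;		/* capped at 4096 */
}
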
3406 @@ -4043,10 +4052,10 @@ no_journal:
3407 "available");
3408 }
3409
3410 - err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
3411 + err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
3412 if (err) {
3413 ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
3414 - "reserved pool", ext4_calculate_resv_clusters(sbi));
3415 + "reserved pool", ext4_calculate_resv_clusters(sb));
3416 goto failed_mount4a;
3417 }
3418
3419 @@ -4151,7 +4160,7 @@ failed_mount_wq:
3420 }
3421 failed_mount3:
3422 ext4_es_unregister_shrinker(sbi);
3423 - del_timer(&sbi->s_err_report);
3424 + del_timer_sync(&sbi->s_err_report);
3425 if (sbi->s_flex_groups)
3426 ext4_kvfree(sbi->s_flex_groups);
3427 percpu_counter_destroy(&sbi->s_freeclusters_counter);
3428 diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
3429 index 1f7d8057ea68..1253c2006029 100644
3430 --- a/fs/gfs2/aops.c
3431 +++ b/fs/gfs2/aops.c
3432 @@ -984,6 +984,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
3433 {
3434 struct file *file = iocb->ki_filp;
3435 struct inode *inode = file->f_mapping->host;
3436 + struct address_space *mapping = inode->i_mapping;
3437 struct gfs2_inode *ip = GFS2_I(inode);
3438 struct gfs2_holder gh;
3439 int rv;
3440 @@ -1004,6 +1005,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
3441 if (rv != 1)
3442 goto out; /* dio not valid, fall back to buffered i/o */
3443
3444 + /*
3445 + * Now since we are holding a deferred (CW) lock at this point, you
3446 + * might be wondering why this is ever needed. There is a case however
3447 + * where we've granted a deferred local lock against a cached exclusive
3448 + * glock. That is ok provided all granted local locks are deferred, but
3449 + * it also means that it is possible to encounter pages which are
3450 + * cached and possibly also mapped. So here we check for that and sort
3451 + * them out ahead of the dio. The glock state machine will take care of
3452 + * everything else.
3453 + *
3454 + * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
3455 + * the first place, mapping->nr_pages will always be zero.
3456 + */
3457 + if (mapping->nrpages) {
3458 + loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
3459 + loff_t len = iov_length(iov, nr_segs);
3460 + loff_t end = PAGE_ALIGN(offset + len) - 1;
3461 +
3462 + rv = 0;
3463 + if (len == 0)
3464 + goto out;
3465 + if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
3466 + unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
3467 + rv = filemap_write_and_wait_range(mapping, lstart, end);
3468 + if (rv)
3469 + return rv;
3470 + truncate_inode_pages_range(mapping, lstart, end);
3471 + }
3472 +
3473 rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
3474 offset, nr_segs, gfs2_get_block_direct,
3475 NULL, NULL, 0);
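
PAGE_ALIGN rounds the end of the byte range up to a page boundary so the truncation covers the last partial page touched by the direct I/O. The arithmetic on its own (assuming 4 KiB pages for the example):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE	4096ULL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t offset = 5000, len = 3000;
	uint64_t end = PAGE_ALIGN(offset + len) - 1;	/* last byte of last page */

	printf("end=%llu\n", (unsigned long long)end);	/* prints end=8191 */
	return 0;
}
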
3476 diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
3477 index 610613fb65b5..9dcb9777a5f8 100644
3478 --- a/fs/gfs2/log.c
3479 +++ b/fs/gfs2/log.c
3480 @@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
3481 struct buffer_head *bh = bd->bd_bh;
3482 struct gfs2_glock *gl = bd->bd_gl;
3483
3484 - gfs2_remove_from_ail(bd);
3485 - bd->bd_bh = NULL;
3486 bh->b_private = NULL;
3487 bd->bd_blkno = bh->b_blocknr;
3488 + gfs2_remove_from_ail(bd); /* drops ref on bh */
3489 + bd->bd_bh = NULL;
3490 bd->bd_ops = &gfs2_revoke_lops;
3491 sdp->sd_log_num_revoke++;
3492 atomic_inc(&gl->gl_revokes);
3493 diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
3494 index 932415050540..52f177be3bf8 100644
3495 --- a/fs/gfs2/meta_io.c
3496 +++ b/fs/gfs2/meta_io.c
3497 @@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
3498 struct address_space *mapping = bh->b_page->mapping;
3499 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
3500 struct gfs2_bufdata *bd = bh->b_private;
3501 + int was_pinned = 0;
3502
3503 if (test_clear_buffer_pinned(bh)) {
3504 trace_gfs2_pin(bd, 0);
3505 @@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
3506 tr->tr_num_databuf_rm++;
3507 }
3508 tr->tr_touched = 1;
3509 + was_pinned = 1;
3510 brelse(bh);
3511 }
3512 if (bd) {
3513 spin_lock(&sdp->sd_ail_lock);
3514 if (bd->bd_tr) {
3515 gfs2_trans_add_revoke(sdp, bd);
3516 + } else if (was_pinned) {
3517 + bh->b_private = NULL;
3518 + kmem_cache_free(gfs2_bufdata_cachep, bd);
3519 }
3520 spin_unlock(&sdp->sd_ail_lock);
3521 }
3522 diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
3523 index 19ff5e8c285c..21518b76cd8b 100644
3524 --- a/fs/gfs2/ops_fstype.c
3525 +++ b/fs/gfs2/ops_fstype.c
3526 @@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
3527 if (IS_ERR(s))
3528 goto error_bdev;
3529
3530 - if (s->s_root)
3531 + if (s->s_root) {
3532 + /*
3533 + * s_umount nests inside bd_mutex during
3534 + * __invalidate_device(). blkdev_put() acquires
3535 + * bd_mutex and can't be called under s_umount. Drop
3536 + * s_umount temporarily. This is safe as we're
3537 + * holding an active reference.
3538 + */
3539 + up_write(&s->s_umount);
3540 blkdev_put(bdev, mode);
3541 + down_write(&s->s_umount);
3542 + }
3543
3544 memset(&args, 0, sizeof(args));
3545 args.ar_quota = GFS2_QUOTA_DEFAULT;
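
The comment in the hunk above describes a classic lock-order inversion workaround: since s_umount nests inside bd_mutex elsewhere, s_umount is dropped across the blkdev_put() call and retaken afterwards. The shape of that dance, modeled with pthreads (names and locks are stand-ins, not the VFS API):

#include <pthread.h>

static pthread_mutex_t s_umount = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;

static void put_blockdev(void)		/* stands in for blkdev_put() */
{
	pthread_mutex_lock(&bd_mutex);
	/* release the device */
	pthread_mutex_unlock(&bd_mutex);
}

static void drop_dev_under_umount(void)
{
	pthread_mutex_lock(&s_umount);
	/* ... */
	pthread_mutex_unlock(&s_umount);	/* drop before crossing the order */
	put_blockdev();
	pthread_mutex_lock(&s_umount);		/* safe: an active ref is held */
	/* ... */
	pthread_mutex_unlock(&s_umount);
}
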
3546 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3547 index 7aa9a32573bb..b0b74e58697b 100644
3548 --- a/fs/jbd2/transaction.c
3549 +++ b/fs/jbd2/transaction.c
3550 @@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
3551 * once a transaction -bzzz
3552 */
3553 jh->b_modified = 1;
3554 - J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
3555 + if (handle->h_buffer_credits <= 0) {
3556 + ret = -ENOSPC;
3557 + goto out_unlock_bh;
3558 + }
3559 handle->h_buffer_credits--;
3560 }
3561
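
Instead of asserting, the credit shortfall is now reported to the caller, which pairs with the ext4_jbd2.c hunk earlier that logs the failure via ext4_error_inode(). The shape of the change in isolation (invented names):

#include <errno.h>

struct handle { int buffer_credits; };

static int consume_credit(struct handle *h)
{
	if (h->buffer_credits <= 0)
		return -ENOSPC;		/* was J_ASSERT_JH(...): now recoverable */
	h->buffer_credits--;
	return 0;
}
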
3562 @@ -1373,7 +1376,6 @@ out_unlock_bh:
3563 jbd2_journal_put_journal_head(jh);
3564 out:
3565 JBUFFER_TRACE(jh, "exit");
3566 - WARN_ON(ret); /* All errors are bugs, so dump the stack */
3567 return ret;
3568 }
3569
3570 diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
3571 index 3e6c2e6c9cd2..4688a622b373 100644
3572 --- a/fs/xfs/xfs_qm.c
3573 +++ b/fs/xfs/xfs_qm.c
3574 @@ -137,8 +137,6 @@ xfs_qm_dqpurge(
3575 {
3576 struct xfs_mount *mp = dqp->q_mount;
3577 struct xfs_quotainfo *qi = mp->m_quotainfo;
3578 - struct xfs_dquot *gdqp = NULL;
3579 - struct xfs_dquot *pdqp = NULL;
3580
3581 xfs_dqlock(dqp);
3582 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
3583 @@ -146,21 +144,6 @@ xfs_qm_dqpurge(
3584 return EAGAIN;
3585 }
3586
3587 - /*
3588 - * If this quota has a hint attached, prepare for releasing it now.
3589 - */
3590 - gdqp = dqp->q_gdquot;
3591 - if (gdqp) {
3592 - xfs_dqlock(gdqp);
3593 - dqp->q_gdquot = NULL;
3594 - }
3595 -
3596 - pdqp = dqp->q_pdquot;
3597 - if (pdqp) {
3598 - xfs_dqlock(pdqp);
3599 - dqp->q_pdquot = NULL;
3600 - }
3601 -
3602 dqp->dq_flags |= XFS_DQ_FREEING;
3603
3604 xfs_dqflock(dqp);
3605 @@ -209,11 +192,47 @@ xfs_qm_dqpurge(
3606 XFS_STATS_DEC(xs_qm_dquot_unused);
3607
3608 xfs_qm_dqdestroy(dqp);
3609 + return 0;
3610 +}
3611 +
3612 +/*
3613 + * Release the group or project dquot pointers the user dquots may be carrying
3614 + * around as a hint, and proceed to purge the user dquot cache if requested.
3615 + */
3616 +STATIC int
3617 +xfs_qm_dqpurge_hints(
3618 + struct xfs_dquot *dqp,
3619 + void *data)
3620 +{
3621 + struct xfs_dquot *gdqp = NULL;
3622 + struct xfs_dquot *pdqp = NULL;
3623 + uint flags = *((uint *)data);
3624
3625 + xfs_dqlock(dqp);
3626 + if (dqp->dq_flags & XFS_DQ_FREEING) {
3627 + xfs_dqunlock(dqp);
3628 + return EAGAIN;
3629 + }
3630 +
3631 + /* If this quota has a hint attached, prepare for releasing it now */
3632 + gdqp = dqp->q_gdquot;
3633 if (gdqp)
3634 - xfs_qm_dqput(gdqp);
3635 + dqp->q_gdquot = NULL;
3636 +
3637 + pdqp = dqp->q_pdquot;
3638 if (pdqp)
3639 - xfs_qm_dqput(pdqp);
3640 + dqp->q_pdquot = NULL;
3641 +
3642 + xfs_dqunlock(dqp);
3643 +
3644 + if (gdqp)
3645 + xfs_qm_dqrele(gdqp);
3646 + if (pdqp)
3647 + xfs_qm_dqrele(pdqp);
3648 +
3649 + if (flags & XFS_QMOPT_UQUOTA)
3650 + return xfs_qm_dqpurge(dqp, NULL);
3651 +
3652 return 0;
3653 }
3654
3655 @@ -225,8 +244,18 @@ xfs_qm_dqpurge_all(
3656 struct xfs_mount *mp,
3657 uint flags)
3658 {
3659 - if (flags & XFS_QMOPT_UQUOTA)
3660 - xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
3661 + /*
3662 + * We have to release the group/project dquot hint(s) from the user dquot
3663 + * first if they are there; otherwise we would run into an infinite loop
3664 + * while walking through the radix tree to purge the other types of
3665 + * dquots, since their refcount is not zero while the user dquot refers
3666 + * to them as hints.
3667 + *
3668 + * Calling the special xfs_qm_dqpurge_hints() will end up going through
3669 + * the general xfs_qm_dqpurge() for the user dquot cache if requested.
3670 + */
3671 + xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
3672 +
3673 if (flags & XFS_QMOPT_GQUOTA)
3674 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
3675 if (flags & XFS_QMOPT_PQUOTA)
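
The ordering matters here: a user dquot's group/project hint pins a reference on the hinted dquot, so the hints must be dropped before the group/project caches are walked, or the walk never sees a zero refcount. The detach-then-release pattern in miniature (all names invented):

#include <stddef.h>

struct dq {
	int refs;
	struct dq *group_hint;	/* pins a reference on another dquot */
};

static void dq_put(struct dq *d) { d->refs--; }

static void purge_hint(struct dq *user)
{
	struct dq *g = user->group_hint;

	user->group_hint = NULL;	/* detach first... */
	if (g)
		dq_put(g);		/* ...then drop the pinned reference */
}
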
3676 diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
3677 index 2611577869ae..d78b353b12b4 100644
3678 --- a/include/acpi/acpi_bus.h
3679 +++ b/include/acpi/acpi_bus.h
3680 @@ -169,7 +169,8 @@ struct acpi_device_flags {
3681 u32 ejectable:1;
3682 u32 power_manageable:1;
3683 u32 match_driver:1;
3684 - u32 reserved:27;
3685 + u32 no_hotplug:1;
3686 + u32 reserved:26;
3687 };
3688
3689 /* File System */
3690 @@ -357,6 +358,7 @@ extern struct kobject *acpi_kobj;
3691 extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
3692 void acpi_bus_private_data_handler(acpi_handle, void *);
3693 int acpi_bus_get_private_data(acpi_handle, void **);
3694 +void acpi_bus_no_hotplug(acpi_handle handle);
3695 extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
3696 extern int register_acpi_notifier(struct notifier_block *);
3697 extern int unregister_acpi_notifier(struct notifier_block *);
3698 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
3699 index f330d28e4d0e..b12079afbd5f 100644
3700 --- a/include/asm-generic/pgtable.h
3701 +++ b/include/asm-generic/pgtable.h
3702 @@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
3703 #endif
3704
3705 #ifndef pte_accessible
3706 -# define pte_accessible(pte) ((void)(pte),1)
3707 +# define pte_accessible(mm, pte) ((void)(pte), 1)
3708 #endif
3709
3710 #ifndef flush_tlb_fix_spurious_fault
3711 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
3712 index 3d79e513c0b3..0bd7a2ec8a45 100644
3713 --- a/include/drm/drm_pciids.h
3714 +++ b/include/drm/drm_pciids.h
3715 @@ -588,7 +588,7 @@
3716 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3717 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
3718 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
3719 - {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
3720 + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
3721 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3722 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3723 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3724 diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
3725 index 669fef5c745a..3e0fbe441763 100644
3726 --- a/include/linux/auxvec.h
3727 +++ b/include/linux/auxvec.h
3728 @@ -3,6 +3,6 @@
3729
3730 #include <uapi/linux/auxvec.h>
3731
3732 -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
3733 +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
3734 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
3735 #endif /* _LINUX_AUXVEC_H */
3736 diff --git a/include/linux/libata.h b/include/linux/libata.h
3737 index 0e23c26485f4..9b503376738f 100644
3738 --- a/include/linux/libata.h
3739 +++ b/include/linux/libata.h
3740 @@ -418,6 +418,7 @@ enum {
3741 ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
3742 ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
3743 ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
3744 + ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
3745
3746 /* DMA mask for user DMA control: User visible values; DO NOT
3747 renumber */
3748 diff --git a/include/linux/migrate.h b/include/linux/migrate.h
3749 index 8d3c57fdf221..ee8b14ae4f3f 100644
3750 --- a/include/linux/migrate.h
3751 +++ b/include/linux/migrate.h
3752 @@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
3753 struct page *newpage, struct page *page);
3754 extern int migrate_page_move_mapping(struct address_space *mapping,
3755 struct page *newpage, struct page *page,
3756 - struct buffer_head *head, enum migrate_mode mode);
3757 + struct buffer_head *head, enum migrate_mode mode,
3758 + int extra_count);
3759 #else
3760
3761 static inline void putback_lru_pages(struct list_head *l) {}
3762 @@ -90,10 +91,18 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
3763 #endif /* CONFIG_MIGRATION */
3764
3765 #ifdef CONFIG_NUMA_BALANCING
3766 -extern int migrate_misplaced_page(struct page *page, int node);
3767 +extern bool pmd_trans_migrating(pmd_t pmd);
3768 +extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
3769 extern int migrate_misplaced_page(struct page *page, int node);
3770 extern bool migrate_ratelimited(int node);
3771 #else
3772 +static inline bool pmd_trans_migrating(pmd_t pmd)
3773 +{
3774 + return false;
3775 +}
3776 +static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
3777 +{
3778 +}
3779 static inline int migrate_misplaced_page(struct page *page, int node)
3780 {
3781 return -EAGAIN; /* can't migrate now */
3782 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3783 index d9851eeb6e1d..8e082f18fb6a 100644
3784 --- a/include/linux/mm_types.h
3785 +++ b/include/linux/mm_types.h
3786 @@ -435,6 +435,14 @@ struct mm_struct {
3787 */
3788 int first_nid;
3789 #endif
3790 +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
3791 + /*
3792 + * An operation with batched TLB flushing is going on. Anything that
3793 + * can move process memory needs to flush the TLB when moving a
3794 + * PROT_NONE or PROT_NUMA mapped page.
3795 + */
3796 + bool tlb_flush_pending;
3797 +#endif
3798 struct uprobes_state uprobes_state;
3799 };
3800
3801 @@ -455,4 +463,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
3802 return mm->cpu_vm_mask_var;
3803 }
3804
3805 +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
3806 +/*
3807 + * Memory barriers to keep this state in sync are graciously provided by
3808 + * the page table locks, outside of which no page table modifications happen.
3809 + * The barriers below prevent the compiler from re-ordering the instructions
3810 + * around the memory barriers that are already present in the code.
3811 + */
3812 +static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
3813 +{
3814 + barrier();
3815 + return mm->tlb_flush_pending;
3816 +}
3817 +static inline void set_tlb_flush_pending(struct mm_struct *mm)
3818 +{
3819 + mm->tlb_flush_pending = true;
3820 +
3821 + /*
3822 + * Guarantee that the tlb_flush_pending store does not leak into the
3823 + * critical section updating the page tables
3824 + */
3825 + smp_mb__before_spinlock();
3826 +}
3827 +/* Clearing is done after a TLB flush, which also provides a barrier. */
3828 +static inline void clear_tlb_flush_pending(struct mm_struct *mm)
3829 +{
3830 + barrier();
3831 + mm->tlb_flush_pending = false;
3832 +}
3833 +#else
3834 +static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
3835 +{
3836 + return false;
3837 +}
3838 +static inline void set_tlb_flush_pending(struct mm_struct *mm)
3839 +{
3840 +}
3841 +static inline void clear_tlb_flush_pending(struct mm_struct *mm)
3842 +{
3843 +}
3844 +#endif
3845 +
3846 #endif /* _LINUX_MM_TYPES_H */
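
A userspace model of the flag protocol added above: the batching side raises the flag before touching the page tables and clears it after the flush; a mover checks it to decide whether it must flush itself. C11 atomics stand in for the kernel's barriers here, so this is illustrative, not equivalent:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool tlb_flush_pending;

static void set_pending(void)	{ atomic_store(&tlb_flush_pending, true); }
static void clear_pending(void)	{ atomic_store(&tlb_flush_pending, false); }

/* e.g. page migration: flush if a protection-change batch is in flight */
static bool mover_must_flush(void)
{
	return atomic_load(&tlb_flush_pending);
}
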
3847 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
3848 index 8e00f9f6f963..9e7db9e73cc1 100644
3849 --- a/include/linux/reboot.h
3850 +++ b/include/linux/reboot.h
3851 @@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
3852 * Architecture-specific implementations of sys_reboot commands.
3853 */
3854
3855 +extern void migrate_to_reboot_cpu(void);
3856 extern void machine_restart(char *cmd);
3857 extern void machine_halt(void);
3858 extern void machine_power_off(void);
3859 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
3860 index 5bdb8b7d2a69..23bfd1028457 100644
3861 --- a/include/target/target_core_base.h
3862 +++ b/include/target/target_core_base.h
3863 @@ -624,6 +624,7 @@ struct se_dev_attrib {
3864 u32 unmap_granularity;
3865 u32 unmap_granularity_alignment;
3866 u32 max_write_same_len;
3867 + u32 max_bytes_per_io;
3868 struct se_device *da_dev;
3869 struct config_group da_group;
3870 };
3871 diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
3872 index 46d41e8b0dcc..a71442bd7ec9 100644
3873 --- a/include/uapi/drm/radeon_drm.h
3874 +++ b/include/uapi/drm/radeon_drm.h
3875 @@ -981,6 +981,8 @@ struct drm_radeon_cs {
3876 #define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
3877 /* query if CP DMA is supported on the compute ring */
3878 #define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
3879 +/* query the number of render backends */
3880 +#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
3881
3882
3883 struct drm_radeon_info {
3884 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
3885 index 5c9127dc1b66..b6fd78344c53 100644
3886 --- a/kernel/cgroup.c
3887 +++ b/kernel/cgroup.c
3888 @@ -4490,14 +4490,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
3889 list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
3890 root->number_of_cgroups++;
3891
3892 - /* each css holds a ref to the cgroup's dentry and the parent css */
3893 - for_each_root_subsys(root, ss) {
3894 - struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
3895 -
3896 - dget(dentry);
3897 - css_get(css->parent);
3898 - }
3899 -
3900 /* hold a ref to the parent's dentry */
3901 dget(parent->dentry);
3902
3903 @@ -4509,6 +4501,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
3904 if (err)
3905 goto err_destroy;
3906
3907 + /* each css holds a ref to the cgroup's dentry and parent css */
3908 + dget(dentry);
3909 + css_get(css->parent);
3910 +
3911 + /* mark it consumed for error path */
3912 + css_ar[ss->subsys_id] = NULL;
3913 +
3914 if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
3915 parent->parent) {
3916 pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
3917 @@ -4555,6 +4554,14 @@ err_free_cgrp:
3918 return err;
3919
3920 err_destroy:
3921 + for_each_root_subsys(root, ss) {
3922 + struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
3923 +
3924 + if (css) {
3925 + percpu_ref_cancel_init(&css->refcnt);
3926 + ss->css_free(css);
3927 + }
3928 + }
3929 cgroup_destroy_locked(cgrp);
3930 mutex_unlock(&cgroup_mutex);
3931 mutex_unlock(&dentry->d_inode->i_mutex);
3932 @@ -4716,8 +4723,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
3933 * will be invoked to perform the rest of destruction once the
3934 * percpu refs of all css's are confirmed to be killed.
3935 */
3936 - for_each_root_subsys(cgrp->root, ss)
3937 - kill_css(cgroup_css(cgrp, ss));
3938 + for_each_root_subsys(cgrp->root, ss) {
3939 + struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
3940 +
3941 + if (css)
3942 + kill_css(css);
3943 + }
3944
3945 /*
3946 * Mark @cgrp dead. This prevents further task migration and child
3947 diff --git a/kernel/fork.c b/kernel/fork.c
3948 index 086fe73ad6bd..690cfacaed71 100644
3949 --- a/kernel/fork.c
3950 +++ b/kernel/fork.c
3951 @@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
3952 spin_lock_init(&mm->page_table_lock);
3953 mm_init_aio(mm);
3954 mm_init_owner(mm, p);
3955 + clear_tlb_flush_pending(mm);
3956
3957 if (likely(!mm_alloc_pgd(mm))) {
3958 mm->def_flags = 0;
3959 diff --git a/kernel/freezer.c b/kernel/freezer.c
3960 index b462fa197517..aa6a8aadb911 100644
3961 --- a/kernel/freezer.c
3962 +++ b/kernel/freezer.c
3963 @@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
3964 bool pm_freezing;
3965 bool pm_nosig_freezing;
3966
3967 +/*
3968 + * Temporary export for the deadlock workaround in ata_scsi_hotplug().
3969 + * Remove once the hack becomes unnecessary.
3970 + */
3971 +EXPORT_SYMBOL_GPL(pm_freezing);
3972 +
3973 /* protects freezing and frozen transitions */
3974 static DEFINE_SPINLOCK(freezer_lock);
3975
3976 diff --git a/kernel/kexec.c b/kernel/kexec.c
3977 index ecd783dda9ae..355e13af62c5 100644
3978 --- a/kernel/kexec.c
3979 +++ b/kernel/kexec.c
3980 @@ -1680,6 +1680,7 @@ int kernel_kexec(void)
3981 {
3982 kexec_in_progress = true;
3983 kernel_restart_prepare(NULL);
3984 + migrate_to_reboot_cpu();
3985 printk(KERN_EMERG "Starting new kernel\n");
3986 machine_shutdown();
3987 }
3988 diff --git a/kernel/reboot.c b/kernel/reboot.c
3989 index f813b3474646..662c83fc16b7 100644
3990 --- a/kernel/reboot.c
3991 +++ b/kernel/reboot.c
3992 @@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
3993 }
3994 EXPORT_SYMBOL(unregister_reboot_notifier);
3995
3996 -static void migrate_to_reboot_cpu(void)
3997 +void migrate_to_reboot_cpu(void)
3998 {
3999 /* The boot cpu is always logical cpu 0 */
4000 int cpu = reboot_cpu;
4001 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4002 index 513fc2fd5109..7765ad82736a 100644
4003 --- a/kernel/sched/fair.c
4004 +++ b/kernel/sched/fair.c
4005 @@ -974,6 +974,13 @@ void task_numa_work(struct callback_head *work)
4006 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
4007 continue;
4008
4009 + /*
4010 + * Skip inaccessible VMAs to avoid any confusion between
4011 + * PROT_NONE and NUMA hinting ptes
4012 + */
4013 + if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
4014 + continue;
4015 +
4016 do {
4017 start = max(start, vma->vm_start);
4018 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
4019 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
4020 index 01970c8e64df..417b1b3fd7e9 100644
4021 --- a/kernel/sched/rt.c
4022 +++ b/kernel/sched/rt.c
4023 @@ -899,6 +899,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
4024 {
4025 struct rq *rq = rq_of_rt_rq(rt_rq);
4026
4027 +#ifdef CONFIG_RT_GROUP_SCHED
4028 + /*
4029 + * Change rq's cpupri only if rt_rq is the top queue.
4030 + */
4031 + if (&rq->rt != rt_rq)
4032 + return;
4033 +#endif
4034 if (rq->online && prio < prev_prio)
4035 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
4036 }
4037 @@ -908,6 +915,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
4038 {
4039 struct rq *rq = rq_of_rt_rq(rt_rq);
4040
4041 +#ifdef CONFIG_RT_GROUP_SCHED
4042 + /*
4043 + * Change rq's cpupri only if rt_rq is the top queue.
4044 + */
4045 + if (&rq->rt != rt_rq)
4046 + return;
4047 +#endif
4048 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
4049 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
4050 }
4051 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
4052 index f3bd09eecb7e..1345d9ff0662 100644
4053 --- a/kernel/trace/ftrace.c
4054 +++ b/kernel/trace/ftrace.c
4055 @@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
4056 int cpu;
4057 int ret = 0;
4058
4059 - for_each_online_cpu(cpu) {
4060 + for_each_possible_cpu(cpu) {
4061 ret = ftrace_profile_init_cpu(cpu);
4062 if (ret)
4063 break;
4064 diff --git a/mm/compaction.c b/mm/compaction.c
4065 index b5326b141a25..74ad00908c79 100644
4066 --- a/mm/compaction.c
4067 +++ b/mm/compaction.c
4068 @@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
4069 bool migrate_scanner)
4070 {
4071 struct zone *zone = cc->zone;
4072 +
4073 + if (cc->ignore_skip_hint)
4074 + return;
4075 +
4076 if (!page)
4077 return;
4078
4079 diff --git a/mm/fremap.c b/mm/fremap.c
4080 index 5bff08147768..bbc4d660221a 100644
4081 --- a/mm/fremap.c
4082 +++ b/mm/fremap.c
4083 @@ -208,9 +208,10 @@ get_write_lock:
4084 if (mapping_cap_account_dirty(mapping)) {
4085 unsigned long addr;
4086 struct file *file = get_file(vma->vm_file);
4087 + /* mmap_region may free vma; grab the info now */
4088 + vm_flags = vma->vm_flags;
4089
4090 - addr = mmap_region(file, start, size,
4091 - vma->vm_flags, pgoff);
4092 + addr = mmap_region(file, start, size, vm_flags, pgoff);
4093 fput(file);
4094 if (IS_ERR_VALUE(addr)) {
4095 err = addr;
4096 @@ -218,7 +219,7 @@ get_write_lock:
4097 BUG_ON(addr != start);
4098 err = 0;
4099 }
4100 - goto out;
4101 + goto out_freed;
4102 }
4103 mutex_lock(&mapping->i_mmap_mutex);
4104 flush_dcache_mmap_lock(mapping);
4105 @@ -253,6 +254,7 @@ get_write_lock:
4106 out:
4107 if (vma)
4108 vm_flags = vma->vm_flags;
4109 +out_freed:
4110 if (likely(!has_write_lock))
4111 up_read(&mm->mmap_sem);
4112 else
4113 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4114 index cca80d96e509..47962456ed87 100644
4115 --- a/mm/huge_memory.c
4116 +++ b/mm/huge_memory.c
4117 @@ -884,6 +884,10 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
4118 ret = 0;
4119 goto out_unlock;
4120 }
4121 +
4122 + /* mmap_sem prevents this happening but warn if that changes */
4123 + WARN_ON(pmd_trans_migrating(pmd));
4124 +
4125 if (unlikely(pmd_trans_splitting(pmd))) {
4126 /* split huge page running from under us */
4127 spin_unlock(&src_mm->page_table_lock);
4128 @@ -1240,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
4129 if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
4130 return ERR_PTR(-EFAULT);
4131
4132 + /* Full NUMA hinting faults to serialise migration in fault paths */
4133 + if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
4134 + goto out;
4135 +
4136 page = pmd_page(*pmd);
4137 VM_BUG_ON(!PageHead(page));
4138 if (flags & FOLL_TOUCH) {
4139 @@ -1290,6 +1298,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4140 if (unlikely(!pmd_same(pmd, *pmdp)))
4141 goto out_unlock;
4142
4143 + /*
4144 + * If there are potential migrations, wait for completion and retry
4145 + * without disrupting NUMA hinting information. Do not relock and
4146 + * check_same as the page may no longer be mapped.
4147 + */
4148 + if (unlikely(pmd_trans_migrating(*pmdp))) {
4149 + spin_unlock(&mm->page_table_lock);
4150 + wait_migrate_huge_page(vma->anon_vma, pmdp);
4151 + goto out;
4152 + }
4153 +
4154 page = pmd_page(pmd);
4155 page_nid = page_to_nid(page);
4156 count_vm_numa_event(NUMA_HINT_FAULTS);
4157 @@ -1306,23 +1325,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4158 /* If the page was locked, there are no parallel migrations */
4159 if (page_locked)
4160 goto clear_pmdnuma;
4161 + }
4162
4163 - /*
4164 - * Otherwise wait for potential migrations and retry. We do
4165 - * relock and check_same as the page may no longer be mapped.
4166 - * As the fault is being retried, do not account for it.
4167 - */
4168 + /* Migration could have started since the pmd_trans_migrating check */
4169 + if (!page_locked) {
4170 spin_unlock(&mm->page_table_lock);
4171 wait_on_page_locked(page);
4172 page_nid = -1;
4173 goto out;
4174 }
4175
4176 - /* Page is misplaced, serialise migrations and parallel THP splits */
4177 + /*
4178 + * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
4179 + * to serialise splits
4180 + */
4181 get_page(page);
4182 spin_unlock(&mm->page_table_lock);
4183 - if (!page_locked)
4184 - lock_page(page);
4185 anon_vma = page_lock_anon_vma_read(page);
4186
4187 /* Confirm the PTE did not change while locked */
4188 @@ -1334,6 +1352,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
4189 goto out_unlock;
4190 }
4191
4192 + /* Bail if we fail to protect against THP splits for any reason */
4193 + if (unlikely(!anon_vma)) {
4194 + put_page(page);
4195 + page_nid = -1;
4196 + goto clear_pmdnuma;
4197 + }
4198 +
4199 /*
4200 * Migrate the THP to the requested node, returns with page unlocked
4201 * and pmd_numa cleared.
4202 @@ -1466,20 +1491,24 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
4203
4204 if (__pmd_trans_huge_lock(pmd, vma) == 1) {
4205 pmd_t entry;
4206 - entry = pmdp_get_and_clear(mm, addr, pmd);
4207 if (!prot_numa) {
4208 + entry = pmdp_get_and_clear(mm, addr, pmd);
4209 + if (pmd_numa(entry))
4210 + entry = pmd_mknonnuma(entry);
4211 entry = pmd_modify(entry, newprot);
4212 BUG_ON(pmd_write(entry));
4213 + set_pmd_at(mm, addr, pmd, entry);
4214 } else {
4215 struct page *page = pmd_page(*pmd);
4216 + entry = *pmd;
4217
4218 /* only check non-shared pages */
4219 if (page_mapcount(page) == 1 &&
4220 !pmd_numa(*pmd)) {
4221 entry = pmd_mknuma(entry);
4222 + set_pmd_at(mm, addr, pmd, entry);
4223 }
4224 }
4225 - set_pmd_at(mm, addr, pmd, entry);
4226 spin_unlock(&vma->vm_mm->page_table_lock);
4227 ret = 1;
4228 }
4229 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4230 index 36cc2d0570ab..e0e979276df0 100644
4231 --- a/mm/memcontrol.c
4232 +++ b/mm/memcontrol.c
4233 @@ -337,7 +337,7 @@ struct mem_cgroup {
4234 static size_t memcg_size(void)
4235 {
4236 return sizeof(struct mem_cgroup) +
4237 - nr_node_ids * sizeof(struct mem_cgroup_per_node);
4238 + nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4239 }
4240
4241 /* internal only representation about the status of kmem accounting. */
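
The memcg fix above is the classic sizeof slip: the trailing array holds per-node pointers, so the allocation must be sized by the pointer, not the pointee. Demonstrated standalone:

#include <stdio.h>

struct per_node { char stats[256]; };	/* stand-in for mem_cgroup_per_node */

int main(void)
{
	int nr_node_ids = 4;

	printf("wrong=%zu right=%zu\n",
	       nr_node_ids * sizeof(struct per_node),	/* overallocates */
	       nr_node_ids * sizeof(struct per_node *));/* what is needed */
	return 0;
}
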
4242 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
4243 index bf3351b5115e..9aea53f4551c 100644
4244 --- a/mm/memory-failure.c
4245 +++ b/mm/memory-failure.c
4246 @@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
4247 BUG_ON(!PageHWPoison(p));
4248 return SWAP_FAIL;
4249 }
4250 + /*
4251 + * We pinned the head page for hwpoison handling,
4252 + * now we split the thp and we are interested in
4253 + * the hwpoisoned raw page, so move the refcount
4254 + * to it.
4255 + */
4256 + if (hpage != p) {
4257 + put_page(hpage);
4258 + get_page(p);
4259 + }
4260 /* THP is split, so ppage should be the real poisoned page. */
4261 ppage = p;
4262 }
4263 @@ -1519,10 +1529,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
4264 if (ret > 0)
4265 ret = -EIO;
4266 } else {
4267 - set_page_hwpoison_huge_page(hpage);
4268 - dequeue_hwpoisoned_huge_page(hpage);
4269 - atomic_long_add(1 << compound_order(hpage),
4270 - &num_poisoned_pages);
4271 + /* overcommit hugetlb page will be freed to buddy */
4272 + if (PageHuge(page)) {
4273 + set_page_hwpoison_huge_page(hpage);
4274 + dequeue_hwpoisoned_huge_page(hpage);
4275 + atomic_long_add(1 << compound_order(hpage),
4276 + &num_poisoned_pages);
4277 + } else {
4278 + SetPageHWPoison(page);
4279 + atomic_long_inc(&num_poisoned_pages);
4280 + }
4281 }
4282 return ret;
4283 }
4284 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
4285 index 04729647f359..6b22d8f9bfb8 100644
4286 --- a/mm/mempolicy.c
4287 +++ b/mm/mempolicy.c
4288 @@ -1317,7 +1317,7 @@ static long do_mbind(unsigned long start, unsigned long len,
4289 if (nr_failed && (flags & MPOL_MF_STRICT))
4290 err = -EIO;
4291 } else
4292 - putback_lru_pages(&pagelist);
4293 + putback_movable_pages(&pagelist);
4294
4295 up_write(&mm->mmap_sem);
4296 mpol_out:
4297 diff --git a/mm/migrate.c b/mm/migrate.c
4298 index c04692774e88..e3cf71dd1288 100644
4299 --- a/mm/migrate.c
4300 +++ b/mm/migrate.c
4301 @@ -36,6 +36,7 @@
4302 #include <linux/hugetlb_cgroup.h>
4303 #include <linux/gfp.h>
4304 #include <linux/balloon_compaction.h>
4305 +#include <linux/mmu_notifier.h>
4306
4307 #include <asm/tlbflush.h>
4308
4309 @@ -315,14 +316,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
4310 */
4311 int migrate_page_move_mapping(struct address_space *mapping,
4312 struct page *newpage, struct page *page,
4313 - struct buffer_head *head, enum migrate_mode mode)
4314 + struct buffer_head *head, enum migrate_mode mode,
4315 + int extra_count)
4316 {
4317 - int expected_count = 0;
4318 + int expected_count = 1 + extra_count;
4319 void **pslot;
4320
4321 if (!mapping) {
4322 /* Anonymous page without mapping */
4323 - if (page_count(page) != 1)
4324 + if (page_count(page) != expected_count)
4325 return -EAGAIN;
4326 return MIGRATEPAGE_SUCCESS;
4327 }
4328 @@ -332,7 +334,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
4329 pslot = radix_tree_lookup_slot(&mapping->page_tree,
4330 page_index(page));
4331
4332 - expected_count = 2 + page_has_private(page);
4333 + expected_count += 1 + page_has_private(page);
4334 if (page_count(page) != expected_count ||
4335 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
4336 spin_unlock_irq(&mapping->tree_lock);
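
After this change the expected reference count is built up additively: one for the isolation reference, extra_count for pins the caller already knows about, plus one for the mapping's radix-tree slot and one more if buffers are attached. As plain arithmetic (hypothetical helper, not a kernel function):

static int expected_refs(int has_mapping, int has_private, int extra_count)
{
	int expected = 1 + extra_count;		/* isolation ref + known pins */

	if (has_mapping)
		expected += 1 + has_private;	/* radix tree slot (+ buffers) */
	return expected;
}
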
4337 @@ -525,7 +527,7 @@ int migrate_page(struct address_space *mapping,
4338
4339 BUG_ON(PageWriteback(page)); /* Writeback must be complete */
4340
4341 - rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
4342 + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
4343
4344 if (rc != MIGRATEPAGE_SUCCESS)
4345 return rc;
4346 @@ -552,7 +554,7 @@ int buffer_migrate_page(struct address_space *mapping,
4347
4348 head = page_buffers(page);
4349
4350 - rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
4351 + rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
4352
4353 if (rc != MIGRATEPAGE_SUCCESS)
4354 return rc;
4355 @@ -1596,6 +1598,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
4356 return 1;
4357 }
4358
4359 +bool pmd_trans_migrating(pmd_t pmd)
4360 +{
4361 + struct page *page = pmd_page(pmd);
4362 + return PageLocked(page);
4363 +}
4364 +
4365 +void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
4366 +{
4367 + struct page *page = pmd_page(*pmd);
4368 + wait_on_page_locked(page);
4369 +}
4370 +
4371 /*
4372 * Attempt to migrate a misplaced page to the specified destination
4373 * node. Caller is expected to have an elevated reference count on
4374 @@ -1655,12 +1669,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4375 unsigned long address,
4376 struct page *page, int node)
4377 {
4378 - unsigned long haddr = address & HPAGE_PMD_MASK;
4379 pg_data_t *pgdat = NODE_DATA(node);
4380 int isolated = 0;
4381 struct page *new_page = NULL;
4382 struct mem_cgroup *memcg = NULL;
4383 int page_lru = page_is_file_cache(page);
4384 + unsigned long mmun_start = address & HPAGE_PMD_MASK;
4385 + unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
4386 + pmd_t orig_entry;
4387
4388 /*
4389 * Don't migrate pages that are mapped in multiple processes.
4390 @@ -1690,6 +1706,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4391 goto out_fail;
4392 }
4393
4394 + if (mm_tlb_flush_pending(mm))
4395 + flush_tlb_range(vma, mmun_start, mmun_end);
4396 +
4397 /* Prepare a page as a migration target */
4398 __set_page_locked(new_page);
4399 SetPageSwapBacked(new_page);
4400 @@ -1701,9 +1720,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4401 WARN_ON(PageLRU(new_page));
4402
4403 /* Recheck the target PMD */
4404 + mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
4405 spin_lock(&mm->page_table_lock);
4406 - if (unlikely(!pmd_same(*pmd, entry))) {
4407 + if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
4408 +fail_putback:
4409 spin_unlock(&mm->page_table_lock);
4410 + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
4411
4412 /* Reverse changes made by migrate_page_copy() */
4413 if (TestClearPageActive(new_page))
4414 @@ -1720,7 +1742,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4415 putback_lru_page(page);
4416 mod_zone_page_state(page_zone(page),
4417 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
4418 - goto out_fail;
4419 +
4420 + goto out_unlock;
4421 }
4422
4423 /*
4424 @@ -1732,16 +1755,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4425 */
4426 mem_cgroup_prepare_migration(page, new_page, &memcg);
4427
4428 + orig_entry = *pmd;
4429 entry = mk_pmd(new_page, vma->vm_page_prot);
4430 - entry = pmd_mknonnuma(entry);
4431 - entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4432 entry = pmd_mkhuge(entry);
4433 + entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4434
4435 - pmdp_clear_flush(vma, haddr, pmd);
4436 - set_pmd_at(mm, haddr, pmd, entry);
4437 - page_add_new_anon_rmap(new_page, vma, haddr);
4438 + /*
4439 + * Clear the old entry under pagetable lock and establish the new PTE.
4440 + * Any parallel GUP will either observe the old page blocking on the
4441 + * page lock, block on the page table lock or observe the new page.
4442 + * The SetPageUptodate on the new page and page_add_new_anon_rmap
4443 + * guarantee the copy is visible before the pagetable update.
4444 + */
4445 + flush_cache_range(vma, mmun_start, mmun_end);
4446 + page_add_new_anon_rmap(new_page, vma, mmun_start);
4447 + pmdp_clear_flush(vma, mmun_start, pmd);
4448 + set_pmd_at(mm, mmun_start, pmd, entry);
4449 + flush_tlb_range(vma, mmun_start, mmun_end);
4450 update_mmu_cache_pmd(vma, address, &entry);
4451 +
4452 + if (page_count(page) != 2) {
4453 + set_pmd_at(mm, mmun_start, pmd, orig_entry);
4454 + flush_tlb_range(vma, mmun_start, mmun_end);
4455 + update_mmu_cache_pmd(vma, address, &entry);
4456 + page_remove_rmap(new_page);
4457 + goto fail_putback;
4458 + }
4459 +
4460 page_remove_rmap(page);
4461 +
4462 /*
4463 * Finish the charge transaction under the page table lock to
4464 * prevent split_huge_page() from dividing up the charge
4465 @@ -1749,6 +1791,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4466 */
4467 mem_cgroup_end_migration(memcg, page, new_page, true);
4468 spin_unlock(&mm->page_table_lock);
4469 + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
4470
4471 unlock_page(new_page);
4472 unlock_page(page);
4473 @@ -1766,10 +1809,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
4474 out_fail:
4475 count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
4476 out_dropref:
4477 - entry = pmd_mknonnuma(entry);
4478 - set_pmd_at(mm, haddr, pmd, entry);
4479 - update_mmu_cache_pmd(vma, address, &entry);
4480 + spin_lock(&mm->page_table_lock);
4481 + if (pmd_same(*pmd, entry)) {
4482 + entry = pmd_mknonnuma(entry);
4483 + set_pmd_at(mm, mmun_start, pmd, entry);
4484 + update_mmu_cache_pmd(vma, address, &entry);
4485 + }
4486 + spin_unlock(&mm->page_table_lock);
4487
4488 +out_unlock:
4489 unlock_page(page);
4490 put_page(page);
4491 return 0;
4492 diff --git a/mm/mlock.c b/mm/mlock.c
4493 index d480cd6fc475..192e6eebe4f2 100644
4494 --- a/mm/mlock.c
4495 +++ b/mm/mlock.c
4496 @@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
4497
4498 /**
4499 * munlock_vma_page - munlock a vma page
4500 - * @page - page to be unlocked
4501 + * @page - page to be unlocked, either a normal page or THP page head
4502 + *
4503 + * returns the size of the page as a page mask (0 for normal page,
4504 + * HPAGE_PMD_NR - 1 for THP head page)
4505 *
4506 * called from munlock()/munmap() path with page supposedly on the LRU.
4507 * When we munlock a page, because the vma where we found the page is being
4508 @@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
4509 */
4510 unsigned int munlock_vma_page(struct page *page)
4511 {
4512 - unsigned int page_mask = 0;
4513 + unsigned int nr_pages;
4514
4515 BUG_ON(!PageLocked(page));
4516
4517 if (TestClearPageMlocked(page)) {
4518 - unsigned int nr_pages = hpage_nr_pages(page);
4519 + nr_pages = hpage_nr_pages(page);
4520 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
4521 - page_mask = nr_pages - 1;
4522 if (!isolate_lru_page(page))
4523 __munlock_isolated_page(page);
4524 else
4525 __munlock_isolation_failed(page);
4526 + } else {
4527 + nr_pages = hpage_nr_pages(page);
4528 }
4529
4530 - return page_mask;
4531 + /*
4532 + * Regardless of the original PageMlocked flag, we determine nr_pages
4533 + * after touching the flag. This leaves a possible race with a THP page
4534 + * split, such that a whole THP page was munlocked, but nr_pages == 1.
4535 + * Returning a smaller mask due to that is OK, the worst that can
4536 + * happen is subsequent useless scanning of the former tail pages.
4537 + * The NR_MLOCK accounting can however become broken.
4538 + */
4539 + return nr_pages - 1;
4540 }
4541
4542 /**
4543 @@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
4544 {
4545 int i;
4546 int nr = pagevec_count(pvec);
4547 - int delta_munlocked = -nr;
4548 + int delta_munlocked;
4549 struct pagevec pvec_putback;
4550 int pgrescued = 0;
4551
4552 + pagevec_init(&pvec_putback, 0);
4553 +
4554 /* Phase 1: page isolation */
4555 spin_lock_irq(&zone->lru_lock);
4556 for (i = 0; i < nr; i++) {
4557 @@ -318,18 +332,21 @@ skip_munlock:
4558 /*
4559 * We won't be munlocking this page in the next phase
4560 * but we still need to release the follow_page_mask()
4561 - * pin.
4562 + * pin. We cannot do it under lru_lock however. If it's
4563 + * the last pin, __page_cache_release would deadlock.
4564 */
4565 + pagevec_add(&pvec_putback, pvec->pages[i]);
4566 pvec->pages[i] = NULL;
4567 - put_page(page);
4568 - delta_munlocked++;
4569 }
4570 }
4571 + delta_munlocked = -nr + pagevec_count(&pvec_putback);
4572 __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
4573 spin_unlock_irq(&zone->lru_lock);
4574
4575 + /* Now we can release pins of pages that we are not munlocking */
4576 + pagevec_release(&pvec_putback);
4577 +
4578 /* Phase 2: page munlock */
4579 - pagevec_init(&pvec_putback, 0);
4580 for (i = 0; i < nr; i++) {
4581 struct page *page = pvec->pages[i];
4582
4583 @@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
4584
4585 while (start < end) {
4586 struct page *page = NULL;
4587 - unsigned int page_mask, page_increm;
4588 + unsigned int page_mask;
4589 + unsigned long page_increm;
4590 struct pagevec pvec;
4591 struct zone *zone;
4592 int zoneid;
4593 @@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
4594 goto next;
4595 }
4596 }
4597 - page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
4598 + /* It's a bug to munlock in the middle of a THP page */
4599 + VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
4600 + page_increm = 1 + page_mask;
4601 start += page_increm * PAGE_SIZE;
4602 next:
4603 cond_resched();
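
The new return contract of munlock_vma_page() and the stepping in munlock_vma_pages_range() fit together: the callee reports a power-of-two-minus-one mask, and the caller advances by 1 + page_mask pages. A compilable toy version, where fake_munlock() is an invented stand-in and the constants mirror 4 KiB pages with 2 MiB THP:

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define HPAGE_PMD_NR 512            /* 2 MiB / 4 KiB */

    /* Returns nr_pages - 1, as the patched munlock_vma_page() does:
     * 0 for a normal page, HPAGE_PMD_NR - 1 for a THP head page. */
    static unsigned int fake_munlock(int is_thp_head)
    {
        unsigned int nr_pages = is_thp_head ? HPAGE_PMD_NR : 1;
        return nr_pages - 1;
    }

    int main(void)
    {
        unsigned long start = 0x200000, end = 0x403000;

        while (start < end) {
            unsigned int page_mask = fake_munlock(start == 0x200000);
            /* Step over the whole (possibly huge) page; the VM_BUG_ON
             * in the hunk asserts that start is suitably aligned. */
            unsigned long page_increm = 1 + page_mask;
            printf("munlocked %lu page(s) at %#lx\n", page_increm, start);
            start += page_increm * PAGE_SIZE;
        }
        return 0;
    }
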
4604 diff --git a/mm/mprotect.c b/mm/mprotect.c
4605 index 6c3f56f19275..7651a571f283 100644
4606 --- a/mm/mprotect.c
4607 +++ b/mm/mprotect.c
4608 @@ -54,13 +54,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
4609 pte_t ptent;
4610 bool updated = false;
4611
4612 - ptent = ptep_modify_prot_start(mm, addr, pte);
4613 if (!prot_numa) {
4614 + ptent = ptep_modify_prot_start(mm, addr, pte);
4615 + if (pte_numa(ptent))
4616 + ptent = pte_mknonnuma(ptent);
4617 ptent = pte_modify(ptent, newprot);
4618 updated = true;
4619 } else {
4620 struct page *page;
4621
4622 + ptent = *pte;
4623 page = vm_normal_page(vma, addr, oldpte);
4624 if (page) {
4625 int this_nid = page_to_nid(page);
4626 @@ -73,6 +76,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
4627 if (!pte_numa(oldpte) &&
4628 page_mapcount(page) == 1) {
4629 ptent = pte_mknuma(ptent);
4630 + set_pte_at(mm, addr, pte, ptent);
4631 updated = true;
4632 }
4633 }
4634 @@ -89,7 +93,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
4635
4636 if (updated)
4637 pages++;
4638 - ptep_modify_prot_commit(mm, addr, pte, ptent);
4639 +
4640 + /* Only !prot_numa always clears the pte */
4641 + if (!prot_numa)
4642 + ptep_modify_prot_commit(mm, addr, pte, ptent);
4643 } else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
4644 swp_entry_t entry = pte_to_swp_entry(oldpte);
4645
4646 @@ -209,6 +216,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
4647 BUG_ON(addr >= end);
4648 pgd = pgd_offset(mm, addr);
4649 flush_cache_range(vma, addr, end);
4650 + set_tlb_flush_pending(mm);
4651 do {
4652 next = pgd_addr_end(addr, end);
4653 if (pgd_none_or_clear_bad(pgd))
4654 @@ -220,6 +228,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
4655 /* Only flush the TLB if we actually modified any entries: */
4656 if (pages)
4657 flush_tlb_range(vma, start, end);
4658 + clear_tlb_flush_pending(mm);
4659
4660 return pages;
4661 }
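
A rough userspace model of the set_tlb_flush_pending()/clear_tlb_flush_pending() bracketing added above, using C11 atomics; struct fake_mm and the function bodies are invented, only the ordering is the point:

    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_mm {
        atomic_bool tlb_flush_pending;
    };

    static void change_range(struct fake_mm *mm)
    {
        /* Publish "a flush is coming" before touching any entries, so
         * a concurrent reader that finds a cleared entry can tell it
         * may be racing with a batched invalidation. */
        atomic_store(&mm->tlb_flush_pending, true);
        /* ... walk page tables, rewrite entries, flush TLB here ... */
        atomic_store(&mm->tlb_flush_pending, false);
    }

    int main(void)
    {
        struct fake_mm mm = { .tlb_flush_pending = false };
        change_range(&mm);
        printf("pending after update: %d\n",
               atomic_load(&mm.tlb_flush_pending));
        return 0;
    }
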
4662 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4663 index dd886fac451a..317ea747d2cd 100644
4664 --- a/mm/page_alloc.c
4665 +++ b/mm/page_alloc.c
4666 @@ -1822,7 +1822,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
4667
4668 static bool zone_local(struct zone *local_zone, struct zone *zone)
4669 {
4670 - return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
4671 + return local_zone->node == zone->node;
4672 }
4673
4674 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
4675 @@ -1919,18 +1919,17 @@ zonelist_scan:
4676 * page was allocated in should have no effect on the
4677 * time the page has in memory before being reclaimed.
4678 *
4679 - * When zone_reclaim_mode is enabled, try to stay in
4680 - * local zones in the fastpath. If that fails, the
4681 - * slowpath is entered, which will do another pass
4682 - * starting with the local zones, but ultimately fall
4683 - * back to remote zones that do not partake in the
4684 - * fairness round-robin cycle of this zonelist.
4685 + * Try to stay in local zones in the fastpath. If
4686 + * that fails, the slowpath is entered, which will do
4687 + * another pass starting with the local zones, but
4688 + * ultimately fall back to remote zones that do not
4689 + * partake in the fairness round-robin cycle of this
4690 + * zonelist.
4691 */
4692 if (alloc_flags & ALLOC_WMARK_LOW) {
4693 if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
4694 continue;
4695 - if (zone_reclaim_mode &&
4696 - !zone_local(preferred_zone, zone))
4697 + if (!zone_local(preferred_zone, zone))
4698 continue;
4699 }
4700 /*
4701 @@ -2396,7 +2395,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
4702 * thrash fairness information for zones that are not
4703 * actually part of this zonelist's round-robin cycle.
4704 */
4705 - if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
4706 + if (!zone_local(preferred_zone, zone))
4707 continue;
4708 mod_zone_page_state(zone, NR_ALLOC_BATCH,
4709 high_wmark_pages(zone) -
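
The zone_local() change confines the fairness round-robin to the preferred node by comparing node IDs instead of distances. A toy illustration of why the distance test is looser, with an invented distance table (real values come from firmware):

    #include <stdio.h>

    #define LOCAL_DISTANCE 10

    static int node_distance(int a, int b)
    {
        /* Hypothetical topology where a neighboring node reports
         * LOCAL_DISTANCE, as close NUMA nodes can. */
        static const int dist[2][2] = { {10, 10}, {10, 10} };
        return dist[a][b];
    }

    int main(void)
    {
        int preferred = 0, other = 1;
        printf("distance test calls node 1 local: %d\n",
               node_distance(preferred, other) == LOCAL_DISTANCE);
        printf("node-id test calls node 1 local:  %d\n",
               preferred == other);
        return 0;
    }
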
4710 diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
4711 index 3929a40bd6c0..28e64f504ba5 100644
4712 --- a/mm/pgtable-generic.c
4713 +++ b/mm/pgtable-generic.c
4714 @@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
4715 pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
4716 pte_t *ptep)
4717 {
4718 + struct mm_struct *mm = (vma)->vm_mm;
4719 pte_t pte;
4720 - pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
4721 - if (pte_accessible(pte))
4722 + pte = ptep_get_and_clear(mm, address, ptep);
4723 + if (pte_accessible(mm, pte))
4724 flush_tlb_page(vma, address);
4725 return pte;
4726 }
4727 @@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
4728 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
4729 pmd_t *pmdp)
4730 {
4731 + pmd_t entry = *pmdp;
4732 + if (pmd_numa(entry))
4733 + entry = pmd_mknonnuma(entry);
4734 set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
4735 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
4736 }
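
A toy model of the intent behind the added pmd_mknonnuma() step: normalize away the NUMA marking before the entry is made not-present, so a racing fault handler does not mistake the transiently invalid pmd for a NUMA hinting entry. The flag encoding below is invented and far simpler than the real x86 one:

    #include <stdio.h>

    #define F_PRESENT 0x1
    #define F_NUMA    0x2

    static unsigned mknotpresent(unsigned e) { return e & ~F_PRESENT; }
    static unsigned mknonnuma(unsigned e)    { return e & ~F_NUMA; }
    static int looks_numa(unsigned e)        { return !!(e & F_NUMA); }

    int main(void)
    {
        unsigned pmd = F_PRESENT | F_NUMA;
        printf("invalidate only:       looks_numa=%d\n",
               looks_numa(mknotpresent(pmd)));
        printf("normalize, invalidate: looks_numa=%d\n",
               looks_numa(mknotpresent(mknonnuma(pmd))));
        return 0;
    }
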
4737 diff --git a/mm/rmap.c b/mm/rmap.c
4738 index fd3ee7a54a13..b9d2222a0ecb 100644
4739 --- a/mm/rmap.c
4740 +++ b/mm/rmap.c
4741 @@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
4742 spinlock_t *ptl;
4743
4744 if (unlikely(PageHuge(page))) {
4745 + /* when pud is not present, pte will be NULL */
4746 pte = huge_pte_offset(mm, address);
4747 + if (!pte)
4748 + return NULL;
4749 +
4750 ptl = &mm->page_table_lock;
4751 goto check;
4752 }
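
The rmap fix has the classic "lookup may return NULL" shape: huge_pte_offset() legitimately returns NULL when the pud is not present, and the caller must bail out rather than lock and dereference. A trivial compilable analogue with invented names:

    #include <stdio.h>

    static int *lookup_pte(int have_pud)
    {
        static int pte = 42;
        return have_pud ? &pte : (int *)0;  /* huge_pte_offset() analogue */
    }

    static int check_address(int have_pud)
    {
        int *pte = lookup_pte(have_pud);
        if (!pte)       /* the added guard: return instead of oopsing */
            return -1;
        return *pte;
    }

    int main(void)
    {
        printf("pud present: %d, pud absent: %d\n",
               check_address(1), check_address(0));
        return 0;
    }
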
4753 diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
4754 index a271c27fac77..722da616438c 100644
4755 --- a/net/wireless/radiotap.c
4756 +++ b/net/wireless/radiotap.c
4757 @@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
4758 /* find payload start allowing for extended bitmap(s) */
4759
4760 if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
4761 + if ((unsigned long)iterator->_arg -
4762 + (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
4763 + (unsigned long)iterator->_max_length)
4764 + return -EINVAL;
4765 while (get_unaligned_le32(iterator->_arg) &
4766 (1 << IEEE80211_RADIOTAP_EXT)) {
4767 iterator->_arg += sizeof(uint32_t);
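
The radiotap guard bounds every 4-byte read while walking chained extended bitmaps, so a malformed header cannot push the iterator past the buffer. A compilable userspace sketch of the same check; struct iter loosely mirrors the iterator fields and is not the wireless API:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct iter {
        const uint8_t *rtheader;   /* start of the header buffer */
        const uint8_t *arg;        /* current read position */
        size_t max_length;         /* claimed total length */
    };

    static int read_le32_checked(struct iter *it, uint32_t *out)
    {
        /* The added test: offset so far plus sizeof(uint32_t) must not
         * exceed max_length, else reject the header as malformed. */
        if ((size_t)(it->arg - it->rtheader) + sizeof(uint32_t) >
            it->max_length)
            return -1;              /* -EINVAL in the patch */
        memcpy(out, it->arg, sizeof(*out));  /* unaligned-safe read */
        it->arg += sizeof(uint32_t);
        return 0;
    }

    int main(void)
    {
        uint8_t buf[6] = {0};
        struct iter it = { buf, buf, sizeof(buf) };
        uint32_t v;
        printf("first read accepted:  %d\n", read_le32_checked(&it, &v) == 0);
        printf("second read rejected: %d\n", read_le32_checked(&it, &v) == -1);
        return 0;
    }
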
4768 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
4769 index 32b10f53d0b4..2dcb37736d84 100644
4770 --- a/scripts/link-vmlinux.sh
4771 +++ b/scripts/link-vmlinux.sh
4772 @@ -82,7 +82,9 @@ kallsyms()
4773 kallsymopt="${kallsymopt} --all-symbols"
4774 fi
4775
4776 - kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
4777 + if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
4778 + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
4779 + fi
4780
4781 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
4782 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
4783 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
4784 index d9a78fd8a2e1..392a0445265c 100644
4785 --- a/security/selinux/hooks.c
4786 +++ b/security/selinux/hooks.c
4787 @@ -3792,7 +3792,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
4788 u32 nlbl_sid;
4789 u32 nlbl_type;
4790
4791 - selinux_skb_xfrm_sid(skb, &xfrm_sid);
4792 + selinux_xfrm_skb_sid(skb, &xfrm_sid);
4793 selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
4794
4795 err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
4796 @@ -4297,8 +4297,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
4797 }
4798 err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
4799 PEER__RECV, &ad);
4800 - if (err)
4801 + if (err) {
4802 selinux_netlbl_err(skb, err, 0);
4803 + return err;
4804 + }
4805 }
4806
4807 if (secmark_active) {
4808 @@ -4809,22 +4811,32 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4809 * as fast and as clean as possible. */
4810 if (!selinux_policycap_netpeer)
4811 return selinux_ip_postroute_compat(skb, ifindex, family);
4812 +
4813 + secmark_active = selinux_secmark_enabled();
4814 + peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
4815 + if (!secmark_active && !peerlbl_active)
4816 + return NF_ACCEPT;
4817 +
4818 + sk = skb->sk;
4819 +
4820 #ifdef CONFIG_XFRM
4821 /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
4822 * packet transformation so allow the packet to pass without any checks
4823 * since we'll have another chance to perform access control checks
4824 * when the packet is on it's final way out.
4825 * NOTE: there appear to be some IPv6 multicast cases where skb->dst
4826 - * is NULL, in this case go ahead and apply access control. */
4827 - if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
4828 + * is NULL, in this case go ahead and apply access control.
4830 + * NOTE: if this is a local socket (skb->sk != NULL) that is in the
4831 + * TCP listening state we cannot wait until the XFRM processing
4832 + * is done as we will miss out on the SA label if we do;
4833 + * unfortunately, this means more work, but it is only once per
4834 + * connection. */
4835 + if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
4836 + !(sk != NULL && sk->sk_state == TCP_LISTEN))
4837 return NF_ACCEPT;
4838 #endif
4839 - secmark_active = selinux_secmark_enabled();
4840 - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
4841 - if (!secmark_active && !peerlbl_active)
4842 - return NF_ACCEPT;
4843
4844 - sk = skb->sk;
4845 if (sk == NULL) {
4846 /* Without an associated socket the packet is either coming
4847 * from the kernel or it is being forwarded; check the packet
4848 @@ -4852,6 +4864,25 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
4849 struct sk_security_struct *sksec = sk->sk_security;
4850 if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
4851 return NF_DROP;
4852 + /* At this point, if the returned skb peerlbl is SECSID_NULL
4853 + * and the packet has been through at least one XFRM
4854 + * transformation then we must be dealing with the "final"
4855 + * form of labeled IPsec packet; since we've already applied
4856 + * all of our access controls on this packet we can safely
4857 + * pass the packet. */
4858 + if (skb_sid == SECSID_NULL) {
4859 + switch (family) {
4860 + case PF_INET:
4861 + if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
4862 + return NF_ACCEPT;
4863 + break;
4864 + case PF_INET6:
4865 + if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
4866 + return NF_ACCEPT;
4867 + default:
4868 + return NF_DROP_ERR(-ECONNREFUSED);
4869 + }
4870 + }
4871 if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
4872 return NF_DROP;
4873 secmark_perm = PACKET__SEND;
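
Condensing the reordered selinux_ip_postroute() entry logic into a decision table shows what moved: the cheap "any labeling active?" test now precedes the XFRM early-accept, and that early-accept no longer fires for local listening sockets, so their SA label is still checked. A boolean sketch, not kernel code:

    #include <stdbool.h>
    #include <stdio.h>

    enum verdict { ACCEPT_EARLY, FULL_CHECKS };

    static enum verdict postroute(bool secmark_active, bool peerlbl_active,
                                  bool dst_has_xfrm, bool sk_listening)
    {
        if (!secmark_active && !peerlbl_active)
            return ACCEPT_EARLY;            /* nothing to enforce */
        if (dst_has_xfrm && !sk_listening)
            return ACCEPT_EARLY;            /* re-checked after XFRM */
        return FULL_CHECKS;
    }

    int main(void)
    {
        printf("listening + xfrm:   %d (1 = full checks)\n",
               postroute(true, true, true, true));
        printf("established + xfrm: %d\n",
               postroute(true, true, true, false));
        return 0;
    }
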
4874 @@ -5521,11 +5552,11 @@ static int selinux_setprocattr(struct task_struct *p,
4875 /* Check for ptracing, and update the task SID if ok.
4876 Otherwise, leave SID unchanged and fail. */
4877 ptsid = 0;
4878 - task_lock(p);
4879 + rcu_read_lock();
4880 tracer = ptrace_parent(p);
4881 if (tracer)
4882 ptsid = task_sid(tracer);
4883 - task_unlock(p);
4884 + rcu_read_unlock();
4885
4886 if (tracer) {
4887 error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
4888 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
4889 index 6713f04e30ba..c1af4e14b12f 100644
4890 --- a/security/selinux/include/xfrm.h
4891 +++ b/security/selinux/include/xfrm.h
4892 @@ -47,6 +47,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
4893 int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
4894 struct common_audit_data *ad, u8 proto);
4895 int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
4896 +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
4897
4898 static inline void selinux_xfrm_notify_policyload(void)
4899 {
4900 @@ -85,12 +86,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
4901 static inline void selinux_xfrm_notify_policyload(void)
4902 {
4903 }
4904 -#endif
4905
4906 -static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
4907 +static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
4908 {
4909 - int err = selinux_xfrm_decode_session(skb, sid, 0);
4910 - BUG_ON(err);
4911 + *sid = SECSID_NULL;
4912 + return 0;
4913 }
4914 +#endif
4915
4916 #endif /* _SELINUX_XFRM_H_ */
4917 diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
4918 index d03081886214..78504a18958a 100644
4919 --- a/security/selinux/xfrm.c
4920 +++ b/security/selinux/xfrm.c
4921 @@ -152,21 +152,13 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
4922 return rc;
4923 }
4924
4925 -/*
4926 - * LSM hook implementation that checks and/or returns the xfrm sid for the
4927 - * incoming packet.
4928 - */
4929 -
4930 -int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
4931 +static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
4932 + u32 *sid, int ckall)
4933 {
4934 - struct sec_path *sp;
4935 + struct sec_path *sp = skb->sp;
4936
4937 *sid = SECSID_NULL;
4938
4939 - if (skb == NULL)
4940 - return 0;
4941 -
4942 - sp = skb->sp;
4943 if (sp) {
4944 int i, sid_set = 0;
4945
4946 @@ -190,6 +182,45 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
4947 return 0;
4948 }
4949
4950 +static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
4951 +{
4952 + struct dst_entry *dst = skb_dst(skb);
4953 + struct xfrm_state *x;
4954 +
4955 + if (dst == NULL)
4956 + return SECSID_NULL;
4957 + x = dst->xfrm;
4958 + if (x == NULL || !selinux_authorizable_xfrm(x))
4959 + return SECSID_NULL;
4960 +
4961 + return x->security->ctx_sid;
4962 +}
4963 +
4964 +/*
4965 + * LSM hook implementation that checks and/or returns the xfrm sid for the
4966 + * incoming packet.
4967 + */
4968 +
4969 +int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
4970 +{
4971 + if (skb == NULL) {
4972 + *sid = SECSID_NULL;
4973 + return 0;
4974 + }
4975 + return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
4976 +}
4977 +
4978 +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
4979 +{
4980 + int rc;
4981 +
4982 + rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
4983 + if (rc == 0 && *sid == SECSID_NULL)
4984 + *sid = selinux_xfrm_skb_sid_egress(skb);
4985 +
4986 + return rc;
4987 +}
4988 +
4989 /*
4990 * Security blob allocation for xfrm_policy and xfrm_state
4991 * CTX does not have a meaningful value on input
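
The new selinux_xfrm_skb_sid() is a small composition: prefer the inbound security-path label, and only when that resolves to nothing consult the outbound dst's transform. Reduced to its control flow, with stand-in lookups:

    #include <stdio.h>

    #define SECSID_NULL 0u

    static int sid_ingress(unsigned *sid) { *sid = SECSID_NULL; return 0; }
    static unsigned sid_egress(void)      { return 42; /* stand-in label */ }

    static int skb_sid(unsigned *sid)
    {
        int rc = sid_ingress(sid);
        if (rc == 0 && *sid == SECSID_NULL)  /* no inbound label found */
            *sid = sid_egress();             /* fall back to egress */
        return rc;
    }

    int main(void)
    {
        unsigned sid;
        skb_sid(&sid);
        printf("resolved sid: %u\n", sid);
        return 0;
    }
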
4992 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
4993 index 6e03b465e44e..a2104671f51d 100644
4994 --- a/sound/core/pcm_lib.c
4995 +++ b/sound/core/pcm_lib.c
4996 @@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
4997 case SNDRV_PCM_STATE_DISCONNECTED:
4998 err = -EBADFD;
4999 goto _endloop;
5000 + case SNDRV_PCM_STATE_PAUSED:
5001 + continue;
5002 }
5003 if (!tout) {
5004 snd_printd("%s write error (DMA or IRQ trouble?)\n",
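
The pcm_lib change teaches the availability wait loop that a paused stream makes no DMA progress, so time spent paused must not count against the timeout. A self-contained caricature of that loop; states and return codes are simplified stand-ins:

    #include <stdio.h>

    enum state { RUNNING, PAUSED, DISCONNECTED };

    static int wait_for_avail(const enum state *trace, int steps)
    {
        int tout = 3;               /* waits left before "DMA trouble" */
        for (int i = 0; i < steps; i++) {
            switch (trace[i]) {
            case DISCONNECTED:
                return -1;          /* -EBADFD in the driver */
            case PAUSED:
                continue;           /* the added case: keep waiting */
            default:
                break;
            }
            if (!--tout)
                return -2;          /* spurious timeout without the fix */
        }
        return 0;
    }

    int main(void)
    {
        enum state trace[] = { PAUSED, PAUSED, PAUSED, PAUSED, RUNNING };
        printf("paused stream survives: %d\n", wait_for_avail(trace, 5));
        return 0;
    }
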
5005 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5006 index a63aff2ca594..f7e76619f7c9 100644
5007 --- a/sound/pci/hda/hda_intel.c
5008 +++ b/sound/pci/hda/hda_intel.c
5009 @@ -3430,6 +3430,10 @@ static void check_probe_mask(struct azx *chip, int dev)
5010 * white/black-list for enable_msi
5011 */
5012 static struct snd_pci_quirk msi_black_list[] = {
5013 + SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
5014 + SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
5015 + SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
5016 + SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
5017 SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
5018 SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
5019 SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
5020 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5021 index 21b948af6ed0..dce47c414ea7 100644
5022 --- a/sound/pci/hda/patch_realtek.c
5023 +++ b/sound/pci/hda/patch_realtek.c
5024 @@ -4004,10 +4004,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5025 SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5026 SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5027 SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5028 + SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5029 SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5030 SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5031 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
5032 + SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5033 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
5034 + SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
5035 SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
5036 SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
5037 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
5038 diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
5039 index 0048ce5bfa2f..831a34d7cc72 100644
5040 --- a/sound/soc/codecs/wm5110.c
5041 +++ b/sound/soc/codecs/wm5110.c
5042 @@ -1031,7 +1031,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
5043 { "HPOUT2R", NULL, "OUT2R" },
5044
5045 { "HPOUT3L", NULL, "OUT3L" },
5046 - { "HPOUT3R", NULL, "OUT3L" },
5047 + { "HPOUT3R", NULL, "OUT3R" },
5048
5049 { "SPKOUTLN", NULL, "OUT4L" },
5050 { "SPKOUTLP", NULL, "OUT4L" },
5051 diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
5052 index 4dfa8dceeabf..48bae0ec500f 100644
5053 --- a/sound/soc/codecs/wm8904.c
5054 +++ b/sound/soc/codecs/wm8904.c
5055 @@ -1443,7 +1443,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
5056
5057 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
5058 case SND_SOC_DAIFMT_DSP_B:
5059 - aif1 |= WM8904_AIF_LRCLK_INV;
5060 + aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
5061 case SND_SOC_DAIFMT_DSP_A:
5062 aif1 |= 0x3;
5063 break;
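
The wm8904 hunk relies on a deliberate switch fall-through: DSP mode B is mode A's format bits plus LRCLK inversion, with every contribution ORed in. The same shape with invented register values:

    #include <stdio.h>

    #define FMT_DSP   0x3
    #define LRCLK_INV 0x10

    static unsigned dai_fmt(int mode_b)
    {
        unsigned aif1 = 0;
        switch (mode_b) {
        case 1:                         /* DSP_B */
            aif1 |= FMT_DSP | LRCLK_INV;
            /* fall through: DSP_B = DSP_A + LRCLK inversion */
        case 0:                         /* DSP_A */
            aif1 |= FMT_DSP;
            break;
        }
        return aif1;
    }

    int main(void)
    {
        printf("DSP_A: %#x  DSP_B: %#x\n", dai_fmt(0), dai_fmt(1));
        return 0;
    }
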
5064 diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
5065 index 60b6b593c407..0d5de6003849 100644
5066 --- a/sound/soc/codecs/wm_adsp.c
5067 +++ b/sound/soc/codecs/wm_adsp.c
5068 @@ -1466,13 +1466,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
5069 return ret;
5070
5071 /* Wait for the RAM to start, should be near instantaneous */
5072 - count = 0;
5073 - do {
5074 + for (count = 0; count < 10; ++count) {
5075 ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
5076 &val);
5077 if (ret != 0)
5078 return ret;
5079 - } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
5080 +
5081 + if (val & ADSP2_RAM_RDY)
5082 + break;
5083 +
5084 + msleep(1);
5085 + }
5086
5087 if (!(val & ADSP2_RAM_RDY)) {
5088 adsp_err(dsp, "Failed to start DSP RAM\n");
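
The wm_adsp rework is a standard bounded poll: read a status register up to N times, sleep between attempts instead of spinning, and fail cleanly if the ready bit never appears. A runnable analogue where read_status() fakes a register that becomes ready on the third read:

    #include <stdio.h>
    #include <unistd.h>

    static int reads;
    static unsigned read_status(void) { return ++reads >= 3 ? 0x1 : 0x0; }

    int main(void)
    {
        unsigned val = 0;
        int count;

        for (count = 0; count < 10; ++count) {
            val = read_status();
            if (val & 0x1)          /* ADSP2_RAM_RDY analogue */
                break;
            usleep(1000);           /* msleep(1) analogue */
        }

        if (!(val & 0x1)) {
            fprintf(stderr, "Failed to start DSP RAM\n");
            return 1;
        }
        printf("ready after %d poll(s)\n", count + 1);
        return 0;
    }
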
5089 diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
5090 index 52af7f6fb37f..540832e9e684 100644
5091 --- a/sound/soc/tegra/tegra20_i2s.c
5092 +++ b/sound/soc/tegra/tegra20_i2s.c
5093 @@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
5094 unsigned int fmt)
5095 {
5096 struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
5097 - unsigned int mask, val;
5098 + unsigned int mask = 0, val = 0;
5099
5100 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
5101 case SND_SOC_DAIFMT_NB_NF:
5102 @@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
5103 return -EINVAL;
5104 }
5105
5106 - mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
5107 + mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
5108 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
5109 case SND_SOC_DAIFMT_CBS_CFS:
5110 - val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
5111 + val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
5112 break;
5113 case SND_SOC_DAIFMT_CBM_CFM:
5114 break;
5115 diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
5116 index 551b3c93ce93..2e7d4aca3d7d 100644
5117 --- a/sound/soc/tegra/tegra20_spdif.c
5118 +++ b/sound/soc/tegra/tegra20_spdif.c
5119 @@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
5120 {
5121 struct device *dev = dai->dev;
5122 struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
5123 - unsigned int mask, val;
5124 + unsigned int mask = 0, val = 0;
5125 int ret, spdifclock;
5126
5127 - mask = TEGRA20_SPDIF_CTRL_PACK |
5128 - TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
5129 + mask |= TEGRA20_SPDIF_CTRL_PACK |
5130 + TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
5131 switch (params_format(params)) {
5132 case SNDRV_PCM_FORMAT_S16_LE:
5133 - val = TEGRA20_SPDIF_CTRL_PACK |
5134 - TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
5135 + val |= TEGRA20_SPDIF_CTRL_PACK |
5136 + TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
5137 break;
5138 default:
5139 return -EINVAL;
5140 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
5141 index 47565fd04505..732e9cb1be97 100644
5142 --- a/sound/soc/tegra/tegra30_i2s.c
5143 +++ b/sound/soc/tegra/tegra30_i2s.c
5144 @@ -117,7 +117,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
5145 unsigned int fmt)
5146 {
5147 struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
5148 - unsigned int mask, val;
5149 + unsigned int mask = 0, val = 0;
5150
5151 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
5152 case SND_SOC_DAIFMT_NB_NF:
5153 @@ -126,10 +126,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
5154 return -EINVAL;
5155 }
5156
5157 - mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
5158 + mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
5159 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
5160 case SND_SOC_DAIFMT_CBS_CFS:
5161 - val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
5162 + val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
5163 break;
5164 case SND_SOC_DAIFMT_CBM_CFM:
5165 break;
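
All three tegra hunks apply one idiom: when several switch blocks each contribute bits to a shared mask/val pair, initialize the pair to zero and accumulate with |=, so a later block cannot silently clobber an earlier one with a plain assignment. Minimal form, bits invented:

    #include <stdio.h>

    #define INV_BIT    0x1
    #define MASTER_BIT 0x2

    int main(void)
    {
        unsigned mask = 0, val = 0;   /* the fix: explicit zero init */

        mask |= INV_BIT;              /* block 1: clock inversion */
        val  |= INV_BIT;

        mask |= MASTER_BIT;           /* block 2: "|=" keeps block 1 */
        val  |= MASTER_BIT;

        printf("mask=%#x val=%#x\n", mask, val);
        return 0;
    }
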
5166 diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
5167 index dc4de3762111..bcf1d2f0b791 100644
5168 --- a/tools/power/cpupower/utils/cpupower-set.c
5169 +++ b/tools/power/cpupower/utils/cpupower-set.c
5170 @@ -18,9 +18,9 @@
5171 #include "helpers/bitmask.h"
5172
5173 static struct option set_opts[] = {
5174 - { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
5175 - { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
5176 - { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
5177 + { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
5178 + { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
5179 + { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
5180 { },
5181 };
5182
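
The cpupower-set change matters because of how glibc parses long options: with optional_argument, a value is only recognized in the --perf-bias=5 spelling, so the common "--perf-bias 5" form leaves optarg NULL; required_argument accepts both. A small compilable demonstration mirroring the patched table (only the perf-bias entry shown):

    #include <getopt.h>
    #include <stdio.h>
    #include <stdlib.h>

    static struct option set_opts[] = {
        { "perf-bias", required_argument, NULL, 'b' },
        { NULL, 0, NULL, 0 },
    };

    int main(int argc, char **argv)
    {
        int ret;
        while ((ret = getopt_long(argc, argv, "b:", set_opts, NULL)) != -1) {
            if (ret == 'b')
                printf("perf-bias = %d\n", atoi(optarg));
        }
        return 0;
    }

Running it as "./demo --perf-bias 5" now prints the value, where an optional_argument table would have parsed nothing.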