Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.2/0103-4.2.4-all-fixes.patch

Revision 2709 - committed by niro, Mon Nov 2 12:49:28 2015 UTC
File size: 338536 byte(s)
-linux-4.2.4
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 93aa8604630e..21152d397b88 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -218,16 +218,16 @@ The development process
Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
- - main 3.x kernel tree
- - 3.x.y -stable kernel tree
- - 3.x -git kernel patches
+ - main 4.x kernel tree
+ - 4.x.y -stable kernel tree
+ - 4.x -git kernel patches
- subsystem specific kernel trees and patches
- - the 3.x -next kernel tree for integration tests
+ - the 4.x -next kernel tree for integration tests

-3.x kernel tree
+4.x kernel tree
-----------------
-3.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v3.x/ directory. Its development
+4.x kernels are maintained by Linus Torvalds, and can be found on
+kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
process is as follows:
- As soon as a new kernel is released a two weeks window is open,
during this period of time maintainers can submit big diffs to
@@ -262,20 +262,20 @@ mailing list about kernel releases:
released according to perceived bug status, not according to a
preconceived timeline."

-3.x.y -stable kernel tree
+4.x.y -stable kernel tree
---------------------------
Kernels with 3-part versions are -stable kernels. They contain
relatively small and critical fixes for security problems or significant
-regressions discovered in a given 3.x kernel.
+regressions discovered in a given 4.x kernel.

This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.

-If no 3.x.y kernel is available, then the highest numbered 3.x
+If no 4.x.y kernel is available, then the highest numbered 4.x
kernel is the current stable kernel.

-3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
are released as needs dictate. The normal release period is approximately
two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
documents what kinds of changes are acceptable for the -stable tree, and
how the release process works.

-3.x -git patches
+4.x -git patches
------------------
These are daily snapshots of Linus' kernel tree which are managed in a
git repository (hence the name.) These patches are usually released
@@ -317,9 +317,9 @@ revisions to it, and maintainers can mark patches as under review,
accepted, or rejected. Most of these patchwork sites are listed at
http://patchwork.kernel.org/.

-3.x -next kernel tree for integration tests
+4.x -next kernel tree for integration tests
---------------------------------------------
-Before updates from subsystem trees are merged into the mainline 3.x
+Before updates from subsystem trees are merged into the mainline 4.x
tree, they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
pulled on an almost daily basis:
diff --git a/Makefile b/Makefile
index a6edbb11a69a..a952801a6cd5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 2
-SUBLEVEL = 3
+SUBLEVEL = 4
EXTRAVERSION =
NAME = Hurr durr I'ma sheep

diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index e7769c3ab5f2..ac79491ee2c0 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -402,6 +402,8 @@ static void __init axs103_early_init(void)
unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
if (num_cores > 2)
arc_set_core_freq(50 * 1000000);
+ else if (num_cores == 2)
+ arc_set_core_freq(75 * 1000000);
#endif

switch (arc_get_core_freq()/1000000) {
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 7451b447cc2d..2c2b28ee4811 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -54,6 +54,14 @@ AS += -EL
LD += -EL
endif

+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
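
(Illustrative note, not part of the patch: GCC bug 65932 concerns the SRA pass splitting aggregates and mishandling the sign of narrow members on ARM with GCC 4.9+. A hypothetical struct of the affected shape, to make the comment concrete — the names below are invented. Probing the flag with cc-option keeps older compilers, which lack -fno-ipa-sra, building.)

/* Hypothetical example of the pattern at risk: a struct with signed
 * char/short members, accessed by value, that SRA may decompose into
 * scalars whose sign extension is then miscompiled. */
struct sample {
	signed char a;
	signed short b;
};

int widen_members(struct sample s)
{
	return s.a + s.b;	/* relies on correct sign extension */
}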
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 534f27ceb10b..fa8107dec109 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -1118,7 +1118,7 @@
interrupt-parent = <&combiner>;
interrupts = <3 0>;
clock-names = "sysmmu", "master";
- clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>;
+ clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>;
power-domains = <&disp_pd>;
#iommu-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 3373fd958e95..a50356243888 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a5474113cd50..67659a0ed13e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@

tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};

diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 275618f19a43..5771a149ce4a 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@

i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};

diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 6a63f30c9a69..f5f384c04335 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -107,7 +107,7 @@
720000 1200000
528000 1100000
312000 1000000
- 144000 900000
+ 144000 1000000
>;
#cooling-cells = <2>;
cooling-min-level = <0>;
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c9bce3..fd9eefce0a7b 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;

- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);

return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);

return 0;
}
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 54272e0be713..7d5379c1c443 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)

/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq))
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ cpumask_setall(&pmu->supported_cpus);
return 0;
+ }

irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..586eef26203d 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,12 +343,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;

-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
*/
cpsr &= ~PSR_IT_MASK;
#endif
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 702740d37465..51a59504bef4 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -515,8 +515,7 @@ ARM_BE8(rev r6, r6 )

mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb

mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -529,6 +528,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF

1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7b4201294187..6984342da13d 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1792,8 +1792,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;

/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
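
(Illustrative note, not part of the patch: the hunk above splits the physical-address computation so the page offset is widened before it is shifted. A standalone sketch of the difference, assuming a 32-bit unsigned long and a 64-bit phys_addr_t, as with ARM LPAE:)

#include <stdint.h>

typedef uint64_t phys_addr_t;	/* 64-bit physical addresses (LPAE) */

/* vm_pgoff is an unsigned long (32 bits here); shifting it before
 * widening truncates physical addresses above 4 GiB. */
phys_addr_t pa_truncated(unsigned long pgoff)
{
	return pgoff << 12;	/* shift happens in 32 bits: high bits lost */
}

phys_addr_t pa_correct(unsigned long pgoff)
{
	return (phys_addr_t)pgoff << 12;	/* widen first, as the patch does */
}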
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf54795f05..56978199c479 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
+#include <asm/smp_plat.h>

#include "regs-pmu.h"
#include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;

- exynos_cpu_power_up(cpunr);
+ if (!exynos_cpu_power_state(cpunr)) {
+ exynos_cpu_power_up(cpunr);
+
+ /*
+ * This assumes the cluster number of the big cores(Cortex A15)
+ * is 0 and the Little cores(Cortex A7) is 1.
+ * When the system was booted from the Little core,
+ * they should be reset during power up cpu.
+ */
+ if (cluster &&
+ cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ /*
+ * Before we reset the Little cores, we should wait
+ * the SPARE2 register is set to 1 because the init
+ * codes of the iROM will set the register after
+ * initialization.
+ */
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+
+ pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+ EXYNOS_SWRESET);
+ }
+ }
+
return 0;
}

diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b7614333d296..fba9068ed260 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF

+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ad9529cc4203..daa1a65f2eb7 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -107,7 +107,6 @@ static const struct of_device_id pxa_ssp_of_ids[] = {
{ .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
{ .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
{ .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
- { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
{ },
};
MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaedd02..13671a9cf016 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs*/
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs*/
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd1aad8..27c3e6fd24c1 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -278,6 +278,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb790f7..066e74f666ae 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"

+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
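
(Illustrative note, not part of the patch: how asmlinkage_protect() is meant to be used. The function below is hypothetical, but the pattern matches the kernel's syscall wrappers: invoke the macro just before returning so GCC treats the caller-owned stack argument slots as live and neither reuses them nor tail-calls.)

/* Hypothetical m68k-style syscall body using the macros above. */
extern long do_example(long fd, long buf, long count);

long sys_example(long fd, long buf, long count)
{
	long ret = do_example(fd, buf, count);

	/* Empty asm that "uses" ret and the three stack arguments,
	 * pinning their slots until the function ends. */
	asmlinkage_protect(3, ret, fd, buf, count);
	return ret;
}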
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 9f71c06aebf6..209ded16806b 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -39,6 +39,7 @@
mfc0 \dest, CP0_CONFIG, 3
andi \dest, \dest, MIPS_CONF3_MT
beqz \dest, \nomt
+ nop
.endm

.section .text.cps-vec
@@ -223,10 +224,9 @@ LEAF(excep_ejtag)
END(excep_ejtag)

LEAF(mips_cps_core_init)
-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP
/* Check that the core implements the MT ASE */
has_mt t0, 3f
- nop

.set push
.set mips64r2
@@ -310,8 +310,9 @@ LEAF(mips_cps_boot_vpes)
PTR_ADDU t0, t0, t1

/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+ li t9, 0
+#ifdef CONFIG_MIPS_MT_SMP
has_mt ta2, 1f
- li t9, 0

/* Find the number of VPEs present in the core */
mfc0 t1, CP0_MVPCONF0
@@ -330,6 +331,7 @@ LEAF(mips_cps_boot_vpes)
/* Retrieve the VPE ID from EBase.CPUNum */
mfc0 t9, $15, 1
and t9, t9, t1
+#endif

1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
@@ -337,7 +339,7 @@ LEAF(mips_cps_boot_vpes)
PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
PTR_ADDU v0, v0, ta3

-#ifdef CONFIG_MIPS_MT
+#ifdef CONFIG_MIPS_MT_SMP

/* If the core doesn't support MT then return */
bnez ta2, 1f
@@ -451,7 +453,7 @@ LEAF(mips_cps_boot_vpes)

2: .set pop

-#endif /* CONFIG_MIPS_MT */
+#endif /* CONFIG_MIPS_MT_SMP */

/* Return */
jr ra
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 008b3378653a..4ceac5cdd6b8 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -338,7 +338,7 @@ static void __init bootmem_init(void)
if (end <= reserved_end)
continue;
#ifdef CONFIG_BLK_DEV_INITRD
- /* mapstart should be after initrd_end */
+ /* Skip zones before initrd and initrd itself */
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
continue;
#endif
@@ -371,6 +371,14 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}

+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+ * mapstart should be after initrd_end
+ */
+ if (initrd_end)
+ mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
+#endif
+
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index f6c44dd332e2..d6d07ad56180 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
}
if (memsize == 0)
memsize = 256;
+
+ loongson_sysconf.nr_uarts = 1;
+
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
#else
struct boot_params *boot_p;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index eeaf0245c3b1..815892ed3fe8 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
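
(Illustrative note, not part of the patch: for reading the hunk above, DMA_BIT_MASK() builds an n-bit all-ones mask; its definition in include/linux/dma-mapping.h is reproduced below. Comparing against the width of phys_addr_t instead of a hard-coded 64 bits means a platform whose physical addresses are only 32 bits wide no longer forces __GFP_DMA on devices that can already reach all of memory.)

/* n-bit all-ones mask; n == 64 is special-cased because shifting a
 * 64-bit value by 64 would be undefined behaviour in C. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Example: with a 32-bit phys_addr_t the new test becomes
 *	dev->coherent_dma_mask < DMA_BIT_MASK(32)
 * which is false for a device with a full 32-bit mask. */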
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
index e92726099be0..dabf4179cd7e 100644
--- a/arch/mips/net/bpf_jit_asm.S
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -64,8 +64,20 @@ sk_load_word_positive:
PTR_ADDU t1, $r_skb_data, offset
lw $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
rotr $r_A, t0, 16
+# else
+ sll t0, $r_A, 24
+ srl t1, $r_A, 24
+ srl t2, $r_A, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_A, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -80,8 +92,16 @@ sk_load_half_positive:
PTR_ADDU t1, $r_skb_data, offset
lh $r_A, 0(t1)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_A
seh $r_A, t0
+# else
+ sll t0, $r_A, 24
+ andi t1, $r_A, 0xff00
+ sra t0, t0, 16
+ srl t1, t1, 8
+ or $r_A, t0, t1
+# endif
#endif
jr $r_ra
move $r_ret, zero
@@ -148,23 +168,47 @@ sk_load_byte_positive:
NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
bpf_slow_path_common(4)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
wsbh t0, $r_s0
jr $r_ra
rotr $r_A, t0, 16
-#endif
+# else
+ sll t0, $r_s0, 24
+ srl t1, $r_s0, 24
+ srl t2, $r_s0, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_s0, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
- move $r_A, $r_s0
+ move $r_A, $r_s0
+#endif

END(bpf_slow_path_word)

NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
bpf_slow_path_common(2)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if defined(__mips_isa_rev) && (__mips_isa_rev >= 2)
jr $r_ra
wsbh $r_A, $r_s0
-#endif
+# else
+ sll t0, $r_s0, 8
+ andi t1, $r_s0, 0xff00
+ andi t0, t0, 0xff00
+ srl t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
jr $r_ra
move $r_A, $r_s0
+#endif

END(bpf_slow_path_half)

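(Illustrative note, not part of the patch: the pre-R2 fallback paths added above open-code a byte swap, because wsbh/rotr/seh only exist from MIPS32/64 R2 onward. The 32-bit shift-and-mask sequence is equivalent to this standalone C swap:)

#include <stdint.h>

/* C equivalent of the sll/srl/andi/or sequence in the word paths. */
static uint32_t swab32(uint32_t x)
{
	return (x << 24) |		/* byte 0 -> byte 3 */
	       ((x & 0xff00) << 8) |	/* byte 1 -> byte 2 */
	       ((x >> 8) & 0xff00) |	/* byte 2 -> byte 1 */
	       (x >> 24);		/* byte 3 -> byte 0 */
}

/* The half-word paths do the same on 16 bits, using an arithmetic
 * shift so the result stays sign-extended, matching lh. */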
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 05ea8fc7f829..4816fe2fa857 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -827,12 +827,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
unsigned long size = kvmppc_get_gpr(vcpu, 4);
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
u64 buf;
+ int srcu_idx;
int ret;

if (!is_power_of_2(size) || (size > sizeof(buf)))
return H_TOO_HARD;

+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;

@@ -867,6 +870,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
unsigned long val = kvmppc_get_gpr(vcpu, 6);
u64 buf;
+ int srcu_idx;
int ret;

switch (size) {
@@ -890,7 +894,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_TOO_HARD;
}

+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 68d067ad4222..a9f753fb73a8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2178,7 +2178,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vc->runner = vcpu;
if (n_ceded == vc->n_runnable) {
kvmppc_vcore_blocked(vc);
- } else if (should_resched()) {
+ } else if (need_resched()) {
vc->vcore_state = VCORE_PREEMPT;
/* Let something else run */
cond_resched_lock(&vc->lock);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 76408cf0ad04..437f64350847 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1171,6 +1171,7 @@ mc_cont:
bl kvmhv_accumulate_time
#endif

+ mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index 27f2b187a91b..ff1bb4b690b9 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -63,6 +63,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -70,10 +71,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}

return;
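(Illustrative note, not part of the patch: the same reordering is applied to the powernv, fsl_msi, mpic_u3msi and ppc4xx_msi teardown paths below. The issue is ordering, not style: virq_to_hw() needs the virq's mapping, so the hwirq must be captured before irq_dispose_mapping() destroys that mapping, and the bitmap slot is freed only afterwards so the hwirq cannot be handed out again while still mapped. Condensed from the loop body above, with bmp standing in for the driver's bitmap:)

/* Broken ordering: frees the hwirq while its mapping still exists. */
msi_bitmap_free_hwirqs(&bmp, virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);

/* Fixed ordering, as in the hunk above. */
hwirq = virq_to_hw(entry->irq);		/* translate while still mapped */
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);	/* virq_to_hw() invalid past here */
msi_bitmap_free_hwirqs(&bmp, hwirq, 1);	/* safe to recycle now */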
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 765d8ed558d0..fd16f86e54a9 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

if (WARN_ON(!phb))
return;
@@ -106,10 +107,10 @@ void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 5236e5427c38..691e8e517b3e 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index fc46ef3b816e..4c3165fa521c 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6eb21f2ea585..060f23775255 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;

dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}

diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d4788111c161..fac6ac9790fa 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o

KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)

diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index fe8d6924efaa..c78ba51ae285 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;

+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -397,7 +414,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;

/* Create struct sigcontext32 on the signal stack */
- memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sigset_to_sigset32(set->sig, sc.oldmask);
sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
@@ -458,6 +475,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -505,11 +523,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();

/* Create ucontext on the signal stack. */
+ sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;

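(Illustrative note, not part of the patch: what the two helpers above compute, isolated from the kernel types. A 64-bit s390 sigset keeps the whole mask in one unsigned long, while the 31-bit compat frame stores two 32-bit words, low half first; copying the struct byte-for-byte, as the old code did, mixes up the halves.)

#include <stdint.h>

/* Split a 64-bit signal mask into the two 32-bit compat words. */
static void sigset_split(uint64_t set64, uint32_t set32[2])
{
	set32[0] = (uint32_t)set64;		/* low 32 bits first */
	set32[1] = (uint32_t)(set64 >> 32);
}

/* Rebuild the 64-bit mask from the compat words. */
static uint64_t sigset_join(const uint32_t set32[2])
{
	return (uint64_t)set32[0] | ((uint64_t)set32[1] << 32);
}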
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 8cb3e438f21e..d330840a2b18 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1219,7 +1219,18 @@ END(error_exit)

/* Runs on exception stack */
ENTRY(nmi)
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1270,9 +1281,12 @@ ENTRY(nmi)
* we don't want to enable interrupts, because then we'll end
* up in an awkward situation in which IRQs are on but NMIs
* are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
*/

- SWAPGS
+ SWAPGS_UNSAFE_STACK
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9ebc3d009373..2350ab78183a 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -311,6 +311,7 @@
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index dca71714f860..b12f81022a6b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -90,9 +90,9 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!raw_cpu_read_4(__preempt_count));
+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
}

#ifdef CONFIG_PREEMPT
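
(Illustrative note, not part of the patch: why should_resched() now takes an argument. Callers like cond_resched_lock() run with a spinlock held, so the preempt count cannot be zero there; the test becomes "has the count dropped to exactly the offset my held locks account for?". A standalone model of the semantics:)

/* preempt_count modelled as a plain counter: each held spinlock adds 1. */
static int preempt_count;

/* One spinlock held is the offset expected inside cond_resched_lock(). */
#define PREEMPT_LOCK_OFFSET	1

static int should_resched(int preempt_offset)
{
	/* Old form asked for preempt_count == 0; the new form asks for
	 * equality with the caller's expected offset, so rescheduling
	 * can be offered while a lock is still held. */
	return preempt_count == preempt_offset;
}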
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae1cba3..eaba08076030 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -39,18 +39,27 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
}
#endif

-#define virt_queued_spin_lock virt_queued_spin_lock
-
-static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifdef CONFIG_PARAVIRT
+#define virt_spin_lock virt_spin_lock
+static inline bool virt_spin_lock(struct qspinlock *lock)
{
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return false;

- while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
- cpu_relax();
+ /*
+ * On hypervisors without PARAVIRT_SPINLOCKS support we fall
+ * back to a Test-and-Set spinlock, because fair locks have
+ * horrible lock 'holder' preemption issues.
+ */
+
+ do {
+ while (atomic_read(&lock->val) != 0)
+ cpu_relax();
+ } while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

return true;
}
+#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>
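
(Illustrative note, not part of the patch: the loop above is a test-and-test-and-set lock. Spinning on a plain read keeps the cache line shared while the lock is held; the atomic swap is attempted only once the lock looks free. The same shape in standalone C11:)

#include <stdatomic.h>

/* Standalone test-and-test-and-set lock, mirroring the fallback above. */
static void ttas_lock(atomic_int *lock)
{
	do {
		while (atomic_load_explicit(lock, memory_order_relaxed) != 0)
			;	/* read-only spin: no cache-line ping-pong */
	} while (atomic_exchange_explicit(lock, 1,
					  memory_order_acquire) != 0);
}

static void ttas_unlock(atomic_int *lock)
{
	atomic_store_explicit(lock, 0, memory_order_release);
}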
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c42827eb86cf..25f909362b7a 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -338,10 +338,15 @@ done:

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
+ unsigned long flags;
+
if (instr[0] != 0x90)
return;

+ local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ sync_core();
+ local_irq_restore(flags);

DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index cde732c1b495..307a49828826 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);

if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 206052e55517..5880b482d83c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2522,6 +2522,7 @@ void __init setup_ioapic_dest(void)
int pin, ioapic, irq, irq_entry;
const struct cpumask *mask;
struct irq_data *idata;
+ struct irq_chip *chip;

if (skip_ioapic_setup == 1)
return;
@@ -2545,9 +2546,9 @@ void __init setup_ioapic_dest(void)
else
mask = apic->target_cpus();

- irq_set_affinity(irq, mask);
+ chip = irq_data_get_irq_chip(idata);
+ chip->irq_set_affinity(idata, mask, false);
}
-
}
#endif

diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 6326ae24e4d5..1b09c420c7ff 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2102,9 +2102,12 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = cpuc->event_constraint[idx];
+ struct event_constraint *c1 = NULL;
struct event_constraint *c2;

+ if (idx >= 0) /* fake does < 0 */
+ c1 = cpuc->event_constraint[idx];
+
/*
* first time only
* - static constraint: no change across incremental scheduling calls
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index e068d6683dba..74ca2fe7a0b3 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}

#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
- unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
- int *nr_ranges = arg;
+ unsigned int *nr_ranges = arg;

(*nr_ranges)++;
return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,

ced->image = image;

- walk_system_ram_range(0, -1, &nr_ranges,
+ walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);

ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 58bcfb67c01f..ebb5657ee280 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>

-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index f6b916387590..a90ac95562af 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -497,27 +497,59 @@ void set_personality_ia32(bool x32)
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long stack;
- u64 fp, ip;
+ unsigned long start, bottom, top, sp, fp, ip;
int count = 0;

if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
+ return 0;
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The tasks stack pointer points at the location where the
+ * framepointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
return 0;
- fp = *(u64 *)(p->thread.sp);
+
+ fp = READ_ONCE(*(unsigned long *)sp);
do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
+ if (fp < bottom || fp > top)
return 0;
- ip = *(u64 *)(fp+8);
+ ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip))
return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
+ fp = READ_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
return 0;
}

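(Illustrative note, not part of the patch: the bounded frame-pointer walk above, extracted into a standalone sketch. Each frame stores the caller's FP at *fp with the return IP one word higher; the walk follows the chain until it finds a return address outside the scheduler, and the bounds test plus the 16-step cap guard against a stack mutating under the reader. looks_like_scheduler() stands in for the kernel's in_sched_functions().)

#include <stdint.h>
#include <stddef.h>

/* Stand-in for the kernel's in_sched_functions() test. */
extern int looks_like_scheduler(uintptr_t ip);

/* Walk saved (IP, FP) pairs: *fp is the previous FP, *(fp + 1 word)
 * is the return address in the caller. */
uintptr_t wchan_sketch(uintptr_t fp, uintptr_t bottom, uintptr_t top)
{
	for (int count = 0; count < 16; count++) {
		if (fp < bottom || fp > top)
			return 0;	/* frame chain left the stack: give up */
		uintptr_t ip = *(uintptr_t *)(fp + sizeof(uintptr_t));
		if (!looks_like_scheduler(ip))
			return ip;	/* first non-scheduler caller */
		fp = *(uintptr_t *)fp;
	}
	return 0;
}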
1281     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
1282     index 7437b41f6a47..dc9af7a0839a 100644
1283     --- a/arch/x86/kernel/tsc.c
1284     +++ b/arch/x86/kernel/tsc.c
1285     @@ -21,6 +21,7 @@
1286     #include <asm/hypervisor.h>
1287     #include <asm/nmi.h>
1288     #include <asm/x86_init.h>
1289     +#include <asm/geode.h>
1290    
1291     unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
1292     EXPORT_SYMBOL(cpu_khz);
1293     @@ -1013,15 +1014,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);
1294    
1295     static void __init check_system_tsc_reliable(void)
1296     {
1297     -#ifdef CONFIG_MGEODE_LX
1298     - /* RTSC counts during suspend */
1299     +#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
1300     + if (is_geode_lx()) {
1301     + /* RTSC counts during suspend */
1302     #define RTSC_SUSP 0x100
1303     - unsigned long res_low, res_high;
1304     + unsigned long res_low, res_high;
1305    
1306     - rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1307     - /* Geode_LX - the OLPC CPU has a very reliable TSC */
1308     - if (res_low & RTSC_SUSP)
1309     - tsc_clocksource_reliable = 1;
1310     + rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
1311     + /* Geode_LX - the OLPC CPU has a very reliable TSC */
1312     + if (res_low & RTSC_SUSP)
1313     + tsc_clocksource_reliable = 1;
1314     + }
1315     #endif
1316     if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
1317     tsc_clocksource_reliable = 1;
1318     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1319     index 8e0c0844c6b9..2d32b67a1043 100644
1320     --- a/arch/x86/kvm/svm.c
1321     +++ b/arch/x86/kvm/svm.c
1322     @@ -513,7 +513,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1323     struct vcpu_svm *svm = to_svm(vcpu);
1324    
1325     if (svm->vmcb->control.next_rip != 0) {
1326     - WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
1327     + WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
1328     svm->next_rip = svm->vmcb->control.next_rip;
1329     }
1330    
1331     @@ -865,64 +865,6 @@ static void svm_disable_lbrv(struct vcpu_svm *svm)
1332     set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
1333     }
1334    
1335     -#define MTRR_TYPE_UC_MINUS 7
1336     -#define MTRR2PROTVAL_INVALID 0xff
1337     -
1338     -static u8 mtrr2protval[8];
1339     -
1340     -static u8 fallback_mtrr_type(int mtrr)
1341     -{
1342     - /*
1343     - * WT and WP aren't always available in the host PAT. Treat
1344     - * them as UC and UC- respectively. Everything else should be
1345     - * there.
1346     - */
1347     - switch (mtrr)
1348     - {
1349     - case MTRR_TYPE_WRTHROUGH:
1350     - return MTRR_TYPE_UNCACHABLE;
1351     - case MTRR_TYPE_WRPROT:
1352     - return MTRR_TYPE_UC_MINUS;
1353     - default:
1354     - BUG();
1355     - }
1356     -}
1357     -
1358     -static void build_mtrr2protval(void)
1359     -{
1360     - int i;
1361     - u64 pat;
1362     -
1363     - for (i = 0; i < 8; i++)
1364     - mtrr2protval[i] = MTRR2PROTVAL_INVALID;
1365     -
1366     - /* Ignore the invalid MTRR types. */
1367     - mtrr2protval[2] = 0;
1368     - mtrr2protval[3] = 0;
1369     -
1370     - /*
1371     - * Use host PAT value to figure out the mapping from guest MTRR
1372     - * values to nested page table PAT/PCD/PWT values. We do not
1373     - * want to change the host PAT value every time we enter the
1374     - * guest.
1375     - */
1376     - rdmsrl(MSR_IA32_CR_PAT, pat);
1377     - for (i = 0; i < 8; i++) {
1378     - u8 mtrr = pat >> (8 * i);
1379     -
1380     - if (mtrr2protval[mtrr] == MTRR2PROTVAL_INVALID)
1381     - mtrr2protval[mtrr] = __cm_idx2pte(i);
1382     - }
1383     -
1384     - for (i = 0; i < 8; i++) {
1385     - if (mtrr2protval[i] == MTRR2PROTVAL_INVALID) {
1386     - u8 fallback = fallback_mtrr_type(i);
1387     - mtrr2protval[i] = mtrr2protval[fallback];
1388     - BUG_ON(mtrr2protval[i] == MTRR2PROTVAL_INVALID);
1389     - }
1390     - }
1391     -}
1392     -
1393     static __init int svm_hardware_setup(void)
1394     {
1395     int cpu;
1396     @@ -989,7 +931,6 @@ static __init int svm_hardware_setup(void)
1397     } else
1398     kvm_disable_tdp();
1399    
1400     - build_mtrr2protval();
1401     return 0;
1402    
1403     err:
1404     @@ -1144,39 +1085,6 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1405     return target_tsc - tsc;
1406     }
1407    
1408     -static void svm_set_guest_pat(struct vcpu_svm *svm, u64 *g_pat)
1409     -{
1410     - struct kvm_vcpu *vcpu = &svm->vcpu;
1411     -
1412     - /* Unlike Intel, AMD takes the guest's CR0.CD into account.
1413     - *
1414     - * AMD does not have IPAT. To emulate it for the case of guests
1415     - * with no assigned devices, just set everything to WB. If guests
1416     - * have assigned devices, however, we cannot force WB for RAM
1417     - * pages only, so use the guest PAT directly.
1418     - */
1419     - if (!kvm_arch_has_assigned_device(vcpu->kvm))
1420     - *g_pat = 0x0606060606060606;
1421     - else
1422     - *g_pat = vcpu->arch.pat;
1423     -}
1424     -
1425     -static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1426     -{
1427     - u8 mtrr;
1428     -
1429     - /*
1430     - * 1. MMIO: trust guest MTRR, so same as item 3.
1431     - * 2. No passthrough: always map as WB, and force guest PAT to WB as well
1432     - * 3. Passthrough: can't guarantee the result, try to trust guest.
1433     - */
1434     - if (!is_mmio && !kvm_arch_has_assigned_device(vcpu->kvm))
1435     - return 0;
1436     -
1437     - mtrr = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
1438     - return mtrr2protval[mtrr];
1439     -}
1440     -
1441     static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1442     {
1443     struct vmcb_control_area *control = &svm->vmcb->control;
1444     @@ -1260,6 +1168,7 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1445     * It also updates the guest-visible cr0 value.
1446     */
1447     (void)kvm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1448     + kvm_mmu_reset_context(&svm->vcpu);
1449    
1450     save->cr4 = X86_CR4_PAE;
1451     /* rdx = ?? */
1452     @@ -1272,7 +1181,6 @@ static void init_vmcb(struct vcpu_svm *svm, bool init_event)
1453     clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1454     clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1455     save->g_pat = svm->vcpu.arch.pat;
1456     - svm_set_guest_pat(svm, &save->g_pat);
1457     save->cr3 = 0;
1458     save->cr4 = 0;
1459     }
1460     @@ -3347,16 +3255,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1461     case MSR_VM_IGNNE:
1462     vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
1463     break;
1464     - case MSR_IA32_CR_PAT:
1465     - if (npt_enabled) {
1466     - if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
1467     - return 1;
1468     - vcpu->arch.pat = data;
1469     - svm_set_guest_pat(svm, &svm->vmcb->save.g_pat);
1470     - mark_dirty(svm->vmcb, VMCB_NPT);
1471     - break;
1472     - }
1473     - /* fall through */
1474     default:
1475     return kvm_set_msr_common(vcpu, msr);
1476     }
1477     @@ -4191,6 +4089,11 @@ static bool svm_has_high_real_mode_segbase(void)
1478     return true;
1479     }
1480    
1481     +static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1482     +{
1483     + return 0;
1484     +}
1485     +
1486     static void svm_cpuid_update(struct kvm_vcpu *vcpu)
1487     {
1488     }
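
The removed build_mtrr2protval() walked the eight byte-wide fields of the IA32_PAT MSR value to build its lookup table. A minimal standalone C sketch of just that decoding step; the MSR value below is the architectural power-on default, used purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Power-on default PAT: WB, WT, UC-, UC, repeated twice. */
        uint64_t pat = 0x0007040600070406ULL;
        int i;

        for (i = 0; i < 8; i++) {
            uint8_t type = pat >> (8 * i); /* memory type of PAT entry i */
            printf("PAT entry %d -> type 0x%02x\n", i, type);
        }
        return 0;
    }
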
1489     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1490     index 83b7b5cd75d5..aa9e8229571d 100644
1491     --- a/arch/x86/kvm/vmx.c
1492     +++ b/arch/x86/kvm/vmx.c
1493     @@ -6134,6 +6134,8 @@ static __init int hardware_setup(void)
1494     memcpy(vmx_msr_bitmap_longmode_x2apic,
1495     vmx_msr_bitmap_longmode, PAGE_SIZE);
1496    
1497     + set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
1498     +
1499     if (enable_apicv) {
1500     for (msr = 0x800; msr <= 0x8ff; msr++)
1501     vmx_disable_intercept_msr_read_x2apic(msr);
1502     @@ -8632,17 +8634,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
1503     u64 ipat = 0;
1504    
1505     /* For VT-d and EPT combination
1506     - * 1. MMIO: guest may want to apply WC, trust it.
1507     + * 1. MMIO: always map as UC
1508     * 2. EPT with VT-d:
1509     * a. VT-d without snooping control feature: can't guarantee the
1510     - * result, try to trust guest. So the same as item 1.
1511     + * result, try to trust guest.
1512     * b. VT-d with snooping control feature: snooping control feature of
1513     * VT-d engine can guarantee the cache correctness. Just set it
1514     * to WB to keep consistent with host. So the same as item 3.
1515     * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
1516     * consistent with host MTRR
1517     */
1518     - if (!is_mmio && !kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
1519     + if (is_mmio) {
1520     + cache = MTRR_TYPE_UNCACHABLE;
1521     + goto exit;
1522     + }
1523     +
1524     + if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
1525     ipat = VMX_EPT_IPAT_BIT;
1526     cache = MTRR_TYPE_WRBACK;
1527     goto exit;
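
The rewritten vmx_get_mt_mask() now checks MMIO before anything else. A hedged sketch of the new decision order, with stand-in constants and parameters in place of the kernel's types (the values mirror MTRR_TYPE_UNCACHABLE and MTRR_TYPE_WRBACK):

    #include <stdio.h>

    enum { MTRR_UC = 0, MTRR_WB = 6 };

    /* guest_mtrr stands in for kvm_mtrr_get_guest_memory_type(). */
    static int mt_for_gfn(int is_mmio, int has_noncoherent_dma, int guest_mtrr)
    {
        if (is_mmio)
            return MTRR_UC;     /* 1. MMIO: always map uncached now */
        if (!has_noncoherent_dma)
            return MTRR_WB;     /* 2b/3. force WB; real code also sets IPAT */
        return guest_mtrr;      /* 2a. non-snooping VT-d: trust the guest */
    }

    int main(void)
    {
        printf("mmio=%d coherent=%d passthrough=%d\n",
               mt_for_gfn(1, 1, MTRR_WB),   /* 0: UC */
               mt_for_gfn(0, 0, MTRR_WB),   /* 6: WB */
               mt_for_gfn(0, 1, 4));        /* 4: guest's own type */
        return 0;
    }
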
1528     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1529     index 8f0f6eca69da..32c6e6ac5964 100644
1530     --- a/arch/x86/kvm/x86.c
1531     +++ b/arch/x86/kvm/x86.c
1532     @@ -2388,6 +2388,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1533     case MSR_IA32_LASTINTFROMIP:
1534     case MSR_IA32_LASTINTTOIP:
1535     case MSR_K8_SYSCFG:
1536     + case MSR_K8_TSEG_ADDR:
1537     + case MSR_K8_TSEG_MASK:
1538     case MSR_K7_HWCR:
1539     case MSR_VM_HSAVE_PA:
1540     case MSR_K8_INT_PENDING_MSG:
1541     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
1542     index 3fba623e3ba5..f9977a7a9444 100644
1543     --- a/arch/x86/mm/init_64.c
1544     +++ b/arch/x86/mm/init_64.c
1545     @@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
1546     * has been zapped already via cleanup_highmem().
1547     */
1548     all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1549     - set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
1550     + set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
1551    
1552     rodata_test();
1553    
1554     diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
1555     index 27062303c881..7553921c146c 100644
1556     --- a/arch/x86/pci/intel_mid_pci.c
1557     +++ b/arch/x86/pci/intel_mid_pci.c
1558     @@ -35,6 +35,9 @@
1559    
1560     #define PCIE_CAP_OFFSET 0x100
1561    
1562     +/* Quirks for the listed devices */
1563     +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190
1564     +
1565     /* Fixed BAR fields */
1566     #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */
1567     #define PCI_FIXED_BAR_0_SIZE 0x04
1568     @@ -214,10 +217,27 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
1569     if (dev->irq_managed && dev->irq > 0)
1570     return 0;
1571    
1572     - if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
1573     + switch (intel_mid_identify_cpu()) {
1574     + case INTEL_MID_CPU_CHIP_TANGIER:
1575     polarity = 0; /* active high */
1576     - else
1577     +
1578     + /* Special treatment for IRQ0 */
1579     + if (dev->irq == 0) {
1580     + /*
1581     + * TNG has IRQ0 assigned to eMMC controller. But there
1582     + * are also other devices with bogus PCI configuration
1583     + * that have IRQ0 assigned. This check ensures that
1584     + * eMMC gets it.
1585     + */
1586     + if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
1587     + return -EBUSY;
1588     + }
1589     + break;
1590     + default:
1591     polarity = 1; /* active low */
1592     + break;
1593     + }
1594     +
1595     ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
1596    
1597     /*
1598     diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
1599     index e4308fe6afe8..c6835bfad3a1 100644
1600     --- a/arch/x86/platform/efi/efi.c
1601     +++ b/arch/x86/platform/efi/efi.c
1602     @@ -705,6 +705,70 @@ out:
1603     }
1604    
1605     /*
1606     + * Iterate the EFI memory map in reverse order because the regions
1607     + * will be mapped top-down. The end result is the same as if we had
1608     + * mapped things forward, but doesn't require us to change the
1609     + * existing implementation of efi_map_region().
1610     + */
1611     +static inline void *efi_map_next_entry_reverse(void *entry)
1612     +{
1613     + /* Initial call */
1614     + if (!entry)
1615     + return memmap.map_end - memmap.desc_size;
1616     +
1617     + entry -= memmap.desc_size;
1618     + if (entry < memmap.map)
1619     + return NULL;
1620     +
1621     + return entry;
1622     +}
1623     +
1624     +/*
1625     + * efi_map_next_entry - Return the next EFI memory map descriptor
1626     + * @entry: Previous EFI memory map descriptor
1627     + *
1628     + * This is a helper function to iterate over the EFI memory map, which
1629     + * we do in different orders depending on the current configuration.
1630     + *
1631     + * To begin traversing the memory map @entry must be %NULL.
1632     + *
1633     + * Returns %NULL when we reach the end of the memory map.
1634     + */
1635     +static void *efi_map_next_entry(void *entry)
1636     +{
1637     + if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
1638     + /*
1639     + * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
1640     + * config table feature requires us to map all entries
1641     + * in the same order as they appear in the EFI memory
1642     + * map. That is to say, entry N must have a lower
1643     + * virtual address than entry N+1. This is because the
1644     + * firmware toolchain leaves relative references in
1645     + * the code/data sections, which are split and become
1646     + * separate EFI memory regions. Mapping things
1647     + * out-of-order leads to the firmware accessing
1648     + * unmapped addresses.
1649     + *
1650     + * Since we need to map things this way whether or not
1651     + * the kernel actually makes use of
1652     + * EFI_PROPERTIES_TABLE, let's just switch to this
1653     + * scheme by default for 64-bit.
1654     + */
1655     + return efi_map_next_entry_reverse(entry);
1656     + }
1657     +
1658     + /* Initial call */
1659     + if (!entry)
1660     + return memmap.map;
1661     +
1662     + entry += memmap.desc_size;
1663     + if (entry >= memmap.map_end)
1664     + return NULL;
1665     +
1666     + return entry;
1667     +}
1668     +
1669     +/*
1670     * Map the efi memory ranges of the runtime services and update new_mmap with
1671     * virtual addresses.
1672     */
1673     @@ -714,7 +778,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
1674     unsigned long left = 0;
1675     efi_memory_desc_t *md;
1676    
1677     - for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
1678     + p = NULL;
1679     + while ((p = efi_map_next_entry(p))) {
1680     md = p;
1681     if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
1682     #ifdef CONFIG_X86_64
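
efi_map_next_entry() turns the old pointer-bump loop into a cursor-style iterator: pass NULL to start the walk, get NULL back at the end. A standalone sketch of the reverse variant, with a plain array standing in for memmap.map and memmap.desc_size:

    #include <stddef.h>
    #include <stdio.h>

    #define DESC_SIZE 8
    static char map[4 * DESC_SIZE];         /* four fake descriptors */
    #define MAP_END (map + sizeof(map))

    static void *next_entry_reverse(void *entry)
    {
        if (!entry)                         /* initial call */
            return MAP_END - DESC_SIZE;
        entry = (char *)entry - DESC_SIZE;
        return (char *)entry < map ? NULL : entry;
    }

    int main(void)
    {
        void *p = NULL;

        while ((p = next_entry_reverse(p)))
            printf("descriptor %td\n", ((char *)p - map) / DESC_SIZE);
        return 0;
    }
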
1683     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
1684     index 11d6fb4e8483..777ad2f03160 100644
1685     --- a/arch/x86/xen/enlighten.c
1686     +++ b/arch/x86/xen/enlighten.c
1687     @@ -33,6 +33,10 @@
1688     #include <linux/memblock.h>
1689     #include <linux/edd.h>
1690    
1691     +#ifdef CONFIG_KEXEC_CORE
1692     +#include <linux/kexec.h>
1693     +#endif
1694     +
1695     #include <xen/xen.h>
1696     #include <xen/events.h>
1697     #include <xen/interface/xen.h>
1698     @@ -1800,6 +1804,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
1699     .notifier_call = xen_hvm_cpu_notify,
1700     };
1701    
1702     +#ifdef CONFIG_KEXEC_CORE
1703     +static void xen_hvm_shutdown(void)
1704     +{
1705     + native_machine_shutdown();
1706     + if (kexec_in_progress)
1707     + xen_reboot(SHUTDOWN_soft_reset);
1708     +}
1709     +
1710     +static void xen_hvm_crash_shutdown(struct pt_regs *regs)
1711     +{
1712     + native_machine_crash_shutdown(regs);
1713     + xen_reboot(SHUTDOWN_soft_reset);
1714     +}
1715     +#endif
1716     +
1717     static void __init xen_hvm_guest_init(void)
1718     {
1719     if (xen_pv_domain())
1720     @@ -1819,6 +1838,10 @@ static void __init xen_hvm_guest_init(void)
1721     x86_init.irqs.intr_init = xen_init_IRQ;
1722     xen_hvm_init_time_ops();
1723     xen_hvm_init_mmu_ops();
1724     +#ifdef CONFIG_KEXEC_CORE
1725     + machine_ops.shutdown = xen_hvm_shutdown;
1726     + machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
1727     +#endif
1728     }
1729     #endif
1730    
1731     diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
1732     index d6283b3f5db5..9cc48d1d7abb 100644
1733     --- a/block/blk-cgroup.c
1734     +++ b/block/blk-cgroup.c
1735     @@ -387,6 +387,9 @@ static void blkg_destroy_all(struct request_queue *q)
1736     blkg_destroy(blkg);
1737     spin_unlock(&blkcg->lock);
1738     }
1739     +
1740     + q->root_blkg = NULL;
1741     + q->root_rl.blkg = NULL;
1742     }
1743    
1744     /*
1745     diff --git a/block/blk-mq.c b/block/blk-mq.c
1746     index 176262ec3731..c69902695136 100644
1747     --- a/block/blk-mq.c
1748     +++ b/block/blk-mq.c
1749     @@ -1807,7 +1807,6 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1750    
1751     hctx = q->mq_ops->map_queue(q, i);
1752     cpumask_set_cpu(i, hctx->cpumask);
1753     - cpumask_set_cpu(i, hctx->tags->cpumask);
1754     ctx->index_hw = hctx->nr_ctx;
1755     hctx->ctxs[hctx->nr_ctx++] = ctx;
1756     }
1757     @@ -1847,6 +1846,14 @@ static void blk_mq_map_swqueue(struct request_queue *q)
1758     hctx->next_cpu = cpumask_first(hctx->cpumask);
1759     hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
1760     }
1761     +
1762     + queue_for_each_ctx(q, ctx, i) {
1763     + if (!cpu_online(i))
1764     + continue;
1765     +
1766     + hctx = q->mq_ops->map_queue(q, i);
1767     + cpumask_set_cpu(i, hctx->tags->cpumask);
1768     + }
1769     }
1770    
1771     static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set)
1772     diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
1773     index 764280a91776..e9fd32e91668 100644
1774     --- a/drivers/base/cacheinfo.c
1775     +++ b/drivers/base/cacheinfo.c
1776     @@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
1777    
1778     if (sibling == cpu) /* skip itself */
1779     continue;
1780     +
1781     sib_cpu_ci = get_cpu_cacheinfo(sibling);
1782     + if (!sib_cpu_ci->info_list)
1783     + continue;
1784     +
1785     sib_leaf = sib_cpu_ci->info_list + index;
1786     cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
1787     cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
1788     @@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
1789    
1790     static void free_cache_attributes(unsigned int cpu)
1791     {
1792     + if (!per_cpu_cacheinfo(cpu))
1793     + return;
1794     +
1795     cache_shared_cpu_map_remove(cpu);
1796    
1797     kfree(per_cpu_cacheinfo(cpu));
1798     @@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
1799     break;
1800     case CPU_DEAD:
1801     cache_remove_dev(cpu);
1802     - if (per_cpu_cacheinfo(cpu))
1803     - free_cache_attributes(cpu);
1804     + free_cache_attributes(cpu);
1805     break;
1806     }
1807     return notifier_from_errno(rc);
1808     diff --git a/drivers/base/property.c b/drivers/base/property.c
1809     index f3f6d167f3f1..37a7bb7b239d 100644
1810     --- a/drivers/base/property.c
1811     +++ b/drivers/base/property.c
1812     @@ -27,9 +27,10 @@
1813     */
1814     void device_add_property_set(struct device *dev, struct property_set *pset)
1815     {
1816     - if (pset)
1817     - pset->fwnode.type = FWNODE_PDATA;
1818     + if (!pset)
1819     + return;
1820    
1821     + pset->fwnode.type = FWNODE_PDATA;
1822     set_secondary_fwnode(dev, &pset->fwnode);
1823     }
1824     EXPORT_SYMBOL_GPL(device_add_property_set);
1825     diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
1826     index 5799a0b9e6cc..c8941f39c919 100644
1827     --- a/drivers/base/regmap/regmap-debugfs.c
1828     +++ b/drivers/base/regmap/regmap-debugfs.c
1829     @@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
1830     /* Calculate the length of a fixed format */
1831     static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
1832     {
1833     - snprintf(buf, buf_size, "%x", max_val);
1834     - return strlen(buf);
1835     + return snprintf(NULL, 0, "%x", max_val);
1836     }
1837    
1838     static ssize_t regmap_name_read_file(struct file *file,
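
The regmap change leans on a standard C idiom: snprintf() with a NULL buffer and zero size writes nothing and returns the length the formatted output would need, which is all regmap_calc_reg_len() ever wanted, and it avoids the scratch buffer entirely. Standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        int max_val = 0x3fff;
        int len = snprintf(NULL, 0, "%x", max_val);

        printf("\"%x\" needs %d characters\n", max_val, len); /* 4 */
        return 0;
    }
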
1839     @@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
1840     /* If we're in the region the user is trying to read */
1841     if (p >= *ppos) {
1842     /* ...but not beyond it */
1843     - if (buf_pos >= count - 1 - tot_len)
1844     + if (buf_pos + tot_len + 1 >= count)
1845     break;
1846    
1847     /* Format the register */
1848     diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
1849     index deb3f001791f..767657565de6 100644
1850     --- a/drivers/block/xen-blkback/xenbus.c
1851     +++ b/drivers/block/xen-blkback/xenbus.c
1852     @@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
1853    
1854     static int xen_blkif_disconnect(struct xen_blkif *blkif)
1855     {
1856     + struct pending_req *req, *n;
1857     + int i = 0, j;
1858     +
1859     if (blkif->xenblkd) {
1860     kthread_stop(blkif->xenblkd);
1861     wake_up(&blkif->shutdown_wq);
1862     @@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
1863     /* Remove all persistent grants and the cache of ballooned pages. */
1864     xen_blkbk_free_caches(blkif);
1865    
1866     + /* Check that there is no request in use */
1867     + list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
1868     + list_del(&req->free_list);
1869     +
1870     + for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
1871     + kfree(req->segments[j]);
1872     +
1873     + for (j = 0; j < MAX_INDIRECT_PAGES; j++)
1874     + kfree(req->indirect_pages[j]);
1875     +
1876     + kfree(req);
1877     + i++;
1878     + }
1879     +
1880     + WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
1881     + blkif->nr_ring_pages = 0;
1882     +
1883     return 0;
1884     }
1885    
1886     static void xen_blkif_free(struct xen_blkif *blkif)
1887     {
1888     - struct pending_req *req, *n;
1889     - int i = 0, j;
1890    
1891     xen_blkif_disconnect(blkif);
1892     xen_vbd_free(&blkif->vbd);
1893     @@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
1894     BUG_ON(!list_empty(&blkif->free_pages));
1895     BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
1896    
1897     - /* Check that there is no request in use */
1898     - list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
1899     - list_del(&req->free_list);
1900     -
1901     - for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
1902     - kfree(req->segments[j]);
1903     -
1904     - for (j = 0; j < MAX_INDIRECT_PAGES; j++)
1905     - kfree(req->indirect_pages[j]);
1906     -
1907     - kfree(req);
1908     - i++;
1909     - }
1910     -
1911     - WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
1912     -
1913     kmem_cache_free(xen_blkif_cachep, blkif);
1914     }
1915    
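
The relocated teardown loop uses list_for_each_entry_safe() because it frees the node it is standing on. The same requirement shows up in any linked-list drain; a minimal sketch with a plain singly linked list in place of the kernel's list_head:

    #include <stdlib.h>
    #include <stdio.h>

    struct req {
        struct req *next;
    };

    static void drain(struct req *head)
    {
        struct req *it = head, *n;

        while (it) {
            n = it->next;   /* grab the successor first... */
            free(it);       /* ...then the node itself can go */
            it = n;
        }
    }

    int main(void)
    {
        struct req *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
            struct req *r = malloc(sizeof(*r));
            r->next = head;
            head = r;
        }
        drain(head);
        puts("all pending requests freed");
        return 0;
    }
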
1916     diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
1917     index 3a1fe07cfe9e..dd02356e2e86 100644
1918     --- a/drivers/clk/samsung/clk-cpu.c
1919     +++ b/drivers/clk/samsung/clk-cpu.c
1920     @@ -161,7 +161,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1921     * the values for DIV_COPY and DIV_HPM dividers need not be set.
1922     */
1923     div0 = cfg_data->div0;
1924     - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
1925     + if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
1926     div1 = cfg_data->div1;
1927     if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK)
1928     div1 = readl(base + E4210_DIV_CPU1) &
1929     @@ -182,7 +182,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1930     alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
1931     WARN_ON(alt_div >= MAX_DIV);
1932    
1933     - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
1934     + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1935     /*
1936     * In Exynos4210, ATB clock parent is also mout_core. So
1937     * ATB clock also needs to be maintained at safe speed.
1938     @@ -203,7 +203,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1939     writel(div0, base + E4210_DIV_CPU0);
1940     wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL);
1941    
1942     - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) {
1943     + if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
1944     writel(div1, base + E4210_DIV_CPU1);
1945     wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
1946     DIV_MASK_ALL);
1947     @@ -222,7 +222,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1948     unsigned long mux_reg;
1949    
1950     /* find out the divider values to use for clock data */
1951     - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
1952     + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1953     while ((cfg_data->prate * 1000) != ndata->new_rate) {
1954     if (cfg_data->prate == 0)
1955     return -EINVAL;
1956     @@ -237,7 +237,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1957     writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU);
1958     wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1);
1959    
1960     - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) {
1961     + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1962     div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK);
1963     div_mask |= E4210_DIV0_ATB_MASK;
1964     }
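
The clk-cpu hunks fix a subtle API mix-up: test_bit() takes a bit *number*, while the CLK_CPU_* flags are bit *masks*, so test_bit(mask, &flags) probes the wrong bit entirely. A standalone demonstration; the mask value here is made up for illustration:

    #include <stdio.h>

    #define CLK_CPU_HAS_DIV1 (1 << 2)   /* a mask, value 4 */

    /* Simplified non-atomic test_bit(): nr is a bit NUMBER. */
    static int test_bit(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1;
    }

    int main(void)
    {
        unsigned long flags = CLK_CPU_HAS_DIV1;

        /* Probes bit 4, but the flag lives in bit 2: */
        printf("test_bit(mask, &flags) = %d\n",
               test_bit(CLK_CPU_HAS_DIV1, &flags)); /* 0: wrong */
        printf("flags & mask          = %d\n",
               !!(flags & CLK_CPU_HAS_DIV1));       /* 1: right */
        return 0;
    }
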
1965     diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
1966     index 757636d166cf..4ab28cfb8d2a 100644
1967     --- a/drivers/clk/ti/clk-3xxx.c
1968     +++ b/drivers/clk/ti/clk-3xxx.c
1969     @@ -163,7 +163,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
1970     DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
1971     DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
1972     DT_CLK(NULL, "uart3_ick", "uart3_ick"),
1973     - DT_CLK(NULL, "uart4_ick", "uart4_ick"),
1974     DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
1975     DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
1976     DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
1977     @@ -308,6 +307,7 @@ static struct ti_dt_clk am35xx_clks[] = {
1978     static struct ti_dt_clk omap36xx_clks[] = {
1979     DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
1980     DT_CLK(NULL, "uart4_fck", "uart4_fck"),
1981     + DT_CLK(NULL, "uart4_ick", "uart4_ick"),
1982     { .node_name = NULL },
1983     };
1984    
1985     diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
1986     index 63b8323df918..0eb82107c421 100644
1987     --- a/drivers/clk/ti/clk-7xx.c
1988     +++ b/drivers/clk/ti/clk-7xx.c
1989     @@ -16,7 +16,6 @@
1990     #include <linux/clkdev.h>
1991     #include <linux/clk/ti.h>
1992    
1993     -#define DRA7_DPLL_ABE_DEFFREQ 180633600
1994     #define DRA7_DPLL_GMAC_DEFFREQ 1000000000
1995     #define DRA7_DPLL_USB_DEFFREQ 960000000
1996    
1997     @@ -312,27 +311,12 @@ static struct ti_dt_clk dra7xx_clks[] = {
1998     int __init dra7xx_dt_clk_init(void)
1999     {
2000     int rc;
2001     - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck;
2002     + struct clk *dpll_ck, *hdcp_ck;
2003    
2004     ti_dt_clocks_register(dra7xx_clks);
2005    
2006     omap2_clk_disable_autoidle_all();
2007    
2008     - abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux");
2009     - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2");
2010     - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck");
2011     -
2012     - rc = clk_set_parent(abe_dpll_mux, sys_clkin2);
2013     - if (!rc)
2014     - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ);
2015     - if (rc)
2016     - pr_err("%s: failed to configure ABE DPLL!\n", __func__);
2017     -
2018     - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
2019     - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
2020     - if (rc)
2021     - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
2022     -
2023     dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
2024     rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
2025     if (rc)
2026     diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
2027     index 0136dfcdabf0..7c2a7385c2ad 100644
2028     --- a/drivers/cpufreq/acpi-cpufreq.c
2029     +++ b/drivers/cpufreq/acpi-cpufreq.c
2030     @@ -146,6 +146,9 @@ static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
2031     {
2032     struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
2033    
2034     + if (unlikely(!data))
2035     + return -ENODEV;
2036     +
2037     return cpufreq_show_cpus(data->freqdomain_cpus, buf);
2038     }
2039    
2040     diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
2041     index 528a82bf5038..99a406501e8c 100644
2042     --- a/drivers/cpufreq/cpufreq-dt.c
2043     +++ b/drivers/cpufreq/cpufreq-dt.c
2044     @@ -255,7 +255,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
2045     rcu_read_unlock();
2046    
2047     tol_uV = opp_uV * priv->voltage_tolerance / 100;
2048     - if (regulator_is_supported_voltage(cpu_reg, opp_uV,
2049     + if (regulator_is_supported_voltage(cpu_reg,
2050     + opp_uV - tol_uV,
2051     opp_uV + tol_uV)) {
2052     if (opp_uV < min_uV)
2053     min_uV = opp_uV;
2054     diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
2055     index b60698b30d30..bc2a55bc35e4 100644
2056     --- a/drivers/crypto/marvell/cesa.h
2057     +++ b/drivers/crypto/marvell/cesa.h
2058     @@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
2059    
2060     int mv_cesa_queue_req(struct crypto_async_request *req);
2061    
2062     +/*
2063     + * Helper function that indicates whether a crypto request needs to be
2064     + * cleaned up or not after being enqueued using mv_cesa_queue_req().
2065     + */
2066     +static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
2067     + int ret)
2068     +{
2069     + /*
2070     + * The queue still had some space and the request was queued
2071     + * normally, so there's no need to clean it up.
2072     + */
2073     + if (ret == -EINPROGRESS)
2074     + return false;
2075     +
2076     + /*
2077     + * The queue had no space left, but since the request is
2078     + * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
2079     + * the backlog and will be processed later. There's no need to
2080     + * clean it up.
2081     + */
2082     + if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
2083     + return false;
2084     +
2085     + /* Request wasn't queued, we need to clean it up */
2086     + return true;
2087     +}
2088     +
2089     /* TDMA functions */
2090    
2091     static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
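
The new helper centralizes the crypto-queue backlog rule used by every caller in the cipher.c and hash.c hunks below: -EINPROGRESS means the request was queued, and -EBUSY is only a real failure when the request did not opt into backlogging. A standalone sketch of that rule, with stand-in constants for the kernel's errno values and CRYPTO_TFM_REQ_MAY_BACKLOG:

    #include <stdbool.h>
    #include <stdio.h>

    #define EINPROGRESS     115
    #define EBUSY           16
    #define REQ_MAY_BACKLOG 0x1     /* CRYPTO_TFM_REQ_MAY_BACKLOG stand-in */

    static bool needs_cleanup(int ret, unsigned int flags)
    {
        if (ret == -EINPROGRESS)
            return false;           /* queued normally */
        if (ret == -EBUSY && (flags & REQ_MAY_BACKLOG))
            return false;           /* parked on the backlog */
        return true;                /* rejected: caller must clean up */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               needs_cleanup(-EINPROGRESS, 0),          /* 0 */
               needs_cleanup(-EBUSY, REQ_MAY_BACKLOG),  /* 0 */
               needs_cleanup(-EBUSY, 0));               /* 1 */
        return 0;
    }
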
2092     diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
2093     index 0745cf3b9c0e..3df2f4e7adb2 100644
2094     --- a/drivers/crypto/marvell/cipher.c
2095     +++ b/drivers/crypto/marvell/cipher.c
2096     @@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
2097     {
2098     struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
2099     struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
2100     -
2101     creq->req.base.engine = engine;
2102    
2103     if (creq->req.base.type == CESA_DMA_REQ)
2104     @@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
2105     return ret;
2106    
2107     ret = mv_cesa_queue_req(&req->base);
2108     - if (ret && ret != -EINPROGRESS)
2109     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2110     mv_cesa_ablkcipher_cleanup(req);
2111    
2112     return ret;
2113     @@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
2114     return ret;
2115    
2116     ret = mv_cesa_queue_req(&req->base);
2117     - if (ret && ret != -EINPROGRESS)
2118     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2119     mv_cesa_ablkcipher_cleanup(req);
2120    
2121     return ret;
2122     @@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
2123     return ret;
2124    
2125     ret = mv_cesa_queue_req(&req->base);
2126     - if (ret && ret != -EINPROGRESS)
2127     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2128     mv_cesa_ablkcipher_cleanup(req);
2129    
2130     return ret;
2131     diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
2132     index ae9272eb9c1a..e8d0d7128137 100644
2133     --- a/drivers/crypto/marvell/hash.c
2134     +++ b/drivers/crypto/marvell/hash.c
2135     @@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
2136     return 0;
2137    
2138     ret = mv_cesa_queue_req(&req->base);
2139     - if (ret && ret != -EINPROGRESS) {
2140     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2141     mv_cesa_ahash_cleanup(req);
2142     - return ret;
2143     - }
2144    
2145     return ret;
2146     }
2147     @@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
2148     return 0;
2149    
2150     ret = mv_cesa_queue_req(&req->base);
2151     - if (ret && ret != -EINPROGRESS)
2152     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2153     mv_cesa_ahash_cleanup(req);
2154    
2155     return ret;
2156     @@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
2157     return 0;
2158    
2159     ret = mv_cesa_queue_req(&req->base);
2160     - if (ret && ret != -EINPROGRESS)
2161     + if (mv_cesa_req_needs_cleanup(&req->base, ret))
2162     mv_cesa_ahash_cleanup(req);
2163    
2164     return ret;
2165     diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
2166     index 40afa2a16cfc..da7917a2eed2 100644
2167     --- a/drivers/dma/at_xdmac.c
2168     +++ b/drivers/dma/at_xdmac.c
2169     @@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
2170     return desc;
2171     }
2172    
2173     +void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
2174     +{
2175     + memset(&desc->lld, 0, sizeof(desc->lld));
2176     + INIT_LIST_HEAD(&desc->descs_list);
2177     + desc->direction = DMA_TRANS_NONE;
2178     + desc->xfer_size = 0;
2179     + desc->active_xfer = false;
2180     +}
2181     +
2182     /* Call must be protected by lock. */
2183     static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
2184     {
2185     @@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
2186     desc = list_first_entry(&atchan->free_descs_list,
2187     struct at_xdmac_desc, desc_node);
2188     list_del(&desc->desc_node);
2189     - desc->active_xfer = false;
2190     + at_xdmac_init_used_desc(desc);
2191     }
2192    
2193     return desc;
2194     @@ -797,10 +806,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
2195     list_add_tail(&desc->desc_node, &first->descs_list);
2196     }
2197    
2198     - prev->lld.mbr_nda = first->tx_dma_desc.phys;
2199     - dev_dbg(chan2dev(chan),
2200     - "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
2201     - __func__, prev, &prev->lld.mbr_nda);
2202     + at_xdmac_queue_desc(chan, prev, first);
2203     first->tx_dma_desc.flags = flags;
2204     first->xfer_size = buf_len;
2205     first->direction = direction;
2206     @@ -878,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
2207    
2208     if (xt->src_inc) {
2209     if (xt->src_sgl)
2210     - chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
2211     + chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
2212     else
2213     chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
2214     }
2215    
2216     if (xt->dst_inc) {
2217     if (xt->dst_sgl)
2218     - chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
2219     + chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
2220     else
2221     chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
2222     }
2223     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
2224     index cf1c87fa1edd..bedce038c6e2 100644
2225     --- a/drivers/dma/dw/core.c
2226     +++ b/drivers/dma/dw/core.c
2227     @@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2228     INIT_LIST_HEAD(&dw->dma.channels);
2229     for (i = 0; i < nr_channels; i++) {
2230     struct dw_dma_chan *dwc = &dw->chan[i];
2231     - int r = nr_channels - i - 1;
2232    
2233     dwc->chan.device = &dw->dma;
2234     dma_cookie_init(&dwc->chan);
2235     @@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2236    
2237     /* 7 is highest priority & 0 is lowest. */
2238     if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
2239     - dwc->priority = r;
2240     + dwc->priority = nr_channels - i - 1;
2241     else
2242     dwc->priority = i;
2243    
2244     @@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2245     /* Hardware configuration */
2246     if (autocfg) {
2247     unsigned int dwc_params;
2248     + unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
2249     void __iomem *addr = chip->regs + r * sizeof(u32);
2250    
2251     dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
2252     diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
2253     index ddcbbf5cd9e9..95bdbbe2a671 100644
2254     --- a/drivers/dma/pxa_dma.c
2255     +++ b/drivers/dma/pxa_dma.c
2256     @@ -888,6 +888,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
2257     struct dma_async_tx_descriptor *tx;
2258     struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
2259    
2260     + INIT_LIST_HEAD(&vd->node);
2261     tx = vchan_tx_prep(vc, vd, tx_flags);
2262     tx->tx_submit = pxad_tx_submit;
2263     dev_dbg(&chan->vc.chan.dev->device,
2264     diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
2265     index 43b57b02d050..ca94f475fd05 100644
2266     --- a/drivers/extcon/extcon.c
2267     +++ b/drivers/extcon/extcon.c
2268     @@ -126,7 +126,7 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
2269    
2270     static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
2271     {
2272     - unsigned int id = -EINVAL;
2273     + int id = -EINVAL;
2274     int i = 0;
2275    
2276     /* Find the id of extcon cable */
2277     @@ -143,7 +143,7 @@ static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
2278    
2279     static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
2280     {
2281     - unsigned int id;
2282     + int id;
2283    
2284     if (edev->max_supported == 0)
2285     return -EINVAL;
2286     @@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
2287     static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
2288     {
2289     if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
2290     - *attached = new ? true : false;
2291     + *attached = ((new >> idx) & 0x1) ? true : false;
2292     return true;
2293     }
2294    
2295     @@ -378,7 +378,7 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
2296     */
2297     int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
2298     {
2299     - unsigned int id;
2300     + int id;
2301    
2302     id = find_cable_id_by_name(edev, cable_name);
2303     if (id < 0)
2304     @@ -426,7 +426,7 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
2305     int extcon_set_cable_state(struct extcon_dev *edev,
2306     const char *cable_name, bool cable_state)
2307     {
2308     - unsigned int id;
2309     + int id;
2310    
2311     id = find_cable_id_by_name(edev, cable_name);
2312     if (id < 0)
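
The extcon hunks all fix the same signedness bug: an error code stored in an unsigned variable can never compare < 0, so the `if (id < 0)` guards were dead code. Standalone demonstration:

    #include <stdio.h>

    #define EINVAL 22

    int main(void)
    {
        unsigned int uid = -EINVAL; /* wraps to 4294967274 */
        int id = -EINVAL;

        printf("unsigned < 0: %d\n", uid < 0 ? 1 : 0); /* 0: check is dead */
        printf("signed   < 0: %d\n", id < 0 ? 1 : 0);  /* 1: works */
        return 0;
    }
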
2313     diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
2314     index e29560e6b40b..950c87f5d279 100644
2315     --- a/drivers/firmware/efi/libstub/arm-stub.c
2316     +++ b/drivers/firmware/efi/libstub/arm-stub.c
2317     @@ -13,6 +13,7 @@
2318     */
2319    
2320     #include <linux/efi.h>
2321     +#include <linux/sort.h>
2322     #include <asm/efi.h>
2323    
2324     #include "efistub.h"
2325     @@ -305,6 +306,44 @@ fail:
2326     */
2327     #define EFI_RT_VIRTUAL_BASE 0x40000000
2328    
2329     +static int cmp_mem_desc(const void *l, const void *r)
2330     +{
2331     + const efi_memory_desc_t *left = l, *right = r;
2332     +
2333     + return (left->phys_addr > right->phys_addr) ? 1 : -1;
2334     +}
2335     +
2336     +/*
2337     + * Returns whether region @left ends exactly where region @right starts,
2338     + * or false if either argument is NULL.
2339     + */
2340     +static bool regions_are_adjacent(efi_memory_desc_t *left,
2341     + efi_memory_desc_t *right)
2342     +{
2343     + u64 left_end;
2344     +
2345     + if (left == NULL || right == NULL)
2346     + return false;
2347     +
2348     + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
2349     +
2350     + return left_end == right->phys_addr;
2351     +}
2352     +
2353     +/*
2354     + * Returns whether region @left and region @right have compatible memory type
2355     + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
2356     + */
2357     +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
2358     + efi_memory_desc_t *right)
2359     +{
2360     + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
2361     + EFI_MEMORY_WC | EFI_MEMORY_UC |
2362     + EFI_MEMORY_RUNTIME;
2363     +
2364     + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
2365     +}
2366     +
2367     /*
2368     * efi_get_virtmap() - create a virtual mapping for the EFI memory map
2369     *
2370     @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
2371     int *count)
2372     {
2373     u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
2374     - efi_memory_desc_t *out = runtime_map;
2375     + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
2376     int l;
2377    
2378     - for (l = 0; l < map_size; l += desc_size) {
2379     - efi_memory_desc_t *in = (void *)memory_map + l;
2380     + /*
2381     + * To work around potential issues with the Properties Table feature
2382     + * introduced in UEFI 2.5, which may split PE/COFF executable images
2383     + * in memory into several RuntimeServicesCode and RuntimeServicesData
2384     + * regions, we need to preserve the relative offsets between adjacent
2385     + * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
2386     + * The easiest way to find adjacent regions is to sort the memory map
2387     + * before traversing it.
2388     + */
2389     + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
2390     +
2391     + for (l = 0; l < map_size; l += desc_size, prev = in) {
2392     u64 paddr, size;
2393    
2394     + in = (void *)memory_map + l;
2395     if (!(in->attribute & EFI_MEMORY_RUNTIME))
2396     continue;
2397    
2398     + paddr = in->phys_addr;
2399     + size = in->num_pages * EFI_PAGE_SIZE;
2400     +
2401     /*
2402     * Make the mapping compatible with 64k pages: this allows
2403     * a 4k page size kernel to kexec a 64k page size kernel and
2404     * vice versa.
2405     */
2406     - paddr = round_down(in->phys_addr, SZ_64K);
2407     - size = round_up(in->num_pages * EFI_PAGE_SIZE +
2408     - in->phys_addr - paddr, SZ_64K);
2409     -
2410     - /*
2411     - * Avoid wasting memory on PTEs by choosing a virtual base that
2412     - * is compatible with section mappings if this region has the
2413     - * appropriate size and physical alignment. (Sections are 2 MB
2414     - * on 4k granule kernels)
2415     - */
2416     - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
2417     - efi_virt_base = round_up(efi_virt_base, SZ_2M);
2418     + if (!regions_are_adjacent(prev, in) ||
2419     + !regions_have_compatible_memory_type_attrs(prev, in)) {
2420     +
2421     + paddr = round_down(in->phys_addr, SZ_64K);
2422     + size += in->phys_addr - paddr;
2423     +
2424     + /*
2425     + * Avoid wasting memory on PTEs by choosing a virtual
2426     + * base that is compatible with section mappings if this
2427     + * region has the appropriate size and physical
2428     + * alignment. (Sections are 2 MB on 4k granule kernels)
2429     + */
2430     + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
2431     + efi_virt_base = round_up(efi_virt_base, SZ_2M);
2432     + else
2433     + efi_virt_base = round_up(efi_virt_base, SZ_64K);
2434     + }
2435    
2436     in->virt_addr = efi_virt_base + in->phys_addr - paddr;
2437     efi_virt_base += size;
2438     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
2439     index b4d36f0f2153..c098d762089c 100644
2440     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
2441     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
2442     @@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
2443     */
2444     int amdgpu_irq_postinstall(struct drm_device *dev)
2445     {
2446     - dev->max_vblank_count = 0x001fffff;
2447     + dev->max_vblank_count = 0x00ffffff;
2448     return 0;
2449     }
2450    
2451     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2452     index 2abc661845b6..ddcfbf3b188b 100644
2453     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2454     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
2455     @@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
2456     return -EINVAL;
2457     }
2458    
2459     - if (msg_type == 1) {
2460     + switch (msg_type) {
2461     + case 0:
2462     + /* it's a create msg, calc image size (width * height) */
2463     + amdgpu_bo_kunmap(bo);
2464     +
2465     + /* try to alloc a new handle */
2466     + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
2467     + if (atomic_read(&adev->uvd.handles[i]) == handle) {
2468     + DRM_ERROR("Handle 0x%x already in use!\n", handle);
2469     + return -EINVAL;
2470     + }
2471     +
2472     + if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
2473     + adev->uvd.filp[i] = ctx->parser->filp;
2474     + return 0;
2475     + }
2476     + }
2477     +
2478     + DRM_ERROR("No more free UVD handles!\n");
2479     + return -EINVAL;
2480     +
2481     + case 1:
2482     /* it's a decode msg, calc buffer sizes */
2483     r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
2484     amdgpu_bo_kunmap(bo);
2485     if (r)
2486     return r;
2487    
2488     - } else if (msg_type == 2) {
2489     + /* validate the handle */
2490     + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
2491     + if (atomic_read(&adev->uvd.handles[i]) == handle) {
2492     + if (adev->uvd.filp[i] != ctx->parser->filp) {
2493     + DRM_ERROR("UVD handle collision detected!\n");
2494     + return -EINVAL;
2495     + }
2496     + return 0;
2497     + }
2498     + }
2499     +
2500     + DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
2501     + return -ENOENT;
2502     +
2503     + case 2:
2504     /* it's a destroy msg, free the handle */
2505     for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
2506     atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
2507     amdgpu_bo_kunmap(bo);
2508     return 0;
2509     - } else {
2510     - /* it's a create msg */
2511     - amdgpu_bo_kunmap(bo);
2512     -
2513     - if (msg_type != 0) {
2514     - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
2515     - return -EINVAL;
2516     - }
2517     -
2518     - /* it's a create msg, no special handling needed */
2519     - }
2520     -
2521     - /* create or decode, validate the handle */
2522     - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
2523     - if (atomic_read(&adev->uvd.handles[i]) == handle)
2524     - return 0;
2525     - }
2526    
2527     - /* handle not found try to alloc a new one */
2528     - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
2529     - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
2530     - adev->uvd.filp[i] = ctx->parser->filp;
2531     - return 0;
2532     - }
2533     + default:
2534     + DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
2535     + return -EINVAL;
2536     }
2537     -
2538     - DRM_ERROR("No more free UVD handles!\n");
2539     + BUG();
2540     return -EINVAL;
2541     }
2542    
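
The restructured create path claims a handle slot by walking the table and atomically swinging the first zero entry to the new handle. A hedged standalone sketch of that allocation pattern, with GCC's builtin standing in for the kernel's atomic_cmpxchg():

    #include <stdio.h>

    #define MAX_HANDLES 16

    static unsigned int handles[MAX_HANDLES];

    /* Claim the first free slot, or -1 if the table is full. */
    static int alloc_handle(unsigned int handle)
    {
        int i;

        for (i = 0; i < MAX_HANDLES; i++) {
            unsigned int expected = 0;

            /* atomically: if (handles[i] == 0) handles[i] = handle */
            if (__atomic_compare_exchange_n(&handles[i], &expected,
                                            handle, 0,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_SEQ_CST))
                return i;
        }
        return -1;
    }

    int main(void)
    {
        printf("0x42 -> slot %d\n", alloc_handle(0x42));
        printf("0x43 -> slot %d\n", alloc_handle(0x43));
        return 0;
    }
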
2543     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2544     index 9a4e3b63f1cb..b07402fc8ded 100644
2545     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2546     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2547     @@ -787,7 +787,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
2548     int r;
2549    
2550     if (mem) {
2551     - addr = mem->start << PAGE_SHIFT;
2552     + addr = (u64)mem->start << PAGE_SHIFT;
2553     if (mem->mem_type != TTM_PL_TT)
2554     addr += adev->vm_manager.vram_base_offset;
2555     } else {
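
The one-character amdgpu_vm fix guards a classic overflow: where mem->start is a 32-bit quantity, shifting it left by PAGE_SHIFT truncates any address at or above 4 GiB, so the value must be widened to u64 before the shift. Standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pfn = 0x00200000;              /* page 2M => byte 8 GiB */
        uint64_t bad = pfn << 12;               /* shifted in 32 bits: 0 */
        uint64_t good = (uint64_t)pfn << 12;    /* widened first: 8 GiB */

        printf("bad=0x%llx good=0x%llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }
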
2556     diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2557     index ae8caca61e04..e60557417049 100644
2558     --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2559     +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
2560     @@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
2561     amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
2562     }
2563     if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2564     - amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
2565     - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
2566     + amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
2567     if (ext_encoder)
2568     amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
2569     } else {
2570     diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
2571     index 4efd671d7a9b..9488ea6ea93f 100644
2572     --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
2573     +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
2574     @@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
2575     int r;
2576     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2577    
2578     - r = uvd_v4_2_hw_fini(adev);
2579     + r = amdgpu_uvd_suspend(adev);
2580     if (r)
2581     return r;
2582    
2583     - r = amdgpu_uvd_suspend(adev);
2584     + r = uvd_v4_2_hw_fini(adev);
2585     if (r)
2586     return r;
2587    
2588     diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
2589     index b756bd99c0fd..d0ed998228ef 100644
2590     --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
2591     +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
2592     @@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
2593     int r;
2594     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2595    
2596     - r = uvd_v5_0_hw_fini(adev);
2597     + r = amdgpu_uvd_suspend(adev);
2598     if (r)
2599     return r;
2600    
2601     - r = amdgpu_uvd_suspend(adev);
2602     + r = uvd_v5_0_hw_fini(adev);
2603     if (r)
2604     return r;
2605    
2606     diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2607     index 49aa931b2cb4..345eb760fd5b 100644
2608     --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2609     +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2610     @@ -214,11 +214,11 @@ static int uvd_v6_0_suspend(void *handle)
2611     int r;
2612     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2613    
2614     - r = uvd_v6_0_hw_fini(adev);
2615     + r = amdgpu_uvd_suspend(adev);
2616     if (r)
2617     return r;
2618    
2619     - r = amdgpu_uvd_suspend(adev);
2620     + r = uvd_v6_0_hw_fini(adev);
2621     if (r)
2622     return r;
2623    
2624     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
2625     index 68552da40287..4f58a1e18de6 100644
2626     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
2627     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
2628     @@ -1290,7 +1290,8 @@ static int vi_common_early_init(void *handle)
2629     case CHIP_CARRIZO:
2630     adev->has_uvd = true;
2631     adev->cg_flags = 0;
2632     - adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
2633     + /* Disable UVD pg */
2634     + adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
2635     adev->external_rev_id = adev->rev_id + 0x1;
2636     if (amdgpu_smc_load_fw && smc_enabled)
2637     adev->firmware.smu_load = true;
2638     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2639     index eb603f1defc2..969e7898a7ed 100644
2640     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
2641     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2642     @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
2643     struct drm_dp_mst_port *port, *tmp;
2644     bool wake_tx = false;
2645    
2646     - cancel_work_sync(&mstb->mgr->work);
2647     -
2648     /*
2649     * destroy all ports - don't need lock
2650     * as there are no more references to the mst branch
2651     @@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
2652     {
2653     struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2654     struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2655     +
2656     if (!port->input) {
2657     port->vcpi.num_slots = 0;
2658    
2659     kfree(port->cached_edid);
2660    
2661     - /* we can't destroy the connector here, as
2662     - we might be holding the mode_config.mutex
2663     - from an EDID retrieval */
2664     + /*
2665     + * The only time we don't have a connector
2666     + * on an output port is if the connector init
2667     + * fails.
2668     + */
2669     if (port->connector) {
2670     + /* we can't destroy the connector here, as
2671     + * we might be holding the mode_config.mutex
2672     + * from an EDID retrieval */
2673     +
2674     mutex_lock(&mgr->destroy_connector_lock);
2675     list_add(&port->next, &mgr->destroy_connector_list);
2676     mutex_unlock(&mgr->destroy_connector_lock);
2677     schedule_work(&mgr->destroy_connector_work);
2678     return;
2679     }
2680     + /* no need to clean up vcpi
2681     + * as if we have no connector we never set up a vcpi */
2682     drm_dp_port_teardown_pdt(port, port->pdt);
2683     -
2684     - if (!port->input && port->vcpi.vcpi > 0)
2685     - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2686     }
2687     kfree(port);
2688     -
2689     - (*mgr->cbs->hotplug)(mgr);
2690     }
2691    
2692     static void drm_dp_put_port(struct drm_dp_mst_port *port)
2693     @@ -1115,12 +1117,21 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
2694     char proppath[255];
2695     build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
2696     port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
2697     -
2698     + if (!port->connector) {
2699     + /* remove it from the port list */
2700     + mutex_lock(&mstb->mgr->lock);
2701     + list_del(&port->next);
2702     + mutex_unlock(&mstb->mgr->lock);
2703     + /* drop port list reference */
2704     + drm_dp_put_port(port);
2705     + goto out;
2706     + }
2707     if (port->port_num >= 8) {
2708     port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
2709     }
2710     }
2711    
2712     +out:
2713     /* put reference to this port */
2714     drm_dp_put_port(port);
2715     }
2716     @@ -1978,6 +1989,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2717     drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2718     DP_MST_EN | DP_UPSTREAM_IS_SRC);
2719     mutex_unlock(&mgr->lock);
2720     + flush_work(&mgr->work);
2721     + flush_work(&mgr->destroy_connector_work);
2722     }
2723     EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2724    
2725     @@ -2661,7 +2674,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2726     {
2727     struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2728     struct drm_dp_mst_port *port;
2729     -
2730     + bool send_hotplug = false;
2731     /*
2732     * Not a regular list traverse as we have to drop the destroy
2733     * connector lock before destroying the connector, to avoid AB->BA
2734     @@ -2684,7 +2697,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
2735     if (!port->input && port->vcpi.vcpi > 0)
2736     drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2737     kfree(port);
2738     + send_hotplug = true;
2739     }
2740     + if (send_hotplug)
2741     + (*mgr->cbs->hotplug)(mgr);
2742     }
2743    
2744     /**
2745     @@ -2737,6 +2753,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2746     */
2747     void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2748     {
2749     + flush_work(&mgr->work);
2750     flush_work(&mgr->destroy_connector_work);
2751     mutex_lock(&mgr->payload_lock);
2752     kfree(mgr->payloads);
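
Two of the changes above move the hotplug callback out of drm_dp_destroy_port() and into the workqueue drain, so one event fires per batch of destroyed connectors rather than one per port. The shape of that pattern, as a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    static void hotplug(void)
    {
        puts("hotplug event"); /* would be (*mgr->cbs->hotplug)(mgr) */
    }

    int main(void)
    {
        int ports[] = { 3, 5, 7 };
        bool send_hotplug = false;
        unsigned int i;

        for (i = 0; i < sizeof(ports) / sizeof(ports[0]); i++) {
            printf("destroying port %d\n", ports[i]);
            send_hotplug = true;
        }
        if (send_hotplug)
            hotplug();  /* once for the whole batch */
        return 0;
    }
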
2753     diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
2754     index f861361a635e..4924d381b664 100644
2755     --- a/drivers/gpu/drm/drm_lock.c
2756     +++ b/drivers/gpu/drm/drm_lock.c
2757     @@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
2758     struct drm_master *master = file_priv->master;
2759     int ret = 0;
2760    
2761     + if (drm_core_check_feature(dev, DRIVER_MODESET))
2762     + return -EINVAL;
2763     +
2764     ++file_priv->lock_count;
2765    
2766     if (lock->context == DRM_KERNEL_CONTEXT) {
2767     @@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
2768     struct drm_lock *lock = data;
2769     struct drm_master *master = file_priv->master;
2770    
2771     + if (drm_core_check_feature(dev, DRIVER_MODESET))
2772     + return -EINVAL;
2773     +
2774     if (lock->context == DRM_KERNEL_CONTEXT) {
2775     DRM_ERROR("Process %d using kernel context %d\n",
2776     task_pid_nr(current), lock->context);
2777     diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
2778     index 198fc3c3291b..17522f733513 100644
2779     --- a/drivers/gpu/drm/i915/intel_bios.c
2780     +++ b/drivers/gpu/drm/i915/intel_bios.c
2781     @@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
2782     const struct bdb_header *bdb = _bdb;
2783     const u8 *base = _bdb;
2784     int index = 0;
2785     - u16 total, current_size;
2786     + u32 total, current_size;
2787     u8 current_id;
2788    
2789     /* skip to first section */
2790     @@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
2791     current_size = *((const u16 *)(base + index));
2792     index += 2;
2793    
2794     + /* The MIPI Sequence Block v3+ has a separate size field. */
2795     + if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
2796     + current_size = *((const u32 *)(base + index + 1));
2797     +
2798     if (index + current_size > total)
2799     return NULL;
2800    
2801     @@ -859,6 +863,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
2802     return;
2803     }
2804    
2805     + /* Fail gracefully for forward incompatible sequence block. */
2806     + if (sequence->version >= 3) {
2807     + DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
2808     + return;
2809     + }
2810     +
2811     DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
2812    
2813     block_size = get_blocksize(sequence);
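
The intel_bios fix widens the section-size bookkeeping because MIPI Sequence Block v3+ payloads can exceed what the generic 16-bit size field can express; per the hunk, the real size lives in a 32-bit field one byte past the version. A hedged sketch of that layout, with invented buffer contents:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
        /* [id][u16 legacy size][version][u32 v3+ size]... */
        uint8_t block[8] = { 0x35, 0x05, 0x00, 0x03 };
        uint32_t big = 0x12345;             /* > 0xffff: needs 32 bits */
        uint16_t legacy;
        uint32_t size;

        memcpy(&block[4], &big, sizeof(big));

        memcpy(&legacy, &block[1], sizeof(legacy));
        size = legacy;
        if (block[3] >= 3)                  /* v3+: trust the wide field */
            memcpy(&size, &block[4], sizeof(size));

        printf("legacy=0x%x real=0x%x\n", legacy, size);
        return 0;
    }
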
2814     diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
2815     index 7c6225c84ba6..4649bd2ed340 100644
2816     --- a/drivers/gpu/drm/qxl/qxl_display.c
2817     +++ b/drivers/gpu/drm/qxl/qxl_display.c
2818     @@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
2819     adjusted_mode->hdisplay,
2820     adjusted_mode->vdisplay);
2821    
2822     - if (qcrtc->index == 0)
2823     + if (bo->is_primary == false)
2824     recreate_primary = true;
2825    
2826     if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
2827     @@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
2828     drm_connector_to_qxl_output(connector);
2829     struct drm_device *ddev = connector->dev;
2830     struct qxl_device *qdev = ddev->dev_private;
2831     - int connected;
2832     + bool connected = false;
2833    
2834     /* The first monitor is always connected */
2835     - connected = (output->index == 0) ||
2836     - (qdev->client_monitors_config &&
2837     - qdev->client_monitors_config->count > output->index &&
2838     - qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
2839     + if (!qdev->client_monitors_config) {
2840     + if (output->index == 0)
2841     + connected = true;
2842     + } else
2843     + connected = qdev->client_monitors_config->count > output->index &&
2844     + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
2845    
2846     DRM_DEBUG("#%d connected: %d\n", output->index, connected);
2847     if (!connected)
2848     diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
2849     index c3872598b85a..65adb9c72377 100644
2850     --- a/drivers/gpu/drm/radeon/atombios_encoders.c
2851     +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
2852     @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
2853     } else
2854     atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2855     if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
2856     - args.ucAction = ATOM_LCD_BLON;
2857     - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2858     + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
2859     +
2860     + atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
2861     }
2862     break;
2863     case DRM_MODE_DPMS_STANDBY:
2864     @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
2865     atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
2866     }
2867     if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
2868     - atombios_dig_transmitter_setup(encoder,
2869     - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
2870     + atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
2871     if (ext_encoder)
2872     atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
2873     break;
2874     diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
2875     index ea7ba5ef16a9..6a9d80a5332d 100644
2876     --- a/drivers/hv/hv_utils_transport.c
2877     +++ b/drivers/hv/hv_utils_transport.c
2878     @@ -186,7 +186,7 @@ int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len)
2879     return -EINVAL;
2880     } else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
2881     cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
2882     - if (!msg)
2883     + if (!cn_msg)
2884     return -ENOMEM;
2885     cn_msg->id.idx = hvt->cn_id.idx;
2886     cn_msg->id.val = hvt->cn_id.val;
2887     diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
2888     index bd1c99deac71..2aaedbe0b023 100644
2889     --- a/drivers/hwmon/nct6775.c
2890     +++ b/drivers/hwmon/nct6775.c
2891     @@ -354,6 +354,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
2892    
2893     /* NCT6776 specific data */
2894    
2895     +/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
2896     +#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
2897     +#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
2898     +
2899     static const s8 NCT6776_ALARM_BITS[] = {
2900     0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
2901     17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
2902     @@ -3528,8 +3532,8 @@ static int nct6775_probe(struct platform_device *pdev)
2903     data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
2904     data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
2905     data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
2906     - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
2907     - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
2908     + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
2909     + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
2910     data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
2911     data->REG_PWM[0] = NCT6775_REG_PWM;
2912     data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
2913     @@ -3600,8 +3604,8 @@ static int nct6775_probe(struct platform_device *pdev)
2914     data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
2915     data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
2916     data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
2917     - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
2918     - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
2919     + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
2920     + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
2921     data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
2922     data->REG_PWM[0] = NCT6775_REG_PWM;
2923     data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
2924     @@ -3677,8 +3681,8 @@ static int nct6775_probe(struct platform_device *pdev)
2925     data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
2926     data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
2927     data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
2928     - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
2929     - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
2930     + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
2931     + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
2932     data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
2933     data->REG_PWM[0] = NCT6775_REG_PWM;
2934     data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
2935     diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
2936     index d851e1828d6f..85761b78bb5f 100644
2937     --- a/drivers/infiniband/ulp/isert/ib_isert.c
2938     +++ b/drivers/infiniband/ulp/isert/ib_isert.c
2939     @@ -3012,9 +3012,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
2940     static int
2941     isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
2942     {
2943     - int ret;
2944     + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2945     + int ret = 0;
2946    
2947     switch (state) {
2948     + case ISTATE_REMOVE:
2949     + spin_lock_bh(&conn->cmd_lock);
2950     + list_del_init(&cmd->i_conn_node);
2951     + spin_unlock_bh(&conn->cmd_lock);
2952     + isert_put_cmd(isert_cmd, true);
2953     + break;
2954     case ISTATE_SEND_NOPIN_WANT_RESPONSE:
2955     ret = isert_put_nopin(cmd, conn, false);
2956     break;
2957     @@ -3379,6 +3386,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
2958     wait_for_completion(&isert_conn->wait_comp_err);
2959     }
2960    
2961     +/**
2962     + * isert_put_unsol_pending_cmds() - Drop commands waiting for
2963     + * unsolicited dataout
2964     + * @conn: iscsi connection
2965     + *
2966     + * We might still have commands that are waiting for unsolicited
2967     + * dataout messages. We must put the extra reference on those
2968     + * before blocking on target_wait_for_session_cmds().
2969     + */
2970     +static void
2971     +isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
2972     +{
2973     + struct iscsi_cmd *cmd, *tmp;
2974     + static LIST_HEAD(drop_cmd_list);
2975     +
2976     + spin_lock_bh(&conn->cmd_lock);
2977     + list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
2978     + if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
2979     + (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
2980     + (cmd->write_data_done < cmd->se_cmd.data_length))
2981     + list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
2982     + }
2983     + spin_unlock_bh(&conn->cmd_lock);
2984     +
2985     + list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
2986     + list_del_init(&cmd->i_conn_node);
2987     + if (cmd->i_state != ISTATE_REMOVE) {
2988     + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2989     +
2990     + isert_info("conn %p dropping cmd %p\n", conn, cmd);
2991     + isert_put_cmd(isert_cmd, true);
2992     + }
2993     + }
2994     +}
2995     +
2996     static void isert_wait_conn(struct iscsi_conn *conn)
2997     {
2998     struct isert_conn *isert_conn = conn->context;
2999     @@ -3397,8 +3439,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
3000     isert_conn_terminate(isert_conn);
3001     mutex_unlock(&isert_conn->mutex);
3002    
3003     - isert_wait4cmds(conn);
3004     isert_wait4flush(isert_conn);
3005     + isert_put_unsol_pending_cmds(conn);
3006     + isert_wait4cmds(conn);
3007     isert_wait4logout(isert_conn);
3008    
3009     queue_work(isert_release_wq, &isert_conn->release_work);
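
isert_put_unsol_pending_cmds() is an instance of the usual two-phase teardown: collect victims onto a private list under the lock, then drop the references outside it, so the final put never runs under conn->cmd_lock. Reduced to its essentials (struct item, should_drop() and put_item() are hypothetical placeholders):

    /* Sketch of the move-then-free pattern used above. */
    static void drop_pending(spinlock_t *lock, struct list_head *live)
    {
            struct item *it, *tmp;
            LIST_HEAD(drop_list);

            spin_lock_bh(lock);
            list_for_each_entry_safe(it, tmp, live, node)
                    if (should_drop(it))            /* hypothetical predicate */
                            list_move_tail(&it->node, &drop_list);
            spin_unlock_bh(lock);

            list_for_each_entry_safe(it, tmp, &drop_list, node) {
                    list_del_init(&it->node);
                    put_item(it);   /* final put may take other locks */
            }
    }
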
3010     diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
3011     index 459bf4429d36..7e077bf13fe1 100644
3012     --- a/drivers/irqchip/irq-atmel-aic5.c
3013     +++ b/drivers/irqchip/irq-atmel-aic5.c
3014     @@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
3015     {
3016     struct irq_domain *domain = d->domain;
3017     struct irq_domain_chip_generic *dgc = domain->gc;
3018     - struct irq_chip_generic *gc = dgc->gc[0];
3019     + struct irq_chip_generic *bgc = dgc->gc[0];
3020     + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3021    
3022     - /* Disable interrupt on AIC5 */
3023     - irq_gc_lock(gc);
3024     + /*
3025     + * Disable interrupt on AIC5. We always take the lock of the
3026     + * first irq chip as all chips share the same registers.
3027     + */
3028     + irq_gc_lock(bgc);
3029     irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
3030     irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
3031     gc->mask_cache &= ~d->mask;
3032     - irq_gc_unlock(gc);
3033     + irq_gc_unlock(bgc);
3034     }
3035    
3036     static void aic5_unmask(struct irq_data *d)
3037     {
3038     struct irq_domain *domain = d->domain;
3039     struct irq_domain_chip_generic *dgc = domain->gc;
3040     - struct irq_chip_generic *gc = dgc->gc[0];
3041     + struct irq_chip_generic *bgc = dgc->gc[0];
3042     + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3043    
3044     - /* Enable interrupt on AIC5 */
3045     - irq_gc_lock(gc);
3046     + /*
3047     + * Enable interrupt on AIC5. We always take the lock of the
3048     + * first irq chip as all chips share the same registers.
3049     + */
3050     + irq_gc_lock(bgc);
3051     irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
3052     irq_reg_writel(gc, 1, AT91_AIC5_IECR);
3053     gc->mask_cache |= d->mask;
3054     - irq_gc_unlock(gc);
3055     + irq_gc_unlock(bgc);
3056     }
3057    
3058     static int aic5_retrigger(struct irq_data *d)
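
All AIC5 banks are windows onto one register file selected through AT91_AIC5_SSR, so taking a per-bank lock would not serialize the SSR write against the other banks; the fix always locks the first chip while still addressing registers through the per-irq chip. The shape of the pattern, condensed:

    /* Sketch: one lock (the first chip's) guards the shared, banked registers. */
    struct irq_chip_generic *bgc = dgc->gc[0];                    /* lock owner */
    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);  /* reg access */

    irq_gc_lock(bgc);
    irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);   /* select the line */
    irq_reg_writel(gc, 1, AT91_AIC5_IDCR);         /* then mask it */
    gc->mask_cache &= ~d->mask;
    irq_gc_unlock(bgc);
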
3059     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
3060     index c00e2db351ba..9a791dd52199 100644
3061     --- a/drivers/irqchip/irq-gic-v3-its.c
3062     +++ b/drivers/irqchip/irq-gic-v3-its.c
3063     @@ -921,8 +921,10 @@ retry_baser:
3064     * non-cacheable as well.
3065     */
3066     shr = tmp & GITS_BASER_SHAREABILITY_MASK;
3067     - if (!shr)
3068     + if (!shr) {
3069     cache = GITS_BASER_nC;
3070     + __flush_dcache_area(base, alloc_size);
3071     + }
3072     goto retry_baser;
3073     }
3074    
3075     @@ -1163,6 +1165,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3076     return NULL;
3077     }
3078    
3079     + __flush_dcache_area(itt, sz);
3080     +
3081     dev->its = its;
3082     dev->itt = itt;
3083     dev->nr_ites = nr_ites;
3084     diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
3085     index 9ad35f72ab4c..433fb9df848a 100644
3086     --- a/drivers/leds/Kconfig
3087     +++ b/drivers/leds/Kconfig
3088     @@ -229,7 +229,7 @@ config LEDS_LP55XX_COMMON
3089     tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
3090     depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
3091     select FW_LOADER
3092     - select FW_LOADER_USER_HELPER_FALLBACK
3093     + select FW_LOADER_USER_HELPER
3094     help
3095     This option supports common operations for LP5521/5523/55231/5562/8501
3096     devices.
3097     diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
3098     index beabfbc6f7cd..ca51d58bed24 100644
3099     --- a/drivers/leds/led-class.c
3100     +++ b/drivers/leds/led-class.c
3101     @@ -228,12 +228,15 @@ static int led_classdev_next_name(const char *init_name, char *name,
3102     {
3103     unsigned int i = 0;
3104     int ret = 0;
3105     + struct device *dev;
3106    
3107     strlcpy(name, init_name, len);
3108    
3109     - while (class_find_device(leds_class, NULL, name, match_name) &&
3110     - (ret < len))
3111     + while ((ret < len) &&
3112     + (dev = class_find_device(leds_class, NULL, name, match_name))) {
3113     + put_device(dev);
3114     ret = snprintf(name, len, "%s_%u", init_name, ++i);
3115     + }
3116    
3117     if (ret >= len)
3118     return -ENOMEM;
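
class_find_device() returns its match with a reference held, so every successful lookup must be balanced by put_device(); the pre-patch loop leaked one reference per name collision. A leak-free existence probe looks like this (sketch, reusing the file's leds_class and match_name):

    static bool led_name_taken(const char *name)
    {
            struct device *dev;

            dev = class_find_device(leds_class, NULL, name, match_name);
            if (!dev)
                    return false;

            put_device(dev);        /* balance the lookup's get_device() */
            return true;
    }
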
3119     diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
3120     index 3ee198b65843..cc7ece1712b5 100644
3121     --- a/drivers/macintosh/windfarm_core.c
3122     +++ b/drivers/macintosh/windfarm_core.c
3123     @@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
3124     {
3125     mutex_lock(&wf_lock);
3126     blocking_notifier_chain_unregister(&wf_client_list, nb);
3127     - wf_client_count++;
3128     + wf_client_count--;
3129     if (wf_client_count == 0)
3130     wf_stop_thread();
3131     mutex_unlock(&wf_lock);
3132     diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
3133     index e51de52eeb94..48b5890c28e3 100644
3134     --- a/drivers/md/bitmap.c
3135     +++ b/drivers/md/bitmap.c
3136     @@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
3137     if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
3138     ret = bitmap_storage_alloc(&store, chunks,
3139     !bitmap->mddev->bitmap_info.external,
3140     - bitmap->cluster_slot);
3141     + mddev_is_clustered(bitmap->mddev)
3142     + ? bitmap->cluster_slot : 0);
3143     if (ret)
3144     goto err;
3145    
3146     diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
3147     index 240c9f0e85e7..8a096456579b 100644
3148     --- a/drivers/md/dm-cache-policy-cleaner.c
3149     +++ b/drivers/md/dm-cache-policy-cleaner.c
3150     @@ -436,7 +436,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
3151     static struct dm_cache_policy_type wb_policy_type = {
3152     .name = "cleaner",
3153     .version = {1, 0, 0},
3154     - .hint_size = 0,
3155     + .hint_size = 4,
3156     .owner = THIS_MODULE,
3157     .create = wb_create
3158     };
3159     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3160     index 0f48fed44a17..0d28c5b9d065 100644
3161     --- a/drivers/md/dm-crypt.c
3162     +++ b/drivers/md/dm-crypt.c
3163     @@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
3164    
3165     /*
3166     * Generate a new unfragmented bio with the given size
3167     - * This should never violate the device limitations
3168     + * This should never violate the device limitations (but only because
3169     + * max_segment_size is being constrained to PAGE_SIZE).
3170     *
3171     * This function may be called concurrently. If we allocate from the mempool
3172     * concurrently, there is a possibility of deadlock. For example, if we have
3173     @@ -2058,9 +2059,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
3174     return fn(ti, cc->dev, cc->start, ti->len, data);
3175     }
3176    
3177     +static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
3178     +{
3179     + /*
3180     + * Unfortunate constraint that is required to avoid the potential
3181     + * for exceeding underlying device's max_segments limits -- due to
3182     + * crypt_alloc_buffer() possibly allocating pages for the encryption
3183     + * bio that are not as physically contiguous as the original bio.
3184     + */
3185     + limits->max_segment_size = PAGE_SIZE;
3186     +}
3187     +
3188     static struct target_type crypt_target = {
3189     .name = "crypt",
3190     - .version = {1, 14, 0},
3191     + .version = {1, 14, 1},
3192     .module = THIS_MODULE,
3193     .ctr = crypt_ctr,
3194     .dtr = crypt_dtr,
3195     @@ -2072,6 +2084,7 @@ static struct target_type crypt_target = {
3196     .message = crypt_message,
3197     .merge = crypt_merge,
3198     .iterate_devices = crypt_iterate_devices,
3199     + .io_hints = crypt_io_hints,
3200     };
3201    
3202     static int __init dm_crypt_init(void)
3203     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
3204     index 2daa67793511..1257d484392a 100644
3205     --- a/drivers/md/dm-raid.c
3206     +++ b/drivers/md/dm-raid.c
3207     @@ -329,8 +329,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
3208     */
3209     if (min_region_size > (1 << 13)) {
3210     /* If not a power of 2, make it the next power of 2 */
3211     - if (min_region_size & (min_region_size - 1))
3212     - region_size = 1 << fls(region_size);
3213     + region_size = roundup_pow_of_two(min_region_size);
3214     DMINFO("Choosing default region size of %lu sectors",
3215     region_size);
3216     } else {
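
The replaced branch tested min_region_size but then rounded region_size, a different (stale) variable; roundup_pow_of_two() applied directly to min_region_size says what was meant, and is a no-op for values that are already powers of two. Numerically (value hypothetical):

    unsigned long min_region_size = 9000;   /* sectors, hypothetical */
    unsigned long region_size;

    region_size = roundup_pow_of_two(min_region_size);  /* 9000 -> 16384 */
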
3217     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
3218     index d2bbe8cc1e97..75aef240c2d1 100644
3219     --- a/drivers/md/dm-thin.c
3220     +++ b/drivers/md/dm-thin.c
3221     @@ -4333,6 +4333,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
3222     {
3223     struct thin_c *tc = ti->private;
3224     struct pool *pool = tc->pool;
3225     + struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
3226     +
3227     + if (!pool_limits->discard_granularity)
3228     + return; /* pool's discard support is disabled */
3229    
3230     limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
3231     limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
3232     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3233     index 0d7ab20c58df..3e32f4e31bbb 100644
3234     --- a/drivers/md/dm.c
3235     +++ b/drivers/md/dm.c
3236     @@ -2952,8 +2952,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
3237    
3238     might_sleep();
3239    
3240     - map = dm_get_live_table(md, &srcu_idx);
3241     -
3242     spin_lock(&_minor_lock);
3243     idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
3244     set_bit(DMF_FREEING, &md->flags);
3245     @@ -2967,14 +2965,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
3246     * do not race with internal suspend.
3247     */
3248     mutex_lock(&md->suspend_lock);
3249     + map = dm_get_live_table(md, &srcu_idx);
3250     if (!dm_suspended_md(md)) {
3251     dm_table_presuspend_targets(map);
3252     dm_table_postsuspend_targets(map);
3253     }
3254     - mutex_unlock(&md->suspend_lock);
3255     -
3256     /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
3257     dm_put_live_table(md, srcu_idx);
3258     + mutex_unlock(&md->suspend_lock);
3259    
3260     /*
3261     * Rare, but there may be I/O requests still going to complete,
3262     diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
3263     index efb654eb5399..0875e5e7e09a 100644
3264     --- a/drivers/md/raid0.c
3265     +++ b/drivers/md/raid0.c
3266     @@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3267     char b[BDEVNAME_SIZE];
3268     char b2[BDEVNAME_SIZE];
3269     struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
3270     - bool discard_supported = false;
3271     + unsigned short blksize = 512;
3272    
3273     if (!conf)
3274     return -ENOMEM;
3275     @@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3276     sector_div(sectors, mddev->chunk_sectors);
3277     rdev1->sectors = sectors * mddev->chunk_sectors;
3278    
3279     + blksize = max(blksize, queue_logical_block_size(
3280     + rdev1->bdev->bd_disk->queue));
3281     +
3282     rdev_for_each(rdev2, mddev) {
3283     pr_debug("md/raid0:%s: comparing %s(%llu)"
3284     " with %s(%llu)\n",
3285     @@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3286     }
3287     pr_debug("md/raid0:%s: FINAL %d zones\n",
3288     mdname(mddev), conf->nr_strip_zones);
3289     + /*
3290     + * now since we have the hard sector sizes, we can make sure
3291     + * chunk size is a multiple of that sector size
3292     + */
3293     + if ((mddev->chunk_sectors << 9) % blksize) {
3294     + printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
3295     + mdname(mddev),
3296     + mddev->chunk_sectors << 9, blksize);
3297     + err = -EINVAL;
3298     + goto abort;
3299     + }
3300     +
3301     err = -ENOMEM;
3302     conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
3303     conf->nr_strip_zones, GFP_KERNEL);
3304     @@ -188,19 +203,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3305     }
3306     dev[j] = rdev1;
3307    
3308     - if (mddev->queue)
3309     - disk_stack_limits(mddev->gendisk, rdev1->bdev,
3310     - rdev1->data_offset << 9);
3311     -
3312     if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
3313     conf->has_merge_bvec = 1;
3314    
3315     if (!smallest || (rdev1->sectors < smallest->sectors))
3316     smallest = rdev1;
3317     cnt++;
3318     -
3319     - if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
3320     - discard_supported = true;
3321     }
3322     if (cnt != mddev->raid_disks) {
3323     printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
3324     @@ -261,28 +269,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
3325     (unsigned long long)smallest->sectors);
3326     }
3327    
3328     - /*
3329     - * now since we have the hard sector sizes, we can make sure
3330     - * chunk size is a multiple of that sector size
3331     - */
3332     - if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
3333     - printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
3334     - mdname(mddev),
3335     - mddev->chunk_sectors << 9);
3336     - goto abort;
3337     - }
3338     -
3339     - if (mddev->queue) {
3340     - blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
3341     - blk_queue_io_opt(mddev->queue,
3342     - (mddev->chunk_sectors << 9) * mddev->raid_disks);
3343     -
3344     - if (!discard_supported)
3345     - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3346     - else
3347     - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3348     - }
3349     -
3350     pr_debug("md/raid0:%s: done.\n", mdname(mddev));
3351     *private_conf = conf;
3352    
3353     @@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev)
3354     if (md_check_no_bitmap(mddev))
3355     return -EINVAL;
3356    
3357     - if (mddev->queue) {
3358     - blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
3359     - blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
3360     - blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
3361     - }
3362     -
3363     /* if private is not null, we are here after takeover */
3364     if (mddev->private == NULL) {
3365     ret = create_strip_zones(mddev, &conf);
3366     @@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev)
3367     mddev->private = conf;
3368     }
3369     conf = mddev->private;
3370     + if (mddev->queue) {
3371     + struct md_rdev *rdev;
3372     + bool discard_supported = false;
3373     +
3374     + blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
3375     + blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
3376     + blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
3377     +
3378     + blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
3379     + blk_queue_io_opt(mddev->queue,
3380     + (mddev->chunk_sectors << 9) * mddev->raid_disks);
3381     +
3382     + rdev_for_each(rdev, mddev) {
3383     + disk_stack_limits(mddev->gendisk, rdev->bdev,
3384     + rdev->data_offset << 9);
3385     + if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3386     + discard_supported = true;
3387     + }
3388     + if (!discard_supported)
3389     + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3390     + else
3391     + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
3392     + }
3393    
3394     /* calculate array device size */
3395     md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
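
Moving the check after the per-device scan lets it use the largest logical block size of any member, since a chunk aligned for 512-byte disks is not necessarily aligned once a 4K-native disk joins the set. Numerically (values hypothetical):

    unsigned int chunk_sectors = 9;   /* 9 * 512 = 4608-byte chunk */
    unsigned short blksize = 4096;    /* one member is 4Kn */

    if ((chunk_sectors << 9) % blksize)   /* 4608 % 4096 == 512 -> reject */
            return -EINVAL;
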
3396     diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
3397     index 9e3fdbdc4037..2f4503a7f315 100644
3398     --- a/drivers/mmc/core/core.c
3399     +++ b/drivers/mmc/core/core.c
3400     @@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
3401     int err = cmd->error;
3402    
3403     /* Flag re-tuning needed on CRC errors */
3404     - if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
3405     + if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
3406     + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
3407     + (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
3408     (mrq->data && mrq->data->error == -EILSEQ) ||
3409     - (mrq->stop && mrq->stop->error == -EILSEQ))
3410     + (mrq->stop && mrq->stop->error == -EILSEQ)))
3411     mmc_retune_needed(host);
3412    
3413     if (err && cmd->retries && mmc_host_is_spi(host)) {
3414     diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
3415     index 99a9c9011c50..79979e9d5a09 100644
3416     --- a/drivers/mmc/core/host.c
3417     +++ b/drivers/mmc/core/host.c
3418     @@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
3419     0, &cd_gpio_invert);
3420     if (!ret)
3421     dev_info(host->parent, "Got CD GPIO\n");
3422     - else if (ret != -ENOENT)
3423     + else if (ret != -ENOENT && ret != -ENOSYS)
3424     return ret;
3425    
3426     /*
3427     @@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
3428     ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
3429     if (!ret)
3430     dev_info(host->parent, "Got WP GPIO\n");
3431     - else if (ret != -ENOENT)
3432     + else if (ret != -ENOENT && ret != -ENOSYS)
3433     return ret;
3434    
3435     if (of_property_read_bool(np, "disable-wp"))
3436     diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
3437     index 40e9d8e45f25..e41fb7405426 100644
3438     --- a/drivers/mmc/host/dw_mmc.c
3439     +++ b/drivers/mmc/host/dw_mmc.c
3440     @@ -99,6 +99,9 @@ struct idmac_desc {
3441    
3442     __le32 des3; /* buffer 2 physical address */
3443     };
3444     +
3445     +/* Each descriptor can transfer up to 4KB of data in chained mode */
3446     +#define DW_MCI_DESC_DATA_LENGTH 0x1000
3447     #endif /* CONFIG_MMC_DW_IDMAC */
3448    
3449     static bool dw_mci_reset(struct dw_mci *host);
3450     @@ -462,66 +465,96 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
3451     static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
3452     unsigned int sg_len)
3453     {
3454     + unsigned int desc_len;
3455     int i;
3456     if (host->dma_64bit_address == 1) {
3457     - struct idmac_desc_64addr *desc = host->sg_cpu;
3458     + struct idmac_desc_64addr *desc_first, *desc_last, *desc;
3459     +
3460     + desc_first = desc_last = desc = host->sg_cpu;
3461    
3462     - for (i = 0; i < sg_len; i++, desc++) {
3463     + for (i = 0; i < sg_len; i++) {
3464     unsigned int length = sg_dma_len(&data->sg[i]);
3465     u64 mem_addr = sg_dma_address(&data->sg[i]);
3466    
3467     - /*
3468     - * Set the OWN bit and disable interrupts for this
3469     - * descriptor
3470     - */
3471     - desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
3472     - IDMAC_DES0_CH;
3473     - /* Buffer length */
3474     - IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
3475     -
3476     - /* Physical address to DMA to/from */
3477     - desc->des4 = mem_addr & 0xffffffff;
3478     - desc->des5 = mem_addr >> 32;
3479     + for ( ; length ; desc++) {
3480     + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
3481     + length : DW_MCI_DESC_DATA_LENGTH;
3482     +
3483     + length -= desc_len;
3484     +
3485     + /*
3486     + * Set the OWN bit and disable interrupts
3487     + * for this descriptor
3488     + */
3489     + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
3490     + IDMAC_DES0_CH;
3491     +
3492     + /* Buffer length */
3493     + IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
3494     +
3495     + /* Physical address to DMA to/from */
3496     + desc->des4 = mem_addr & 0xffffffff;
3497     + desc->des5 = mem_addr >> 32;
3498     +
3499     + /* Update physical address for the next desc */
3500     + mem_addr += desc_len;
3501     +
3502     + /* Save pointer to the last descriptor */
3503     + desc_last = desc;
3504     + }
3505     }
3506    
3507     /* Set first descriptor */
3508     - desc = host->sg_cpu;
3509     - desc->des0 |= IDMAC_DES0_FD;
3510     + desc_first->des0 |= IDMAC_DES0_FD;
3511    
3512     /* Set last descriptor */
3513     - desc = host->sg_cpu + (i - 1) *
3514     - sizeof(struct idmac_desc_64addr);
3515     - desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
3516     - desc->des0 |= IDMAC_DES0_LD;
3517     + desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
3518     + desc_last->des0 |= IDMAC_DES0_LD;
3519    
3520     } else {
3521     - struct idmac_desc *desc = host->sg_cpu;
3522     + struct idmac_desc *desc_first, *desc_last, *desc;
3523     +
3524     + desc_first = desc_last = desc = host->sg_cpu;
3525    
3526     - for (i = 0; i < sg_len; i++, desc++) {
3527     + for (i = 0; i < sg_len; i++) {
3528     unsigned int length = sg_dma_len(&data->sg[i]);
3529     u32 mem_addr = sg_dma_address(&data->sg[i]);
3530    
3531     - /*
3532     - * Set the OWN bit and disable interrupts for this
3533     - * descriptor
3534     - */
3535     - desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
3536     - IDMAC_DES0_DIC | IDMAC_DES0_CH);
3537     - /* Buffer length */
3538     - IDMAC_SET_BUFFER1_SIZE(desc, length);
3539     + for ( ; length ; desc++) {
3540     + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
3541     + length : DW_MCI_DESC_DATA_LENGTH;
3542     +
3543     + length -= desc_len;
3544     +
3545     + /*
3546     + * Set the OWN bit and disable interrupts
3547     + * for this descriptor
3548     + */
3549     + desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
3550     + IDMAC_DES0_DIC |
3551     + IDMAC_DES0_CH);
3552     +
3553     + /* Buffer length */
3554     + IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
3555    
3556     - /* Physical address to DMA to/from */
3557     - desc->des2 = cpu_to_le32(mem_addr);
3558     + /* Physical address to DMA to/from */
3559     + desc->des2 = cpu_to_le32(mem_addr);
3560     +
3561     + /* Update physical address for the next desc */
3562     + mem_addr += desc_len;
3563     +
3564     + /* Save pointer to the last descriptor */
3565     + desc_last = desc;
3566     + }
3567     }
3568    
3569     /* Set first descriptor */
3570     - desc = host->sg_cpu;
3571     - desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
3572     + desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
3573    
3574     /* Set last descriptor */
3575     - desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
3576     - desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
3577     - desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
3578     + desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
3579     + IDMAC_DES0_DIC));
3580     + desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
3581     }
3582    
3583     wmb();
3584     @@ -2394,7 +2427,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
3585     #ifdef CONFIG_MMC_DW_IDMAC
3586     mmc->max_segs = host->ring_size;
3587     mmc->max_blk_size = 65536;
3588     - mmc->max_seg_size = 0x1000;
3589     + mmc->max_seg_size = DW_MCI_DESC_DATA_LENGTH;
3590     mmc->max_req_size = mmc->max_seg_size * host->ring_size;
3591     mmc->max_blk_count = mmc->max_req_size / 512;
3592     #else
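
Each IDMAC descriptor moves at most DW_MCI_DESC_DATA_LENGTH (4 KiB), so a longer scatterlist entry has to be spread over several descriptors, advancing the DMA address by the consumed length each time. The split, isolated from the register programming (sketch):

    #define DW_MCI_DESC_DATA_LENGTH 0x1000

    static unsigned int fill_descs(u64 mem_addr, unsigned int length)
    {
            unsigned int ndesc = 0;

            while (length) {
                    unsigned int desc_len = min_t(unsigned int, length,
                                                  DW_MCI_DESC_DATA_LENGTH);

                    /* program one descriptor: address mem_addr, size desc_len */
                    mem_addr += desc_len;
                    length -= desc_len;
                    ndesc++;
            }
            return ndesc;   /* e.g. a 9 KiB entry -> 4K + 4K + 1K = 3 descriptors */
    }
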
3593     diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
3594     index 946d37f94a31..f5edf9d3a18a 100644
3595     --- a/drivers/mmc/host/sdhci-pxav3.c
3596     +++ b/drivers/mmc/host/sdhci-pxav3.c
3597     @@ -135,6 +135,7 @@ static int armada_38x_quirks(struct platform_device *pdev,
3598     struct sdhci_pxa *pxa = pltfm_host->priv;
3599     struct resource *res;
3600    
3601     + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
3602     host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
3603     res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
3604     "conf-sdio3");
3605     @@ -290,6 +291,9 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
3606     uhs == MMC_TIMING_UHS_DDR50) {
3607     reg_val &= ~SDIO3_CONF_CLK_INV;
3608     reg_val |= SDIO3_CONF_SD_FB_CLK;
3609     + } else if (uhs == MMC_TIMING_MMC_HS) {
3610     + reg_val &= ~SDIO3_CONF_CLK_INV;
3611     + reg_val &= ~SDIO3_CONF_SD_FB_CLK;
3612     } else {
3613     reg_val |= SDIO3_CONF_CLK_INV;
3614     reg_val &= ~SDIO3_CONF_SD_FB_CLK;
3615     @@ -398,7 +402,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
3616     if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
3617     ret = armada_38x_quirks(pdev, host);
3618     if (ret < 0)
3619     - goto err_clk_get;
3620     + goto err_mbus_win;
3621     ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
3622     if (ret < 0)
3623     goto err_mbus_win;
3624     diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
3625     index 1259cc558ce9..5465fa439c9e 100644
3626     --- a/drivers/mtd/nand/pxa3xx_nand.c
3627     +++ b/drivers/mtd/nand/pxa3xx_nand.c
3628     @@ -1473,6 +1473,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
3629     if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
3630     goto KEEP_CONFIG;
3631    
3632     + /* Set a default chunk size */
3633     + info->chunk_size = 512;
3634     +
3635     ret = pxa3xx_nand_sensing(info);
3636     if (ret) {
3637     dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
3638     diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
3639     index 6f93b2990d25..499b8e433d3d 100644
3640     --- a/drivers/mtd/nand/sunxi_nand.c
3641     +++ b/drivers/mtd/nand/sunxi_nand.c
3642     @@ -138,6 +138,10 @@
3643     #define NFC_ECC_MODE GENMASK(15, 12)
3644     #define NFC_RANDOM_SEED GENMASK(30, 16)
3645    
3646     +/* NFC_USER_DATA helper macros */
3647     +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \
3648     + ((buf)[2] << 16) | ((buf)[3] << 24))
3649     +
3650     #define NFC_DEFAULT_TIMEOUT_MS 1000
3651    
3652     #define NFC_SRAM_SIZE 1024
3653     @@ -632,15 +636,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
3654     offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
3655    
3656     /* Fill OOB data in */
3657     - if (oob_required) {
3658     - tmp = 0xffffffff;
3659     - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
3660     - 4);
3661     - } else {
3662     - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
3663     - chip->oob_poi + offset - mtd->writesize,
3664     - 4);
3665     - }
3666     + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
3667     + layout->oobfree[i].offset),
3668     + nfc->regs + NFC_REG_USER_DATA_BASE);
3669    
3670     chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
3671    
3672     @@ -770,14 +768,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
3673     offset += ecc->size;
3674    
3675     /* Fill OOB data in */
3676     - if (oob_required) {
3677     - tmp = 0xffffffff;
3678     - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
3679     - 4);
3680     - } else {
3681     - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
3682     - 4);
3683     - }
3684     + writel(NFC_BUF_TO_USER_DATA(oob),
3685     + nfc->regs + NFC_REG_USER_DATA_BASE);
3686    
3687     tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
3688     (1 << 30);
3689     @@ -1312,6 +1304,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
3690     node);
3691     nand_release(&chip->mtd);
3692     sunxi_nand_ecc_cleanup(&chip->nand.ecc);
3693     + list_del(&chip->node);
3694     }
3695     }
3696    
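
NFC_BUF_TO_USER_DATA() packs four OOB bytes into the little-endian word layout the NFC_REG_USER_DATA register expects, which is what lets the driver write real OOB contents instead of the old 0xffffffff filler. For example (bytes hypothetical):

    u8 oob[4] = { 0x12, 0x34, 0x56, 0x78 };
    u32 val = oob[0] | (oob[1] << 8) | (oob[2] << 16) | (oob[3] << 24);
    /* val == 0x78563412: oob[0] ends up in the low-order byte lane */
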
3697     diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
3698     index 5bbd1f094f4e..1fc23e48fe8e 100644
3699     --- a/drivers/mtd/ubi/io.c
3700     +++ b/drivers/mtd/ubi/io.c
3701     @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
3702     goto bad;
3703     }
3704    
3705     + if (data_size > ubi->leb_size) {
3706     + ubi_err(ubi, "bad data_size");
3707     + goto bad;
3708     + }
3709     +
3710     if (vol_type == UBI_VID_STATIC) {
3711     /*
3712     * Although from high-level point of view static volumes may
3713     diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
3714     index 80bdd5b88bac..d85c19762160 100644
3715     --- a/drivers/mtd/ubi/vtbl.c
3716     +++ b/drivers/mtd/ubi/vtbl.c
3717     @@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
3718     if (ubi->corr_peb_count)
3719     ubi_err(ubi, "%d PEBs are corrupted and not used",
3720     ubi->corr_peb_count);
3721     + return -ENOSPC;
3722     }
3723     ubi->rsvd_pebs += reserved_pebs;
3724     ubi->avail_pebs -= reserved_pebs;
3725     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
3726     index 275d9fb6fe5c..eb4489f9082f 100644
3727     --- a/drivers/mtd/ubi/wl.c
3728     +++ b/drivers/mtd/ubi/wl.c
3729     @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
3730     if (ubi->corr_peb_count)
3731     ubi_err(ubi, "%d PEBs are corrupted and not used",
3732     ubi->corr_peb_count);
3733     + err = -ENOSPC;
3734     goto out_free;
3735     }
3736     ubi->avail_pebs -= reserved_pebs;
3737     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
3738     index 89d788d8f263..adfe1de78d99 100644
3739     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
3740     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
3741     @@ -4280,18 +4280,29 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
3742     struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
3743     cc);
3744     struct e1000_hw *hw = &adapter->hw;
3745     + u32 systimel_1, systimel_2, systimeh;
3746     cycle_t systim, systim_next;
3747     - /* SYSTIMH latching upon SYSTIML read does not work well. To fix that
3748     - * we don't want to allow overflow of SYSTIML and a change to SYSTIMH
3749     - * to occur between reads, so if we read a vale close to overflow, we
3750     - * wait for overflow to occur and read both registers when its safe.
3751     + /* SYSTIMH latching upon SYSTIML read does not work well.
3752     + * This means that if SYSTIML overflows after we read it but before
3753     + * we read SYSTIMH, the value of SYSTIMH has been incremented and we
3754     + * will experience a huge non-linear increment in the systime value.
3755     + * To fix that we test for overflow and, if one occurred, re-read systime.
3756     */
3757     - u32 systim_overflow_latch_fix = 0x3FFFFFFF;
3758     -
3759     - do {
3760     - systim = (cycle_t)er32(SYSTIML);
3761     - } while (systim > systim_overflow_latch_fix);
3762     - systim |= (cycle_t)er32(SYSTIMH) << 32;
3763     + systimel_1 = er32(SYSTIML);
3764     + systimeh = er32(SYSTIMH);
3765     + systimel_2 = er32(SYSTIML);
3766     + /* Check for overflow. If there was no overflow, use the values */
3767     + if (systimel_1 < systimel_2) {
3768     + systim = (cycle_t)systimel_1;
3769     + systim |= (cycle_t)systimeh << 32;
3770     + } else {
3771     + /* There was an overflow, read again SYSTIMH, and use
3772     + * systimel_2
3773     + */
3774     + systimeh = er32(SYSTIMH);
3775     + systim = (cycle_t)systimel_2;
3776     + systim |= (cycle_t)systimeh << 32;
3777     + }
3778    
3779     if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
3780     u64 incvalue, time_delta, rem, temp;
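
With SYSTIMH no longer latched by the SYSTIML read, the driver falls back to the classic low-high-low sequence for split counters: if the second low read is not larger than the first, the low word wrapped in between and the high word must be fetched again. The pattern in isolation (read_lo()/read_hi() are hypothetical accessors):

    static u64 read_split_counter(void)
    {
            u32 lo1, lo2, hi;

            lo1 = read_lo();
            hi = read_hi();
            lo2 = read_lo();

            if (lo1 < lo2)  /* no wrap between the two low reads */
                    return ((u64)hi << 32) | lo1;

            hi = read_hi(); /* wrapped: re-read high, pair it with lo2 */
            return ((u64)hi << 32) | lo2;
    }
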
3781     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
3782     index 8d7b59689722..5bc9fca67957 100644
3783     --- a/drivers/net/ethernet/intel/igb/igb_main.c
3784     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
3785     @@ -2851,7 +2851,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
3786     return;
3787    
3788     pci_sriov_set_totalvfs(pdev, 7);
3789     - igb_pci_enable_sriov(pdev, max_vfs);
3790     + igb_enable_sriov(pdev, max_vfs);
3791    
3792     #endif /* CONFIG_PCI_IOV */
3793     }
3794     diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
3795     index 2f1264b882b9..d3d094742a7e 100644
3796     --- a/drivers/net/ethernet/via/Kconfig
3797     +++ b/drivers/net/ethernet/via/Kconfig
3798     @@ -17,7 +17,7 @@ if NET_VENDOR_VIA
3799    
3800     config VIA_RHINE
3801     tristate "VIA Rhine support"
3802     - depends on (PCI || OF_IRQ)
3803     + depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
3804     depends on HAS_DMA
3805     select CRC32
3806     select MII
3807     diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
3808     index 85bfa2acb801..32d9ff1b19dc 100644
3809     --- a/drivers/net/wireless/ath/ath10k/htc.c
3810     +++ b/drivers/net/wireless/ath/ath10k/htc.c
3811     @@ -145,8 +145,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
3812     skb_cb->eid = eid;
3813     skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
3814     ret = dma_mapping_error(dev, skb_cb->paddr);
3815     - if (ret)
3816     + if (ret) {
3817     + ret = -EIO;
3818     goto err_credits;
3819     + }
3820    
3821     sg_item.transfer_id = ep->eid;
3822     sg_item.transfer_context = skb;
3823     diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
3824     index a60ef7d1d5fc..7be3ce6e0ffa 100644
3825     --- a/drivers/net/wireless/ath/ath10k/htt_tx.c
3826     +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
3827     @@ -371,8 +371,10 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
3828     skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
3829     DMA_TO_DEVICE);
3830     res = dma_mapping_error(dev, skb_cb->paddr);
3831     - if (res)
3832     + if (res) {
3833     + res = -EIO;
3834     goto err_free_txdesc;
3835     + }
3836    
3837     skb_put(txdesc, len);
3838     cmd = (struct htt_cmd *)txdesc->data;
3839     @@ -456,8 +458,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
3840     skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
3841     DMA_TO_DEVICE);
3842     res = dma_mapping_error(dev, skb_cb->paddr);
3843     - if (res)
3844     + if (res) {
3845     + res = -EIO;
3846     goto err_free_txbuf;
3847     + }
3848    
3849     switch (skb_cb->txmode) {
3850     case ATH10K_HW_TXRX_RAW:
3851     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
3852     index 218b6af63447..0d3c474ff76d 100644
3853     --- a/drivers/net/wireless/ath/ath10k/mac.c
3854     +++ b/drivers/net/wireless/ath/ath10k/mac.c
3855     @@ -591,11 +591,19 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
3856     static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
3857     enum wmi_peer_type peer_type)
3858     {
3859     + struct ath10k_vif *arvif;
3860     + int num_peers = 0;
3861     int ret;
3862    
3863     lockdep_assert_held(&ar->conf_mutex);
3864    
3865     - if (ar->num_peers >= ar->max_num_peers)
3866     + num_peers = ar->num_peers;
3867     +
3868     + /* Each vdev consumes a peer entry as well */
3869     + list_for_each_entry(arvif, &ar->arvifs, list)
3870     + num_peers++;
3871     +
3872     + if (num_peers >= ar->max_num_peers)
3873     return -ENOBUFS;
3874    
3875     ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
3876     @@ -2995,6 +3003,8 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3877     IEEE80211_IFACE_ITER_RESUME_ALL,
3878     ath10k_mac_tx_unlock_iter,
3879     ar);
3880     +
3881     + ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3882     }
3883    
3884     void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3885     @@ -3034,38 +3044,16 @@ static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3886    
3887     lockdep_assert_held(&ar->htt.tx_lock);
3888    
3889     - switch (pause_id) {
3890     - case WMI_TLV_TX_PAUSE_ID_MCC:
3891     - case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
3892     - case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
3893     - case WMI_TLV_TX_PAUSE_ID_AP_PS:
3894     - case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
3895     - switch (action) {
3896     - case WMI_TLV_TX_PAUSE_ACTION_STOP:
3897     - ath10k_mac_vif_tx_lock(arvif, pause_id);
3898     - break;
3899     - case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3900     - ath10k_mac_vif_tx_unlock(arvif, pause_id);
3901     - break;
3902     - default:
3903     - ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3904     - action, arvif->vdev_id);
3905     - break;
3906     - }
3907     + switch (action) {
3908     + case WMI_TLV_TX_PAUSE_ACTION_STOP:
3909     + ath10k_mac_vif_tx_lock(arvif, pause_id);
3910     + break;
3911     + case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3912     + ath10k_mac_vif_tx_unlock(arvif, pause_id);
3913     break;
3914     - case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
3915     - case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
3916     - case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
3917     - case WMI_TLV_TX_PAUSE_ID_HOST:
3918     default:
3919     - /* FIXME: Some pause_ids aren't vdev specific. Instead they
3920     - * target peer_id and tid. Implementing these could improve
3921     - * traffic scheduling fairness across multiple connected
3922     - * stations in AP/IBSS modes.
3923     - */
3924     - ath10k_dbg(ar, ATH10K_DBG_MAC,
3925     - "mac ignoring unsupported tx pause vdev %i id %d\n",
3926     - arvif->vdev_id, pause_id);
3927     + ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n",
3928     + action, arvif->vdev_id);
3929     break;
3930     }
3931     }
3932     @@ -3082,12 +3070,15 @@ static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3933     struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3934     struct ath10k_mac_tx_pause *arg = data;
3935    
3936     + if (arvif->vdev_id != arg->vdev_id)
3937     + return;
3938     +
3939     ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3940     }
3941    
3942     -void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
3943     - enum wmi_tlv_tx_pause_id pause_id,
3944     - enum wmi_tlv_tx_pause_action action)
3945     +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3946     + enum wmi_tlv_tx_pause_id pause_id,
3947     + enum wmi_tlv_tx_pause_action action)
3948     {
3949     struct ath10k_mac_tx_pause arg = {
3950     .vdev_id = vdev_id,
3951     @@ -4080,6 +4071,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
3952     sizeof(arvif->bitrate_mask.control[i].vht_mcs));
3953     }
3954    
3955     + if (ar->num_peers >= ar->max_num_peers) {
3956     + ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
3957     + return -ENOBUFS;
3958     + }
3959     +
3960     if (ar->free_vdev_map == 0) {
3961     ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
3962     ret = -EBUSY;
3963     @@ -4287,6 +4283,11 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
3964     }
3965     }
3966    
3967     + spin_lock_bh(&ar->htt.tx_lock);
3968     + if (!ar->tx_paused)
3969     + ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3970     + spin_unlock_bh(&ar->htt.tx_lock);
3971     +
3972     mutex_unlock(&ar->conf_mutex);
3973     return 0;
3974    
3975     @@ -5561,6 +5562,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3976     return ret;
3977     }
3978    
3979     +static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3980     +{
3981     + /* Even though there's a WMI enum for fragmentation threshold, no known
3982     + * firmware actually implements it. Moreover it is not possible to leave
3983     + * frame fragmentation to mac80211 because firmware clears the "more
3984     + * fragments" bit in frame control making it impossible for remote
3985     + * devices to reassemble frames.
3986     + *
3987     + * Hence implement a dummy callback just to say fragmentation isn't
3988     + * supported. This effectively prevents mac80211 from doing frame
3989     + * fragmentation in software.
3990     + */
3991     + return -EOPNOTSUPP;
3992     +}
3993     +
3994     static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3995     u32 queues, bool drop)
3996     {
3997     @@ -6395,6 +6411,7 @@ static const struct ieee80211_ops ath10k_ops = {
3998     .remain_on_channel = ath10k_remain_on_channel,
3999     .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
4000     .set_rts_threshold = ath10k_set_rts_threshold,
4001     + .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
4002     .flush = ath10k_flush,
4003     .tx_last_beacon = ath10k_tx_last_beacon,
4004     .set_antenna = ath10k_set_antenna,
4005     diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
4006     index b291f063705c..e3cefe4c7cfd 100644
4007     --- a/drivers/net/wireless/ath/ath10k/mac.h
4008     +++ b/drivers/net/wireless/ath/ath10k/mac.h
4009     @@ -61,9 +61,9 @@ int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
4010    
4011     void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
4012     void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
4013     -void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id,
4014     - enum wmi_tlv_tx_pause_id pause_id,
4015     - enum wmi_tlv_tx_pause_action action);
4016     +void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
4017     + enum wmi_tlv_tx_pause_id pause_id,
4018     + enum wmi_tlv_tx_pause_action action);
4019    
4020     u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
4021     u8 hw_rate);
4022     diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
4023     index ea656e011a96..8c5cc1facc45 100644
4024     --- a/drivers/net/wireless/ath/ath10k/pci.c
4025     +++ b/drivers/net/wireless/ath/ath10k/pci.c
4026     @@ -1546,8 +1546,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
4027    
4028     req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
4029     ret = dma_mapping_error(ar->dev, req_paddr);
4030     - if (ret)
4031     + if (ret) {
4032     + ret = -EIO;
4033     goto err_dma;
4034     + }
4035    
4036     if (resp && resp_len) {
4037     tresp = kzalloc(*resp_len, GFP_KERNEL);
4038     @@ -1559,8 +1561,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
4039     resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
4040     DMA_FROM_DEVICE);
4041     ret = dma_mapping_error(ar->dev, resp_paddr);
4042     - if (ret)
4043     + if (ret) {
4044     + ret = EIO;
4045     + ret = -EIO;
4046     + }
4047    
4048     xfer.wait_for_resp = true;
4049     xfer.resp_len = 0;
4050     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4051     index 8fdba3865c96..6f477e83099d 100644
4052     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4053     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4054     @@ -377,12 +377,34 @@ static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
4055     "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
4056     pause_id, action, vdev_map, peer_id, tid_map);
4057    
4058     - for (vdev_id = 0; vdev_map; vdev_id++) {
4059     - if (!(vdev_map & BIT(vdev_id)))
4060     - continue;
4061     -
4062     - vdev_map &= ~BIT(vdev_id);
4063     - ath10k_mac_handle_tx_pause(ar, vdev_id, pause_id, action);
4064     + switch (pause_id) {
4065     + case WMI_TLV_TX_PAUSE_ID_MCC:
4066     + case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
4067     + case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
4068     + case WMI_TLV_TX_PAUSE_ID_AP_PS:
4069     + case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
4070     + for (vdev_id = 0; vdev_map; vdev_id++) {
4071     + if (!(vdev_map & BIT(vdev_id)))
4072     + continue;
4073     +
4074     + vdev_map &= ~BIT(vdev_id);
4075     + ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
4076     + action);
4077     + }
4078     + break;
4079     + case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
4080     + case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
4081     + case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
4082     + case WMI_TLV_TX_PAUSE_ID_HOST:
4083     + ath10k_dbg(ar, ATH10K_DBG_MAC,
4084     + "mac ignoring unsupported tx pause id %d\n",
4085     + pause_id);
4086     + break;
4087     + default:
4088     + ath10k_dbg(ar, ATH10K_DBG_MAC,
4089     + "mac ignoring unknown tx pause id %d\n",
4090     + pause_id);
4091     + break;
4092     }
4093    
4094     kfree(tb);
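
The vdev_map walk above clears one bit per pass; an equivalent formulation with the kernel's bitmap helper, shown only as a sketch:

    unsigned long map = vdev_map;   /* widened copy for the bitmap helpers */
    unsigned int vdev_id;

    for_each_set_bit(vdev_id, &map, BITS_PER_LONG)
            ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id, action);
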
4095     diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
4096     index 6c046c244705..8dd84c160cfd 100644
4097     --- a/drivers/net/wireless/ath/ath10k/wmi.c
4098     +++ b/drivers/net/wireless/ath/ath10k/wmi.c
4099     @@ -2391,6 +2391,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
4100     ath10k_warn(ar, "failed to map beacon: %d\n",
4101     ret);
4102     dev_kfree_skb_any(bcn);
4103     + ret = -EIO;
4104     goto skip;
4105     }
4106    
4107     diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
4108     index 1c6788aecc62..40d72312f3df 100644
4109     --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
4110     +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
4111     @@ -203,8 +203,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
4112    
4113     /* Copy firmware into DMA-accessible memory */
4114     fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
4115     - if (!fw)
4116     - return -ENOMEM;
4117     + if (!fw) {
4118     + status = -ENOMEM;
4119     + goto out;
4120     + }
4121     len = fw_entry->size;
4122    
4123     if (len % 4)
4124     @@ -217,6 +219,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
4125    
4126     status = rsi_copy_to_card(common, fw, len, num_blocks);
4127     kfree(fw);
4128     +
4129     +out:
4130     release_firmware(fw_entry);
4131     return status;
4132     }
4133     diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
4134     index 30c2cf7fa93b..de4900862836 100644
4135     --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
4136     +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
4137     @@ -148,8 +148,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
4138    
4139     /* Copy firmware into DMA-accessible memory */
4140     fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
4141     - if (!fw)
4142     - return -ENOMEM;
4143     + if (!fw) {
4144     + status = -ENOMEM;
4145     + goto out;
4146     + }
4147     len = fw_entry->size;
4148    
4149     if (len % 4)
4150     @@ -162,6 +164,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
4151    
4152     status = rsi_copy_to_card(common, fw, len, num_blocks);
4153     kfree(fw);
4154     +
4155     +out:
4156     release_firmware(fw_entry);
4157     return status;
4158     }
4159     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
4160     index f948c46d5132..5ff0cfd142ee 100644
4161     --- a/drivers/net/xen-netfront.c
4162     +++ b/drivers/net/xen-netfront.c
4163     @@ -1348,7 +1348,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
4164     queue->tx_evtchn = queue->rx_evtchn = 0;
4165     queue->tx_irq = queue->rx_irq = 0;
4166    
4167     - napi_synchronize(&queue->napi);
4168     + if (netif_running(info->netdev))
4169     + napi_synchronize(&queue->napi);
4170    
4171     xennet_release_tx_bufs(queue);
4172     xennet_release_rx_bufs(queue);
4173     diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
4174     index ade9eb917a4d..b796d1bd8988 100644
4175     --- a/drivers/nvdimm/pmem.c
4176     +++ b/drivers/nvdimm/pmem.c
4177     @@ -86,6 +86,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
4178     struct pmem_device *pmem = bdev->bd_disk->private_data;
4179    
4180     pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
4181     + if (rw & WRITE)
4182     + wmb_pmem();
4183     page_endio(page, rw & WRITE, 0);
4184    
4185     return 0;
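
The pmem hunk adds the missing persistence barrier: pmem_do_bvec() issues the stores, but for writes the data must reach the persistence domain before page_endio() signals completion. A kernel-style sketch of the ordering rule against the 4.2-era linux/pmem.h API (a sketch only, not the driver's exact code):

    #include <linux/pmem.h>

    static void pmem_write_then_complete(void __pmem *dst, const void *src,
                                         size_t n)
    {
            memcpy_to_pmem(dst, src, n);   /* non-temporal stores */
            wmb_pmem();                    /* drain to the persistence domain */
            /* only now complete, e.g. page_endio(page, WRITE, 0) */
    }
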
4186     diff --git a/drivers/pci/access.c b/drivers/pci/access.c
4187     index b965c12168b7..502a82ca1db0 100644
4188     --- a/drivers/pci/access.c
4189     +++ b/drivers/pci/access.c
4190     @@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
4191     static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
4192     void *arg)
4193     {
4194     - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
4195     + struct pci_dev *tdev = pci_get_slot(dev->bus,
4196     + PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
4197     ssize_t ret;
4198    
4199     if (!tdev)
4200     @@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
4201     static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
4202     const void *arg)
4203     {
4204     - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
4205     + struct pci_dev *tdev = pci_get_slot(dev->bus,
4206     + PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
4207     ssize_t ret;
4208    
4209     if (!tdev)
4210     @@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
4211     .release = pci_vpd_pci22_release,
4212     };
4213    
4214     -static int pci_vpd_f0_dev_check(struct pci_dev *dev)
4215     -{
4216     - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
4217     - int ret = 0;
4218     -
4219     - if (!tdev)
4220     - return -ENODEV;
4221     - if (!tdev->vpd || !tdev->multifunction ||
4222     - dev->class != tdev->class || dev->vendor != tdev->vendor ||
4223     - dev->device != tdev->device)
4224     - ret = -ENODEV;
4225     -
4226     - pci_dev_put(tdev);
4227     - return ret;
4228     -}
4229     -
4230     int pci_vpd_pci22_init(struct pci_dev *dev)
4231     {
4232     struct pci_vpd_pci22 *vpd;
4233     @@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
4234     cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
4235     if (!cap)
4236     return -ENODEV;
4237     - if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
4238     - int ret = pci_vpd_f0_dev_check(dev);
4239    
4240     - if (ret)
4241     - return ret;
4242     - }
4243     vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
4244     if (!vpd)
4245     return -ENOMEM;
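
The two pci_vpd_f0_* hunks above fix the same lookup key: PCI_SLOT(dev->devfn) yields a bare slot number, while pci_get_slot() expects a devfn, so the old code only found the right device for functions in slot 0. PCI_DEVFN(PCI_SLOT(devfn), 0) re-encodes function 0 of the same slot. A small userspace check of the encode/decode arithmetic (macros copied from include/linux/pci.h):

    #include <stdio.h>

    #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
    #define PCI_SLOT(devfn)       (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)       ((devfn) & 0x07)

    int main(void)
    {
        unsigned int devfn = PCI_DEVFN(3, 2);   /* slot 3, function 2 */
        unsigned int f0 = PCI_DEVFN(PCI_SLOT(devfn), 0);

        /* Buggy key: the raw slot number (3) decodes as slot 0, func 3. */
        printf("bad key  %#x -> slot %u func %u\n", PCI_SLOT(devfn),
               PCI_SLOT(PCI_SLOT(devfn)), PCI_FUNC(PCI_SLOT(devfn)));
        /* Fixed key: function 0 in the same slot. */
        printf("good key %#x -> slot %u func %u\n", f0,
               PCI_SLOT(f0), PCI_FUNC(f0));
        return 0;
    }
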
4246     diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
4247     index 6fbd3f2b5992..d3346d23963b 100644
4248     --- a/drivers/pci/bus.c
4249     +++ b/drivers/pci/bus.c
4250     @@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
4251    
4252     res->start = start;
4253     res->end = end;
4254     + res->flags &= ~IORESOURCE_UNSET;
4255     + orig_res.flags &= ~IORESOURCE_UNSET;
4256     dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
4257     &orig_res, res);
4258    
4259     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4260     index dbd13854f21e..6b1c6a915daa 100644
4261     --- a/drivers/pci/quirks.c
4262     +++ b/drivers/pci/quirks.c
4263     @@ -1906,11 +1906,27 @@ static void quirk_netmos(struct pci_dev *dev)
4264     DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
4265     PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
4266    
4267     +/*
4268     + * Quirk non-zero PCI functions to route VPD access through function 0 for
4269     + * devices that share VPD resources between functions. The functions are
4270     + * expected to be identical devices.
4271     + */
4272     static void quirk_f0_vpd_link(struct pci_dev *dev)
4273     {
4274     - if (!dev->multifunction || !PCI_FUNC(dev->devfn))
4275     + struct pci_dev *f0;
4276     +
4277     + if (!PCI_FUNC(dev->devfn))
4278     return;
4279     - dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
4280     +
4281     + f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
4282     + if (!f0)
4283     + return;
4284     +
4285     + if (f0->vpd && dev->class == f0->class &&
4286     + dev->vendor == f0->vendor && dev->device == f0->device)
4287     + dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
4288     +
4289     + pci_dev_put(f0);
4290     }
4291     DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
4292     PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
4293     diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
4294     index 803945259da8..42861cc70158 100644
4295     --- a/drivers/pcmcia/sa1100_generic.c
4296     +++ b/drivers/pcmcia/sa1100_generic.c
4297     @@ -93,7 +93,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
4298     for (i = 0; i < sinfo->nskt; i++)
4299     soc_pcmcia_remove_one(&sinfo->skt[i]);
4300    
4301     - clk_put(sinfo->clk);
4302     kfree(sinfo);
4303     return 0;
4304     }
4305     diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
4306     index cf6de2c2b329..553d70a67f80 100644
4307     --- a/drivers/pcmcia/sa11xx_base.c
4308     +++ b/drivers/pcmcia/sa11xx_base.c
4309     @@ -222,7 +222,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
4310     int i, ret = 0;
4311     struct clk *clk;
4312    
4313     - clk = clk_get(dev, NULL);
4314     + clk = devm_clk_get(dev, NULL);
4315     if (IS_ERR(clk))
4316     return PTR_ERR(clk);
4317    
4318     @@ -251,7 +251,6 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
4319     if (ret) {
4320     while (--i >= 0)
4321     soc_pcmcia_remove_one(&sinfo->skt[i]);
4322     - clk_put(clk);
4323     kfree(sinfo);
4324     } else {
4325     dev_set_drvdata(dev, sinfo);
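
The two sa11xx hunks convert the socket clock to a device-managed resource: devm_clk_get() ties the clock reference to the device's lifetime, so the explicit clk_put() in the remove path (and the one previously needed on the probe error path) can simply be deleted rather than leaked. A kernel-style sketch of the devres pattern:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int probe_sketch(struct device *dev)
    {
            struct clk *clk = devm_clk_get(dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);
            /* No clk_put() anywhere: devres drops the reference when
             * probe fails or the device is unbound. */
            return 0;
    }
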
4326     diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
4327     index 3ad7b1fa24ce..6f4f310de946 100644
4328     --- a/drivers/platform/x86/toshiba_acpi.c
4329     +++ b/drivers/platform/x86/toshiba_acpi.c
4330     @@ -2408,11 +2408,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
4331     if (error)
4332     return error;
4333    
4334     - error = toshiba_hotkey_event_type_get(dev, &events_type);
4335     - if (error) {
4336     - pr_err("Unable to query Hotkey Event Type\n");
4337     - return error;
4338     - }
4339     + if (toshiba_hotkey_event_type_get(dev, &events_type))
4340     + pr_notice("Unable to query Hotkey Event Type\n");
4341     +
4342     dev->hotkey_event_type = events_type;
4343    
4344     dev->hotkey_dev = input_allocate_device();
4345     diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
4346     index 7f3d389bd601..a67eeace6a89 100644
4347     --- a/drivers/power/avs/Kconfig
4348     +++ b/drivers/power/avs/Kconfig
4349     @@ -13,7 +13,7 @@ menuconfig POWER_AVS
4350    
4351     config ROCKCHIP_IODOMAIN
4352     tristate "Rockchip IO domain support"
4353     - depends on ARCH_ROCKCHIP && OF
4354     + depends on POWER_AVS && ARCH_ROCKCHIP && OF
4355     help
4356     Say y here to enable support io domains on Rockchip SoCs. It is
4357     necessary for the io domain setting of the SoC to match the
4358     diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
4359     index 646829132b59..1dea0e8353e0 100644
4360     --- a/drivers/regulator/axp20x-regulator.c
4361     +++ b/drivers/regulator/axp20x-regulator.c
4362     @@ -192,9 +192,9 @@ static const struct regulator_desc axp22x_regulators[] = {
4363     AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20,
4364     AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
4365     AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20,
4366     - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)),
4367     + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
4368     AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
4369     - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)),
4370     + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
4371     /* secondary switchable output of DCDC1 */
4372     AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100,
4373     AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
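
The axp20x hunk is an off-by-one in the enable-bit table: DCDC4 was wired to BIT(3) of AXP22X_PWR_OUT_CTRL1, which already belongs to DCDC3, and DCDC5 to DCDC4's BIT(4), so toggling one regulator flipped its neighbour. A trivial check of the BIT() arithmetic with the corrected assignments from the hunk:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
        printf("DCDC3 enable mask 0x%02lx\n", BIT(3));  /* 0x08 */
        printf("DCDC4 enable mask 0x%02lx\n", BIT(4));  /* 0x10, was 0x08 */
        printf("DCDC5 enable mask 0x%02lx\n", BIT(5));  /* 0x20, was 0x10 */
        return 0;
    }
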
4374     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
4375     index 78387a6cbae5..5081533858f1 100644
4376     --- a/drivers/regulator/core.c
4377     +++ b/drivers/regulator/core.c
4378     @@ -1376,15 +1376,19 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
4379     return 0;
4380    
4381     r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
4382     - if (ret == -ENODEV) {
4383     - /*
4384     - * No supply was specified for this regulator and
4385     - * there will never be one.
4386     - */
4387     - return 0;
4388     - }
4389     -
4390     if (!r) {
4391     + if (ret == -ENODEV) {
4392     + /*
4393     + * No supply was specified for this regulator and
4394     + * there will never be one.
4395     + */
4396     + return 0;
4397     + }
4398     +
4399     + /* Did the lookup explicitly defer for us? */
4400     + if (ret == -EPROBE_DEFER)
4401     + return ret;
4402     +
4403     if (have_full_constraints()) {
4404     r = dummy_regulator_rdev;
4405     } else {
4406     diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
4407     index add419d6ff34..a56a7b243e91 100644
4408     --- a/drivers/scsi/3w-9xxx.c
4409     +++ b/drivers/scsi/3w-9xxx.c
4410     @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
4411     .llseek = noop_llseek,
4412     };
4413    
4414     +/*
4415     + * The controllers use an inline buffer instead of a mapped SGL for small,
4416     + * single entry buffers. Note that we treat a zero-length transfer like
4417     + * a mapped SGL.
4418     + */
4419     +static bool twa_command_mapped(struct scsi_cmnd *cmd)
4420     +{
4421     + return scsi_sg_count(cmd) != 1 ||
4422     + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
4423     +}
4424     +
4425     /* This function will complete an aen request from the isr */
4426     static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
4427     {
4428     @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
4429     }
4430    
4431     /* Now complete the io */
4432     - scsi_dma_unmap(cmd);
4433     + if (twa_command_mapped(cmd))
4434     + scsi_dma_unmap(cmd);
4435     cmd->scsi_done(cmd);
4436     tw_dev->state[request_id] = TW_S_COMPLETED;
4437     twa_free_request_id(tw_dev, request_id);
4438     @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
4439     struct scsi_cmnd *cmd = tw_dev->srb[i];
4440    
4441     cmd->result = (DID_RESET << 16);
4442     - scsi_dma_unmap(cmd);
4443     + if (twa_command_mapped(cmd))
4444     + scsi_dma_unmap(cmd);
4445     cmd->scsi_done(cmd);
4446     }
4447     }
4448     @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
4449     retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
4450     switch (retval) {
4451     case SCSI_MLQUEUE_HOST_BUSY:
4452     - scsi_dma_unmap(SCpnt);
4453     + if (twa_command_mapped(SCpnt))
4454     + scsi_dma_unmap(SCpnt);
4455     twa_free_request_id(tw_dev, request_id);
4456     break;
4457     case 1:
4458     SCpnt->result = (DID_ERROR << 16);
4459     - scsi_dma_unmap(SCpnt);
4460     + if (twa_command_mapped(SCpnt))
4461     + scsi_dma_unmap(SCpnt);
4462     done(SCpnt);
4463     tw_dev->state[request_id] = TW_S_COMPLETED;
4464     twa_free_request_id(tw_dev, request_id);
4465     @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
4466     /* Map sglist from scsi layer to cmd packet */
4467    
4468     if (scsi_sg_count(srb)) {
4469     - if ((scsi_sg_count(srb) == 1) &&
4470     - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
4471     + if (!twa_command_mapped(srb)) {
4472     if (srb->sc_data_direction == DMA_TO_DEVICE ||
4473     srb->sc_data_direction == DMA_BIDIRECTIONAL)
4474     scsi_sg_copy_to_buffer(srb,
4475     @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
4476     {
4477     struct scsi_cmnd *cmd = tw_dev->srb[request_id];
4478    
4479     - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
4480     + if (!twa_command_mapped(cmd) &&
4481     (cmd->sc_data_direction == DMA_FROM_DEVICE ||
4482     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
4483     if (scsi_sg_count(cmd) == 1) {
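
The 3w-9xxx series replaces scattered, inconsistent copies of the "small single-SG buffer" test with one predicate, twa_command_mapped(); the completion path, for instance, tested only the buffer length and could unmap a command that was never DMA-mapped. A userspace sketch of the factored predicate (the TW_MIN_SGL_LENGTH value here is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define TW_MIN_SGL_LENGTH 128   /* illustrative; the driver defines its own */

    struct cmd { int sg_count; unsigned int bufflen; };

    /* One rule, shared by the map, unmap and completion paths. */
    static bool command_mapped(const struct cmd *c)
    {
        return c->sg_count != 1 || c->bufflen >= TW_MIN_SGL_LENGTH;
    }

    int main(void)
    {
        struct cmd small = { .sg_count = 1, .bufflen = 64 };
        struct cmd multi = { .sg_count = 4, .bufflen = 64 };

        printf("small single-entry buffer mapped? %d\n", command_mapped(&small));
        printf("multi-entry buffer mapped?        %d\n", command_mapped(&multi));
        return 0;
    }
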
4484     diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
4485     index 1dafeb43333b..cab4e98b2b0e 100644
4486     --- a/drivers/scsi/hpsa.c
4487     +++ b/drivers/scsi/hpsa.c
4488     @@ -5104,7 +5104,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4489     int rc;
4490     struct ctlr_info *h;
4491     struct hpsa_scsi_dev_t *dev;
4492     - char msg[40];
4493     + char msg[48];
4494    
4495     /* find the controller to which the command to be aborted was sent */
4496     h = sdev_to_hba(scsicmd->device);
4497     @@ -5122,16 +5122,18 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4498    
4499     /* if controller locked up, we can guarantee command won't complete */
4500     if (lockup_detected(h)) {
4501     - sprintf(msg, "cmd %d RESET FAILED, lockup detected",
4502     - hpsa_get_cmd_index(scsicmd));
4503     + snprintf(msg, sizeof(msg),
4504     + "cmd %d RESET FAILED, lockup detected",
4505     + hpsa_get_cmd_index(scsicmd));
4506     hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
4507     return FAILED;
4508     }
4509    
4510     /* this reset request might be the result of a lockup; check */
4511     if (detect_controller_lockup(h)) {
4512     - sprintf(msg, "cmd %d RESET FAILED, new lockup detected",
4513     - hpsa_get_cmd_index(scsicmd));
4514     + snprintf(msg, sizeof(msg),
4515     + "cmd %d RESET FAILED, new lockup detected",
4516     + hpsa_get_cmd_index(scsicmd));
4517     hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
4518     return FAILED;
4519     }
4520     @@ -5145,7 +5147,8 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4521     /* send a reset to the SCSI LUN which the command was sent to */
4522     rc = hpsa_do_reset(h, dev, dev->scsi3addr, HPSA_RESET_TYPE_LUN,
4523     DEFAULT_REPLY_QUEUE);
4524     - sprintf(msg, "reset %s", rc == 0 ? "completed successfully" : "failed");
4525     + snprintf(msg, sizeof(msg), "reset %s",
4526     + rc == 0 ? "completed successfully" : "failed");
4527     hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
4528     return rc == 0 ? SUCCESS : FAILED;
4529     }
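
The hpsa hunk closes a stack-buffer overrun: sprintf() into the fixed msg[40] had no bound, and "cmd %d RESET FAILED, new lockup detected" plus a large command index does not fit in 40 bytes. The fix widens the buffer and switches to snprintf(), which truncates instead of overflowing. A short demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        char msg[16];

        /* snprintf() never writes past sizeof(msg) and always
         * NUL-terminates; the return value is the length it wanted,
         * so ret >= sizeof(msg) signals truncation. */
        int ret = snprintf(msg, sizeof(msg),
                           "cmd %d RESET FAILED, lockup detected", 1234);
        printf("stored \"%s\" (wanted %d bytes)\n", msg, ret + 1);
        return 0;
    }
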
4530     diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
4531     index a9aa38903efe..cccab6188328 100644
4532     --- a/drivers/scsi/ipr.c
4533     +++ b/drivers/scsi/ipr.c
4534     @@ -4554,7 +4554,7 @@ static ssize_t ipr_store_raw_mode(struct device *dev,
4535     spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4536     res = (struct ipr_resource_entry *)sdev->hostdata;
4537     if (res) {
4538     - if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
4539     + if (ipr_is_af_dasd_device(res)) {
4540     res->raw_mode = simple_strtoul(buf, NULL, 10);
4541     len = strlen(buf);
4542     if (res->sdev)
4543     diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
4544     index 6457a8a0db9c..bf3d801ac5f9 100644
4545     --- a/drivers/scsi/scsi_error.c
4546     +++ b/drivers/scsi/scsi_error.c
4547     @@ -2169,8 +2169,17 @@ int scsi_error_handler(void *data)
4548     * We never actually get interrupted because kthread_run
4549     * disables signal delivery for the created thread.
4550     */
4551     - while (!kthread_should_stop()) {
4552     + while (true) {
4553     + /*
4554     + * The sequence in kthread_stop() sets the stop flag first,
4555     + * then wakes the process. To avoid missed wakeups, the task
4556     + * should always be in a non-running state before the stop
4557     + * flag is checked.
4558     + */
4559     set_current_state(TASK_INTERRUPTIBLE);
4560     + if (kthread_should_stop())
4561     + break;
4562     +
4563     if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
4564     shost->host_failed != atomic_read(&shost->host_busy)) {
4565     SCSI_LOG_ERROR_RECOVERY(1,
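
The scsi_error hunk fixes the classic kthread shutdown race: kthread_stop() sets the stop flag and then wakes the task, so a thread that tests kthread_should_stop() while still TASK_RUNNING and only afterwards calls set_current_state() can miss the wake-up and sleep forever. The fixed ordering (set the sleep state first, then test the flag, then schedule) is the canonical kthread loop; a kernel-style sketch, compilable only in-tree:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int worker(void *data)
    {
            while (true) {
                    /* Declare the sleep *before* testing the flag, so a
                     * concurrent kthread_stop() either has its flag seen
                     * here, or its wake_up_process() makes the schedule()
                     * below return immediately. */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (kthread_should_stop())
                            break;

                    schedule();                      /* wait for work */
                    __set_current_state(TASK_RUNNING);
                    /* ... drain whatever work arrived ... */
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }
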
4566     diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
4567     index c9357bb393d3..744596464d33 100644
4568     --- a/drivers/spi/spi-bcm2835.c
4569     +++ b/drivers/spi/spi-bcm2835.c
4570     @@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
4571     /* otherwise we only allow transfers within the same page
4572     * to avoid wasting time on dma_mapping when it is not practical
4573     */
4574     - if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
4575     + if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
4576     dev_warn_once(&spi->dev,
4577     "Unaligned spi tx-transfer bridging page\n");
4578     return false;
4579     }
4580     - if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
4581     + if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
4582     dev_warn_once(&spi->dev,
4583     - "Unaligned spi tx-transfer bridging page\n");
4584     + "Unaligned spi rx-transfer bridging page\n");
4585     return false;
4586     }
4587    
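
The spi-bcm2835 hunk fixes inverted mask logic: addr & PAGE_MASK extracts the page base (the high bits), not the intra-page offset, so the old "does this transfer cross a page?" test compared the wrong quantity; the offset is addr & (PAGE_SIZE - 1), which the kernel also provides as offset_in_page(). A userspace check of the arithmetic with a 4 KiB page:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uintptr_t buf = 0x12345678;   /* arbitrary example address */
        unsigned long len = 512;

        printf("page base %#lx\n", (unsigned long)(buf & PAGE_MASK));
        printf("offset    %#lx\n", (unsigned long)(buf & (PAGE_SIZE - 1)));
        /* The page-crossing test must use the offset: */
        printf("fits in one page? %d\n",
               (buf & (PAGE_SIZE - 1)) + len <= PAGE_SIZE);
        return 0;
    }
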
4588     diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
4589     index 7293d6d875c5..8e4b1a7c37ce 100644
4590     --- a/drivers/spi/spi-pxa2xx.c
4591     +++ b/drivers/spi/spi-pxa2xx.c
4592     @@ -643,6 +643,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
4593     if (!(sccr1_reg & SSCR1_TIE))
4594     mask &= ~SSSR_TFS;
4595    
4596     + /* Ignore RX timeout interrupt if it is disabled */
4597     + if (!(sccr1_reg & SSCR1_TINTE))
4598     + mask &= ~SSSR_TINT;
4599     +
4600     if (!(status & mask))
4601     return IRQ_NONE;
4602    
4603     diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
4604     index 2e32ea2f194f..be6155cba9de 100644
4605     --- a/drivers/spi/spi-xtensa-xtfpga.c
4606     +++ b/drivers/spi/spi-xtensa-xtfpga.c
4607     @@ -34,13 +34,13 @@ struct xtfpga_spi {
4608     static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
4609     unsigned addr, u32 val)
4610     {
4611     - iowrite32(val, spi->regs + addr);
4612     + __raw_writel(val, spi->regs + addr);
4613     }
4614    
4615     static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
4616     unsigned addr)
4617     {
4618     - return ioread32(spi->regs + addr);
4619     + return __raw_readl(spi->regs + addr);
4620     }
4621    
4622     static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
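
The xtfpga hunk swaps accessor families: iowrite32()/ioread32() are defined to access little-endian registers and therefore byte-swap on a big-endian CPU, while this block's registers are apparently native-endian (the evident motivation for the change), so the driver moves to the raw accessors. A kernel-style sketch of the distinction; note that the __raw_* variants also omit the ordering barriers the io* variants imply, so the driver must handle ordering itself:

    #include <linux/io.h>

    /* Native-endian MMIO block: no swap wanted. */
    static inline void regs_write(void __iomem *regs, unsigned int off, u32 val)
    {
            __raw_writel(val, regs + off);   /* no byte swap, no barrier */
    }

    static inline u32 regs_read(void __iomem *regs, unsigned int off)
    {
            return __raw_readl(regs + off);
    }
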
4623     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
4624     index cf8b91b23a76..9ce2f156d382 100644
4625     --- a/drivers/spi/spi.c
4626     +++ b/drivers/spi/spi.c
4627     @@ -1437,8 +1437,7 @@ static struct class spi_master_class = {
4628     *
4629     * The caller is responsible for assigning the bus number and initializing
4630     * the master's methods before calling spi_register_master(); and (after errors
4631     - * adding the device) calling spi_master_put() and kfree() to prevent a memory
4632     - * leak.
4633     + * adding the device) calling spi_master_put() to prevent a memory leak.
4634     */
4635     struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
4636     {
4637     diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
4638     index c7de64171c45..97aad8f91c2f 100644
4639     --- a/drivers/spi/spidev.c
4640     +++ b/drivers/spi/spidev.c
4641     @@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
4642     kfree(spidev->rx_buffer);
4643     spidev->rx_buffer = NULL;
4644    
4645     - spidev->speed_hz = spidev->spi->max_speed_hz;
4646     + if (spidev->spi)
4647     + spidev->speed_hz = spidev->spi->max_speed_hz;
4648    
4649     /* ... after we unbound from the underlying device? */
4650     spin_lock_irq(&spidev->spi_lock);
4651     diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
4652     index 6f4811263557..b71b1f2d98d5 100644
4653     --- a/drivers/staging/android/ion/ion.c
4654     +++ b/drivers/staging/android/ion/ion.c
4655     @@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
4656     mutex_unlock(&client->lock);
4657     goto end;
4658     }
4659     - mutex_unlock(&client->lock);
4660    
4661     handle = ion_handle_create(client, buffer);
4662     - if (IS_ERR(handle))
4663     + if (IS_ERR(handle)) {
4664     + mutex_unlock(&client->lock);
4665     goto end;
4666     + }
4667    
4668     - mutex_lock(&client->lock);
4669     ret = ion_handle_add(client, handle);
4670     mutex_unlock(&client->lock);
4671     if (ret) {
4672     diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
4673     index 4299cf45f947..5e1f16c36b49 100644
4674     --- a/drivers/staging/speakup/fakekey.c
4675     +++ b/drivers/staging/speakup/fakekey.c
4676     @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
4677     __this_cpu_write(reporting_keystroke, true);
4678     input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
4679     input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
4680     + input_sync(virt_keyboard);
4681     __this_cpu_write(reporting_keystroke, false);
4682    
4683     /* reenable preemption */
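
The speakup hunk adds the missing event-frame terminator: input events are batched per device and handlers only process them at an EV_SYN/SYN_REPORT boundary, which input_sync() emits, so without it the fake key press/release could sit undelivered. A kernel-style sketch of the canonical injection sequence:

    #include <linux/input.h>

    static void inject_key(struct input_dev *dev, unsigned int code)
    {
            input_report_key(dev, code, 1);   /* press */
            input_report_key(dev, code, 0);   /* release */
            input_sync(dev);                  /* EV_SYN: flush the packet */
    }
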
4684     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
4685     index fd092909a457..56cf1996f30f 100644
4686     --- a/drivers/target/iscsi/iscsi_target.c
4687     +++ b/drivers/target/iscsi/iscsi_target.c
4688     @@ -341,7 +341,6 @@ static struct iscsi_np *iscsit_get_np(
4689    
4690     struct iscsi_np *iscsit_add_np(
4691     struct __kernel_sockaddr_storage *sockaddr,
4692     - char *ip_str,
4693     int network_transport)
4694     {
4695     struct sockaddr_in *sock_in;
4696     @@ -370,11 +369,9 @@ struct iscsi_np *iscsit_add_np(
4697     np->np_flags |= NPF_IP_NETWORK;
4698     if (sockaddr->ss_family == AF_INET6) {
4699     sock_in6 = (struct sockaddr_in6 *)sockaddr;
4700     - snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
4701     np->np_port = ntohs(sock_in6->sin6_port);
4702     } else {
4703     sock_in = (struct sockaddr_in *)sockaddr;
4704     - sprintf(np->np_ip, "%s", ip_str);
4705     np->np_port = ntohs(sock_in->sin_port);
4706     }
4707    
4708     @@ -411,8 +408,8 @@ struct iscsi_np *iscsit_add_np(
4709     list_add_tail(&np->np_list, &g_np_list);
4710     mutex_unlock(&np_lock);
4711    
4712     - pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
4713     - np->np_ip, np->np_port, np->np_transport->name);
4714     + pr_debug("CORE[0] - Added Network Portal: %pISc:%hu on %s\n",
4715     + &np->np_sockaddr, np->np_port, np->np_transport->name);
4716    
4717     return np;
4718     }
4719     @@ -481,8 +478,8 @@ int iscsit_del_np(struct iscsi_np *np)
4720     list_del(&np->np_list);
4721     mutex_unlock(&np_lock);
4722    
4723     - pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
4724     - np->np_ip, np->np_port, np->np_transport->name);
4725     + pr_debug("CORE[0] - Removed Network Portal: %pISc:%hu on %s\n",
4726     + &np->np_sockaddr, np->np_port, np->np_transport->name);
4727    
4728     iscsit_put_transport(np->np_transport);
4729     kfree(np);
4730     @@ -3464,7 +3461,6 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
4731     tpg_np_list) {
4732     struct iscsi_np *np = tpg_np->tpg_np;
4733     bool inaddr_any = iscsit_check_inaddr_any(np);
4734     - char *fmt_str;
4735    
4736     if (np->np_network_transport != network_transport)
4737     continue;
4738     @@ -3492,15 +3488,18 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
4739     }
4740     }
4741    
4742     - if (np->np_sockaddr.ss_family == AF_INET6)
4743     - fmt_str = "TargetAddress=[%s]:%hu,%hu";
4744     - else
4745     - fmt_str = "TargetAddress=%s:%hu,%hu";
4746     -
4747     - len = sprintf(buf, fmt_str,
4748     - inaddr_any ? conn->local_ip : np->np_ip,
4749     - np->np_port,
4750     - tpg->tpgt);
4751     + if (inaddr_any) {
4752     + len = sprintf(buf, "TargetAddress="
4753     + "%s:%hu,%hu",
4754     + conn->local_ip,
4755     + np->np_port,
4756     + tpg->tpgt);
4757     + } else {
4758     + len = sprintf(buf, "TargetAddress="
4759     + "%pISpc,%hu",
4760     + &np->np_sockaddr,
4761     + tpg->tpgt);
4762     + }
4763     len += 1;
4764    
4765     if ((len + payload_len) > buffer_len) {
4766     diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
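
This sendtargets hunk, like the rest of the iscsi series below, drops the cached np->np_ip string in favour of printing the sockaddr directly with the %pIS printk extension: the "c" flag compresses IPv6, and "p" appends the port, bracketing IPv6 addresses automatically, which is why the separate AF_INET6/AF_INET format strings could be deleted. A kernel-style sketch of the specifiers:

    #include <linux/printk.h>
    #include <linux/socket.h>

    static void log_portal(struct __kernel_sockaddr_storage *ss,
                           unsigned short port)
    {
            pr_debug("portal %pISc:%hu\n", ss, port);  /* address only */
            pr_debug("portal %pISpc\n", ss);           /* addr+port, [v6]:p */
    }
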
4767     index 7d0f9c00d9c2..d294f030a097 100644
4768     --- a/drivers/target/iscsi/iscsi_target.h
4769     +++ b/drivers/target/iscsi/iscsi_target.h
4770     @@ -13,7 +13,7 @@ extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
4771     extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
4772     struct iscsi_np *, int);
4773     extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
4774     - char *, int);
4775     + int);
4776     extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
4777     struct iscsi_portal_group *, bool);
4778     extern int iscsit_del_np(struct iscsi_np *);
4779     diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
4780     index c1898c84b3d2..db3b9b986954 100644
4781     --- a/drivers/target/iscsi/iscsi_target_configfs.c
4782     +++ b/drivers/target/iscsi/iscsi_target_configfs.c
4783     @@ -99,7 +99,7 @@ static ssize_t lio_target_np_store_sctp(
4784     * Use existing np->np_sockaddr for SCTP network portal reference
4785     */
4786     tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
4787     - np->np_ip, tpg_np, ISCSI_SCTP_TCP);
4788     + tpg_np, ISCSI_SCTP_TCP);
4789     if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
4790     goto out;
4791     } else {
4792     @@ -177,7 +177,7 @@ static ssize_t lio_target_np_store_iser(
4793     }
4794    
4795     tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
4796     - np->np_ip, tpg_np, ISCSI_INFINIBAND);
4797     + tpg_np, ISCSI_INFINIBAND);
4798     if (IS_ERR(tpg_np_iser)) {
4799     rc = PTR_ERR(tpg_np_iser);
4800     goto out;
4801     @@ -248,8 +248,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
4802     return ERR_PTR(-EINVAL);
4803     }
4804     str++; /* Skip over leading "[" */
4805     - *str2 = '\0'; /* Terminate the IPv6 address */
4806     - str2++; /* Skip over the "]" */
4807     + *str2 = '\0'; /* Terminate the unbracketed IPv6 address */
4808     + str2++; /* Skip over the \0 */
4809     port_str = strstr(str2, ":");
4810     if (!port_str) {
4811     pr_err("Unable to locate \":port\""
4812     @@ -316,7 +316,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
4813     * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
4814     *
4815     */
4816     - tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
4817     + tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
4818     ISCSI_TCP);
4819     if (IS_ERR(tpg_np)) {
4820     iscsit_put_tpg(tpg);
4821     @@ -344,8 +344,8 @@ static void lio_target_call_delnpfromtpg(
4822    
4823     se_tpg = &tpg->tpg_se_tpg;
4824     pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
4825     - " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
4826     - tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
4827     + " PORTAL: %pISc:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
4828     + tpg->tpgt, &tpg_np->tpg_np->np_sockaddr, tpg_np->tpg_np->np_port);
4829    
4830     ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
4831     if (ret < 0)
4832     diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
4833     index 7e8f65e5448f..666c0739bfbe 100644
4834     --- a/drivers/target/iscsi/iscsi_target_login.c
4835     +++ b/drivers/target/iscsi/iscsi_target_login.c
4836     @@ -823,8 +823,8 @@ static void iscsi_handle_login_thread_timeout(unsigned long data)
4837     struct iscsi_np *np = (struct iscsi_np *) data;
4838    
4839     spin_lock_bh(&np->np_thread_lock);
4840     - pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
4841     - np->np_ip, np->np_port);
4842     + pr_err("iSCSI Login timeout on Network Portal %pISc:%hu\n",
4843     + &np->np_sockaddr, np->np_port);
4844    
4845     if (np->np_login_timer_flags & ISCSI_TF_STOP) {
4846     spin_unlock_bh(&np->np_thread_lock);
4847     @@ -1302,8 +1302,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
4848     spin_lock_bh(&np->np_thread_lock);
4849     if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
4850     spin_unlock_bh(&np->np_thread_lock);
4851     - pr_err("iSCSI Network Portal on %s:%hu currently not"
4852     - " active.\n", np->np_ip, np->np_port);
4853     + pr_err("iSCSI Network Portal on %pISc:%hu currently not"
4854     + " active.\n", &np->np_sockaddr, np->np_port);
4855     iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
4856     ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
4857     goto new_sess_out;
4858     diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
4859     index e8a52f7d6204..51d1734d5390 100644
4860     --- a/drivers/target/iscsi/iscsi_target_parameters.c
4861     +++ b/drivers/target/iscsi/iscsi_target_parameters.c
4862     @@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
4863     TYPERANGE_UTF8, USE_INITIAL_ONLY);
4864     if (!param)
4865     goto out;
4866     +
4867     /*
4868     * Extra parameters for ISER from RFC-5046
4869     */
4870     @@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate(
4871     } else if (!strcmp(param->name, SESSIONTYPE)) {
4872     SET_PSTATE_NEGOTIATE(param);
4873     } else if (!strcmp(param->name, IFMARKER)) {
4874     - SET_PSTATE_NEGOTIATE(param);
4875     + SET_PSTATE_REJECT(param);
4876     } else if (!strcmp(param->name, OFMARKER)) {
4877     - SET_PSTATE_NEGOTIATE(param);
4878     + SET_PSTATE_REJECT(param);
4879     } else if (!strcmp(param->name, IFMARKINT)) {
4880     SET_PSTATE_REJECT(param);
4881     } else if (!strcmp(param->name, OFMARKINT)) {
4882     diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
4883     index 968068ffcb1c..de26bee4bddd 100644
4884     --- a/drivers/target/iscsi/iscsi_target_tpg.c
4885     +++ b/drivers/target/iscsi/iscsi_target_tpg.c
4886     @@ -460,7 +460,6 @@ static bool iscsit_tpg_check_network_portal(
4887     struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
4888     struct iscsi_portal_group *tpg,
4889     struct __kernel_sockaddr_storage *sockaddr,
4890     - char *ip_str,
4891     struct iscsi_tpg_np *tpg_np_parent,
4892     int network_transport)
4893     {
4894     @@ -470,8 +469,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
4895     if (!tpg_np_parent) {
4896     if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
4897     network_transport)) {
4898     - pr_err("Network Portal: %s already exists on a"
4899     - " different TPG on %s\n", ip_str,
4900     + pr_err("Network Portal: %pISc already exists on a"
4901     + " different TPG on %s\n", sockaddr,
4902     tpg->tpg_tiqn->tiqn);
4903     return ERR_PTR(-EEXIST);
4904     }
4905     @@ -484,7 +483,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
4906     return ERR_PTR(-ENOMEM);
4907     }
4908    
4909     - np = iscsit_add_np(sockaddr, ip_str, network_transport);
4910     + np = iscsit_add_np(sockaddr, network_transport);
4911     if (IS_ERR(np)) {
4912     kfree(tpg_np);
4913     return ERR_CAST(np);
4914     @@ -514,8 +513,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
4915     spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
4916     }
4917    
4918     - pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
4919     - tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
4920     + pr_debug("CORE[%s] - Added Network Portal: %pISc:%hu,%hu on %s\n",
4921     + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
4922     np->np_transport->name);
4923    
4924     return tpg_np;
4925     @@ -528,8 +527,8 @@ static int iscsit_tpg_release_np(
4926     {
4927     iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
4928    
4929     - pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
4930     - tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
4931     + pr_debug("CORE[%s] - Removed Network Portal: %pISc:%hu,%hu on %s\n",
4932     + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
4933     np->np_transport->name);
4934    
4935     tpg_np->tpg_np = NULL;
4936     diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
4937     index 95ff5bdecd71..28abda89ea98 100644
4938     --- a/drivers/target/iscsi/iscsi_target_tpg.h
4939     +++ b/drivers/target/iscsi/iscsi_target_tpg.h
4940     @@ -22,7 +22,7 @@ extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session
4941     extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
4942     extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
4943     extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
4944     - struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
4945     + struct __kernel_sockaddr_storage *, struct iscsi_tpg_np *,
4946     int);
4947     extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
4948     struct iscsi_tpg_np *);
4949     diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
4950     index 09e682b1c549..8f1cd194f06a 100644
4951     --- a/drivers/target/target_core_device.c
4952     +++ b/drivers/target/target_core_device.c
4953     @@ -427,8 +427,6 @@ void core_disable_device_list_for_node(
4954    
4955     hlist_del_rcu(&orig->link);
4956     clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
4957     - rcu_assign_pointer(orig->se_lun, NULL);
4958     - rcu_assign_pointer(orig->se_lun_acl, NULL);
4959     orig->lun_flags = 0;
4960     orig->creation_time = 0;
4961     orig->attach_count--;
4962     @@ -439,6 +437,9 @@ void core_disable_device_list_for_node(
4963     kref_put(&orig->pr_kref, target_pr_kref_release);
4964     wait_for_completion(&orig->pr_comp);
4965    
4966     + rcu_assign_pointer(orig->se_lun, NULL);
4967     + rcu_assign_pointer(orig->se_lun_acl, NULL);
4968     +
4969     kfree_rcu(orig, rcu_head);
4970    
4971     core_scsi3_free_pr_reg_from_nacl(dev, nacl);
4972     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
4973     index 5ab7100de17e..e7933115087a 100644
4974     --- a/drivers/target/target_core_pr.c
4975     +++ b/drivers/target/target_core_pr.c
4976     @@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
4977     struct se_device *dev,
4978     struct se_node_acl *nacl,
4979     struct se_lun *lun,
4980     - struct se_dev_entry *deve,
4981     + struct se_dev_entry *dest_deve,
4982     u64 mapped_lun,
4983     unsigned char *isid,
4984     u64 sa_res_key,
4985     @@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
4986     INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
4987     atomic_set(&pr_reg->pr_res_holders, 0);
4988     pr_reg->pr_reg_nacl = nacl;
4989     - pr_reg->pr_reg_deve = deve;
4990     + /*
4991     + * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
4992     + * the se_dev_entry->pr_ref will have been already obtained by
4993     + * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
4994     + *
4995     + * Otherwise, locate se_dev_entry now and obtain a reference until
4996     + * registration completes in __core_scsi3_add_registration().
4997     + */
4998     + if (dest_deve) {
4999     + pr_reg->pr_reg_deve = dest_deve;
5000     + } else {
5001     + rcu_read_lock();
5002     + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
5003     + if (!pr_reg->pr_reg_deve) {
5004     + rcu_read_unlock();
5005     + pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
5006     + nacl->initiatorname, mapped_lun);
5007     + kmem_cache_free(t10_pr_reg_cache, pr_reg);
5008     + return NULL;
5009     + }
5010     + kref_get(&pr_reg->pr_reg_deve->pr_kref);
5011     + rcu_read_unlock();
5012     + }
5013     pr_reg->pr_res_mapped_lun = mapped_lun;
5014     pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
5015     pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
5016     @@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration(
5017     !(strcmp(pr_reg->pr_tport, t_port)) &&
5018     (pr_reg->pr_reg_tpgt == tpgt) &&
5019     (pr_reg->pr_aptpl_target_lun == target_lun)) {
5020     + /*
5021     + * Obtain the ->pr_reg_deve pointer + reference, that
5022     + * is released by __core_scsi3_add_registration() below.
5023     + */
5024     + rcu_read_lock();
5025     + pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
5026     + if (!pr_reg->pr_reg_deve) {
5027     + pr_err("Unable to locate PR APTPL %s mapped_lun:"
5028     + " %llu\n", nacl->initiatorname, mapped_lun);
5029     + rcu_read_unlock();
5030     + continue;
5031     + }
5032     + kref_get(&pr_reg->pr_reg_deve->pr_kref);
5033     + rcu_read_unlock();
5034    
5035     pr_reg->pr_reg_nacl = nacl;
5036     pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
5037     -
5038     list_del(&pr_reg->pr_reg_aptpl_list);
5039     spin_unlock(&pr_tmpl->aptpl_reg_lock);
5040     /*
5041     * At this point all of the pointers in *pr_reg will
5042     * be setup, so go ahead and add the registration.
5043     */
5044     -
5045     __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
5046     /*
5047     * If this registration is the reservation holder,
5048     @@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration(
5049    
5050     __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
5051     spin_unlock(&pr_tmpl->registration_lock);
5052     -
5053     - rcu_read_lock();
5054     - deve = pr_reg->pr_reg_deve;
5055     - if (deve)
5056     - set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
5057     - rcu_read_unlock();
5058     -
5059     /*
5060     * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
5061     */
5062     if (!pr_reg->pr_reg_all_tg_pt || register_move)
5063     - return;
5064     + goto out;
5065     /*
5066     * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
5067     * allocated in __core_scsi3_alloc_registration()
5068     @@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration(
5069     __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
5070     register_type);
5071     spin_unlock(&pr_tmpl->registration_lock);
5072     -
5073     + /*
5074     + * Drop configfs group dependency reference and deve->pr_kref
5075     + * obtained from __core_scsi3_alloc_registration() code.
5076     + */
5077     rcu_read_lock();
5078     deve = pr_reg_tmp->pr_reg_deve;
5079     - if (deve)
5080     + if (deve) {
5081     set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
5082     + core_scsi3_lunacl_undepend_item(deve);
5083     + pr_reg_tmp->pr_reg_deve = NULL;
5084     + }
5085     rcu_read_unlock();
5086     -
5087     - /*
5088     - * Drop configfs group dependency reference from
5089     - * __core_scsi3_alloc_registration()
5090     - */
5091     - core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
5092     }
5093     +out:
5094     + /*
5095     + * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
5096     + */
5097     + rcu_read_lock();
5098     + deve = pr_reg->pr_reg_deve;
5099     + if (deve) {
5100     + set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
5101     + kref_put(&deve->pr_kref, target_pr_kref_release);
5102     + pr_reg->pr_reg_deve = NULL;
5103     + }
5104     + rcu_read_unlock();
5105     }
5106    
5107     static int core_scsi3_alloc_registration(
5108     @@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port(
5109     dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
5110     dest_se_deve->mapped_lun : 0);
5111    
5112     - if (!dest_se_deve)
5113     + if (!dest_se_deve) {
5114     + kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
5115     + target_pr_kref_release);
5116     continue;
5117     -
5118     + }
5119     core_scsi3_lunacl_undepend_item(dest_se_deve);
5120     core_scsi3_nodeacl_undepend_item(dest_node_acl);
5121     core_scsi3_tpg_undepend_item(dest_tpg);
5122     @@ -1823,9 +1864,11 @@ out:
5123    
5124     kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
5125    
5126     - if (!dest_se_deve)
5127     + if (!dest_se_deve) {
5128     + kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
5129     + target_pr_kref_release);
5130     continue;
5131     -
5132     + }
5133     core_scsi3_lunacl_undepend_item(dest_se_deve);
5134     core_scsi3_nodeacl_undepend_item(dest_node_acl);
5135     core_scsi3_tpg_undepend_item(dest_tpg);
5136     diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
5137     index 4515f52546f8..47fe94ee10b8 100644
5138     --- a/drivers/target/target_core_xcopy.c
5139     +++ b/drivers/target/target_core_xcopy.c
5140     @@ -450,6 +450,8 @@ int target_xcopy_setup_pt(void)
5141     memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
5142     INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
5143     INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
5144     + INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
5145     + spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
5146    
5147     xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
5148     xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
5149     @@ -644,7 +646,7 @@ static int target_xcopy_read_source(
5150     pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
5151     (unsigned long long)src_lba, src_sectors, length);
5152    
5153     - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
5154     + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
5155     DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
5156     xop->src_pt_cmd = xpt_cmd;
5157    
5158     @@ -704,7 +706,7 @@ static int target_xcopy_write_destination(
5159     pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
5160     (unsigned long long)dst_lba, dst_sectors, length);
5161    
5162     - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
5163     + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
5164     DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
5165     xop->dst_pt_cmd = xpt_cmd;
5166    
5167     diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
5168     index 620dcd405ff6..42c6f71bdcc1 100644
5169     --- a/drivers/thermal/cpu_cooling.c
5170     +++ b/drivers/thermal/cpu_cooling.c
5171     @@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
5172     * efficiently. Power is stored in mW, frequency in KHz. The
5173     * resulting table is in ascending order.
5174     *
5175     - * Return: 0 on success, -E* on error.
5176     + * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
5177     + * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
5178     + * added/enabled while the function was executing.
5179     */
5180     static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
5181     u32 capacitance)
5182     @@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
5183     int num_opps = 0, cpu, i, ret = 0;
5184     unsigned long freq;
5185    
5186     - rcu_read_lock();
5187     -
5188     for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
5189     dev = get_cpu_device(cpu);
5190     if (!dev) {
5191     @@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
5192     }
5193    
5194     num_opps = dev_pm_opp_get_opp_count(dev);
5195     - if (num_opps > 0) {
5196     + if (num_opps > 0)
5197     break;
5198     - } else if (num_opps < 0) {
5199     - ret = num_opps;
5200     - goto unlock;
5201     - }
5202     + else if (num_opps < 0)
5203     + return num_opps;
5204     }
5205    
5206     - if (num_opps == 0) {
5207     - ret = -EINVAL;
5208     - goto unlock;
5209     - }
5210     + if (num_opps == 0)
5211     + return -EINVAL;
5212    
5213     power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
5214     - if (!power_table) {
5215     - ret = -ENOMEM;
5216     - goto unlock;
5217     - }
5218     + if (!power_table)
5219     + return -ENOMEM;
5220     +
5221     + rcu_read_lock();
5222    
5223     for (freq = 0, i = 0;
5224     opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
5225     @@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
5226     u32 freq_mhz, voltage_mv;
5227     u64 power;
5228    
5229     + if (i >= num_opps) {
5230     + rcu_read_unlock();
5231     + ret = -EAGAIN;
5232     + goto free_power_table;
5233     + }
5234     +
5235     freq_mhz = freq / 1000000;
5236     voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
5237    
5238     @@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
5239     power_table[i].power = power;
5240     }
5241    
5242     - if (i == 0) {
5243     + rcu_read_unlock();
5244     +
5245     + if (i != num_opps) {
5246     ret = PTR_ERR(opp);
5247     - goto unlock;
5248     + goto free_power_table;
5249     }
5250    
5251     cpufreq_device->cpu_dev = dev;
5252     cpufreq_device->dyn_power_table = power_table;
5253     cpufreq_device->dyn_power_table_entries = i;
5254    
5255     -unlock:
5256     - rcu_read_unlock();
5257     + return 0;
5258     +
5259     +free_power_table:
5260     + kfree(power_table);
5261     +
5262     return ret;
5263     }
5264    
5265     @@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np,
5266     ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
5267     if (ret) {
5268     cool_dev = ERR_PTR(ret);
5269     - goto free_table;
5270     + goto free_power_table;
5271     }
5272    
5273     snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
5274     @@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np,
5275    
5276     remove_idr:
5277     release_idr(&cpufreq_idr, cpufreq_dev->id);
5278     +free_power_table:
5279     + kfree(cpufreq_dev->dyn_power_table);
5280     free_table:
5281     kfree(cpufreq_dev->freq_table);
5282     free_time_in_idle_timestamp:
5283     @@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
5284    
5285     thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
5286     release_idr(&cpufreq_idr, cpufreq_dev->id);
5287     + kfree(cpufreq_dev->dyn_power_table);
5288     kfree(cpufreq_dev->time_in_idle_timestamp);
5289     kfree(cpufreq_dev->time_in_idle);
5290     kfree(cpufreq_dev->freq_table);
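
The cpu_cooling rework has two parts visible above: the RCU read-side section is narrowed to just the OPP walk, because the old code called kcalloc(GFP_KERNEL), which may sleep, inside rcu_read_lock(); and the power table is now freed on every failure path and at unregister, plugging a leak. The new i >= num_opps guard returns -EAGAIN if OPPs appeared between counting and walking. A kernel-style sketch of the fixed shape:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    static int build_table_sketch(int num_entries)
    {
            u32 *table = kcalloc(num_entries, sizeof(*table), GFP_KERNEL);
            int ret = 0;

            if (!table)
                    return -ENOMEM;        /* allocate before taking RCU */

            rcu_read_lock();               /* only around the OPP walk */
            /* ... fill table[]; set ret = -EAGAIN if the OPP set grew ... */
            rcu_read_unlock();

            if (ret) {
                    kfree(table);          /* no leak on the error path */
                    return ret;
            }
            /* publish table */
            return 0;
    }
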
5291     diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
5292     index ee8bfacf2071..afc1879f66e0 100644
5293     --- a/drivers/tty/n_tty.c
5294     +++ b/drivers/tty/n_tty.c
5295     @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
5296     spin_lock_irqsave(&tty->ctrl_lock, flags);
5297     tty->ctrl_status |= TIOCPKT_FLUSHREAD;
5298     spin_unlock_irqrestore(&tty->ctrl_lock, flags);
5299     - if (waitqueue_active(&tty->link->read_wait))
5300     - wake_up_interruptible(&tty->link->read_wait);
5301     + wake_up_interruptible(&tty->link->read_wait);
5302     }
5303     }
5304    
5305     @@ -1382,8 +1381,7 @@ handle_newline:
5306     put_tty_queue(c, ldata);
5307     smp_store_release(&ldata->canon_head, ldata->read_head);
5308     kill_fasync(&tty->fasync, SIGIO, POLL_IN);
5309     - if (waitqueue_active(&tty->read_wait))
5310     - wake_up_interruptible_poll(&tty->read_wait, POLLIN);
5311     + wake_up_interruptible_poll(&tty->read_wait, POLLIN);
5312     return 0;
5313     }
5314     }
5315     @@ -1667,8 +1665,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
5316    
5317     if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
5318     kill_fasync(&tty->fasync, SIGIO, POLL_IN);
5319     - if (waitqueue_active(&tty->read_wait))
5320     - wake_up_interruptible_poll(&tty->read_wait, POLLIN);
5321     + wake_up_interruptible_poll(&tty->read_wait, POLLIN);
5322     }
5323     }
5324    
5325     @@ -1887,10 +1884,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
5326     }
5327    
5328     /* The termios change make the tty ready for I/O */
5329     - if (waitqueue_active(&tty->write_wait))
5330     - wake_up_interruptible(&tty->write_wait);
5331     - if (waitqueue_active(&tty->read_wait))
5332     - wake_up_interruptible(&tty->read_wait);
5333     + wake_up_interruptible(&tty->write_wait);
5334     + wake_up_interruptible(&tty->read_wait);
5335     }
5336    
5337     /**
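
The n_tty hunks delete a known-racy micro-optimization: testing waitqueue_active() before wake_up*() is only safe with an explicit memory barrier between making the wake-up condition true and inspecting the queue; without one, the waker can observe the queue as empty while the waiter is concurrently adding itself, and the wake-up is lost. Calling wake_up_interruptible() unconditionally is the simple correct form, and it is cheap when nobody waits. A kernel-style sketch of the waker side:

    #include <linux/wait.h>

    /* The removed fast path was:
     *
     *     if (waitqueue_active(&wq))
     *             wake_up_interruptible(&wq);
     *
     * which needs smp_mb() before the test to pair with the waiter's
     * prepare_to_wait(). The unconditional call takes the queue lock
     * and is a no-op on an empty queue. */
    static void signal_readers(wait_queue_head_t *wq)
    {
            wake_up_interruptible(wq);
    }
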
5338     diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
5339     index 37fff12dd4d0..c35d96ece8ff 100644
5340     --- a/drivers/tty/serial/8250/8250_core.c
5341     +++ b/drivers/tty/serial/8250/8250_core.c
5342     @@ -326,6 +326,14 @@ configured less than Maximum supported fifo bytes */
5343     UART_FCR7_64BYTE,
5344     .flags = UART_CAP_FIFO,
5345     },
5346     + [PORT_RT2880] = {
5347     + .name = "Palmchip BK-3103",
5348     + .fifo_size = 16,
5349     + .tx_loadsz = 16,
5350     + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
5351     + .rxtrig_bytes = {1, 4, 8, 14},
5352     + .flags = UART_CAP_FIFO,
5353     + },
5354     };
5355    
5356     /* Uart divisor latch read */
5357     diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
5358     index 2a8f528153e7..40326b342762 100644
5359     --- a/drivers/tty/serial/atmel_serial.c
5360     +++ b/drivers/tty/serial/atmel_serial.c
5361     @@ -2641,7 +2641,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
5362     ret = atmel_init_gpios(port, &pdev->dev);
5363     if (ret < 0) {
5364     dev_err(&pdev->dev, "Failed to initialize GPIOs.");
5365     - goto err;
5366     + goto err_clear_bit;
5367     }
5368    
5369     ret = atmel_init_port(port, pdev);
5370     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
5371     index 57fc6ee12332..774df354af55 100644
5372     --- a/drivers/tty/tty_io.c
5373     +++ b/drivers/tty/tty_io.c
5374     @@ -2136,8 +2136,24 @@ retry_open:
5375     if (!noctty &&
5376     current->signal->leader &&
5377     !current->signal->tty &&
5378     - tty->session == NULL)
5379     - __proc_set_tty(tty);
5380     + tty->session == NULL) {
5381     + /*
5382     + * Don't let a process that only has write access to the tty
5383     + * obtain the privileges associated with having a tty as
5384     + * controlling terminal (being able to reopen it with full
5385     + * access through /dev/tty, being able to perform pushback).
5386     + * Many distributions set the group of all ttys to "tty" and
5387     + * grant write-only access to all terminals for setgid tty
5388     + * binaries, which should not imply full privileges on all ttys.
5389     + *
5390     + * This could theoretically break old code that performs open()
5391     + * on a write-only file descriptor. In that case, it might be
5392     + * necessary to also permit this if
5393     + * inode_permission(inode, MAY_READ) == 0.
5394     + */
5395     + if (filp->f_mode & FMODE_READ)
5396     + __proc_set_tty(tty);
5397     + }
5398     spin_unlock_irq(&current->sighand->siglock);
5399     read_unlock(&tasklist_lock);
5400     tty_unlock(tty);
5401     @@ -2426,7 +2442,7 @@ static int fionbio(struct file *file, int __user *p)
5402     * Takes ->siglock() when updating signal->tty
5403     */
5404    
5405     -static int tiocsctty(struct tty_struct *tty, int arg)
5406     +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
5407     {
5408     int ret = 0;
5409    
5410     @@ -2460,6 +2476,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
5411     goto unlock;
5412     }
5413     }
5414     +
5415     + /* See the comment in tty_open(). */
5416     + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
5417     + ret = -EPERM;
5418     + goto unlock;
5419     + }
5420     +
5421     proc_set_tty(tty);
5422     unlock:
5423     read_unlock(&tasklist_lock);
5424     @@ -2852,7 +2875,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5425     no_tty();
5426     return 0;
5427     case TIOCSCTTY:
5428     - return tiocsctty(tty, arg);
5429     + return tiocsctty(tty, file, arg);
5430     case TIOCGPGRP:
5431     return tiocgpgrp(tty, real_tty, p);
5432     case TIOCSPGRP:
5433     diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
5434     index 389f0e034259..fa774323ebda 100644
5435     --- a/drivers/usb/chipidea/ci_hdrc_imx.c
5436     +++ b/drivers/usb/chipidea/ci_hdrc_imx.c
5437     @@ -56,7 +56,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
5438     { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
5439     { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
5440     { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
5441     - { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
5442     + { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
5443     { /* sentinel */ }
5444     };
5445     MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
5446     diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
5447     index 764f668d45a9..6e53c24fa1cb 100644
5448     --- a/drivers/usb/chipidea/udc.c
5449     +++ b/drivers/usb/chipidea/udc.c
5450     @@ -656,6 +656,44 @@ __acquires(hwep->lock)
5451     return 0;
5452     }
5453    
5454     +static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
5455     +{
5456     + struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
5457     + int direction, retval = 0;
5458     + unsigned long flags;
5459     +
5460     + if (ep == NULL || hwep->ep.desc == NULL)
5461     + return -EINVAL;
5462     +
5463     + if (usb_endpoint_xfer_isoc(hwep->ep.desc))
5464     + return -EOPNOTSUPP;
5465     +
5466     + spin_lock_irqsave(hwep->lock, flags);
5467     +
5468     + if (value && hwep->dir == TX && check_transfer &&
5469     + !list_empty(&hwep->qh.queue) &&
5470     + !usb_endpoint_xfer_control(hwep->ep.desc)) {
5471     + spin_unlock_irqrestore(hwep->lock, flags);
5472     + return -EAGAIN;
5473     + }
5474     +
5475     + direction = hwep->dir;
5476     + do {
5477     + retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
5478     +
5479     + if (!value)
5480     + hwep->wedge = 0;
5481     +
5482     + if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
5483     + hwep->dir = (hwep->dir == TX) ? RX : TX;
5484     +
5485     + } while (hwep->dir != direction);
5486     +
5487     + spin_unlock_irqrestore(hwep->lock, flags);
5488     + return retval;
5489     +}
5490     +
5491     +
5492     /**
5493     * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
5494     * @gadget: gadget
5495     @@ -1051,7 +1089,7 @@ __acquires(ci->lock)
5496     num += ci->hw_ep_max / 2;
5497    
5498     spin_unlock(&ci->lock);
5499     - err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
5500     + err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
5501     spin_lock(&ci->lock);
5502     if (!err)
5503     isr_setup_status_phase(ci);
5504     @@ -1110,8 +1148,8 @@ delegate:
5505    
5506     if (err < 0) {
5507     spin_unlock(&ci->lock);
5508     - if (usb_ep_set_halt(&hwep->ep))
5509     - dev_err(ci->dev, "error: ep_set_halt\n");
5510     + if (_ep_set_halt(&hwep->ep, 1, false))
5511     + dev_err(ci->dev, "error: _ep_set_halt\n");
5512     spin_lock(&ci->lock);
5513     }
5514     }
5515     @@ -1142,9 +1180,9 @@ __acquires(ci->lock)
5516     err = isr_setup_status_phase(ci);
5517     if (err < 0) {
5518     spin_unlock(&ci->lock);
5519     - if (usb_ep_set_halt(&hwep->ep))
5520     + if (_ep_set_halt(&hwep->ep, 1, false))
5521     dev_err(ci->dev,
5522     - "error: ep_set_halt\n");
5523     + "error: _ep_set_halt\n");
5524     spin_lock(&ci->lock);
5525     }
5526     }
5527     @@ -1390,41 +1428,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
5528     */
5529     static int ep_set_halt(struct usb_ep *ep, int value)
5530     {
5531     - struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
5532     - int direction, retval = 0;
5533     - unsigned long flags;
5534     -
5535     - if (ep == NULL || hwep->ep.desc == NULL)
5536     - return -EINVAL;
5537     -
5538     - if (usb_endpoint_xfer_isoc(hwep->ep.desc))
5539     - return -EOPNOTSUPP;
5540     -
5541     - spin_lock_irqsave(hwep->lock, flags);
5542     -
5543     -#ifndef STALL_IN
5544     - /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
5545     - if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
5546     - !list_empty(&hwep->qh.queue)) {
5547     - spin_unlock_irqrestore(hwep->lock, flags);
5548     - return -EAGAIN;
5549     - }
5550     -#endif
5551     -
5552     - direction = hwep->dir;
5553     - do {
5554     - retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
5555     -
5556     - if (!value)
5557     - hwep->wedge = 0;
5558     -
5559     - if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
5560     - hwep->dir = (hwep->dir == TX) ? RX : TX;
5561     -
5562     - } while (hwep->dir != direction);
5563     -
5564     - spin_unlock_irqrestore(hwep->lock, flags);
5565     - return retval;
5566     + return _ep_set_halt(ep, value, true);
5567     }
5568    
5569     /**
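
The refactor above separates the stall policy from the mechanics: gadget drivers still call usb_ep_set_halt(), which now lands in _ep_set_halt() with check_transfer = true and can be refused with -EAGAIN while a non-control TX endpoint has queued requests, whereas the UDC core's own protocol stalls pass false and are applied unconditionally. A toy userspace model of just that decision (all names and the return convention are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the early-exit test in _ep_set_halt(): a functional stall
 * requested by the gadget driver must not clobber a busy non-control
 * TX endpoint, while a protocol stall is always honoured.
 */
static int set_halt(bool tx_dir, bool queue_empty, bool is_control,
		    int value, bool check_transfer)
{
	if (value && tx_dir && check_transfer && !queue_empty && !is_control)
		return -EAGAIN;
	return 0;	/* here the real code programs the STALL bit */
}

int main(void)
{
	/* gadget-driver halt on a busy bulk-IN endpoint: refused (-11 on Linux) */
	printf("%d\n", set_halt(true, false, false, 1, true));
	/* protocol stall on the same endpoint: honoured */
	printf("%d\n", set_halt(true, false, false, 1, false));
	return 0;
}
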
5570     diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
5571     index b2a540b43f97..b9ddf0c1ffe5 100644
5572     --- a/drivers/usb/core/config.c
5573     +++ b/drivers/usb/core/config.c
5574     @@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
5575     cfgno, inum, asnum, ep->desc.bEndpointAddress);
5576     ep->ss_ep_comp.bmAttributes = 16;
5577     } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
5578     - desc->bmAttributes > 2) {
5579     + USB_SS_MULT(desc->bmAttributes) > 3) {
5580     dev_warn(ddev, "Isoc endpoint has Mult of %d in "
5581     "config %d interface %d altsetting %d ep %d: "
5582     "setting to 3\n", desc->bmAttributes + 1,
5583     @@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
5584     }
5585    
5586     if (usb_endpoint_xfer_isoc(&ep->desc))
5587     - max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
5588     + max_tx = (desc->bMaxBurst + 1) *
5589     + (USB_SS_MULT(desc->bmAttributes)) *
5590     usb_endpoint_maxp(&ep->desc);
5591     else if (usb_endpoint_xfer_int(&ep->desc))
5592     max_tx = usb_endpoint_maxp(&ep->desc) *
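
The config.c hunk above stops treating bmAttributes as the packet count itself. Assuming the mainline ch9.h definition of USB_SS_MULT() as 1 + (bmAttributes & 0x3), a Mult field of 0-2 encodes 1-3 packets per service interval, and a field value of 3 (USB_SS_MULT() == 4) is invalid and gets clamped. The corrected byte budget, stand-alone (descriptor values invented):

#include <stdio.h>

/* assumed to match include/uapi/linux/usb/ch9.h */
#define USB_SS_MULT(p) (1 + ((p) & 0x3))

int main(void)
{
	unsigned char bMaxBurst = 1;	/* 2 packets per burst */
	unsigned char bmAttributes = 2;	/* Mult = 3            */
	unsigned int maxp = 1024;	/* wMaxPacketSize      */
	unsigned int max_tx;

	if (USB_SS_MULT(bmAttributes) > 3)
		printf("invalid Mult, clamp to 3\n");

	/* the fixed formula: (burst + 1) * mult * maxp */
	max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;
	printf("max_tx = %u bytes per service interval\n", max_tx); /* 6144 */
	return 0;
}
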
5593     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
5594     index d85abfed84cc..f5a381945db2 100644
5595     --- a/drivers/usb/core/quirks.c
5596     +++ b/drivers/usb/core/quirks.c
5597     @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
5598     { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
5599     { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
5600    
5601     + /* Logitech ConferenceCam CC3000e */
5602     + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
5603     + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
5604     +
5605     + /* Logitech PTZ Pro Camera */
5606     + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
5607     +
5608     /* Logitech Quickcam Fusion */
5609     { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
5610    
5611     @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
5612     /* Philips PSC805 audio device */
5613     { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
5614    
5615     + /* Plantronic Audio 655 DSP */
5616     + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
5617     +
5618     + /* Plantronic Audio 648 USB */
5619     + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
5620     +
5621     /* Artisman Watchdog Dongle */
5622     { USB_DEVICE(0x04b4, 0x0526), .driver_info =
5623     USB_QUIRK_CONFIG_INTF_STRINGS },
5624     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
5625     index 9a8c936cd42c..41f841fa6c4d 100644
5626     --- a/drivers/usb/host/xhci-mem.c
5627     +++ b/drivers/usb/host/xhci-mem.c
5628     @@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
5629     * use Event Data TRBs, and we don't chain in a link TRB on short
5630     * transfers, we're basically dividing by 1.
5631     *
5632     - * xHCI 1.0 specification indicates that the Average TRB Length should
5633     - * be set to 8 for control endpoints.
5634     + * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
5635     + * should be set to 8 for control endpoints.
5636     */
5637     - if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
5638     + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
5639     ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
5640     else
5641     ep_ctx->tx_info |=
5642     @@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
5643     int size;
5644     int i, j, num_ports;
5645    
5646     - if (timer_pending(&xhci->cmd_timer))
5647     - del_timer_sync(&xhci->cmd_timer);
5648     + del_timer_sync(&xhci->cmd_timer);
5649    
5650     /* Free the Event Ring Segment Table and the actual Event Ring */
5651     size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
5652     @@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
5653    
5654     INIT_LIST_HEAD(&xhci->cmd_list);
5655    
5656     + /* init command timeout timer */
5657     + setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
5658     + (unsigned long)xhci);
5659     +
5660     page_size = readl(&xhci->op_regs->page_size);
5661     xhci_dbg_trace(xhci, trace_xhci_dbg_init,
5662     "Supported page size register = 0x%x", page_size);
5663     @@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
5664     "Wrote ERST address to ir_set 0.");
5665     xhci_print_ir_set(xhci, 0);
5666    
5667     - /* init command timeout timer */
5668     - setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
5669     - (unsigned long)xhci);
5670     -
5671     /*
5672     * XXX: Might need to set the Interrupter Moderation Register to
5673     * something other than the default (~1ms minimum between interrupts).
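
The two xhci-mem.c hunks above enforce one rule: initialize the command timer before any step of xhci_mem_init() that can fail, so that xhci_mem_cleanup() may call del_timer_sync() unconditionally. The dropped timer_pending() guard was no protection, since inspecting a never-initialized timer is just as undefined as deleting one. A userspace sketch of the same init-first, tear-down-unconditionally pattern (all names invented):

#include <stdlib.h>
#include <string.h>

struct hc {
	int timer_ready;	/* stands in for xhci->cmd_timer   */
	void *erst;		/* stands in for later allocations */
};

static void mem_cleanup(struct hc *hc)
{
	/* Always safe: the timer state is set up before any failure
	 * point, mirroring the relocated setup_timer() call.
	 */
	hc->timer_ready = 0;
	free(hc->erst);
	hc->erst = NULL;
}

static int mem_init(struct hc *hc)
{
	memset(hc, 0, sizeof(*hc));
	hc->timer_ready = 1;		/* first; cannot fail       */

	hc->erst = malloc(1 << 20);	/* later; fallible          */
	if (!hc->erst) {
		mem_cleanup(hc);	/* no "is it set up?" guard */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct hc hc;

	if (mem_init(&hc))
		return 1;
	mem_cleanup(&hc);
	return 0;
}
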
5674     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
5675     index 5590eac2b22d..c79d33676672 100644
5676     --- a/drivers/usb/host/xhci-pci.c
5677     +++ b/drivers/usb/host/xhci-pci.c
5678     @@ -180,51 +180,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
5679     "QUIRK: Resetting on resume");
5680     }
5681    
5682     -/*
5683     - * In some Intel xHCI controllers, in order to get D3 working,
5684     - * through a vendor specific SSIC CONFIG register at offset 0x883c,
5685     - * SSIC PORT need to be marked as "unused" before putting xHCI
5686     - * into D3. After D3 exit, the SSIC port need to be marked as "used".
5687     - * Without this change, xHCI might not enter D3 state.
5688     - * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
5689     - * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
5690     - */
5691     -static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
5692     -{
5693     - struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5694     - struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5695     - u32 val;
5696     - void __iomem *reg;
5697     -
5698     - if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
5699     - pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
5700     -
5701     - reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
5702     -
5703     - /* Notify SSIC that SSIC profile programming is not done */
5704     - val = readl(reg) & ~PROG_DONE;
5705     - writel(val, reg);
5706     -
5707     - /* Mark SSIC port as unused(suspend) or used(resume) */
5708     - val = readl(reg);
5709     - if (suspend)
5710     - val |= SSIC_PORT_UNUSED;
5711     - else
5712     - val &= ~SSIC_PORT_UNUSED;
5713     - writel(val, reg);
5714     -
5715     - /* Notify SSIC that SSIC profile programming is done */
5716     - val = readl(reg) | PROG_DONE;
5717     - writel(val, reg);
5718     - readl(reg);
5719     - }
5720     -
5721     - reg = (void __iomem *) xhci->cap_regs + 0x80a4;
5722     - val = readl(reg);
5723     - writel(val | BIT(28), reg);
5724     - readl(reg);
5725     -}
5726     -
5727     #ifdef CONFIG_ACPI
5728     static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
5729     {
5730     @@ -345,6 +300,51 @@ static void xhci_pci_remove(struct pci_dev *dev)
5731     }
5732    
5733     #ifdef CONFIG_PM
5734     +/*
5735     + * In some Intel xHCI controllers, in order to get D3 working,
5736     + * through a vendor specific SSIC CONFIG register at offset 0x883c,
5737     + * SSIC PORT need to be marked as "unused" before putting xHCI
5738     + * into D3. After D3 exit, the SSIC port need to be marked as "used".
5739     + * Without this change, xHCI might not enter D3 state.
5740     + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
5741     + * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
5742     + */
5743     +static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
5744     +{
5745     + struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5746     + struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
5747     + u32 val;
5748     + void __iomem *reg;
5749     +
5750     + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
5751     + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
5752     +
5753     + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
5754     +
5755     + /* Notify SSIC that SSIC profile programming is not done */
5756     + val = readl(reg) & ~PROG_DONE;
5757     + writel(val, reg);
5758     +
5759     + /* Mark SSIC port as unused(suspend) or used(resume) */
5760     + val = readl(reg);
5761     + if (suspend)
5762     + val |= SSIC_PORT_UNUSED;
5763     + else
5764     + val &= ~SSIC_PORT_UNUSED;
5765     + writel(val, reg);
5766     +
5767     + /* Notify SSIC that SSIC profile programming is done */
5768     + val = readl(reg) | PROG_DONE;
5769     + writel(val, reg);
5770     + readl(reg);
5771     + }
5772     +
5773     + reg = (void __iomem *) xhci->cap_regs + 0x80a4;
5774     + val = readl(reg);
5775     + writel(val | BIT(28), reg);
5776     + readl(reg);
5777     +}
5778     +
5779     static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
5780     {
5781     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5782     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
5783     index 32f4d564494a..8aadf3def901 100644
5784     --- a/drivers/usb/host/xhci-ring.c
5785     +++ b/drivers/usb/host/xhci-ring.c
5786     @@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
5787     ret = xhci_handshake(&xhci->op_regs->cmd_ring,
5788     CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
5789     if (ret < 0) {
5790     + /* we are about to kill xhci, give it one more chance */
5791     + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
5792     + &xhci->op_regs->cmd_ring);
5793     + udelay(1000);
5794     + ret = xhci_handshake(&xhci->op_regs->cmd_ring,
5795     + CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
5796     + if (ret == 0)
5797     + return 0;
5798     +
5799     xhci_err(xhci, "Stopped the command ring failed, "
5800     "maybe the host is dead\n");
5801     xhci->xhc_state |= XHCI_STATE_DYING;
5802     @@ -3041,9 +3050,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5803     struct xhci_td *td;
5804     struct scatterlist *sg;
5805     int num_sgs;
5806     - int trb_buff_len, this_sg_len, running_total;
5807     + int trb_buff_len, this_sg_len, running_total, ret;
5808     unsigned int total_packet_count;
5809     + bool zero_length_needed;
5810     bool first_trb;
5811     + int last_trb_num;
5812     u64 addr;
5813     bool more_trbs_coming;
5814    
5815     @@ -3059,13 +3070,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5816     total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
5817     usb_endpoint_maxp(&urb->ep->desc));
5818    
5819     - trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
5820     + ret = prepare_transfer(xhci, xhci->devs[slot_id],
5821     ep_index, urb->stream_id,
5822     num_trbs, urb, 0, mem_flags);
5823     - if (trb_buff_len < 0)
5824     - return trb_buff_len;
5825     + if (ret < 0)
5826     + return ret;
5827    
5828     urb_priv = urb->hcpriv;
5829     +
5830     + /* Deal with URB_ZERO_PACKET - need one more td/trb */
5831     + zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
5832     + urb_priv->length == 2;
5833     + if (zero_length_needed) {
5834     + num_trbs++;
5835     + xhci_dbg(xhci, "Creating zero length td.\n");
5836     + ret = prepare_transfer(xhci, xhci->devs[slot_id],
5837     + ep_index, urb->stream_id,
5838     + 1, urb, 1, mem_flags);
5839     + if (ret < 0)
5840     + return ret;
5841     + }
5842     +
5843     td = urb_priv->td[0];
5844    
5845     /*
5846     @@ -3095,6 +3120,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5847     trb_buff_len = urb->transfer_buffer_length;
5848    
5849     first_trb = true;
5850     + last_trb_num = zero_length_needed ? 2 : 1;
5851     /* Queue the first TRB, even if it's zero-length */
5852     do {
5853     u32 field = 0;
5854     @@ -3112,12 +3138,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5855     /* Chain all the TRBs together; clear the chain bit in the last
5856     * TRB to indicate it's the last TRB in the chain.
5857     */
5858     - if (num_trbs > 1) {
5859     + if (num_trbs > last_trb_num) {
5860     field |= TRB_CHAIN;
5861     - } else {
5862     - /* FIXME - add check for ZERO_PACKET flag before this */
5863     + } else if (num_trbs == last_trb_num) {
5864     td->last_trb = ep_ring->enqueue;
5865     field |= TRB_IOC;
5866     + } else if (zero_length_needed && num_trbs == 1) {
5867     + trb_buff_len = 0;
5868     + urb_priv->td[1]->last_trb = ep_ring->enqueue;
5869     + field |= TRB_IOC;
5870     }
5871    
5872     /* Only set interrupt on short packet for IN endpoints */
5873     @@ -3179,7 +3208,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5874     if (running_total + trb_buff_len > urb->transfer_buffer_length)
5875     trb_buff_len =
5876     urb->transfer_buffer_length - running_total;
5877     - } while (running_total < urb->transfer_buffer_length);
5878     + } while (num_trbs > 0);
5879    
5880     check_trb_math(urb, num_trbs, running_total);
5881     giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
5882     @@ -3197,7 +3226,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5883     int num_trbs;
5884     struct xhci_generic_trb *start_trb;
5885     bool first_trb;
5886     + int last_trb_num;
5887     bool more_trbs_coming;
5888     + bool zero_length_needed;
5889     int start_cycle;
5890     u32 field, length_field;
5891    
5892     @@ -3228,7 +3259,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5893     num_trbs++;
5894     running_total += TRB_MAX_BUFF_SIZE;
5895     }
5896     - /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
5897    
5898     ret = prepare_transfer(xhci, xhci->devs[slot_id],
5899     ep_index, urb->stream_id,
5900     @@ -3237,6 +3267,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5901     return ret;
5902    
5903     urb_priv = urb->hcpriv;
5904     +
5905     + /* Deal with URB_ZERO_PACKET - need one more td/trb */
5906     + zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
5907     + urb_priv->length == 2;
5908     + if (zero_length_needed) {
5909     + num_trbs++;
5910     + xhci_dbg(xhci, "Creating zero length td.\n");
5911     + ret = prepare_transfer(xhci, xhci->devs[slot_id],
5912     + ep_index, urb->stream_id,
5913     + 1, urb, 1, mem_flags);
5914     + if (ret < 0)
5915     + return ret;
5916     + }
5917     +
5918     td = urb_priv->td[0];
5919    
5920     /*
5921     @@ -3258,7 +3302,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5922     trb_buff_len = urb->transfer_buffer_length;
5923    
5924     first_trb = true;
5925     -
5926     + last_trb_num = zero_length_needed ? 2 : 1;
5927     /* Queue the first TRB, even if it's zero-length */
5928     do {
5929     u32 remainder = 0;
5930     @@ -3275,12 +3319,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5931     /* Chain all the TRBs together; clear the chain bit in the last
5932     * TRB to indicate it's the last TRB in the chain.
5933     */
5934     - if (num_trbs > 1) {
5935     + if (num_trbs > last_trb_num) {
5936     field |= TRB_CHAIN;
5937     - } else {
5938     - /* FIXME - add check for ZERO_PACKET flag before this */
5939     + } else if (num_trbs == last_trb_num) {
5940     td->last_trb = ep_ring->enqueue;
5941     field |= TRB_IOC;
5942     + } else if (zero_length_needed && num_trbs == 1) {
5943     + trb_buff_len = 0;
5944     + urb_priv->td[1]->last_trb = ep_ring->enqueue;
5945     + field |= TRB_IOC;
5946     }
5947    
5948     /* Only set interrupt on short packet for IN endpoints */
5949     @@ -3318,7 +3365,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5950     trb_buff_len = urb->transfer_buffer_length - running_total;
5951     if (trb_buff_len > TRB_MAX_BUFF_SIZE)
5952     trb_buff_len = TRB_MAX_BUFF_SIZE;
5953     - } while (running_total < urb->transfer_buffer_length);
5954     + } while (num_trbs > 0);
5955    
5956     check_trb_math(urb, num_trbs, running_total);
5957     giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
5958     @@ -3385,8 +3432,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
5959     if (start_cycle == 0)
5960     field |= 0x1;
5961    
5962     - /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
5963     - if (xhci->hci_version == 0x100) {
5964     + /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
5965     + if (xhci->hci_version >= 0x100) {
5966     if (urb->transfer_buffer_length > 0) {
5967     if (setup->bRequestType & USB_DIR_IN)
5968     field |= TRB_TX_TYPE(TRB_DATA_IN);
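
The TRB-queueing changes above hinge on last_trb_num: with URB_ZERO_PACKET a second, zero-length TD is queued, so the chain bit must stay set one TRB longer and the final, zero-length TRB carries the second TD's interrupt-on-completion flag. A toy replay of the loop's bookkeeping (TRB counts invented; nothing here touches hardware):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool zero_length_needed = true;
	int num_trbs = 3;	/* two data TRBs plus the ZLP TRB */
	int last_trb_num = zero_length_needed ? 2 : 1;

	while (num_trbs > 0) {
		if (num_trbs > last_trb_num)
			printf("data TRB: TRB_CHAIN\n");
		else if (num_trbs == last_trb_num)
			printf("data TRB: TRB_IOC (td[0]->last_trb)\n");
		else if (zero_length_needed && num_trbs == 1)
			printf("zero-length TRB: TRB_IOC (td[1]->last_trb)\n");
		num_trbs--;
	}
	return 0;
}
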
5969     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
5970     index 526ebc0c7e72..d7b9f484d4e9 100644
5971     --- a/drivers/usb/host/xhci.c
5972     +++ b/drivers/usb/host/xhci.c
5973     @@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
5974     "waited %u microseconds.\n",
5975     XHCI_MAX_HALT_USEC);
5976     if (!ret)
5977     - xhci->xhc_state &= ~XHCI_STATE_HALTED;
5978     + xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
5979     +
5980     return ret;
5981     }
5982    
5983     @@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd)
5984     }
5985     EXPORT_SYMBOL_GPL(xhci_run);
5986    
5987     -static void xhci_only_stop_hcd(struct usb_hcd *hcd)
5988     -{
5989     - struct xhci_hcd *xhci = hcd_to_xhci(hcd);
5990     -
5991     - spin_lock_irq(&xhci->lock);
5992     - xhci_halt(xhci);
5993     - spin_unlock_irq(&xhci->lock);
5994     -}
5995     -
5996     /*
5997     * Stop xHCI driver.
5998     *
5999     @@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd)
6000     u32 temp;
6001     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
6002    
6003     - if (!usb_hcd_is_primary_hcd(hcd)) {
6004     - xhci_only_stop_hcd(xhci->shared_hcd);
6005     + if (xhci->xhc_state & XHCI_STATE_HALTED)
6006     return;
6007     - }
6008    
6009     + mutex_lock(&xhci->mutex);
6010     spin_lock_irq(&xhci->lock);
6011     + xhci->xhc_state |= XHCI_STATE_HALTED;
6012     + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
6013     +
6014     /* Make sure the xHC is halted for a USB3 roothub
6015     * (xhci_stop() could be called as part of failed init).
6016     */
6017     @@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd)
6018     xhci_dbg_trace(xhci, trace_xhci_dbg_init,
6019     "xhci_stop completed - status = %x",
6020     readl(&xhci->op_regs->status));
6021     + mutex_unlock(&xhci->mutex);
6022     }
6023    
6024     /*
6025     @@ -1340,6 +1335,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
6026    
6027     if (usb_endpoint_xfer_isoc(&urb->ep->desc))
6028     size = urb->number_of_packets;
6029     + else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
6030     + urb->transfer_buffer_length > 0 &&
6031     + urb->transfer_flags & URB_ZERO_PACKET &&
6032     + !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
6033     + size = 2;
6034     else
6035     size = 1;
6036    
6037     @@ -3788,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
6038    
6039     mutex_lock(&xhci->mutex);
6040    
6041     + if (xhci->xhc_state) /* dying or halted */
6042     + goto out;
6043     +
6044     if (!udev->slot_id) {
6045     xhci_dbg_trace(xhci, trace_xhci_dbg_address,
6046     "Bad Slot ID %d", udev->slot_id);
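
The urb_enqueue hunk above sizes urb_priv at two TDs exactly when a trailing zero-length packet will be needed: a bulk OUT transfer whose nonzero length is an exact multiple of wMaxPacketSize never produces a short packet, so the device cannot tell where the transfer ends without an explicit ZLP. The same test, stand-alone (lengths invented):

#include <stdbool.h>
#include <stdio.h>

static bool needs_zlp(unsigned int len, unsigned int maxp, bool zero_flag)
{
	/* mirrors the size = 2 condition in xhci_urb_enqueue() */
	return zero_flag && len > 0 && (len % maxp) == 0;
}

int main(void)
{
	printf("%d\n", needs_zlp(1024, 512, true)); /* 1: no short packet possible */
	printf("%d\n", needs_zlp(1000, 512, true)); /* 0: 488-byte short packet    */
	printf("%d\n", needs_zlp(0, 512, true));    /* 0: already zero length      */
	return 0;
}
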
6047     diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
6048     index 3ad5d19e4d04..23c794813e6a 100644
6049     --- a/drivers/usb/misc/chaoskey.c
6050     +++ b/drivers/usb/misc/chaoskey.c
6051     @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
6052     if (this_time > max)
6053     this_time = max;
6054    
6055     - memcpy(data, dev->buf, this_time);
6056     + memcpy(data, dev->buf + dev->used, this_time);
6057    
6058     dev->used += this_time;
6059    
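
The chaoskey fix above is a classic cursor bug: the driver tracked how much of the buffer had been consumed in dev->used but kept copying from the buffer's start, so the RNG core received the first bytes over and over. A minimal reproduction of the bookkeeping (buffer contents invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };	/* dev->buf  */
	size_t used = 0;				/* dev->used */
	char out[4];
	int i;

	for (i = 0; i < 2; i++) {
		/* the buggy form, memcpy(out, buf, sizeof(out)), prints
		 * "0 1 2 3" twice; honouring the offset does not.
		 */
		memcpy(out, buf + used, sizeof(out));
		used += sizeof(out);
		printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
	}
	return 0;
}
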
6060     diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
6061     index 4d1b44c232ee..d07cafb7d5f5 100644
6062     --- a/drivers/usb/musb/musb_cppi41.c
6063     +++ b/drivers/usb/musb/musb_cppi41.c
6064     @@ -614,7 +614,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
6065     {
6066     struct musb *musb = controller->musb;
6067     struct device *dev = musb->controller;
6068     - struct device_node *np = dev->of_node;
6069     + struct device_node *np = dev->parent->of_node;
6070     struct cppi41_dma_channel *cppi41_channel;
6071     int count;
6072     int i;
6073     @@ -664,7 +664,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
6074     musb_dma->status = MUSB_DMA_STATUS_FREE;
6075     musb_dma->max_len = SZ_4M;
6076    
6077     - dc = dma_request_slave_channel(dev, str);
6078     + dc = dma_request_slave_channel(dev->parent, str);
6079     if (!dc) {
6080     dev_err(dev, "Failed to request %s.\n", str);
6081     ret = -EPROBE_DEFER;
6082     @@ -695,7 +695,7 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base)
6083     struct cppi41_dma_controller *controller;
6084     int ret = 0;
6085    
6086     - if (!musb->controller->of_node) {
6087     + if (!musb->controller->parent->of_node) {
6088     dev_err(musb->controller, "Need DT for the DMA engine.\n");
6089     return NULL;
6090     }
6091     diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
6092     index 1334a3de31b8..67325ec94894 100644
6093     --- a/drivers/usb/musb/musb_dsps.c
6094     +++ b/drivers/usb/musb/musb_dsps.c
6095     @@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
6096    
6097     dsps_writel(reg_base, wrp->epintr_set, epmask);
6098     dsps_writel(reg_base, wrp->coreintr_set, coremask);
6099     - /* start polling for ID change. */
6100     - mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
6101     + /* start polling for ID change in dual-role idle mode */
6102     + if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
6103     + musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
6104     + mod_timer(&glue->timer, jiffies +
6105     + msecs_to_jiffies(wrp->poll_timeout));
6106     dsps_musb_try_idle(musb, 0);
6107     }
6108    
6109     diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
6110     index deee68eafb72..0cd85f2ccddd 100644
6111     --- a/drivers/usb/phy/phy-generic.c
6112     +++ b/drivers/usb/phy/phy-generic.c
6113     @@ -230,7 +230,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
6114     clk_rate = pdata->clk_rate;
6115     needs_vcc = pdata->needs_vcc;
6116     if (gpio_is_valid(pdata->gpio_reset)) {
6117     - err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
6118     + err = devm_gpio_request_one(dev, pdata->gpio_reset,
6119     + GPIOF_ACTIVE_LOW,
6120     dev_name(dev));
6121     if (!err)
6122     nop->gpiod_reset =
6123     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
6124     index 876423b8892c..7c8eb4c4c175 100644
6125     --- a/drivers/usb/serial/option.c
6126     +++ b/drivers/usb/serial/option.c
6127     @@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
6128     #define ZTE_PRODUCT_MF622 0x0001
6129     #define ZTE_PRODUCT_MF628 0x0015
6130     #define ZTE_PRODUCT_MF626 0x0031
6131     +#define ZTE_PRODUCT_ZM8620_X 0x0396
6132     +#define ZTE_PRODUCT_ME3620_MBIM 0x0426
6133     +#define ZTE_PRODUCT_ME3620_X 0x1432
6134     +#define ZTE_PRODUCT_ME3620_L 0x1433
6135     #define ZTE_PRODUCT_AC2726 0xfff1
6136     #define ZTE_PRODUCT_MG880 0xfffd
6137     #define ZTE_PRODUCT_CDMA_TECH 0xfffe
6138     @@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
6139     .sendsetup = BIT(1) | BIT(2) | BIT(3),
6140     };
6141    
6142     +static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
6143     + .reserved = BIT(2) | BIT(3) | BIT(4),
6144     +};
6145     +
6146     +static const struct option_blacklist_info zte_me3620_xl_blacklist = {
6147     + .reserved = BIT(3) | BIT(4) | BIT(5),
6148     +};
6149     +
6150     +static const struct option_blacklist_info zte_zm8620_x_blacklist = {
6151     + .reserved = BIT(3) | BIT(4) | BIT(5),
6152     +};
6153     +
6154     static const struct option_blacklist_info huawei_cdc12_blacklist = {
6155     .reserved = BIT(1) | BIT(2),
6156     };
6157     @@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
6158     .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
6159     { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
6160     .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
6161     + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
6162     + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
6163     + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
6164     + .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
6165     + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
6166     + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
6167     + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
6168     + .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
6169     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
6170     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
6171     { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
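
In these tables the .reserved bitmap lists interface numbers the serial core must skip, presumably so the modem's network/MBIM functions stay free for their own drivers; BIT(3) | BIT(4) | BIT(5) therefore keeps interfaces 3-5 off-limits. Expanding such a mask, assuming the usual BIT() definition:

#include <stdio.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long reserved = BIT(3) | BIT(4) | BIT(5); /* zte_me3620_xl */
	int ifnum;

	for (ifnum = 0; ifnum < 8; ifnum++)
		printf("interface %d: %s\n", ifnum,
		       (reserved & BIT(ifnum)) ? "reserved" : "serial");
	return 0;
}
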
6172     diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
6173     index 6c3734d2b45a..d3ea90bef84d 100644
6174     --- a/drivers/usb/serial/whiteheat.c
6175     +++ b/drivers/usb/serial/whiteheat.c
6176     @@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
6177     static int whiteheat_firmware_attach(struct usb_serial *serial);
6178    
6179     /* function prototypes for the Connect Tech WhiteHEAT serial converter */
6180     +static int whiteheat_probe(struct usb_serial *serial,
6181     + const struct usb_device_id *id);
6182     static int whiteheat_attach(struct usb_serial *serial);
6183     static void whiteheat_release(struct usb_serial *serial);
6184     static int whiteheat_port_probe(struct usb_serial_port *port);
6185     @@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
6186     .description = "Connect Tech - WhiteHEAT",
6187     .id_table = id_table_std,
6188     .num_ports = 4,
6189     + .probe = whiteheat_probe,
6190     .attach = whiteheat_attach,
6191     .release = whiteheat_release,
6192     .port_probe = whiteheat_port_probe,
6193     @@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
6194     /*****************************************************************************
6195     * Connect Tech's White Heat serial driver functions
6196     *****************************************************************************/
6197     +
6198     +static int whiteheat_probe(struct usb_serial *serial,
6199     + const struct usb_device_id *id)
6200     +{
6201     + struct usb_host_interface *iface_desc;
6202     + struct usb_endpoint_descriptor *endpoint;
6203     + size_t num_bulk_in = 0;
6204     + size_t num_bulk_out = 0;
6205     + size_t min_num_bulk;
6206     + unsigned int i;
6207     +
6208     + iface_desc = serial->interface->cur_altsetting;
6209     +
6210     + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
6211     + endpoint = &iface_desc->endpoint[i].desc;
6212     + if (usb_endpoint_is_bulk_in(endpoint))
6213     + ++num_bulk_in;
6214     + if (usb_endpoint_is_bulk_out(endpoint))
6215     + ++num_bulk_out;
6216     + }
6217     +
6218     + min_num_bulk = COMMAND_PORT + 1;
6219     + if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
6220     + return -ENODEV;
6221     +
6222     + return 0;
6223     +}
6224     +
6225     static int whiteheat_attach(struct usb_serial *serial)
6226     {
6227     struct usb_serial_port *command_port;
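
The new whiteheat_probe() above rejects a device before attach() trusts its endpoint layout: a forged device exposing fewer bulk pipes than the driver's port numbering assumes (min_num_bulk works out to the command port's index plus one) would otherwise lead to out-of-bounds port accesses. The counting logic, stand-alone (descriptor shapes reduced to an enum):

#include <stdio.h>

enum ep_kind { BULK_IN, BULK_OUT, OTHER };

static int probe_ok(const enum ep_kind *eps, int n, int min_num_bulk)
{
	int num_bulk_in = 0, num_bulk_out = 0, i;

	for (i = 0; i < n; i++) {
		if (eps[i] == BULK_IN)
			num_bulk_in++;
		else if (eps[i] == BULK_OUT)
			num_bulk_out++;
	}
	return num_bulk_in >= min_num_bulk && num_bulk_out >= min_num_bulk;
}

int main(void)
{
	enum ep_kind forged[] = { BULK_IN, BULK_OUT };	/* far too few */

	printf("%s\n", probe_ok(forged, 2, 5) ? "attach" : "-ENODEV");
	return 0;
}
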
6228     diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c
6229     index 0f73621827ab..15ab07230960 100644
6230     --- a/drivers/watchdog/imgpdc_wdt.c
6231     +++ b/drivers/watchdog/imgpdc_wdt.c
6232     @@ -316,6 +316,7 @@ static int pdc_wdt_remove(struct platform_device *pdev)
6233     {
6234     struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev);
6235    
6236     + unregister_restart_handler(&pdc_wdt->restart_handler);
6237     pdc_wdt_stop(&pdc_wdt->wdt_dev);
6238     watchdog_unregister_device(&pdc_wdt->wdt_dev);
6239     clk_disable_unprepare(pdc_wdt->wdt_clk);
6240     diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
6241     index a29afb37c48c..47bd8a14d01f 100644
6242     --- a/drivers/watchdog/sunxi_wdt.c
6243     +++ b/drivers/watchdog/sunxi_wdt.c
6244     @@ -184,7 +184,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
6245     /* Set system reset function */
6246     reg = readl(wdt_base + regs->wdt_cfg);
6247     reg &= ~(regs->wdt_reset_mask);
6248     - reg |= ~(regs->wdt_reset_val);
6249     + reg |= regs->wdt_reset_val;
6250     writel(reg, wdt_base + regs->wdt_cfg);
6251    
6252     /* Enable watchdog */
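
The sunxi one-character bug deserves spelling out: reg |= ~(val) ORs in the complement of the reset value, setting nearly every bit of the config register, while the read-modify-write idiom clears with the complement of the mask and ORs in the plain value. With invented field values:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0;
	unsigned int wdt_reset_mask = 0x03;	/* field in bits 1:0    */
	unsigned int wdt_reset_val = 0x01;	/* "whole system" reset */

	unsigned int buggy = (reg & ~wdt_reset_mask) | ~wdt_reset_val;
	unsigned int fixed = (reg & ~wdt_reset_mask) | wdt_reset_val;

	printf("buggy: 0x%08x\n", buggy);	/* 0xfffffffe */
	printf("fixed: 0x%08x\n", fixed);	/* 0x00000001 */
	return 0;
}
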
6253     diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
6254     index a1800c150839..08cb419eb4e6 100644
6255     --- a/drivers/xen/preempt.c
6256     +++ b/drivers/xen/preempt.c
6257     @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
6258     asmlinkage __visible void xen_maybe_preempt_hcall(void)
6259     {
6260     if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
6261     - && should_resched())) {
6262     + && need_resched())) {
6263     /*
6264     * Clear flag as we may be rescheduled on a different
6265     * cpu.
6266     diff --git a/fs/block_dev.c b/fs/block_dev.c
6267     index 198243717da5..1170f8ce5e7f 100644
6268     --- a/fs/block_dev.c
6269     +++ b/fs/block_dev.c
6270     @@ -1241,6 +1241,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
6271     goto out_clear;
6272     }
6273     bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
6274     + /*
6275     + * If the partition is not aligned on a page
6276     + * boundary, we can't do dax I/O to it.
6277     + */
6278     + if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
6279     + (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
6280     + bdev->bd_inode->i_flags &= ~S_DAX;
6281     }
6282     } else {
6283     if (bdev->bd_contains == bdev) {
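
DAX maps the block device straight into the page tables, so a partition only qualifies if both its start sector and its length fall on page boundaries; with 4 KiB pages that is PAGE_SIZE / 512 = 8 sectors. The same alignment test, stand-alone (partition geometry invented):

#include <stdbool.h>
#include <stdio.h>

#define MY_PAGE_SIZE 4096u
#define SECTORS_PER_PAGE (MY_PAGE_SIZE / 512)	/* 8 */

static bool dax_capable(unsigned long long start_sect,
			unsigned long long nr_sects)
{
	return (start_sect % SECTORS_PER_PAGE) == 0 &&
	       (nr_sects % SECTORS_PER_PAGE) == 0;
}

int main(void)
{
	printf("%d\n", dax_capable(2048, 204800));	/* 1: 1 MiB aligned     */
	printf("%d\n", dax_capable(63, 204800));	/* 0: legacy CHS offset */
	return 0;
}
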
6284     diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
6285     index 02d05817cbdf..3fc4fec9b94e 100644
6286     --- a/fs/btrfs/extent_io.c
6287     +++ b/fs/btrfs/extent_io.c
6288     @@ -2798,7 +2798,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
6289     bio_end_io_t end_io_func,
6290     int mirror_num,
6291     unsigned long prev_bio_flags,
6292     - unsigned long bio_flags)
6293     + unsigned long bio_flags,
6294     + bool force_bio_submit)
6295     {
6296     int ret = 0;
6297     struct bio *bio;
6298     @@ -2816,6 +2817,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
6299     contig = bio_end_sector(bio) == sector;
6300    
6301     if (prev_bio_flags != bio_flags || !contig ||
6302     + force_bio_submit ||
6303     merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
6304     bio_add_page(bio, page, page_size, offset) < page_size) {
6305     ret = submit_one_bio(rw, bio, mirror_num,
6306     @@ -2909,7 +2911,8 @@ static int __do_readpage(struct extent_io_tree *tree,
6307     get_extent_t *get_extent,
6308     struct extent_map **em_cached,
6309     struct bio **bio, int mirror_num,
6310     - unsigned long *bio_flags, int rw)
6311     + unsigned long *bio_flags, int rw,
6312     + u64 *prev_em_start)
6313     {
6314     struct inode *inode = page->mapping->host;
6315     u64 start = page_offset(page);
6316     @@ -2957,6 +2960,7 @@ static int __do_readpage(struct extent_io_tree *tree,
6317     }
6318     while (cur <= end) {
6319     unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
6320     + bool force_bio_submit = false;
6321    
6322     if (cur >= last_byte) {
6323     char *userpage;
6324     @@ -3007,6 +3011,49 @@ static int __do_readpage(struct extent_io_tree *tree,
6325     block_start = em->block_start;
6326     if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
6327     block_start = EXTENT_MAP_HOLE;
6328     +
6329     + /*
6330     + * If we have a file range that points to a compressed extent
6331     + * and it's followed by a consecutive file range that points to
6332     + * and it's followed by a consecutive file range that points to
6333     + * the same compressed extent (possibly with a different
6334     + * or only part of it), we must make sure we do not submit a
6335     + * single bio to populate the pages for the 2 ranges because
6336     + * this makes the compressed extent read zero out the pages
6337     + * belonging to the 2nd range. Imagine the following scenario:
6338     + *
6339     + * File layout
6340     + * [0 - 8K] [8K - 24K]
6341     + * | |
6342     + * | |
6343     + * points to extent X, points to extent X,
6344     + * offset 4K, length of 8K offset 0, length 16K
6345     + *
6346     + * [extent X, compressed length = 4K uncompressed length = 16K]
6347     + *
6348     + * If the bio to read the compressed extent covers both ranges,
6349     + * it will decompress extent X into the pages belonging to the
6350     + * first range and then it will stop, zeroing out the remaining
6351     + * pages that belong to the other range that points to extent X.
6352     + * So here we make sure we submit 2 bios, one for the first
6353     + * range and another one for the second range. Both will target
6354     + * the same physical extent from disk, but we can't currently
6355     + * make the compressed bio endio callback populate the pages
6356     + * for both ranges because each compressed bio is tightly
6357     + * coupled with a single extent map, and each range can have
6358     + * an extent map with a different offset value relative to the
6359     + * uncompressed data of our extent and different lengths. This
6360     + * is a corner case so we prioritize correctness over
6361     + * non-optimal behavior (submitting 2 bios for the same extent).
6362     + */
6363     + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
6364     + prev_em_start && *prev_em_start != (u64)-1 &&
6365     + *prev_em_start != em->orig_start)
6366     + force_bio_submit = true;
6367     +
6368     + if (prev_em_start)
6369     + *prev_em_start = em->orig_start;
6370     +
6371     free_extent_map(em);
6372     em = NULL;
6373    
6374     @@ -3056,7 +3103,8 @@ static int __do_readpage(struct extent_io_tree *tree,
6375     bdev, bio, pnr,
6376     end_bio_extent_readpage, mirror_num,
6377     *bio_flags,
6378     - this_bio_flag);
6379     + this_bio_flag,
6380     + force_bio_submit);
6381     if (!ret) {
6382     nr++;
6383     *bio_flags = this_bio_flag;
6384     @@ -3083,7 +3131,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
6385     get_extent_t *get_extent,
6386     struct extent_map **em_cached,
6387     struct bio **bio, int mirror_num,
6388     - unsigned long *bio_flags, int rw)
6389     + unsigned long *bio_flags, int rw,
6390     + u64 *prev_em_start)
6391     {
6392     struct inode *inode;
6393     struct btrfs_ordered_extent *ordered;
6394     @@ -3103,7 +3152,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
6395    
6396     for (index = 0; index < nr_pages; index++) {
6397     __do_readpage(tree, pages[index], get_extent, em_cached, bio,
6398     - mirror_num, bio_flags, rw);
6399     + mirror_num, bio_flags, rw, prev_em_start);
6400     page_cache_release(pages[index]);
6401     }
6402     }
6403     @@ -3113,7 +3162,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
6404     int nr_pages, get_extent_t *get_extent,
6405     struct extent_map **em_cached,
6406     struct bio **bio, int mirror_num,
6407     - unsigned long *bio_flags, int rw)
6408     + unsigned long *bio_flags, int rw,
6409     + u64 *prev_em_start)
6410     {
6411     u64 start = 0;
6412     u64 end = 0;
6413     @@ -3134,7 +3184,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
6414     index - first_index, start,
6415     end, get_extent, em_cached,
6416     bio, mirror_num, bio_flags,
6417     - rw);
6418     + rw, prev_em_start);
6419     start = page_start;
6420     end = start + PAGE_CACHE_SIZE - 1;
6421     first_index = index;
6422     @@ -3145,7 +3195,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
6423     __do_contiguous_readpages(tree, &pages[first_index],
6424     index - first_index, start,
6425     end, get_extent, em_cached, bio,
6426     - mirror_num, bio_flags, rw);
6427     + mirror_num, bio_flags, rw,
6428     + prev_em_start);
6429     }
6430    
6431     static int __extent_read_full_page(struct extent_io_tree *tree,
6432     @@ -3171,7 +3222,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
6433     }
6434    
6435     ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
6436     - bio_flags, rw);
6437     + bio_flags, rw, NULL);
6438     return ret;
6439     }
6440    
6441     @@ -3197,7 +3248,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
6442     int ret;
6443    
6444     ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
6445     - &bio_flags, READ);
6446     + &bio_flags, READ, NULL);
6447     if (bio)
6448     ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
6449     return ret;
6450     @@ -3450,7 +3501,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
6451     sector, iosize, pg_offset,
6452     bdev, &epd->bio, max_nr,
6453     end_bio_extent_writepage,
6454     - 0, 0, 0);
6455     + 0, 0, 0, false);
6456     if (ret)
6457     SetPageError(page);
6458     }
6459     @@ -3752,7 +3803,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
6460     ret = submit_extent_page(rw, tree, p, offset >> 9,
6461     PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
6462     -1, end_bio_extent_buffer_writepage,
6463     - 0, epd->bio_flags, bio_flags);
6464     + 0, epd->bio_flags, bio_flags, false);
6465     epd->bio_flags = bio_flags;
6466     if (ret) {
6467     set_btree_ioerr(p);
6468     @@ -4156,6 +4207,7 @@ int extent_readpages(struct extent_io_tree *tree,
6469     struct page *page;
6470     struct extent_map *em_cached = NULL;
6471     int nr = 0;
6472     + u64 prev_em_start = (u64)-1;
6473    
6474     for (page_idx = 0; page_idx < nr_pages; page_idx++) {
6475     page = list_entry(pages->prev, struct page, lru);
6476     @@ -4172,12 +4224,12 @@ int extent_readpages(struct extent_io_tree *tree,
6477     if (nr < ARRAY_SIZE(pagepool))
6478     continue;
6479     __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
6480     - &bio, 0, &bio_flags, READ);
6481     + &bio, 0, &bio_flags, READ, &prev_em_start);
6482     nr = 0;
6483     }
6484     if (nr)
6485     __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
6486     - &bio, 0, &bio_flags, READ);
6487     + &bio, 0, &bio_flags, READ, &prev_em_start);
6488    
6489     if (em_cached)
6490     free_extent_map(em_cached);
6491     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6492     index e33dff356460..b54e63038b96 100644
6493     --- a/fs/btrfs/inode.c
6494     +++ b/fs/btrfs/inode.c
6495     @@ -5051,7 +5051,8 @@ void btrfs_evict_inode(struct inode *inode)
6496     goto no_delete;
6497     }
6498     /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
6499     - btrfs_wait_ordered_range(inode, 0, (u64)-1);
6500     + if (!special_file(inode->i_mode))
6501     + btrfs_wait_ordered_range(inode, 0, (u64)-1);
6502    
6503     btrfs_free_io_failure_record(inode, 0, (u64)-1);
6504    
6505     diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
6506     index aa0dc2573374..afa09fce8151 100644
6507     --- a/fs/cifs/cifsencrypt.c
6508     +++ b/fs/cifs/cifsencrypt.c
6509     @@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
6510     return 0;
6511     }
6512    
6513     +/* Server has provided av pairs/target info in the type 2 challenge
6514     + * packet and we have plucked it and stored it within the smb session.
6515     + * We parse that blob here to find the server-given timestamp
6516     + * as part of ntlmv2 authentication (or local current time as
6517     + * default in case of failure)
6518     + */
6519     +static __le64
6520     +find_timestamp(struct cifs_ses *ses)
6521     +{
6522     + unsigned int attrsize;
6523     + unsigned int type;
6524     + unsigned int onesize = sizeof(struct ntlmssp2_name);
6525     + unsigned char *blobptr;
6526     + unsigned char *blobend;
6527     + struct ntlmssp2_name *attrptr;
6528     +
6529     + if (!ses->auth_key.len || !ses->auth_key.response)
6530     + return 0;
6531     +
6532     + blobptr = ses->auth_key.response;
6533     + blobend = blobptr + ses->auth_key.len;
6534     +
6535     + while (blobptr + onesize < blobend) {
6536     + attrptr = (struct ntlmssp2_name *) blobptr;
6537     + type = le16_to_cpu(attrptr->type);
6538     + if (type == NTLMSSP_AV_EOL)
6539     + break;
6540     + blobptr += 2; /* advance attr type */
6541     + attrsize = le16_to_cpu(attrptr->length);
6542     + blobptr += 2; /* advance attr size */
6543     + if (blobptr + attrsize > blobend)
6544     + break;
6545     + if (type == NTLMSSP_AV_TIMESTAMP) {
6546     + if (attrsize == sizeof(u64))
6547     + return *((__le64 *)blobptr);
6548     + }
6549     + blobptr += attrsize; /* advance attr value */
6550     + }
6551     +
6552     + return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
6553     +}
6554     +
6555     static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
6556     const struct nls_table *nls_cp)
6557     {
6558     @@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
6559     struct ntlmv2_resp *ntlmv2;
6560     char ntlmv2_hash[16];
6561     unsigned char *tiblob = NULL; /* target info blob */
6562     + __le64 rsp_timestamp;
6563    
6564     if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
6565     if (!ses->domainName) {
6566     @@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
6567     }
6568     }
6569    
6570     + /* Must be within 5 minutes of the server (or in range +/-2h
6571     + * in case of Mac OS X), so simply carry over server timestamp
6572     + * (as Windows 7 does)
6573     + */
6574     + rsp_timestamp = find_timestamp(ses);
6575     +
6576     baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
6577     tilen = ses->auth_key.len;
6578     tiblob = ses->auth_key.response;
6579     @@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
6580     (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
6581     ntlmv2->blob_signature = cpu_to_le32(0x00000101);
6582     ntlmv2->reserved = 0;
6583     - /* Must be within 5 minutes of the server */
6584     - ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
6585     + ntlmv2->time = rsp_timestamp;
6586     +
6587     get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
6588     ntlmv2->reserved2 = 0;
6589    
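
The target-info blob walked by find_timestamp() above is a sequence of little-endian TLV "AV pairs": a 16-bit AvId, a 16-bit byte length, then the value. MsvAvTimestamp (AvId 7) carries the server's 64-bit FILETIME and MsvAvEOL (AvId 0) terminates the list. A userspace walk over such a blob (blob contents invented; assumes a little-endian host):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AV_EOL		0
#define AV_TIMESTAMP	7

static uint16_t get_le16(const unsigned char *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Returns the raw little-endian FILETIME from an MsvAvTimestamp pair,
 * or 0 when the blob has none (the driver then falls back to the
 * local clock).
 */
static uint64_t find_ts(const unsigned char *blob, size_t len)
{
	const unsigned char *p = blob;
	const unsigned char *end = blob + len;

	while (p + 4 <= end) {
		uint16_t type = get_le16(p);
		uint16_t size = get_le16(p + 2);

		p += 4;
		if (type == AV_EOL || p + size > end)
			break;
		if (type == AV_TIMESTAMP && size == sizeof(uint64_t)) {
			uint64_t ts;

			memcpy(&ts, p, sizeof(ts));
			return ts;
		}
		p += size;
	}
	return 0;
}

int main(void)
{
	/* AvId 7, length 8, value, then the EOL pair */
	unsigned char blob[] = { 7, 0, 8, 0,
				 8, 7, 6, 5, 4, 3, 2, 1,
				 0, 0, 0, 0 };

	printf("0x%016llx\n",
	       (unsigned long long)find_ts(blob, sizeof(blob)));
	return 0;
}

Carrying this value into ntlmv2->time keeps the response inside the server's clock-skew window even when the client's clock is off, which is the point of the setup_ntlmv2_rsp() change above.
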
6590     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
6591     index f621b44cb800..6b66dd5d1540 100644
6592     --- a/fs/cifs/inode.c
6593     +++ b/fs/cifs/inode.c
6594     @@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
6595     struct tcon_link *tlink = NULL;
6596     struct cifs_tcon *tcon = NULL;
6597     struct TCP_Server_Info *server;
6598     - struct cifs_io_parms io_parms;
6599    
6600     /*
6601     * To avoid spurious oplock breaks from server, in the case of
6602     @@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
6603     rc = -ENOSYS;
6604     cifsFileInfo_put(open_file);
6605     cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
6606     - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
6607     - unsigned int bytes_written;
6608     -
6609     - io_parms.netfid = open_file->fid.netfid;
6610     - io_parms.pid = open_file->pid;
6611     - io_parms.tcon = tcon;
6612     - io_parms.offset = 0;
6613     - io_parms.length = attrs->ia_size;
6614     - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
6615     + /* Plantronics Audio 655 DSP */
6616     - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
6617     - }
6618     + /* Plantronics Audio 648 USB */
6619     rc = -EINVAL;
6620    
6621     @@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
6622     else
6623     rc = -ENOSYS;
6624     cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
6625     - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
6626     - __u16 netfid;
6627     - int oplock = 0;
6628    
6629     - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
6630     - GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
6631     - &oplock, NULL, cifs_sb->local_nls,
6632     - cifs_remap(cifs_sb));
6633     - if (rc == 0) {
6634     - unsigned int bytes_written;
6635     -
6636     - io_parms.netfid = netfid;
6637     - io_parms.pid = current->tgid;
6638     - io_parms.tcon = tcon;
6639     - io_parms.offset = 0;
6640     - io_parms.length = attrs->ia_size;
6641     - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
6642     - NULL, 1);
6643     - cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
6644     - CIFSSMBClose(xid, tcon, netfid);
6645     - }
6646     - }
6647     if (tlink)
6648     cifs_put_tlink(tlink);
6649    
6650     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
6651     index df91bcf56d67..18da19f4f811 100644
6652     --- a/fs/cifs/smb2ops.c
6653     +++ b/fs/cifs/smb2ops.c
6654     @@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server)
6655     break;
6656     default:
6657     server->echoes = true;
6658     - server->oplocks = true;
6659     + if (enable_oplocks) {
6660     + server->oplocks = true;
6661     + server->oplock_credits = 1;
6662     + } else
6663     + server->oplocks = false;
6664     +
6665     server->echo_credits = 1;
6666     - server->oplock_credits = 1;
6667     }
6668     server->credits -= server->echo_credits + server->oplock_credits;
6669     return 0;
6670     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
6671     index b8b4f08ee094..60dd83164ed6 100644
6672     --- a/fs/cifs/smb2pdu.c
6673     +++ b/fs/cifs/smb2pdu.c
6674     @@ -46,6 +46,7 @@
6675     #include "smb2status.h"
6676     #include "smb2glob.h"
6677     #include "cifspdu.h"
6678     +#include "cifs_spnego.h"
6679    
6680     /*
6681     * The following table defines the expected "StructureSize" of SMB2 requests
6682     @@ -486,19 +487,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
6683     cifs_dbg(FYI, "missing security blob on negprot\n");
6684    
6685     rc = cifs_enable_signing(server, ses->sign);
6686     -#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
6687     if (rc)
6688     goto neg_exit;
6689     - if (blob_length)
6690     + if (blob_length) {
6691     rc = decode_negTokenInit(security_blob, blob_length, server);
6692     - if (rc == 1)
6693     - rc = 0;
6694     - else if (rc == 0) {
6695     - rc = -EIO;
6696     - goto neg_exit;
6697     + if (rc == 1)
6698     + rc = 0;
6699     + else if (rc == 0)
6700     + rc = -EIO;
6701     }
6702     -#endif
6703     -
6704     neg_exit:
6705     free_rsp_buf(resp_buftype, rsp);
6706     return rc;
6707     @@ -592,7 +589,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
6708     __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
6709     struct TCP_Server_Info *server = ses->server;
6710     u16 blob_length = 0;
6711     - char *security_blob;
6712     + struct key *spnego_key = NULL;
6713     + char *security_blob = NULL;
6714     char *ntlmssp_blob = NULL;
6715     bool use_spnego = false; /* else use raw ntlmssp */
6716    
6717     @@ -620,7 +618,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
6718     ses->ntlmssp->sesskey_per_smbsess = true;
6719    
6720     /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
6721     - ses->sectype = RawNTLMSSP;
6722     + if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
6723     + ses->sectype = RawNTLMSSP;
6724    
6725     ssetup_ntlmssp_authenticate:
6726     if (phase == NtLmChallenge)
6727     @@ -649,7 +648,48 @@ ssetup_ntlmssp_authenticate:
6728     iov[0].iov_base = (char *)req;
6729     /* 4 for rfc1002 length field and 1 for pad */
6730     iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
6731     - if (phase == NtLmNegotiate) {
6732     +
6733     + if (ses->sectype == Kerberos) {
6734     +#ifdef CONFIG_CIFS_UPCALL
6735     + struct cifs_spnego_msg *msg;
6736     +
6737     + spnego_key = cifs_get_spnego_key(ses);
6738     + if (IS_ERR(spnego_key)) {
6739     + rc = PTR_ERR(spnego_key);
6740     + spnego_key = NULL;
6741     + goto ssetup_exit;
6742     + }
6743     +
6744     + msg = spnego_key->payload.data;
6745     + /*
6746     + * check version field to make sure that cifs.upcall is
6747     + * sending us a response in an expected form
6748     + */
6749     + if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
6750     + cifs_dbg(VFS,
6751     + "bad cifs.upcall version. Expected %d got %d",
6752     + CIFS_SPNEGO_UPCALL_VERSION, msg->version);
6753     + rc = -EKEYREJECTED;
6754     + goto ssetup_exit;
6755     + }
6756     + ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
6757     + GFP_KERNEL);
6758     + if (!ses->auth_key.response) {
6759     + cifs_dbg(VFS,
6760     + "Kerberos can't allocate (%u bytes) memory",
6761     + msg->sesskey_len);
6762     + rc = -ENOMEM;
6763     + goto ssetup_exit;
6764     + }
6765     + ses->auth_key.len = msg->sesskey_len;
6766     + blob_length = msg->secblob_len;
6767     + iov[1].iov_base = msg->data + msg->sesskey_len;
6768     + iov[1].iov_len = blob_length;
6769     +#else
6770     + rc = -EOPNOTSUPP;
6771     + goto ssetup_exit;
6772     +#endif /* CONFIG_CIFS_UPCALL */
6773     + } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
6774     ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
6775     GFP_KERNEL);
6776     if (ntlmssp_blob == NULL) {
6777     @@ -672,6 +712,8 @@ ssetup_ntlmssp_authenticate:
6778     /* with raw NTLMSSP we don't encapsulate in SPNEGO */
6779     security_blob = ntlmssp_blob;
6780     }
6781     + iov[1].iov_base = security_blob;
6782     + iov[1].iov_len = blob_length;
6783     } else if (phase == NtLmAuthenticate) {
6784     req->hdr.SessionId = ses->Suid;
6785     ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
6786     @@ -699,6 +741,8 @@ ssetup_ntlmssp_authenticate:
6787     } else {
6788     security_blob = ntlmssp_blob;
6789     }
6790     + iov[1].iov_base = security_blob;
6791     + iov[1].iov_len = blob_length;
6792     } else {
6793     cifs_dbg(VFS, "illegal ntlmssp phase\n");
6794     rc = -EIO;
6795     @@ -710,8 +754,6 @@ ssetup_ntlmssp_authenticate:
6796     cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
6797     1 /* pad */ - 4 /* rfc1001 len */);
6798     req->SecurityBufferLength = cpu_to_le16(blob_length);
6799     - iov[1].iov_base = security_blob;
6800     - iov[1].iov_len = blob_length;
6801    
6802     inc_rfc1001_len(req, blob_length - 1 /* pad */);
6803    
6804     @@ -722,6 +764,7 @@ ssetup_ntlmssp_authenticate:
6805    
6806     kfree(security_blob);
6807     rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
6808     + ses->Suid = rsp->hdr.SessionId;
6809     if (resp_buftype != CIFS_NO_BUFFER &&
6810     rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
6811     if (phase != NtLmNegotiate) {
6812     @@ -739,7 +782,6 @@ ssetup_ntlmssp_authenticate:
6813     /* NTLMSSP Negotiate sent now processing challenge (response) */
6814     phase = NtLmChallenge; /* process ntlmssp challenge */
6815     rc = 0; /* MORE_PROCESSING is not an error here but expected */
6816     - ses->Suid = rsp->hdr.SessionId;
6817     rc = decode_ntlmssp_challenge(rsp->Buffer,
6818     le16_to_cpu(rsp->SecurityBufferLength), ses);
6819     }
6820     @@ -796,6 +838,10 @@ keygen_exit:
6821     kfree(ses->auth_key.response);
6822     ses->auth_key.response = NULL;
6823     }
6824     + if (spnego_key) {
6825     + key_invalidate(spnego_key);
6826     + key_put(spnego_key);
6827     + }
6828     kfree(ses->ntlmssp);
6829    
6830     return rc;
6831     diff --git a/fs/dax.c b/fs/dax.c
6832     index a7f77e1fa18c..ef35a2014580 100644
6833     --- a/fs/dax.c
6834     +++ b/fs/dax.c
6835     @@ -116,7 +116,8 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
6836     unsigned len;
6837     if (pos == max) {
6838     unsigned blkbits = inode->i_blkbits;
6839     - sector_t block = pos >> blkbits;
6840     + long page = pos >> PAGE_SHIFT;
6841     + sector_t block = page << (PAGE_SHIFT - blkbits);
6842     unsigned first = pos - (block << blkbits);
6843     long size;
6844    
6845     diff --git a/fs/dcache.c b/fs/dcache.c
6846     index 9b5fe503f6cb..e3b44ca75a1b 100644
6847     --- a/fs/dcache.c
6848     +++ b/fs/dcache.c
6849     @@ -2926,6 +2926,13 @@ restart:
6850    
6851     if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
6852     struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
6853     + /* Escaped? */
6854     + if (dentry != vfsmnt->mnt_root) {
6855     + bptr = *buffer;
6856     + blen = *buflen;
6857     + error = 3;
6858     + break;
6859     + }
6860     /* Global root? */
6861     if (mnt != parent) {
6862     dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
6863     diff --git a/fs/namei.c b/fs/namei.c
6864     index 1c2105ed20c5..36df4818a635 100644
6865     --- a/fs/namei.c
6866     +++ b/fs/namei.c
6867     @@ -560,6 +560,24 @@ static int __nd_alloc_stack(struct nameidata *nd)
6868     return 0;
6869     }
6870    
6871     +/**
6872     + * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
6873     + * @path: path to verify
6874     + *
6875     + * Rename can sometimes move a file or directory outside of a bind
6876     + * mount; path_connected() allows those cases to be detected.
6877     + */
6878     +static bool path_connected(const struct path *path)
6879     +{
6880     + struct vfsmount *mnt = path->mnt;
6881     +
6882     + /* Only bind mounts can have disconnected paths */
6883     + if (mnt->mnt_root == mnt->mnt_sb->s_root)
6884     + return true;
6885     +
6886     + return is_subdir(path->dentry, mnt->mnt_root);
6887     +}
6888     +
6889     static inline int nd_alloc_stack(struct nameidata *nd)
6890     {
6891     if (likely(nd->depth != EMBEDDED_LEVELS))
6892     @@ -1296,6 +1314,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
6893     return -ECHILD;
6894     nd->path.dentry = parent;
6895     nd->seq = seq;
6896     + if (unlikely(!path_connected(&nd->path)))
6897     + return -ENOENT;
6898     break;
6899     } else {
6900     struct mount *mnt = real_mount(nd->path.mnt);
6901     @@ -1396,7 +1416,7 @@ static void follow_mount(struct path *path)
6902     }
6903     }
6904    
6905     -static void follow_dotdot(struct nameidata *nd)
6906     +static int follow_dotdot(struct nameidata *nd)
6907     {
6908     if (!nd->root.mnt)
6909     set_root(nd);
6910     @@ -1412,6 +1432,8 @@ static void follow_dotdot(struct nameidata *nd)
6911     /* rare case of legitimate dget_parent()... */
6912     nd->path.dentry = dget_parent(nd->path.dentry);
6913     dput(old);
6914     + if (unlikely(!path_connected(&nd->path)))
6915     + return -ENOENT;
6916     break;
6917     }
6918     if (!follow_up(&nd->path))
6919     @@ -1419,6 +1441,7 @@ static void follow_dotdot(struct nameidata *nd)
6920     }
6921     follow_mount(&nd->path);
6922     nd->inode = nd->path.dentry->d_inode;
6923     + return 0;
6924     }
6925    
6926     /*
6927     @@ -1535,8 +1558,6 @@ static int lookup_fast(struct nameidata *nd,
6928     negative = d_is_negative(dentry);
6929     if (read_seqcount_retry(&dentry->d_seq, seq))
6930     return -ECHILD;
6931     - if (negative)
6932     - return -ENOENT;
6933    
6934     /*
6935     * This sequence count validates that the parent had no
6936     @@ -1557,6 +1578,12 @@ static int lookup_fast(struct nameidata *nd,
6937     goto unlazy;
6938     }
6939     }
6940     + /*
6941     + * Note: do negative dentry check after revalidation in
6942     + * case that drops it.
6943     + */
6944     + if (negative)
6945     + return -ENOENT;
6946     path->mnt = mnt;
6947     path->dentry = dentry;
6948     if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
6949     @@ -1634,7 +1661,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
6950     if (nd->flags & LOOKUP_RCU) {
6951     return follow_dotdot_rcu(nd);
6952     } else
6953     - follow_dotdot(nd);
6954     + return follow_dotdot(nd);
6955     }
6956     return 0;
6957     }
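path_connected() reduces to the question "does walking ->d_parent from this dentry reach the mount's root before hitting the filesystem root?", which is what is_subdir() answers. A stripped-down model of that walk (illustrative only; the real is_subdir() in fs/dcache.c also guards against concurrent renames via rename_lock, and all names here are stand-ins):

    /* Minimal model of the connectivity test behind path_connected(). */
    #include <stdio.h>

    struct dentry_demo {
            struct dentry_demo *d_parent;   /* a root points to itself */
    };

    static int is_subdir_model(struct dentry_demo *d, struct dentry_demo *root)
    {
            for (;;) {
                    if (d == root)
                            return 1;
                    if (d->d_parent == d)   /* reached the filesystem root */
                            return 0;
                    d = d->d_parent;
            }
    }

    int main(void)
    {
            struct dentry_demo fs_root   = { &fs_root };
            struct dentry_demo bind_root = { &fs_root };   /* mnt->mnt_root of a bind */
            struct dentry_demo inside    = { &bind_root };
            struct dentry_demo escaped   = { &fs_root };   /* renamed out of the bind */

            printf("inside:  %d\n", is_subdir_model(&inside, &bind_root));  /* 1 */
            printf("escaped: %d\n", is_subdir_model(&escaped, &bind_root)); /* 0 */
            return 0;
    }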
6958     diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
6959     index 029d688a969f..c56886829708 100644
6960     --- a/fs/nfs/delegation.c
6961     +++ b/fs/nfs/delegation.c
6962     @@ -113,7 +113,8 @@ out:
6963     return status;
6964     }
6965    
6966     -static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
6967     +static int nfs_delegation_claim_opens(struct inode *inode,
6968     + const nfs4_stateid *stateid, fmode_t type)
6969     {
6970     struct nfs_inode *nfsi = NFS_I(inode);
6971     struct nfs_open_context *ctx;
6972     @@ -140,7 +141,7 @@ again:
6973     /* Block nfs4_proc_unlck */
6974     mutex_lock(&sp->so_delegreturn_mutex);
6975     seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
6976     - err = nfs4_open_delegation_recall(ctx, state, stateid);
6977     + err = nfs4_open_delegation_recall(ctx, state, stateid, type);
6978     if (!err)
6979     err = nfs_delegation_claim_locks(ctx, state, stateid);
6980     if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
6981     @@ -411,7 +412,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
6982     do {
6983     if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
6984     break;
6985     - err = nfs_delegation_claim_opens(inode, &delegation->stateid);
6986     + err = nfs_delegation_claim_opens(inode, &delegation->stateid,
6987     + delegation->type);
6988     if (!issync || err != -EAGAIN)
6989     break;
6990     /*
6991     diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
6992     index e3c20a3ccc93..785c8525b576 100644
6993     --- a/fs/nfs/delegation.h
6994     +++ b/fs/nfs/delegation.h
6995     @@ -54,7 +54,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
6996    
6997     /* NFSv4 delegation-related procedures */
6998     int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
6999     -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
7000     +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
7001     int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
7002     bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
7003    
7004     diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
7005     index b34f2e228601..02ec07973bc4 100644
7006     --- a/fs/nfs/filelayout/filelayout.c
7007     +++ b/fs/nfs/filelayout/filelayout.c
7008     @@ -629,23 +629,18 @@ out_put:
7009     goto out;
7010     }
7011    
7012     -static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
7013     +static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
7014     {
7015     int i;
7016    
7017     - for (i = 0; i < fl->num_fh; i++) {
7018     - if (!fl->fh_array[i])
7019     - break;
7020     - kfree(fl->fh_array[i]);
7021     + if (fl->fh_array) {
7022     + for (i = 0; i < fl->num_fh; i++) {
7023     + if (!fl->fh_array[i])
7024     + break;
7025     + kfree(fl->fh_array[i]);
7026     + }
7027     + kfree(fl->fh_array);
7028     }
7029     - kfree(fl->fh_array);
7030     - fl->fh_array = NULL;
7031     -}
7032     -
7033     -static void
7034     -_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
7035     -{
7036     - filelayout_free_fh_array(fl);
7037     kfree(fl);
7038     }
7039    
7040     @@ -716,21 +711,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
7041     /* Do we want to use a mempool here? */
7042     fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
7043     if (!fl->fh_array[i])
7044     - goto out_err_free;
7045     + goto out_err;
7046    
7047     p = xdr_inline_decode(&stream, 4);
7048     if (unlikely(!p))
7049     - goto out_err_free;
7050     + goto out_err;
7051     fl->fh_array[i]->size = be32_to_cpup(p++);
7052     if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
7053     printk(KERN_ERR "NFS: Too big fh %d received %d\n",
7054     i, fl->fh_array[i]->size);
7055     - goto out_err_free;
7056     + goto out_err;
7057     }
7058    
7059     p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
7060     if (unlikely(!p))
7061     - goto out_err_free;
7062     + goto out_err;
7063     memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
7064     dprintk("DEBUG: %s: fh len %d\n", __func__,
7065     fl->fh_array[i]->size);
7066     @@ -739,8 +734,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
7067     __free_page(scratch);
7068     return 0;
7069    
7070     -out_err_free:
7071     - filelayout_free_fh_array(fl);
7072     out_err:
7073     __free_page(scratch);
7074     return -EIO;
7075     diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
7076     index d731bbf974aa..0f020e4d8421 100644
7077     --- a/fs/nfs/nfs42proc.c
7078     +++ b/fs/nfs/nfs42proc.c
7079     @@ -175,10 +175,12 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
7080     {
7081     struct nfs_server *server = NFS_SERVER(file_inode(filep));
7082     struct nfs4_exception exception = { };
7083     - int err;
7084     + loff_t err;
7085    
7086     do {
7087     err = _nfs42_proc_llseek(filep, offset, whence);
7088     + if (err >= 0)
7089     + break;
7090     if (err == -ENOTSUPP)
7091     return -EOPNOTSUPP;
7092     err = nfs4_handle_exception(server, err, &exception);
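The local holding the llseek result used to be an int, so a returned offset wider than 32 bits was truncated and could even turn negative, and without the new err >= 0 break a successful offset was fed back into nfs4_handle_exception() as if it were an error code. A sketch of the truncation, with a made-up 6 GiB offset:

    /* Sketch: storing a 64-bit file offset in an int truncates it. */
    #include <stdio.h>

    int main(void)
    {
            long long offset = 6LL * 1024 * 1024 * 1024;  /* server's answer */
            int err = (int)offset;                        /* old code's storage */

            printf("loff_t: %lld\n", offset);  /* 6442450944 */
            printf("int:    %d\n", err);       /* -2147483648 on common ABIs */
            return 0;
    }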
7093     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
7094     index 73c8204ad463..d2daacad3568 100644
7095     --- a/fs/nfs/nfs4proc.c
7096     +++ b/fs/nfs/nfs4proc.c
7097     @@ -1127,6 +1127,21 @@ static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
7098     return ret;
7099     }
7100    
7101     +static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
7102     + fmode_t fmode)
7103     +{
7104     + switch(fmode & (FMODE_READ|FMODE_WRITE)) {
7105     + case FMODE_READ|FMODE_WRITE:
7106     + return state->n_rdwr != 0;
7107     + case FMODE_WRITE:
7108     + return state->n_wronly != 0;
7109     + case FMODE_READ:
7110     + return state->n_rdonly != 0;
7111     + }
7112     + WARN_ON_ONCE(1);
7113     + return false;
7114     +}
7115     +
7116     static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
7117     {
7118     int ret = 0;
7119     @@ -1561,17 +1576,13 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
7120     return opendata;
7121     }
7122    
7123     -static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
7124     +static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
7125     + fmode_t fmode)
7126     {
7127     struct nfs4_state *newstate;
7128     int ret;
7129    
7130     - if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
7131     - opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) &&
7132     - (opendata->o_arg.u.delegation_type & fmode) != fmode)
7133     - /* This mode can't have been delegated, so we must have
7134     - * a valid open_stateid to cover it - not need to reclaim.
7135     - */
7136     + if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
7137     return 0;
7138     opendata->o_arg.open_flags = 0;
7139     opendata->o_arg.fmode = fmode;
7140     @@ -1587,14 +1598,14 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod
7141     newstate = nfs4_opendata_to_nfs4_state(opendata);
7142     if (IS_ERR(newstate))
7143     return PTR_ERR(newstate);
7144     + if (newstate != opendata->state)
7145     + ret = -ESTALE;
7146     nfs4_close_state(newstate, fmode);
7147     - *res = newstate;
7148     - return 0;
7149     + return ret;
7150     }
7151    
7152     static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
7153     {
7154     - struct nfs4_state *newstate;
7155     int ret;
7156    
7157     /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
7158     @@ -1605,27 +1616,15 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
7159     clear_bit(NFS_DELEGATED_STATE, &state->flags);
7160     clear_bit(NFS_OPEN_STATE, &state->flags);
7161     smp_rmb();
7162     - if (state->n_rdwr != 0) {
7163     - ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
7164     - if (ret != 0)
7165     - return ret;
7166     - if (newstate != state)
7167     - return -ESTALE;
7168     - }
7169     - if (state->n_wronly != 0) {
7170     - ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
7171     - if (ret != 0)
7172     - return ret;
7173     - if (newstate != state)
7174     - return -ESTALE;
7175     - }
7176     - if (state->n_rdonly != 0) {
7177     - ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
7178     - if (ret != 0)
7179     - return ret;
7180     - if (newstate != state)
7181     - return -ESTALE;
7182     - }
7183     + ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
7184     + if (ret != 0)
7185     + return ret;
7186     + ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
7187     + if (ret != 0)
7188     + return ret;
7189     + ret = nfs4_open_recover_helper(opendata, FMODE_READ);
7190     + if (ret != 0)
7191     + return ret;
7192     /*
7193     * We may have performed cached opens for all three recoveries.
7194     * Check if we need to update the current stateid.
7195     @@ -1749,18 +1748,32 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7196     return err;
7197     }
7198    
7199     -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
7200     +int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
7201     + struct nfs4_state *state, const nfs4_stateid *stateid,
7202     + fmode_t type)
7203     {
7204     struct nfs_server *server = NFS_SERVER(state->inode);
7205     struct nfs4_opendata *opendata;
7206     - int err;
7207     + int err = 0;
7208    
7209     opendata = nfs4_open_recoverdata_alloc(ctx, state,
7210     NFS4_OPEN_CLAIM_DELEG_CUR_FH);
7211     if (IS_ERR(opendata))
7212     return PTR_ERR(opendata);
7213     nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
7214     - err = nfs4_open_recover(opendata, state);
7215     + clear_bit(NFS_DELEGATED_STATE, &state->flags);
7216     + switch (type & (FMODE_READ|FMODE_WRITE)) {
7217     + case FMODE_READ|FMODE_WRITE:
7218     + case FMODE_WRITE:
7219     + err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
7220     + if (err)
7221     + break;
7222     + err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
7223     + if (err)
7224     + break;
7225     + case FMODE_READ:
7226     + err = nfs4_open_recover_helper(opendata, FMODE_READ);
7227     + }
7228     nfs4_opendata_put(opendata);
7229     return nfs4_handle_delegation_recall_error(server, state, stateid, err);
7230     }
7231     diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
7232     index 7c5718ba625e..fe3ddd20ff89 100644
7233     --- a/fs/nfs/pagelist.c
7234     +++ b/fs/nfs/pagelist.c
7235     @@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
7236     * for it without upsetting the slab allocator.
7237     */
7238     if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
7239     - sizeof(struct page) > PAGE_SIZE)
7240     + sizeof(struct page *) > PAGE_SIZE)
7241     return 0;
7242    
7243     return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
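The bound is meant to cap how many struct page pointers a single request can carry in one page-sized allocation, but it was computed with sizeof(struct page), the size of the struct itself, making the cap roughly eight times too strict on 64-bit. A sketch of the difference, with an assumed 64-byte struct as a stand-in:

    /* Sketch: sizing an array of pointers by the pointed-to struct's
     * size undercounts the entries that fit in a page. */
    #include <stdio.h>

    struct page_demo { unsigned long w[8]; };   /* 64 bytes, stand-in */

    int main(void)
    {
            unsigned long page_size = 4096;

            printf("by struct:  %lu entries\n",
                   page_size / sizeof(struct page_demo));    /* 64 */
            printf("by pointer: %lu entries\n",
                   page_size / sizeof(struct page_demo *));  /* 512 on LP64 */
            return 0;
    }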
7244     diff --git a/fs/nfs/read.c b/fs/nfs/read.c
7245     index ae0ff7a11b40..01b8cc8e8cfc 100644
7246     --- a/fs/nfs/read.c
7247     +++ b/fs/nfs/read.c
7248     @@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
7249     {
7250     struct nfs_pgio_mirror *mirror;
7251    
7252     + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
7253     + pgio->pg_ops->pg_cleanup(pgio);
7254     +
7255     pgio->pg_ops = &nfs_pgio_rw_ops;
7256    
7257     /* read path should never have more than one mirror */
7258     diff --git a/fs/nfs/write.c b/fs/nfs/write.c
7259     index fdee9270ca15..b45b465bc205 100644
7260     --- a/fs/nfs/write.c
7261     +++ b/fs/nfs/write.c
7262     @@ -1223,7 +1223,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
7263     return 1;
7264     if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
7265     list_empty_careful(&flctx->flc_posix)))
7266     - return 0;
7267     + return 1;
7268    
7269     /* Check to see if there are whole file write locks */
7270     ret = 0;
7271     @@ -1351,6 +1351,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
7272     {
7273     struct nfs_pgio_mirror *mirror;
7274    
7275     + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
7276     + pgio->pg_ops->pg_cleanup(pgio);
7277     +
7278     pgio->pg_ops = &nfs_pgio_rw_ops;
7279    
7280     nfs_pageio_stop_mirroring(pgio);
7281     diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
7282     index fdf4b41d0609..482cfd34472d 100644
7283     --- a/fs/ocfs2/dlm/dlmmaster.c
7284     +++ b/fs/ocfs2/dlm/dlmmaster.c
7285     @@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
7286     int found, ret;
7287     int set_maybe;
7288     int dispatch_assert = 0;
7289     + int dispatched = 0;
7290    
7291     if (!dlm_grab(dlm))
7292     return DLM_MASTER_RESP_NO;
7293     @@ -1658,15 +1659,18 @@ send_response:
7294     mlog(ML_ERROR, "failed to dispatch assert master work\n");
7295     response = DLM_MASTER_RESP_ERROR;
7296     dlm_lockres_put(res);
7297     - } else
7298     + } else {
7299     + dispatched = 1;
7300     __dlm_lockres_grab_inflight_worker(dlm, res);
7301     + }
7302     spin_unlock(&res->spinlock);
7303     } else {
7304     if (res)
7305     dlm_lockres_put(res);
7306     }
7307    
7308     - dlm_put(dlm);
7309     + if (!dispatched)
7310     + dlm_put(dlm);
7311     return response;
7312     }
7313    
7314     @@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
7315    
7316    
7317     /* queue up work for dlm_assert_master_worker */
7318     - dlm_grab(dlm); /* get an extra ref for the work item */
7319     dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
7320     item->u.am.lockres = res; /* already have a ref */
7321     /* can optionally ignore node numbers higher than this node */
7322     diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
7323     index ce12e0b1a31f..3d90ad7ff91f 100644
7324     --- a/fs/ocfs2/dlm/dlmrecovery.c
7325     +++ b/fs/ocfs2/dlm/dlmrecovery.c
7326     @@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
7327     unsigned int hash;
7328     int master = DLM_LOCK_RES_OWNER_UNKNOWN;
7329     u32 flags = DLM_ASSERT_MASTER_REQUERY;
7330     + int dispatched = 0;
7331    
7332     if (!dlm_grab(dlm)) {
7333     /* since the domain has gone away on this
7334     @@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
7335     dlm_put(dlm);
7336     /* sender will take care of this and retry */
7337     return ret;
7338     - } else
7339     + } else {
7340     + dispatched = 1;
7341     __dlm_lockres_grab_inflight_worker(dlm, res);
7342     + }
7343     spin_unlock(&res->spinlock);
7344     } else {
7345     /* put.. incase we are not the master */
7346     @@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
7347     }
7348     spin_unlock(&dlm->spinlock);
7349    
7350     - dlm_put(dlm);
7351     + if (!dispatched)
7352     + dlm_put(dlm);
7353     return master;
7354     }
7355    
7356     diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
7357     index 96f3448b6eb4..fd65b3f1923c 100644
7358     --- a/fs/ubifs/xattr.c
7359     +++ b/fs/ubifs/xattr.c
7360     @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
7361     {
7362     int err;
7363    
7364     - mutex_lock(&inode->i_mutex);
7365     err = security_inode_init_security(inode, dentry, qstr,
7366     &init_xattrs, 0);
7367     - mutex_unlock(&inode->i_mutex);
7368     -
7369     if (err) {
7370     struct ubifs_info *c = dentry->i_sb->s_fs_info;
7371     ubifs_err(c, "cannot initialize security for inode %lu, error %d",
7372     diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
7373     index d0a7a4753db2..0bec580a4885 100644
7374     --- a/include/asm-generic/preempt.h
7375     +++ b/include/asm-generic/preempt.h
7376     @@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
7377     /*
7378     * Returns true when we need to resched and can (barring IRQ state).
7379     */
7380     -static __always_inline bool should_resched(void)
7381     +static __always_inline bool should_resched(int preempt_offset)
7382     {
7383     - return unlikely(!preempt_count() && tif_need_resched());
7384     + return unlikely(preempt_count() == preempt_offset &&
7385     + tif_need_resched());
7386     }
7387    
7388     #ifdef CONFIG_PREEMPT
7389     diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
7390     index 83bfb87f5bf1..e2aadbc7151f 100644
7391     --- a/include/asm-generic/qspinlock.h
7392     +++ b/include/asm-generic/qspinlock.h
7393     @@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
7394     cpu_relax();
7395     }
7396    
7397     -#ifndef virt_queued_spin_lock
7398     -static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
7399     +#ifndef virt_spin_lock
7400     +static __always_inline bool virt_spin_lock(struct qspinlock *lock)
7401     {
7402     return false;
7403     }
7404     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
7405     index 93755a629299..430c876ad717 100644
7406     --- a/include/linux/cgroup-defs.h
7407     +++ b/include/linux/cgroup-defs.h
7408     @@ -463,31 +463,8 @@ struct cgroup_subsys {
7409     unsigned int depends_on;
7410     };
7411    
7412     -extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
7413     -
7414     -/**
7415     - * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
7416     - * @tsk: target task
7417     - *
7418     - * Called from threadgroup_change_begin() and allows cgroup operations to
7419     - * synchronize against threadgroup changes using a percpu_rw_semaphore.
7420     - */
7421     -static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
7422     -{
7423     - percpu_down_read(&cgroup_threadgroup_rwsem);
7424     -}
7425     -
7426     -/**
7427     - * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
7428     - * @tsk: target task
7429     - *
7430     - * Called from threadgroup_change_end(). Counterpart of
7431     - * cgroup_threadcgroup_change_begin().
7432     - */
7433     -static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
7434     -{
7435     - percpu_up_read(&cgroup_threadgroup_rwsem);
7436     -}
7437     +void cgroup_threadgroup_change_begin(struct task_struct *tsk);
7438     +void cgroup_threadgroup_change_end(struct task_struct *tsk);
7439    
7440     #else /* CONFIG_CGROUPS */
7441    
7442     diff --git a/include/linux/init_task.h b/include/linux/init_task.h
7443     index e8493fee8160..bb9b075f0eb0 100644
7444     --- a/include/linux/init_task.h
7445     +++ b/include/linux/init_task.h
7446     @@ -25,6 +25,13 @@
7447     extern struct files_struct init_files;
7448     extern struct fs_struct init_fs;
7449    
7450     +#ifdef CONFIG_CGROUPS
7451     +#define INIT_GROUP_RWSEM(sig) \
7452     + .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
7453     +#else
7454     +#define INIT_GROUP_RWSEM(sig)
7455     +#endif
7456     +
7457     #ifdef CONFIG_CPUSETS
7458     #define INIT_CPUSET_SEQ(tsk) \
7459     .mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
7460     @@ -48,6 +55,7 @@ extern struct fs_struct init_fs;
7461     }, \
7462     .cred_guard_mutex = \
7463     __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
7464     + INIT_GROUP_RWSEM(sig) \
7465     }
7466    
7467     extern struct nsproxy init_nsproxy;
7468     diff --git a/include/linux/mm.h b/include/linux/mm.h
7469     index bf6f117fcf4d..2b05068f5878 100644
7470     --- a/include/linux/mm.h
7471     +++ b/include/linux/mm.h
7472     @@ -916,6 +916,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
7473     #endif
7474     }
7475    
7476     +#ifdef CONFIG_MEMCG
7477     +static inline struct mem_cgroup *page_memcg(struct page *page)
7478     +{
7479     + return page->mem_cgroup;
7480     +}
7481     +
7482     +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
7483     +{
7484     + page->mem_cgroup = memcg;
7485     +}
7486     +#else
7487     +static inline struct mem_cgroup *page_memcg(struct page *page)
7488     +{
7489     + return NULL;
7490     +}
7491     +
7492     +static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
7493     +{
7494     +}
7495     +#endif
7496     +
7497     /*
7498     * Some inline functions in vmstat.h depend on page_zone()
7499     */
7500     diff --git a/include/linux/preempt.h b/include/linux/preempt.h
7501     index 84991f185173..bea8dd8ff5e0 100644
7502     --- a/include/linux/preempt.h
7503     +++ b/include/linux/preempt.h
7504     @@ -84,13 +84,21 @@
7505     */
7506     #define in_nmi() (preempt_count() & NMI_MASK)
7507    
7508     +/*
7509     + * The preempt_count offset after preempt_disable();
7510     + */
7511     #if defined(CONFIG_PREEMPT_COUNT)
7512     -# define PREEMPT_DISABLE_OFFSET 1
7513     +# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
7514     #else
7515     -# define PREEMPT_DISABLE_OFFSET 0
7516     +# define PREEMPT_DISABLE_OFFSET 0
7517     #endif
7518    
7519     /*
7520     + * The preempt_count offset after spin_lock()
7521     + */
7522     +#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
7523     +
7524     +/*
7525     * The preempt_count offset needed for things like:
7526     *
7527     * spin_lock_bh()
7528     @@ -103,7 +111,7 @@
7529     *
7530     * Work as expected.
7531     */
7532     -#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
7533     +#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
7534    
7535     /*
7536     * Are we running in atomic context? WARNING: this macro cannot
7537     @@ -124,7 +132,8 @@
7538     #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
7539     extern void preempt_count_add(int val);
7540     extern void preempt_count_sub(int val);
7541     -#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
7542     +#define preempt_count_dec_and_test() \
7543     + ({ preempt_count_sub(1); should_resched(0); })
7544     #else
7545     #define preempt_count_add(val) __preempt_count_add(val)
7546     #define preempt_count_sub(val) __preempt_count_sub(val)
7547     @@ -184,7 +193,7 @@ do { \
7548    
7549     #define preempt_check_resched() \
7550     do { \
7551     - if (should_resched()) \
7552     + if (should_resched(0)) \
7553     __preempt_schedule(); \
7554     } while (0)
7555    
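With the new parameter, should_resched() fires when preempt_count() equals the offset the caller is expected to be holding, not only at zero; cond_resched_lock() passes PREEMPT_LOCK_OFFSET so a pending reschedule can be noticed while one spinlock is still held. A toy model of the check (illustrative, not kernel code; names are stand-ins):

    /* Toy model of the offset-aware resched check. */
    #include <stdbool.h>
    #include <stdio.h>

    static int preempt_count;          /* 0 = fully preemptible */
    static bool need_resched = true;   /* TIF_NEED_RESCHED stand-in */

    static bool should_resched(int preempt_offset)
    {
            return preempt_count == preempt_offset && need_resched;
    }

    int main(void)
    {
            preempt_count = 1;   /* one spin_lock() held, CONFIG_PREEMPT_COUNT */

            printf("against 0:                   %d\n", should_resched(0)); /* 0 */
            printf("against PREEMPT_LOCK_OFFSET: %d\n", should_resched(1)); /* 1 */
            return 0;
    }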
7556     diff --git a/include/linux/sched.h b/include/linux/sched.h
7557     index 04b5ada460b4..bfca8aa215d1 100644
7558     --- a/include/linux/sched.h
7559     +++ b/include/linux/sched.h
7560     @@ -754,6 +754,18 @@ struct signal_struct {
7561     unsigned audit_tty_log_passwd;
7562     struct tty_audit_buf *tty_audit_buf;
7563     #endif
7564     +#ifdef CONFIG_CGROUPS
7565     + /*
7566     + * group_rwsem prevents new tasks from entering the threadgroup and
7567     + * member tasks from exiting, more specifically, the setting of
7568     + * PF_EXITING. fork and exit paths are protected with this rwsem
7569     + * using threadgroup_change_begin/end(). Users which require
7570     + * threadgroup to remain stable should use threadgroup_[un]lock()
7571     + * which also takes care of exec path. Currently, cgroup is the
7572     + * only user.
7573     + */
7574     + struct rw_semaphore group_rwsem;
7575     +#endif
7576    
7577     oom_flags_t oom_flags;
7578     short oom_score_adj; /* OOM kill score adjustment */
7579     @@ -2897,12 +2909,6 @@ extern int _cond_resched(void);
7580    
7581     extern int __cond_resched_lock(spinlock_t *lock);
7582    
7583     -#ifdef CONFIG_PREEMPT_COUNT
7584     -#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
7585     -#else
7586     -#define PREEMPT_LOCK_OFFSET 0
7587     -#endif
7588     -
7589     #define cond_resched_lock(lock) ({ \
7590     ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
7591     __cond_resched_lock(lock); \
7592     diff --git a/include/linux/security.h b/include/linux/security.h
7593     index 79d85ddf8093..2f4c1f7aa7db 100644
7594     --- a/include/linux/security.h
7595     +++ b/include/linux/security.h
7596     @@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
7597     unsigned long arg4,
7598     unsigned long arg5)
7599     {
7600     - return cap_task_prctl(option, arg2, arg3, arg3, arg5);
7601     + return cap_task_prctl(option, arg2, arg3, arg4, arg5);
7602     }
7603    
7604     static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
7605     diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
7606     index bab824bde92c..d4c6b5f30acd 100644
7607     --- a/include/net/netfilter/br_netfilter.h
7608     +++ b/include/net/netfilter/br_netfilter.h
7609     @@ -59,7 +59,7 @@ static inline unsigned int
7610     br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
7611     const struct nf_hook_state *state)
7612     {
7613     - return NF_DROP;
7614     + return NF_ACCEPT;
7615     }
7616     #endif
7617    
7618     diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
7619     index 37cd3911d5c5..4023c4ce260f 100644
7620     --- a/include/net/netfilter/nf_conntrack.h
7621     +++ b/include/net/netfilter/nf_conntrack.h
7622     @@ -292,6 +292,7 @@ extern unsigned int nf_conntrack_hash_rnd;
7623     void init_nf_conntrack_hash_rnd(void);
7624    
7625     struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
7626     +void nf_ct_tmpl_free(struct nf_conn *tmpl);
7627    
7628     #define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
7629     #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
7630     diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
7631     index 2a246680a6c3..aa8bee72c9d3 100644
7632     --- a/include/net/netfilter/nf_tables.h
7633     +++ b/include/net/netfilter/nf_tables.h
7634     @@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
7635    
7636     static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
7637     {
7638     - return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
7639     + return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
7640     }
7641    
7642     unsigned int nft_parse_register(const struct nlattr *attr);
7643     diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
7644     index 0aedbb2c10e0..7e7f8875ac32 100644
7645     --- a/include/target/iscsi/iscsi_target_core.h
7646     +++ b/include/target/iscsi/iscsi_target_core.h
7647     @@ -776,7 +776,6 @@ struct iscsi_np {
7648     enum iscsi_timer_flags_table np_login_timer_flags;
7649     u32 np_exports;
7650     enum np_flags_table np_flags;
7651     - unsigned char np_ip[IPV6_ADDRESS_SPACE];
7652     u16 np_port;
7653     spinlock_t np_thread_lock;
7654     struct completion np_restart_comp;
7655     diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
7656     index 9ce083960a25..f18490985fc8 100644
7657     --- a/include/xen/interface/sched.h
7658     +++ b/include/xen/interface/sched.h
7659     @@ -107,5 +107,13 @@ struct sched_watchdog {
7660     #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
7661     #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
7662     #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
7663     +/*
7664     + * Domain asked to perform 'soft reset' for it. The expected behavior is to
7665     + * reset internal Xen state for the domain returning it to the point where it
7666     + * was created but leaving the domain's memory contents and vCPU contexts
7667     + * intact. This will allow the domain to start over and set up all Xen specific
7668     + * interfaces again.
7669     + */
7670     +#define SHUTDOWN_soft_reset 5
7671    
7672     #endif /* __XEN_PUBLIC_SCHED_H__ */
7673     diff --git a/ipc/msg.c b/ipc/msg.c
7674     index 66c4f567eb73..1471db9a7e61 100644
7675     --- a/ipc/msg.c
7676     +++ b/ipc/msg.c
7677     @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
7678     return retval;
7679     }
7680    
7681     - /* ipc_addid() locks msq upon success. */
7682     - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
7683     - if (id < 0) {
7684     - ipc_rcu_putref(msq, msg_rcu_free);
7685     - return id;
7686     - }
7687     -
7688     msq->q_stime = msq->q_rtime = 0;
7689     msq->q_ctime = get_seconds();
7690     msq->q_cbytes = msq->q_qnum = 0;
7691     @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
7692     INIT_LIST_HEAD(&msq->q_receivers);
7693     INIT_LIST_HEAD(&msq->q_senders);
7694    
7695     + /* ipc_addid() locks msq upon success. */
7696     + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
7697     + if (id < 0) {
7698     + ipc_rcu_putref(msq, msg_rcu_free);
7699     + return id;
7700     + }
7701     +
7702     ipc_unlock_object(&msq->q_perm);
7703     rcu_read_unlock();
7704    
7705     diff --git a/ipc/shm.c b/ipc/shm.c
7706     index 4aef24d91b63..0e61fd430547 100644
7707     --- a/ipc/shm.c
7708     +++ b/ipc/shm.c
7709     @@ -551,12 +551,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
7710     if (IS_ERR(file))
7711     goto no_file;
7712    
7713     - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
7714     - if (id < 0) {
7715     - error = id;
7716     - goto no_id;
7717     - }
7718     -
7719     shp->shm_cprid = task_tgid_vnr(current);
7720     shp->shm_lprid = 0;
7721     shp->shm_atim = shp->shm_dtim = 0;
7722     @@ -565,6 +559,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
7723     shp->shm_nattch = 0;
7724     shp->shm_file = file;
7725     shp->shm_creator = current;
7726     +
7727     + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
7728     + if (id < 0) {
7729     + error = id;
7730     + goto no_id;
7731     + }
7732     +
7733     list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
7734    
7735     /*
7736     diff --git a/ipc/util.c b/ipc/util.c
7737     index be4230020a1f..0f401d94b7c6 100644
7738     --- a/ipc/util.c
7739     +++ b/ipc/util.c
7740     @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
7741     rcu_read_lock();
7742     spin_lock(&new->lock);
7743    
7744     + current_euid_egid(&euid, &egid);
7745     + new->cuid = new->uid = euid;
7746     + new->gid = new->cgid = egid;
7747     +
7748     id = idr_alloc(&ids->ipcs_idr, new,
7749     (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
7750     GFP_NOWAIT);
7751     @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
7752    
7753     ids->in_use++;
7754    
7755     - current_euid_egid(&euid, &egid);
7756     - new->cuid = new->uid = euid;
7757     - new->gid = new->cgid = egid;
7758     -
7759     if (next_id < 0) {
7760     new->seq = ids->seq++;
7761     if (ids->seq > IPCID_SEQ_MAX)
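All three ipc hunks (msg.c, shm.c, util.c) enforce the same rule: ipc_addid()/idr_alloc() publishes the new object to concurrent RCU lookups, so every field a reader may inspect, including the uid/gid credentials moved in the util.c hunk, must be initialized before that point. A minimal sketch of the ordering, with made-up names:

    /* Sketch of the initialize-then-publish rule (illustrative). */
    #include <stdio.h>

    struct ipc_demo { int uid; };
    static struct ipc_demo *visible;   /* stand-in for the idr */

    int main(void)
    {
            static struct ipc_demo obj;

            /* old order: publish first, then initialize -- a concurrent
             * reader could find the object with uid still zero */

            obj.uid = 1000;      /* initialize everything a reader may see */
            visible = &obj;      /* only then publish (kernel: idr_alloc()) */

            printf("reader sees uid=%d\n", visible->uid);
            return 0;
    }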
7762     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
7763     index c6c4240e7d28..fe6f855de3d1 100644
7764     --- a/kernel/cgroup.c
7765     +++ b/kernel/cgroup.c
7766     @@ -46,7 +46,6 @@
7767     #include <linux/slab.h>
7768     #include <linux/spinlock.h>
7769     #include <linux/rwsem.h>
7770     -#include <linux/percpu-rwsem.h>
7771     #include <linux/string.h>
7772     #include <linux/sort.h>
7773     #include <linux/kmod.h>
7774     @@ -104,8 +103,6 @@ static DEFINE_SPINLOCK(cgroup_idr_lock);
7775     */
7776     static DEFINE_SPINLOCK(release_agent_path_lock);
7777    
7778     -struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
7779     -
7780     #define cgroup_assert_mutex_or_rcu_locked() \
7781     rcu_lockdep_assert(rcu_read_lock_held() || \
7782     lockdep_is_held(&cgroup_mutex), \
7783     @@ -870,6 +867,48 @@ static struct css_set *find_css_set(struct css_set *old_cset,
7784     return cset;
7785     }
7786    
7787     +void cgroup_threadgroup_change_begin(struct task_struct *tsk)
7788     +{
7789     + down_read(&tsk->signal->group_rwsem);
7790     +}
7791     +
7792     +void cgroup_threadgroup_change_end(struct task_struct *tsk)
7793     +{
7794     + up_read(&tsk->signal->group_rwsem);
7795     +}
7796     +
7797     +/**
7798     + * threadgroup_lock - lock threadgroup
7799     + * @tsk: member task of the threadgroup to lock
7800     + *
7801     + * Lock the threadgroup @tsk belongs to. No new task is allowed to enter
7802     + * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
7803     + * change ->group_leader/pid. This is useful for cases where the threadgroup
7804     + * needs to stay stable across blockable operations.
7805     + *
7806     + * fork and exit explicitly call threadgroup_change_{begin|end}() for
7807     + * synchronization. While held, no new task will be added to threadgroup
7808     + * and no existing live task will have its PF_EXITING set.
7809     + *
7810     + * de_thread() does threadgroup_change_{begin|end}() when a non-leader
7811     + * sub-thread becomes a new leader.
7812     + */
7813     +static void threadgroup_lock(struct task_struct *tsk)
7814     +{
7815     + down_write(&tsk->signal->group_rwsem);
7816     +}
7817     +
7818     +/**
7819     + * threadgroup_unlock - unlock threadgroup
7820     + * @tsk: member task of the threadgroup to unlock
7821     + *
7822     + * Reverse threadgroup_lock().
7823     + */
7824     +static inline void threadgroup_unlock(struct task_struct *tsk)
7825     +{
7826     + up_write(&tsk->signal->group_rwsem);
7827     +}
7828     +
7829     static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
7830     {
7831     struct cgroup *root_cgrp = kf_root->kn->priv;
7832     @@ -2066,9 +2105,9 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
7833     lockdep_assert_held(&css_set_rwsem);
7834    
7835     /*
7836     - * We are synchronized through cgroup_threadgroup_rwsem against
7837     - * PF_EXITING setting such that we can't race against cgroup_exit()
7838     - * changing the css_set to init_css_set and dropping the old one.
7839     + * We are synchronized through threadgroup_lock() against PF_EXITING
7840     + * setting such that we can't race against cgroup_exit() changing the
7841     + * css_set to init_css_set and dropping the old one.
7842     */
7843     WARN_ON_ONCE(tsk->flags & PF_EXITING);
7844     old_cset = task_css_set(tsk);
7845     @@ -2125,11 +2164,10 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
7846     * @src_cset and add it to @preloaded_csets, which should later be cleaned
7847     * up by cgroup_migrate_finish().
7848     *
7849     - * This function may be called without holding cgroup_threadgroup_rwsem
7850     - * even if the target is a process. Threads may be created and destroyed
7851     - * but as long as cgroup_mutex is not dropped, no new css_set can be put
7852     - * into play and the preloaded css_sets are guaranteed to cover all
7853     - * migrations.
7854     + * This function may be called without holding threadgroup_lock even if the
7855     + * target is a process. Threads may be created and destroyed but as long
7856     + * as cgroup_mutex is not dropped, no new css_set can be put into play and
7857     + * the preloaded css_sets are guaranteed to cover all migrations.
7858     */
7859     static void cgroup_migrate_add_src(struct css_set *src_cset,
7860     struct cgroup *dst_cgrp,
7861     @@ -2232,7 +2270,7 @@ err:
7862     * @threadgroup: whether @leader points to the whole process or a single task
7863     *
7864     * Migrate a process or task denoted by @leader to @cgrp. If migrating a
7865     - * process, the caller must be holding cgroup_threadgroup_rwsem. The
7866     + * process, the caller must be holding threadgroup_lock of @leader. The
7867     * caller is also responsible for invoking cgroup_migrate_add_src() and
7868     * cgroup_migrate_prepare_dst() on the targets before invoking this
7869     * function and following up with cgroup_migrate_finish().
7870     @@ -2360,7 +2398,7 @@ out_release_tset:
7871     * @leader: the task or the leader of the threadgroup to be attached
7872     * @threadgroup: attach the whole threadgroup?
7873     *
7874     - * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
7875     + * Call holding cgroup_mutex and threadgroup_lock of @leader.
7876     */
7877     static int cgroup_attach_task(struct cgroup *dst_cgrp,
7878     struct task_struct *leader, bool threadgroup)
7879     @@ -2452,13 +2490,14 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
7880     if (!cgrp)
7881     return -ENODEV;
7882    
7883     - percpu_down_write(&cgroup_threadgroup_rwsem);
7884     +retry_find_task:
7885     rcu_read_lock();
7886     if (pid) {
7887     tsk = find_task_by_vpid(pid);
7888     if (!tsk) {
7889     + rcu_read_unlock();
7890     ret = -ESRCH;
7891     - goto out_unlock_rcu;
7892     + goto out_unlock_cgroup;
7893     }
7894     } else {
7895     tsk = current;
7896     @@ -2474,23 +2513,37 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
7897     */
7898     if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
7899     ret = -EINVAL;
7900     - goto out_unlock_rcu;
7901     + rcu_read_unlock();
7902     + goto out_unlock_cgroup;
7903     }
7904    
7905     get_task_struct(tsk);
7906     rcu_read_unlock();
7907    
7908     + threadgroup_lock(tsk);
7909     + if (threadgroup) {
7910     + if (!thread_group_leader(tsk)) {
7911     + /*
7912     + * a race with de_thread from another thread's exec()
7913     + * may strip us of our leadership, if this happens,
7914     + * there is no choice but to throw this task away and
7915     + * try again; this is
7916     + * "double-double-toil-and-trouble-check locking".
7917     + */
7918     + threadgroup_unlock(tsk);
7919     + put_task_struct(tsk);
7920     + goto retry_find_task;
7921     + }
7922     + }
7923     +
7924     ret = cgroup_procs_write_permission(tsk, cgrp, of);
7925     if (!ret)
7926     ret = cgroup_attach_task(cgrp, tsk, threadgroup);
7927    
7928     - put_task_struct(tsk);
7929     - goto out_unlock_threadgroup;
7930     + threadgroup_unlock(tsk);
7931    
7932     -out_unlock_rcu:
7933     - rcu_read_unlock();
7934     -out_unlock_threadgroup:
7935     - percpu_up_write(&cgroup_threadgroup_rwsem);
7936     + put_task_struct(tsk);
7937     +out_unlock_cgroup:
7938     cgroup_kn_unlock(of->kn);
7939     return ret ?: nbytes;
7940     }
7941     @@ -2635,8 +2688,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
7942    
7943     lockdep_assert_held(&cgroup_mutex);
7944    
7945     - percpu_down_write(&cgroup_threadgroup_rwsem);
7946     -
7947     /* look up all csses currently attached to @cgrp's subtree */
7948     down_read(&css_set_rwsem);
7949     css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
7950     @@ -2692,8 +2743,17 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
7951     goto out_finish;
7952     last_task = task;
7953    
7954     + threadgroup_lock(task);
7955     + /* raced against de_thread() from another thread? */
7956     + if (!thread_group_leader(task)) {
7957     + threadgroup_unlock(task);
7958     + put_task_struct(task);
7959     + continue;
7960     + }
7961     +
7962     ret = cgroup_migrate(src_cset->dfl_cgrp, task, true);
7963    
7964     + threadgroup_unlock(task);
7965     put_task_struct(task);
7966    
7967     if (WARN(ret, "cgroup: failed to update controllers for the default hierarchy (%d), further operations may crash or hang\n", ret))
7968     @@ -2703,7 +2763,6 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
7969    
7970     out_finish:
7971     cgroup_migrate_finish(&preloaded_csets);
7972     - percpu_up_write(&cgroup_threadgroup_rwsem);
7973     return ret;
7974     }
7975    
7976     @@ -5013,7 +5072,6 @@ int __init cgroup_init(void)
7977     unsigned long key;
7978     int ssid, err;
7979    
7980     - BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
7981     BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
7982     BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
7983    
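This portion of the patch effectively reverts the global cgroup_threadgroup_rwsem to the older per-signal_struct rw_semaphore: fork and exit take the read side via cgroup_threadgroup_change_begin/end(), while migration takes the write side via threadgroup_lock(), so a writer only stalls the one threadgroup being moved; the retry_find_task dance in __cgroup_procs_write() returns because leadership must be re-checked under the per-process lock. A pthread sketch of that shape (stand-ins, not kernel primitives; build with -lpthread):

    /* Shape of the per-threadgroup locking after the revert. */
    #include <pthread.h>

    static pthread_rwlock_t group_rwsem = PTHREAD_RWLOCK_INITIALIZER;

    static void change_begin(void) { pthread_rwlock_rdlock(&group_rwsem); }
    static void change_end(void)   { pthread_rwlock_unlock(&group_rwsem); }

    static void threadgroup_lock(void)   { pthread_rwlock_wrlock(&group_rwsem); }
    static void threadgroup_unlock(void) { pthread_rwlock_unlock(&group_rwsem); }

    int main(void)
    {
            change_begin();      /* many forks/exits may hold this concurrently */
            change_end();

            threadgroup_lock();  /* migration excludes them, this group only */
            threadgroup_unlock();
            return 0;
    }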
7984     diff --git a/kernel/fork.c b/kernel/fork.c
7985     index 26a70dc7a915..e769c8c86f86 100644
7986     --- a/kernel/fork.c
7987     +++ b/kernel/fork.c
7988     @@ -1146,6 +1146,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
7989     tty_audit_fork(sig);
7990     sched_autogroup_fork(sig);
7991    
7992     +#ifdef CONFIG_CGROUPS
7993     + init_rwsem(&sig->group_rwsem);
7994     +#endif
7995     +
7996     sig->oom_score_adj = current->signal->oom_score_adj;
7997     sig->oom_score_adj_min = current->signal->oom_score_adj_min;
7998    
7999     diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
8000     index 0e97c142ce40..4e6267a34440 100644
8001     --- a/kernel/irq/proc.c
8002     +++ b/kernel/irq/proc.c
8003     @@ -12,6 +12,7 @@
8004     #include <linux/seq_file.h>
8005     #include <linux/interrupt.h>
8006     #include <linux/kernel_stat.h>
8007     +#include <linux/mutex.h>
8008    
8009     #include "internals.h"
8010    
8011     @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
8012    
8013     void register_irq_proc(unsigned int irq, struct irq_desc *desc)
8014     {
8015     + static DEFINE_MUTEX(register_lock);
8016     char name [MAX_NAMELEN];
8017    
8018     - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
8019     + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
8020     return;
8021    
8022     + /*
8023     + * irq directories are registered only when a handler is
8024     + * added, not when the descriptor is created, so multiple
8025     + * tasks might try to register at the same time.
8026     + */
8027     + mutex_lock(&register_lock);
8028     +
8029     + if (desc->dir)
8030     + goto out_unlock;
8031     +
8032     memset(name, 0, MAX_NAMELEN);
8033     sprintf(name, "%d", irq);
8034    
8035     /* create /proc/irq/1234 */
8036     desc->dir = proc_mkdir(name, root_irq_dir);
8037     if (!desc->dir)
8038     - return;
8039     + goto out_unlock;
8040    
8041     #ifdef CONFIG_SMP
8042     /* create /proc/irq/<irq>/smp_affinity */
8043     @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
8044    
8045     proc_create_data("spurious", 0444, desc->dir,
8046     &irq_spurious_proc_fops, (void *)(long)irq);
8047     +
8048     +out_unlock:
8049     + mutex_unlock(&register_lock);
8050     }
8051    
8052     void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
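Because the /proc/irq/<n> directory is created when a handler is registered rather than when the descriptor is allocated, two tasks could both observe desc->dir == NULL and race into proc_mkdir(); the fix serializes with a function-local mutex and re-checks under it. The pattern in miniature (illustrative stand-ins; build with -lpthread):

    /* Sketch of the serialized create-once pattern. */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *dir;   /* stand-in for desc->dir */

    static void register_once(void)
    {
            static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;

            pthread_mutex_lock(&register_lock);
            if (!dir)                 /* re-check under the lock */
                    dir = malloc(1);  /* kernel: proc_mkdir() */
            pthread_mutex_unlock(&register_lock);
    }

    int main(void)
    {
            register_once();
            register_once();          /* second call is a no-op */
            printf("dir=%p\n", dir);
            return 0;
    }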
8053     diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
8054     index 38c49202d532..8ed01611ae73 100644
8055     --- a/kernel/locking/qspinlock.c
8056     +++ b/kernel/locking/qspinlock.c
8057     @@ -289,7 +289,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
8058     if (pv_enabled())
8059     goto queue;
8060    
8061     - if (virt_queued_spin_lock(lock))
8062     + if (virt_spin_lock(lock))
8063     return;
8064    
8065     /*
8066     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8067     index e9673433cc01..6776631676e0 100644
8068     --- a/kernel/sched/core.c
8069     +++ b/kernel/sched/core.c
8070     @@ -2461,11 +2461,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
8071     * If a task dies, then it sets TASK_DEAD in tsk->state and calls
8072     * schedule one last time. The schedule call will never return, and
8073     * the scheduled task must drop that reference.
8074     - * The test for TASK_DEAD must occur while the runqueue locks are
8075     - * still held, otherwise prev could be scheduled on another cpu, die
8076     - * there before we look at prev->state, and then the reference would
8077     - * be dropped twice.
8078     - * Manfred Spraul <manfred@colorfullife.com>
8079     + *
8080     + * We must observe prev->state before clearing prev->on_cpu (in
8081     + * finish_lock_switch), otherwise a concurrent wakeup can get prev
8082     + * running on another CPU and we could race with its RUNNING -> DEAD
8083     + * transition, resulting in a double drop.
8084     */
8085     prev_state = prev->state;
8086     vtime_task_switch(prev);
8087     @@ -2614,13 +2614,20 @@ unsigned long nr_running(void)
8088    
8089     /*
8090     * Check if only the current task is running on the cpu.
8091     + *
8092     + * Caution: this function does not check that the caller has disabled
8093     + * preemption, thus the result might have a time-of-check-to-time-of-use
8094     + * race. The caller is responsible to use it correctly, for example:
8095     + *
8096     + * - from a non-preemptable section (of course)
8097     + *
8098     + * - from a thread that is bound to a single CPU
8099     + *
8100     + * - in a loop with very short iterations (e.g. a polling loop)
8101     */
8102     bool single_task_running(void)
8103     {
8104     - if (cpu_rq(smp_processor_id())->nr_running == 1)
8105     - return true;
8106     - else
8107     - return false;
8108     + return raw_rq()->nr_running == 1;
8109     }
8110     EXPORT_SYMBOL(single_task_running);
8111    
8112     @@ -4492,7 +4499,7 @@ SYSCALL_DEFINE0(sched_yield)
8113    
8114     int __sched _cond_resched(void)
8115     {
8116     - if (should_resched()) {
8117     + if (should_resched(0)) {
8118     preempt_schedule_common();
8119     return 1;
8120     }
8121     @@ -4510,7 +4517,7 @@ EXPORT_SYMBOL(_cond_resched);
8122     */
8123     int __cond_resched_lock(spinlock_t *lock)
8124     {
8125     - int resched = should_resched();
8126     + int resched = should_resched(PREEMPT_LOCK_OFFSET);
8127     int ret = 0;
8128    
8129     lockdep_assert_held(lock);
8130     @@ -4532,7 +4539,7 @@ int __sched __cond_resched_softirq(void)
8131     {
8132     BUG_ON(!in_softirq());
8133    
8134     - if (should_resched()) {
8135     + if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
8136     local_bh_enable();
8137     preempt_schedule_common();
8138     local_bh_disable();
8139     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
8140     index 84d48790bb6d..08ab96b366bf 100644
8141     --- a/kernel/sched/sched.h
8142     +++ b/kernel/sched/sched.h
8143     @@ -1091,9 +1091,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
8144     * After ->on_cpu is cleared, the task can be moved to a different CPU.
8145     * We must ensure this doesn't happen until the switch is completely
8146     * finished.
8147     + *
8148     + * Pairs with the control dependency and rmb in try_to_wake_up().
8149     */
8150     - smp_wmb();
8151     - prev->on_cpu = 0;
8152     + smp_store_release(&prev->on_cpu, 0);
8153     #endif
8154     #ifdef CONFIG_DEBUG_SPINLOCK
8155     /* this is a valid case when another task releases the spinlock */
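smp_store_release() folds the smp_wmb()-plus-plain-store pair into a single primitive whose ordering, as the new comment says, pairs with the control dependency and rmb in try_to_wake_up(): prev->state must be observable before on_cpu reads as zero. A rough C11 analogue (the kernel primitives are not identical to C11 atomics, and the names here are stand-ins):

    /* Rough C11 analogue of the finish_lock_switch() change. */
    #include <stdatomic.h>

    static _Atomic int on_cpu = 1;
    static int task_state;

    static void finish_switch_demo(int state)
    {
            task_state = state;   /* must be visible before on_cpu == 0 */

            /* old: smp_wmb(); on_cpu = 0;  (barrier + plain store) */
            atomic_store_explicit(&on_cpu, 0, memory_order_release);
    }

    int main(void)
    {
            finish_switch_demo(1);
            return 0;
    }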
8156     diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
8157     index 841b72f720e8..3a38775b50c2 100644
8158     --- a/kernel/time/clocksource.c
8159     +++ b/kernel/time/clocksource.c
8160     @@ -217,7 +217,7 @@ static void clocksource_watchdog(unsigned long data)
8161     continue;
8162    
8163     /* Check the deviation from the watchdog clocksource. */
8164     - if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
8165     + if (abs64(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
8166     pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
8167     cs->name);
8168     pr_warn(" '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
8169     diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
8170     index bca3667a2de1..a20d4110e871 100644
8171     --- a/kernel/time/timekeeping.c
8172     +++ b/kernel/time/timekeeping.c
8173     @@ -1607,7 +1607,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
8174     negative = (tick_error < 0);
8175    
8176     /* Sort out the magnitude of the correction */
8177     - tick_error = abs(tick_error);
8178     + tick_error = abs64(tick_error);
8179     for (adj = 0; tick_error > interval; adj++)
8180     tick_error >>= 1;
8181    
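Both timekeeping hunks cure the same class of bug: abs() takes and returns int, so a 64-bit nanosecond delta is silently truncated, and an enormous clocksource skew (or tick error) can masquerade as a tiny one; abs64() keeps the full s64 width. A userspace sketch using llabs() as the 64-bit analogue, with a made-up delta:

    /* Sketch: int abs() truncates a 64-bit delta. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            long long delta = (1LL << 32) + 5;   /* enormous skew, in ns */

            printf("abs():   %d\n", abs((int)delta));  /* 5 on common ABIs */
            printf("llabs(): %lld\n", llabs(delta));   /* 4294967301 */
            return 0;
    }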
8182     diff --git a/lib/iommu-common.c b/lib/iommu-common.c
8183     index ff19f66d3f7f..b1c93e94ca7a 100644
8184     --- a/lib/iommu-common.c
8185     +++ b/lib/iommu-common.c
8186     @@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
8187    
8188     static inline bool need_flush(struct iommu_map_table *iommu)
8189     {
8190     - return (iommu->lazy_flush != NULL &&
8191     - (iommu->flags & IOMMU_NEED_FLUSH) != 0);
8192     + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
8193     }
8194    
8195     static inline void set_flush(struct iommu_map_table *iommu)
8196     @@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
8197     goto bail;
8198     }
8199     }
8200     - if (n < pool->hint || need_flush(iommu)) {
8201     + if (iommu->lazy_flush &&
8202     + (n < pool->hint || need_flush(iommu))) {
8203     clear_flush(iommu);
8204     iommu->lazy_flush(iommu);
8205     }
8206     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
8207     index a8c3087089d8..62c1ec5a9d31 100644
8208     --- a/mm/hugetlb.c
8209     +++ b/mm/hugetlb.c
8210     @@ -2974,6 +2974,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
8211     continue;
8212    
8213     /*
8214     + * Shared VMAs have their own reserves and do not affect
8215     + * MAP_PRIVATE accounting but it is possible that a shared
8216     + * VMA is using the same page so check and skip such VMAs.
8217     + */
8218     + if (iter_vma->vm_flags & VM_MAYSHARE)
8219     + continue;
8220     +
8221     + /*
8222     * Unmap the page from other VMAs without their own reserves.
8223     * They get marked to be SIGKILLed if they fault in these
8224     * areas. This is because a future no-page fault on this VMA
8225     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
8226     index acb93c554f6e..237d4686482d 100644
8227     --- a/mm/memcontrol.c
8228     +++ b/mm/memcontrol.c
8229     @@ -806,12 +806,14 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
8230     }
8231    
8232     /*
8233     + * Return page count for single (non recursive) @memcg.
8234     + *
8235     * Implementation Note: reading percpu statistics for memcg.
8236     *
8237     * Both of vmstat[] and percpu_counter has threshold and do periodic
8238     * synchronization to implement "quick" read. There are trade-off between
8239     * reading cost and precision of value. Then, we may have a chance to implement
8240     - * a periodic synchronizion of counter in memcg's counter.
8241     + * a periodic synchronization of counter in memcg's counter.
8242     *
8243     * But this _read() function is used for user interface now. The user accounts
8244     * memory usage by memory cgroup and he _always_ requires exact value because
8245     @@ -821,17 +823,24 @@ mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
8246     *
8247     * If there are kernel internal actions which can make use of some not-exact
8248     * value, and reading all cpu value can be performance bottleneck in some
8249     - * common workload, threashold and synchonization as vmstat[] should be
8250     + * common workload, threshold and synchronization as vmstat[] should be
8251     * implemented.
8252     */
8253     -static long mem_cgroup_read_stat(struct mem_cgroup *memcg,
8254     - enum mem_cgroup_stat_index idx)
8255     +static unsigned long
8256     +mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
8257     {
8258     long val = 0;
8259     int cpu;
8260    
8261     + /* Per-cpu values can be negative, use a signed accumulator */
8262     for_each_possible_cpu(cpu)
8263     val += per_cpu(memcg->stat->count[idx], cpu);
8264     + /*
8265     + * Summing races with updates, so val may be negative. Avoid exposing
8266     + * transient negative values.
8267     + */
8268     + if (val < 0)
8269     + val = 0;
8270     return val;
8271     }
8272    
8273     @@ -1498,7 +1507,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
8274     for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
8275     if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
8276     continue;
8277     - pr_cont(" %s:%ldKB", mem_cgroup_stat_names[i],
8278     + pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
8279     K(mem_cgroup_read_stat(iter, i)));
8280     }
8281    
8282     @@ -3119,14 +3128,11 @@ static unsigned long tree_stat(struct mem_cgroup *memcg,
8283     enum mem_cgroup_stat_index idx)
8284     {
8285     struct mem_cgroup *iter;
8286     - long val = 0;
8287     + unsigned long val = 0;
8288    
8289     - /* Per-cpu values can be negative, use a signed accumulator */
8290     for_each_mem_cgroup_tree(iter, memcg)
8291     val += mem_cgroup_read_stat(iter, idx);
8292    
8293     - if (val < 0) /* race ? */
8294     - val = 0;
8295     return val;
8296     }
8297    
8298     @@ -3469,7 +3475,7 @@ static int memcg_stat_show(struct seq_file *m, void *v)
8299     for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
8300     if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
8301     continue;
8302     - seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
8303     + seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
8304     mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
8305     }
8306    
8307     @@ -3494,13 +3500,13 @@ static int memcg_stat_show(struct seq_file *m, void *v)
8308     (u64)memsw * PAGE_SIZE);
8309    
8310     for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
8311     - long long val = 0;
8312     + unsigned long long val = 0;
8313    
8314     if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
8315     continue;
8316     for_each_mem_cgroup_tree(mi, memcg)
8317     val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
8318     - seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
8319     + seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
8320     }
8321    
8322     for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
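With this change mem_cgroup_read_stat() clamps the transiently negative per-cpu sum itself and returns an unsigned value, so callers such as tree_stat() and memcg_stat_show() drop their own clamp and print with %lu/%llu. A self-contained C sketch of the underlying idea, with made-up per-cpu data:

    #include <stdio.h>

    #define NR_CPUS 4

    /* Signed per-CPU deltas: concurrent updates may leave a transient
     * negative sum even though the logical counter is non-negative. */
    static long percpu_count[NR_CPUS] = { 7, -3, 2, -8 };

    static unsigned long read_stat(void)
    {
        long val = 0;                   /* signed accumulator */
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
            val += percpu_count[cpu];
        /* Summing races with updates, so val may be negative.  Returning
         * it as unsigned would wrap to a huge value; clamp to 0 instead. */
        if (val < 0)
            val = 0;
        return (unsigned long)val;
    }

    int main(void)
    {
        printf("stat = %lu\n", read_stat());   /* prints 0, not a wrap */
        return 0;
    }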
8323     diff --git a/mm/migrate.c b/mm/migrate.c
8324     index eb4267107d1f..fcb6204de108 100644
8325     --- a/mm/migrate.c
8326     +++ b/mm/migrate.c
8327     @@ -734,6 +734,15 @@ static int move_to_new_page(struct page *newpage, struct page *page,
8328     if (PageSwapBacked(page))
8329     SetPageSwapBacked(newpage);
8330    
8331     + /*
8332     + * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
8333     + * needs newpage's memcg set to transfer memcg dirty page accounting.
8334     + * So perform memcg migration in two steps:
8335     + * 1. set newpage->mem_cgroup (here)
8336     + * 2. clear page->mem_cgroup (below)
8337     + */
8338     + set_page_memcg(newpage, page_memcg(page));
8339     +
8340     mapping = page_mapping(page);
8341     if (!mapping)
8342     rc = migrate_page(mapping, newpage, page, mode);
8343     @@ -750,9 +759,10 @@ static int move_to_new_page(struct page *newpage, struct page *page,
8344     rc = fallback_migrate_page(mapping, newpage, page, mode);
8345    
8346     if (rc != MIGRATEPAGE_SUCCESS) {
8347     + set_page_memcg(newpage, NULL);
8348     newpage->mapping = NULL;
8349     } else {
8350     - mem_cgroup_migrate(page, newpage, false);
8351     + set_page_memcg(page, NULL);
8352     if (page_was_mapped)
8353     remove_migration_ptes(page, newpage);
8354     page->mapping = NULL;
8355     @@ -1068,7 +1078,7 @@ out:
8356     if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
8357     put_new_page(new_hpage, private);
8358     else
8359     - put_page(new_hpage);
8360     + putback_active_hugepage(new_hpage);
8361    
8362     if (result) {
8363     if (rc)
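The migrate.c change replaces mem_cgroup_migrate() with the explicit two-step handover the new comment describes: newpage's memcg is set before migrate_page_copy() runs (so the copied PG_dirty is accounted to the right group), and the old page's memcg is cleared only on success, with a rollback on failure. A hedged sketch of that commit/rollback shape, using invented struct names:

    #include <stdio.h>
    #include <stddef.h>

    struct group { const char *name; };
    struct page  { struct group *owner; int dirty; };

    /* The copy step charges dirtiness to newp->owner, so the owner must
     * already be set when it runs (mirrors migrate_page_copy()). */
    static void copy_page_state(struct page *newp, struct page *oldp)
    {
        newp->dirty = oldp->dirty;
        printf("dirty charged to %s\n",
               newp->owner ? newp->owner->name : "(none)");
    }

    static int migrate(struct page *newp, struct page *oldp, int fail)
    {
        newp->owner = oldp->owner;      /* step 1: set new owner first */
        copy_page_state(newp, oldp);
        if (fail) {
            newp->owner = NULL;         /* roll back on failure */
            return -1;
        }
        oldp->owner = NULL;             /* step 2: clear old owner on success */
        return 0;
    }

    int main(void)
    {
        struct group g = { "memcg0" };
        struct page old = { &g, 1 }, new = { NULL, 0 };
        return migrate(&new, &old, 0) ? 1 : 0;
    }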
8364     diff --git a/mm/slab.c b/mm/slab.c
8365     index bbd0b47dc6a9..ae360283029c 100644
8366     --- a/mm/slab.c
8367     +++ b/mm/slab.c
8368     @@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
8369     size += BYTES_PER_WORD;
8370     }
8371     #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
8372     - if (size >= kmalloc_size(INDEX_NODE + 1)
8373     - && cachep->object_size > cache_line_size()
8374     - && ALIGN(size, cachep->align) < PAGE_SIZE) {
8375     + /*
8376     + * To activate debug pagealloc, off-slab management is a necessary
8377     + * requirement. In the early phase of initialization, small sized
8378     + * slabs are not yet set up, so it would not be possible. Hence we
8379     + * check size >= 256: it guarantees that all the necessary small
8380     + * sized slabs are initialized in the current initialization sequence.
8381     + */
8382     + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
8383     + size >= 256 && cachep->object_size > cache_line_size() &&
8384     + ALIGN(size, cachep->align) < PAGE_SIZE) {
8385     cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
8386     size = PAGE_SIZE;
8387     }
8388     diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
8389     index 6d0b471eede8..cc7d87d64987 100644
8390     --- a/net/batman-adv/distributed-arp-table.c
8391     +++ b/net/batman-adv/distributed-arp-table.c
8392     @@ -19,6 +19,7 @@
8393     #include "main.h"
8394    
8395     #include <linux/atomic.h>
8396     +#include <linux/bitops.h>
8397     #include <linux/byteorder/generic.h>
8398     #include <linux/errno.h>
8399     #include <linux/etherdevice.h>
8400     @@ -453,7 +454,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
8401     int j;
8402    
8403     /* check if orig node candidate is running DAT */
8404     - if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
8405     + if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
8406     goto out;
8407    
8408     /* Check if this node has already been selected... */
8409     @@ -713,9 +714,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
8410     uint16_t tvlv_value_len)
8411     {
8412     if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
8413     - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
8414     + clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
8415     else
8416     - orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
8417     + set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
8418     }
8419    
8420     /**
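Here, and in the network-coding and translation-table hunks below, the capability flags move from plain |=/&= read-modify-write operations on a uint8_t to set_bit()/clear_bit()/test_bit() on an unsigned long, whose updates are atomic and cannot lose a concurrent handler's change. A portable C11 model of the difference, with the kernel helpers stood in by fetch_or/fetch_and:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { CAPA_HAS_DAT, CAPA_HAS_NC };        /* bit numbers, not masks */

    static atomic_ulong capabilities;

    static void set_capa(int bit)   { atomic_fetch_or(&capabilities, 1UL << bit); }
    static void clear_capa(int bit) { atomic_fetch_and(&capabilities, ~(1UL << bit)); }
    static int  test_capa(int bit)  { return (atomic_load(&capabilities) >> bit) & 1; }

    int main(void)
    {
        set_capa(CAPA_HAS_DAT);
        clear_capa(CAPA_HAS_NC);
        /* A plain cap |= FLAG on a non-atomic byte could lose a concurrent
         * update; fetch_or/fetch_and make each modification atomic. */
        printf("DAT=%d NC=%d\n", test_capa(CAPA_HAS_DAT), test_capa(CAPA_HAS_NC));
        return 0;
    }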
8421     diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
8422     index 7aa480b7edd0..68a9554961eb 100644
8423     --- a/net/batman-adv/multicast.c
8424     +++ b/net/batman-adv/multicast.c
8425     @@ -19,6 +19,8 @@
8426     #include "main.h"
8427    
8428     #include <linux/atomic.h>
8429     +#include <linux/bitops.h>
8430     +#include <linux/bug.h>
8431     #include <linux/byteorder/generic.h>
8432     #include <linux/errno.h>
8433     #include <linux/etherdevice.h>
8434     @@ -588,19 +590,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
8435     *
8436     * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
8437     * orig, has toggled then this method updates counter and list accordingly.
8438     + *
8439     + * Caller needs to hold orig->mcast_handler_lock.
8440     */
8441     static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
8442     struct batadv_orig_node *orig,
8443     uint8_t mcast_flags)
8444     {
8445     + struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
8446     + struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
8447     +
8448     /* switched from flag unset to set */
8449     if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
8450     !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
8451     atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
8452    
8453     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8454     - hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
8455     - &bat_priv->mcast.want_all_unsnoopables_list);
8456     + /* flag checks above + mcast_handler_lock prevent this */
8457     + WARN_ON(!hlist_unhashed(node));
8458     +
8459     + hlist_add_head_rcu(node, head);
8460     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8461     /* switched from flag set to unset */
8462     } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
8463     @@ -608,7 +617,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
8464     atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
8465    
8466     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8467     - hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
8468     + /* flag checks above + mcast_handler_lock prevent this */
8469     + WARN_ON(hlist_unhashed(node));
8470     +
8471     + hlist_del_init_rcu(node);
8472     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8473     }
8474     }
8475     @@ -621,19 +633,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
8476     *
8477     * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
8478     * toggled then this method updates counter and list accordingly.
8479     + *
8480     + * Caller needs to hold orig->mcast_handler_lock.
8481     */
8482     static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
8483     struct batadv_orig_node *orig,
8484     uint8_t mcast_flags)
8485     {
8486     + struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
8487     + struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
8488     +
8489     /* switched from flag unset to set */
8490     if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
8491     !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
8492     atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
8493    
8494     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8495     - hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
8496     - &bat_priv->mcast.want_all_ipv4_list);
8497     + /* flag checks above + mcast_handler_lock prevent this */
8498     + WARN_ON(!hlist_unhashed(node));
8499     +
8500     + hlist_add_head_rcu(node, head);
8501     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8502     /* switched from flag set to unset */
8503     } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
8504     @@ -641,7 +660,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
8505     atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
8506    
8507     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8508     - hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
8509     + /* flag checks above + mcast_handler_lock prevent this */
8510     + WARN_ON(hlist_unhashed(node));
8511     +
8512     + hlist_del_init_rcu(node);
8513     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8514     }
8515     }
8516     @@ -654,19 +676,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
8517     *
8518     * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
8519     * toggled then this method updates counter and list accordingly.
8520     + *
8521     + * Caller needs to hold orig->mcast_handler_lock.
8522     */
8523     static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
8524     struct batadv_orig_node *orig,
8525     uint8_t mcast_flags)
8526     {
8527     + struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
8528     + struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
8529     +
8530     /* switched from flag unset to set */
8531     if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
8532     !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
8533     atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
8534    
8535     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8536     - hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
8537     - &bat_priv->mcast.want_all_ipv6_list);
8538     + /* flag checks above + mcast_handler_lock prevent this */
8539     + WARN_ON(!hlist_unhashed(node));
8540     +
8541     + hlist_add_head_rcu(node, head);
8542     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8543     /* switched from flag set to unset */
8544     } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
8545     @@ -674,7 +703,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
8546     atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
8547    
8548     spin_lock_bh(&bat_priv->mcast.want_lists_lock);
8549     - hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
8550     + /* flag checks above + mcast_handler_lock prevent this */
8551     + WARN_ON(hlist_unhashed(node));
8552     +
8553     + hlist_del_init_rcu(node);
8554     spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
8555     }
8556     }
8557     @@ -697,39 +729,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
8558     uint8_t mcast_flags = BATADV_NO_FLAGS;
8559     bool orig_initialized;
8560    
8561     - orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
8562     + if (orig_mcast_enabled && tvlv_value &&
8563     + (tvlv_value_len >= sizeof(mcast_flags)))
8564     + mcast_flags = *(uint8_t *)tvlv_value;
8565     +
8566     + spin_lock_bh(&orig->mcast_handler_lock);
8567     + orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
8568     + &orig->capa_initialized);
8569    
8570     /* If mcast support is turned on decrease the disabled mcast node
8571     * counter only if we had increased it for this node before. If this
8572     * is a completely new orig_node no need to decrease the counter.
8573     */
8574     if (orig_mcast_enabled &&
8575     - !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
8576     + !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
8577     if (orig_initialized)
8578     atomic_dec(&bat_priv->mcast.num_disabled);
8579     - orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
8580     + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
8581     /* If mcast support is being switched off or if this is an initial
8582     * OGM without mcast support then increase the disabled mcast
8583     * node counter.
8584     */
8585     } else if (!orig_mcast_enabled &&
8586     - (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
8587     + (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
8588     !orig_initialized)) {
8589     atomic_inc(&bat_priv->mcast.num_disabled);
8590     - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
8591     + clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
8592     }
8593    
8594     - orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
8595     -
8596     - if (orig_mcast_enabled && tvlv_value &&
8597     - (tvlv_value_len >= sizeof(mcast_flags)))
8598     - mcast_flags = *(uint8_t *)tvlv_value;
8599     + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
8600    
8601     batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
8602     batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
8603     batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
8604    
8605     orig->mcast_flags = mcast_flags;
8606     + spin_unlock_bh(&orig->mcast_handler_lock);
8607     }
8608    
8609     /**
8610     @@ -763,11 +798,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
8611     {
8612     struct batadv_priv *bat_priv = orig->bat_priv;
8613    
8614     - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
8615     - orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
8616     + spin_lock_bh(&orig->mcast_handler_lock);
8617     +
8618     + if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
8619     + test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
8620     atomic_dec(&bat_priv->mcast.num_disabled);
8621    
8622     batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
8623     batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
8624     batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
8625     +
8626     + spin_unlock_bh(&orig->mcast_handler_lock);
8627     }
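The WARN_ON()s above work because hlist_del_init_rcu(), unlike hlist_del_rcu(), resets the node's pprev pointer, so a later hlist_unhashed() reports the node as off-list; the originator.c hunk below supplies the matching INIT_HLIST_NODE() calls at creation time. A kernel-style fragment of the guarded add/remove pattern (not a standalone program; it assumes the usual list/RCU headers and that the caller holds the serializing lock):

    #include <linux/rculist.h>

    /* Membership is tracked by the node itself via hlist_unhashed();
     * the caller is assumed to hold the lock serializing transitions. */
    static void track(struct hlist_node *node, struct hlist_head *head, bool want)
    {
        if (want) {
            WARN_ON(!hlist_unhashed(node));  /* double add would corrupt */
            hlist_add_head_rcu(node, head);
        } else {
            WARN_ON(hlist_unhashed(node));   /* double delete */
            /* The _init variant resets pprev so hlist_unhashed()
             * gives the right answer next time around. */
            hlist_del_init_rcu(node);
        }
    }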
8628     diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
8629     index f0a50f31d822..46604010dcd4 100644
8630     --- a/net/batman-adv/network-coding.c
8631     +++ b/net/batman-adv/network-coding.c
8632     @@ -19,6 +19,7 @@
8633     #include "main.h"
8634    
8635     #include <linux/atomic.h>
8636     +#include <linux/bitops.h>
8637     #include <linux/byteorder/generic.h>
8638     #include <linux/compiler.h>
8639     #include <linux/debugfs.h>
8640     @@ -134,9 +135,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
8641     uint16_t tvlv_value_len)
8642     {
8643     if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
8644     - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
8645     + clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
8646     else
8647     - orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
8648     + set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
8649     }
8650    
8651     /**
8652     @@ -894,7 +895,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
8653     goto out;
8654    
8655     /* check if orig node is network coding enabled */
8656     - if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
8657     + if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
8658     goto out;
8659    
8660     /* accept ogms from 'good' neighbors and single hop neighbors */
8661     diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
8662     index 018b7495ad84..32a0fcfab36d 100644
8663     --- a/net/batman-adv/originator.c
8664     +++ b/net/batman-adv/originator.c
8665     @@ -696,8 +696,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
8666     orig_node->last_seen = jiffies;
8667     reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
8668     orig_node->bcast_seqno_reset = reset_time;
8669     +
8670     #ifdef CONFIG_BATMAN_ADV_MCAST
8671     orig_node->mcast_flags = BATADV_NO_FLAGS;
8672     + INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
8673     + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
8674     + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
8675     + spin_lock_init(&orig_node->mcast_handler_lock);
8676     #endif
8677    
8678     /* create a vlan object for the "untagged" LAN */
8679     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
8680     index a2fc843c2243..51cda3a7c51d 100644
8681     --- a/net/batman-adv/soft-interface.c
8682     +++ b/net/batman-adv/soft-interface.c
8683     @@ -202,6 +202,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
8684     int gw_mode;
8685     enum batadv_forw_mode forw_mode;
8686     struct batadv_orig_node *mcast_single_orig = NULL;
8687     + int network_offset = ETH_HLEN;
8688    
8689     if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
8690     goto dropped;
8691     @@ -214,14 +215,18 @@ static int batadv_interface_tx(struct sk_buff *skb,
8692     case ETH_P_8021Q:
8693     vhdr = vlan_eth_hdr(skb);
8694    
8695     - if (vhdr->h_vlan_encapsulated_proto != ethertype)
8696     + if (vhdr->h_vlan_encapsulated_proto != ethertype) {
8697     + network_offset += VLAN_HLEN;
8698     break;
8699     + }
8700    
8701     /* fall through */
8702     case ETH_P_BATMAN:
8703     goto dropped;
8704     }
8705    
8706     + skb_set_network_header(skb, network_offset);
8707     +
8708     if (batadv_bla_tx(bat_priv, skb, vid))
8709     goto dropped;
8710    
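The transmit hunk records where the network header really starts: ETH_HLEN for plain frames, plus VLAN_HLEN when an 802.1Q tag encapsulates a different ethertype. A compact userspace sketch of the offset computation with the constants written out (function and parameter names invented):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_HLEN    14     /* bytes of Ethernet header */
    #define VLAN_HLEN    4     /* bytes added by an 802.1Q tag */
    #define ETH_P_8021Q 0x8100

    static int network_offset(uint16_t outer_proto, uint16_t inner_proto,
                              uint16_t expected)
    {
        int off = ETH_HLEN;

        /* A VLAN tag whose encapsulated protocol differs from the expected
         * ethertype pushes the network header back by the tag length. */
        if (outer_proto == ETH_P_8021Q && inner_proto != expected)
            off += VLAN_HLEN;
        return off;
    }

    int main(void)
    {
        printf("plain:  %d\n", network_offset(0x0800, 0, 0x0800));           /* 14 */
        printf("tagged: %d\n", network_offset(ETH_P_8021Q, 0x0800, 0x86DD)); /* 18 */
        return 0;
    }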
8711     diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
8712     index 5809b39c1922..c9b26291ac4c 100644
8713     --- a/net/batman-adv/translation-table.c
8714     +++ b/net/batman-adv/translation-table.c
8715     @@ -19,6 +19,7 @@
8716     #include "main.h"
8717    
8718     #include <linux/atomic.h>
8719     +#include <linux/bitops.h>
8720     #include <linux/bug.h>
8721     #include <linux/byteorder/generic.h>
8722     #include <linux/compiler.h>
8723     @@ -1882,7 +1883,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
8724     }
8725     spin_unlock_bh(list_lock);
8726     }
8727     - orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
8728     + clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
8729     }
8730    
8731     static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
8732     @@ -2841,7 +2842,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
8733     return;
8734     }
8735     }
8736     - orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
8737     + set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
8738     }
8739    
8740     static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
8741     @@ -3343,7 +3344,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
8742     bool has_tt_init;
8743    
8744     tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
8745     - has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
8746     + has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
8747     + &orig_node->capa_initialized);
8748    
8749     /* orig table not initialised AND first diff is in the OGM OR the ttvn
8750     * increased by one -> we can apply the attached changes
8751     diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
8752     index 67d63483618e..55610a805b53 100644
8753     --- a/net/batman-adv/types.h
8754     +++ b/net/batman-adv/types.h
8755     @@ -221,6 +221,7 @@ struct batadv_orig_bat_iv {
8756     * @batadv_dat_addr_t: address of the orig node in the distributed hash
8757     * @last_seen: time when last packet from this node was received
8758     * @bcast_seqno_reset: time when the broadcast seqno window was reset
8759     + * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
8760     * @mcast_flags: multicast flags announced by the orig node
8761     * @mcast_want_all_unsnoop_node: a list node for the
8762     * mcast.want_all_unsnoopables list
8763     @@ -268,13 +269,15 @@ struct batadv_orig_node {
8764     unsigned long last_seen;
8765     unsigned long bcast_seqno_reset;
8766     #ifdef CONFIG_BATMAN_ADV_MCAST
8767     + /* synchronizes mcast tvlv specific orig changes */
8768     + spinlock_t mcast_handler_lock;
8769     uint8_t mcast_flags;
8770     struct hlist_node mcast_want_all_unsnoopables_node;
8771     struct hlist_node mcast_want_all_ipv4_node;
8772     struct hlist_node mcast_want_all_ipv6_node;
8773     #endif
8774     - uint8_t capabilities;
8775     - uint8_t capa_initialized;
8776     + unsigned long capabilities;
8777     + unsigned long capa_initialized;
8778     atomic_t last_ttvn;
8779     unsigned char *tt_buff;
8780     int16_t tt_buff_len;
8781     @@ -313,10 +316,10 @@ struct batadv_orig_node {
8782     * (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
8783     */
8784     enum batadv_orig_capabilities {
8785     - BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
8786     - BATADV_ORIG_CAPA_HAS_NC = BIT(1),
8787     - BATADV_ORIG_CAPA_HAS_TT = BIT(2),
8788     - BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
8789     + BATADV_ORIG_CAPA_HAS_DAT,
8790     + BATADV_ORIG_CAPA_HAS_NC,
8791     + BATADV_ORIG_CAPA_HAS_TT,
8792     + BATADV_ORIG_CAPA_HAS_MCAST,
8793     };
8794    
8795     /**
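The enum change above is a change of meaning, not just of values: the constants used to be masks (BIT(0)..BIT(3)) for &/| arithmetic, and are now bit numbers, which is what set_bit()/test_bit() expect. Mixing the two conventions is a classic bug, as this small standalone demonstration shows:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum capa_mask { MASK_MCAST = BIT(3) };   /* old style: a mask   */
    enum capa_bit  { BIT_MCAST  = 3 };        /* new style: an index */

    int main(void)
    {
        unsigned long word = 0;

        word |= MASK_MCAST;                       /* mask style: bit 3 set */
        printf("mask sets bit 3: %lx\n", word);

        /* Index style, as test_bit()/set_bit() expect: shift by the number. */
        printf("test index 3: %d\n", (int)((word >> BIT_MCAST) & 1));

        /* The classic mistake: passing a mask where an index belongs
         * tests bit 8 here (MASK_MCAST == 8), which is clear. */
        printf("test index BIT(3)=8: %d\n", (int)((word >> MASK_MCAST) & 1));
        return 0;
    }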
8796     diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
8797     index ad82324f710f..0510a577a7b5 100644
8798     --- a/net/bluetooth/smp.c
8799     +++ b/net/bluetooth/smp.c
8800     @@ -2311,12 +2311,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
8801     if (!conn)
8802     return 1;
8803    
8804     - chan = conn->smp;
8805     - if (!chan) {
8806     - BT_ERR("SMP security requested but not available");
8807     - return 1;
8808     - }
8809     -
8810     if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
8811     return 1;
8812    
8813     @@ -2330,6 +2324,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
8814     if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
8815     return 0;
8816    
8817     + chan = conn->smp;
8818     + if (!chan) {
8819     + BT_ERR("SMP security requested but not available");
8820     + return 1;
8821     + }
8822     +
8823     l2cap_chan_lock(chan);
8824    
8825     /* If SMP is already in progress ignore this request */
8826     diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
8827     index afe905c208af..691b54fcaf2a 100644
8828     --- a/net/netfilter/ipset/ip_set_hash_gen.h
8829     +++ b/net/netfilter/ipset/ip_set_hash_gen.h
8830     @@ -152,9 +152,13 @@ htable_bits(u32 hashsize)
8831     #define SET_HOST_MASK(family) (family == AF_INET ? 32 : 128)
8832    
8833     #ifdef IP_SET_HASH_WITH_NET0
8834     +/* cidr from 0 to SET_HOST_MASK() value and c = cidr + 1 */
8835     #define NLEN(family) (SET_HOST_MASK(family) + 1)
8836     +#define CIDR_POS(c) ((c) - 1)
8837     #else
8838     +/* cidr from 1 to SET_HOST_MASK() value and c = cidr + 1 */
8839     #define NLEN(family) SET_HOST_MASK(family)
8840     +#define CIDR_POS(c) ((c) - 2)
8841     #endif
8842    
8843     #else
8844     @@ -305,7 +309,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
8845     } else if (h->nets[i].cidr[n] < cidr) {
8846     j = i;
8847     } else if (h->nets[i].cidr[n] == cidr) {
8848     - h->nets[cidr - 1].nets[n]++;
8849     + h->nets[CIDR_POS(cidr)].nets[n]++;
8850     return;
8851     }
8852     }
8853     @@ -314,7 +318,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
8854     h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
8855     }
8856     h->nets[i].cidr[n] = cidr;
8857     - h->nets[cidr - 1].nets[n] = 1;
8858     + h->nets[CIDR_POS(cidr)].nets[n] = 1;
8859     }
8860    
8861     static void
8862     @@ -325,8 +329,8 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
8863     for (i = 0; i < nets_length; i++) {
8864     if (h->nets[i].cidr[n] != cidr)
8865     continue;
8866     - h->nets[cidr - 1].nets[n]--;
8867     - if (h->nets[cidr - 1].nets[n] > 0)
8868     + h->nets[CIDR_POS(cidr)].nets[n]--;
8869     + if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
8870     return;
8871     for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
8872     h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
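CIDR_POS() centralizes the cidr-to-slot mapping that used to be the open-coded cidr - 1: per the comments above, the stored value c is cidr + 1, so with-/0 sets (prefixes 0..SET_HOST_MASK) use slot c - 1 and plain sets (prefixes 1..SET_HOST_MASK) use slot c - 2. A standalone check that both mappings stay dense and in bounds, illustrating only the index arithmetic:

    #include <assert.h>
    #include <stdio.h>

    #define HOST_MASK 32                  /* SET_HOST_MASK(AF_INET) */

    /* With-/0 variant: prefixes 0..HOST_MASK, stored as c = cidr + 1 */
    #define NLEN_NET0        (HOST_MASK + 1)
    #define CIDR_POS_NET0(c) ((c) - 1)

    /* Without-/0 variant: prefixes 1..HOST_MASK, stored as c = cidr + 1 */
    #define NLEN_PLAIN        HOST_MASK
    #define CIDR_POS_PLAIN(c) ((c) - 2)

    int main(void)
    {
        /* Both mappings use every slot exactly once and never overflow. */
        for (int cidr = 0; cidr <= HOST_MASK; cidr++)
            assert(CIDR_POS_NET0(cidr + 1) >= 0 &&
                   CIDR_POS_NET0(cidr + 1) < NLEN_NET0);
        for (int cidr = 1; cidr <= HOST_MASK; cidr++)
            assert(CIDR_POS_PLAIN(cidr + 1) >= 0 &&
                   CIDR_POS_PLAIN(cidr + 1) < NLEN_PLAIN);
        puts("both CIDR_POS mappings are dense and in bounds");
        return 0;
    }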
8873     diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
8874     index 3c862c0a76d1..a93dfebffa81 100644
8875     --- a/net/netfilter/ipset/ip_set_hash_netnet.c
8876     +++ b/net/netfilter/ipset/ip_set_hash_netnet.c
8877     @@ -131,6 +131,13 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
8878     #define HOST_MASK 32
8879     #include "ip_set_hash_gen.h"
8880    
8881     +static void
8882     +hash_netnet4_init(struct hash_netnet4_elem *e)
8883     +{
8884     + e->cidr[0] = HOST_MASK;
8885     + e->cidr[1] = HOST_MASK;
8886     +}
8887     +
8888     static int
8889     hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
8890     const struct xt_action_param *par,
8891     @@ -160,7 +167,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
8892     {
8893     const struct hash_netnet *h = set->data;
8894     ipset_adtfn adtfn = set->variant->adt[adt];
8895     - struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
8896     + struct hash_netnet4_elem e = { };
8897     struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
8898     u32 ip = 0, ip_to = 0, last;
8899     u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
8900     @@ -169,6 +176,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
8901     if (tb[IPSET_ATTR_LINENO])
8902     *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
8903    
8904     + hash_netnet4_init(&e);
8905     if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
8906     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
8907     return -IPSET_ERR_PROTOCOL;
8908     @@ -357,6 +365,13 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
8909     #define IP_SET_EMIT_CREATE
8910     #include "ip_set_hash_gen.h"
8911    
8912     +static void
8913     +hash_netnet6_init(struct hash_netnet6_elem *e)
8914     +{
8915     + e->cidr[0] = HOST_MASK;
8916     + e->cidr[1] = HOST_MASK;
8917     +}
8918     +
8919     static int
8920     hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
8921     const struct xt_action_param *par,
8922     @@ -385,13 +400,14 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
8923     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
8924     {
8925     ipset_adtfn adtfn = set->variant->adt[adt];
8926     - struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
8927     + struct hash_netnet6_elem e = { };
8928     struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
8929     int ret;
8930    
8931     if (tb[IPSET_ATTR_LINENO])
8932     *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
8933    
8934     + hash_netnet6_init(&e);
8935     if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
8936     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
8937     return -IPSET_ERR_PROTOCOL;
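Each element above is now zero-initialized and then filled by a small init helper instead of using a designated initializer for .cidr. A plausible reading, not stated in the patch itself, is that designated initialization of members reached through unnamed unions was unreliable on older compilers; a runtime helper sidesteps that while keeping the rest of the element zeroed. Sketch with an invented element type:

    #include <stdio.h>

    #define HOST_MASK 32

    struct elem {
        unsigned int ip[2];
        unsigned char cidr[2];   /* in the kernel this sits in a union */
    };

    /* Runtime init helper: works on every compiler and leaves the rest
     * of the (already zeroed) element untouched. */
    static void elem_init(struct elem *e)
    {
        e->cidr[0] = HOST_MASK;
        e->cidr[1] = HOST_MASK;
    }

    int main(void)
    {
        struct elem e = { 0 };   /* zero everything first */
        elem_init(&e);
        printf("cidr = /%u,/%u\n", e.cidr[0], e.cidr[1]);
        return 0;
    }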
8938     diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
8939     index 0c68734f5cc4..9a14c237830f 100644
8940     --- a/net/netfilter/ipset/ip_set_hash_netportnet.c
8941     +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
8942     @@ -142,6 +142,13 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
8943     #define HOST_MASK 32
8944     #include "ip_set_hash_gen.h"
8945    
8946     +static void
8947     +hash_netportnet4_init(struct hash_netportnet4_elem *e)
8948     +{
8949     + e->cidr[0] = HOST_MASK;
8950     + e->cidr[1] = HOST_MASK;
8951     +}
8952     +
8953     static int
8954     hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
8955     const struct xt_action_param *par,
8956     @@ -175,7 +182,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
8957     {
8958     const struct hash_netportnet *h = set->data;
8959     ipset_adtfn adtfn = set->variant->adt[adt];
8960     - struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
8961     + struct hash_netportnet4_elem e = { };
8962     struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
8963     u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
8964     u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
8965     @@ -185,6 +192,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
8966     if (tb[IPSET_ATTR_LINENO])
8967     *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
8968    
8969     + hash_netportnet4_init(&e);
8970     if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
8971     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
8972     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
8973     @@ -412,6 +420,13 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
8974     #define IP_SET_EMIT_CREATE
8975     #include "ip_set_hash_gen.h"
8976    
8977     +static void
8978     +hash_netportnet6_init(struct hash_netportnet6_elem *e)
8979     +{
8980     + e->cidr[0] = HOST_MASK;
8981     + e->cidr[1] = HOST_MASK;
8982     +}
8983     +
8984     static int
8985     hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
8986     const struct xt_action_param *par,
8987     @@ -445,7 +460,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
8988     {
8989     const struct hash_netportnet *h = set->data;
8990     ipset_adtfn adtfn = set->variant->adt[adt];
8991     - struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
8992     + struct hash_netportnet6_elem e = { };
8993     struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
8994     u32 port, port_to;
8995     bool with_ports = false;
8996     @@ -454,6 +469,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
8997     if (tb[IPSET_ATTR_LINENO])
8998     *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
8999    
9000     + hash_netportnet6_init(&e);
9001     if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
9002     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
9003     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
9004     diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
9005     index 3c20d02aee73..0625a42df108 100644
9006     --- a/net/netfilter/nf_conntrack_core.c
9007     +++ b/net/netfilter/nf_conntrack_core.c
9008     @@ -320,12 +320,13 @@ out_free:
9009     }
9010     EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
9011    
9012     -static void nf_ct_tmpl_free(struct nf_conn *tmpl)
9013     +void nf_ct_tmpl_free(struct nf_conn *tmpl)
9014     {
9015     nf_ct_ext_destroy(tmpl);
9016     nf_ct_ext_free(tmpl);
9017     kfree(tmpl);
9018     }
9019     +EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
9020    
9021     static void
9022     destroy_conntrack(struct nf_conntrack *nfct)
9023     diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
9024     index 675d12c69e32..a5d41dfa9f05 100644
9025     --- a/net/netfilter/nf_log.c
9026     +++ b/net/netfilter/nf_log.c
9027     @@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);
9028    
9029     void nf_log_unregister(struct nf_logger *logger)
9030     {
9031     + const struct nf_logger *log;
9032     int i;
9033    
9034     mutex_lock(&nf_log_mutex);
9035     - for (i = 0; i < NFPROTO_NUMPROTO; i++)
9036     - RCU_INIT_POINTER(loggers[i][logger->type], NULL);
9037     + for (i = 0; i < NFPROTO_NUMPROTO; i++) {
9038     + log = nft_log_dereference(loggers[i][logger->type]);
9039     + if (log == logger)
9040     + RCU_INIT_POINTER(loggers[i][logger->type], NULL);
9041     + }
9042     mutex_unlock(&nf_log_mutex);
9043     + synchronize_rcu();
9044     }
9045     EXPORT_SYMBOL(nf_log_unregister);
9046    
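nf_log_unregister() gains two safety properties: a slot is cleared only if it actually points at the logger being removed, and synchronize_rcu() runs before return so no reader can still hold the pointer once the caller frees the logger. A kernel-style sketch of this unregister pattern; the slot array, struct logger and my_mutex are hypothetical stand-ins:

    #include <linux/rcupdate.h>
    #include <linux/mutex.h>

    #define NSLOTS 16

    struct logger;
    static struct logger __rcu *slot[NSLOTS];
    static DEFINE_MUTEX(my_mutex);

    void my_unregister(struct logger *logger)
    {
        int i;

        mutex_lock(&my_mutex);
        for (i = 0; i < NSLOTS; i++) {
            /* Clear only slots that really point at this logger. */
            if (rcu_dereference_protected(slot[i],
                    lockdep_is_held(&my_mutex)) == logger)
                RCU_INIT_POINTER(slot[i], NULL);
        }
        mutex_unlock(&my_mutex);
        /* Wait for all RCU readers before the caller may free logger. */
        synchronize_rcu();
    }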
9047     diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
9048     index d7f168527903..d6ee8f8b19b6 100644
9049     --- a/net/netfilter/nf_synproxy_core.c
9050     +++ b/net/netfilter/nf_synproxy_core.c
9051     @@ -378,7 +378,7 @@ static int __net_init synproxy_net_init(struct net *net)
9052     err3:
9053     free_percpu(snet->stats);
9054     err2:
9055     - nf_conntrack_free(ct);
9056     + nf_ct_tmpl_free(ct);
9057     err1:
9058     return err;
9059     }
9060     diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
9061     index 0c0e8ecf02ab..70277b11f742 100644
9062     --- a/net/netfilter/nfnetlink.c
9063     +++ b/net/netfilter/nfnetlink.c
9064     @@ -444,6 +444,7 @@ done:
9065     static void nfnetlink_rcv(struct sk_buff *skb)
9066     {
9067     struct nlmsghdr *nlh = nlmsg_hdr(skb);
9068     + u_int16_t res_id;
9069     int msglen;
9070    
9071     if (nlh->nlmsg_len < NLMSG_HDRLEN ||
9072     @@ -468,7 +469,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
9073    
9074     nfgenmsg = nlmsg_data(nlh);
9075     skb_pull(skb, msglen);
9076     - nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
9077     + /* Work around old nft using host byte order */
9078     + if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
9079     + res_id = NFNL_SUBSYS_NFTABLES;
9080     + else
9081     + res_id = ntohs(nfgenmsg->res_id);
9082     + nfnetlink_rcv_batch(skb, nlh, res_id);
9083     } else {
9084     netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
9085     }
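The batch path works around old nft binaries that sent the subsystem id in host byte order: that exact raw value is accepted verbatim, and anything else is converted with ntohs(). A userspace model of the decode (the NFNL_SUBSYS_NFTABLES value of 10 is taken from the uapi header of this era):

    #include <stdio.h>
    #include <arpa/inet.h>

    #define NFNL_SUBSYS_NFTABLES 10

    static unsigned short decode_res_id(unsigned short raw)
    {
        /* Old nft userspace put the subsystem id in host byte order.
         * Accept that exact raw value; otherwise treat it as big-endian. */
        if (raw == NFNL_SUBSYS_NFTABLES)
            return NFNL_SUBSYS_NFTABLES;
        return ntohs(raw);
    }

    int main(void)
    {
        printf("%u\n", decode_res_id(NFNL_SUBSYS_NFTABLES));        /* legacy sender */
        printf("%u\n", decode_res_id(htons(NFNL_SUBSYS_NFTABLES))); /* correct sender */
        return 0;
    }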
9086     diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
9087     index 66def315eb56..9c8fab00164b 100644
9088     --- a/net/netfilter/nft_compat.c
9089     +++ b/net/netfilter/nft_compat.c
9090     @@ -619,6 +619,13 @@ struct nft_xt {
9091    
9092     static struct nft_expr_type nft_match_type;
9093    
9094     +static bool nft_match_cmp(const struct xt_match *match,
9095     + const char *name, u32 rev, u32 family)
9096     +{
9097     + return strcmp(match->name, name) == 0 && match->revision == rev &&
9098     + (match->family == NFPROTO_UNSPEC || match->family == family);
9099     +}
9100     +
9101     static const struct nft_expr_ops *
9102     nft_match_select_ops(const struct nft_ctx *ctx,
9103     const struct nlattr * const tb[])
9104     @@ -626,7 +633,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
9105     struct nft_xt *nft_match;
9106     struct xt_match *match;
9107     char *mt_name;
9108     - __u32 rev, family;
9109     + u32 rev, family;
9110    
9111     if (tb[NFTA_MATCH_NAME] == NULL ||
9112     tb[NFTA_MATCH_REV] == NULL ||
9113     @@ -641,8 +648,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
9114     list_for_each_entry(nft_match, &nft_match_list, head) {
9115     struct xt_match *match = nft_match->ops.data;
9116    
9117     - if (strcmp(match->name, mt_name) == 0 &&
9118     - match->revision == rev && match->family == family) {
9119     + if (nft_match_cmp(match, mt_name, rev, family)) {
9120     if (!try_module_get(match->me))
9121     return ERR_PTR(-ENOENT);
9122    
9123     @@ -693,6 +699,13 @@ static LIST_HEAD(nft_target_list);
9124    
9125     static struct nft_expr_type nft_target_type;
9126    
9127     +static bool nft_target_cmp(const struct xt_target *tg,
9128     + const char *name, u32 rev, u32 family)
9129     +{
9130     + return strcmp(tg->name, name) == 0 && tg->revision == rev &&
9131     + (tg->family == NFPROTO_UNSPEC || tg->family == family);
9132     +}
9133     +
9134     static const struct nft_expr_ops *
9135     nft_target_select_ops(const struct nft_ctx *ctx,
9136     const struct nlattr * const tb[])
9137     @@ -700,7 +713,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
9138     struct nft_xt *nft_target;
9139     struct xt_target *target;
9140     char *tg_name;
9141     - __u32 rev, family;
9142     + u32 rev, family;
9143    
9144     if (tb[NFTA_TARGET_NAME] == NULL ||
9145     tb[NFTA_TARGET_REV] == NULL ||
9146     @@ -715,8 +728,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
9147     list_for_each_entry(nft_target, &nft_target_list, head) {
9148     struct xt_target *target = nft_target->ops.data;
9149    
9150     - if (strcmp(target->name, tg_name) == 0 &&
9151     - target->revision == rev && target->family == family) {
9152     + if (nft_target_cmp(target, tg_name, rev, family)) {
9153     if (!try_module_get(target->me))
9154     return ERR_PTR(-ENOENT);
9155    
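Both lookups above are factored into nft_match_cmp()/nft_target_cmp(), and the family test gains a wildcard: an extension registered for NFPROTO_UNSPEC now matches any requested family instead of requiring an exact match. A tiny sketch of the predicate shape with invented types:

    #include <stdio.h>
    #include <string.h>

    enum { NFPROTO_UNSPEC = 0, NFPROTO_IPV4 = 2, NFPROTO_IPV6 = 10 };

    struct ext { const char *name; unsigned rev; unsigned family; };

    static int ext_cmp(const struct ext *e, const char *name,
                       unsigned rev, unsigned family)
    {
        /* UNSPEC extensions are family-agnostic and match everything. */
        return strcmp(e->name, name) == 0 && e->rev == rev &&
               (e->family == NFPROTO_UNSPEC || e->family == family);
    }

    int main(void)
    {
        struct ext limit = { "limit", 0, NFPROTO_UNSPEC };
        printf("%d\n", ext_cmp(&limit, "limit", 0, NFPROTO_IPV6)); /* 1 */
        return 0;
    }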
9156     diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
9157     index 43ddeee404e9..f3377ce1ff18 100644
9158     --- a/net/netfilter/xt_CT.c
9159     +++ b/net/netfilter/xt_CT.c
9160     @@ -233,7 +233,7 @@ out:
9161     return 0;
9162    
9163     err3:
9164     - nf_conntrack_free(ct);
9165     + nf_ct_tmpl_free(ct);
9166     err2:
9167     nf_ct_l3proto_module_put(par->family);
9168     err1:
9169     diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
9170     index d25cd430f9ff..95412abc95b0 100644
9171     --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
9172     +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
9173     @@ -384,6 +384,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
9174     int byte_count)
9175     {
9176     struct ib_send_wr send_wr;
9177     + u32 xdr_off;
9178     int sge_no;
9179     int sge_bytes;
9180     int page_no;
9181     @@ -418,8 +419,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
9182     ctxt->direction = DMA_TO_DEVICE;
9183    
9184     /* Map the payload indicated by 'byte_count' */
9185     + xdr_off = 0;
9186     for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
9187     - int xdr_off = 0;
9188     sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
9189     byte_count -= sge_bytes;
9190     ctxt->sge[sge_no].addr =
9191     @@ -457,6 +458,13 @@ static int send_reply(struct svcxprt_rdma *rdma,
9192     }
9193     rqstp->rq_next_page = rqstp->rq_respages + 1;
9194    
9195     + /* The loop above bumps sc_dma_used for each sge. The
9196     + * xdr_buf.tail gets a separate sge, but resides in the
9197     + * same page as xdr_buf.head. Don't count it twice.
9198     + */
9199     + if (sge_no > ctxt->count)
9200     + atomic_dec(&rdma->sc_dma_used);
9201     +
9202     if (sge_no > rdma->sc_max_sge) {
9203     pr_err("svcrdma: Too many sges (%d)\n", sge_no);
9204     goto err;
9205     diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
9206     index 885683a3b0bd..e0406211716b 100644
9207     --- a/sound/arm/Kconfig
9208     +++ b/sound/arm/Kconfig
9209     @@ -9,6 +9,14 @@ menuconfig SND_ARM
9210     Drivers that are implemented on ASoC can be found in
9211     "ALSA for SoC audio support" section.
9212    
9213     +config SND_PXA2XX_LIB
9214     + tristate
9215     + select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
9216     + select SND_DMAENGINE_PCM
9217     +
9218     +config SND_PXA2XX_LIB_AC97
9219     + bool
9220     +
9221     if SND_ARM
9222    
9223     config SND_ARMAACI
9224     @@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
9225     tristate
9226     select SND_PCM
9227    
9228     -config SND_PXA2XX_LIB
9229     - tristate
9230     - select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
9231     -
9232     -config SND_PXA2XX_LIB_AC97
9233     - bool
9234     -
9235     config SND_PXA2XX_AC97
9236     tristate "AC97 driver for the Intel PXA2xx chip"
9237     depends on ARCH_PXA
9238     diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
9239     index 477742cb70a2..58c0aad37284 100644
9240     --- a/sound/pci/hda/hda_tegra.c
9241     +++ b/sound/pci/hda/hda_tegra.c
9242     @@ -73,6 +73,7 @@ struct hda_tegra {
9243     struct clk *hda2codec_2x_clk;
9244     struct clk *hda2hdmi_clk;
9245     void __iomem *regs;
9246     + struct work_struct probe_work;
9247     };
9248    
9249     #ifdef CONFIG_PM
9250     @@ -294,7 +295,9 @@ static int hda_tegra_dev_disconnect(struct snd_device *device)
9251     static int hda_tegra_dev_free(struct snd_device *device)
9252     {
9253     struct azx *chip = device->device_data;
9254     + struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
9255    
9256     + cancel_work_sync(&hda->probe_work);
9257     if (azx_bus(chip)->chip_init) {
9258     azx_stop_all_streams(chip);
9259     azx_stop_chip(chip);
9260     @@ -426,6 +429,9 @@ static int hda_tegra_first_init(struct azx *chip, struct platform_device *pdev)
9261     /*
9262     * constructor
9263     */
9264     +
9265     +static void hda_tegra_probe_work(struct work_struct *work);
9266     +
9267     static int hda_tegra_create(struct snd_card *card,
9268     unsigned int driver_caps,
9269     struct hda_tegra *hda)
9270     @@ -452,6 +458,8 @@ static int hda_tegra_create(struct snd_card *card,
9271     chip->single_cmd = false;
9272     chip->snoop = true;
9273    
9274     + INIT_WORK(&hda->probe_work, hda_tegra_probe_work);
9275     +
9276     err = azx_bus_init(chip, NULL, &hda_tegra_io_ops);
9277     if (err < 0)
9278     return err;
9279     @@ -499,6 +507,21 @@ static int hda_tegra_probe(struct platform_device *pdev)
9280     card->private_data = chip;
9281    
9282     dev_set_drvdata(&pdev->dev, card);
9283     + schedule_work(&hda->probe_work);
9284     +
9285     + return 0;
9286     +
9287     +out_free:
9288     + snd_card_free(card);
9289     + return err;
9290     +}
9291     +
9292     +static void hda_tegra_probe_work(struct work_struct *work)
9293     +{
9294     + struct hda_tegra *hda = container_of(work, struct hda_tegra, probe_work);
9295     + struct azx *chip = &hda->chip;
9296     + struct platform_device *pdev = to_platform_device(hda->dev);
9297     + int err;
9298    
9299     err = hda_tegra_first_init(chip, pdev);
9300     if (err < 0)
9301     @@ -520,11 +543,8 @@ static int hda_tegra_probe(struct platform_device *pdev)
9302     chip->running = 1;
9303     snd_hda_set_power_save(&chip->bus, power_save * 1000);
9304    
9305     - return 0;
9306     -
9307     -out_free:
9308     - snd_card_free(card);
9309     - return err;
9310     + out_free:
9311     + return; /* no error return from async probe */
9312     }
9313    
9314     static int hda_tegra_remove(struct platform_device *pdev)
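hda_tegra_probe() is split in two: the cheap registration runs synchronously, while the slow hardware bring-up moves into a work item scheduled at the end of probe, and hda_tegra_dev_free() calls cancel_work_sync() first so teardown can never race the deferred init. A kernel-style fragment of that deferred-probe pattern (struct mydev and its functions are hypothetical):

    #include <linux/workqueue.h>

    struct mydev {
        struct work_struct probe_work;
        /* ... hardware state ... */
    };

    static void mydev_probe_work(struct work_struct *work)
    {
        struct mydev *d = container_of(work, struct mydev, probe_work);

        /* Slow initialization runs here, off the probe path.  Errors must
         * be handled locally: a work handler has no return value. */
        (void)d;
    }

    static int mydev_probe(struct mydev *d)
    {
        INIT_WORK(&d->probe_work, mydev_probe_work);
        schedule_work(&d->probe_work);    /* finish asynchronously */
        return 0;                         /* probe itself succeeds fast */
    }

    static void mydev_remove(struct mydev *d)
    {
        cancel_work_sync(&d->probe_work); /* never race the deferred init */
    }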
9315     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
9316     index 584a0343ab0c..85813de26da8 100644
9317     --- a/sound/pci/hda/patch_cirrus.c
9318     +++ b/sound/pci/hda/patch_cirrus.c
9319     @@ -633,6 +633,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
9320     SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
9321     SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
9322     SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
9323     + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
9324     {} /* terminator */
9325     };
9326    
9327     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9328     index c8f01ccc2513..6a66139871c6 100644
9329     --- a/sound/pci/hda/patch_realtek.c
9330     +++ b/sound/pci/hda/patch_realtek.c
9331     @@ -4188,6 +4188,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
9332     }
9333     }
9334    
9335     +/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
9336     +static void alc_fixup_tpt440_dock(struct hda_codec *codec,
9337     + const struct hda_fixup *fix, int action)
9338     +{
9339     + static const struct hda_pintbl pincfgs[] = {
9340     + { 0x16, 0x21211010 }, /* dock headphone */
9341     + { 0x19, 0x21a11010 }, /* dock mic */
9342     + { }
9343     + };
9344     + struct alc_spec *spec = codec->spec;
9345     +
9346     + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
9347     + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
9348     + codec->power_save_node = 0; /* avoid click noises */
9349     + snd_hda_apply_pincfgs(codec, pincfgs);
9350     + }
9351     +}
9352     +
9353     static void alc_shutup_dell_xps13(struct hda_codec *codec)
9354     {
9355     struct alc_spec *spec = codec->spec;
9356     @@ -4562,7 +4580,6 @@ enum {
9357     ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
9358     ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
9359     ALC292_FIXUP_TPT440_DOCK,
9360     - ALC292_FIXUP_TPT440_DOCK2,
9361     ALC283_FIXUP_BXBT2807_MIC,
9362     ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
9363     ALC282_FIXUP_ASPIRE_V5_PINS,
9364     @@ -5029,17 +5046,7 @@ static const struct hda_fixup alc269_fixups[] = {
9365     },
9366     [ALC292_FIXUP_TPT440_DOCK] = {
9367     .type = HDA_FIXUP_FUNC,
9368     - .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
9369     - .chained = true,
9370     - .chain_id = ALC292_FIXUP_TPT440_DOCK2
9371     - },
9372     - [ALC292_FIXUP_TPT440_DOCK2] = {
9373     - .type = HDA_FIXUP_PINS,
9374     - .v.pins = (const struct hda_pintbl[]) {
9375     - { 0x16, 0x21211010 }, /* dock headphone */
9376     - { 0x19, 0x21a11010 }, /* dock mic */
9377     - { }
9378     - },
9379     + .v.func = alc_fixup_tpt440_dock,
9380     .chained = true,
9381     .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
9382     },
9383     @@ -5299,6 +5306,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9384     SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
9385     SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
9386     SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9387     + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
9388     SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
9389     SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
9390     SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
9391     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
9392     index 9d947aef2c8b..def5cc8dff02 100644
9393     --- a/sound/pci/hda/patch_sigmatel.c
9394     +++ b/sound/pci/hda/patch_sigmatel.c
9395     @@ -4520,7 +4520,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
9396     return err;
9397    
9398     spec = codec->spec;
9399     - codec->power_save_node = 1;
9400     + /* enable power_save_node only for new 92HD89xx chips, as it causes
9401     + * click noises on old 92HD73xx chips.
9402     + */
9403     + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
9404     + codec->power_save_node = 1;
9405     spec->linear_tone_beep = 0;
9406     spec->gen.mixer_nid = 0x1d;
9407     spec->have_spdif_mux = 1;
9408     diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
9409     index 58c3164802b8..8c907ebea189 100644
9410     --- a/sound/soc/au1x/db1200.c
9411     +++ b/sound/soc/au1x/db1200.c
9412     @@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
9413     .cpu_dai_name = "au1xpsc_i2s.2",
9414     .platform_name = "au1xpsc-pcm.2",
9415     .codec_name = "wm8731.0-001b",
9416     + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
9417     + SND_SOC_DAIFMT_CBM_CFM,
9418     .ops = &db1200_i2s_wm8731_ops,
9419     };
9420    
9421     @@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
9422     .cpu_dai_name = "au1xpsc_i2s.3",
9423     .platform_name = "au1xpsc-pcm.3",
9424     .codec_name = "wm8731.0-001b",
9425     + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
9426     + SND_SOC_DAIFMT_CBM_CFM,
9427     .ops = &db1200_i2s_wm8731_ops,
9428     };
9429    
9430     diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
9431     index e673f6ceb521..7c411297bfdd 100644
9432     --- a/sound/soc/codecs/sgtl5000.c
9433     +++ b/sound/soc/codecs/sgtl5000.c
9434     @@ -1377,8 +1377,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
9435     sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
9436    
9437     snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
9438     - SGTL5000_BIAS_R_MASK,
9439     - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
9440     + SGTL5000_BIAS_VOLT_MASK,
9441     + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
9442     /*
9443     * disable DAP
9444     * TODO:
9445     diff --git a/sound/soc/codecs/tas2552.c b/sound/soc/codecs/tas2552.c
9446     index 4f25a7d0efa2..b3e5685aca1e 100644
9447     --- a/sound/soc/codecs/tas2552.c
9448     +++ b/sound/soc/codecs/tas2552.c
9449     @@ -551,7 +551,7 @@ static struct snd_soc_dai_driver tas2552_dai[] = {
9450     /*
9451     * DAC digital volumes. From -7 to 24 dB in 1 dB steps
9452     */
9453     -static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0);
9454     +static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0);
9455    
9456     static const char * const tas2552_din_source_select[] = {
9457     "Muted",
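DECLARE_TLV_DB_SCALE() takes its minimum and step in centi-dB (0.01 dB units), so the "-7 to 24 dB" comment requires -700, not -7 (which would mean -0.07 dB). A quick arithmetic check of the range the corrected macro describes:

    #include <stdio.h>

    int main(void)
    {
        const int min_cdb  = -700;  /* centi-dB: -7.00 dB */
        const int step_cdb =  100;  /* 1.00 dB per volume step */
        const int max_step =   31;  /* 32 positions: -7 dB .. +24 dB */

        printf("min = %.2f dB\n", min_cdb / 100.0);                         /* -7.00 */
        printf("max = %.2f dB\n", (min_cdb + max_step * step_cdb) / 100.0); /* 24.00 */
        return 0;
    }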
9458     diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
9459     index a3e97b46b64e..0d28e3b356f6 100644
9460     --- a/sound/soc/dwc/designware_i2s.c
9461     +++ b/sound/soc/dwc/designware_i2s.c
9462     @@ -131,10 +131,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
9463    
9464     if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
9465     for (i = 0; i < 4; i++)
9466     - i2s_write_reg(dev->i2s_base, TOR(i), 0);
9467     + i2s_read_reg(dev->i2s_base, TOR(i));
9468     } else {
9469     for (i = 0; i < 4; i++)
9470     - i2s_write_reg(dev->i2s_base, ROR(i), 0);
9471     + i2s_read_reg(dev->i2s_base, ROR(i));
9472     }
9473     }
9474    
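On this controller the TOR/ROR overrun registers are cleared by reading them; writing 0, as the old code did, never clears the interrupt. A userspace model of a read-to-clear status latch (the register layout is invented):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of a read-to-clear status register (hypothetical device). */
    struct regs { uint32_t overrun; };

    static uint32_t reg_read(struct regs *r)
    {
        uint32_t v = r->overrun;
        r->overrun = 0;          /* hardware clears the latch on read */
        return v;
    }

    static void clear_irqs(struct regs *r)
    {
        /* Writing 0 would be a no-op on such hardware; the discarded
         * read itself is what clears the condition. */
        (void)reg_read(r);
    }

    int main(void)
    {
        struct regs r = { .overrun = 1 };
        clear_irqs(&r);
        printf("overrun after clear: %u\n", r.overrun);   /* 0 */
        return 0;
    }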
9475     diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
9476     index 39cea80846c3..f2bf8661dd21 100644
9477     --- a/sound/soc/pxa/Kconfig
9478     +++ b/sound/soc/pxa/Kconfig
9479     @@ -1,7 +1,6 @@
9480     config SND_PXA2XX_SOC
9481     tristate "SoC Audio for the Intel PXA2xx chip"
9482     depends on ARCH_PXA
9483     - select SND_ARM
9484     select SND_PXA2XX_LIB
9485     help
9486     Say Y or M if you want to add support for codecs attached to
9487     @@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
9488     config SND_PXA2XX_SOC_AC97
9489     tristate
9490     select AC97_BUS
9491     - select SND_ARM
9492     select SND_PXA2XX_LIB_AC97
9493     select SND_SOC_AC97_BUS
9494    
9495     diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
9496     index 1f6054650991..9e4b04e0fbd1 100644
9497     --- a/sound/soc/pxa/pxa2xx-ac97.c
9498     +++ b/sound/soc/pxa/pxa2xx-ac97.c
9499     @@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
9500     .reset = pxa2xx_ac97_cold_reset,
9501     };
9502    
9503     -static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
9504     +static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
9505     static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
9506     .addr = __PREG(PCDR),
9507     .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
9508     @@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
9509     .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
9510     };
9511    
9512     -static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
9513     +static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
9514     static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
9515     .addr = __PREG(PCDR),
9516     .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
9517     diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
9518     index 82e350e9501c..ac75816ada7c 100644
9519     --- a/sound/synth/emux/emux_oss.c
9520     +++ b/sound/synth/emux/emux_oss.c
9521     @@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
9522     struct snd_seq_oss_reg *arg;
9523     struct snd_seq_device *dev;
9524    
9525     - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
9526     + /* use device #1 here to avoid conflicts with OPL3 */
9527     + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
9528     sizeof(struct snd_seq_oss_reg), &dev) < 0)
9529     return;
9530    
9531     diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
9532     index e44052483ed9..80159e6811c2 100644
9533     --- a/tools/lguest/lguest.c
9534     +++ b/tools/lguest/lguest.c
9535     @@ -125,7 +125,11 @@ struct device_list {
9536     /* The list of Guest devices, based on command line arguments. */
9537     static struct device_list devices;
9538    
9539     -struct virtio_pci_cfg_cap {
9540     +/*
9541     + * Just like struct virtio_pci_cfg_cap in uapi/linux/virtio_pci.h,
9542     + * but uses a u32 explicitly for the data.
9543     + */
9544     +struct virtio_pci_cfg_cap_u32 {
9545     struct virtio_pci_cap cap;
9546     u32 pci_cfg_data; /* Data for BAR access. */
9547     };
9548     @@ -157,7 +161,7 @@ struct pci_config {
9549     struct virtio_pci_notify_cap notify;
9550     struct virtio_pci_cap isr;
9551     struct virtio_pci_cap device;
9552     - struct virtio_pci_cfg_cap cfg_access;
9553     + struct virtio_pci_cfg_cap_u32 cfg_access;
9554     };
9555    
9556     /* The device structure describes a single device. */
9557     @@ -1291,7 +1295,7 @@ static struct device *dev_and_reg(u32 *reg)
9558     * only fault if they try to write with some invalid bar/offset/length.
9559     */
9560     static bool valid_bar_access(struct device *d,
9561     - struct virtio_pci_cfg_cap *cfg_access)
9562     + struct virtio_pci_cfg_cap_u32 *cfg_access)
9563     {
9564     /* We only have 1 bar (BAR0) */
9565     if (cfg_access->cap.bar != 0)
9566     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
9567     index cc25f059ab3d..a843bee66a4f 100644
9568     --- a/tools/lib/traceevent/event-parse.c
9569     +++ b/tools/lib/traceevent/event-parse.c
9570     @@ -3721,7 +3721,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
9571     struct format_field *field;
9572     struct printk_map *printk;
9573     long long val, fval;
9574     - unsigned long addr;
9575     + unsigned long long addr;
9576     char *str;
9577     unsigned char *hex;
9578     int print;
9579     @@ -3754,13 +3754,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
9580     */
9581     if (!(field->flags & FIELD_IS_ARRAY) &&
9582     field->size == pevent->long_size) {
9583     - addr = *(unsigned long *)(data + field->offset);
9584     +
9585     + /* Handle heterogeneous recording and processing
9586     + * architectures
9587     + *
9588     + * CASE I:
9589     + * Traces recorded on 32-bit devices (32-bit
9590     + * addressing) and processed on 64-bit devices:
9591     + * In this case, only 32 bits should be read.
9592     + *
9593     + * CASE II:
9594     + * Traces recorded on 64-bit devices and processed
9595     + * on 32-bit devices:
9596     + * In this case, 64 bits must be read.
9597     + */
9598     + addr = (pevent->long_size == 8) ?
9599     + *(unsigned long long *)(data + field->offset) :
9600     + (unsigned long long)*(unsigned int *)(data + field->offset);
9601     +
9602     /* Check if it matches a print format */
9603     printk = find_printk(pevent, addr);
9604     if (printk)
9605     trace_seq_puts(s, printk->printk);
9606     else
9607     - trace_seq_printf(s, "%lx", addr);
9608     + trace_seq_printf(s, "%llx", addr);
9609     break;
9610     }
9611     str = malloc(len + 1);
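
The two cases in the comment reduce to a single rule: read exactly pevent->long_size bytes from the recorded data, then widen to 64 bits for printing, so the word size of the processing machine never matters. A standalone sketch of that rule (hypothetical helper, not part of the traceevent API):

    /* Read a pointer-sized field using the *recording* machine's long
     * size; 32-bit values are zero-extended into the 64-bit result. */
    static unsigned long long
    read_recorded_long(const void *data, int offset, int recorded_long_size)
    {
            const char *p = (const char *)data + offset;

            if (recorded_long_size == 8)                         /* CASE II */
                    return *(const unsigned long long *)p;
            return (unsigned long long)*(const unsigned int *)p; /* CASE I */
    }

With addr widened to unsigned long long, the matching format-specifier change from "%lx" to "%llx" at the end of the hunk follows automatically.
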
9612     diff --git a/tools/perf/arch/alpha/Build b/tools/perf/arch/alpha/Build
9613     new file mode 100644
9614     index 000000000000..1bb8bf6d7fd4
9615     --- /dev/null
9616     +++ b/tools/perf/arch/alpha/Build
9617     @@ -0,0 +1 @@
9618     +# empty
9619     diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
9620     new file mode 100644
9621     index 000000000000..1bb8bf6d7fd4
9622     --- /dev/null
9623     +++ b/tools/perf/arch/mips/Build
9624     @@ -0,0 +1 @@
9625     +# empty
9626     diff --git a/tools/perf/arch/parisc/Build b/tools/perf/arch/parisc/Build
9627     new file mode 100644
9628     index 000000000000..1bb8bf6d7fd4
9629     --- /dev/null
9630     +++ b/tools/perf/arch/parisc/Build
9631     @@ -0,0 +1 @@
9632     +# empty
9633     diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
9634     index d99d850e1444..ef355fc0e870 100644
9635     --- a/tools/perf/builtin-stat.c
9636     +++ b/tools/perf/builtin-stat.c
9637     @@ -694,7 +694,7 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
9638     static void print_aggr(char *prefix)
9639     {
9640     struct perf_evsel *counter;
9641     - int cpu, cpu2, s, s2, id, nr;
9642     + int cpu, s, s2, id, nr;
9643     double uval;
9644     u64 ena, run, val;
9645    
9646     @@ -707,8 +707,7 @@ static void print_aggr(char *prefix)
9647     val = ena = run = 0;
9648     nr = 0;
9649     for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
9650     - cpu2 = perf_evsel__cpus(counter)->map[cpu];
9651     - s2 = aggr_get_id(evsel_list->cpus, cpu2);
9652     + s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
9653     if (s2 != id)
9654     continue;
9655     val += perf_counts(counter->counts, cpu, 0)->val;
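
The bug this hunk removes is an index/id mix-up: aggr_get_id() expects an index into the cpu map it is handed, but the old code first resolved the index to a real CPU number through the counter's own map and then used that number as an index into the global evsel_list->cpus map, which is only correct when that map happens to be the identity. A toy model of the confusion (all names hypothetical, only the indexing logic matters):

    /* a cpu map stores real CPU ids and is always addressed by index */
    struct toy_cpu_map { int nr; int map[8]; };

    static int toy_aggr_get_id(const struct toy_cpu_map *cpus, int idx)
    {
            return cpus->map[idx] / 2;  /* say: core id, two CPUs per core */
    }

    /* counter bound to CPUs {2, 3}: resolving map[0] == 2 first and then
     * indexing a different map with 2 silently reads the wrong slot;
     * passing (counter's own map, 0) keeps id resolution inside the helper. */
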
9656     diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
9657     index 03ace57a800c..4215cc155041 100644
9658     --- a/tools/perf/util/header.c
9659     +++ b/tools/perf/util/header.c
9660     @@ -1442,7 +1442,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
9661     if (ph->needs_swap)
9662     nr = bswap_32(nr);
9663    
9664     - ph->env.nr_cpus_online = nr;
9665     + ph->env.nr_cpus_avail = nr;
9666    
9667     ret = readn(fd, &nr, sizeof(nr));
9668     if (ret != sizeof(nr))
9669     @@ -1451,7 +1451,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
9670     if (ph->needs_swap)
9671     nr = bswap_32(nr);
9672    
9673     - ph->env.nr_cpus_avail = nr;
9674     + ph->env.nr_cpus_online = nr;
9675     return 0;
9676     }
9677    
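
Here the two readn() results were simply stored in swapped fields: on the write side perf emits the available (configured) CPU count first and the online count second, so the reader has to consume them in that order. A hedged sketch of the writer's ordering (approximating perf's write_nrcpus(); the exact sysconf pairing is my assumption, the ordering is what this hunk relies on):

    #include <unistd.h>

    /* values in the order the header file actually carries them */
    unsigned int nr_avail  = (unsigned int)sysconf(_SC_NPROCESSORS_CONF); /* 1st */
    unsigned int nr_online = (unsigned int)sysconf(_SC_NPROCESSORS_ONLN); /* 2nd */

So the first value read in process_nrcpus() must land in ph->env.nr_cpus_avail, which is exactly what the two swapped assignments restore.
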
9678     diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
9679     index 6f28d53d4e46..f298c696e24f 100644
9680     --- a/tools/perf/util/hist.c
9681     +++ b/tools/perf/util/hist.c
9682     @@ -151,6 +151,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
9683     hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
9684     hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
9685    
9686     + if (h->srcline)
9687     + hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
9688     +
9689     if (h->transaction)
9690     hists__new_col_len(hists, HISTC_TRANSACTION,
9691     hist_entry__transaction_len());
9692     diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
9693     index 591905a02b92..9cd70819c795 100644
9694     --- a/tools/perf/util/parse-events.y
9695     +++ b/tools/perf/util/parse-events.y
9696     @@ -255,7 +255,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
9697     list_add_tail(&term->list, head);
9698    
9699     ALLOC_LIST(list);
9700     - ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head));
9701     + ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
9702     parse_events__free_terms(head);
9703     $$ = list;
9704     }
9705     diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
9706     index 381f23a443c7..ae6351db6de4 100644
9707     --- a/tools/perf/util/probe-event.c
9708     +++ b/tools/perf/util/probe-event.c
9709     @@ -274,12 +274,13 @@ static int kernel_get_module_dso(const char *module, struct dso **pdso)
9710     int ret = 0;
9711    
9712     if (module) {
9713     - list_for_each_entry(dso, &host_machine->dsos.head, node) {
9714     - if (!dso->kernel)
9715     - continue;
9716     - if (strncmp(dso->short_name + 1, module,
9717     - dso->short_name_len - 2) == 0)
9718     - goto found;
9719     + char module_name[128];
9720     +
9721     + snprintf(module_name, sizeof(module_name), "[%s]", module);
9722     + map = map_groups__find_by_name(&host_machine->kmaps, MAP__FUNCTION, module_name);
9723     + if (map) {
9724     + dso = map->dso;
9725     + goto found;
9726     }
9727     pr_debug("Failed to find module %s.\n", module);
9728     return -ENOENT;
9729     diff --git a/tools/perf/util/probe-event.h b/tools/perf/util/probe-event.h
9730     index 31db6ee7db54..cd55c6db421d 100644
9731     --- a/tools/perf/util/probe-event.h
9732     +++ b/tools/perf/util/probe-event.h
9733     @@ -106,6 +106,8 @@ struct variable_list {
9734     struct strlist *vars; /* Available variables */
9735     };
9736    
9737     +struct map;
9738     +
9739     /* Command string to events */
9740     extern int parse_perf_probe_command(const char *cmd,
9741     struct perf_probe_event *pev);
9742     diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
9743     index 65f7e389ae09..333858821ab0 100644
9744     --- a/tools/perf/util/symbol-elf.c
9745     +++ b/tools/perf/util/symbol-elf.c
9746     @@ -1260,8 +1260,6 @@ out_close:
9747     static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
9748     bool temp)
9749     {
9750     - GElf_Ehdr *ehdr;
9751     -
9752     kcore->elfclass = elfclass;
9753    
9754     if (temp)
9755     @@ -1278,9 +1276,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
9756     if (!gelf_newehdr(kcore->elf, elfclass))
9757     goto out_end;
9758    
9759     - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
9760     - if (!ehdr)
9761     - goto out_end;
9762     + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
9763    
9764     return 0;
9765    
9766     @@ -1337,23 +1333,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
9767     static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
9768     u64 addr, u64 len)
9769     {
9770     - GElf_Phdr gphdr;
9771     - GElf_Phdr *phdr;
9772     -
9773     - phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
9774     - if (!phdr)
9775     - return -1;
9776     -
9777     - phdr->p_type = PT_LOAD;
9778     - phdr->p_flags = PF_R | PF_W | PF_X;
9779     - phdr->p_offset = offset;
9780     - phdr->p_vaddr = addr;
9781     - phdr->p_paddr = 0;
9782     - phdr->p_filesz = len;
9783     - phdr->p_memsz = len;
9784     - phdr->p_align = page_size;
9785     -
9786     - if (!gelf_update_phdr(kcore->elf, idx, phdr))
9787     + GElf_Phdr phdr = {
9788     + .p_type = PT_LOAD,
9789     + .p_flags = PF_R | PF_W | PF_X,
9790     + .p_offset = offset,
9791     + .p_vaddr = addr,
9792     + .p_paddr = 0,
9793     + .p_filesz = len,
9794     + .p_memsz = len,
9795     + .p_align = page_size,
9796     + };
9797     +
9798     + if (!gelf_update_phdr(kcore->elf, idx, &phdr))
9799     return -1;
9800    
9801     return 0;
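
Both kcore hunks drop a read-modify-write through libelf, where gelf_getehdr()/gelf_getphdr() were called only to have every field overwritten afterwards, in favor of writing a fully specified header. My reading of the change is that the get calls can fail on an ELF that is still being constructed, since there is nothing meaningful to read back yet. The replacement pattern in isolation, relying on the C guarantee that unnamed fields of a designated initializer are zeroed:

    GElf_Phdr phdr = {
            .p_type   = PT_LOAD,
            .p_flags  = PF_R | PF_W | PF_X,
            .p_offset = offset,
            .p_vaddr  = addr,
            .p_paddr  = 0,         /* explicit here, but would be zeroed anyway */
            .p_filesz = len,
            .p_memsz  = len,
            .p_align  = page_size,
    };

    if (!gelf_update_phdr(kcore->elf, idx, &phdr))
            return -1;

The ehdr case gets the analogous treatment: instead of reading the header back through gelf_getehdr(), the cached kcore->ehdr is zero-initialized with memset(), gelf_newehdr() having already installed the real header.
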
9802     diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
9803     index 9ff4193dfa49..79db45336e3a 100644
9804     --- a/virt/kvm/eventfd.c
9805     +++ b/virt/kvm/eventfd.c
9806     @@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
9807     return KVM_MMIO_BUS;
9808     }
9809    
9810     -static int
9811     -kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9812     +static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
9813     + enum kvm_bus bus_idx,
9814     + struct kvm_ioeventfd *args)
9815     {
9816     - enum kvm_bus bus_idx;
9817     - struct _ioeventfd *p;
9818     - struct eventfd_ctx *eventfd;
9819     - int ret;
9820     -
9821     - bus_idx = ioeventfd_bus_from_flags(args->flags);
9822     - /* must be natural-word sized, or 0 to ignore length */
9823     - switch (args->len) {
9824     - case 0:
9825     - case 1:
9826     - case 2:
9827     - case 4:
9828     - case 8:
9829     - break;
9830     - default:
9831     - return -EINVAL;
9832     - }
9833     -
9834     - /* check for range overflow */
9835     - if (args->addr + args->len < args->addr)
9836     - return -EINVAL;
9837    
9838     - /* check for extra flags that we don't understand */
9839     - if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
9840     - return -EINVAL;
9841     -
9842     - /* ioeventfd with no length can't be combined with DATAMATCH */
9843     - if (!args->len &&
9844     - args->flags & (KVM_IOEVENTFD_FLAG_PIO |
9845     - KVM_IOEVENTFD_FLAG_DATAMATCH))
9846     - return -EINVAL;
9847     + struct eventfd_ctx *eventfd;
9848     + struct _ioeventfd *p;
9849     + int ret;
9850    
9851     eventfd = eventfd_ctx_fdget(args->fd);
9852     if (IS_ERR(eventfd))
9853     @@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9854     if (ret < 0)
9855     goto unlock_fail;
9856    
9857     - /* When length is ignored, MMIO is also put on a separate bus, for
9858     - * faster lookups.
9859     - */
9860     - if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
9861     - ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
9862     - p->addr, 0, &p->dev);
9863     - if (ret < 0)
9864     - goto register_fail;
9865     - }
9866     -
9867     kvm->buses[bus_idx]->ioeventfd_count++;
9868     list_add_tail(&p->list, &kvm->ioeventfds);
9869    
9870     @@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9871    
9872     return 0;
9873    
9874     -register_fail:
9875     - kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
9876     unlock_fail:
9877     mutex_unlock(&kvm->slots_lock);
9878    
9879     @@ -873,14 +835,13 @@ fail:
9880     }
9881    
9882     static int
9883     -kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9884     +kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
9885     + struct kvm_ioeventfd *args)
9886     {
9887     - enum kvm_bus bus_idx;
9888     struct _ioeventfd *p, *tmp;
9889     struct eventfd_ctx *eventfd;
9890     int ret = -ENOENT;
9891    
9892     - bus_idx = ioeventfd_bus_from_flags(args->flags);
9893     eventfd = eventfd_ctx_fdget(args->fd);
9894     if (IS_ERR(eventfd))
9895     return PTR_ERR(eventfd);
9896     @@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9897     continue;
9898    
9899     kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
9900     - if (!p->length) {
9901     - kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
9902     - &p->dev);
9903     - }
9904     kvm->buses[bus_idx]->ioeventfd_count--;
9905     ioeventfd_release(p);
9906     ret = 0;
9907     @@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9908     return ret;
9909     }
9910    
9911     +static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9912     +{
9913     + enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
9914     + int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
9915     +
9916     + if (!args->len && bus_idx == KVM_MMIO_BUS)
9917     + kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
9918     +
9919     + return ret;
9920     +}
9921     +
9922     +static int
9923     +kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9924     +{
9925     + enum kvm_bus bus_idx;
9926     + int ret;
9927     +
9928     + bus_idx = ioeventfd_bus_from_flags(args->flags);
9929     + /* must be natural-word sized, or 0 to ignore length */
9930     + switch (args->len) {
9931     + case 0:
9932     + case 1:
9933     + case 2:
9934     + case 4:
9935     + case 8:
9936     + break;
9937     + default:
9938     + return -EINVAL;
9939     + }
9940     +
9941     + /* check for range overflow */
9942     + if (args->addr + args->len < args->addr)
9943     + return -EINVAL;
9944     +
9945     + /* check for extra flags that we don't understand */
9946     + if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
9947     + return -EINVAL;
9948     +
9949     + /* ioeventfd with no length can't be combined with DATAMATCH */
9950     + if (!args->len &&
9951     + args->flags & (KVM_IOEVENTFD_FLAG_PIO |
9952     + KVM_IOEVENTFD_FLAG_DATAMATCH))
9953     + return -EINVAL;
9954     +
9955     + ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
9956     + if (ret)
9957     + goto fail;
9958     +
9959     + /* When length is ignored, MMIO is also put on a separate bus, for
9960     + * faster lookups.
9961     + */
9962     + if (!args->len && bus_idx == KVM_MMIO_BUS) {
9963     + ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
9964     + if (ret < 0)
9965     + goto fast_fail;
9966     + }
9967     +
9968     + return 0;
9969     +
9970     +fast_fail:
9971     + kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
9972     +fail:
9973     + return ret;
9974     +}
9975     +
9976     int
9977     kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
9978     {
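
After this refactor the wildcard-length case is symmetric: kvm_assign_ioeventfd() registers a zero-length MMIO ioeventfd on both KVM_MMIO_BUS and KVM_FAST_MMIO_BUS (unwinding the first registration if the second fails), and kvm_deassign_ioeventfd() tears both down, whereas previously only the assignment path knew about the fast bus. Nothing changes for userspace; a sketch of the registration that exercises this path, with the ioctl and struct layout from the KVM uapi as I remember them (verify before use):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdio.h>

    /* vm_fd: an existing VM fd; event_fd: an eventfd created earlier */
    static void register_wildcard_ioeventfd(int vm_fd, int event_fd)
    {
            struct kvm_ioeventfd ioeventfd = {
                    .addr  = 0xfe000000ULL, /* hypothetical doorbell address */
                    .len   = 0,             /* length ignored: fast MMIO path */
                    .fd    = event_fd,
                    .flags = 0,             /* MMIO bus, no DATAMATCH */
            };

            if (ioctl(vm_fd, KVM_IOEVENTFD, &ioeventfd) < 0)
                    perror("KVM_IOEVENTFD");
    }
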
9979     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9980     index 8b8a44453670..5a2a78a91d58 100644
9981     --- a/virt/kvm/kvm_main.c
9982     +++ b/virt/kvm/kvm_main.c
9983     @@ -3080,10 +3080,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
9984     static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
9985     const struct kvm_io_range *r2)
9986     {
9987     - if (r1->addr < r2->addr)
9988     + gpa_t addr1 = r1->addr;
9989     + gpa_t addr2 = r2->addr;
9990     +
9991     + if (addr1 < addr2)
9992     return -1;
9993     - if (r1->addr + r1->len > r2->addr + r2->len)
9994     +
9995     + /* If r2->len == 0, match the exact address. If r2->len != 0,
9996     + * accept any overlapping write. Any order is acceptable for
9997     + * overlapping ranges, because kvm_io_bus_get_first_dev ensures
9998     + * we process all of them.
9999     + */
10000     + if (r2->len) {
10001     + addr1 += r1->len;
10002     + addr2 += r2->len;
10003     + }
10004     +
10005     + if (addr1 > addr2)
10006     return 1;
10007     +
10008     return 0;
10009     }
10010
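
Worked through, the new comparator gives a zero-length key an exact-start-address match against any registered range, while a ranged key keeps the old containment behaviour; this is what lets the zero-length registrations above be found again on the fast MMIO bus. A standalone copy of the rule with two probes (values hypothetical):

    struct range { unsigned long long addr, len; };

    /* r1 is the registered range, r2 the search key */
    static int cmp(struct range r1, struct range r2)
    {
            unsigned long long a1 = r1.addr, a2 = r2.addr;

            if (a1 < a2)
                    return -1;
            if (r2.len) {           /* ranged key: compare end addresses too */
                    a1 += r1.len;
                    a2 += r2.len;
            }
            if (a1 > a2)
                    return 1;
            return 0;
    }

    /* device {0x1000, 4}: key {0x1000, 0} -> 0  (exact start, a hit)
     *                     key {0x1002, 0} -> -1 (no hit, despite overlap) */
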