Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0110-4.1.11-all-fixes.patch



Revision 2748
Mon Jan 11 12:00:45 2016 UTC by niro
File size: 275481 bytes
-linux-4.1 patches up to 4.1.15
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 93aa8604630e..21152d397b88 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -218,16 +218,16 @@ The development process
Linux kernel development process currently consists of a few different
main kernel "branches" and lots of different subsystem-specific kernel
branches. These different branches are:
- - main 3.x kernel tree
- - 3.x.y -stable kernel tree
- - 3.x -git kernel patches
+ - main 4.x kernel tree
+ - 4.x.y -stable kernel tree
+ - 4.x -git kernel patches
- subsystem specific kernel trees and patches
- - the 3.x -next kernel tree for integration tests
+ - the 4.x -next kernel tree for integration tests

-3.x kernel tree
+4.x kernel tree
-----------------
-3.x kernels are maintained by Linus Torvalds, and can be found on
-kernel.org in the pub/linux/kernel/v3.x/ directory. Its development
+4.x kernels are maintained by Linus Torvalds, and can be found on
+kernel.org in the pub/linux/kernel/v4.x/ directory. Its development
process is as follows:
- As soon as a new kernel is released a two weeks window is open,
during this period of time maintainers can submit big diffs to
@@ -262,20 +262,20 @@ mailing list about kernel releases:
released according to perceived bug status, not according to a
preconceived timeline."

-3.x.y -stable kernel tree
+4.x.y -stable kernel tree
---------------------------
Kernels with 3-part versions are -stable kernels. They contain
relatively small and critical fixes for security problems or significant
-regressions discovered in a given 3.x kernel.
+regressions discovered in a given 4.x kernel.

This is the recommended branch for users who want the most recent stable
kernel and are not interested in helping test development/experimental
versions.

-If no 3.x.y kernel is available, then the highest numbered 3.x
+If no 4.x.y kernel is available, then the highest numbered 4.x
kernel is the current stable kernel.

-3.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
+4.x.y are maintained by the "stable" team <stable@vger.kernel.org>, and
are released as needs dictate. The normal release period is approximately
two weeks, but it can be longer if there are no pressing problems. A
security-related problem, instead, can cause a release to happen almost
@@ -285,7 +285,7 @@ The file Documentation/stable_kernel_rules.txt in the kernel tree
documents what kinds of changes are acceptable for the -stable tree, and
how the release process works.

-3.x -git patches
+4.x -git patches
------------------
These are daily snapshots of Linus' kernel tree which are managed in a
git repository (hence the name.) These patches are usually released
@@ -317,9 +317,9 @@ revisions to it, and maintainers can mark patches as under review,
accepted, or rejected. Most of these patchwork sites are listed at
http://patchwork.kernel.org/.

-3.x -next kernel tree for integration tests
+4.x -next kernel tree for integration tests
---------------------------------------------
-Before updates from subsystem trees are merged into the mainline 3.x
+Before updates from subsystem trees are merged into the mainline 4.x
tree, they need to be integration-tested. For this purpose, a special
testing repository exists into which virtually all subsystem trees are
pulled on an almost daily basis:
diff --git a/Makefile b/Makefile
index d02f16b510dc..c7d877b1c248 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 1
-SUBLEVEL = 10
+SUBLEVEL = 11
EXTRAVERSION =
NAME = Series 4800

diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 985227cbbd1b..47f10e7ad1f6 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -50,6 +50,14 @@ AS += -EL
LD += -EL
endif

+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
*/

/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include "imx25.dtsi"

@@ -114,8 +115,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 1 0>;
- wp-gpios = <&gpio2 0 0>;
+ cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
status = "okay";
};

diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
bus-width = <4>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};

diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio1 1 0>;
- wp-gpios = <&gpio1 9 0>;
+ cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
status = "okay";
};

diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 181ae5ebf23f..1f55187ed9ce 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
&esdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc3>;
- cd-gpios = <&gpio3 11 0>;
- wp-gpios = <&gpio3 12 0>;
+ cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
bus-width = <8>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
- cd-gpios = <&gpio3 13 0>;
- wp-gpios = <&gpio4 11 0>;
+ cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
status = "okay";
};

diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
pinctrl-0 = <&pinctrl_esdhc2>,
<&pinctrl_esdhc2_cdwp>;
vmmc-supply = <&reg_3p3v>;
- wp-gpios = <&gpio1 2 0>;
- cd-gpios = <&gpio1 4 0>;
+ wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
status = "disabled";
};

diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
};

&esdhc1 {
- cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
};

&esdhc2 {
- cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
fsl,wp-controller;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
&esdhc2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc2>;
- cd-gpios = <&gpio3 25 0>;
- wp-gpios = <&gpio2 19 0>;
+ cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
vmmc-supply = <&reg_3p3v>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..394a4ace351a 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -35,7 +35,6 @@
compatible = "regulator-fixed";
reg = <1>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1>;
regulator-name = "usbh1_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -47,7 +46,6 @@
compatible = "regulator-fixed";
reg = <2>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbotg>;
regulator-name = "usb_otg_vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index a5474113cd50..67659a0ed13e 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -202,7 +202,7 @@

tfp410_pins: pinmux_tfp410_pins {
pinctrl-single,pins = <
- 0x194 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
+ 0x196 (PIN_OUTPUT | MUX_MODE4) /* hdq_sio.gpio_170 */
>;
};

diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 74777a6e200a..1b958e92d674 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -174,8 +174,8 @@

i2c5_pins: pinmux_i2c5_pins {
pinctrl-single,pins = <
- 0x184 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
- 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
+ 0x186 (PIN_INPUT | MUX_MODE0) /* i2c5_scl */
+ 0x188 (PIN_INPUT | MUX_MODE0) /* i2c5_sda */
>;
};

diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a6ad93c9bce3..fd9eefce0a7b 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -259,15 +259,17 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (err)
return err;

- patch_text((void *)bpt->bpt_addr,
- *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr,
+ *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);

return err;
}

int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
+ /* Machine is already stopped, so we can use __patch_text() directly */
+ __patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);

return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 423663e23791..586eef26203d 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -343,12 +343,17 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
*/
thumb = handler & 1;

-#if __LINUX_ARM_ARCH__ >= 7
+#if __LINUX_ARM_ARCH__ >= 6
/*
- * Clear the If-Then Thumb-2 execution state
- * ARM spec requires this to be all 000s in ARM mode
- * Snapdragon S4/Krait misbehaves on a Thumb=>ARM
- * signal transition without this.
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
*/
cpsr &= ~PSR_IT_MASK;
#endif
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
index 48efe2ee452c..58048b333d31 100644
--- a/arch/arm/kvm/interrupts_head.S
+++ b/arch/arm/kvm/interrupts_head.S
@@ -518,8 +518,7 @@ ARM_BE8(rev r6, r6 )

mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- bic r2, #1 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
isb

mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
@@ -532,6 +531,9 @@ ARM_BE8(rev r6, r6 )
mcrr p15, 4, r2, r2, c14 @ CNTVOFF

1:
+ mov r2, #0 @ Clear ENABLE
+ mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
+
@ Allow physical timer/counter access for the host
mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1d5accbd3dcf..191dcfab9f60 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1790,8 +1790,10 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
gpa_t gpa = mem->guest_phys_addr +
(vm_start - mem->userspace_addr);
- phys_addr_t pa = (vma->vm_pgoff << PAGE_SHIFT) +
- vm_start - vma->vm_start;
+ phys_addr_t pa;
+
+ pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ pa += vm_start - vma->vm_start;

/* IO region dirty page logging not allowed */
if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 9bdf54795f05..56978199c479 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -20,6 +20,7 @@
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
+#include <asm/smp_plat.h>

#include "regs-pmu.h"
#include "common.h"
@@ -70,7 +71,31 @@ static int exynos_cpu_powerup(unsigned int cpu, unsigned int cluster)
cluster >= EXYNOS5420_NR_CLUSTERS)
return -EINVAL;

- exynos_cpu_power_up(cpunr);
+ if (!exynos_cpu_power_state(cpunr)) {
+ exynos_cpu_power_up(cpunr);
+
+ /*
+ * This assumes the cluster number of the big cores(Cortex A15)
+ * is 0 and the Little cores(Cortex A7) is 1.
+ * When the system was booted from the Little core,
+ * they should be reset during power up cpu.
+ */
+ if (cluster &&
+ cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) {
+ /*
+ * Before we reset the Little cores, we should wait
+ * the SPARE2 register is set to 1 because the init
+ * codes of the iROM will set the register after
+ * initialization.
+ */
+ while (!pmu_raw_readl(S5P_PMU_SPARE2))
+ udelay(10);
+
+ pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu),
+ EXYNOS_SWRESET);
+ }
+ }
+
return 0;
}

diff --git a/arch/arm/mach-exynos/regs-pmu.h b/arch/arm/mach-exynos/regs-pmu.h
index b7614333d296..fba9068ed260 100644
--- a/arch/arm/mach-exynos/regs-pmu.h
+++ b/arch/arm/mach-exynos/regs-pmu.h
@@ -513,6 +513,12 @@ static inline unsigned int exynos_pmu_cpunr(unsigned int mpidr)
#define SPREAD_ENABLE 0xF
#define SPREAD_USE_STANDWFI 0xF

+#define EXYNOS5420_KFC_CORE_RESET0 BIT(8)
+#define EXYNOS5420_KFC_ETM_RESET0 BIT(20)
+
+#define EXYNOS5420_KFC_CORE_RESET(_nr) \
+ ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+
#define EXYNOS5420_BB_CON1 0x0784
#define EXYNOS5420_BB_SEL_EN BIT(31)
#define EXYNOS5420_BB_PMOS_EN BIT(7)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 352962bc2e78..5170fd5c8e97 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -257,7 +257,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs*/
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs*/
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 96da13167d4a..fa5efaa5c3ac 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -279,6 +279,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
diff --git a/arch/m68k/include/asm/linkage.h b/arch/m68k/include/asm/linkage.h
index 5a822bb790f7..066e74f666ae 100644
--- a/arch/m68k/include/asm/linkage.h
+++ b/arch/m68k/include/asm/linkage.h
@@ -4,4 +4,34 @@
#define __ALIGN .align 4
#define __ALIGN_STR ".align 4"

+/*
+ * Make sure the compiler doesn't do anything stupid with the
+ * arguments on the stack - they are owned by the *caller*, not
+ * the callee. This just fools gcc into not spilling into them,
+ * and keeps it from doing tailcall recursion and/or using the
+ * stack slots for temporaries, since they are live and "used"
+ * all the way to the end of the function.
+ */
+#define asmlinkage_protect(n, ret, args...) \
+ __asmlinkage_protect##n(ret, ##args)
+#define __asmlinkage_protect_n(ret, args...) \
+ __asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
+#define __asmlinkage_protect0(ret) \
+ __asmlinkage_protect_n(ret)
+#define __asmlinkage_protect1(ret, arg1) \
+ __asmlinkage_protect_n(ret, "m" (arg1))
+#define __asmlinkage_protect2(ret, arg1, arg2) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
+#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
+#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4))
+#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5))
+#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
+ __asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
+ "m" (arg4), "m" (arg5), "m" (arg6))
+
#endif
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 22f04ca2ff3e..2efb18aafa4f 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -64,6 +64,9 @@ void __init prom_init_env(void)
}
if (memsize == 0)
memsize = 256;
+
+ loongson_sysconf.nr_uarts = 1;
+
pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
#else
struct boot_params *boot_p;
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 609d1241b0c4..371eec113659 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -100,7 +100,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 453a8a47a467..964c0ce584ce 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -826,12 +826,15 @@ int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
unsigned long size = kvmppc_get_gpr(vcpu, 4);
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
u64 buf;
+ int srcu_idx;
int ret;

if (!is_power_of_2(size) || (size > sizeof(buf)))
return H_TOO_HARD;

+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;

@@ -866,6 +869,7 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
unsigned long addr = kvmppc_get_gpr(vcpu, 5);
unsigned long val = kvmppc_get_gpr(vcpu, 6);
u64 buf;
+ int srcu_idx;
int ret;

switch (size) {
@@ -889,7 +893,9 @@ int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
return H_TOO_HARD;
}

+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
if (ret != 0)
return H_TOO_HARD;

diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3b2d2c5b6376..ffd98b2bfa16 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1171,6 +1171,7 @@ mc_cont:
bl kvmhv_accumulate_time
#endif

+ mr r3, r12
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
nop
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bca2aeb6e4b6..3ff29cf6d05c 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -99,6 +99,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

if (WARN_ON(!phb))
return;
@@ -106,10 +107,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index f086c6f22dc9..fd16cb5d83f3 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -128,15 +128,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index a3f660eed6de..89496cf4e04d 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -65,6 +65,7 @@ static struct irq_chip mpic_pasemi_msi_chip = {
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -72,10 +73,11 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
+ hwirq, ALLOC_CHUNK);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index b2cef1809389..13a34b237559 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -107,15 +107,16 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 6e2e6aa378bb..02a137daa182 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -124,16 +124,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;

dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}

diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index d4788111c161..fac6ac9790fa 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -10,7 +10,7 @@ targets += misc.o piggy.o sizes.h head.o

KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks
+KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float
KBUILD_CFLAGS += $(call cc-option,-mpacked-stack)
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)

diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index fe8d6924efaa..c78ba51ae285 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -48,6 +48,19 @@ typedef struct
struct ucontext32 uc;
} rt_sigframe32;

+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -303,10 +316,12 @@ COMPAT_SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
goto badframe;
@@ -323,10 +338,12 @@ COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
set_current_blocked(&set);
if (compat_restore_altstack(&frame->uc.uc_stack))
goto badframe;
@@ -397,7 +414,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
return -EFAULT;

/* Create struct sigcontext32 on the signal stack */
- memcpy(&sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32);
+ sigset_to_sigset32(set->sig, sc.oldmask);
sc.sregs = (__u32)(unsigned long __force) &frame->sregs;
if (__copy_to_user(&frame->sc, &sc, sizeof(frame->sc)))
return -EFAULT;
@@ -458,6 +475,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
+ compat_sigset_t cset;
rt_sigframe32 __user *frame;
unsigned long restorer;
size_t frame_size;
@@ -505,11 +523,12 @@ static int setup_rt_frame32(struct ksignal *ksig, sigset_t *set,
store_sigregs();

/* Create ucontext on the signal stack. */
+ sigset_to_sigset32(set->sig, cset.sig);
if (__put_user(uc_flags, &frame->uc.uc_flags) ||
__put_user(0, &frame->uc.uc_link) ||
__compat_save_altstack(&frame->uc.uc_stack, regs->gprs[15]) ||
save_sigregs32(regs, &frame->uc.uc_mcontext) ||
- __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)) ||
+ __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset)) ||
save_sigregs_ext32(regs, &frame->uc.uc_mcontext_ext))
return -EFAULT;

diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index aef653193160..d1918a8c4393 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -325,10 +325,15 @@ done:

static void __init_or_module optimize_nops(struct alt_instr *a, u8 *instr)
{
+ unsigned long flags;
+
if (instr[0] != 0x90)
return;

+ local_irq_save(flags);
add_nops(instr + (a->instrlen - a->padlen), a->padlen);
+ sync_core();
+ local_irq_restore(flags);

DUMP_BYTES(instr, a->instrlen, "%p: [%d:%d) optimized NOPs: ",
instr, a->instrlen - a->padlen, a->padlen);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index cde732c1b495..307a49828826 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -336,6 +336,13 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
apic_write(APIC_LVTT, lvtt_value);

if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
+ /*
+ * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
+ * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
+ * According to Intel, MFENCE can do the serialization here.
+ */
+ asm volatile("mfence" : : : "memory");
+
printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
return;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 2813ea0f142e..22212615a137 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2098,9 +2098,12 @@ static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event)
{
- struct event_constraint *c1 = cpuc->event_constraint[idx];
+ struct event_constraint *c1 = NULL;
struct event_constraint *c2;

+ if (idx >= 0) /* fake does < 0 */
+ c1 = cpuc->event_constraint[idx];
+
/*
* first time only
* - static constraint: no change across incremental scheduling calls
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c76d3e37c6e1..403ace539b73 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -184,10 +184,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
}

#ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
- unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
- int *nr_ranges = arg;
+ unsigned int *nr_ranges = arg;

(*nr_ranges)++;
return 0;
@@ -213,7 +212,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,

ced->image = image;

- walk_system_ram_range(0, -1, &nr_ranges,
+ walk_system_ram_res(0, -1, &nr_ranges,
get_nr_ram_ranges_callback);

ced->max_nr_ranges = nr_ranges;
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 4bd6c197563d..6c9cb6073832 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1393,7 +1393,18 @@ END(error_exit)
/* Runs on exception stack */
ENTRY(nmi)
INTR_FRAME
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
/*
* We allow breakpoints in NMIs. If a breakpoint occurs, then
* the iretq it performs will take us out of NMI context.
@@ -1445,9 +1456,12 @@ ENTRY(nmi)
* we don't want to enable interrupts, because then we'll end
* up in an awkward situation in which IRQs are on but NMIs
* are off.
+ *
+ * We also must not push anything to the stack before switching
+ * stacks lest we corrupt the "NMI executing" variable.
*/

- SWAPGS
+ SWAPGS_UNSAFE_STACK
cld
movq %rsp, %rdx
movq PER_CPU_VAR(kernel_stack), %rsp
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index c614dd492f5f..1f316f066c49 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,10 +41,18 @@
#include <asm/timer.h>
#include <asm/special_insns.h>

-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 5e0bf57d9944..58e02d938218 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -499,27 +499,59 @@ void set_personality_ia32(bool x32)
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
unsigned long get_wchan(struct task_struct *p)
{
- unsigned long stack;
- u64 fp, ip;
+ unsigned long start, bottom, top, sp, fp, ip;
int count = 0;

if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack = (unsigned long)task_stack_page(p);
- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
+
+ start = (unsigned long)task_stack_page(p);
+ if (!start)
+ return 0;
+
+ /*
+ * Layout of the stack page:
+ *
+ * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+ * PADDING
+ * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+ * stack
+ * ----------- bottom = start + sizeof(thread_info)
+ * thread_info
+ * ----------- start
+ *
+ * The tasks stack pointer points at the location where the
+ * framepointer is stored. The data on the stack is:
+ * ... IP FP ... IP FP
+ *
+ * We need to read FP and IP, so we need to adjust the upper
+ * bound by another unsigned long.
+ */
+ top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+ top -= 2 * sizeof(unsigned long);
+ bottom = start + sizeof(struct thread_info);
+
+ sp = READ_ONCE(p->thread.sp);
+ if (sp < bottom || sp > top)
return 0;
- fp = *(u64 *)(p->thread.sp);
+
+ fp = READ_ONCE(*(unsigned long *)sp);
do {
- if (fp < (unsigned long)stack ||
- fp >= (unsigned long)stack+THREAD_SIZE)
+ if (fp < bottom || fp > top)
return 0;
- ip = *(u64 *)(fp+8);
+ ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip))
return ip;
- fp = *(u64 *)fp;
- } while (count++ < 16);
+ fp = READ_ONCE(*(unsigned long *)fp);
+ } while (count++ < 16 && p->state != TASK_RUNNING);
return 0;
}

diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..21187ebee7d0 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -21,6 +21,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>

unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -1004,15 +1005,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;

- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4911bf19122b..7858cd9acfe4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -512,7 +512,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);

if (svm->vmcb->control.next_rip != 0) {
- WARN_ON(!static_cpu_has(X86_FEATURE_NRIPS));
+ WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 2d73807f0d31..bc3041e1abbc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6144,6 +6144,8 @@ static __init int hardware_setup(void)
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);

+ set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3fba623e3ba5..f9977a7a9444 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem().
*/
all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
- set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

rodata_test();

diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 841ea05e1b02..477384985ac9 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -679,6 +679,70 @@ out:
}

/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return memmap.map_end - memmap.desc_size;
+
+ entry -= memmap.desc_size;
+ if (entry < memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return memmap.map;
+
+ entry += memmap.desc_size;
+ if (entry >= memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+/*
* Map the efi memory ranges of the runtime services and update new_mmap with
* virtual addresses.
@@ -688,7 +752,8 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
unsigned long left = 0;
efi_memory_desc_t *md;

- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
md = p;
if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
#ifdef CONFIG_X86_64
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a671e837228d..0cc657160cb6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,10 @@
#include <linux/memblock.h>
#include <linux/edd.h>

+#ifdef CONFIG_KEXEC_CORE
+#include <linux/kexec.h>
+#endif
+
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
@@ -1798,6 +1802,21 @@ static struct notifier_block xen_hvm_cpu_notifier = {
.notifier_call = xen_hvm_cpu_notify,
};

+#ifdef CONFIG_KEXEC_CORE
+static void xen_hvm_shutdown(void)
+{
+ native_machine_shutdown();
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+
+static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+{
+ native_machine_crash_shutdown(regs);
+ xen_reboot(SHUTDOWN_soft_reset);
+}
+#endif
+
static void __init xen_hvm_guest_init(void)
{
if (xen_pv_domain())
@@ -1817,6 +1836,10 @@ static void __init xen_hvm_guest_init(void)
x86_init.irqs.intr_init = xen_init_IRQ;
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
+#ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+#endif
}
#endif

diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index df0c66cb7ad3..fdba441457ec 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)

if (sibling == cpu) /* skip itself */
continue;
+
sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ if (!sib_cpu_ci->info_list)
+ continue;
+
sib_leaf = sib_cpu_ci->info_list + index;
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)

static void free_cache_attributes(unsigned int cpu)
{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
cache_shared_cpu_map_remove(cpu);

kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DEAD:
cache_remove_dev(cpu);
- if (per_cpu_cacheinfo(cpu))
- free_cache_attributes(cpu);
+ free_cache_attributes(cpu);
break;
}
return notifier_from_errno(rc);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 1d0b116cae95..0a60ef1500cd 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -26,9 +26,10 @@
*/
void device_add_property_set(struct device *dev, struct property_set *pset)
{
- if (pset)
- pset->fwnode.type = FWNODE_PDATA;
+ if (!pset)
+ return;

+ pset->fwnode.type = FWNODE_PDATA;
set_secondary_fwnode(dev, &pset->fwnode);
}
EXPORT_SYMBOL_GPL(device_add_property_set);
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 5799a0b9e6cc..c8941f39c919 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -32,8 +32,7 @@ static DEFINE_MUTEX(regmap_debugfs_early_lock);
/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
{
- snprintf(buf, buf_size, "%x", max_val);
- return strlen(buf);
+ return snprintf(NULL, 0, "%x", max_val);
}

static ssize_t regmap_name_read_file(struct file *file,
@@ -432,7 +431,7 @@ static ssize_t regmap_access_read_file(struct file *file,
/* If we're in the region the user is trying to read */
if (p >= *ppos) {
/* ...but not beyond it */
- if (buf_pos >= count - 1 - tot_len)
+ if (buf_pos + tot_len + 1 >= count)
break;

/* Format the register */
diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c
index 757636d166cf..4ab28cfb8d2a 100644
--- a/drivers/clk/ti/clk-3xxx.c
+++ b/drivers/clk/ti/clk-3xxx.c
@@ -163,7 +163,6 @@ static struct ti_dt_clk omap3xxx_clks[] = {
DT_CLK(NULL, "gpio2_ick", "gpio2_ick"),
DT_CLK(NULL, "wdt3_ick", "wdt3_ick"),
DT_CLK(NULL, "uart3_ick", "uart3_ick"),
- DT_CLK(NULL, "uart4_ick", "uart4_ick"),
DT_CLK(NULL, "gpt9_ick", "gpt9_ick"),
DT_CLK(NULL, "gpt8_ick", "gpt8_ick"),
DT_CLK(NULL, "gpt7_ick", "gpt7_ick"),
@@ -308,6 +307,7 @@ static struct ti_dt_clk am35xx_clks[] = {
static struct ti_dt_clk omap36xx_clks[] = {
DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"),
DT_CLK(NULL, "uart4_fck", "uart4_fck"),
+ DT_CLK(NULL, "uart4_ick", "uart4_ick"),
{ .node_name = NULL },
};

diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index bab67db54b7e..663045ce6fac 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -255,7 +255,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
rcu_read_unlock();

tol_uV = opp_uV * priv->voltage_tolerance / 100;
- if (regulator_is_supported_voltage(cpu_reg, opp_uV,
+ if (regulator_is_supported_voltage(cpu_reg,
+ opp_uV - tol_uV,
opp_uV + tol_uV)) {
if (opp_uV < min_uV)
min_uV = opp_uV;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 6f9d27f9001c..e8d16997c5cb 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,9 +48,9 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

-static inline int32_t div_fp(int32_t x, int32_t y)
+static inline int32_t div_fp(s64 x, s64 y)
{
- return div_s64((int64_t)x << FRAC_BITS, y);
+ return div64_s64((int64_t)x << FRAC_BITS, y);
}

static inline int ceiling_fp(int32_t x)
@@ -795,7 +795,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t core_busy, max_pstate, current_pstate, sample_ratio;
- u32 duration_us;
+ s64 duration_us;
u32 sample_time;

/*
@@ -822,8 +822,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
* to adjust our busyness.
*/
sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
- duration_us = (u32) ktime_us_delta(cpu->sample.time,
- cpu->last_sample_time);
+ duration_us = ktime_us_delta(cpu->sample.time,
+ cpu->last_sample_time);
if (duration_us > sample_time * 3) {
sample_ratio = div_fp(int_tofp(sample_time),
int_tofp(duration_us));
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 1022c2e1a2b0..9e504d3b0d4f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
- int r = nr_channels - i - 1;

dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)

/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = r;
+ dwc->priority = nr_channels - i - 1;
else
dwc->priority = i;

@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = chip->regs + r * sizeof(u32);

dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
1463 diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
1464 index e29560e6b40b..950c87f5d279 100644
1465 --- a/drivers/firmware/efi/libstub/arm-stub.c
1466 +++ b/drivers/firmware/efi/libstub/arm-stub.c
1467 @@ -13,6 +13,7 @@
1468 */
1469
1470 #include <linux/efi.h>
1471 +#include <linux/sort.h>
1472 #include <asm/efi.h>
1473
1474 #include "efistub.h"
1475 @@ -305,6 +306,44 @@ fail:
1476 */
1477 #define EFI_RT_VIRTUAL_BASE 0x40000000
1478
1479 +static int cmp_mem_desc(const void *l, const void *r)
1480 +{
1481 + const efi_memory_desc_t *left = l, *right = r;
1482 +
1483 + return (left->phys_addr > right->phys_addr) ? 1 : -1;
1484 +}
1485 +
1486 +/*
1487 + * Returns whether region @left ends exactly where region @right starts,
1488 + * or false if either argument is NULL.
1489 + */
1490 +static bool regions_are_adjacent(efi_memory_desc_t *left,
1491 + efi_memory_desc_t *right)
1492 +{
1493 + u64 left_end;
1494 +
1495 + if (left == NULL || right == NULL)
1496 + return false;
1497 +
1498 + left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
1499 +
1500 + return left_end == right->phys_addr;
1501 +}
1502 +
1503 +/*
1504 + * Returns whether region @left and region @right have compatible memory type
1505 + * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
1506 + */
1507 +static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
1508 + efi_memory_desc_t *right)
1509 +{
1510 + static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
1511 + EFI_MEMORY_WC | EFI_MEMORY_UC |
1512 + EFI_MEMORY_RUNTIME;
1513 +
1514 + return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
1515 +}
1516 +
1517 /*
1518 * efi_get_virtmap() - create a virtual mapping for the EFI memory map
1519 *
1520 @@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
1521 int *count)
1522 {
1523 u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
1524 - efi_memory_desc_t *out = runtime_map;
1525 + efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
1526 int l;
1527
1528 - for (l = 0; l < map_size; l += desc_size) {
1529 - efi_memory_desc_t *in = (void *)memory_map + l;
1530 + /*
1531 + * To work around potential issues with the Properties Table feature
1532 + * introduced in UEFI 2.5, which may split PE/COFF executable images
1533 + * in memory into several RuntimeServicesCode and RuntimeServicesData
1534 + * regions, we need to preserve the relative offsets between adjacent
1535 + * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
1536 + * The easiest way to find adjacent regions is to sort the memory map
1537 + * before traversing it.
1538 + */
1539 + sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
1540 +
1541 + for (l = 0; l < map_size; l += desc_size, prev = in) {
1542 u64 paddr, size;
1543
1544 + in = (void *)memory_map + l;
1545 if (!(in->attribute & EFI_MEMORY_RUNTIME))
1546 continue;
1547
1548 + paddr = in->phys_addr;
1549 + size = in->num_pages * EFI_PAGE_SIZE;
1550 +
1551 /*
1552 * Make the mapping compatible with 64k pages: this allows
1553 * a 4k page size kernel to kexec a 64k page size kernel and
1554 * vice versa.
1555 */
1556 - paddr = round_down(in->phys_addr, SZ_64K);
1557 - size = round_up(in->num_pages * EFI_PAGE_SIZE +
1558 - in->phys_addr - paddr, SZ_64K);
1559 -
1560 - /*
1561 - * Avoid wasting memory on PTEs by choosing a virtual base that
1562 - * is compatible with section mappings if this region has the
1563 - * appropriate size and physical alignment. (Sections are 2 MB
1564 - * on 4k granule kernels)
1565 - */
1566 - if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
1567 - efi_virt_base = round_up(efi_virt_base, SZ_2M);
1568 + if (!regions_are_adjacent(prev, in) ||
1569 + !regions_have_compatible_memory_type_attrs(prev, in)) {
1570 +
1571 + paddr = round_down(in->phys_addr, SZ_64K);
1572 + size += in->phys_addr - paddr;
1573 +
1574 + /*
1575 + * Avoid wasting memory on PTEs by choosing a virtual
1576 + * base that is compatible with section mappings if this
1577 + * region has the appropriate size and physical
1578 + * alignment. (Sections are 2 MB on 4k granule kernels)
1579 + */
1580 + if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
1581 + efi_virt_base = round_up(efi_virt_base, SZ_2M);
1582 + else
1583 + efi_virt_base = round_up(efi_virt_base, SZ_64K);
1584 + }
1585
1586 in->virt_addr = efi_virt_base + in->phys_addr - paddr;
1587 efi_virt_base += size;
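
The comment in the hunk carries the key idea: sort the memory map by physical
address so adjacent EFI_MEMORY_RUNTIME regions with compatible attributes can
keep their relative offsets, instead of each being rounded to a fresh 64 KB
(or 2 MB) aligned virtual base. A compact userspace sketch of the sort plus
adjacency test, with a simplified stand-in for the EFI descriptor type:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096ULL

    struct desc { uint64_t phys_addr; uint64_t num_pages; };

    static int cmp_desc(const void *l, const void *r)
    {
        const struct desc *a = l, *b = r;

        return (a->phys_addr > b->phys_addr) ? 1 : -1;
    }

    /* true when @l ends exactly where @r starts */
    static bool adjacent(const struct desc *l, const struct desc *r)
    {
        return l && r &&
               l->phys_addr + l->num_pages * PAGE_SIZE == r->phys_addr;
    }

    int main(void)
    {
        struct desc map[] = { { 0x8000, 4 }, { 0x4000, 4 }, { 0xd000, 2 } };
        size_t n = sizeof(map) / sizeof(map[0]);

        qsort(map, n, sizeof(map[0]), cmp_desc);
        for (size_t i = 1; i < n; i++)
            printf("region %zu adjacent to previous: %d\n",
                   i, adjacent(&map[i - 1], &map[i]));
        return 0;
    }
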
1588 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1589 index b0487c9f018c..7f467fdc9107 100644
1590 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1591 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1592 @@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1593 struct drm_dp_mst_port *port, *tmp;
1594 bool wake_tx = false;
1595
1596 - cancel_work_sync(&mstb->mgr->work);
1597 -
1598 /*
1599 * destroy all ports - don't need lock
1600 * as there are no more references to the mst branch
1601 @@ -1977,6 +1975,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1602 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1603 DP_MST_EN | DP_UPSTREAM_IS_SRC);
1604 mutex_unlock(&mgr->lock);
1605 + flush_work(&mgr->work);
1606 + flush_work(&mgr->destroy_connector_work);
1607 }
1608 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1609
1610 @@ -2730,6 +2730,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
1611 */
1612 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
1613 {
1614 + flush_work(&mgr->work);
1615 flush_work(&mgr->destroy_connector_work);
1616 mutex_lock(&mgr->payload_lock);
1617 kfree(mgr->payloads);
1618 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
1619 index f861361a635e..4924d381b664 100644
1620 --- a/drivers/gpu/drm/drm_lock.c
1621 +++ b/drivers/gpu/drm/drm_lock.c
1622 @@ -61,6 +61,9 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
1623 struct drm_master *master = file_priv->master;
1624 int ret = 0;
1625
1626 + if (drm_core_check_feature(dev, DRIVER_MODESET))
1627 + return -EINVAL;
1628 +
1629 ++file_priv->lock_count;
1630
1631 if (lock->context == DRM_KERNEL_CONTEXT) {
1632 @@ -153,6 +156,9 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
1633 struct drm_lock *lock = data;
1634 struct drm_master *master = file_priv->master;
1635
1636 + if (drm_core_check_feature(dev, DRIVER_MODESET))
1637 + return -EINVAL;
1638 +
1639 if (lock->context == DRM_KERNEL_CONTEXT) {
1640 DRM_ERROR("Process %d using kernel context %d\n",
1641 task_pid_nr(current), lock->context);
1642 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1643 index c684085cb56a..fadf9865709e 100644
1644 --- a/drivers/gpu/drm/i915/intel_bios.c
1645 +++ b/drivers/gpu/drm/i915/intel_bios.c
1646 @@ -41,7 +41,7 @@ find_section(struct bdb_header *bdb, int section_id)
1647 {
1648 u8 *base = (u8 *)bdb;
1649 int index = 0;
1650 - u16 total, current_size;
1651 + u32 total, current_size;
1652 u8 current_id;
1653
1654 /* skip to first section */
1655 @@ -56,6 +56,10 @@ find_section(struct bdb_header *bdb, int section_id)
1656 current_size = *((u16 *)(base + index));
1657 index += 2;
1658
1659 + /* The MIPI Sequence Block v3+ has a separate size field. */
1660 + if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
1661 + current_size = *((const u32 *)(base + index + 1));
1662 +
1663 if (index + current_size > total)
1664 return NULL;
1665
1666 @@ -845,6 +849,12 @@ parse_mipi(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
1667 return;
1668 }
1669
1670 + /* Fail gracefully for forward incompatible sequence block. */
1671 + if (sequence->version >= 3) {
1672 + DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
1673 + return;
1674 + }
1675 +
1676 DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
1677
1678 block_size = get_blocksize(sequence);
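
The find_section() change widens the running size to u32 and, for a MIPI
sequence block whose first payload byte (its version) is 3 or more, trusts
the block's own 32-bit size field over the generic 16-bit section size;
parse_mipi() then bails out on v3+ rather than misparse it. A small model of
the size override, with an invented byte layout, an assumed section id, and a
little-endian host assumed (the driver reads these fields via pointer casts):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* id, 16-bit size, version byte, block's own 32-bit size */
        uint8_t blob[] = { 0x53, 0x05, 0x00, 0x03, 0x34, 0x12, 0x00, 0x00 };
        size_t index = 0;
        uint8_t id = blob[index++];
        uint16_t size;
        uint32_t real_size;

        memcpy(&size, blob + index, sizeof(size));
        index += 2;

        real_size = size;
        if (id == 0x53 && blob[index] >= 3)     /* v3+ sequence block */
            memcpy(&real_size, blob + index + 1, sizeof(real_size));

        printf("declared %u, effective %u\n",
               (unsigned)size, (unsigned)real_size);
        return 0;
    }
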
1679 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
1680 index 32248791bc4b..52921a871230 100644
1681 --- a/drivers/gpu/drm/qxl/qxl_display.c
1682 +++ b/drivers/gpu/drm/qxl/qxl_display.c
1683 @@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
1684 adjusted_mode->hdisplay,
1685 adjusted_mode->vdisplay);
1686
1687 - if (qcrtc->index == 0)
1688 + if (bo->is_primary == false)
1689 recreate_primary = true;
1690
1691 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
1692 @@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
1693 drm_connector_to_qxl_output(connector);
1694 struct drm_device *ddev = connector->dev;
1695 struct qxl_device *qdev = ddev->dev_private;
1696 - int connected;
1697 + bool connected = false;
1698
1699 /* The first monitor is always connected */
1700 - connected = (output->index == 0) ||
1701 - (qdev->client_monitors_config &&
1702 - qdev->client_monitors_config->count > output->index &&
1703 - qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
1704 + if (!qdev->client_monitors_config) {
1705 + if (output->index == 0)
1706 + connected = true;
1707 + } else
1708 + connected = qdev->client_monitors_config->count > output->index &&
1709 + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
1710
1711 DRM_DEBUG("#%d connected: %d\n", output->index, connected);
1712 if (!connected)
1713 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
1714 index dd39f434b4a7..b4ff4c134fbb 100644
1715 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
1716 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
1717 @@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
1718 } else
1719 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1720 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
1721 - args.ucAction = ATOM_LCD_BLON;
1722 - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
1723 + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
1724 +
1725 + atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1726 }
1727 break;
1728 case DRM_MODE_DPMS_STANDBY:
1729 @@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1730 atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
1731 }
1732 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
1733 - atombios_dig_transmitter_setup(encoder,
1734 - ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
1735 + atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
1736 if (ext_encoder)
1737 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1738 break;
1739 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
1740 index bd1c99deac71..2aaedbe0b023 100644
1741 --- a/drivers/hwmon/nct6775.c
1742 +++ b/drivers/hwmon/nct6775.c
1743 @@ -354,6 +354,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
1744
1745 /* NCT6776 specific data */
1746
1747 +/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
1748 +#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
1749 +#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
1750 +
1751 static const s8 NCT6776_ALARM_BITS[] = {
1752 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
1753 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
1754 @@ -3528,8 +3532,8 @@ static int nct6775_probe(struct platform_device *pdev)
1755 data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
1756 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1757 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1758 - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1759 - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1760 + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1761 + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1762 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1763 data->REG_PWM[0] = NCT6775_REG_PWM;
1764 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
1765 @@ -3600,8 +3604,8 @@ static int nct6775_probe(struct platform_device *pdev)
1766 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
1767 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1768 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1769 - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1770 - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1771 + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1772 + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1773 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1774 data->REG_PWM[0] = NCT6775_REG_PWM;
1775 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
1776 @@ -3677,8 +3681,8 @@ static int nct6775_probe(struct platform_device *pdev)
1777 data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
1778 data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
1779 data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
1780 - data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
1781 - data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
1782 + data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
1783 + data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
1784 data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
1785 data->REG_PWM[0] = NCT6775_REG_PWM;
1786 data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
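
The nct6775 fix is pure macro aliasing: the NCT6776_* step-time names are
defined in terms of the NCT6775 registers with up and down swapped, so every
chip that adopts the NCT6776 names inherits the corrected ordering without
any table changes. The same trick in miniature, with invented register
addresses:

    #include <stdio.h>

    #define A_STEP_UP_TIME   0x10   /* invented addresses */
    #define A_STEP_DOWN_TIME 0x11

    /* on chip B the two registers trade places */
    #define B_STEP_UP_TIME   A_STEP_DOWN_TIME
    #define B_STEP_DOWN_TIME A_STEP_UP_TIME

    int main(void)
    {
        printf("B up: 0x%02x, B down: 0x%02x\n",
               B_STEP_UP_TIME, B_STEP_DOWN_TIME);
        return 0;
    }
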
1787 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
1788 index 575a072d765f..c32a934f7693 100644
1789 --- a/drivers/infiniband/ulp/isert/ib_isert.c
1790 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
1791 @@ -2996,9 +2996,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
1792 static int
1793 isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
1794 {
1795 - int ret;
1796 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1797 + int ret = 0;
1798
1799 switch (state) {
1800 + case ISTATE_REMOVE:
1801 + spin_lock_bh(&conn->cmd_lock);
1802 + list_del_init(&cmd->i_conn_node);
1803 + spin_unlock_bh(&conn->cmd_lock);
1804 + isert_put_cmd(isert_cmd, true);
1805 + break;
1806 case ISTATE_SEND_NOPIN_WANT_RESPONSE:
1807 ret = isert_put_nopin(cmd, conn, false);
1808 break;
1809 @@ -3363,6 +3370,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
1810 wait_for_completion(&isert_conn->wait_comp_err);
1811 }
1812
1813 +/**
1814 + * isert_put_unsol_pending_cmds() - Drop commands waiting for
1815 + * unsolicited dataout
1816 + * @conn: iscsi connection
1817 + *
1818 + * We might still have commands that are waiting for unsolicited
1819 + * dataout messages. We must put the extra reference on those
1820 + * before blocking on target_wait_for_session_cmds().
1821 + */
1822 +static void
1823 +isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
1824 +{
1825 + struct iscsi_cmd *cmd, *tmp;
1826 + static LIST_HEAD(drop_cmd_list);
1827 +
1828 + spin_lock_bh(&conn->cmd_lock);
1829 + list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
1830 + if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
1831 + (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
1832 + (cmd->write_data_done < cmd->se_cmd.data_length))
1833 + list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
1834 + }
1835 + spin_unlock_bh(&conn->cmd_lock);
1836 +
1837 + list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
1838 + list_del_init(&cmd->i_conn_node);
1839 + if (cmd->i_state != ISTATE_REMOVE) {
1840 + struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
1841 +
1842 + isert_info("conn %p dropping cmd %p\n", conn, cmd);
1843 + isert_put_cmd(isert_cmd, true);
1844 + }
1845 + }
1846 +}
1847 +
1848 static void isert_wait_conn(struct iscsi_conn *conn)
1849 {
1850 struct isert_conn *isert_conn = conn->context;
1851 @@ -3381,8 +3423,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
1852 isert_conn_terminate(isert_conn);
1853 mutex_unlock(&isert_conn->mutex);
1854
1855 - isert_wait4cmds(conn);
1856 isert_wait4flush(isert_conn);
1857 + isert_put_unsol_pending_cmds(conn);
1858 + isert_wait4cmds(conn);
1859 isert_wait4logout(isert_conn);
1860
1861 queue_work(isert_release_wq, &isert_conn->release_work);
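
isert_put_unsol_pending_cmds() uses a standard kernel idiom: matching entries
are moved to a private list while the connection lock is held, and the extra
references are dropped only after the lock is released. A userspace sketch of
that idiom, with a pthread mutex and a hand-rolled singly linked list
standing in for the kernel primitives:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cmd { int pending; struct cmd *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void drop_pending(struct cmd **head)
    {
        struct cmd *drop = NULL, **pp, *c;

        pthread_mutex_lock(&lock);
        for (pp = head; (c = *pp) != NULL; ) {
            if (c->pending) {
                *pp = c->next;      /* unlink from the live list */
                c->next = drop;
                drop = c;           /* park on the private list */
            } else {
                pp = &c->next;
            }
        }
        pthread_mutex_unlock(&lock);

        while (drop) {              /* release without holding the lock */
            c = drop;
            drop = c->next;
            printf("dropping cmd %p\n", (void *)c);
            free(c);
        }
    }

    int main(void)
    {
        struct cmd *head = NULL;

        for (int i = 0; i < 4; i++) {
            struct cmd *c = malloc(sizeof(*c));

            c->pending = i & 1;
            c->next = head;
            head = c;
        }
        drop_pending(&head);
        while (head) {
            struct cmd *c = head;

            head = c->next;
            free(c);
        }
        return 0;
    }
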
1862 diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
1863 index a2e8c3f876cb..c2c578f0b268 100644
1864 --- a/drivers/irqchip/irq-atmel-aic5.c
1865 +++ b/drivers/irqchip/irq-atmel-aic5.c
1866 @@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
1867 {
1868 struct irq_domain *domain = d->domain;
1869 struct irq_domain_chip_generic *dgc = domain->gc;
1870 - struct irq_chip_generic *gc = dgc->gc[0];
1871 + struct irq_chip_generic *bgc = dgc->gc[0];
1872 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1873
1874 - /* Disable interrupt on AIC5 */
1875 - irq_gc_lock(gc);
1876 + /*
1877 + * Disable interrupt on AIC5. We always take the lock of the
1878 + * first irq chip as all chips share the same registers.
1879 + */
1880 + irq_gc_lock(bgc);
1881 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
1882 irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
1883 gc->mask_cache &= ~d->mask;
1884 - irq_gc_unlock(gc);
1885 + irq_gc_unlock(bgc);
1886 }
1887
1888 static void aic5_unmask(struct irq_data *d)
1889 {
1890 struct irq_domain *domain = d->domain;
1891 struct irq_domain_chip_generic *dgc = domain->gc;
1892 - struct irq_chip_generic *gc = dgc->gc[0];
1893 + struct irq_chip_generic *bgc = dgc->gc[0];
1894 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
1895
1896 - /* Enable interrupt on AIC5 */
1897 - irq_gc_lock(gc);
1898 + /*
1899 + * Enable interrupt on AIC5. We always take the lock of the
1900 + * first irq chip as all chips share the same registers.
1901 + */
1902 + irq_gc_lock(bgc);
1903 irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
1904 irq_reg_writel(gc, 1, AT91_AIC5_IECR);
1905 gc->mask_cache |= d->mask;
1906 - irq_gc_unlock(gc);
1907 + irq_gc_unlock(bgc);
1908 }
1909
1910 static int aic5_retrigger(struct irq_data *d)
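
The aic5 change separates two roles the old code conflated: register writes
still go through the per-irq generic chip (gc), but the lock taken is always
that of the first chip (bgc), because every chip banks into one shared
register file. A minimal model of that rule, with pthread mutexes standing in
for irq_gc_lock():

    #include <pthread.h>
    #include <stdio.h>

    struct chip { pthread_mutex_t lock; unsigned int mask_cache; };

    static struct chip chips[3] = {
        { PTHREAD_MUTEX_INITIALIZER, 0 },
        { PTHREAD_MUTEX_INITIALIZER, 0 },
        { PTHREAD_MUTEX_INITIALIZER, 0 },
    };

    static void unmask(struct chip *gc, unsigned int bit)
    {
        struct chip *bgc = &chips[0];   /* always the first chip's lock */

        pthread_mutex_lock(&bgc->lock);
        gc->mask_cache |= 1u << bit;    /* per-chip state, shared lock */
        pthread_mutex_unlock(&bgc->lock);
    }

    int main(void)
    {
        unmask(&chips[2], 5);
        printf("chip2 mask_cache: 0x%x\n", chips[2].mask_cache);
        return 0;
    }
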
1911 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1912 index c00e2db351ba..9a791dd52199 100644
1913 --- a/drivers/irqchip/irq-gic-v3-its.c
1914 +++ b/drivers/irqchip/irq-gic-v3-its.c
1915 @@ -921,8 +921,10 @@ retry_baser:
1916 * non-cacheable as well.
1917 */
1918 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1919 - if (!shr)
1920 + if (!shr) {
1921 cache = GITS_BASER_nC;
1922 + __flush_dcache_area(base, alloc_size);
1923 + }
1924 goto retry_baser;
1925 }
1926
1927 @@ -1163,6 +1165,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
1928 return NULL;
1929 }
1930
1931 + __flush_dcache_area(itt, sz);
1932 +
1933 dev->its = its;
1934 dev->itt = itt;
1935 dev->nr_ites = nr_ites;
1936 diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
1937 index 7fb2a19ac649..557f8a53a062 100644
1938 --- a/drivers/leds/led-class.c
1939 +++ b/drivers/leds/led-class.c
1940 @@ -223,12 +223,15 @@ static int led_classdev_next_name(const char *init_name, char *name,
1941 {
1942 unsigned int i = 0;
1943 int ret = 0;
1944 + struct device *dev;
1945
1946 strlcpy(name, init_name, len);
1947
1948 - while (class_find_device(leds_class, NULL, name, match_name) &&
1949 - (ret < len))
1950 + while ((ret < len) &&
1951 + (dev = class_find_device(leds_class, NULL, name, match_name))) {
1952 + put_device(dev);
1953 ret = snprintf(name, len, "%s_%u", init_name, ++i);
1954 + }
1955
1956 if (ret >= len)
1957 return -ENOMEM;
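
The led-class fix pairs every successful class_find_device(), which returns a
referenced struct device, with a put_device(), since the loop only needs the
existence check; it also stops probing once the buffer is exhausted. A toy
refcount model of the leak and its fix (the object and helper here are
hypothetical, not kernel API):

    #include <stdio.h>

    struct obj { int refcount; };

    static struct obj pool = { 1 };

    /* a successful lookup hands back a referenced object */
    static struct obj *find(const char *name)
    {
        (void)name;
        pool.refcount++;
        return &pool;
    }

    int main(void)
    {
        struct obj *o = find("led0");

        if (o) {
            printf("name taken, ref now %d\n", o->refcount);
            o->refcount--;      /* the put the old loop was missing */
        }
        printf("final refcount: %d\n", pool.refcount);
        return 0;
    }
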
1958 diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
1959 index 3ee198b65843..cc7ece1712b5 100644
1960 --- a/drivers/macintosh/windfarm_core.c
1961 +++ b/drivers/macintosh/windfarm_core.c
1962 @@ -435,7 +435,7 @@ int wf_unregister_client(struct notifier_block *nb)
1963 {
1964 mutex_lock(&wf_lock);
1965 blocking_notifier_chain_unregister(&wf_client_list, nb);
1966 - wf_client_count++;
1967 + wf_client_count--;
1968 if (wf_client_count == 0)
1969 wf_stop_thread();
1970 mutex_unlock(&wf_lock);
1971 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
1972 index c90118e90708..a7621a258936 100644
1973 --- a/drivers/md/bitmap.c
1974 +++ b/drivers/md/bitmap.c
1975 @@ -2000,7 +2000,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
1976 if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
1977 ret = bitmap_storage_alloc(&store, chunks,
1978 !bitmap->mddev->bitmap_info.external,
1979 - bitmap->cluster_slot);
1980 + mddev_is_clustered(bitmap->mddev)
1981 + ? bitmap->cluster_slot : 0);
1982 if (ret)
1983 goto err;
1984
1985 diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
1986 index 004e463c9423..8308f4b434ec 100644
1987 --- a/drivers/md/dm-cache-policy-cleaner.c
1988 +++ b/drivers/md/dm-cache-policy-cleaner.c
1989 @@ -435,7 +435,7 @@ static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
1990 static struct dm_cache_policy_type wb_policy_type = {
1991 .name = "cleaner",
1992 .version = {1, 0, 0},
1993 - .hint_size = 0,
1994 + .hint_size = 4,
1995 .owner = THIS_MODULE,
1996 .create = wb_create
1997 };
1998 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
1999 index 5503e43e5f28..049282e6482f 100644
2000 --- a/drivers/md/dm-crypt.c
2001 +++ b/drivers/md/dm-crypt.c
2002 @@ -955,7 +955,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
2003
2004 /*
2005 * Generate a new unfragmented bio with the given size
2006 - * This should never violate the device limitations
2007 + * This should never violate the device limitations (but only because
2008 + * max_segment_size is being constrained to PAGE_SIZE).
2009 *
2010 * This function may be called concurrently. If we allocate from the mempool
2011 * concurrently, there is a possibility of deadlock. For example, if we have
2012 @@ -2040,9 +2041,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
2013 return fn(ti, cc->dev, cc->start, ti->len, data);
2014 }
2015
2016 +static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
2017 +{
2018 + /*
2019 + * Unfortunate constraint that is required to avoid the potential
2020 + * for exceeding underlying device's max_segments limits -- due to
2021 + * crypt_alloc_buffer() possibly allocating pages for the encryption
2022 + * bio that are not as physically contiguous as the original bio.
2023 + */
2024 + limits->max_segment_size = PAGE_SIZE;
2025 +}
2026 +
2027 static struct target_type crypt_target = {
2028 .name = "crypt",
2029 - .version = {1, 14, 0},
2030 + .version = {1, 14, 1},
2031 .module = THIS_MODULE,
2032 .ctr = crypt_ctr,
2033 .dtr = crypt_dtr,
2034 @@ -2054,6 +2066,7 @@ static struct target_type crypt_target = {
2035 .message = crypt_message,
2036 .merge = crypt_merge,
2037 .iterate_devices = crypt_iterate_devices,
2038 + .io_hints = crypt_io_hints,
2039 };
2040
2041 static int __init dm_crypt_init(void)
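
The new io_hints hook caps max_segment_size at PAGE_SIZE so that
crypt_alloc_buffer(), which rebuilds the bio from individually allocated
pages, can never need more segments than the original bio was permitted. The
arithmetic behind that guarantee, with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int page_size = 4096;        /* illustrative */
        const unsigned int bio_bytes = 10 * page_size;
        const unsigned int max_segment_size = page_size;

        /* with <= PAGE_SIZE segments, an N-page bio needs at most N */
        unsigned int worst_case =
            (bio_bytes + max_segment_size - 1) / max_segment_size;

        printf("worst case: %u segments for a %u-byte bio\n",
               worst_case, bio_bytes);
        return 0;
    }
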
2042 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
2043 index 88e4c7f24986..2c1f2e13719e 100644
2044 --- a/drivers/md/dm-raid.c
2045 +++ b/drivers/md/dm-raid.c
2046 @@ -327,8 +327,7 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size)
2047 */
2048 if (min_region_size > (1 << 13)) {
2049 /* If not a power of 2, make it the next power of 2 */
2050 - if (min_region_size & (min_region_size - 1))
2051 - region_size = 1 << fls(region_size);
2052 + region_size = roundup_pow_of_two(min_region_size);
2053 DMINFO("Choosing default region size of %lu sectors",
2054 region_size);
2055 } else {
2056 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2057 index 697f34fba06b..8b72ceee0f61 100644
2058 --- a/drivers/md/dm.c
2059 +++ b/drivers/md/dm.c
2060 @@ -2925,8 +2925,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2061
2062 might_sleep();
2063
2064 - map = dm_get_live_table(md, &srcu_idx);
2065 -
2066 spin_lock(&_minor_lock);
2067 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2068 set_bit(DMF_FREEING, &md->flags);
2069 @@ -2940,14 +2938,14 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
2070 * do not race with internal suspend.
2071 */
2072 mutex_lock(&md->suspend_lock);
2073 + map = dm_get_live_table(md, &srcu_idx);
2074 if (!dm_suspended_md(md)) {
2075 dm_table_presuspend_targets(map);
2076 dm_table_postsuspend_targets(map);
2077 }
2078 - mutex_unlock(&md->suspend_lock);
2079 -
2080 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2081 dm_put_live_table(md, srcu_idx);
2082 + mutex_unlock(&md->suspend_lock);
2083
2084 /*
2085 * Rare, but there may be I/O requests still going to complete,
2086 diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
2087 index bf2b80d5c470..8731b6ea026b 100644
2088 --- a/drivers/md/persistent-data/dm-btree-internal.h
2089 +++ b/drivers/md/persistent-data/dm-btree-internal.h
2090 @@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
2091
2092 extern struct dm_block_validator btree_node_validator;
2093
2094 +/*
2095 + * Value type for upper levels of multi-level btrees.
2096 + */
2097 +extern void init_le64_type(struct dm_transaction_manager *tm,
2098 + struct dm_btree_value_type *vt);
2099 +
2100 #endif /* DM_BTREE_INTERNAL_H */
2101 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
2102 index a03178e91a79..7c0d75547ccf 100644
2103 --- a/drivers/md/persistent-data/dm-btree-remove.c
2104 +++ b/drivers/md/persistent-data/dm-btree-remove.c
2105 @@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
2106 return r;
2107 }
2108
2109 -static struct dm_btree_value_type le64_type = {
2110 - .context = NULL,
2111 - .size = sizeof(__le64),
2112 - .inc = NULL,
2113 - .dec = NULL,
2114 - .equal = NULL
2115 -};
2116 -
2117 int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
2118 uint64_t *keys, dm_block_t *new_root)
2119 {
2120 @@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
2121 int index = 0, r = 0;
2122 struct shadow_spine spine;
2123 struct btree_node *n;
2124 + struct dm_btree_value_type le64_vt;
2125
2126 + init_le64_type(info->tm, &le64_vt);
2127 init_shadow_spine(&spine, info);
2128 for (level = 0; level < info->levels; level++) {
2129 r = remove_raw(&spine, info,
2130 (level == last_level ?
2131 - &info->value_type : &le64_type),
2132 + &info->value_type : &le64_vt),
2133 root, keys[level], (unsigned *)&index);
2134 if (r < 0)
2135 break;
2136 diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
2137 index 1b5e13ec7f96..0dee514ba4c5 100644
2138 --- a/drivers/md/persistent-data/dm-btree-spine.c
2139 +++ b/drivers/md/persistent-data/dm-btree-spine.c
2140 @@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
2141 {
2142 return s->root;
2143 }
2144 +
2145 +static void le64_inc(void *context, const void *value_le)
2146 +{
2147 + struct dm_transaction_manager *tm = context;
2148 + __le64 v_le;
2149 +
2150 + memcpy(&v_le, value_le, sizeof(v_le));
2151 + dm_tm_inc(tm, le64_to_cpu(v_le));
2152 +}
2153 +
2154 +static void le64_dec(void *context, const void *value_le)
2155 +{
2156 + struct dm_transaction_manager *tm = context;
2157 + __le64 v_le;
2158 +
2159 + memcpy(&v_le, value_le, sizeof(v_le));
2160 + dm_tm_dec(tm, le64_to_cpu(v_le));
2161 +}
2162 +
2163 +static int le64_equal(void *context, const void *value1_le, const void *value2_le)
2164 +{
2165 + __le64 v1_le, v2_le;
2166 +
2167 + memcpy(&v1_le, value1_le, sizeof(v1_le));
2168 + memcpy(&v2_le, value2_le, sizeof(v2_le));
2169 + return v1_le == v2_le;
2170 +}
2171 +
2172 +void init_le64_type(struct dm_transaction_manager *tm,
2173 + struct dm_btree_value_type *vt)
2174 +{
2175 + vt->context = tm;
2176 + vt->size = sizeof(__le64);
2177 + vt->inc = le64_inc;
2178 + vt->dec = le64_dec;
2179 + vt->equal = le64_equal;
2180 +}
2181 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
2182 index fdd3793e22f9..c7726cebc495 100644
2183 --- a/drivers/md/persistent-data/dm-btree.c
2184 +++ b/drivers/md/persistent-data/dm-btree.c
2185 @@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
2186 struct btree_node *n;
2187 struct dm_btree_value_type le64_type;
2188
2189 - le64_type.context = NULL;
2190 - le64_type.size = sizeof(__le64);
2191 - le64_type.inc = NULL;
2192 - le64_type.dec = NULL;
2193 - le64_type.equal = NULL;
2194 -
2195 + init_le64_type(info->tm, &le64_type);
2196 init_shadow_spine(&spine, info);
2197
2198 for (level = 0; level < (info->levels - 1); level++) {
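
The shared init_le64_type() gives the internal le64 value type a real context
(the transaction manager) and working inc/dec/equal hooks, so shadowing an
upper level of a multi-level btree now adjusts the reference counts of the
child blocks it points to rather than skipping them. A self-contained sketch
of the same vtable idea, with a toy refcount table standing in for
dm_tm_inc()/dm_tm_dec():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct txn { int refs[16]; };   /* toy transaction manager */

    struct value_type {
        void *context;
        size_t size;
        void (*inc)(void *context, const void *value);
        void (*dec)(void *context, const void *value);
    };

    static void le64_inc(void *context, const void *value)
    {
        struct txn *tm = context;
        uint64_t v;

        memcpy(&v, value, sizeof(v));
        tm->refs[v]++;
    }

    static void le64_dec(void *context, const void *value)
    {
        struct txn *tm = context;
        uint64_t v;

        memcpy(&v, value, sizeof(v));
        tm->refs[v]--;
    }

    static void init_le64_type(struct txn *tm, struct value_type *vt)
    {
        vt->context = tm;
        vt->size = sizeof(uint64_t);
        vt->inc = le64_inc;
        vt->dec = le64_dec;
    }

    int main(void)
    {
        struct txn tm = { { 0 } };
        struct value_type vt;
        uint64_t block = 3;

        init_le64_type(&tm, &vt);
        vt.inc(vt.context, &block);
        printf("block %llu refcount: %d\n",
               (unsigned long long)block, tm.refs[block]);
        return 0;
    }
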
2199 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
2200 index efb654eb5399..0875e5e7e09a 100644
2201 --- a/drivers/md/raid0.c
2202 +++ b/drivers/md/raid0.c
2203 @@ -83,7 +83,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2204 char b[BDEVNAME_SIZE];
2205 char b2[BDEVNAME_SIZE];
2206 struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
2207 - bool discard_supported = false;
2208 + unsigned short blksize = 512;
2209
2210 if (!conf)
2211 return -ENOMEM;
2212 @@ -98,6 +98,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2213 sector_div(sectors, mddev->chunk_sectors);
2214 rdev1->sectors = sectors * mddev->chunk_sectors;
2215
2216 + blksize = max(blksize, queue_logical_block_size(
2217 + rdev1->bdev->bd_disk->queue));
2218 +
2219 rdev_for_each(rdev2, mddev) {
2220 pr_debug("md/raid0:%s: comparing %s(%llu)"
2221 " with %s(%llu)\n",
2222 @@ -134,6 +137,18 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2223 }
2224 pr_debug("md/raid0:%s: FINAL %d zones\n",
2225 mdname(mddev), conf->nr_strip_zones);
2226 + /*
2227 + * now since we have the hard sector sizes, we can make sure
2228 + * chunk size is a multiple of that sector size
2229 + */
2230 + if ((mddev->chunk_sectors << 9) % blksize) {
2231 + printk(KERN_ERR "md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
2232 + mdname(mddev),
2233 + mddev->chunk_sectors << 9, blksize);
2234 + err = -EINVAL;
2235 + goto abort;
2236 + }
2237 +
2238 err = -ENOMEM;
2239 conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
2240 conf->nr_strip_zones, GFP_KERNEL);
2241 @@ -188,19 +203,12 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2242 }
2243 dev[j] = rdev1;
2244
2245 - if (mddev->queue)
2246 - disk_stack_limits(mddev->gendisk, rdev1->bdev,
2247 - rdev1->data_offset << 9);
2248 -
2249 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
2250 conf->has_merge_bvec = 1;
2251
2252 if (!smallest || (rdev1->sectors < smallest->sectors))
2253 smallest = rdev1;
2254 cnt++;
2255 -
2256 - if (blk_queue_discard(bdev_get_queue(rdev1->bdev)))
2257 - discard_supported = true;
2258 }
2259 if (cnt != mddev->raid_disks) {
2260 printk(KERN_ERR "md/raid0:%s: too few disks (%d of %d) - "
2261 @@ -261,28 +269,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2262 (unsigned long long)smallest->sectors);
2263 }
2264
2265 - /*
2266 - * now since we have the hard sector sizes, we can make sure
2267 - * chunk size is a multiple of that sector size
2268 - */
2269 - if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
2270 - printk(KERN_ERR "md/raid0:%s: chunk_size of %d not valid\n",
2271 - mdname(mddev),
2272 - mddev->chunk_sectors << 9);
2273 - goto abort;
2274 - }
2275 -
2276 - if (mddev->queue) {
2277 - blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
2278 - blk_queue_io_opt(mddev->queue,
2279 - (mddev->chunk_sectors << 9) * mddev->raid_disks);
2280 -
2281 - if (!discard_supported)
2282 - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
2283 - else
2284 - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
2285 - }
2286 -
2287 pr_debug("md/raid0:%s: done.\n", mdname(mddev));
2288 *private_conf = conf;
2289
2290 @@ -433,12 +419,6 @@ static int raid0_run(struct mddev *mddev)
2291 if (md_check_no_bitmap(mddev))
2292 return -EINVAL;
2293
2294 - if (mddev->queue) {
2295 - blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
2296 - blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
2297 - blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
2298 - }
2299 -
2300 /* if private is not null, we are here after takeover */
2301 if (mddev->private == NULL) {
2302 ret = create_strip_zones(mddev, &conf);
2303 @@ -447,6 +427,29 @@ static int raid0_run(struct mddev *mddev)
2304 mddev->private = conf;
2305 }
2306 conf = mddev->private;
2307 + if (mddev->queue) {
2308 + struct md_rdev *rdev;
2309 + bool discard_supported = false;
2310 +
2311 + blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
2312 + blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
2313 + blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
2314 +
2315 + blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
2316 + blk_queue_io_opt(mddev->queue,
2317 + (mddev->chunk_sectors << 9) * mddev->raid_disks);
2318 +
2319 + rdev_for_each(rdev, mddev) {
2320 + disk_stack_limits(mddev->gendisk, rdev->bdev,
2321 + rdev->data_offset << 9);
2322 + if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
2323 + discard_supported = true;
2324 + }
2325 + if (!discard_supported)
2326 + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
2327 + else
2328 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
2329 + }
2330
2331 /* calculate array device size */
2332 md_set_array_sectors(mddev, raid0_size(mddev, 0, 0));
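
Two moves happen in the raid0 diff: the chunk-size sanity check now compares
against the largest logical block size found among the member devices (safe
even when mddev->queue is NULL), and all queue-limit setup is consolidated in
raid0_run(), where mddev->queue is known to exist. The validation in
isolation, with example sizes:

    #include <stdio.h>

    int main(void)
    {
        unsigned int member_blksize[] = { 512, 4096, 512 }; /* examples */
        unsigned int chunk_bytes = 64 * 1024;
        unsigned int blksize = 512;

        for (unsigned int i = 0; i < 3; i++)
            if (member_blksize[i] > blksize)
                blksize = member_blksize[i];

        if (chunk_bytes % blksize)
            printf("chunk %u not a multiple of block size %u\n",
                   chunk_bytes, blksize);
        else
            printf("chunk %u OK for block size %u\n",
                   chunk_bytes, blksize);
        return 0;
    }
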
2333 diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
2334 index 8be0df758e68..a0b1b460377d 100644
2335 --- a/drivers/mmc/core/host.c
2336 +++ b/drivers/mmc/core/host.c
2337 @@ -373,7 +373,7 @@ int mmc_of_parse(struct mmc_host *host)
2338 0, &cd_gpio_invert);
2339 if (!ret)
2340 dev_info(host->parent, "Got CD GPIO\n");
2341 - else if (ret != -ENOENT)
2342 + else if (ret != -ENOENT && ret != -ENOSYS)
2343 return ret;
2344
2345 /*
2346 @@ -397,7 +397,7 @@ int mmc_of_parse(struct mmc_host *host)
2347 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
2348 if (!ret)
2349 dev_info(host->parent, "Got WP GPIO\n");
2350 - else if (ret != -ENOENT)
2351 + else if (ret != -ENOENT && ret != -ENOSYS)
2352 return ret;
2353
2354 /* See the comment on CD inversion above */
2355 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
2356 index 5f5adafb253a..b354c8bffb9e 100644
2357 --- a/drivers/mmc/host/dw_mmc.c
2358 +++ b/drivers/mmc/host/dw_mmc.c
2359 @@ -99,6 +99,9 @@ struct idmac_desc {
2360
2361 __le32 des3; /* buffer 2 physical address */
2362 };
2363 +
2364 +/* Each descriptor can transfer up to 4KB of data in chained mode */
2365 +#define DW_MCI_DESC_DATA_LENGTH 0x1000
2366 #endif /* CONFIG_MMC_DW_IDMAC */
2367
2368 static bool dw_mci_reset(struct dw_mci *host);
2369 @@ -462,66 +465,96 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host)
2370 static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
2371 unsigned int sg_len)
2372 {
2373 + unsigned int desc_len;
2374 int i;
2375 if (host->dma_64bit_address == 1) {
2376 - struct idmac_desc_64addr *desc = host->sg_cpu;
2377 + struct idmac_desc_64addr *desc_first, *desc_last, *desc;
2378 +
2379 + desc_first = desc_last = desc = host->sg_cpu;
2380
2381 - for (i = 0; i < sg_len; i++, desc++) {
2382 + for (i = 0; i < sg_len; i++) {
2383 unsigned int length = sg_dma_len(&data->sg[i]);
2384 u64 mem_addr = sg_dma_address(&data->sg[i]);
2385
2386 - /*
2387 - * Set the OWN bit and disable interrupts for this
2388 - * descriptor
2389 - */
2390 - desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
2391 - IDMAC_DES0_CH;
2392 - /* Buffer length */
2393 - IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, length);
2394 -
2395 - /* Physical address to DMA to/from */
2396 - desc->des4 = mem_addr & 0xffffffff;
2397 - desc->des5 = mem_addr >> 32;
2398 + for ( ; length ; desc++) {
2399 + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
2400 + length : DW_MCI_DESC_DATA_LENGTH;
2401 +
2402 + length -= desc_len;
2403 +
2404 + /*
2405 + * Set the OWN bit and disable interrupts
2406 + * for this descriptor
2407 + */
2408 + desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
2409 + IDMAC_DES0_CH;
2410 +
2411 + /* Buffer length */
2412 + IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
2413 +
2414 + /* Physical address to DMA to/from */
2415 + desc->des4 = mem_addr & 0xffffffff;
2416 + desc->des5 = mem_addr >> 32;
2417 +
2418 + /* Update physical address for the next desc */
2419 + mem_addr += desc_len;
2420 +
2421 + /* Save pointer to the last descriptor */
2422 + desc_last = desc;
2423 + }
2424 }
2425
2426 /* Set first descriptor */
2427 - desc = host->sg_cpu;
2428 - desc->des0 |= IDMAC_DES0_FD;
2429 + desc_first->des0 |= IDMAC_DES0_FD;
2430
2431 /* Set last descriptor */
2432 - desc = host->sg_cpu + (i - 1) *
2433 - sizeof(struct idmac_desc_64addr);
2434 - desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
2435 - desc->des0 |= IDMAC_DES0_LD;
2436 + desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
2437 + desc_last->des0 |= IDMAC_DES0_LD;
2438
2439 } else {
2440 - struct idmac_desc *desc = host->sg_cpu;
2441 + struct idmac_desc *desc_first, *desc_last, *desc;
2442 +
2443 + desc_first = desc_last = desc = host->sg_cpu;
2444
2445 - for (i = 0; i < sg_len; i++, desc++) {
2446 + for (i = 0; i < sg_len; i++) {
2447 unsigned int length = sg_dma_len(&data->sg[i]);
2448 u32 mem_addr = sg_dma_address(&data->sg[i]);
2449
2450 - /*
2451 - * Set the OWN bit and disable interrupts for this
2452 - * descriptor
2453 - */
2454 - desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
2455 - IDMAC_DES0_DIC | IDMAC_DES0_CH);
2456 - /* Buffer length */
2457 - IDMAC_SET_BUFFER1_SIZE(desc, length);
2458 + for ( ; length ; desc++) {
2459 + desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
2460 + length : DW_MCI_DESC_DATA_LENGTH;
2461 +
2462 + length -= desc_len;
2463 +
2464 + /*
2465 + * Set the OWN bit and disable interrupts
2466 + * for this descriptor
2467 + */
2468 + desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
2469 + IDMAC_DES0_DIC |
2470 + IDMAC_DES0_CH);
2471 +
2472 + /* Buffer length */
2473 + IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
2474
2475 - /* Physical address to DMA to/from */
2476 - desc->des2 = cpu_to_le32(mem_addr);
2477 + /* Physical address to DMA to/from */
2478 + desc->des2 = cpu_to_le32(mem_addr);
2479 +
2480 + /* Update physical address for the next desc */
2481 + mem_addr += desc_len;
2482 +
2483 + /* Save pointer to the last descriptor */
2484 + desc_last = desc;
2485 + }
2486 }
2487
2488 /* Set first descriptor */
2489 - desc = host->sg_cpu;
2490 - desc->des0 |= cpu_to_le32(IDMAC_DES0_FD);
2491 + desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
2492
2493 /* Set last descriptor */
2494 - desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
2495 - desc->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
2496 - desc->des0 |= cpu_to_le32(IDMAC_DES0_LD);
2497 + desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
2498 + IDMAC_DES0_DIC));
2499 + desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
2500 }
2501
2502 wmb();
2503 @@ -2406,7 +2439,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
2504 #ifdef CONFIG_MMC_DW_IDMAC
2505 mmc->max_segs = host->ring_size;
2506 mmc->max_blk_size = 65536;
2507 - mmc->max_seg_size = 0x1000;
2508 + mmc->max_seg_size = DW_MCI_DESC_DATA_LENGTH;
2509 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2510 mmc->max_blk_count = mmc->max_req_size / 512;
2511 #else
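
The rewritten dw_mci_translate_sglist() drops the one-descriptor-per-entry
assumption: each scatterlist entry is carved into IDMAC descriptors of at
most DW_MCI_DESC_DATA_LENGTH (4 KB), the bus address advances per chunk, and
first/last descriptor pointers are tracked across the whole chain. The
splitting loop in isolation (address and length are example values):

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_DATA_LENGTH 0x1000u    /* 4 KB per descriptor */

    int main(void)
    {
        uint64_t mem_addr = 0x80000000ull;  /* example bus address */
        unsigned int length = 0x2800;       /* 10 KB entry */
        unsigned int n = 0;

        for (; length; n++) {
            unsigned int desc_len = length <= DESC_DATA_LENGTH ?
                                    length : DESC_DATA_LENGTH;

            printf("desc %u: addr 0x%llx len 0x%x\n",
                   n, (unsigned long long)mem_addr, desc_len);
            mem_addr += desc_len;
            length -= desc_len;
        }
        printf("%u descriptors for one entry\n", n);
        return 0;
    }
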
2512 diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
2513 index 82f512d87cb8..461698b038f7 100644
2514 --- a/drivers/mmc/host/sdhci-esdhc-imx.c
2515 +++ b/drivers/mmc/host/sdhci-esdhc-imx.c
2516 @@ -868,6 +868,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
2517 struct esdhc_platform_data *boarddata)
2518 {
2519 struct device_node *np = pdev->dev.of_node;
2520 + int ret;
2521
2522 if (!np)
2523 return -ENODEV;
2524 @@ -903,6 +904,14 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
2525
2526 mmc_of_parse_voltage(np, &host->ocr_mask);
2527
2528 + /* call to generic mmc_of_parse to support additional capabilities */
2529 + ret = mmc_of_parse(host->mmc);
2530 + if (ret)
2531 + return ret;
2532 +
2533 + if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
2534 + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
2535 +
2536 return 0;
2537 }
2538 #else
2539 @@ -924,6 +933,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
2540 struct esdhc_platform_data *boarddata;
2541 int err;
2542 struct pltfm_imx_data *imx_data;
2543 + bool dt = true;
2544
2545 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
2546 if (IS_ERR(host))
2547 @@ -1011,11 +1021,44 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
2548 }
2549 imx_data->boarddata = *((struct esdhc_platform_data *)
2550 host->mmc->parent->platform_data);
2551 + dt = false;
2552 + }
2553 + /* write_protect */
2554 + if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
2555 + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
2556 + if (err) {
2557 + dev_err(mmc_dev(host->mmc),
2558 + "failed to request write-protect gpio!\n");
2559 + goto disable_clk;
2560 + }
2561 + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
2562 }
2563
2564 /* card_detect */
2565 - if (boarddata->cd_type == ESDHC_CD_CONTROLLER)
2566 + switch (boarddata->cd_type) {
2567 + case ESDHC_CD_GPIO:
2568 + if (dt)
2569 + break;
2570 + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
2571 + if (err) {
2572 + dev_err(mmc_dev(host->mmc),
2573 + "failed to request card-detect gpio!\n");
2574 + goto disable_clk;
2575 + }
2576 + /* fall through */
2577 +
2578 + case ESDHC_CD_CONTROLLER:
2579 + /* we have a working card_detect back */
2580 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
2581 + break;
2582 +
2583 + case ESDHC_CD_PERMANENT:
2584 + host->mmc->caps |= MMC_CAP_NONREMOVABLE;
2585 + break;
2586 +
2587 + case ESDHC_CD_NONE:
2588 + break;
2589 + }
2590
2591 switch (boarddata->max_bus_width) {
2592 case 8:
2593 @@ -1048,11 +1091,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
2594 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
2595 }
2596
2597 - /* call to generic mmc_of_parse to support additional capabilities */
2598 - err = mmc_of_parse(host->mmc);
2599 - if (err)
2600 - goto disable_clk;
2601 -
2602 err = sdhci_add_host(host);
2603 if (err)
2604 goto disable_clk;
2605 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
2606 index fd41b91436ec..cbaf3df3ebd9 100644
2607 --- a/drivers/mmc/host/sdhci.c
2608 +++ b/drivers/mmc/host/sdhci.c
2609 @@ -55,8 +55,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
2610 static void sdhci_tuning_timer(unsigned long data);
2611 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
2612 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
2613 - struct mmc_data *data,
2614 - struct sdhci_host_next *next);
2615 + struct mmc_data *data);
2616 static int sdhci_do_get_cd(struct sdhci_host *host);
2617
2618 #ifdef CONFIG_PM
2619 @@ -510,7 +509,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
2620 goto fail;
2621 BUG_ON(host->align_addr & host->align_mask);
2622
2623 - host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
2624 + host->sg_count = sdhci_pre_dma_transfer(host, data);
2625 if (host->sg_count < 0)
2626 goto unmap_align;
2627
2628 @@ -649,9 +648,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
2629 }
2630 }
2631
2632 - if (!data->host_cookie)
2633 + if (data->host_cookie == COOKIE_MAPPED) {
2634 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2635 data->sg_len, direction);
2636 + data->host_cookie = COOKIE_UNMAPPED;
2637 + }
2638 }
2639
2640 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
2641 @@ -847,7 +848,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
2642 } else {
2643 int sg_cnt;
2644
2645 - sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
2646 + sg_cnt = sdhci_pre_dma_transfer(host, data);
2647 if (sg_cnt <= 0) {
2648 /*
2649 * This only happens when someone fed
2650 @@ -963,11 +964,13 @@ static void sdhci_finish_data(struct sdhci_host *host)
2651 if (host->flags & SDHCI_USE_ADMA)
2652 sdhci_adma_table_post(host, data);
2653 else {
2654 - if (!data->host_cookie)
2655 + if (data->host_cookie == COOKIE_MAPPED) {
2656 dma_unmap_sg(mmc_dev(host->mmc),
2657 data->sg, data->sg_len,
2658 (data->flags & MMC_DATA_READ) ?
2659 DMA_FROM_DEVICE : DMA_TO_DEVICE);
2660 + data->host_cookie = COOKIE_UNMAPPED;
2661 + }
2662 }
2663 }
2664
2665 @@ -2131,49 +2134,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2666 struct mmc_data *data = mrq->data;
2667
2668 if (host->flags & SDHCI_REQ_USE_DMA) {
2669 - if (data->host_cookie)
2670 + if (data->host_cookie == COOKIE_GIVEN ||
2671 + data->host_cookie == COOKIE_MAPPED)
2672 dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2673 data->flags & MMC_DATA_WRITE ?
2674 DMA_TO_DEVICE : DMA_FROM_DEVICE);
2675 - mrq->data->host_cookie = 0;
2676 + data->host_cookie = COOKIE_UNMAPPED;
2677 }
2678 }
2679
2680 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
2681 - struct mmc_data *data,
2682 - struct sdhci_host_next *next)
2683 + struct mmc_data *data)
2684 {
2685 int sg_count;
2686
2687 - if (!next && data->host_cookie &&
2688 - data->host_cookie != host->next_data.cookie) {
2689 - pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
2690 - __func__, data->host_cookie, host->next_data.cookie);
2691 - data->host_cookie = 0;
2692 + if (data->host_cookie == COOKIE_MAPPED) {
2693 + data->host_cookie = COOKIE_GIVEN;
2694 + return data->sg_count;
2695 }
2696
2697 - /* Check if next job is already prepared */
2698 - if (next ||
2699 - (!next && data->host_cookie != host->next_data.cookie)) {
2700 - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
2701 - data->sg_len,
2702 - data->flags & MMC_DATA_WRITE ?
2703 - DMA_TO_DEVICE : DMA_FROM_DEVICE);
2704 -
2705 - } else {
2706 - sg_count = host->next_data.sg_count;
2707 - host->next_data.sg_count = 0;
2708 - }
2709 + WARN_ON(data->host_cookie == COOKIE_GIVEN);
2710
2711 + sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2712 + data->flags & MMC_DATA_WRITE ?
2713 + DMA_TO_DEVICE : DMA_FROM_DEVICE);
2714
2715 if (sg_count == 0)
2716 - return -EINVAL;
2717 + return -ENOSPC;
2718
2719 - if (next) {
2720 - next->sg_count = sg_count;
2721 - data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
2722 - } else
2723 - host->sg_count = sg_count;
2724 + data->sg_count = sg_count;
2725 + data->host_cookie = COOKIE_MAPPED;
2726
2727 return sg_count;
2728 }
2729 @@ -2183,16 +2173,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
2730 {
2731 struct sdhci_host *host = mmc_priv(mmc);
2732
2733 - if (mrq->data->host_cookie) {
2734 - mrq->data->host_cookie = 0;
2735 - return;
2736 - }
2737 + mrq->data->host_cookie = COOKIE_UNMAPPED;
2738
2739 if (host->flags & SDHCI_REQ_USE_DMA)
2740 - if (sdhci_pre_dma_transfer(host,
2741 - mrq->data,
2742 - &host->next_data) < 0)
2743 - mrq->data->host_cookie = 0;
2744 + sdhci_pre_dma_transfer(host, mrq->data);
2745 }
2746
2747 static void sdhci_card_event(struct mmc_host *mmc)
2748 @@ -3090,7 +3074,6 @@ int sdhci_add_host(struct sdhci_host *host)
2749 host->max_clk = host->ops->get_max_clock(host);
2750 }
2751
2752 - host->next_data.cookie = 1;
2753 /*
2754 * In case of Host Controller v3.00, find out whether clock
2755 * multiplier is supported.
2756 diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
2757 index e639b7f435e5..eea23f62356a 100644
2758 --- a/drivers/mmc/host/sdhci.h
2759 +++ b/drivers/mmc/host/sdhci.h
2760 @@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
2761 */
2762 #define SDHCI_MAX_SEGS 128
2763
2764 -struct sdhci_host_next {
2765 - unsigned int sg_count;
2766 - s32 cookie;
2767 +enum sdhci_cookie {
2768 + COOKIE_UNMAPPED,
2769 + COOKIE_MAPPED,
2770 + COOKIE_GIVEN,
2771 };
2772
2773 struct sdhci_host {
2774 @@ -506,7 +507,6 @@ struct sdhci_host {
2775 #define SDHCI_TUNING_MODE_1 0
2776 struct timer_list tuning_timer; /* Timer for tuning */
2777
2778 - struct sdhci_host_next next_data;
2779 unsigned long private[0] ____cacheline_aligned;
2780 };
2781
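
The sdhci_host_next bookkeeping gives way to a per-request three-state
cookie: pre_req maps the buffer (COOKIE_UNMAPPED to COOKIE_MAPPED), the
request path consumes that mapping (COOKIE_MAPPED to COOKIE_GIVEN), and every
unmap path resets it to COOKIE_UNMAPPED. A compact model of the transitions:

    #include <stdio.h>

    enum cookie { COOKIE_UNMAPPED, COOKIE_MAPPED, COOKIE_GIVEN };

    static const char *name[] = { "UNMAPPED", "MAPPED", "GIVEN" };

    static enum cookie pre_dma_transfer(enum cookie c)
    {
        if (c == COOKIE_MAPPED)
            return COOKIE_GIVEN;    /* reuse the mapping made in pre_req */
        return COOKIE_MAPPED;       /* otherwise map on demand */
    }

    int main(void)
    {
        enum cookie c = COOKIE_UNMAPPED;

        c = pre_dma_transfer(c);    /* pre_req: map ahead of time */
        printf("after pre_req: %s\n", name[c]);
        c = pre_dma_transfer(c);    /* request path: hand the mapping over */
        printf("after request: %s\n", name[c]);
        c = COOKIE_UNMAPPED;        /* post_req: unmapped again */
        printf("after post_req: %s\n", name[c]);
        return 0;
    }
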
2782 diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
2783 index a4615fcc3d00..94a357d93bab 100644
2784 --- a/drivers/mtd/nand/pxa3xx_nand.c
2785 +++ b/drivers/mtd/nand/pxa3xx_nand.c
2786 @@ -1475,6 +1475,9 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
2787 if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
2788 goto KEEP_CONFIG;
2789
2790 + /* Set a default chunk size */
2791 + info->chunk_size = 512;
2792 +
2793 ret = pxa3xx_nand_sensing(info);
2794 if (ret) {
2795 dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
2796 diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
2797 index 6f93b2990d25..499b8e433d3d 100644
2798 --- a/drivers/mtd/nand/sunxi_nand.c
2799 +++ b/drivers/mtd/nand/sunxi_nand.c
2800 @@ -138,6 +138,10 @@
2801 #define NFC_ECC_MODE GENMASK(15, 12)
2802 #define NFC_RANDOM_SEED GENMASK(30, 16)
2803
2804 +/* NFC_USER_DATA helper macros */
2805 +#define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \
2806 + ((buf)[2] << 16) | ((buf)[3] << 24))
2807 +
2808 #define NFC_DEFAULT_TIMEOUT_MS 1000
2809
2810 #define NFC_SRAM_SIZE 1024
2811 @@ -632,15 +636,9 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
2812 offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize;
2813
2814 /* Fill OOB data in */
2815 - if (oob_required) {
2816 - tmp = 0xffffffff;
2817 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
2818 - 4);
2819 - } else {
2820 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE,
2821 - chip->oob_poi + offset - mtd->writesize,
2822 - 4);
2823 - }
2824 + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi +
2825 + layout->oobfree[i].offset),
2826 + nfc->regs + NFC_REG_USER_DATA_BASE);
2827
2828 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1);
2829
2830 @@ -770,14 +768,8 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
2831 offset += ecc->size;
2832
2833 /* Fill OOB data in */
2834 - if (oob_required) {
2835 - tmp = 0xffffffff;
2836 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp,
2837 - 4);
2838 - } else {
2839 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob,
2840 - 4);
2841 - }
2842 + writel(NFC_BUF_TO_USER_DATA(oob),
2843 + nfc->regs + NFC_REG_USER_DATA_BASE);
2844
2845 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR |
2846 (1 << 30);
2847 @@ -1312,6 +1304,7 @@ static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
2848 node);
2849 nand_release(&chip->mtd);
2850 sunxi_nand_ecc_cleanup(&chip->nand.ecc);
2851 + list_del(&chip->node);
2852 }
2853 }
2854
2855 diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
2856 index 5bbd1f094f4e..1fc23e48fe8e 100644
2857 --- a/drivers/mtd/ubi/io.c
2858 +++ b/drivers/mtd/ubi/io.c
2859 @@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
2860 goto bad;
2861 }
2862
2863 + if (data_size > ubi->leb_size) {
2864 + ubi_err(ubi, "bad data_size");
2865 + goto bad;
2866 + }
2867 +
2868 if (vol_type == UBI_VID_STATIC) {
2869 /*
2870 * Although from high-level point of view static volumes may
2871 diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
2872 index 68c9c5ea676f..bf2f916df4e2 100644
2873 --- a/drivers/mtd/ubi/vtbl.c
2874 +++ b/drivers/mtd/ubi/vtbl.c
2875 @@ -646,6 +646,7 @@ static int init_volumes(struct ubi_device *ubi,
2876 if (ubi->corr_peb_count)
2877 ubi_err(ubi, "%d PEBs are corrupted and not used",
2878 ubi->corr_peb_count);
2879 + return -ENOSPC;
2880 }
2881 ubi->rsvd_pebs += reserved_pebs;
2882 ubi->avail_pebs -= reserved_pebs;
2883 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2884 index 16214d3d57a4..18fef94542f8 100644
2885 --- a/drivers/mtd/ubi/wl.c
2886 +++ b/drivers/mtd/ubi/wl.c
2887 @@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
2888 if (ubi->corr_peb_count)
2889 ubi_err(ubi, "%d PEBs are corrupted and not used",
2890 ubi->corr_peb_count);
2891 + err = -ENOSPC;
2892 goto out_free;
2893 }
2894 ubi->avail_pebs -= reserved_pebs;
2895 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2896 index 4f6bf996851e..7dfbcde34509 100644
2897 --- a/drivers/net/ethernet/intel/igb/igb_main.c
2898 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2899 @@ -2864,7 +2864,7 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
2900 return;
2901
2902 pci_sriov_set_totalvfs(pdev, 7);
2903 - igb_pci_enable_sriov(pdev, max_vfs);
2904 + igb_enable_sriov(pdev, max_vfs);
2905
2906 #endif /* CONFIG_PCI_IOV */
2907 }
2908 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
2909 index 2fd9e180272b..c5dc6b57212e 100644
2910 --- a/drivers/net/wireless/ath/ath10k/htc.c
2911 +++ b/drivers/net/wireless/ath/ath10k/htc.c
2912 @@ -163,8 +163,10 @@ int ath10k_htc_send(struct ath10k_htc *htc,
2913 skb_cb->eid = eid;
2914 skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
2915 ret = dma_mapping_error(dev, skb_cb->paddr);
2916 - if (ret)
2917 + if (ret) {
2918 + ret = -EIO;
2919 goto err_credits;
2920 + }
2921
2922 sg_item.transfer_id = ep->eid;
2923 sg_item.transfer_context = skb;
2924 diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
2925 index cbd2bc9e6202..7f4854a52a7c 100644
2926 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c
2927 +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
2928 @@ -371,8 +371,10 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
2929 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
2930 DMA_TO_DEVICE);
2931 res = dma_mapping_error(dev, skb_cb->paddr);
2932 - if (res)
2933 + if (res) {
2934 + res = -EIO;
2935 goto err_free_txdesc;
2936 + }
2937
2938 skb_put(txdesc, len);
2939 cmd = (struct htt_cmd *)txdesc->data;
2940 @@ -463,8 +465,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
2941 skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
2942 DMA_TO_DEVICE);
2943 res = dma_mapping_error(dev, skb_cb->paddr);
2944 - if (res)
2945 + if (res) {
2946 + res = -EIO;
2947 goto err_free_txbuf;
2948 + }
2949
2950 if (likely(use_frags)) {
2951 frags = skb_cb->htt.txbuf->frags;
2952 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2953 index 973485bd4121..5e021b0b3f9e 100644
2954 --- a/drivers/net/wireless/ath/ath10k/mac.c
2955 +++ b/drivers/net/wireless/ath/ath10k/mac.c
2956 @@ -4464,6 +4464,21 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2957 return ret;
2958 }
2959
2960 +static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
2961 +{
2962 + /* Even though there's a WMI enum for fragmentation threshold, no known
2963 + * firmware actually implements it. Moreover it is not possible to
2964 + * delegate frame fragmentation to mac80211 because firmware clears the "more
2965 + * fragments" bit in frame control making it impossible for remote
2966 + * devices to reassemble frames.
2967 + *
2968 + * Hence implement a dummy callback just to say fragmentation isn't
2969 + * supported. This effectively prevents mac80211 from doing frame
2970 + * fragmentation in software.
2971 + */
2972 + return -EOPNOTSUPP;
2973 +}
2974 +
2975 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2976 u32 queues, bool drop)
2977 {
2978 @@ -5108,6 +5123,7 @@ static const struct ieee80211_ops ath10k_ops = {
2979 .remain_on_channel = ath10k_remain_on_channel,
2980 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
2981 .set_rts_threshold = ath10k_set_rts_threshold,
2982 + .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
2983 .flush = ath10k_flush,
2984 .tx_last_beacon = ath10k_tx_last_beacon,
2985 .set_antenna = ath10k_set_antenna,
2986 diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
2987 index ead543282128..3c4c800ab505 100644
2988 --- a/drivers/net/wireless/ath/ath10k/pci.c
2989 +++ b/drivers/net/wireless/ath/ath10k/pci.c
2990 @@ -1378,8 +1378,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
2991
2992 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
2993 ret = dma_mapping_error(ar->dev, req_paddr);
2994 - if (ret)
2995 + if (ret) {
2996 + ret = -EIO;
2997 goto err_dma;
2998 + }
2999
3000 if (resp && resp_len) {
3001 tresp = kzalloc(*resp_len, GFP_KERNEL);
3002 @@ -1391,8 +1393,10 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
3003 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
3004 DMA_FROM_DEVICE);
3005 ret = dma_mapping_error(ar->dev, resp_paddr);
3006 - if (ret)
3007 + if (ret) {
3008 + ret = EIO;
3009 goto err_req;
3010 + }
3011
3012 xfer.wait_for_resp = true;
3013 xfer.resp_len = 0;
3014 diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
3015 index c7ea77edce24..408ecd98e61b 100644
3016 --- a/drivers/net/wireless/ath/ath10k/wmi.c
3017 +++ b/drivers/net/wireless/ath/ath10k/wmi.c
3018 @@ -2517,6 +2517,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
3019 ath10k_warn(ar, "failed to map beacon: %d\n",
3020 ret);
3021 dev_kfree_skb_any(bcn);
3022 + ret = -EIO;
3023 goto skip;
3024 }
3025
3026 diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
3027 index 1c6788aecc62..40d72312f3df 100644
3028 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
3029 +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
3030 @@ -203,8 +203,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
3031
3032 /* Copy firmware into DMA-accessible memory */
3033 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
3034 - if (!fw)
3035 - return -ENOMEM;
3036 + if (!fw) {
3037 + status = -ENOMEM;
3038 + goto out;
3039 + }
3040 len = fw_entry->size;
3041
3042 if (len % 4)
3043 @@ -217,6 +219,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
3044
3045 status = rsi_copy_to_card(common, fw, len, num_blocks);
3046 kfree(fw);
3047 +
3048 +out:
3049 release_firmware(fw_entry);
3050 return status;
3051 }
3052 diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
3053 index 30c2cf7fa93b..de4900862836 100644
3054 --- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
3055 +++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
3056 @@ -148,8 +148,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
3057
3058 /* Copy firmware into DMA-accessible memory */
3059 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
3060 - if (!fw)
3061 - return -ENOMEM;
3062 + if (!fw) {
3063 + status = -ENOMEM;
3064 + goto out;
3065 + }
3066 len = fw_entry->size;
3067
3068 if (len % 4)
3069 @@ -162,6 +164,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
3070
3071 status = rsi_copy_to_card(common, fw, len, num_blocks);
3072 kfree(fw);
3073 +
3074 +out:
3075 release_firmware(fw_entry);
3076 return status;
3077 }
3078 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
3079 index e031c943286e..52f081f4dfd5 100644
3080 --- a/drivers/net/xen-netfront.c
3081 +++ b/drivers/net/xen-netfront.c
3082 @@ -1353,7 +1353,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
3083 queue->tx_evtchn = queue->rx_evtchn = 0;
3084 queue->tx_irq = queue->rx_irq = 0;
3085
3086 - napi_synchronize(&queue->napi);
3087 + if (netif_running(info->netdev))
3088 + napi_synchronize(&queue->napi);
3089
3090 xennet_release_tx_bufs(queue);
3091 xennet_release_rx_bufs(queue);
3092 diff --git a/drivers/pci/access.c b/drivers/pci/access.c
3093 index b965c12168b7..502a82ca1db0 100644
3094 --- a/drivers/pci/access.c
3095 +++ b/drivers/pci/access.c
3096 @@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
3097 static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
3098 void *arg)
3099 {
3100 - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
3101 + struct pci_dev *tdev = pci_get_slot(dev->bus,
3102 + PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3103 ssize_t ret;
3104
3105 if (!tdev)
3106 @@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
3107 static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
3108 const void *arg)
3109 {
3110 - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
3111 + struct pci_dev *tdev = pci_get_slot(dev->bus,
3112 + PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3113 ssize_t ret;
3114
3115 if (!tdev)
3116 @@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
3117 .release = pci_vpd_pci22_release,
3118 };
3119
3120 -static int pci_vpd_f0_dev_check(struct pci_dev *dev)
3121 -{
3122 - struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
3123 - int ret = 0;
3124 -
3125 - if (!tdev)
3126 - return -ENODEV;
3127 - if (!tdev->vpd || !tdev->multifunction ||
3128 - dev->class != tdev->class || dev->vendor != tdev->vendor ||
3129 - dev->device != tdev->device)
3130 - ret = -ENODEV;
3131 -
3132 - pci_dev_put(tdev);
3133 - return ret;
3134 -}
3135 -
3136 int pci_vpd_pci22_init(struct pci_dev *dev)
3137 {
3138 struct pci_vpd_pci22 *vpd;
3139 @@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
3140 cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
3141 if (!cap)
3142 return -ENODEV;
3143 - if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
3144 - int ret = pci_vpd_f0_dev_check(dev);
3145
3146 - if (ret)
3147 - return ret;
3148 - }
3149 vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
3150 if (!vpd)
3151 return -ENOMEM;
3152 diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
3153 index 6fbd3f2b5992..d3346d23963b 100644
3154 --- a/drivers/pci/bus.c
3155 +++ b/drivers/pci/bus.c
3156 @@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
3157
3158 res->start = start;
3159 res->end = end;
3160 + res->flags &= ~IORESOURCE_UNSET;
3161 + orig_res.flags &= ~IORESOURCE_UNSET;
3162 dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
3163 &orig_res, res);
3164
3165 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
3166 index 804cd3b02c66..4a6933f02cd0 100644
3167 --- a/drivers/pci/quirks.c
3168 +++ b/drivers/pci/quirks.c
3169 @@ -1915,11 +1915,27 @@ static void quirk_netmos(struct pci_dev *dev)
3170 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
3171 PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
3172
3173 +/*
3174 + * Quirk non-zero PCI functions to route VPD access through function 0 for
3175 + * devices that share VPD resources between functions. The functions are
3176 + * expected to be identical devices.
3177 + */
3178 static void quirk_f0_vpd_link(struct pci_dev *dev)
3179 {
3180 - if (!dev->multifunction || !PCI_FUNC(dev->devfn))
3181 + struct pci_dev *f0;
3182 +
3183 + if (!PCI_FUNC(dev->devfn))
3184 return;
3185 - dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
3186 +
3187 + f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
3188 + if (!f0)
3189 + return;
3190 +
3191 + if (f0->vpd && dev->class == f0->class &&
3192 + dev->vendor == f0->vendor && dev->device == f0->device)
3193 + dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
3194 +
3195 + pci_dev_put(f0);
3196 }
3197 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
3198 PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
3199 diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
3200 index 803945259da8..42861cc70158 100644
3201 --- a/drivers/pcmcia/sa1100_generic.c
3202 +++ b/drivers/pcmcia/sa1100_generic.c
3203 @@ -93,7 +93,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
3204 for (i = 0; i < sinfo->nskt; i++)
3205 soc_pcmcia_remove_one(&sinfo->skt[i]);
3206
3207 - clk_put(sinfo->clk);
3208 kfree(sinfo);
3209 return 0;
3210 }
3211 diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
3212 index cf6de2c2b329..553d70a67f80 100644
3213 --- a/drivers/pcmcia/sa11xx_base.c
3214 +++ b/drivers/pcmcia/sa11xx_base.c
3215 @@ -222,7 +222,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
3216 int i, ret = 0;
3217 struct clk *clk;
3218
3219 - clk = clk_get(dev, NULL);
3220 + clk = devm_clk_get(dev, NULL);
3221 if (IS_ERR(clk))
3222 return PTR_ERR(clk);
3223
3224 @@ -251,7 +251,6 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
3225 if (ret) {
3226 while (--i >= 0)
3227 soc_pcmcia_remove_one(&sinfo->skt[i]);
3228 - clk_put(clk);
3229 kfree(sinfo);
3230 } else {
3231 dev_set_drvdata(dev, sinfo);
3232 diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
3233 index 9956b9902bb4..93e54a0f471a 100644
3234 --- a/drivers/platform/x86/toshiba_acpi.c
3235 +++ b/drivers/platform/x86/toshiba_acpi.c
3236 @@ -2525,11 +2525,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
3237 if (error)
3238 return error;
3239
3240 - error = toshiba_hotkey_event_type_get(dev, &events_type);
3241 - if (error) {
3242 - pr_err("Unable to query Hotkey Event Type\n");
3243 - return error;
3244 - }
3245 + if (toshiba_hotkey_event_type_get(dev, &events_type))
3246 + pr_notice("Unable to query Hotkey Event Type\n");
3247 +
3248 dev->hotkey_event_type = events_type;
3249
3250 dev->hotkey_dev = input_allocate_device();
3251 diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
3252 index 7f3d389bd601..a67eeace6a89 100644
3253 --- a/drivers/power/avs/Kconfig
3254 +++ b/drivers/power/avs/Kconfig
3255 @@ -13,7 +13,7 @@ menuconfig POWER_AVS
3256
3257 config ROCKCHIP_IODOMAIN
3258 tristate "Rockchip IO domain support"
3259 - depends on ARCH_ROCKCHIP && OF
3260 + depends on POWER_AVS && ARCH_ROCKCHIP && OF
3261 help
3262 Say y here to enable support io domains on Rockchip SoCs. It is
3263 necessary for the io domain setting of the SoC to match the
3264 diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
3265 index add419d6ff34..a56a7b243e91 100644
3266 --- a/drivers/scsi/3w-9xxx.c
3267 +++ b/drivers/scsi/3w-9xxx.c
3268 @@ -212,6 +212,17 @@ static const struct file_operations twa_fops = {
3269 .llseek = noop_llseek,
3270 };
3271
3272 +/*
3273 + * The controllers use an inline buffer instead of a mapped SGL for small,
3274 + * single entry buffers. Note that we treat a zero-length transfer like
3275 + * a mapped SGL.
3276 + */
3277 +static bool twa_command_mapped(struct scsi_cmnd *cmd)
3278 +{
3279 + return scsi_sg_count(cmd) != 1 ||
3280 + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
3281 +}
3282 +
3283 /* This function will complete an aen request from the isr */
3284 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
3285 {
3286 @@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
3287 }
3288
3289 /* Now complete the io */
3290 - scsi_dma_unmap(cmd);
3291 + if (twa_command_mapped(cmd))
3292 + scsi_dma_unmap(cmd);
3293 cmd->scsi_done(cmd);
3294 tw_dev->state[request_id] = TW_S_COMPLETED;
3295 twa_free_request_id(tw_dev, request_id);
3296 @@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
3297 struct scsi_cmnd *cmd = tw_dev->srb[i];
3298
3299 cmd->result = (DID_RESET << 16);
3300 - scsi_dma_unmap(cmd);
3301 + if (twa_command_mapped(cmd))
3302 + scsi_dma_unmap(cmd);
3303 cmd->scsi_done(cmd);
3304 }
3305 }
3306 @@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
3307 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
3308 switch (retval) {
3309 case SCSI_MLQUEUE_HOST_BUSY:
3310 - scsi_dma_unmap(SCpnt);
3311 + if (twa_command_mapped(SCpnt))
3312 + scsi_dma_unmap(SCpnt);
3313 twa_free_request_id(tw_dev, request_id);
3314 break;
3315 case 1:
3316 SCpnt->result = (DID_ERROR << 16);
3317 - scsi_dma_unmap(SCpnt);
3318 + if (twa_command_mapped(SCpnt))
3319 + scsi_dma_unmap(SCpnt);
3320 done(SCpnt);
3321 tw_dev->state[request_id] = TW_S_COMPLETED;
3322 twa_free_request_id(tw_dev, request_id);
3323 @@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
3324 /* Map sglist from scsi layer to cmd packet */
3325
3326 if (scsi_sg_count(srb)) {
3327 - if ((scsi_sg_count(srb) == 1) &&
3328 - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
3329 + if (!twa_command_mapped(srb)) {
3330 if (srb->sc_data_direction == DMA_TO_DEVICE ||
3331 srb->sc_data_direction == DMA_BIDIRECTIONAL)
3332 scsi_sg_copy_to_buffer(srb,
3333 @@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int re
3334 {
3335 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
3336
3337 - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
3338 + if (!twa_command_mapped(cmd) &&
3339 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
3340 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
3341 if (scsi_sg_count(cmd) == 1) {
3342 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
3343 index a9aa38903efe..cccab6188328 100644
3344 --- a/drivers/scsi/ipr.c
3345 +++ b/drivers/scsi/ipr.c
3346 @@ -4554,7 +4554,7 @@ static ssize_t ipr_store_raw_mode(struct device *dev,
3347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3348 res = (struct ipr_resource_entry *)sdev->hostdata;
3349 if (res) {
3350 - if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
3351 + if (ipr_is_af_dasd_device(res)) {
3352 res->raw_mode = simple_strtoul(buf, NULL, 10);
3353 len = strlen(buf);
3354 if (res->sdev)
3355 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
3356 index ce6c770d74d5..c6b93d273799 100644
3357 --- a/drivers/scsi/scsi_error.c
3358 +++ b/drivers/scsi/scsi_error.c
3359 @@ -2169,8 +2169,17 @@ int scsi_error_handler(void *data)
3360 * We never actually get interrupted because kthread_run
3361 * disables signal delivery for the created thread.
3362 */
3363 - while (!kthread_should_stop()) {
3364 + while (true) {
3365 + /*
3366 + * The sequence in kthread_stop() sets the stop flag first
3367 + * then wakes the process. To avoid missed wakeups, the task
3368 + * should always be in a non-running state before the stop
3369 + * flag is checked.
3370 + */
3371 set_current_state(TASK_INTERRUPTIBLE);
3372 + if (kthread_should_stop())
3373 + break;
3374 +
3375 if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
3376 shost->host_failed != atomic_read(&shost->host_busy)) {
3377 SCSI_LOG_ERROR_RECOVERY(1,
3378 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
3379 index e3223ac75a7c..f089082c00e1 100644
3380 --- a/drivers/spi/spi-pxa2xx.c
3381 +++ b/drivers/spi/spi-pxa2xx.c
3382 @@ -624,6 +624,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
3383 if (!(sccr1_reg & SSCR1_TIE))
3384 mask &= ~SSSR_TFS;
3385
3386 + /* Ignore RX timeout interrupt if it is disabled */
3387 + if (!(sccr1_reg & SSCR1_TINTE))
3388 + mask &= ~SSSR_TINT;
3389 +
3390 if (!(status & mask))
3391 return IRQ_NONE;
3392
3393 diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
3394 index 2e32ea2f194f..be6155cba9de 100644
3395 --- a/drivers/spi/spi-xtensa-xtfpga.c
3396 +++ b/drivers/spi/spi-xtensa-xtfpga.c
3397 @@ -34,13 +34,13 @@ struct xtfpga_spi {
3398 static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
3399 unsigned addr, u32 val)
3400 {
3401 - iowrite32(val, spi->regs + addr);
3402 + __raw_writel(val, spi->regs + addr);
3403 }
3404
3405 static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
3406 unsigned addr)
3407 {
3408 - return ioread32(spi->regs + addr);
3409 + return __raw_readl(spi->regs + addr);
3410 }
3411
3412 static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
3413 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3414 index d35c1a13217c..029dbd33b4b2 100644
3415 --- a/drivers/spi/spi.c
3416 +++ b/drivers/spi/spi.c
3417 @@ -1427,8 +1427,7 @@ static struct class spi_master_class = {
3418 *
3419 * The caller is responsible for assigning the bus number and initializing
3420 * the master's methods before calling spi_register_master(); and (after errors
3421 - * adding the device) calling spi_master_put() and kfree() to prevent a memory
3422 - * leak.
3423 + * adding the device) calling spi_master_put() to prevent a memory leak.
3424 */
3425 struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
3426 {
3427 diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
3428 index 92c909eed6b5..8fab566e0f0b 100644
3429 --- a/drivers/spi/spidev.c
3430 +++ b/drivers/spi/spidev.c
3431 @@ -664,7 +664,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
3432 kfree(spidev->rx_buffer);
3433 spidev->rx_buffer = NULL;
3434
3435 - spidev->speed_hz = spidev->spi->max_speed_hz;
3436 + if (spidev->spi)
3437 + spidev->speed_hz = spidev->spi->max_speed_hz;
3438
3439 /* ... after we unbound from the underlying device? */
3440 spin_lock_irq(&spidev->spi_lock);
3441 diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
3442 index b0b96ab31954..abbc42a56e7c 100644
3443 --- a/drivers/staging/android/ion/ion.c
3444 +++ b/drivers/staging/android/ion/ion.c
3445 @@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
3446 mutex_unlock(&client->lock);
3447 goto end;
3448 }
3449 - mutex_unlock(&client->lock);
3450
3451 handle = ion_handle_create(client, buffer);
3452 - if (IS_ERR(handle))
3453 + if (IS_ERR(handle)) {
3454 + mutex_unlock(&client->lock);
3455 goto end;
3456 + }
3457
3458 - mutex_lock(&client->lock);
3459 ret = ion_handle_add(client, handle);
3460 mutex_unlock(&client->lock);
3461 if (ret) {
3462 diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
3463 index 4299cf45f947..5e1f16c36b49 100644
3464 --- a/drivers/staging/speakup/fakekey.c
3465 +++ b/drivers/staging/speakup/fakekey.c
3466 @@ -81,6 +81,7 @@ void speakup_fake_down_arrow(void)
3467 __this_cpu_write(reporting_keystroke, true);
3468 input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
3469 input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
3470 + input_sync(virt_keyboard);
3471 __this_cpu_write(reporting_keystroke, false);
3472
3473 /* reenable preemption */
3474 diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
3475 index 0ab6e2efd28c..330bbe831066 100644
3476 --- a/drivers/target/iscsi/iscsi_target.c
3477 +++ b/drivers/target/iscsi/iscsi_target.c
3478 @@ -341,7 +341,6 @@ static struct iscsi_np *iscsit_get_np(
3479
3480 struct iscsi_np *iscsit_add_np(
3481 struct __kernel_sockaddr_storage *sockaddr,
3482 - char *ip_str,
3483 int network_transport)
3484 {
3485 struct sockaddr_in *sock_in;
3486 @@ -370,11 +369,9 @@ struct iscsi_np *iscsit_add_np(
3487 np->np_flags |= NPF_IP_NETWORK;
3488 if (sockaddr->ss_family == AF_INET6) {
3489 sock_in6 = (struct sockaddr_in6 *)sockaddr;
3490 - snprintf(np->np_ip, IPV6_ADDRESS_SPACE, "%s", ip_str);
3491 np->np_port = ntohs(sock_in6->sin6_port);
3492 } else {
3493 sock_in = (struct sockaddr_in *)sockaddr;
3494 - sprintf(np->np_ip, "%s", ip_str);
3495 np->np_port = ntohs(sock_in->sin_port);
3496 }
3497
3498 @@ -411,8 +408,8 @@ struct iscsi_np *iscsit_add_np(
3499 list_add_tail(&np->np_list, &g_np_list);
3500 mutex_unlock(&np_lock);
3501
3502 - pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
3503 - np->np_ip, np->np_port, np->np_transport->name);
3504 + pr_debug("CORE[0] - Added Network Portal: %pISc:%hu on %s\n",
3505 + &np->np_sockaddr, np->np_port, np->np_transport->name);
3506
3507 return np;
3508 }
3509 @@ -481,8 +478,8 @@ int iscsit_del_np(struct iscsi_np *np)
3510 list_del(&np->np_list);
3511 mutex_unlock(&np_lock);
3512
3513 - pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
3514 - np->np_ip, np->np_port, np->np_transport->name);
3515 + pr_debug("CORE[0] - Removed Network Portal: %pISc:%hu on %s\n",
3516 + &np->np_sockaddr, np->np_port, np->np_transport->name);
3517
3518 iscsit_put_transport(np->np_transport);
3519 kfree(np);
3520 @@ -3467,7 +3464,6 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3521 tpg_np_list) {
3522 struct iscsi_np *np = tpg_np->tpg_np;
3523 bool inaddr_any = iscsit_check_inaddr_any(np);
3524 - char *fmt_str;
3525
3526 if (np->np_network_transport != network_transport)
3527 continue;
3528 @@ -3495,15 +3491,18 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3529 }
3530 }
3531
3532 - if (np->np_sockaddr.ss_family == AF_INET6)
3533 - fmt_str = "TargetAddress=[%s]:%hu,%hu";
3534 - else
3535 - fmt_str = "TargetAddress=%s:%hu,%hu";
3536 -
3537 - len = sprintf(buf, fmt_str,
3538 - inaddr_any ? conn->local_ip : np->np_ip,
3539 - np->np_port,
3540 - tpg->tpgt);
3541 + if (inaddr_any) {
3542 + len = sprintf(buf, "TargetAddress="
3543 + "%s:%hu,%hu",
3544 + conn->local_ip,
3545 + np->np_port,
3546 + tpg->tpgt);
3547 + } else {
3548 + len = sprintf(buf, "TargetAddress="
3549 + "%pISpc,%hu",
3550 + &np->np_sockaddr,
3551 + tpg->tpgt);
3552 + }
3553 len += 1;
3554
3555 if ((len + payload_len) > buffer_len) {
3556 diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
3557 index 7d0f9c00d9c2..d294f030a097 100644
3558 --- a/drivers/target/iscsi/iscsi_target.h
3559 +++ b/drivers/target/iscsi/iscsi_target.h
3560 @@ -13,7 +13,7 @@ extern int iscsit_deaccess_np(struct iscsi_np *, struct iscsi_portal_group *,
3561 extern bool iscsit_check_np_match(struct __kernel_sockaddr_storage *,
3562 struct iscsi_np *, int);
3563 extern struct iscsi_np *iscsit_add_np(struct __kernel_sockaddr_storage *,
3564 - char *, int);
3565 + int);
3566 extern int iscsit_reset_np_thread(struct iscsi_np *, struct iscsi_tpg_np *,
3567 struct iscsi_portal_group *, bool);
3568 extern int iscsit_del_np(struct iscsi_np *);
3569 diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
3570 index 469fce44ebad..6f2fb546477e 100644
3571 --- a/drivers/target/iscsi/iscsi_target_configfs.c
3572 +++ b/drivers/target/iscsi/iscsi_target_configfs.c
3573 @@ -100,7 +100,7 @@ static ssize_t lio_target_np_store_sctp(
3574 * Use existing np->np_sockaddr for SCTP network portal reference
3575 */
3576 tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
3577 - np->np_ip, tpg_np, ISCSI_SCTP_TCP);
3578 + tpg_np, ISCSI_SCTP_TCP);
3579 if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
3580 goto out;
3581 } else {
3582 @@ -178,7 +178,7 @@ static ssize_t lio_target_np_store_iser(
3583 }
3584
3585 tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
3586 - np->np_ip, tpg_np, ISCSI_INFINIBAND);
3587 + tpg_np, ISCSI_INFINIBAND);
3588 if (IS_ERR(tpg_np_iser)) {
3589 rc = PTR_ERR(tpg_np_iser);
3590 goto out;
3591 @@ -249,8 +249,8 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
3592 return ERR_PTR(-EINVAL);
3593 }
3594 str++; /* Skip over leading "[" */
3595 - *str2 = '\0'; /* Terminate the IPv6 address */
3596 - str2++; /* Skip over the "]" */
3597 + *str2 = '\0'; /* Terminate the unbracketed IPv6 address */
3598 + str2++; /* Skip over the \0 */
3599 port_str = strstr(str2, ":");
3600 if (!port_str) {
3601 pr_err("Unable to locate \":port\""
3602 @@ -317,7 +317,7 @@ static struct se_tpg_np *lio_target_call_addnptotpg(
3603 * sys/kernel/config/iscsi/$IQN/$TPG/np/$IP:$PORT/
3604 *
3605 */
3606 - tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, str, NULL,
3607 + tpg_np = iscsit_tpg_add_network_portal(tpg, &sockaddr, NULL,
3608 ISCSI_TCP);
3609 if (IS_ERR(tpg_np)) {
3610 iscsit_put_tpg(tpg);
3611 @@ -345,8 +345,8 @@ static void lio_target_call_delnpfromtpg(
3612
3613 se_tpg = &tpg->tpg_se_tpg;
3614 pr_debug("LIO_Target_ConfigFS: DEREGISTER -> %s TPGT: %hu"
3615 - " PORTAL: %s:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
3616 - tpg->tpgt, tpg_np->tpg_np->np_ip, tpg_np->tpg_np->np_port);
3617 + " PORTAL: %pISc:%hu\n", config_item_name(&se_tpg->se_tpg_wwn->wwn_group.cg_item),
3618 + tpg->tpgt, &tpg_np->tpg_np->np_sockaddr, tpg_np->tpg_np->np_port);
3619
3620 ret = iscsit_tpg_del_network_portal(tpg, tpg_np);
3621 if (ret < 0)
3622 diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
3623 index c3bccaddb592..39654e917cd8 100644
3624 --- a/drivers/target/iscsi/iscsi_target_login.c
3625 +++ b/drivers/target/iscsi/iscsi_target_login.c
3626 @@ -879,8 +879,8 @@ static void iscsi_handle_login_thread_timeout(unsigned long data)
3627 struct iscsi_np *np = (struct iscsi_np *) data;
3628
3629 spin_lock_bh(&np->np_thread_lock);
3630 - pr_err("iSCSI Login timeout on Network Portal %s:%hu\n",
3631 - np->np_ip, np->np_port);
3632 + pr_err("iSCSI Login timeout on Network Portal %pISc:%hu\n",
3633 + &np->np_sockaddr, np->np_port);
3634
3635 if (np->np_login_timer_flags & ISCSI_TF_STOP) {
3636 spin_unlock_bh(&np->np_thread_lock);
3637 @@ -1358,8 +1358,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
3638 spin_lock_bh(&np->np_thread_lock);
3639 if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
3640 spin_unlock_bh(&np->np_thread_lock);
3641 - pr_err("iSCSI Network Portal on %s:%hu currently not"
3642 - " active.\n", np->np_ip, np->np_port);
3643 + pr_err("iSCSI Network Portal on %pISc:%hu currently not"
3644 + " active.\n", &np->np_sockaddr, np->np_port);
3645 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
3646 ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
3647 goto new_sess_out;
3648 diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
3649 index 5e3295fe404d..3bc7d62c0a65 100644
3650 --- a/drivers/target/iscsi/iscsi_target_tpg.c
3651 +++ b/drivers/target/iscsi/iscsi_target_tpg.c
3652 @@ -460,7 +460,6 @@ static bool iscsit_tpg_check_network_portal(
3653 struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
3654 struct iscsi_portal_group *tpg,
3655 struct __kernel_sockaddr_storage *sockaddr,
3656 - char *ip_str,
3657 struct iscsi_tpg_np *tpg_np_parent,
3658 int network_transport)
3659 {
3660 @@ -470,8 +469,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
3661 if (!tpg_np_parent) {
3662 if (iscsit_tpg_check_network_portal(tpg->tpg_tiqn, sockaddr,
3663 network_transport)) {
3664 - pr_err("Network Portal: %s already exists on a"
3665 - " different TPG on %s\n", ip_str,
3666 + pr_err("Network Portal: %pISc already exists on a"
3667 + " different TPG on %s\n", sockaddr,
3668 tpg->tpg_tiqn->tiqn);
3669 return ERR_PTR(-EEXIST);
3670 }
3671 @@ -484,7 +483,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
3672 return ERR_PTR(-ENOMEM);
3673 }
3674
3675 - np = iscsit_add_np(sockaddr, ip_str, network_transport);
3676 + np = iscsit_add_np(sockaddr, network_transport);
3677 if (IS_ERR(np)) {
3678 kfree(tpg_np);
3679 return ERR_CAST(np);
3680 @@ -514,8 +513,8 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
3681 spin_unlock(&tpg_np_parent->tpg_np_parent_lock);
3682 }
3683
3684 - pr_debug("CORE[%s] - Added Network Portal: %s:%hu,%hu on %s\n",
3685 - tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
3686 + pr_debug("CORE[%s] - Added Network Portal: %pISc:%hu,%hu on %s\n",
3687 + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
3688 np->np_transport->name);
3689
3690 return tpg_np;
3691 @@ -528,8 +527,8 @@ static int iscsit_tpg_release_np(
3692 {
3693 iscsit_clear_tpg_np_login_thread(tpg_np, tpg, true);
3694
3695 - pr_debug("CORE[%s] - Removed Network Portal: %s:%hu,%hu on %s\n",
3696 - tpg->tpg_tiqn->tiqn, np->np_ip, np->np_port, tpg->tpgt,
3697 + pr_debug("CORE[%s] - Removed Network Portal: %pISc:%hu,%hu on %s\n",
3698 + tpg->tpg_tiqn->tiqn, &np->np_sockaddr, np->np_port, tpg->tpgt,
3699 np->np_transport->name);
3700
3701 tpg_np->tpg_np = NULL;
3702 diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
3703 index 95ff5bdecd71..28abda89ea98 100644
3704 --- a/drivers/target/iscsi/iscsi_target_tpg.h
3705 +++ b/drivers/target/iscsi/iscsi_target_tpg.h
3706 @@ -22,7 +22,7 @@ extern struct iscsi_node_attrib *iscsit_tpg_get_node_attrib(struct iscsi_session
3707 extern void iscsit_tpg_del_external_nps(struct iscsi_tpg_np *);
3708 extern struct iscsi_tpg_np *iscsit_tpg_locate_child_np(struct iscsi_tpg_np *, int);
3709 extern struct iscsi_tpg_np *iscsit_tpg_add_network_portal(struct iscsi_portal_group *,
3710 - struct __kernel_sockaddr_storage *, char *, struct iscsi_tpg_np *,
3711 + struct __kernel_sockaddr_storage *, struct iscsi_tpg_np *,
3712 int);
3713 extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
3714 struct iscsi_tpg_np *);
3715 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
3716 index a15411c79ae9..08aa7cc58694 100644
3717 --- a/drivers/target/target_core_pr.c
3718 +++ b/drivers/target/target_core_pr.c
3719 @@ -328,6 +328,9 @@ static int core_scsi3_pr_seq_non_holder(
3720 int legacy = 0; /* Act like a legacy device and return
3721 * RESERVATION CONFLICT on some CDBs */
3722
3723 + if (!se_sess->se_node_acl->device_list)
3724 + return 0;
3725 +
3726 se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
3727 /*
3728 * Determine if the registration should be ignored due to
3729 diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
3730 index 1738b1646988..9fc33e84439a 100644
3731 --- a/drivers/target/target_core_ua.c
3732 +++ b/drivers/target/target_core_ua.c
3733 @@ -48,7 +48,7 @@ target_scsi3_ua_check(struct se_cmd *cmd)
3734 return 0;
3735
3736 nacl = sess->se_node_acl;
3737 - if (!nacl)
3738 + if (!nacl || !nacl->device_list)
3739 return 0;
3740
3741 deve = nacl->device_list[cmd->orig_fe_lun];
3742 @@ -90,7 +90,7 @@ int core_scsi3_ua_allocate(
3743 /*
3744 * PASSTHROUGH OPS
3745 */
3746 - if (!nacl)
3747 + if (!nacl || !nacl->device_list)
3748 return -EINVAL;
3749
3750 ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
3751 @@ -208,7 +208,7 @@ void core_scsi3_ua_for_check_condition(
3752 return;
3753
3754 nacl = sess->se_node_acl;
3755 - if (!nacl)
3756 + if (!nacl || !nacl->device_list)
3757 return;
3758
3759 spin_lock_irq(&nacl->device_list_lock);
3760 @@ -276,7 +276,7 @@ int core_scsi3_ua_clear_for_request_sense(
3761 return -EINVAL;
3762
3763 nacl = sess->se_node_acl;
3764 - if (!nacl)
3765 + if (!nacl || !nacl->device_list)
3766 return -EINVAL;
3767
3768 spin_lock_irq(&nacl->device_list_lock);
3769 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
3770 index 8fd680ac941b..4609305a1591 100644
3771 --- a/drivers/target/target_core_xcopy.c
3772 +++ b/drivers/target/target_core_xcopy.c
3773 @@ -465,6 +465,8 @@ int target_xcopy_setup_pt(void)
3774 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
3775 INIT_LIST_HEAD(&xcopy_pt_sess.sess_list);
3776 INIT_LIST_HEAD(&xcopy_pt_sess.sess_acl_list);
3777 + INIT_LIST_HEAD(&xcopy_pt_sess.sess_cmd_list);
3778 + spin_lock_init(&xcopy_pt_sess.sess_cmd_lock);
3779
3780 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
3781 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
3782 @@ -666,7 +668,7 @@ static int target_xcopy_read_source(
3783 pr_debug("XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n",
3784 (unsigned long long)src_lba, src_sectors, length);
3785
3786 - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
3787 + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
3788 DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
3789 xop->src_pt_cmd = xpt_cmd;
3790
3791 @@ -726,7 +728,7 @@ static int target_xcopy_write_destination(
3792 pr_debug("XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n",
3793 (unsigned long long)dst_lba, dst_sectors, length);
3794
3795 - transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, NULL, length,
3796 + transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
3797 DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
3798 xop->dst_pt_cmd = xpt_cmd;
3799
3800 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
3801 index 16ed0b6c7f9c..6b6c6606af5f 100644
3802 --- a/drivers/tty/n_tty.c
3803 +++ b/drivers/tty/n_tty.c
3804 @@ -343,8 +343,7 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
3805 spin_lock_irqsave(&tty->ctrl_lock, flags);
3806 tty->ctrl_status |= TIOCPKT_FLUSHREAD;
3807 spin_unlock_irqrestore(&tty->ctrl_lock, flags);
3808 - if (waitqueue_active(&tty->link->read_wait))
3809 - wake_up_interruptible(&tty->link->read_wait);
3810 + wake_up_interruptible(&tty->link->read_wait);
3811 }
3812 }
3813
3814 @@ -1383,8 +1382,7 @@ handle_newline:
3815 put_tty_queue(c, ldata);
3816 smp_store_release(&ldata->canon_head, ldata->read_head);
3817 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
3818 - if (waitqueue_active(&tty->read_wait))
3819 - wake_up_interruptible_poll(&tty->read_wait, POLLIN);
3820 + wake_up_interruptible_poll(&tty->read_wait, POLLIN);
3821 return 0;
3822 }
3823 }
3824 @@ -1670,8 +1668,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
3825
3826 if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
3827 kill_fasync(&tty->fasync, SIGIO, POLL_IN);
3828 - if (waitqueue_active(&tty->read_wait))
3829 - wake_up_interruptible_poll(&tty->read_wait, POLLIN);
3830 + wake_up_interruptible_poll(&tty->read_wait, POLLIN);
3831 }
3832 }
3833
3834 @@ -1890,10 +1887,8 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
3835 }
3836
3837 /* The termios change make the tty ready for I/O */
3838 - if (waitqueue_active(&tty->write_wait))
3839 - wake_up_interruptible(&tty->write_wait);
3840 - if (waitqueue_active(&tty->read_wait))
3841 - wake_up_interruptible(&tty->read_wait);
3842 + wake_up_interruptible(&tty->write_wait);
3843 + wake_up_interruptible(&tty->read_wait);
3844 }
3845
3846 /**
3847 diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
3848 index 4506e405c8f3..b4fd8debf941 100644
3849 --- a/drivers/tty/serial/8250/8250_core.c
3850 +++ b/drivers/tty/serial/8250/8250_core.c
3851 @@ -339,6 +339,14 @@ configured less than Maximum supported fifo bytes */
3852 UART_FCR7_64BYTE,
3853 .flags = UART_CAP_FIFO,
3854 },
3855 + [PORT_RT2880] = {
3856 + .name = "Palmchip BK-3103",
3857 + .fifo_size = 16,
3858 + .tx_loadsz = 16,
3859 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
3860 + .rxtrig_bytes = {1, 4, 8, 14},
3861 + .flags = UART_CAP_FIFO,
3862 + },
3863 };
3864
3865 /* Uart divisor latch read */
3866 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
3867 index 763eb20fe321..0cc622afb67d 100644
3868 --- a/drivers/tty/serial/amba-pl011.c
3869 +++ b/drivers/tty/serial/amba-pl011.c
3870 @@ -1360,9 +1360,9 @@ static void pl011_tx_softirq(struct work_struct *work)
3871 struct uart_amba_port *uap =
3872 container_of(dwork, struct uart_amba_port, tx_softirq_work);
3873
3874 - spin_lock(&uap->port.lock);
3875 + spin_lock_irq(&uap->port.lock);
3876 while (pl011_tx_chars(uap)) ;
3877 - spin_unlock(&uap->port.lock);
3878 + spin_unlock_irq(&uap->port.lock);
3879 }
3880
3881 static void pl011_tx_irq_seen(struct uart_amba_port *uap)
3882 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
3883 index 5ca1dfb0561c..85323ff75edf 100644
3884 --- a/drivers/tty/serial/atmel_serial.c
3885 +++ b/drivers/tty/serial/atmel_serial.c
3886 @@ -2640,7 +2640,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
3887 ret = atmel_init_gpios(port, &pdev->dev);
3888 if (ret < 0) {
3889 dev_err(&pdev->dev, "Failed to initialize GPIOs.");
3890 - goto err;
3891 + goto err_clear_bit;
3892 }
3893
3894 ret = atmel_init_port(port, pdev);
3895 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3896 index e5695467598f..21837f14a403 100644
3897 --- a/drivers/tty/tty_io.c
3898 +++ b/drivers/tty/tty_io.c
3899 @@ -2144,8 +2144,24 @@ retry_open:
3900 if (!noctty &&
3901 current->signal->leader &&
3902 !current->signal->tty &&
3903 - tty->session == NULL)
3904 - __proc_set_tty(tty);
3905 + tty->session == NULL) {
3906 + /*
3907 + * Don't let a process that only has write access to the tty
3908 + * obtain the privileges associated with having a tty as
3909 + * controlling terminal (being able to reopen it with full
3910 + * access through /dev/tty, being able to perform pushback).
3911 + * Many distributions set the group of all ttys to "tty" and
3912 + * grant write-only access to all terminals for setgid tty
3913 + * binaries, which should not imply full privileges on all ttys.
3914 + *
3915 + * This could theoretically break old code that performs open()
3916 + * on a write-only file descriptor. In that case, it might be
3917 + * necessary to also permit this if
3918 + * inode_permission(inode, MAY_READ) == 0.
3919 + */
3920 + if (filp->f_mode & FMODE_READ)
3921 + __proc_set_tty(tty);
3922 + }
3923 spin_unlock_irq(&current->sighand->siglock);
3924 read_unlock(&tasklist_lock);
3925 tty_unlock(tty);
3926 @@ -2434,7 +2450,7 @@ static int fionbio(struct file *file, int __user *p)
3927 * Takes ->siglock() when updating signal->tty
3928 */
3929
3930 -static int tiocsctty(struct tty_struct *tty, int arg)
3931 +static int tiocsctty(struct tty_struct *tty, struct file *file, int arg)
3932 {
3933 int ret = 0;
3934
3935 @@ -2468,6 +2484,13 @@ static int tiocsctty(struct tty_struct *tty, int arg)
3936 goto unlock;
3937 }
3938 }
3939 +
3940 + /* See the comment in tty_open(). */
3941 + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) {
3942 + ret = -EPERM;
3943 + goto unlock;
3944 + }
3945 +
3946 proc_set_tty(tty);
3947 unlock:
3948 read_unlock(&tasklist_lock);
3949 @@ -2860,7 +2883,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3950 no_tty();
3951 return 0;
3952 case TIOCSCTTY:
3953 - return tiocsctty(tty, arg);
3954 + return tiocsctty(tty, file, arg);
3955 case TIOCGPGRP:
3956 return tiocgpgrp(tty, real_tty, p);
3957 case TIOCSPGRP:
3958 diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
3959 index 389f0e034259..fa774323ebda 100644
3960 --- a/drivers/usb/chipidea/ci_hdrc_imx.c
3961 +++ b/drivers/usb/chipidea/ci_hdrc_imx.c
3962 @@ -56,7 +56,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
3963 { .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
3964 { .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
3965 { .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
3966 - { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
3967 + { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
3968 { /* sentinel */ }
3969 };
3970 MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
3971 diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
3972 index 764f668d45a9..6e53c24fa1cb 100644
3973 --- a/drivers/usb/chipidea/udc.c
3974 +++ b/drivers/usb/chipidea/udc.c
3975 @@ -656,6 +656,44 @@ __acquires(hwep->lock)
3976 return 0;
3977 }
3978
3979 +static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
3980 +{
3981 + struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
3982 + int direction, retval = 0;
3983 + unsigned long flags;
3984 +
3985 + if (ep == NULL || hwep->ep.desc == NULL)
3986 + return -EINVAL;
3987 +
3988 + if (usb_endpoint_xfer_isoc(hwep->ep.desc))
3989 + return -EOPNOTSUPP;
3990 +
3991 + spin_lock_irqsave(hwep->lock, flags);
3992 +
3993 + if (value && hwep->dir == TX && check_transfer &&
3994 + !list_empty(&hwep->qh.queue) &&
3995 + !usb_endpoint_xfer_control(hwep->ep.desc)) {
3996 + spin_unlock_irqrestore(hwep->lock, flags);
3997 + return -EAGAIN;
3998 + }
3999 +
4000 + direction = hwep->dir;
4001 + do {
4002 + retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
4003 +
4004 + if (!value)
4005 + hwep->wedge = 0;
4006 +
4007 + if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
4008 + hwep->dir = (hwep->dir == TX) ? RX : TX;
4009 +
4010 + } while (hwep->dir != direction);
4011 +
4012 + spin_unlock_irqrestore(hwep->lock, flags);
4013 + return retval;
4014 +}
4015 +
4016 +
4017 /**
4018 * _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
4019 * @gadget: gadget
4020 @@ -1051,7 +1089,7 @@ __acquires(ci->lock)
4021 num += ci->hw_ep_max / 2;
4022
4023 spin_unlock(&ci->lock);
4024 - err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
4025 + err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
4026 spin_lock(&ci->lock);
4027 if (!err)
4028 isr_setup_status_phase(ci);
4029 @@ -1110,8 +1148,8 @@ delegate:
4030
4031 if (err < 0) {
4032 spin_unlock(&ci->lock);
4033 - if (usb_ep_set_halt(&hwep->ep))
4034 - dev_err(ci->dev, "error: ep_set_halt\n");
4035 + if (_ep_set_halt(&hwep->ep, 1, false))
4036 + dev_err(ci->dev, "error: _ep_set_halt\n");
4037 spin_lock(&ci->lock);
4038 }
4039 }
4040 @@ -1142,9 +1180,9 @@ __acquires(ci->lock)
4041 err = isr_setup_status_phase(ci);
4042 if (err < 0) {
4043 spin_unlock(&ci->lock);
4044 - if (usb_ep_set_halt(&hwep->ep))
4045 + if (_ep_set_halt(&hwep->ep, 1, false))
4046 dev_err(ci->dev,
4047 - "error: ep_set_halt\n");
4048 + "error: _ep_set_halt\n");
4049 spin_lock(&ci->lock);
4050 }
4051 }
4052 @@ -1390,41 +1428,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
4053 */
4054 static int ep_set_halt(struct usb_ep *ep, int value)
4055 {
4056 - struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
4057 - int direction, retval = 0;
4058 - unsigned long flags;
4059 -
4060 - if (ep == NULL || hwep->ep.desc == NULL)
4061 - return -EINVAL;
4062 -
4063 - if (usb_endpoint_xfer_isoc(hwep->ep.desc))
4064 - return -EOPNOTSUPP;
4065 -
4066 - spin_lock_irqsave(hwep->lock, flags);
4067 -
4068 -#ifndef STALL_IN
4069 - /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
4070 - if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
4071 - !list_empty(&hwep->qh.queue)) {
4072 - spin_unlock_irqrestore(hwep->lock, flags);
4073 - return -EAGAIN;
4074 - }
4075 -#endif
4076 -
4077 - direction = hwep->dir;
4078 - do {
4079 - retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
4080 -
4081 - if (!value)
4082 - hwep->wedge = 0;
4083 -
4084 - if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
4085 - hwep->dir = (hwep->dir == TX) ? RX : TX;
4086 -
4087 - } while (hwep->dir != direction);
4088 -
4089 - spin_unlock_irqrestore(hwep->lock, flags);
4090 - return retval;
4091 + return _ep_set_halt(ep, value, true);
4092 }
4093
4094 /**
4095 diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
4096 index b2a540b43f97..b9ddf0c1ffe5 100644
4097 --- a/drivers/usb/core/config.c
4098 +++ b/drivers/usb/core/config.c
4099 @@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
4100 cfgno, inum, asnum, ep->desc.bEndpointAddress);
4101 ep->ss_ep_comp.bmAttributes = 16;
4102 } else if (usb_endpoint_xfer_isoc(&ep->desc) &&
4103 - desc->bmAttributes > 2) {
4104 + USB_SS_MULT(desc->bmAttributes) > 3) {
4105 dev_warn(ddev, "Isoc endpoint has Mult of %d in "
4106 "config %d interface %d altsetting %d ep %d: "
4107 "setting to 3\n", desc->bmAttributes + 1,
4108 @@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
4109 }
4110
4111 if (usb_endpoint_xfer_isoc(&ep->desc))
4112 - max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
4113 + max_tx = (desc->bMaxBurst + 1) *
4114 + (USB_SS_MULT(desc->bmAttributes)) *
4115 usb_endpoint_maxp(&ep->desc);
4116 else if (usb_endpoint_xfer_int(&ep->desc))
4117 max_tx = usb_endpoint_maxp(&ep->desc) *
4118 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
4119 index d85abfed84cc..f5a381945db2 100644
4120 --- a/drivers/usb/core/quirks.c
4121 +++ b/drivers/usb/core/quirks.c
4122 @@ -54,6 +54,13 @@ static const struct usb_device_id usb_quirk_list[] = {
4123 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
4124 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
4125
4126 + /* Logitech ConferenceCam CC3000e */
4127 + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
4128 + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT },
4129 +
4130 + /* Logitech PTZ Pro Camera */
4131 + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
4132 +
4133 /* Logitech Quickcam Fusion */
4134 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
4135
4136 @@ -78,6 +85,12 @@ static const struct usb_device_id usb_quirk_list[] = {
4137 /* Philips PSC805 audio device */
4138 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME },
4139
4140 + /* Plantronics Audio 655 DSP */
4141 + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME },
4142 +
4143 + /* Plantronics Audio 648 USB */
4144 + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME },
4145 +
4146 /* Artisman Watchdog Dongle */
4147 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
4148 USB_QUIRK_CONFIG_INTF_STRINGS },
4149 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4150 index 9a8c936cd42c..41f841fa6c4d 100644
4151 --- a/drivers/usb/host/xhci-mem.c
4152 +++ b/drivers/usb/host/xhci-mem.c
4153 @@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
4154 * use Event Data TRBs, and we don't chain in a link TRB on short
4155 * transfers, we're basically dividing by 1.
4156 *
4157 - * xHCI 1.0 specification indicates that the Average TRB Length should
4158 - * be set to 8 for control endpoints.
4159 + * The xHCI 1.0 and 1.1 specifications indicate that the Average TRB
4160 + * Length should be set to 8 for control endpoints.
4161 */
4162 - if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
4163 + if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
4164 ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
4165 else
4166 ep_ctx->tx_info |=
4167 @@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
4168 int size;
4169 int i, j, num_ports;
4170
4171 - if (timer_pending(&xhci->cmd_timer))
4172 - del_timer_sync(&xhci->cmd_timer);
4173 + del_timer_sync(&xhci->cmd_timer);
4174
4175 /* Free the Event Ring Segment Table and the actual Event Ring */
4176 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
4177 @@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
4178
4179 INIT_LIST_HEAD(&xhci->cmd_list);
4180
4181 + /* init command timeout timer */
4182 + setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
4183 + (unsigned long)xhci);
4184 +
4185 page_size = readl(&xhci->op_regs->page_size);
4186 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
4187 "Supported page size register = 0x%x", page_size);
4188 @@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
4189 "Wrote ERST address to ir_set 0.");
4190 xhci_print_ir_set(xhci, 0);
4191
4192 - /* init command timeout timer */
4193 - setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
4194 - (unsigned long)xhci);
4195 -
4196 /*
4197 * XXX: Might need to set the Interrupter Moderation Register to
4198 * something other than the default (~1ms minimum between interrupts).
4199 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
4200 index b3a0a2275f5a..ad975a2975ca 100644
4201 --- a/drivers/usb/host/xhci-ring.c
4202 +++ b/drivers/usb/host/xhci-ring.c
4203 @@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
4204 ret = xhci_handshake(&xhci->op_regs->cmd_ring,
4205 CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
4206 if (ret < 0) {
4207 + /* we are about to kill xhci, give it one more chance */
4208 + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
4209 + &xhci->op_regs->cmd_ring);
4210 + udelay(1000);
4211 + ret = xhci_handshake(&xhci->op_regs->cmd_ring,
4212 + CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
4213 + if (ret == 0)
4214 + return 0;
4215 +
4216 xhci_err(xhci, "Stopped the command ring failed, "
4217 "maybe the host is dead\n");
4218 xhci->xhc_state |= XHCI_STATE_DYING;
4219 @@ -3041,9 +3050,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4220 struct xhci_td *td;
4221 struct scatterlist *sg;
4222 int num_sgs;
4223 - int trb_buff_len, this_sg_len, running_total;
4224 + int trb_buff_len, this_sg_len, running_total, ret;
4225 unsigned int total_packet_count;
4226 + bool zero_length_needed;
4227 bool first_trb;
4228 + int last_trb_num;
4229 u64 addr;
4230 bool more_trbs_coming;
4231
4232 @@ -3059,13 +3070,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4233 total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
4234 usb_endpoint_maxp(&urb->ep->desc));
4235
4236 - trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
4237 + ret = prepare_transfer(xhci, xhci->devs[slot_id],
4238 ep_index, urb->stream_id,
4239 num_trbs, urb, 0, mem_flags);
4240 - if (trb_buff_len < 0)
4241 - return trb_buff_len;
4242 + if (ret < 0)
4243 + return ret;
4244
4245 urb_priv = urb->hcpriv;
4246 +
4247 + /* Deal with URB_ZERO_PACKET - need one more td/trb */
4248 + zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
4249 + urb_priv->length == 2;
4250 + if (zero_length_needed) {
4251 + num_trbs++;
4252 + xhci_dbg(xhci, "Creating zero length td.\n");
4253 + ret = prepare_transfer(xhci, xhci->devs[slot_id],
4254 + ep_index, urb->stream_id,
4255 + 1, urb, 1, mem_flags);
4256 + if (ret < 0)
4257 + return ret;
4258 + }
4259 +
4260 td = urb_priv->td[0];
4261
4262 /*
4263 @@ -3095,6 +3120,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4264 trb_buff_len = urb->transfer_buffer_length;
4265
4266 first_trb = true;
4267 + last_trb_num = zero_length_needed ? 2 : 1;
4268 /* Queue the first TRB, even if it's zero-length */
4269 do {
4270 u32 field = 0;
4271 @@ -3112,12 +3138,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4272 /* Chain all the TRBs together; clear the chain bit in the last
4273 * TRB to indicate it's the last TRB in the chain.
4274 */
4275 - if (num_trbs > 1) {
4276 + if (num_trbs > last_trb_num) {
4277 field |= TRB_CHAIN;
4278 - } else {
4279 - /* FIXME - add check for ZERO_PACKET flag before this */
4280 + } else if (num_trbs == last_trb_num) {
4281 td->last_trb = ep_ring->enqueue;
4282 field |= TRB_IOC;
4283 + } else if (zero_length_needed && num_trbs == 1) {
4284 + trb_buff_len = 0;
4285 + urb_priv->td[1]->last_trb = ep_ring->enqueue;
4286 + field |= TRB_IOC;
4287 }
4288
4289 /* Only set interrupt on short packet for IN endpoints */
4290 @@ -3179,7 +3208,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4291 if (running_total + trb_buff_len > urb->transfer_buffer_length)
4292 trb_buff_len =
4293 urb->transfer_buffer_length - running_total;
4294 - } while (running_total < urb->transfer_buffer_length);
4295 + } while (num_trbs > 0);
4296
4297 check_trb_math(urb, num_trbs, running_total);
4298 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4299 @@ -3197,7 +3226,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4300 int num_trbs;
4301 struct xhci_generic_trb *start_trb;
4302 bool first_trb;
4303 + int last_trb_num;
4304 bool more_trbs_coming;
4305 + bool zero_length_needed;
4306 int start_cycle;
4307 u32 field, length_field;
4308
4309 @@ -3228,7 +3259,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4310 num_trbs++;
4311 running_total += TRB_MAX_BUFF_SIZE;
4312 }
4313 - /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
4314
4315 ret = prepare_transfer(xhci, xhci->devs[slot_id],
4316 ep_index, urb->stream_id,
4317 @@ -3237,6 +3267,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4318 return ret;
4319
4320 urb_priv = urb->hcpriv;
4321 +
4322 + /* Deal with URB_ZERO_PACKET - need one more td/trb */
4323 + zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
4324 + urb_priv->length == 2;
4325 + if (zero_length_needed) {
4326 + num_trbs++;
4327 + xhci_dbg(xhci, "Creating zero length td.\n");
4328 + ret = prepare_transfer(xhci, xhci->devs[slot_id],
4329 + ep_index, urb->stream_id,
4330 + 1, urb, 1, mem_flags);
4331 + if (ret < 0)
4332 + return ret;
4333 + }
4334 +
4335 td = urb_priv->td[0];
4336
4337 /*
4338 @@ -3258,7 +3302,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4339 trb_buff_len = urb->transfer_buffer_length;
4340
4341 first_trb = true;
4342 -
4343 + last_trb_num = zero_length_needed ? 2 : 1;
4344 /* Queue the first TRB, even if it's zero-length */
4345 do {
4346 u32 remainder = 0;
4347 @@ -3275,12 +3319,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4348 /* Chain all the TRBs together; clear the chain bit in the last
4349 * TRB to indicate it's the last TRB in the chain.
4350 */
4351 - if (num_trbs > 1) {
4352 + if (num_trbs > last_trb_num) {
4353 field |= TRB_CHAIN;
4354 - } else {
4355 - /* FIXME - add check for ZERO_PACKET flag before this */
4356 + } else if (num_trbs == last_trb_num) {
4357 td->last_trb = ep_ring->enqueue;
4358 field |= TRB_IOC;
4359 + } else if (zero_length_needed && num_trbs == 1) {
4360 + trb_buff_len = 0;
4361 + urb_priv->td[1]->last_trb = ep_ring->enqueue;
4362 + field |= TRB_IOC;
4363 }
4364
4365 /* Only set interrupt on short packet for IN endpoints */
4366 @@ -3318,7 +3365,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4367 trb_buff_len = urb->transfer_buffer_length - running_total;
4368 if (trb_buff_len > TRB_MAX_BUFF_SIZE)
4369 trb_buff_len = TRB_MAX_BUFF_SIZE;
4370 - } while (running_total < urb->transfer_buffer_length);
4371 + } while (num_trbs > 0);
4372
4373 check_trb_math(urb, num_trbs, running_total);
4374 giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
4375 @@ -3385,8 +3432,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
4376 if (start_cycle == 0)
4377 field |= 0x1;
4378
4379 - /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
4380 - if (xhci->hci_version == 0x100) {
4381 + /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
4382 + if (xhci->hci_version >= 0x100) {
4383 if (urb->transfer_buffer_length > 0) {
4384 if (setup->bRequestType & USB_DIR_IN)
4385 field |= TRB_TX_TYPE(TRB_DATA_IN);
4386 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4387 index c502c2277aeb..26f62b2b33f8 100644
4388 --- a/drivers/usb/host/xhci.c
4389 +++ b/drivers/usb/host/xhci.c
4390 @@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
4391 "waited %u microseconds.\n",
4392 XHCI_MAX_HALT_USEC);
4393 if (!ret)
4394 - xhci->xhc_state &= ~XHCI_STATE_HALTED;
4395 + xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
4396 +
4397 return ret;
4398 }
4399
4400 @@ -683,8 +684,11 @@ void xhci_stop(struct usb_hcd *hcd)
4401 u32 temp;
4402 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
4403
4404 + mutex_lock(&xhci->mutex);
4405 +
4406 if (!usb_hcd_is_primary_hcd(hcd)) {
4407 xhci_only_stop_hcd(xhci->shared_hcd);
4408 + mutex_unlock(&xhci->mutex);
4409 return;
4410 }
4411
4412 @@ -723,6 +727,7 @@ void xhci_stop(struct usb_hcd *hcd)
4413 xhci_dbg_trace(xhci, trace_xhci_dbg_init,
4414 "xhci_stop completed - status = %x",
4415 readl(&xhci->op_regs->status));
4416 + mutex_unlock(&xhci->mutex);
4417 }
4418
4419 /*
4420 @@ -1340,6 +1345,11 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
4421
4422 if (usb_endpoint_xfer_isoc(&urb->ep->desc))
4423 size = urb->number_of_packets;
4424 + else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
4425 + urb->transfer_buffer_length > 0 &&
4426 + urb->transfer_flags & URB_ZERO_PACKET &&
4427 + !(urb->transfer_buffer_length % usb_endpoint_maxp(&urb->ep->desc)))
4428 + size = 2;
4429 else
4430 size = 1;
4431
4432 @@ -3790,6 +3800,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
4433
4434 mutex_lock(&xhci->mutex);
4435
4436 + if (xhci->xhc_state) /* dying or halted */
4437 + goto out;
4438 +
4439 if (!udev->slot_id) {
4440 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
4441 "Bad Slot ID %d", udev->slot_id);
4442 diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
4443 index 3ad5d19e4d04..23c794813e6a 100644
4444 --- a/drivers/usb/misc/chaoskey.c
4445 +++ b/drivers/usb/misc/chaoskey.c
4446 @@ -472,7 +472,7 @@ static int chaoskey_rng_read(struct hwrng *rng, void *data,
4447 if (this_time > max)
4448 this_time = max;
4449
4450 - memcpy(data, dev->buf, this_time);
4451 + memcpy(data, dev->buf + dev->used, this_time);
4452
4453 dev->used += this_time;
4454
4455 diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
4456 index 8bd8c5e26921..d5a140745640 100644
4457 --- a/drivers/usb/musb/musb_cppi41.c
4458 +++ b/drivers/usb/musb/musb_cppi41.c
4459 @@ -614,7 +614,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
4460 {
4461 struct musb *musb = controller->musb;
4462 struct device *dev = musb->controller;
4463 - struct device_node *np = dev->of_node;
4464 + struct device_node *np = dev->parent->of_node;
4465 struct cppi41_dma_channel *cppi41_channel;
4466 int count;
4467 int i;
4468 @@ -664,7 +664,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
4469 musb_dma->status = MUSB_DMA_STATUS_FREE;
4470 musb_dma->max_len = SZ_4M;
4471
4472 - dc = dma_request_slave_channel(dev, str);
4473 + dc = dma_request_slave_channel(dev->parent, str);
4474 if (!dc) {
4475 dev_err(dev, "Failed to request %s.\n", str);
4476 ret = -EPROBE_DEFER;
4477 @@ -694,7 +694,7 @@ struct dma_controller *dma_controller_create(struct musb *musb,
4478 struct cppi41_dma_controller *controller;
4479 int ret = 0;
4480
4481 - if (!musb->controller->of_node) {
4482 + if (!musb->controller->parent->of_node) {
4483 dev_err(musb->controller, "Need DT for the DMA engine.\n");
4484 return NULL;
4485 }
4486 diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
4487 index 65d931a28a14..dcac5e7f19e0 100644
4488 --- a/drivers/usb/musb/musb_dsps.c
4489 +++ b/drivers/usb/musb/musb_dsps.c
4490 @@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
4491
4492 dsps_writel(reg_base, wrp->epintr_set, epmask);
4493 dsps_writel(reg_base, wrp->coreintr_set, coremask);
4494 - /* start polling for ID change. */
4495 - mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
4496 + /* start polling for ID change in dual-role idle mode */
4497 + if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
4498 + musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
4499 + mod_timer(&glue->timer, jiffies +
4500 + msecs_to_jiffies(wrp->poll_timeout));
4501 dsps_musb_try_idle(musb, 0);
4502 }
4503
4504 diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
4505 index deee68eafb72..0cd85f2ccddd 100644
4506 --- a/drivers/usb/phy/phy-generic.c
4507 +++ b/drivers/usb/phy/phy-generic.c
4508 @@ -230,7 +230,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
4509 clk_rate = pdata->clk_rate;
4510 needs_vcc = pdata->needs_vcc;
4511 if (gpio_is_valid(pdata->gpio_reset)) {
4512 - err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
4513 + err = devm_gpio_request_one(dev, pdata->gpio_reset,
4514 + GPIOF_ACTIVE_LOW,
4515 dev_name(dev));
4516 if (!err)
4517 nop->gpiod_reset =
4518 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
4519 index 876423b8892c..7c8eb4c4c175 100644
4520 --- a/drivers/usb/serial/option.c
4521 +++ b/drivers/usb/serial/option.c
4522 @@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
4523 #define ZTE_PRODUCT_MF622 0x0001
4524 #define ZTE_PRODUCT_MF628 0x0015
4525 #define ZTE_PRODUCT_MF626 0x0031
4526 +#define ZTE_PRODUCT_ZM8620_X 0x0396
4527 +#define ZTE_PRODUCT_ME3620_MBIM 0x0426
4528 +#define ZTE_PRODUCT_ME3620_X 0x1432
4529 +#define ZTE_PRODUCT_ME3620_L 0x1433
4530 #define ZTE_PRODUCT_AC2726 0xfff1
4531 #define ZTE_PRODUCT_MG880 0xfffd
4532 #define ZTE_PRODUCT_CDMA_TECH 0xfffe
4533 @@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
4534 .sendsetup = BIT(1) | BIT(2) | BIT(3),
4535 };
4536
4537 +static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
4538 + .reserved = BIT(2) | BIT(3) | BIT(4),
4539 +};
4540 +
4541 +static const struct option_blacklist_info zte_me3620_xl_blacklist = {
4542 + .reserved = BIT(3) | BIT(4) | BIT(5),
4543 +};
4544 +
4545 +static const struct option_blacklist_info zte_zm8620_x_blacklist = {
4546 + .reserved = BIT(3) | BIT(4) | BIT(5),
4547 +};
4548 +
4549 static const struct option_blacklist_info huawei_cdc12_blacklist = {
4550 .reserved = BIT(1) | BIT(2),
4551 };
4552 @@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
4553 .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
4554 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
4555 .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
4556 + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
4557 + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
4558 + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
4559 + .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
4560 + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
4561 + .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
4562 + { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
4563 + .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
4564 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
4565 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
4566 { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
4567 diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
4568 index 6c3734d2b45a..d3ea90bef84d 100644
4569 --- a/drivers/usb/serial/whiteheat.c
4570 +++ b/drivers/usb/serial/whiteheat.c
4571 @@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
4572 static int whiteheat_firmware_attach(struct usb_serial *serial);
4573
4574 /* function prototypes for the Connect Tech WhiteHEAT serial converter */
4575 +static int whiteheat_probe(struct usb_serial *serial,
4576 + const struct usb_device_id *id);
4577 static int whiteheat_attach(struct usb_serial *serial);
4578 static void whiteheat_release(struct usb_serial *serial);
4579 static int whiteheat_port_probe(struct usb_serial_port *port);
4580 @@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
4581 .description = "Connect Tech - WhiteHEAT",
4582 .id_table = id_table_std,
4583 .num_ports = 4,
4584 + .probe = whiteheat_probe,
4585 .attach = whiteheat_attach,
4586 .release = whiteheat_release,
4587 .port_probe = whiteheat_port_probe,
4588 @@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
4589 /*****************************************************************************
4590 * Connect Tech's White Heat serial driver functions
4591 *****************************************************************************/
4592 +
4593 +static int whiteheat_probe(struct usb_serial *serial,
4594 + const struct usb_device_id *id)
4595 +{
4596 + struct usb_host_interface *iface_desc;
4597 + struct usb_endpoint_descriptor *endpoint;
4598 + size_t num_bulk_in = 0;
4599 + size_t num_bulk_out = 0;
4600 + size_t min_num_bulk;
4601 + unsigned int i;
4602 +
4603 + iface_desc = serial->interface->cur_altsetting;
4604 +
4605 + for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
4606 + endpoint = &iface_desc->endpoint[i].desc;
4607 + if (usb_endpoint_is_bulk_in(endpoint))
4608 + ++num_bulk_in;
4609 + if (usb_endpoint_is_bulk_out(endpoint))
4610 + ++num_bulk_out;
4611 + }
4612 +
4613 + min_num_bulk = COMMAND_PORT + 1;
4614 + if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
4615 + return -ENODEV;
4616 +
4617 + return 0;
4618 +}
4619 +
4620 static int whiteheat_attach(struct usb_serial *serial)
4621 {
4622 struct usb_serial_port *command_port;
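The probe callback added above runs before whiteheat_attach() touches any per-port state, rejecting devices whose descriptors do not carry enough bulk endpoints for COMMAND_PORT + 1 ports. The same counting logic, restated as a stand-alone sketch (the endpoint table is invented, and COMMAND_PORT is assumed here to be 4 as in the driver):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define COMMAND_PORT 4            /* assumed command-port index */

    struct endpoint { bool bulk; bool in; };

    /* Return 0 if enough bulk endpoints exist for COMMAND_PORT + 1 ports. */
    static int check_endpoints(const struct endpoint *eps, size_t n)
    {
        size_t num_bulk_in = 0, num_bulk_out = 0;

        for (size_t i = 0; i < n; i++) {
            if (!eps[i].bulk)
                continue;
            if (eps[i].in)
                num_bulk_in++;
            else
                num_bulk_out++;
        }
        return (num_bulk_in < COMMAND_PORT + 1 ||
                num_bulk_out < COMMAND_PORT + 1) ? -1 : 0;
    }

    int main(void)
    {
        struct endpoint ok[10];
        for (int i = 0; i < 10; i++)
            ok[i] = (struct endpoint){ .bulk = true, .in = (i % 2) == 0 };
        printf("%d\n", check_endpoints(ok, 10));  /* 0: 5 in + 5 out suffice */
        printf("%d\n", check_endpoints(ok, 4));   /* -1: too few endpoints  */
        return 0;
    }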
4623 diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
4624 index 109462303087..d1e1e1704da1 100644
4625 --- a/drivers/video/fbdev/Kconfig
4626 +++ b/drivers/video/fbdev/Kconfig
4627 @@ -298,7 +298,7 @@ config FB_ARMCLCD
4628
4629 # Helper logic selected only by the ARM Versatile platform family.
4630 config PLAT_VERSATILE_CLCD
4631 - def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS
4632 + def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
4633 depends on ARM
4634 depends on FB_ARMCLCD && FB=y
4635
4636 diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
4637 index a29afb37c48c..47bd8a14d01f 100644
4638 --- a/drivers/watchdog/sunxi_wdt.c
4639 +++ b/drivers/watchdog/sunxi_wdt.c
4640 @@ -184,7 +184,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
4641 /* Set system reset function */
4642 reg = readl(wdt_base + regs->wdt_cfg);
4643 reg &= ~(regs->wdt_reset_mask);
4644 - reg |= ~(regs->wdt_reset_val);
4645 + reg |= regs->wdt_reset_val;
4646 writel(reg, wdt_base + regs->wdt_cfg);
4647
4648 /* Enable watchdog */
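The one-character sunxi_wdt fix above is easy to misread: `reg |= ~(regs->wdt_reset_val)` ORs in the bitwise complement of the value, setting nearly every bit of the config register instead of just the reset-function field cleared on the previous line. A minimal userspace sketch of the intended read-modify-write, with hypothetical mask and value constants:

    #include <stdint.h>
    #include <stdio.h>

    #define WDT_RESET_MASK 0x03u   /* hypothetical field mask */
    #define WDT_RESET_VAL  0x01u   /* hypothetical "reset system" value */

    int main(void)
    {
        uint32_t reg = 0xf0u;      /* simulated config register contents */

        /* Correct: clear the field, then OR in the new value. */
        uint32_t good = (reg & ~WDT_RESET_MASK) | WDT_RESET_VAL;

        /* Buggy: ORing the complement sets every bit outside the value. */
        uint32_t bad = (reg & ~WDT_RESET_MASK) | ~WDT_RESET_VAL;

        printf("good=0x%08x bad=0x%08x\n", good, bad); /* 000000f1 vs fffffffe */
        return 0;
    }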
4649 diff --git a/fs/block_dev.c b/fs/block_dev.c
4650 index c7e4163ede87..ccfd31f1df3a 100644
4651 --- a/fs/block_dev.c
4652 +++ b/fs/block_dev.c
4653 @@ -1234,6 +1234,13 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
4654 goto out_clear;
4655 }
4656 bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
4657 + /*
4658 + * If the partition is not aligned on a page
4659 + * boundary, we can't do dax I/O to it.
4660 + */
4661 + if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) ||
4662 + (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
4663 + bdev->bd_inode->i_flags &= ~S_DAX;
4664 }
4665 } else {
4666 if (bdev->bd_contains == bdev) {
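The block_dev hunk clears S_DAX when a partition's start sector or length is not a whole number of pages, since DAX maps storage in page-sized, page-aligned units. A small sketch of the alignment test under the usual 4 KiB-page, 512-byte-sector assumptions (the partition geometry is invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define SECTOR_SIZE 512UL

    /* DAX-capable only if the partition starts and ends on a page boundary. */
    static bool partition_page_aligned(unsigned long start_sect,
                                       unsigned long nr_sects)
    {
        unsigned long sectors_per_page = PAGE_SIZE / SECTOR_SIZE; /* 8 */
        return (start_sect % sectors_per_page) == 0 &&
               (nr_sects % sectors_per_page) == 0;
    }

    int main(void)
    {
        printf("%d\n", partition_page_aligned(2048, 409600)); /* 1: aligned */
        printf("%d\n", partition_page_aligned(63, 409600));   /* 0: legacy CHS start */
        return 0;
    }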
4667 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
4668 index c32d226bfecc..885f533a34d9 100644
4669 --- a/fs/btrfs/extent_io.c
4670 +++ b/fs/btrfs/extent_io.c
4671 @@ -2795,7 +2795,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
4672 bio_end_io_t end_io_func,
4673 int mirror_num,
4674 unsigned long prev_bio_flags,
4675 - unsigned long bio_flags)
4676 + unsigned long bio_flags,
4677 + bool force_bio_submit)
4678 {
4679 int ret = 0;
4680 struct bio *bio;
4681 @@ -2813,6 +2814,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
4682 contig = bio_end_sector(bio) == sector;
4683
4684 if (prev_bio_flags != bio_flags || !contig ||
4685 + force_bio_submit ||
4686 merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
4687 bio_add_page(bio, page, page_size, offset) < page_size) {
4688 ret = submit_one_bio(rw, bio, mirror_num,
4689 @@ -2906,7 +2908,8 @@ static int __do_readpage(struct extent_io_tree *tree,
4690 get_extent_t *get_extent,
4691 struct extent_map **em_cached,
4692 struct bio **bio, int mirror_num,
4693 - unsigned long *bio_flags, int rw)
4694 + unsigned long *bio_flags, int rw,
4695 + u64 *prev_em_start)
4696 {
4697 struct inode *inode = page->mapping->host;
4698 u64 start = page_offset(page);
4699 @@ -2954,6 +2957,7 @@ static int __do_readpage(struct extent_io_tree *tree,
4700 }
4701 while (cur <= end) {
4702 unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
4703 + bool force_bio_submit = false;
4704
4705 if (cur >= last_byte) {
4706 char *userpage;
4707 @@ -3004,6 +3008,49 @@ static int __do_readpage(struct extent_io_tree *tree,
4708 block_start = em->block_start;
4709 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4710 block_start = EXTENT_MAP_HOLE;
4711 +
4712 + /*
4713 + * If we have a file range that points to a compressed extent
4714 + * and it's followed by a consecutive file range that points to
4715 + * the same compressed extent (possibly with a different
4716 + * offset and/or length, so it either points to the whole extent
4717 + * or only part of it), we must make sure we do not submit a
4718 + * single bio to populate the pages for the 2 ranges because
4719 + * this makes the compressed extent read zero out the pages
4720 + * belonging to the 2nd range. Imagine the following scenario:
4721 + *
4722 + * File layout
4723 + * [0 - 8K] [8K - 24K]
4724 + * | |
4725 + * | |
4726 + * points to extent X, points to extent X,
4727 + * offset 4K, length of 8K offset 0, length 16K
4728 + *
4729 + * [extent X, compressed length = 4K uncompressed length = 16K]
4730 + *
4731 + * If the bio to read the compressed extent covers both ranges,
4732 + * it will decompress extent X into the pages belonging to the
4733 + * first range and then it will stop, zeroing out the remaining
4734 + * pages that belong to the other range that points to extent X.
4735 + * So here we make sure we submit 2 bios, one for the first
4736 + * range and another one for the second range. Both will target
4737 + * the same physical extent from disk, but we can't currently
4738 + * make the compressed bio endio callback populate the pages
4739 + * for both ranges because each compressed bio is tightly
4740 + * coupled with a single extent map, and each range can have
4741 + * an extent map with a different offset value relative to the
4742 + * uncompressed data of our extent and different lengths. This
4743 + * is a corner case so we prioritize correctness over
4744 + * non-optimal behavior (submitting 2 bios for the same extent).
4745 + */
4746 + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
4747 + prev_em_start && *prev_em_start != (u64)-1 &&
4748 + *prev_em_start != em->orig_start)
4749 + force_bio_submit = true;
4750 +
4751 + if (prev_em_start)
4752 + *prev_em_start = em->orig_start;
4753 +
4754 free_extent_map(em);
4755 em = NULL;
4756
4757 @@ -3053,7 +3100,8 @@ static int __do_readpage(struct extent_io_tree *tree,
4758 bdev, bio, pnr,
4759 end_bio_extent_readpage, mirror_num,
4760 *bio_flags,
4761 - this_bio_flag);
4762 + this_bio_flag,
4763 + force_bio_submit);
4764 if (!ret) {
4765 nr++;
4766 *bio_flags = this_bio_flag;
4767 @@ -3080,7 +3128,8 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
4768 get_extent_t *get_extent,
4769 struct extent_map **em_cached,
4770 struct bio **bio, int mirror_num,
4771 - unsigned long *bio_flags, int rw)
4772 + unsigned long *bio_flags, int rw,
4773 + u64 *prev_em_start)
4774 {
4775 struct inode *inode;
4776 struct btrfs_ordered_extent *ordered;
4777 @@ -3100,7 +3149,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
4778
4779 for (index = 0; index < nr_pages; index++) {
4780 __do_readpage(tree, pages[index], get_extent, em_cached, bio,
4781 - mirror_num, bio_flags, rw);
4782 + mirror_num, bio_flags, rw, prev_em_start);
4783 page_cache_release(pages[index]);
4784 }
4785 }
4786 @@ -3110,7 +3159,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
4787 int nr_pages, get_extent_t *get_extent,
4788 struct extent_map **em_cached,
4789 struct bio **bio, int mirror_num,
4790 - unsigned long *bio_flags, int rw)
4791 + unsigned long *bio_flags, int rw,
4792 + u64 *prev_em_start)
4793 {
4794 u64 start = 0;
4795 u64 end = 0;
4796 @@ -3131,7 +3181,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
4797 index - first_index, start,
4798 end, get_extent, em_cached,
4799 bio, mirror_num, bio_flags,
4800 - rw);
4801 + rw, prev_em_start);
4802 start = page_start;
4803 end = start + PAGE_CACHE_SIZE - 1;
4804 first_index = index;
4805 @@ -3142,7 +3192,8 @@ static void __extent_readpages(struct extent_io_tree *tree,
4806 __do_contiguous_readpages(tree, &pages[first_index],
4807 index - first_index, start,
4808 end, get_extent, em_cached, bio,
4809 - mirror_num, bio_flags, rw);
4810 + mirror_num, bio_flags, rw,
4811 + prev_em_start);
4812 }
4813
4814 static int __extent_read_full_page(struct extent_io_tree *tree,
4815 @@ -3168,7 +3219,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
4816 }
4817
4818 ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
4819 - bio_flags, rw);
4820 + bio_flags, rw, NULL);
4821 return ret;
4822 }
4823
4824 @@ -3194,7 +3245,7 @@ int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
4825 int ret;
4826
4827 ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
4828 - &bio_flags, READ);
4829 + &bio_flags, READ, NULL);
4830 if (bio)
4831 ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
4832 return ret;
4833 @@ -3447,7 +3498,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
4834 sector, iosize, pg_offset,
4835 bdev, &epd->bio, max_nr,
4836 end_bio_extent_writepage,
4837 - 0, 0, 0);
4838 + 0, 0, 0, false);
4839 if (ret)
4840 SetPageError(page);
4841 }
4842 @@ -3749,7 +3800,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
4843 ret = submit_extent_page(rw, tree, p, offset >> 9,
4844 PAGE_CACHE_SIZE, 0, bdev, &epd->bio,
4845 -1, end_bio_extent_buffer_writepage,
4846 - 0, epd->bio_flags, bio_flags);
4847 + 0, epd->bio_flags, bio_flags, false);
4848 epd->bio_flags = bio_flags;
4849 if (ret) {
4850 set_btree_ioerr(p);
4851 @@ -4153,6 +4204,7 @@ int extent_readpages(struct extent_io_tree *tree,
4852 struct page *page;
4853 struct extent_map *em_cached = NULL;
4854 int nr = 0;
4855 + u64 prev_em_start = (u64)-1;
4856
4857 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
4858 page = list_entry(pages->prev, struct page, lru);
4859 @@ -4169,12 +4221,12 @@ int extent_readpages(struct extent_io_tree *tree,
4860 if (nr < ARRAY_SIZE(pagepool))
4861 continue;
4862 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4863 - &bio, 0, &bio_flags, READ);
4864 + &bio, 0, &bio_flags, READ, &prev_em_start);
4865 nr = 0;
4866 }
4867 if (nr)
4868 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
4869 - &bio, 0, &bio_flags, READ);
4870 + &bio, 0, &bio_flags, READ, &prev_em_start);
4871
4872 if (em_cached)
4873 free_extent_map(em_cached);
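The thread through all of the extent_io.c hunks is one piece of state: extent_readpages() seeds prev_em_start with (u64)-1 and threads it down to __do_readpage(), which forces a bio boundary whenever a compressed range's extent map does not share orig_start with the one before it — exactly the situation in the corruption scenario the long comment describes, where neighbouring file ranges reference the same compressed extent at different offsets. A compact model of that predicate, with the readahead loop reduced to an array of invented range descriptors:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t em_orig_start; bool compressed; };

    int main(void)
    {
        struct range ranges[] = {
            { 0,    true }, /* first range: no previous em, never forced  */
            { 4096, true }, /* different orig_start -> force a new bio    */
            { 4096, true }, /* same orig_start as before -> may share bio */
        };
        uint64_t prev_em_start = UINT64_MAX; /* the (u64)-1 sentinel */

        for (int i = 0; i < 3; i++) {
            bool force = ranges[i].compressed &&
                         prev_em_start != UINT64_MAX &&
                         prev_em_start != ranges[i].em_orig_start;
            prev_em_start = ranges[i].em_orig_start;
            printf("range %d: force_bio_submit=%d\n", i, force); /* 0, 1, 0 */
        }
        return 0;
    }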
4874 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4875 index 8bb013672aee..e3b39f0c4666 100644
4876 --- a/fs/btrfs/inode.c
4877 +++ b/fs/btrfs/inode.c
4878 @@ -5035,7 +5035,8 @@ void btrfs_evict_inode(struct inode *inode)
4879 goto no_delete;
4880 }
4881 /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
4882 - btrfs_wait_ordered_range(inode, 0, (u64)-1);
4883 + if (!special_file(inode->i_mode))
4884 + btrfs_wait_ordered_range(inode, 0, (u64)-1);
4885
4886 btrfs_free_io_failure_record(inode, 0, (u64)-1);
4887
4888 diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
4889 index aa0dc2573374..afa09fce8151 100644
4890 --- a/fs/cifs/cifsencrypt.c
4891 +++ b/fs/cifs/cifsencrypt.c
4892 @@ -444,6 +444,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
4893 return 0;
4894 }
4895
4896 +/* The server has provided AV pairs/target info in the type 2 challenge
4897 + * packet, and we have plucked it out and stored it within the SMB session.
4898 + * We parse that blob here to find the server-given timestamp to use
4899 + * in NTLMv2 authentication (or the local current time as a default
4900 + * in case of failure).
4901 + */
4902 +static __le64
4903 +find_timestamp(struct cifs_ses *ses)
4904 +{
4905 + unsigned int attrsize;
4906 + unsigned int type;
4907 + unsigned int onesize = sizeof(struct ntlmssp2_name);
4908 + unsigned char *blobptr;
4909 + unsigned char *blobend;
4910 + struct ntlmssp2_name *attrptr;
4911 +
4912 + if (!ses->auth_key.len || !ses->auth_key.response)
4913 + return 0;
4914 +
4915 + blobptr = ses->auth_key.response;
4916 + blobend = blobptr + ses->auth_key.len;
4917 +
4918 + while (blobptr + onesize < blobend) {
4919 + attrptr = (struct ntlmssp2_name *) blobptr;
4920 + type = le16_to_cpu(attrptr->type);
4921 + if (type == NTLMSSP_AV_EOL)
4922 + break;
4923 + blobptr += 2; /* advance attr type */
4924 + attrsize = le16_to_cpu(attrptr->length);
4925 + blobptr += 2; /* advance attr size */
4926 + if (blobptr + attrsize > blobend)
4927 + break;
4928 + if (type == NTLMSSP_AV_TIMESTAMP) {
4929 + if (attrsize == sizeof(u64))
4930 + return *((__le64 *)blobptr);
4931 + }
4932 + blobptr += attrsize; /* advance attr value */
4933 + }
4934 +
4935 + return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
4936 +}
4937 +
4938 static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
4939 const struct nls_table *nls_cp)
4940 {
4941 @@ -641,6 +683,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4942 struct ntlmv2_resp *ntlmv2;
4943 char ntlmv2_hash[16];
4944 unsigned char *tiblob = NULL; /* target info blob */
4945 + __le64 rsp_timestamp;
4946
4947 if (ses->server->negflavor == CIFS_NEGFLAVOR_EXTENDED) {
4948 if (!ses->domainName) {
4949 @@ -659,6 +702,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4950 }
4951 }
4952
4953 + /* Must be within 5 minutes of the server (or in range +/-2h
4954 + * in case of Mac OS X), so simply carry over server timestamp
4955 + * (as Windows 7 does)
4956 + */
4957 + rsp_timestamp = find_timestamp(ses);
4958 +
4959 baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
4960 tilen = ses->auth_key.len;
4961 tiblob = ses->auth_key.response;
4962 @@ -675,8 +724,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4963 (ses->auth_key.response + CIFS_SESS_KEY_SIZE);
4964 ntlmv2->blob_signature = cpu_to_le32(0x00000101);
4965 ntlmv2->reserved = 0;
4966 - /* Must be within 5 minutes of the server */
4967 - ntlmv2->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
4968 + ntlmv2->time = rsp_timestamp;
4969 +
4970 get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
4971 ntlmv2->reserved2 = 0;
4972
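find_timestamp() walks the NTLMSSP target-info blob, a sequence of little-endian AV pairs — 16-bit type, 16-bit length, then the value — terminated by an EOL pair. A stand-alone sketch of the same TLV walk over a byte buffer (AV type numbers per MS-NLMP; the sample blob is invented, and a real implementation must also byte-swap on big-endian hosts):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define AV_EOL       0
    #define AV_TIMESTAMP 7   /* MsvAvTimestamp in MS-NLMP */

    /* Return the timestamp AV pair's value, or 0 if absent (LE host). */
    static uint64_t find_timestamp(const uint8_t *blob, size_t len)
    {
        const uint8_t *p = blob, *end = blob + len;

        while (p + 4 <= end) {
            uint16_t type, size;
            memcpy(&type, p, 2);      /* advance attr type */
            memcpy(&size, p + 2, 2);  /* advance attr size */
            p += 4;
            if (type == AV_EOL || p + size > end)
                break;
            if (type == AV_TIMESTAMP && size == sizeof(uint64_t)) {
                uint64_t ts;
                memcpy(&ts, p, sizeof(ts));
                return ts;
            }
            p += size;                /* advance attr value */
        }
        return 0;
    }

    int main(void)
    {
        /* type=7, len=8, value=0x1122334455667788, then EOL. */
        uint8_t blob[] = { 7,0, 8,0,
                           0x88,0x77,0x66,0x55,0x44,0x33,0x22,0x11,
                           0,0, 0,0 };
        printf("0x%llx\n",
               (unsigned long long)find_timestamp(blob, sizeof(blob)));
        return 0;
    }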
4973 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4974 index f621b44cb800..6b66dd5d1540 100644
4975 --- a/fs/cifs/inode.c
4976 +++ b/fs/cifs/inode.c
4977 @@ -2034,7 +2034,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
4978 struct tcon_link *tlink = NULL;
4979 struct cifs_tcon *tcon = NULL;
4980 struct TCP_Server_Info *server;
4981 - struct cifs_io_parms io_parms;
4982
4983 /*
4984 * To avoid spurious oplock breaks from server, in the case of
4985 @@ -2056,18 +2055,6 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
4986 rc = -ENOSYS;
4987 cifsFileInfo_put(open_file);
4988 cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc);
4989 - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
4990 - unsigned int bytes_written;
4991 -
4992 - io_parms.netfid = open_file->fid.netfid;
4993 - io_parms.pid = open_file->pid;
4994 - io_parms.tcon = tcon;
4995 - io_parms.offset = 0;
4996 - io_parms.length = attrs->ia_size;
4997 - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
4998 - NULL, NULL, 1);
4999 - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc);
5000 - }
5001 } else
5002 rc = -EINVAL;
5003
5004 @@ -2093,28 +2080,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
5005 else
5006 rc = -ENOSYS;
5007 cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc);
5008 - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
5009 - __u16 netfid;
5010 - int oplock = 0;
5011
5012 - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN,
5013 - GENERIC_WRITE, CREATE_NOT_DIR, &netfid,
5014 - &oplock, NULL, cifs_sb->local_nls,
5015 - cifs_remap(cifs_sb));
5016 - if (rc == 0) {
5017 - unsigned int bytes_written;
5018 -
5019 - io_parms.netfid = netfid;
5020 - io_parms.pid = current->tgid;
5021 - io_parms.tcon = tcon;
5022 - io_parms.offset = 0;
5023 - io_parms.length = attrs->ia_size;
5024 - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL,
5025 - NULL, 1);
5026 - cifs_dbg(FYI, "wrt seteof rc %d\n", rc);
5027 - CIFSSMBClose(xid, tcon, netfid);
5028 - }
5029 - }
5030 if (tlink)
5031 cifs_put_tlink(tlink);
5032
5033 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5034 index 54daee5ad4c1..1678b9cb94c7 100644
5035 --- a/fs/cifs/smb2ops.c
5036 +++ b/fs/cifs/smb2ops.c
5037 @@ -50,9 +50,13 @@ change_conf(struct TCP_Server_Info *server)
5038 break;
5039 default:
5040 server->echoes = true;
5041 - server->oplocks = true;
5042 + if (enable_oplocks) {
5043 + server->oplocks = true;
5044 + server->oplock_credits = 1;
5045 + } else
5046 + server->oplocks = false;
5047 +
5048 server->echo_credits = 1;
5049 - server->oplock_credits = 1;
5050 }
5051 server->credits -= server->echo_credits + server->oplock_credits;
5052 return 0;
5053 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5054 index 54cbe19d9c08..894f259d3989 100644
5055 --- a/fs/cifs/smb2pdu.c
5056 +++ b/fs/cifs/smb2pdu.c
5057 @@ -46,6 +46,7 @@
5058 #include "smb2status.h"
5059 #include "smb2glob.h"
5060 #include "cifspdu.h"
5061 +#include "cifs_spnego.h"
5062
5063 /*
5064 * The following table defines the expected "StructureSize" of SMB2 requests
5065 @@ -427,19 +428,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
5066 cifs_dbg(FYI, "missing security blob on negprot\n");
5067
5068 rc = cifs_enable_signing(server, ses->sign);
5069 -#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
5070 if (rc)
5071 goto neg_exit;
5072 - if (blob_length)
5073 + if (blob_length) {
5074 rc = decode_negTokenInit(security_blob, blob_length, server);
5075 - if (rc == 1)
5076 - rc = 0;
5077 - else if (rc == 0) {
5078 - rc = -EIO;
5079 - goto neg_exit;
5080 + if (rc == 1)
5081 + rc = 0;
5082 + else if (rc == 0)
5083 + rc = -EIO;
5084 }
5085 -#endif
5086 -
5087 neg_exit:
5088 free_rsp_buf(resp_buftype, rsp);
5089 return rc;
5090 @@ -533,7 +530,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
5091 __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
5092 struct TCP_Server_Info *server = ses->server;
5093 u16 blob_length = 0;
5094 - char *security_blob;
5095 + struct key *spnego_key = NULL;
5096 + char *security_blob = NULL;
5097 char *ntlmssp_blob = NULL;
5098 bool use_spnego = false; /* else use raw ntlmssp */
5099
5100 @@ -561,7 +559,8 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
5101 ses->ntlmssp->sesskey_per_smbsess = true;
5102
5103 /* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
5104 - ses->sectype = RawNTLMSSP;
5105 + if (ses->sectype != Kerberos && ses->sectype != RawNTLMSSP)
5106 + ses->sectype = RawNTLMSSP;
5107
5108 ssetup_ntlmssp_authenticate:
5109 if (phase == NtLmChallenge)
5110 @@ -590,7 +589,48 @@ ssetup_ntlmssp_authenticate:
5111 iov[0].iov_base = (char *)req;
5112 /* 4 for rfc1002 length field and 1 for pad */
5113 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
5114 - if (phase == NtLmNegotiate) {
5115 +
5116 + if (ses->sectype == Kerberos) {
5117 +#ifdef CONFIG_CIFS_UPCALL
5118 + struct cifs_spnego_msg *msg;
5119 +
5120 + spnego_key = cifs_get_spnego_key(ses);
5121 + if (IS_ERR(spnego_key)) {
5122 + rc = PTR_ERR(spnego_key);
5123 + spnego_key = NULL;
5124 + goto ssetup_exit;
5125 + }
5126 +
5127 + msg = spnego_key->payload.data;
5128 + /*
5129 + * check version field to make sure that cifs.upcall is
5130 + * sending us a response in an expected form
5131 + */
5132 + if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
5133 + cifs_dbg(VFS,
5134 + "bad cifs.upcall version. Expected %d got %d",
5135 + CIFS_SPNEGO_UPCALL_VERSION, msg->version);
5136 + rc = -EKEYREJECTED;
5137 + goto ssetup_exit;
5138 + }
5139 + ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
5140 + GFP_KERNEL);
5141 + if (!ses->auth_key.response) {
5142 + cifs_dbg(VFS,
5143 + "Kerberos can't allocate (%u bytes) memory",
5144 + msg->sesskey_len);
5145 + rc = -ENOMEM;
5146 + goto ssetup_exit;
5147 + }
5148 + ses->auth_key.len = msg->sesskey_len;
5149 + blob_length = msg->secblob_len;
5150 + iov[1].iov_base = msg->data + msg->sesskey_len;
5151 + iov[1].iov_len = blob_length;
5152 +#else
5153 + rc = -EOPNOTSUPP;
5154 + goto ssetup_exit;
5155 +#endif /* CONFIG_CIFS_UPCALL */
5156 + } else if (phase == NtLmNegotiate) { /* if not krb5 must be ntlmssp */
5157 ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
5158 GFP_KERNEL);
5159 if (ntlmssp_blob == NULL) {
5160 @@ -613,6 +653,8 @@ ssetup_ntlmssp_authenticate:
5161 /* with raw NTLMSSP we don't encapsulate in SPNEGO */
5162 security_blob = ntlmssp_blob;
5163 }
5164 + iov[1].iov_base = security_blob;
5165 + iov[1].iov_len = blob_length;
5166 } else if (phase == NtLmAuthenticate) {
5167 req->hdr.SessionId = ses->Suid;
5168 ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
5169 @@ -640,6 +682,8 @@ ssetup_ntlmssp_authenticate:
5170 } else {
5171 security_blob = ntlmssp_blob;
5172 }
5173 + iov[1].iov_base = security_blob;
5174 + iov[1].iov_len = blob_length;
5175 } else {
5176 cifs_dbg(VFS, "illegal ntlmssp phase\n");
5177 rc = -EIO;
5178 @@ -651,8 +695,6 @@ ssetup_ntlmssp_authenticate:
5179 cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
5180 1 /* pad */ - 4 /* rfc1001 len */);
5181 req->SecurityBufferLength = cpu_to_le16(blob_length);
5182 - iov[1].iov_base = security_blob;
5183 - iov[1].iov_len = blob_length;
5184
5185 inc_rfc1001_len(req, blob_length - 1 /* pad */);
5186
5187 @@ -663,6 +705,7 @@ ssetup_ntlmssp_authenticate:
5188
5189 kfree(security_blob);
5190 rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
5191 + ses->Suid = rsp->hdr.SessionId;
5192 if (resp_buftype != CIFS_NO_BUFFER &&
5193 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
5194 if (phase != NtLmNegotiate) {
5195 @@ -680,7 +723,6 @@ ssetup_ntlmssp_authenticate:
5196 /* NTLMSSP Negotiate sent now processing challenge (response) */
5197 phase = NtLmChallenge; /* process ntlmssp challenge */
5198 rc = 0; /* MORE_PROCESSING is not an error here but expected */
5199 - ses->Suid = rsp->hdr.SessionId;
5200 rc = decode_ntlmssp_challenge(rsp->Buffer,
5201 le16_to_cpu(rsp->SecurityBufferLength), ses);
5202 }
5203 @@ -737,6 +779,10 @@ keygen_exit:
5204 kfree(ses->auth_key.response);
5205 ses->auth_key.response = NULL;
5206 }
5207 + if (spnego_key) {
5208 + key_invalidate(spnego_key);
5209 + key_put(spnego_key);
5210 + }
5211 kfree(ses->ntlmssp);
5212
5213 return rc;
5214 diff --git a/fs/dcache.c b/fs/dcache.c
5215 index 5d03eb0ec0ac..0046ab7d4f3d 100644
5216 --- a/fs/dcache.c
5217 +++ b/fs/dcache.c
5218 @@ -1676,7 +1676,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
5219 DCACHE_OP_COMPARE |
5220 DCACHE_OP_REVALIDATE |
5221 DCACHE_OP_WEAK_REVALIDATE |
5222 - DCACHE_OP_DELETE ));
5223 + DCACHE_OP_DELETE |
5224 + DCACHE_OP_SELECT_INODE));
5225 dentry->d_op = op;
5226 if (!op)
5227 return;
5228 @@ -1692,6 +1693,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
5229 dentry->d_flags |= DCACHE_OP_DELETE;
5230 if (op->d_prune)
5231 dentry->d_flags |= DCACHE_OP_PRUNE;
5232 + if (op->d_select_inode)
5233 + dentry->d_flags |= DCACHE_OP_SELECT_INODE;
5234
5235 }
5236 EXPORT_SYMBOL(d_set_d_op);
5237 @@ -2923,6 +2926,13 @@ restart:
5238
5239 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
5240 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
5241 + /* Escaped? */
5242 + if (dentry != vfsmnt->mnt_root) {
5243 + bptr = *buffer;
5244 + blen = *buflen;
5245 + error = 3;
5246 + break;
5247 + }
5248 /* Global root? */
5249 if (mnt != parent) {
5250 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
5251 diff --git a/fs/internal.h b/fs/internal.h
5252 index 01dce1d1476b..4d5af583ab03 100644
5253 --- a/fs/internal.h
5254 +++ b/fs/internal.h
5255 @@ -107,6 +107,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
5256 extern long do_handle_open(int mountdirfd,
5257 struct file_handle __user *ufh, int open_flag);
5258 extern int open_check_o_direct(struct file *f);
5259 +extern int vfs_open(const struct path *, struct file *, const struct cred *);
5260
5261 /*
5262 * inode.c
5263 diff --git a/fs/namei.c b/fs/namei.c
5264 index fe30d3be43a8..ccd7f98d85b9 100644
5265 --- a/fs/namei.c
5266 +++ b/fs/namei.c
5267 @@ -505,6 +505,24 @@ struct nameidata {
5268 char *saved_names[MAX_NESTED_LINKS + 1];
5269 };
5270
5271 +/**
5272 + * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
5273 + * @path: the path to verify
5274 + *
5275 + * Rename can sometimes move a file or directory outside of a bind
5276 + * mount; path_connected() allows those cases to be detected.
5277 + */
5278 +static bool path_connected(const struct path *path)
5279 +{
5280 + struct vfsmount *mnt = path->mnt;
5281 +
5282 + /* Only bind mounts can have disconnected paths */
5283 + if (mnt->mnt_root == mnt->mnt_sb->s_root)
5284 + return true;
5285 +
5286 + return is_subdir(path->dentry, mnt->mnt_root);
5287 +}
5288 +
5289 /*
5290 * Path walking has 2 modes, rcu-walk and ref-walk (see
5291 * Documentation/filesystems/path-lookup.txt). In situations when we can't
5292 @@ -1194,6 +1212,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
5293 goto failed;
5294 nd->path.dentry = parent;
5295 nd->seq = seq;
5296 + if (unlikely(!path_connected(&nd->path)))
5297 + goto failed;
5298 break;
5299 }
5300 if (!follow_up_rcu(&nd->path))
5301 @@ -1290,7 +1310,7 @@ static void follow_mount(struct path *path)
5302 }
5303 }
5304
5305 -static void follow_dotdot(struct nameidata *nd)
5306 +static int follow_dotdot(struct nameidata *nd)
5307 {
5308 if (!nd->root.mnt)
5309 set_root(nd);
5310 @@ -1306,6 +1326,10 @@ static void follow_dotdot(struct nameidata *nd)
5311 /* rare case of legitimate dget_parent()... */
5312 nd->path.dentry = dget_parent(nd->path.dentry);
5313 dput(old);
5314 + if (unlikely(!path_connected(&nd->path))) {
5315 + path_put(&nd->path);
5316 + return -ENOENT;
5317 + }
5318 break;
5319 }
5320 if (!follow_up(&nd->path))
5321 @@ -1313,6 +1337,7 @@ static void follow_dotdot(struct nameidata *nd)
5322 }
5323 follow_mount(&nd->path);
5324 nd->inode = nd->path.dentry->d_inode;
5325 + return 0;
5326 }
5327
5328 /*
5329 @@ -1428,8 +1453,6 @@ static int lookup_fast(struct nameidata *nd,
5330 negative = d_is_negative(dentry);
5331 if (read_seqcount_retry(&dentry->d_seq, seq))
5332 return -ECHILD;
5333 - if (negative)
5334 - return -ENOENT;
5335
5336 /*
5337 * This sequence count validates that the parent had no
5338 @@ -1450,6 +1473,12 @@ static int lookup_fast(struct nameidata *nd,
5339 goto unlazy;
5340 }
5341 }
5342 + /*
5343 + * Note: do the negative dentry check after revalidation,
5344 + * in case revalidation drops it.
5345 + */
5346 + if (negative)
5347 + return -ENOENT;
5348 path->mnt = mnt;
5349 path->dentry = dentry;
5350 if (likely(__follow_mount_rcu(nd, path, inode)))
5351 @@ -1541,7 +1570,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
5352 if (follow_dotdot_rcu(nd))
5353 return -ECHILD;
5354 } else
5355 - follow_dotdot(nd);
5356 + return follow_dotdot(nd);
5357 }
5358 return 0;
5359 }
5360 @@ -2290,7 +2319,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
5361 if (unlikely(nd->last_type != LAST_NORM)) {
5362 error = handle_dots(nd, nd->last_type);
5363 if (error)
5364 - goto out;
5365 + return error;
5366 dentry = dget(nd->path.dentry);
5367 goto done;
5368 }
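The bug behind the namei.c changes: given a bind mount of a subdirectory, a concurrent rename can move the directory being walked outside the bound subtree, so resolving ".." by dentry parentage silently escapes mnt->mnt_root. path_connected() re-checks, after each ".." step on a bind mount, that the dentry is still a descendant of the mount root. A toy model with an explicit parent-pointer tree (is_subdir() replaced by a hypothetical unlocked walk):

    #include <stdbool.h>
    #include <stdio.h>

    struct dentry { const char *name; struct dentry *parent; };

    /* Hypothetical stand-in for is_subdir(): walk up the parent chain. */
    static bool is_subdir(struct dentry *d, struct dentry *root)
    {
        for (; d; d = (d == d->parent) ? NULL : d->parent)
            if (d == root)
                return true;
        return false;
    }

    int main(void)
    {
        struct dentry root = { "/", NULL }, a = { "a", &root }, b = { "b", &a };
        root.parent = &root;   /* the real root is its own parent */

        /* Bind mount rooted at /a; /a/b is connected. */
        printf("%d\n", is_subdir(&b, &a));   /* 1 */

        /* rename("/a/b", "/b") re-parents b outside the bind mount. */
        b.parent = &root;
        printf("%d\n", is_subdir(&b, &a));   /* 0: ".." would escape */
        return 0;
    }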
5369 diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
5370 index a46bf6de9ce4..fb1fb2774d34 100644
5371 --- a/fs/nfs/filelayout/filelayout.c
5372 +++ b/fs/nfs/filelayout/filelayout.c
5373 @@ -628,23 +628,18 @@ out_put:
5374 goto out;
5375 }
5376
5377 -static void filelayout_free_fh_array(struct nfs4_filelayout_segment *fl)
5378 +static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
5379 {
5380 int i;
5381
5382 - for (i = 0; i < fl->num_fh; i++) {
5383 - if (!fl->fh_array[i])
5384 - break;
5385 - kfree(fl->fh_array[i]);
5386 + if (fl->fh_array) {
5387 + for (i = 0; i < fl->num_fh; i++) {
5388 + if (!fl->fh_array[i])
5389 + break;
5390 + kfree(fl->fh_array[i]);
5391 + }
5392 + kfree(fl->fh_array);
5393 }
5394 - kfree(fl->fh_array);
5395 - fl->fh_array = NULL;
5396 -}
5397 -
5398 -static void
5399 -_filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
5400 -{
5401 - filelayout_free_fh_array(fl);
5402 kfree(fl);
5403 }
5404
5405 @@ -715,21 +710,21 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
5406 /* Do we want to use a mempool here? */
5407 fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
5408 if (!fl->fh_array[i])
5409 - goto out_err_free;
5410 + goto out_err;
5411
5412 p = xdr_inline_decode(&stream, 4);
5413 if (unlikely(!p))
5414 - goto out_err_free;
5415 + goto out_err;
5416 fl->fh_array[i]->size = be32_to_cpup(p++);
5417 if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
5418 printk(KERN_ERR "NFS: Too big fh %d received %d\n",
5419 i, fl->fh_array[i]->size);
5420 - goto out_err_free;
5421 + goto out_err;
5422 }
5423
5424 p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
5425 if (unlikely(!p))
5426 - goto out_err_free;
5427 + goto out_err;
5428 memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
5429 dprintk("DEBUG: %s: fh len %d\n", __func__,
5430 fl->fh_array[i]->size);
5431 @@ -738,8 +733,6 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
5432 __free_page(scratch);
5433 return 0;
5434
5435 -out_err_free:
5436 - filelayout_free_fh_array(fl);
5437 out_err:
5438 __free_page(scratch);
5439 return -EIO;
5440 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
5441 index 069914ce7641..93d355c8b467 100644
5442 --- a/fs/nfs/pagelist.c
5443 +++ b/fs/nfs/pagelist.c
5444 @@ -508,7 +508,7 @@ size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
5445 * for it without upsetting the slab allocator.
5446 */
5447 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
5448 - sizeof(struct page) > PAGE_SIZE)
5449 + sizeof(struct page *) > PAGE_SIZE)
5450 return 0;
5451
5452 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
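The one-word nfs/pagelist.c fix matters because the heuristic sizes an array of page pointers, not of page structs: with sizeof(struct page) commonly 64 bytes and sizeof(struct page *) 8 bytes on 64-bit, the old test stopped coalescing requests roughly eight times too early. The arithmetic, under those assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        unsigned long sizeof_page = 64; /* assumed sizeof(struct page)   */
        unsigned long sizeof_ptr  = 8;  /* sizeof(struct page *), 64-bit */

        /* Largest I/O whose page-pointer array still fits in one page. */
        unsigned long max_pages_wrong = PAGE_SIZE / sizeof_page; /* 64  */
        unsigned long max_pages_right = PAGE_SIZE / sizeof_ptr;  /* 512 */

        printf("wrong limit: %lu KiB, right limit: %lu KiB\n",
               max_pages_wrong * PAGE_SIZE / 1024,   /* 256 KiB  */
               max_pages_right * PAGE_SIZE / 1024);  /* 2048 KiB */
        return 0;
    }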
5453 diff --git a/fs/nfs/read.c b/fs/nfs/read.c
5454 index ae0ff7a11b40..01b8cc8e8cfc 100644
5455 --- a/fs/nfs/read.c
5456 +++ b/fs/nfs/read.c
5457 @@ -72,6 +72,9 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
5458 {
5459 struct nfs_pgio_mirror *mirror;
5460
5461 + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
5462 + pgio->pg_ops->pg_cleanup(pgio);
5463 +
5464 pgio->pg_ops = &nfs_pgio_rw_ops;
5465
5466 /* read path should never have more than one mirror */
5467 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
5468 index 07115b9b1ad2..d9851a6a2813 100644
5469 --- a/fs/nfs/write.c
5470 +++ b/fs/nfs/write.c
5471 @@ -1203,7 +1203,7 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
5472 return 1;
5473 if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
5474 list_empty_careful(&flctx->flc_posix)))
5475 - return 0;
5476 + return 1;
5477
5478 /* Check to see if there are whole file write locks */
5479 ret = 0;
5480 @@ -1331,6 +1331,9 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
5481 {
5482 struct nfs_pgio_mirror *mirror;
5483
5484 + if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
5485 + pgio->pg_ops->pg_cleanup(pgio);
5486 +
5487 pgio->pg_ops = &nfs_pgio_rw_ops;
5488
5489 nfs_pageio_stop_mirroring(pgio);
5490 diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
5491 index fdf4b41d0609..482cfd34472d 100644
5492 --- a/fs/ocfs2/dlm/dlmmaster.c
5493 +++ b/fs/ocfs2/dlm/dlmmaster.c
5494 @@ -1439,6 +1439,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
5495 int found, ret;
5496 int set_maybe;
5497 int dispatch_assert = 0;
5498 + int dispatched = 0;
5499
5500 if (!dlm_grab(dlm))
5501 return DLM_MASTER_RESP_NO;
5502 @@ -1658,15 +1659,18 @@ send_response:
5503 mlog(ML_ERROR, "failed to dispatch assert master work\n");
5504 response = DLM_MASTER_RESP_ERROR;
5505 dlm_lockres_put(res);
5506 - } else
5507 + } else {
5508 + dispatched = 1;
5509 __dlm_lockres_grab_inflight_worker(dlm, res);
5510 + }
5511 spin_unlock(&res->spinlock);
5512 } else {
5513 if (res)
5514 dlm_lockres_put(res);
5515 }
5516
5517 - dlm_put(dlm);
5518 + if (!dispatched)
5519 + dlm_put(dlm);
5520 return response;
5521 }
5522
5523 @@ -2090,7 +2094,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
5524
5525
5526 /* queue up work for dlm_assert_master_worker */
5527 - dlm_grab(dlm); /* get an extra ref for the work item */
5528 dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
5529 item->u.am.lockres = res; /* already have a ref */
5530 /* can optionally ignore node numbers higher than this node */
5531 diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
5532 index ce12e0b1a31f..3d90ad7ff91f 100644
5533 --- a/fs/ocfs2/dlm/dlmrecovery.c
5534 +++ b/fs/ocfs2/dlm/dlmrecovery.c
5535 @@ -1694,6 +1694,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
5536 unsigned int hash;
5537 int master = DLM_LOCK_RES_OWNER_UNKNOWN;
5538 u32 flags = DLM_ASSERT_MASTER_REQUERY;
5539 + int dispatched = 0;
5540
5541 if (!dlm_grab(dlm)) {
5542 /* since the domain has gone away on this
5543 @@ -1719,8 +1720,10 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
5544 dlm_put(dlm);
5545 /* sender will take care of this and retry */
5546 return ret;
5547 - } else
5548 + } else {
5549 + dispatched = 1;
5550 __dlm_lockres_grab_inflight_worker(dlm, res);
5551 + }
5552 spin_unlock(&res->spinlock);
5553 } else {
5554 /* put.. in case we are not the master */
5555 @@ -1730,7 +1733,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
5556 }
5557 spin_unlock(&dlm->spinlock);
5558
5559 - dlm_put(dlm);
5560 + if (!dispatched)
5561 + dlm_put(dlm);
5562 return master;
5563 }
5564
5565 diff --git a/fs/open.c b/fs/open.c
5566 index 98e5a52dc68c..f9d2bf935099 100644
5567 --- a/fs/open.c
5568 +++ b/fs/open.c
5569 @@ -678,18 +678,18 @@ int open_check_o_direct(struct file *f)
5570 }
5571
5572 static int do_dentry_open(struct file *f,
5573 + struct inode *inode,
5574 int (*open)(struct inode *, struct file *),
5575 const struct cred *cred)
5576 {
5577 static const struct file_operations empty_fops = {};
5578 - struct inode *inode;
5579 int error;
5580
5581 f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
5582 FMODE_PREAD | FMODE_PWRITE;
5583
5584 path_get(&f->f_path);
5585 - inode = f->f_inode = f->f_path.dentry->d_inode;
5586 + f->f_inode = inode;
5587 f->f_mapping = inode->i_mapping;
5588
5589 if (unlikely(f->f_flags & O_PATH)) {
5590 @@ -793,7 +793,8 @@ int finish_open(struct file *file, struct dentry *dentry,
5591 BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
5592
5593 file->f_path.dentry = dentry;
5594 - error = do_dentry_open(file, open, current_cred());
5595 + error = do_dentry_open(file, d_backing_inode(dentry), open,
5596 + current_cred());
5597 if (!error)
5598 *opened |= FILE_OPENED;
5599
5600 @@ -822,6 +823,28 @@ int finish_no_open(struct file *file, struct dentry *dentry)
5601 }
5602 EXPORT_SYMBOL(finish_no_open);
5603
5604 +/**
5605 + * vfs_open - open the file at the given path
5606 + * @path: path to open
5607 + * @file: newly allocated file with f_flag initialized
5608 + * @cred: credentials to use
5609 + */
5610 +int vfs_open(const struct path *path, struct file *file,
5611 + const struct cred *cred)
5612 +{
5613 + struct dentry *dentry = path->dentry;
5614 + struct inode *inode = dentry->d_inode;
5615 +
5616 + file->f_path = *path;
5617 + if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
5618 + inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
5619 + if (IS_ERR(inode))
5620 + return PTR_ERR(inode);
5621 + }
5622 +
5623 + return do_dentry_open(file, inode, NULL, cred);
5624 +}
5625 +
5626 struct file *dentry_open(const struct path *path, int flags,
5627 const struct cred *cred)
5628 {
5629 @@ -853,26 +876,6 @@ struct file *dentry_open(const struct path *path, int flags,
5630 }
5631 EXPORT_SYMBOL(dentry_open);
5632
5633 -/**
5634 - * vfs_open - open the file at the given path
5635 - * @path: path to open
5636 - * @filp: newly allocated file with f_flag initialized
5637 - * @cred: credentials to use
5638 - */
5639 -int vfs_open(const struct path *path, struct file *filp,
5640 - const struct cred *cred)
5641 -{
5642 - struct inode *inode = path->dentry->d_inode;
5643 -
5644 - if (inode->i_op->dentry_open)
5645 - return inode->i_op->dentry_open(path->dentry, filp, cred);
5646 - else {
5647 - filp->f_path = *path;
5648 - return do_dentry_open(filp, NULL, cred);
5649 - }
5650 -}
5651 -EXPORT_SYMBOL(vfs_open);
5652 -
5653 static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
5654 {
5655 int lookup_flags = 0;
5656 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
5657 index 04f124884687..ba0db2638946 100644
5658 --- a/fs/overlayfs/inode.c
5659 +++ b/fs/overlayfs/inode.c
5660 @@ -336,37 +336,33 @@ static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
5661 return true;
5662 }
5663
5664 -static int ovl_dentry_open(struct dentry *dentry, struct file *file,
5665 - const struct cred *cred)
5666 +struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
5667 {
5668 int err;
5669 struct path realpath;
5670 enum ovl_path_type type;
5671 - bool want_write = false;
5672 +
5673 + if (d_is_dir(dentry))
5674 + return d_backing_inode(dentry);
5675
5676 type = ovl_path_real(dentry, &realpath);
5677 - if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
5678 - want_write = true;
5679 + if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
5680 err = ovl_want_write(dentry);
5681 if (err)
5682 - goto out;
5683 + return ERR_PTR(err);
5684
5685 - if (file->f_flags & O_TRUNC)
5686 + if (file_flags & O_TRUNC)
5687 err = ovl_copy_up_last(dentry, NULL, true);
5688 else
5689 err = ovl_copy_up(dentry);
5690 + ovl_drop_write(dentry);
5691 if (err)
5692 - goto out_drop_write;
5693 + return ERR_PTR(err);
5694
5695 ovl_path_upper(dentry, &realpath);
5696 }
5697
5698 - err = vfs_open(&realpath, file, cred);
5699 -out_drop_write:
5700 - if (want_write)
5701 - ovl_drop_write(dentry);
5702 -out:
5703 - return err;
5704 + return d_backing_inode(realpath.dentry);
5705 }
5706
5707 static const struct inode_operations ovl_file_inode_operations = {
5708 @@ -377,7 +373,6 @@ static const struct inode_operations ovl_file_inode_operations = {
5709 .getxattr = ovl_getxattr,
5710 .listxattr = ovl_listxattr,
5711 .removexattr = ovl_removexattr,
5712 - .dentry_open = ovl_dentry_open,
5713 };
5714
5715 static const struct inode_operations ovl_symlink_inode_operations = {
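Taken together with the fs/open.c and fs/dcache.c hunks, this converts overlayfs from the per-inode dentry_open() hook to a dentry operation: vfs_open() now asks d_select_inode() which inode really backs the dentry, and overlayfs uses that hook to copy a lower file up before a write-open proceeds. A reduced model of the selection step (the union layering is invented; copy-up and error handling are omitted):

    #include <stdio.h>

    struct inode { const char *layer; };

    struct dentry {
        struct inode *upper;   /* NULL until the file has been copied up */
        struct inode *lower;
    };

    #define O_WRONLY 1

    /* d_select_inode()-style hook: pick the inode an open should use. */
    static struct inode *select_inode(struct dentry *d, int flags)
    {
        if (!d->upper && (flags & O_WRONLY)) {
            /* ovl_copy_up() would run here; we just fake its result. */
            static struct inode up = { "upper" };
            d->upper = &up;
        }
        return d->upper ? d->upper : d->lower;
    }

    int main(void)
    {
        struct inode low = { "lower" };
        struct dentry d = { NULL, &low };

        printf("%s\n", select_inode(&d, 0)->layer);        /* lower */
        printf("%s\n", select_inode(&d, O_WRONLY)->layer); /* upper */
        return 0;
    }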
5716 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
5717 index 17ac5afc9ffb..ea5a40b06e3a 100644
5718 --- a/fs/overlayfs/overlayfs.h
5719 +++ b/fs/overlayfs/overlayfs.h
5720 @@ -173,6 +173,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
5721 void *value, size_t size);
5722 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
5723 int ovl_removexattr(struct dentry *dentry, const char *name);
5724 +struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
5725
5726 struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
5727 struct ovl_entry *oe);
5728 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
5729 index 155989455a72..33f2d27a6792 100644
5730 --- a/fs/overlayfs/super.c
5731 +++ b/fs/overlayfs/super.c
5732 @@ -275,6 +275,7 @@ static void ovl_dentry_release(struct dentry *dentry)
5733
5734 static const struct dentry_operations ovl_dentry_operations = {
5735 .d_release = ovl_dentry_release,
5736 + .d_select_inode = ovl_d_select_inode,
5737 };
5738
5739 static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
5740 diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
5741 index 96f3448b6eb4..fd65b3f1923c 100644
5742 --- a/fs/ubifs/xattr.c
5743 +++ b/fs/ubifs/xattr.c
5744 @@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode,
5745 {
5746 int err;
5747
5748 - mutex_lock(&inode->i_mutex);
5749 err = security_inode_init_security(inode, dentry, qstr,
5750 &init_xattrs, 0);
5751 - mutex_unlock(&inode->i_mutex);
5752 -
5753 if (err) {
5754 struct ubifs_info *c = dentry->i_sb->s_fs_info;
5755 ubifs_err(c, "cannot initialize security for inode %lu, error %d",
5756 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
5757 index df334cbacc6d..167ec0934049 100644
5758 --- a/include/linux/dcache.h
5759 +++ b/include/linux/dcache.h
5760 @@ -160,6 +160,7 @@ struct dentry_operations {
5761 char *(*d_dname)(struct dentry *, char *, int);
5762 struct vfsmount *(*d_automount)(struct path *);
5763 int (*d_manage)(struct dentry *, bool);
5764 + struct inode *(*d_select_inode)(struct dentry *, unsigned);
5765 } ____cacheline_aligned;
5766
5767 /*
5768 @@ -225,6 +226,7 @@ struct dentry_operations {
5769
5770 #define DCACHE_MAY_FREE 0x00800000
5771 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
5772 +#define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */
5773
5774 extern seqlock_t rename_lock;
5775
5776 diff --git a/include/linux/fs.h b/include/linux/fs.h
5777 index 571aab91bfc0..f93192333b37 100644
5778 --- a/include/linux/fs.h
5779 +++ b/include/linux/fs.h
5780 @@ -1641,7 +1641,6 @@ struct inode_operations {
5781 int (*set_acl)(struct inode *, struct posix_acl *, int);
5782
5783 /* WARNING: probably going away soon, do not use! */
5784 - int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
5785 } ____cacheline_aligned;
5786
5787 ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
5788 @@ -2193,7 +2192,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
5789 extern struct file *filp_open(const char *, int, umode_t);
5790 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
5791 const char *, int);
5792 -extern int vfs_open(const struct path *, struct file *, const struct cred *);
5793 extern struct file * dentry_open(const struct path *, int, const struct cred *);
5794 extern int filp_close(struct file *, fl_owner_t id);
5795
5796 diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
5797 index de722d4e9d61..258daf914c6d 100644
5798 --- a/include/linux/mmc/core.h
5799 +++ b/include/linux/mmc/core.h
5800 @@ -121,6 +121,7 @@ struct mmc_data {
5801 struct mmc_request *mrq; /* associated request */
5802
5803 unsigned int sg_len; /* size of scatter list */
5804 + int sg_count; /* mapped sg entries */
5805 struct scatterlist *sg; /* I/O scatter list */
5806 s32 host_cookie; /* host private data */
5807 };
5808 diff --git a/include/linux/security.h b/include/linux/security.h
5809 index 18264ea9e314..5d45b4fd91d2 100644
5810 --- a/include/linux/security.h
5811 +++ b/include/linux/security.h
5812 @@ -2527,7 +2527,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
5813 unsigned long arg4,
5814 unsigned long arg5)
5815 {
5816 - return cap_task_prctl(option, arg2, arg3, arg3, arg5);
5817 + return cap_task_prctl(option, arg2, arg3, arg4, arg5);
5818 }
5819
5820 static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
5821 diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
5822 index d81d584157e1..e8635854a55b 100644
5823 --- a/include/net/netfilter/nf_queue.h
5824 +++ b/include/net/netfilter/nf_queue.h
5825 @@ -24,6 +24,8 @@ struct nf_queue_entry {
5826 struct nf_queue_handler {
5827 int (*outfn)(struct nf_queue_entry *entry,
5828 unsigned int queuenum);
5829 + void (*nf_hook_drop)(struct net *net,
5830 + struct nf_hook_ops *ops);
5831 };
5832
5833 void nf_register_queue_handler(const struct nf_queue_handler *qh);
5834 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
5835 index e6bcf55dcf20..fd0ca42b1d63 100644
5836 --- a/include/net/netfilter/nf_tables.h
5837 +++ b/include/net/netfilter/nf_tables.h
5838 @@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
5839
5840 static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
5841 {
5842 - return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
5843 + return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
5844 }
5845
5846 unsigned int nft_parse_register(const struct nlattr *attr);
5847 diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
5848 index 73abbc54063d..7bd03f867fca 100644
5849 --- a/include/target/iscsi/iscsi_target_core.h
5850 +++ b/include/target/iscsi/iscsi_target_core.h
5851 @@ -787,7 +787,6 @@ struct iscsi_np {
5852 enum iscsi_timer_flags_table np_login_timer_flags;
5853 u32 np_exports;
5854 enum np_flags_table np_flags;
5855 - unsigned char np_ip[IPV6_ADDRESS_SPACE];
5856 u16 np_port;
5857 spinlock_t np_thread_lock;
5858 struct completion np_restart_comp;
5859 diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
5860 index 9ce083960a25..f18490985fc8 100644
5861 --- a/include/xen/interface/sched.h
5862 +++ b/include/xen/interface/sched.h
5863 @@ -107,5 +107,13 @@ struct sched_watchdog {
5864 #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
5865 #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
5866 #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
5867 +/*
5868 + * The domain asked to perform a 'soft reset'. The expected behavior is to
5869 + * reset internal Xen state for the domain, returning it to the point where
5870 + * it was created but leaving the domain's memory contents and vCPU contexts
5871 + * intact. This allows the domain to start over and set up all Xen-specific
5872 + * interfaces again.
5873 + */
5874 +#define SHUTDOWN_soft_reset 5
5875
5876 #endif /* __XEN_PUBLIC_SCHED_H__ */
5877 diff --git a/ipc/msg.c b/ipc/msg.c
5878 index 2b6fdbb9e0e9..652540613d26 100644
5879 --- a/ipc/msg.c
5880 +++ b/ipc/msg.c
5881 @@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
5882 return retval;
5883 }
5884
5885 - /* ipc_addid() locks msq upon success. */
5886 - id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
5887 - if (id < 0) {
5888 - ipc_rcu_putref(msq, msg_rcu_free);
5889 - return id;
5890 - }
5891 -
5892 msq->q_stime = msq->q_rtime = 0;
5893 msq->q_ctime = get_seconds();
5894 msq->q_cbytes = msq->q_qnum = 0;
5895 @@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
5896 INIT_LIST_HEAD(&msq->q_receivers);
5897 INIT_LIST_HEAD(&msq->q_senders);
5898
5899 + /* ipc_addid() locks msq upon success. */
5900 + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
5901 + if (id < 0) {
5902 + ipc_rcu_putref(msq, msg_rcu_free);
5903 + return id;
5904 + }
5905 +
5906 ipc_unlock_object(&msq->q_perm);
5907 rcu_read_unlock();
5908
5909 diff --git a/ipc/shm.c b/ipc/shm.c
5910 index 6d767071c367..499a8bd22fad 100644
5911 --- a/ipc/shm.c
5912 +++ b/ipc/shm.c
5913 @@ -550,12 +550,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
5914 if (IS_ERR(file))
5915 goto no_file;
5916
5917 - id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
5918 - if (id < 0) {
5919 - error = id;
5920 - goto no_id;
5921 - }
5922 -
5923 shp->shm_cprid = task_tgid_vnr(current);
5924 shp->shm_lprid = 0;
5925 shp->shm_atim = shp->shm_dtim = 0;
5926 @@ -564,6 +558,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
5927 shp->shm_nattch = 0;
5928 shp->shm_file = file;
5929 shp->shm_creator = current;
5930 +
5931 + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
5932 + if (id < 0) {
5933 + error = id;
5934 + goto no_id;
5935 + }
5936 +
5937 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
5938
5939 /*
5940 diff --git a/ipc/util.c b/ipc/util.c
5941 index ff3323ef8d8b..c917e9fd10b1 100644
5942 --- a/ipc/util.c
5943 +++ b/ipc/util.c
5944 @@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
5945 rcu_read_lock();
5946 spin_lock(&new->lock);
5947
5948 + current_euid_egid(&euid, &egid);
5949 + new->cuid = new->uid = euid;
5950 + new->gid = new->cgid = egid;
5951 +
5952 id = idr_alloc(&ids->ipcs_idr, new,
5953 (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
5954 GFP_NOWAIT);
5955 @@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
5956
5957 ids->in_use++;
5958
5959 - current_euid_egid(&euid, &egid);
5960 - new->cuid = new->uid = euid;
5961 - new->gid = new->cgid = egid;
5962 -
5963 if (next_id < 0) {
5964 new->seq = ids->seq++;
5965 if (ids->seq > IPCID_SEQ_MAX)
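The ipc/msg.c, ipc/shm.c, and ipc/util.c hunks all apply one rule: ipc_addid()/idr_alloc() publishes the object where other tasks can look it up, so every field a reader may consult — here the uid/gid ownership used in permission checks — must be set before publication, not after. A minimal publish-after-init sketch with an atomic pointer standing in for the idr slot (the kernel code relies on the object's spinlock instead of the release/acquire pair used here):

    #include <stdatomic.h>
    #include <stdio.h>

    struct ipc_perm { unsigned uid, gid; };

    /* The "registry slot": once non-NULL, other threads may use it. */
    static _Atomic(struct ipc_perm *) slot;

    static void publish(struct ipc_perm *p, unsigned uid, unsigned gid)
    {
        /* Initialize everything readers need... */
        p->uid = uid;
        p->gid = gid;
        /* ...and only then make the object reachable. */
        atomic_store_explicit(&slot, p, memory_order_release);
    }

    int main(void)
    {
        static struct ipc_perm perm;
        publish(&perm, 1000, 1000);

        struct ipc_perm *seen =
            atomic_load_explicit(&slot, memory_order_acquire);
        if (seen)
            printf("uid=%u gid=%u\n", seen->uid, seen->gid);
        return 0;
    }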
5966 diff --git a/kernel/events/core.c b/kernel/events/core.c
5967 index 94817491407b..e1af58e23bee 100644
5968 --- a/kernel/events/core.c
5969 +++ b/kernel/events/core.c
5970 @@ -4411,14 +4411,6 @@ static void ring_buffer_wakeup(struct perf_event *event)
5971 rcu_read_unlock();
5972 }
5973
5974 -static void rb_free_rcu(struct rcu_head *rcu_head)
5975 -{
5976 - struct ring_buffer *rb;
5977 -
5978 - rb = container_of(rcu_head, struct ring_buffer, rcu_head);
5979 - rb_free(rb);
5980 -}
5981 -
5982 struct ring_buffer *ring_buffer_get(struct perf_event *event)
5983 {
5984 struct ring_buffer *rb;
5985 diff --git a/kernel/events/internal.h b/kernel/events/internal.h
5986 index 9f6ce9ba4a04..a6adc36a3732 100644
5987 --- a/kernel/events/internal.h
5988 +++ b/kernel/events/internal.h
5989 @@ -11,6 +11,7 @@
5990 struct ring_buffer {
5991 atomic_t refcount;
5992 struct rcu_head rcu_head;
5993 + struct irq_work irq_work;
5994 #ifdef CONFIG_PERF_USE_VMALLOC
5995 struct work_struct work;
5996 int page_order; /* allocation order */
5997 @@ -55,6 +56,15 @@ struct ring_buffer {
5998 };
5999
6000 extern void rb_free(struct ring_buffer *rb);
6001 +
6002 +static inline void rb_free_rcu(struct rcu_head *rcu_head)
6003 +{
6004 + struct ring_buffer *rb;
6005 +
6006 + rb = container_of(rcu_head, struct ring_buffer, rcu_head);
6007 + rb_free(rb);
6008 +}
6009 +
6010 extern struct ring_buffer *
6011 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
6012 extern void perf_event_wakeup(struct perf_event *event);
6013 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
6014 index a7604c81168e..7f63ad978cb8 100644
6015 --- a/kernel/events/ring_buffer.c
6016 +++ b/kernel/events/ring_buffer.c
6017 @@ -221,6 +221,8 @@ void perf_output_end(struct perf_output_handle *handle)
6018 rcu_read_unlock();
6019 }
6020
6021 +static void rb_irq_work(struct irq_work *work);
6022 +
6023 static void
6024 ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
6025 {
6026 @@ -241,6 +243,16 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
6027
6028 INIT_LIST_HEAD(&rb->event_list);
6029 spin_lock_init(&rb->event_lock);
6030 + init_irq_work(&rb->irq_work, rb_irq_work);
6031 +}
6032 +
6033 +static void ring_buffer_put_async(struct ring_buffer *rb)
6034 +{
6035 + if (!atomic_dec_and_test(&rb->refcount))
6036 + return;
6037 +
6038 + rb->rcu_head.next = (void *)rb;
6039 + irq_work_queue(&rb->irq_work);
6040 }
6041
6042 /*
6043 @@ -319,7 +331,7 @@ err_put:
6044 rb_free_aux(rb);
6045
6046 err:
6047 - ring_buffer_put(rb);
6048 + ring_buffer_put_async(rb);
6049 handle->event = NULL;
6050
6051 return NULL;
6052 @@ -370,7 +382,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
6053
6054 local_set(&rb->aux_nest, 0);
6055 rb_free_aux(rb);
6056 - ring_buffer_put(rb);
6057 + ring_buffer_put_async(rb);
6058 }
6059
6060 /*
6061 @@ -559,7 +571,18 @@ static void __rb_free_aux(struct ring_buffer *rb)
6062 void rb_free_aux(struct ring_buffer *rb)
6063 {
6064 if (atomic_dec_and_test(&rb->aux_refcount))
6065 + irq_work_queue(&rb->irq_work);
6066 +}
6067 +
6068 +static void rb_irq_work(struct irq_work *work)
6069 +{
6070 + struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);
6071 +
6072 + if (!atomic_read(&rb->aux_refcount))
6073 __rb_free_aux(rb);
6074 +
6075 + if (rb->rcu_head.next == (void *)rb)
6076 + call_rcu(&rb->rcu_head, rb_free_rcu);
6077 }
6078
6079 #ifndef CONFIG_PERF_USE_VMALLOC
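As the ring_buffer.c hunks read, the last reference to an AUX-capable buffer can now be dropped from a context where freeing is unsafe, so ring_buffer_put_async() and rb_free_aux() only queue an irq_work, and rb_irq_work() later performs the actual free — with rb->rcu_head.next pointed at the buffer itself as a "RCU free pending" flag. A deliberately simplified, single-threaded sketch of that deferred-free shape (the function-pointer field stands in for irq_work):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        atomic_int refcount;
        void (*deferred_free)(struct buf *);  /* stand-in for irq_work */
    };

    static void do_free(struct buf *b)
    {
        printf("freeing buffer in a safe context\n");
        free(b);
    }

    /* Called from an unsafe context: must only schedule, never free. */
    static void put_async(struct buf *b)
    {
        if (atomic_fetch_sub(&b->refcount, 1) != 1)
            return;                  /* not the last reference */
        b->deferred_free = do_free;  /* "queue the irq_work" */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        atomic_init(&b->refcount, 1);
        b->deferred_free = NULL;

        put_async(b);                /* last ref dropped "in NMI" */
        if (b->deferred_free)        /* later: the "irq_work" fires */
            b->deferred_free(b);
        return 0;
    }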
6080 diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
6081 index df2f4642d1e7..5c38f59741e2 100644
6082 --- a/kernel/irq/proc.c
6083 +++ b/kernel/irq/proc.c
6084 @@ -12,6 +12,7 @@
6085 #include <linux/seq_file.h>
6086 #include <linux/interrupt.h>
6087 #include <linux/kernel_stat.h>
6088 +#include <linux/mutex.h>
6089
6090 #include "internals.h"
6091
6092 @@ -323,18 +324,29 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
6093
6094 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
6095 {
6096 + static DEFINE_MUTEX(register_lock);
6097 char name [MAX_NAMELEN];
6098
6099 - if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
6100 + if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
6101 return;
6102
6103 + /*
6104 + * irq directories are registered only when a handler is
6105 + * added, not when the descriptor is created, so multiple
6106 + * tasks might try to register at the same time.
6107 + */
6108 + mutex_lock(&register_lock);
6109 +
6110 + if (desc->dir)
6111 + goto out_unlock;
6112 +
6113 memset(name, 0, MAX_NAMELEN);
6114 sprintf(name, "%d", irq);
6115
6116 /* create /proc/irq/1234 */
6117 desc->dir = proc_mkdir(name, root_irq_dir);
6118 if (!desc->dir)
6119 - return;
6120 + goto out_unlock;
6121
6122 #ifdef CONFIG_SMP
6123 /* create /proc/irq/<irq>/smp_affinity */
6124 @@ -355,6 +367,9 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
6125
6126 proc_create_data("spurious", 0444, desc->dir,
6127 &irq_spurious_proc_fops, (void *)(long)irq);
6128 +
6129 +out_unlock:
6130 + mutex_unlock(&register_lock);
6131 }
6132
6133 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
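
The kernel/irq/proc.c fix is a check-under-lock pattern: the desc->dir test moves inside a function-local static mutex, so two tasks that request the same IRQ concurrently can no longer both observe a NULL dir and race into proc_mkdir(). A runnable pthread model of the same shape (hypothetical names, not the kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t register_lock = PTHREAD_MUTEX_INITIALIZER;
static int dir_created;              /* stands in for desc->dir */
static int create_calls;

static void register_proc_dir(void)
{
        pthread_mutex_lock(&register_lock);
        if (dir_created)
                goto out_unlock;     /* the other task won the race */

        create_calls++;              /* "proc_mkdir()" happens once */
        dir_created = 1;

out_unlock:
        pthread_mutex_unlock(&register_lock);
}

static void *thread_fn(void *arg)
{
        register_proc_dir();
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, thread_fn, NULL);
        pthread_create(&b, NULL, thread_fn, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("proc_mkdir ran %d time(s)\n", create_calls);  /* always 1 */
        return 0;
}
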
6134 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6135 index e6910526c84b..8476206a1e19 100644
6136 --- a/kernel/sched/core.c
6137 +++ b/kernel/sched/core.c
6138 @@ -2217,11 +2217,11 @@ static struct rq *finish_task_switch(struct task_struct *prev)
6139 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
6140 * schedule one last time. The schedule call will never return, and
6141 * the scheduled task must drop that reference.
6142 - * The test for TASK_DEAD must occur while the runqueue locks are
6143 - * still held, otherwise prev could be scheduled on another cpu, die
6144 - * there before we look at prev->state, and then the reference would
6145 - * be dropped twice.
6146 - * Manfred Spraul <manfred@colorfullife.com>
6147 + *
6148 + * We must observe prev->state before clearing prev->on_cpu (in
6149 + * finish_lock_switch), otherwise a concurrent wakeup can get prev
6150 + * running on another CPU and we could race with its RUNNING -> DEAD
6151 + * transition, resulting in a double drop.
6152 */
6153 prev_state = prev->state;
6154 vtime_task_switch(prev);
6155 @@ -2358,13 +2358,20 @@ unsigned long nr_running(void)
6156
6157 /*
6158 * Check if only the current task is running on the cpu.
6159 + *
6160 + * Caution: this function does not check that the caller has disabled
6161 + * preemption, thus the result might have a time-of-check-to-time-of-use
6162 + * race. The caller is responsible for using it correctly, for example:
6163 + *
6164 + * - from a non-preemptable section (of course)
6165 + *
6166 + * - from a thread that is bound to a single CPU
6167 + *
6168 + * - in a loop with very short iterations (e.g. a polling loop)
6169 */
6170 bool single_task_running(void)
6171 {
6172 - if (cpu_rq(smp_processor_id())->nr_running == 1)
6173 - return true;
6174 - else
6175 - return false;
6176 + return raw_rq()->nr_running == 1;
6177 }
6178 EXPORT_SYMBOL(single_task_running);
6179
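
Of the three usage patterns the new single_task_running() comment allows, the polling loop is the least obvious: the result may be stale by the time it is used, but the next iteration re-checks, so a wrong answer only costs one pass. A hedged kernel-style sketch of such a loop; poll_complete() is hypothetical, only single_task_running() and cond_resched() are real interfaces:

/* Hypothetical polling loop; only single_task_running() and
 * cond_resched() are real kernel interfaces here.
 */
while (!poll_complete()) {           /* hypothetical completion check */
        if (single_task_running())
                continue;            /* alone on this CPU: keep spinning */
        cond_resched();              /* runnable peers exist: yield once */
}
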
6180 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6181 index c2980e8733bc..77690b653ca9 100644
6182 --- a/kernel/sched/fair.c
6183 +++ b/kernel/sched/fair.c
6184 @@ -5126,18 +5126,21 @@ again:
6185 * entity, update_curr() will update its vruntime, otherwise
6186 * forget we've ever seen it.
6187 */
6188 - if (curr && curr->on_rq)
6189 - update_curr(cfs_rq);
6190 - else
6191 - curr = NULL;
6192 + if (curr) {
6193 + if (curr->on_rq)
6194 + update_curr(cfs_rq);
6195 + else
6196 + curr = NULL;
6197
6198 - /*
6199 - * This call to check_cfs_rq_runtime() will do the throttle and
6200 - * dequeue its entity in the parent(s). Therefore the 'simple'
6201 - * nr_running test will indeed be correct.
6202 - */
6203 - if (unlikely(check_cfs_rq_runtime(cfs_rq)))
6204 - goto simple;
6205 + /*
6206 + * This call to check_cfs_rq_runtime() will do the
6207 + * throttle and dequeue its entity in the parent(s).
6208 + * Therefore the 'simple' nr_running test will indeed
6209 + * be correct.
6210 + */
6211 + if (unlikely(check_cfs_rq_runtime(cfs_rq)))
6212 + goto simple;
6213 + }
6214
6215 se = pick_next_entity(cfs_rq, curr);
6216 cfs_rq = group_cfs_rq(se);
6217 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
6218 index e0e129993958..aa1f059de4f7 100644
6219 --- a/kernel/sched/sched.h
6220 +++ b/kernel/sched/sched.h
6221 @@ -1068,9 +1068,10 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
6222 * After ->on_cpu is cleared, the task can be moved to a different CPU.
6223 * We must ensure this doesn't happen until the switch is completely
6224 * finished.
6225 + *
6226 + * Pairs with the control dependency and rmb in try_to_wake_up().
6227 */
6228 - smp_wmb();
6229 - prev->on_cpu = 0;
6230 + smp_store_release(&prev->on_cpu, 0);
6231 #endif
6232 #ifdef CONFIG_DEBUG_SPINLOCK
6233 /* this is a valid case when another task releases the spinlock */
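
Replacing smp_wmb() with smp_store_release() gives the on_cpu write the ordering the new comment asks for: everything the outgoing path did before it, including the read of prev->state, is ordered before other CPUs can observe on_cpu == 0, and the waker's acquire-side read in try_to_wake_up() completes the pairing. The same idiom in portable C11 atomics, as a runnable two-thread sketch (an analogue, not scheduler code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int task_state;               /* plain data, like prev->state */
static atomic_int on_cpu;

static void *switching_out(void *arg)
{
        task_state = 42;             /* every prior access... */
        atomic_store_explicit(&on_cpu, 0,
                              memory_order_release); /* ...ordered before this */
        return NULL;
}

static void *waker(void *arg)
{
        while (atomic_load_explicit(&on_cpu, memory_order_acquire))
                ;                    /* spin until the release is visible */
        printf("state=%d\n", task_state);  /* guaranteed to print 42 */
        return NULL;
}

int main(void)
{
        pthread_t out, in;

        atomic_init(&on_cpu, 1);
        pthread_create(&out, NULL, switching_out, NULL);
        pthread_create(&in, NULL, waker, NULL);
        pthread_join(out, NULL);
        pthread_join(in, NULL);
        return 0;
}
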
6234 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
6235 index 946acb72179f..414d9df94724 100644
6236 --- a/kernel/time/timekeeping.c
6237 +++ b/kernel/time/timekeeping.c
6238 @@ -1615,7 +1615,7 @@ static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
6239 negative = (tick_error < 0);
6240
6241 /* Sort out the magnitude of the correction */
6242 - tick_error = abs(tick_error);
6243 + tick_error = abs64(tick_error);
6244 for (adj = 0; tick_error > interval; adj++)
6245 tick_error >>= 1;
6246
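
The one-character timekeeping change matters on 32-bit builds: the abs() macro of this era evaluated its argument at long width, so a 64-bit tick_error was truncated to 32 bits before the sign was stripped, and the frequency adjustment could be computed from garbage. abs64() keeps the full width, as llabs() does below. A runnable demonstration of the truncation (userspace model, not the kernel macro):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int64_t tick_error = -4294967301LL;   /* -(2^32 + 5): needs 64 bits */

        /* Model of the old 32-bit abs(): truncate first, then negate. */
        int32_t truncated = (int32_t)tick_error;
        printf("32-bit abs(): %d\n", abs(truncated));      /* prints 5 */

        /* Model of abs64(): the magnitude is taken at full width. */
        printf("64-bit abs(): %lld\n", llabs(tick_error)); /* 4294967301 */
        return 0;
}
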
6247 diff --git a/lib/iommu-common.c b/lib/iommu-common.c
6248 index df30632f0bef..4fdeee02e0a9 100644
6249 --- a/lib/iommu-common.c
6250 +++ b/lib/iommu-common.c
6251 @@ -21,8 +21,7 @@ static DEFINE_PER_CPU(unsigned int, iommu_hash_common);
6252
6253 static inline bool need_flush(struct iommu_map_table *iommu)
6254 {
6255 - return (iommu->lazy_flush != NULL &&
6256 - (iommu->flags & IOMMU_NEED_FLUSH) != 0);
6257 + return ((iommu->flags & IOMMU_NEED_FLUSH) != 0);
6258 }
6259
6260 static inline void set_flush(struct iommu_map_table *iommu)
6261 @@ -211,7 +210,8 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
6262 goto bail;
6263 }
6264 }
6265 - if (n < pool->hint || need_flush(iommu)) {
6266 + if (iommu->lazy_flush &&
6267 + (n < pool->hint || need_flush(iommu))) {
6268 clear_flush(iommu);
6269 iommu->lazy_flush(iommu);
6270 }
6271 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
6272 index 8c4c1f9f9a9a..a6ff935476e3 100644
6273 --- a/mm/hugetlb.c
6274 +++ b/mm/hugetlb.c
6275 @@ -2897,6 +2897,14 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
6276 continue;
6277
6278 /*
6279 + * Shared VMAs have their own reserves and do not affect
6280 + * MAP_PRIVATE accounting but it is possible that a shared
6281 + * VMA is using the same page so check and skip such VMAs.
6282 + */
6283 + if (iter_vma->vm_flags & VM_MAYSHARE)
6284 + continue;
6285 +
6286 + /*
6287 * Unmap the page from other VMAs without their own reserves.
6288 * They get marked to be SIGKILLed if they fault in these
6289 * areas. This is because a future no-page fault on this VMA
6290 diff --git a/mm/migrate.c b/mm/migrate.c
6291 index f53838fe3dfe..2c37b1a44a8c 100644
6292 --- a/mm/migrate.c
6293 +++ b/mm/migrate.c
6294 @@ -1062,7 +1062,7 @@ out:
6295 if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
6296 put_new_page(new_hpage, private);
6297 else
6298 - put_page(new_hpage);
6299 + putback_active_hugepage(new_hpage);
6300
6301 if (result) {
6302 if (rc)
6303 diff --git a/mm/slab.c b/mm/slab.c
6304 index 3dd2d1ff9d5d..330039fdcf18 100644
6305 --- a/mm/slab.c
6306 +++ b/mm/slab.c
6307 @@ -2189,9 +2189,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
6308 size += BYTES_PER_WORD;
6309 }
6310 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
6311 - if (size >= kmalloc_size(INDEX_NODE + 1)
6312 - && cachep->object_size > cache_line_size()
6313 - && ALIGN(size, cachep->align) < PAGE_SIZE) {
6314 + /*
6315 +	 * To activate debug pagealloc, off-slab management is a necessary
6316 +	 * requirement. In the early phase of initialization, small sized
6317 +	 * slabs don't get initialized, so it would not be possible. Hence we
6318 +	 * check size >= 256, which guarantees that all the necessary small
6319 +	 * sized slabs are initialized in the current initialization sequence.
6320 + */
6321 + if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
6322 + size >= 256 && cachep->object_size > cache_line_size() &&
6323 + ALIGN(size, cachep->align) < PAGE_SIZE) {
6324 cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
6325 size = PAGE_SIZE;
6326 }
6327 diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
6328 index aad022dd15df..95b3167cf036 100644
6329 --- a/net/batman-adv/distributed-arp-table.c
6330 +++ b/net/batman-adv/distributed-arp-table.c
6331 @@ -15,6 +15,7 @@
6332 * along with this program; if not, see <http://www.gnu.org/licenses/>.
6333 */
6334
6335 +#include <linux/bitops.h>
6336 #include <linux/if_ether.h>
6337 #include <linux/if_arp.h>
6338 #include <linux/if_vlan.h>
6339 @@ -422,7 +423,7 @@ static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
6340 int j;
6341
6342 /* check if orig node candidate is running DAT */
6343 - if (!(candidate->capabilities & BATADV_ORIG_CAPA_HAS_DAT))
6344 + if (!test_bit(BATADV_ORIG_CAPA_HAS_DAT, &candidate->capabilities))
6345 goto out;
6346
6347 /* Check if this node has already been selected... */
6348 @@ -682,9 +683,9 @@ static void batadv_dat_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
6349 uint16_t tvlv_value_len)
6350 {
6351 if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
6352 - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_DAT;
6353 + clear_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
6354 else
6355 - orig->capabilities |= BATADV_ORIG_CAPA_HAS_DAT;
6356 + set_bit(BATADV_ORIG_CAPA_HAS_DAT, &orig->capabilities);
6357 }
6358
6359 /**
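
The move from |=, &= and direct mask tests to set_bit()/clear_bit()/test_bit() in the batman-adv hunks is an atomicity fix: OGM handlers for different TVLV types can run concurrently, and a plain read-modify-write on the shared capabilities field lets one CPU overwrite another's bit. The kernel bitops are atomic RMWs on an unsigned long, which is also why the field's type changes later in this patch. A runnable C11 model of the lost-update race next to its atomic counterpart (illustrative, not batman-adv code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static unsigned long plain_caps;     /* |= and &= are non-atomic RMWs    */
static atomic_ulong atomic_caps;     /* fetch_or/fetch_and model *_bit() */

static void *worker(void *arg)
{
        unsigned long bit = arg ? 2UL : 1UL;   /* each thread owns one bit */

        for (int i = 0; i < ITERS; i++) {
                plain_caps |= bit;             /* racy: can clobber the   */
                plain_caps &= ~bit;            /* other thread's update   */
                atomic_fetch_or(&atomic_caps, bit);    /* like set_bit()   */
                atomic_fetch_and(&atomic_caps, ~bit);  /* like clear_bit() */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, (void *)1);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* atomic_caps always ends up 0; plain_caps may not. */
        printf("plain=%#lx atomic=%#lx\n",
               plain_caps, atomic_load(&atomic_caps));
        return 0;
}
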
6360 diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
6361 index b24e4bb64fb5..8653c1a506f4 100644
6362 --- a/net/batman-adv/multicast.c
6363 +++ b/net/batman-adv/multicast.c
6364 @@ -15,6 +15,8 @@
6365 * along with this program; if not, see <http://www.gnu.org/licenses/>.
6366 */
6367
6368 +#include <linux/bitops.h>
6369 +#include <linux/bug.h>
6370 #include "main.h"
6371 #include "multicast.h"
6372 #include "originator.h"
6373 @@ -565,19 +567,26 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
6374 *
6375 * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
6376 * orig, has toggled then this method updates counter and list accordingly.
6377 + *
6378 + * Caller needs to hold orig->mcast_handler_lock.
6379 */
6380 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
6381 struct batadv_orig_node *orig,
6382 uint8_t mcast_flags)
6383 {
6384 + struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
6385 + struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
6386 +
6387 /* switched from flag unset to set */
6388 if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
6389 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
6390 atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
6391
6392 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6393 - hlist_add_head_rcu(&orig->mcast_want_all_unsnoopables_node,
6394 - &bat_priv->mcast.want_all_unsnoopables_list);
6395 + /* flag checks above + mcast_handler_lock prevents this */
6396 + WARN_ON(!hlist_unhashed(node));
6397 +
6398 + hlist_add_head_rcu(node, head);
6399 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6400 /* switched from flag set to unset */
6401 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
6402 @@ -585,7 +594,10 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
6403 atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
6404
6405 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6406 - hlist_del_rcu(&orig->mcast_want_all_unsnoopables_node);
6407 + /* flag checks above + mcast_handler_lock prevents this */
6408 + WARN_ON(hlist_unhashed(node));
6409 +
6410 + hlist_del_init_rcu(node);
6411 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6412 }
6413 }
6414 @@ -598,19 +610,26 @@ static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
6415 *
6416 * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
6417 * toggled then this method updates counter and list accordingly.
6418 + *
6419 + * Caller needs to hold orig->mcast_handler_lock.
6420 */
6421 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
6422 struct batadv_orig_node *orig,
6423 uint8_t mcast_flags)
6424 {
6425 + struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
6426 + struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
6427 +
6428 /* switched from flag unset to set */
6429 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
6430 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
6431 atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
6432
6433 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6434 - hlist_add_head_rcu(&orig->mcast_want_all_ipv4_node,
6435 - &bat_priv->mcast.want_all_ipv4_list);
6436 + /* flag checks above + mcast_handler_lock prevents this */
6437 + WARN_ON(!hlist_unhashed(node));
6438 +
6439 + hlist_add_head_rcu(node, head);
6440 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6441 /* switched from flag set to unset */
6442 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
6443 @@ -618,7 +637,10 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
6444 atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
6445
6446 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6447 - hlist_del_rcu(&orig->mcast_want_all_ipv4_node);
6448 + /* flag checks above + mcast_handler_lock prevents this */
6449 + WARN_ON(hlist_unhashed(node));
6450 +
6451 + hlist_del_init_rcu(node);
6452 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6453 }
6454 }
6455 @@ -631,19 +653,26 @@ static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
6456 *
6457 * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
6458 * toggled then this method updates counter and list accordingly.
6459 + *
6460 + * Caller needs to hold orig->mcast_handler_lock.
6461 */
6462 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
6463 struct batadv_orig_node *orig,
6464 uint8_t mcast_flags)
6465 {
6466 + struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
6467 + struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
6468 +
6469 /* switched from flag unset to set */
6470 if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
6471 !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
6472 atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
6473
6474 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6475 - hlist_add_head_rcu(&orig->mcast_want_all_ipv6_node,
6476 - &bat_priv->mcast.want_all_ipv6_list);
6477 + /* flag checks above + mcast_handler_lock prevents this */
6478 + WARN_ON(!hlist_unhashed(node));
6479 +
6480 + hlist_add_head_rcu(node, head);
6481 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6482 /* switched from flag set to unset */
6483 } else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
6484 @@ -651,7 +680,10 @@ static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
6485 atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
6486
6487 spin_lock_bh(&bat_priv->mcast.want_lists_lock);
6488 - hlist_del_rcu(&orig->mcast_want_all_ipv6_node);
6489 + /* flag checks above + mcast_handler_lock prevents this */
6490 + WARN_ON(hlist_unhashed(node));
6491 +
6492 + hlist_del_init_rcu(node);
6493 spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
6494 }
6495 }
6496 @@ -674,39 +706,42 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
6497 uint8_t mcast_flags = BATADV_NO_FLAGS;
6498 bool orig_initialized;
6499
6500 - orig_initialized = orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST;
6501 + if (orig_mcast_enabled && tvlv_value &&
6502 + (tvlv_value_len >= sizeof(mcast_flags)))
6503 + mcast_flags = *(uint8_t *)tvlv_value;
6504 +
6505 + spin_lock_bh(&orig->mcast_handler_lock);
6506 + orig_initialized = test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
6507 + &orig->capa_initialized);
6508
6509 /* If mcast support is turned on decrease the disabled mcast node
6510 * counter only if we had increased it for this node before. If this
6511 * is a completely new orig_node no need to decrease the counter.
6512 */
6513 if (orig_mcast_enabled &&
6514 - !(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) {
6515 + !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
6516 if (orig_initialized)
6517 atomic_dec(&bat_priv->mcast.num_disabled);
6518 - orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
6519 + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
6520 /* If mcast support is being switched off or if this is an initial
6521 * OGM without mcast support then increase the disabled mcast
6522 * node counter.
6523 */
6524 } else if (!orig_mcast_enabled &&
6525 - (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
6526 + (test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) ||
6527 !orig_initialized)) {
6528 atomic_inc(&bat_priv->mcast.num_disabled);
6529 - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
6530 + clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
6531 }
6532
6533 - orig->capa_initialized |= BATADV_ORIG_CAPA_HAS_MCAST;
6534 -
6535 - if (orig_mcast_enabled && tvlv_value &&
6536 - (tvlv_value_len >= sizeof(mcast_flags)))
6537 - mcast_flags = *(uint8_t *)tvlv_value;
6538 + set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
6539
6540 batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
6541 batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
6542 batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
6543
6544 orig->mcast_flags = mcast_flags;
6545 + spin_unlock_bh(&orig->mcast_handler_lock);
6546 }
6547
6548 /**
6549 @@ -740,11 +775,15 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
6550 {
6551 struct batadv_priv *bat_priv = orig->bat_priv;
6552
6553 - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
6554 - orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
6555 + spin_lock_bh(&orig->mcast_handler_lock);
6556 +
6557 + if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities) &&
6558 + test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized))
6559 atomic_dec(&bat_priv->mcast.num_disabled);
6560
6561 batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
6562 batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
6563 batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
6564 +
6565 + spin_unlock_bh(&orig->mcast_handler_lock);
6566 }
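
The WARN_ON() sanity checks in the multicast hunks only work because the patch also switches to hlist_del_init_rcu() and adds INIT_HLIST_NODE() calls in originator.c: hlist_unhashed() tests node->pprev, which plain hlist_del_rcu() leaves poisoned rather than NULL, so only the _init variant makes "not on a list" observable again after a removal. A kernel-style sketch of the state transitions (paraphrasing include/linux/list.h and rculist.h, not a verbatim excerpt):

static void node_state_sketch(struct hlist_head *h, struct hlist_node *n)
{
        INIT_HLIST_NODE(n);          /* pprev = NULL: hlist_unhashed() true */
        WARN_ON(!hlist_unhashed(n)); /* never fires, as in the patch */

        hlist_add_head_rcu(n, h);    /* pprev = &h->first: "hashed" */

        hlist_del_init_rcu(n);       /* pprev = NULL again, so a later
                                      * WARN_ON(hlist_unhashed(n)) before a
                                      * second delete catches double removal.
                                      * Plain hlist_del_rcu() would leave
                                      * pprev = LIST_POISON2 and the check
                                      * could never work. */
}
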
6567 diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
6568 index 127cc4d7380a..a449195c5b2b 100644
6569 --- a/net/batman-adv/network-coding.c
6570 +++ b/net/batman-adv/network-coding.c
6571 @@ -15,6 +15,7 @@
6572 * along with this program; if not, see <http://www.gnu.org/licenses/>.
6573 */
6574
6575 +#include <linux/bitops.h>
6576 #include <linux/debugfs.h>
6577
6578 #include "main.h"
6579 @@ -105,9 +106,9 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
6580 uint16_t tvlv_value_len)
6581 {
6582 if (flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND)
6583 - orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_NC;
6584 + clear_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
6585 else
6586 - orig->capabilities |= BATADV_ORIG_CAPA_HAS_NC;
6587 + set_bit(BATADV_ORIG_CAPA_HAS_NC, &orig->capabilities);
6588 }
6589
6590 /**
6591 @@ -871,7 +872,7 @@ void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
6592 goto out;
6593
6594 /* check if orig node is network coding enabled */
6595 - if (!(orig_node->capabilities & BATADV_ORIG_CAPA_HAS_NC))
6596 + if (!test_bit(BATADV_ORIG_CAPA_HAS_NC, &orig_node->capabilities))
6597 goto out;
6598
6599 /* accept ogms from 'good' neighbors and single hop neighbors */
6600 diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
6601 index 90e805aba379..dfae97408628 100644
6602 --- a/net/batman-adv/originator.c
6603 +++ b/net/batman-adv/originator.c
6604 @@ -678,8 +678,13 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
6605 orig_node->last_seen = jiffies;
6606 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
6607 orig_node->bcast_seqno_reset = reset_time;
6608 +
6609 #ifdef CONFIG_BATMAN_ADV_MCAST
6610 orig_node->mcast_flags = BATADV_NO_FLAGS;
6611 + INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
6612 + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
6613 + INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
6614 + spin_lock_init(&orig_node->mcast_handler_lock);
6615 #endif
6616
6617 /* create a vlan object for the "untagged" LAN */
6618 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
6619 index 5ec31d7de24f..a0b1b861b968 100644
6620 --- a/net/batman-adv/soft-interface.c
6621 +++ b/net/batman-adv/soft-interface.c
6622 @@ -172,6 +172,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
6623 int gw_mode;
6624 enum batadv_forw_mode forw_mode;
6625 struct batadv_orig_node *mcast_single_orig = NULL;
6626 + int network_offset = ETH_HLEN;
6627
6628 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
6629 goto dropped;
6630 @@ -184,14 +185,18 @@ static int batadv_interface_tx(struct sk_buff *skb,
6631 case ETH_P_8021Q:
6632 vhdr = vlan_eth_hdr(skb);
6633
6634 - if (vhdr->h_vlan_encapsulated_proto != ethertype)
6635 + if (vhdr->h_vlan_encapsulated_proto != ethertype) {
6636 + network_offset += VLAN_HLEN;
6637 break;
6638 + }
6639
6640 /* fall through */
6641 case ETH_P_BATMAN:
6642 goto dropped;
6643 }
6644
6645 + skb_set_network_header(skb, network_offset);
6646 +
6647 if (batadv_bla_tx(bat_priv, skb, vid))
6648 goto dropped;
6649
6650 @@ -449,6 +454,9 @@ out:
6651 */
6652 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
6653 {
6654 + if (!vlan)
6655 + return;
6656 +
6657 if (atomic_dec_and_test(&vlan->refcount)) {
6658 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
6659 hlist_del_rcu(&vlan->list);
6660 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
6661 index 07b263a437d1..4f2a9d2c56db 100644
6662 --- a/net/batman-adv/translation-table.c
6663 +++ b/net/batman-adv/translation-table.c
6664 @@ -15,6 +15,7 @@
6665 * along with this program; if not, see <http://www.gnu.org/licenses/>.
6666 */
6667
6668 +#include <linux/bitops.h>
6669 #include "main.h"
6670 #include "translation-table.h"
6671 #include "soft-interface.h"
6672 @@ -575,6 +576,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
6673
6674 /* increase the refcounter of the related vlan */
6675 vlan = batadv_softif_vlan_get(bat_priv, vid);
6676 + if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
6677 + addr, BATADV_PRINT_VID(vid)))
6678 + goto out;
6679
6680 batadv_dbg(BATADV_DBG_TT, bat_priv,
6681 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
6682 @@ -1015,6 +1019,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
6683 struct batadv_tt_local_entry *tt_local_entry;
6684 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
6685 struct batadv_softif_vlan *vlan;
6686 + void *tt_entry_exists;
6687
6688 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
6689 if (!tt_local_entry)
6690 @@ -1042,11 +1047,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
6691 * immediately purge it
6692 */
6693 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
6694 - hlist_del_rcu(&tt_local_entry->common.hash_entry);
6695 +
6696 + tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
6697 + batadv_compare_tt,
6698 + batadv_choose_tt,
6699 + &tt_local_entry->common);
6700 + if (!tt_entry_exists)
6701 + goto out;
6702 +
6703 + /* extra call to free the local tt entry */
6704 batadv_tt_local_entry_free_ref(tt_local_entry);
6705
6706 /* decrease the reference held for this vlan */
6707 vlan = batadv_softif_vlan_get(bat_priv, vid);
6708 + if (!vlan)
6709 + goto out;
6710 +
6711 batadv_softif_vlan_free_ref(vlan);
6712 batadv_softif_vlan_free_ref(vlan);
6713
6714 @@ -1147,8 +1163,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
6715 /* decrease the reference held for this vlan */
6716 vlan = batadv_softif_vlan_get(bat_priv,
6717 tt_common_entry->vid);
6718 - batadv_softif_vlan_free_ref(vlan);
6719 - batadv_softif_vlan_free_ref(vlan);
6720 + if (vlan) {
6721 + batadv_softif_vlan_free_ref(vlan);
6722 + batadv_softif_vlan_free_ref(vlan);
6723 + }
6724
6725 batadv_tt_local_entry_free_ref(tt_local);
6726 }
6727 @@ -1843,7 +1861,7 @@ void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
6728 }
6729 spin_unlock_bh(list_lock);
6730 }
6731 - orig_node->capa_initialized &= ~BATADV_ORIG_CAPA_HAS_TT;
6732 + clear_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
6733 }
6734
6735 static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
6736 @@ -2802,7 +2820,7 @@ static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
6737 return;
6738 }
6739 }
6740 - orig_node->capa_initialized |= BATADV_ORIG_CAPA_HAS_TT;
6741 + set_bit(BATADV_ORIG_CAPA_HAS_TT, &orig_node->capa_initialized);
6742 }
6743
6744 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
6745 @@ -3188,8 +3206,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
6746
6747 /* decrease the reference held for this vlan */
6748 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
6749 - batadv_softif_vlan_free_ref(vlan);
6750 - batadv_softif_vlan_free_ref(vlan);
6751 + if (vlan) {
6752 + batadv_softif_vlan_free_ref(vlan);
6753 + batadv_softif_vlan_free_ref(vlan);
6754 + }
6755
6756 batadv_tt_local_entry_free_ref(tt_local);
6757 }
6758 @@ -3302,7 +3322,8 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
6759 bool has_tt_init;
6760
6761 tt_vlan = (struct batadv_tvlv_tt_vlan_data *)tt_buff;
6762 - has_tt_init = orig_node->capa_initialized & BATADV_ORIG_CAPA_HAS_TT;
6763 + has_tt_init = test_bit(BATADV_ORIG_CAPA_HAS_TT,
6764 + &orig_node->capa_initialized);
6765
6766 /* orig table not initialised AND first diff is in the OGM OR the ttvn
6767 * increased by one -> we can apply the attached changes
6768 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
6769 index 9398c3fb4174..26c37be2aa05 100644
6770 --- a/net/batman-adv/types.h
6771 +++ b/net/batman-adv/types.h
6772 @@ -204,6 +204,7 @@ struct batadv_orig_bat_iv {
6773 * @batadv_dat_addr_t: address of the orig node in the distributed hash
6774 * @last_seen: time when last packet from this node was received
6775 * @bcast_seqno_reset: time when the broadcast seqno window was reset
6776 + * @mcast_handler_lock: synchronizes mcast-capability and -flag changes
6777 * @mcast_flags: multicast flags announced by the orig node
6778 * @mcast_want_all_unsnoop_node: a list node for the
6779 * mcast.want_all_unsnoopables list
6780 @@ -251,13 +252,15 @@ struct batadv_orig_node {
6781 unsigned long last_seen;
6782 unsigned long bcast_seqno_reset;
6783 #ifdef CONFIG_BATMAN_ADV_MCAST
6784 + /* synchronizes mcast tvlv specific orig changes */
6785 + spinlock_t mcast_handler_lock;
6786 uint8_t mcast_flags;
6787 struct hlist_node mcast_want_all_unsnoopables_node;
6788 struct hlist_node mcast_want_all_ipv4_node;
6789 struct hlist_node mcast_want_all_ipv6_node;
6790 #endif
6791 - uint8_t capabilities;
6792 - uint8_t capa_initialized;
6793 + unsigned long capabilities;
6794 + unsigned long capa_initialized;
6795 atomic_t last_ttvn;
6796 unsigned char *tt_buff;
6797 int16_t tt_buff_len;
6798 @@ -296,10 +299,10 @@ struct batadv_orig_node {
6799 * (= orig node announces a tvlv of type BATADV_TVLV_MCAST)
6800 */
6801 enum batadv_orig_capabilities {
6802 - BATADV_ORIG_CAPA_HAS_DAT = BIT(0),
6803 - BATADV_ORIG_CAPA_HAS_NC = BIT(1),
6804 - BATADV_ORIG_CAPA_HAS_TT = BIT(2),
6805 - BATADV_ORIG_CAPA_HAS_MCAST = BIT(3),
6806 + BATADV_ORIG_CAPA_HAS_DAT,
6807 + BATADV_ORIG_CAPA_HAS_NC,
6808 + BATADV_ORIG_CAPA_HAS_TT,
6809 + BATADV_ORIG_CAPA_HAS_MCAST,
6810 };
6811
6812 /**
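
The types.h hunk changes the meaning of the enum values, not just the field width: BIT(0)..BIT(3) were masks for & and |, while set_bit()/test_bit()/clear_bit() take a bit index, so the constants become plain 0..3. Passing the old BIT(0) == 1 to test_bit() would silently test bit 1. A runnable illustration with a simplified single-word test_bit() stand-in (not the kernel implementation):

#include <stdio.h>

enum { CAPA_HAS_DAT_MASK = 1u << 0 };  /* old style: a mask   */
enum { CAPA_HAS_DAT_BIT  = 0 };        /* new style: an index */

/* Simplified single-word stand-in for the kernel's test_bit(). */
static int test_bit(unsigned int nr, const unsigned long *addr)
{
        return (*addr >> nr) & 1UL;
}

int main(void)
{
        unsigned long caps = 1UL << CAPA_HAS_DAT_BIT;  /* what set_bit() does */

        printf("by index: %d\n", test_bit(CAPA_HAS_DAT_BIT, &caps));  /* 1 */
        printf("by mask : %d\n", test_bit(CAPA_HAS_DAT_MASK, &caps)); /* 0:
                                                      * tests bit 1, the bug */
        return 0;
}
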
6813 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
6814 index 7b815bcc8c9b..69ad5091e2ce 100644
6815 --- a/net/bluetooth/smp.c
6816 +++ b/net/bluetooth/smp.c
6817 @@ -2294,12 +2294,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
6818 if (!conn)
6819 return 1;
6820
6821 - chan = conn->smp;
6822 - if (!chan) {
6823 - BT_ERR("SMP security requested but not available");
6824 - return 1;
6825 - }
6826 -
6827 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
6828 return 1;
6829
6830 @@ -2313,6 +2307,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
6831 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
6832 return 0;
6833
6834 + chan = conn->smp;
6835 + if (!chan) {
6836 + BT_ERR("SMP security requested but not available");
6837 + return 1;
6838 + }
6839 +
6840 l2cap_chan_lock(chan);
6841
6842 /* If SMP is already in progress ignore this request */
6843 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
6844 index b27fc401c6a9..e664706b350c 100644
6845 --- a/net/ipv4/inet_connection_sock.c
6846 +++ b/net/ipv4/inet_connection_sock.c
6847 @@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
6848 }
6849
6850 spin_unlock(&queue->syn_wait_lock);
6851 - if (del_timer_sync(&req->rsk_timer))
6852 + if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
6853 reqsk_put(req);
6854 return found;
6855 }
6856 diff --git a/net/netfilter/core.c b/net/netfilter/core.c
6857 index e6163017c42d..5d0c6fd59475 100644
6858 --- a/net/netfilter/core.c
6859 +++ b/net/netfilter/core.c
6860 @@ -89,6 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
6861 static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
6862 #endif
6863 synchronize_net();
6864 + nf_queue_nf_hook_drop(reg);
6865 }
6866 EXPORT_SYMBOL(nf_unregister_hook);
6867
6868 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
6869 index 5d2b806a862e..38fbc194b9cb 100644
6870 --- a/net/netfilter/ipvs/ip_vs_core.c
6871 +++ b/net/netfilter/ipvs/ip_vs_core.c
6872 @@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
6873 * return *ignored=0 i.e. ICMP and NF_DROP
6874 */
6875 sched = rcu_dereference(svc->scheduler);
6876 - dest = sched->schedule(svc, skb, iph);
6877 + if (sched) {
6878 + /* read svc->sched_data after svc->scheduler */
6879 + smp_rmb();
6880 + dest = sched->schedule(svc, skb, iph);
6881 + } else {
6882 + dest = NULL;
6883 + }
6884 if (!dest) {
6885 IP_VS_DBG(1, "p-schedule: no dest found.\n");
6886 kfree(param.pe_data);
6887 @@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
6888 }
6889
6890 sched = rcu_dereference(svc->scheduler);
6891 - dest = sched->schedule(svc, skb, iph);
6892 + if (sched) {
6893 + /* read svc->sched_data after svc->scheduler */
6894 + smp_rmb();
6895 + dest = sched->schedule(svc, skb, iph);
6896 + } else {
6897 + dest = NULL;
6898 + }
6899 if (dest == NULL) {
6900 IP_VS_DBG(1, "Schedule: no dest found.\n");
6901 return NULL;
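
The NULL checks and the smp_rmb() added to the IPVS schedule paths go together: a service may now legitimately have no scheduler ("none"), and svc->sched_data is a sibling field that is not reached through the scheduler pointer, so the address-dependency ordering rcu_dereference() provides does not cover it. The barrier orders the sched_data read after the pointer read, pairing with the binder publishing sched_data first. A kernel-style sketch of the intended ordering, assuming the writer looks roughly like ip_vs_bind_scheduler() (illustrative, not a verbatim excerpt):

/* writer side (bind): sched_data is initialized before the pointer
 * is published; rcu_assign_pointer() has release semantics.
 */
svc->sched_data = data;
rcu_assign_pointer(svc->scheduler, sched);

/* reader side (packet path), as in the hunks above */
sched = rcu_dereference(svc->scheduler);
if (sched) {
        smp_rmb();      /* order the coming svc->sched_data read after
                         * the svc->scheduler read; the pointer's own
                         * dependency ordering does not cover siblings */
        dest = sched->schedule(svc, skb, iph);  /* uses svc->sched_data */
} else {
        dest = NULL;    /* "none" scheduler: no destination */
}
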
6902 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
6903 index 285eae3a1454..24c554201a76 100644
6904 --- a/net/netfilter/ipvs/ip_vs_ctl.c
6905 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
6906 @@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
6907 __ip_vs_dst_cache_reset(dest);
6908 spin_unlock_bh(&dest->dst_lock);
6909
6910 - sched = rcu_dereference_protected(svc->scheduler, 1);
6911 if (add) {
6912 ip_vs_start_estimator(svc->net, &dest->stats);
6913 list_add_rcu(&dest->n_list, &svc->destinations);
6914 svc->num_dests++;
6915 - if (sched->add_dest)
6916 + sched = rcu_dereference_protected(svc->scheduler, 1);
6917 + if (sched && sched->add_dest)
6918 sched->add_dest(svc, dest);
6919 } else {
6920 - if (sched->upd_dest)
6921 + sched = rcu_dereference_protected(svc->scheduler, 1);
6922 + if (sched && sched->upd_dest)
6923 sched->upd_dest(svc, dest);
6924 }
6925 }
6926 @@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
6927 struct ip_vs_scheduler *sched;
6928
6929 sched = rcu_dereference_protected(svc->scheduler, 1);
6930 - if (sched->del_dest)
6931 + if (sched && sched->del_dest)
6932 sched->del_dest(svc, dest);
6933 }
6934 }
6935 @@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
6936 ip_vs_use_count_inc();
6937
6938 /* Lookup the scheduler by 'u->sched_name' */
6939 - sched = ip_vs_scheduler_get(u->sched_name);
6940 - if (sched == NULL) {
6941 - pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
6942 - ret = -ENOENT;
6943 - goto out_err;
6944 + if (strcmp(u->sched_name, "none")) {
6945 + sched = ip_vs_scheduler_get(u->sched_name);
6946 + if (!sched) {
6947 + pr_info("Scheduler module ip_vs_%s not found\n",
6948 + u->sched_name);
6949 + ret = -ENOENT;
6950 + goto out_err;
6951 + }
6952 }
6953
6954 if (u->pe_name && *u->pe_name) {
6955 @@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
6956 spin_lock_init(&svc->stats.lock);
6957
6958 /* Bind the scheduler */
6959 - ret = ip_vs_bind_scheduler(svc, sched);
6960 - if (ret)
6961 - goto out_err;
6962 - sched = NULL;
6963 + if (sched) {
6964 + ret = ip_vs_bind_scheduler(svc, sched);
6965 + if (ret)
6966 + goto out_err;
6967 + sched = NULL;
6968 + }
6969
6970 /* Bind the ct retriever */
6971 RCU_INIT_POINTER(svc->pe, pe);
6972 @@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
6973 static int
6974 ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
6975 {
6976 - struct ip_vs_scheduler *sched, *old_sched;
6977 + struct ip_vs_scheduler *sched = NULL, *old_sched;
6978 struct ip_vs_pe *pe = NULL, *old_pe = NULL;
6979 int ret = 0;
6980
6981 /*
6982 * Lookup the scheduler, by 'u->sched_name'
6983 */
6984 - sched = ip_vs_scheduler_get(u->sched_name);
6985 - if (sched == NULL) {
6986 - pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
6987 - return -ENOENT;
6988 + if (strcmp(u->sched_name, "none")) {
6989 + sched = ip_vs_scheduler_get(u->sched_name);
6990 + if (!sched) {
6991 + pr_info("Scheduler module ip_vs_%s not found\n",
6992 + u->sched_name);
6993 + return -ENOENT;
6994 + }
6995 }
6996 old_sched = sched;
6997
6998 @@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
6999
7000 old_sched = rcu_dereference_protected(svc->scheduler, 1);
7001 if (sched != old_sched) {
7002 + if (old_sched) {
7003 + ip_vs_unbind_scheduler(svc, old_sched);
7004 + RCU_INIT_POINTER(svc->scheduler, NULL);
7005 + /* Wait all svc->sched_data users */
7006 + synchronize_rcu();
7007 + }
7008 /* Bind the new scheduler */
7009 - ret = ip_vs_bind_scheduler(svc, sched);
7010 - if (ret) {
7011 - old_sched = sched;
7012 - goto out;
7013 + if (sched) {
7014 + ret = ip_vs_bind_scheduler(svc, sched);
7015 + if (ret) {
7016 + ip_vs_scheduler_put(sched);
7017 + goto out;
7018 + }
7019 }
7020 - /* Unbind the old scheduler on success */
7021 - ip_vs_unbind_scheduler(svc, old_sched);
7022 }
7023
7024 /*
7025 @@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
7026 const struct ip_vs_iter *iter = seq->private;
7027 const struct ip_vs_dest *dest;
7028 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
7029 + char *sched_name = sched ? sched->name : "none";
7030
7031 if (iter->table == ip_vs_svc_table) {
7032 #ifdef CONFIG_IP_VS_IPV6
7033 @@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
7034 ip_vs_proto_name(svc->protocol),
7035 &svc->addr.in6,
7036 ntohs(svc->port),
7037 - sched->name);
7038 + sched_name);
7039 else
7040 #endif
7041 seq_printf(seq, "%s %08X:%04X %s %s ",
7042 ip_vs_proto_name(svc->protocol),
7043 ntohl(svc->addr.ip),
7044 ntohs(svc->port),
7045 - sched->name,
7046 + sched_name,
7047 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
7048 } else {
7049 seq_printf(seq, "FWM %08X %s %s",
7050 - svc->fwmark, sched->name,
7051 + svc->fwmark, sched_name,
7052 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
7053 }
7054
7055 @@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
7056 {
7057 struct ip_vs_scheduler *sched;
7058 struct ip_vs_kstats kstats;
7059 + char *sched_name;
7060
7061 sched = rcu_dereference_protected(src->scheduler, 1);
7062 + sched_name = sched ? sched->name : "none";
7063 dst->protocol = src->protocol;
7064 dst->addr = src->addr.ip;
7065 dst->port = src->port;
7066 dst->fwmark = src->fwmark;
7067 - strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
7068 + strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
7069 dst->flags = src->flags;
7070 dst->timeout = src->timeout / HZ;
7071 dst->netmask = src->netmask;
7072 @@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
7073 struct ip_vs_flags flags = { .flags = svc->flags,
7074 .mask = ~0 };
7075 struct ip_vs_kstats kstats;
7076 + char *sched_name;
7077
7078 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
7079 if (!nl_service)
7080 @@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
7081 }
7082
7083 sched = rcu_dereference_protected(svc->scheduler, 1);
7084 + sched_name = sched ? sched->name : "none";
7085 pe = rcu_dereference_protected(svc->pe, 1);
7086 - if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
7087 + if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
7088 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
7089 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
7090 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
7091 diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
7092 index 199760c71f39..7e8141647943 100644
7093 --- a/net/netfilter/ipvs/ip_vs_sched.c
7094 +++ b/net/netfilter/ipvs/ip_vs_sched.c
7095 @@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
7096
7097 if (sched->done_service)
7098 sched->done_service(svc);
7099 - /* svc->scheduler can not be set to NULL */
7100 + /* svc->scheduler can be set to NULL only by caller */
7101 }
7102
7103
7104 @@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
7105
7106 void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
7107 {
7108 - struct ip_vs_scheduler *sched;
7109 + struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
7110 + char *sched_name = sched ? sched->name : "none";
7111
7112 - sched = rcu_dereference(svc->scheduler);
7113 if (svc->fwmark) {
7114 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
7115 - sched->name, svc->fwmark, svc->fwmark, msg);
7116 + sched_name, svc->fwmark, svc->fwmark, msg);
7117 #ifdef CONFIG_IP_VS_IPV6
7118 } else if (svc->af == AF_INET6) {
7119 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
7120 - sched->name, ip_vs_proto_name(svc->protocol),
7121 + sched_name, ip_vs_proto_name(svc->protocol),
7122 &svc->addr.in6, ntohs(svc->port), msg);
7123 #endif
7124 } else {
7125 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
7126 - sched->name, ip_vs_proto_name(svc->protocol),
7127 + sched_name, ip_vs_proto_name(svc->protocol),
7128 &svc->addr.ip, ntohs(svc->port), msg);
7129 }
7130 }
7131 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
7132 index 19b9cce6c210..150047c739fa 100644
7133 --- a/net/netfilter/ipvs/ip_vs_sync.c
7134 +++ b/net/netfilter/ipvs/ip_vs_sync.c
7135 @@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
7136 pkts = atomic_add_return(1, &cp->in_pkts);
7137 else
7138 pkts = sysctl_sync_threshold(ipvs);
7139 - ip_vs_sync_conn(net, cp->control, pkts);
7140 + ip_vs_sync_conn(net, cp, pkts);
7141 }
7142 }
7143
7144 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
7145 index 19986ec5f21a..258f1e05250f 100644
7146 --- a/net/netfilter/ipvs/ip_vs_xmit.c
7147 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
7148 @@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
7149
7150 memset(&fl4, 0, sizeof(fl4));
7151 fl4.daddr = daddr;
7152 - fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
7153 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
7154 FLOWI_FLAG_KNOWN_NH : 0;
7155
7156 @@ -519,10 +518,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
7157 if (ret == NF_ACCEPT) {
7158 nf_reset(skb);
7159 skb_forward_csum(skb);
7160 + if (!skb->sk)
7161 + skb_sender_cpu_clear(skb);
7162 }
7163 return ret;
7164 }
7165
7166 +/* In the event of a remote destination, it's possible that we would have
7167 + * matches against an old socket (particularly a TIME-WAIT socket). This
7168 + * causes havoc down the line (ip_local_out et al. expect regular sockets
7169 + * and invalid memory accesses will happen) so simply drop the association
7170 + * in this case.
7171 +*/
7172 +static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
7173 +{
7174 + /* If dev is set, the packet came from the LOCAL_IN callback and
7175 + * not from a local TCP socket.
7176 + */
7177 + if (skb->dev)
7178 + skb_orphan(skb);
7179 +}
7180 +
7181 /* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
7182 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
7183 struct ip_vs_conn *cp, int local)
7184 @@ -534,12 +550,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
7185 ip_vs_notrack(skb);
7186 else
7187 ip_vs_update_conntrack(skb, cp, 1);
7188 +
7189 + /* Remove the early_demux association unless it's bound for the
7190 + * exact same port and address on this host after translation.
7191 + */
7192 + if (!local || cp->vport != cp->dport ||
7193 + !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
7194 + ip_vs_drop_early_demux_sk(skb);
7195 +
7196 if (!local) {
7197 skb_forward_csum(skb);
7198 + if (!skb->sk)
7199 + skb_sender_cpu_clear(skb);
7200 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
7201 NULL, skb_dst(skb)->dev, dst_output_sk);
7202 } else
7203 ret = NF_ACCEPT;
7204 +
7205 return ret;
7206 }
7207
7208 @@ -553,7 +580,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
7209 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
7210 ip_vs_notrack(skb);
7211 if (!local) {
7212 + ip_vs_drop_early_demux_sk(skb);
7213 skb_forward_csum(skb);
7214 + if (!skb->sk)
7215 + skb_sender_cpu_clear(skb);
7216 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
7217 NULL, skb_dst(skb)->dev, dst_output_sk);
7218 } else
7219 @@ -841,6 +871,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
7220 struct ipv6hdr *old_ipv6h = NULL;
7221 #endif
7222
7223 + ip_vs_drop_early_demux_sk(skb);
7224 +
7225 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
7226 new_skb = skb_realloc_headroom(skb, max_headroom);
7227 if (!new_skb)
7228 diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
7229 index 7a17070c5dab..b45a4223cb05 100644
7230 --- a/net/netfilter/nf_conntrack_expect.c
7231 +++ b/net/netfilter/nf_conntrack_expect.c
7232 @@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
7233 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
7234 }
7235
7236 - return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
7237 + return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
7238 + nf_ct_zone(a->master) == nf_ct_zone(b->master);
7239 }
7240
7241 static inline int expect_matches(const struct nf_conntrack_expect *a,
7242 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
7243 index d1c23940a86a..6b8b0abbfab4 100644
7244 --- a/net/netfilter/nf_conntrack_netlink.c
7245 +++ b/net/netfilter/nf_conntrack_netlink.c
7246 @@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
7247 }
7248
7249 err = nf_ct_expect_related_report(exp, portid, report);
7250 - if (err < 0)
7251 - goto err_exp;
7252 -
7253 - return 0;
7254 -err_exp:
7255 nf_ct_expect_put(exp);
7256 err_ct:
7257 nf_ct_put(ct);
7258 diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h
7259 index ea7f36784b3d..399210693c2a 100644
7260 --- a/net/netfilter/nf_internals.h
7261 +++ b/net/netfilter/nf_internals.h
7262 @@ -19,6 +19,7 @@ unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb,
7263 /* nf_queue.c */
7264 int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem,
7265 struct nf_hook_state *state, unsigned int queuenum);
7266 +void nf_queue_nf_hook_drop(struct nf_hook_ops *ops);
7267 int __init netfilter_queue_init(void);
7268
7269 /* nf_log.c */
7270 diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
7271 index 675d12c69e32..a5d41dfa9f05 100644
7272 --- a/net/netfilter/nf_log.c
7273 +++ b/net/netfilter/nf_log.c
7274 @@ -107,12 +107,17 @@ EXPORT_SYMBOL(nf_log_register);
7275
7276 void nf_log_unregister(struct nf_logger *logger)
7277 {
7278 + const struct nf_logger *log;
7279 int i;
7280
7281 mutex_lock(&nf_log_mutex);
7282 - for (i = 0; i < NFPROTO_NUMPROTO; i++)
7283 - RCU_INIT_POINTER(loggers[i][logger->type], NULL);
7284 + for (i = 0; i < NFPROTO_NUMPROTO; i++) {
7285 + log = nft_log_dereference(loggers[i][logger->type]);
7286 + if (log == logger)
7287 + RCU_INIT_POINTER(loggers[i][logger->type], NULL);
7288 + }
7289 mutex_unlock(&nf_log_mutex);
7290 + synchronize_rcu();
7291 }
7292 EXPORT_SYMBOL(nf_log_unregister);
7293
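
nf_log_unregister() gains two distinct fixes: a slot is cleared only when it still points at the logger being removed, so unregistering one logger no longer wipes out another's registration, and the final synchronize_rcu() guarantees no rcu_read_lock() reader still holds the stale pointer when the caller goes on to unload the module. The generic unpublish-then-wait shape, as a hedged sketch with generic names (not the netfilter table):

/* Unpublish only the slots that point at 'obj', then wait out readers. */
mutex_lock(&table_lock);
for (i = 0; i < NSLOTS; i++) {
        if (rcu_access_pointer(table[i]) == obj)
                RCU_INIT_POINTER(table[i], NULL);
}
mutex_unlock(&table_lock);

synchronize_rcu();      /* after this, no rcu_read_lock() section can
                         * still be dereferencing 'obj'; the caller may
                         * free it or unload its module */
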
7294 diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
7295 index 2e88032cd5ad..cd60d397fe05 100644
7296 --- a/net/netfilter/nf_queue.c
7297 +++ b/net/netfilter/nf_queue.c
7298 @@ -105,6 +105,23 @@ bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
7299 }
7300 EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);
7301
7302 +void nf_queue_nf_hook_drop(struct nf_hook_ops *ops)
7303 +{
7304 + const struct nf_queue_handler *qh;
7305 + struct net *net;
7306 +
7307 + rtnl_lock();
7308 + rcu_read_lock();
7309 + qh = rcu_dereference(queue_handler);
7310 + if (qh) {
7311 + for_each_net(net) {
7312 + qh->nf_hook_drop(net, ops);
7313 + }
7314 + }
7315 + rcu_read_unlock();
7316 + rtnl_unlock();
7317 +}
7318 +
7319 /*
7320 * Any packet that leaves via this function must come back
7321 * through nf_reinject().
7322 diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
7323 index f153b07073af..f77bad46ac68 100644
7324 --- a/net/netfilter/nf_tables_core.c
7325 +++ b/net/netfilter/nf_tables_core.c
7326 @@ -114,7 +114,8 @@ unsigned int
7327 nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
7328 {
7329 const struct nft_chain *chain = ops->priv, *basechain = chain;
7330 - const struct net *net = read_pnet(&nft_base_chain(basechain)->pnet);
7331 + const struct net *chain_net = read_pnet(&nft_base_chain(basechain)->pnet);
7332 + const struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
7333 const struct nft_rule *rule;
7334 const struct nft_expr *expr, *last;
7335 struct nft_regs regs;
7336 @@ -124,6 +125,10 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
7337 int rulenum;
7338 unsigned int gencursor = nft_genmask_cur(net);
7339
7340 + /* Ignore chains that are not for the current network namespace */
7341 + if (!net_eq(net, chain_net))
7342 + return NF_ACCEPT;
7343 +
7344 do_chain:
7345 rulenum = 0;
7346 rule = list_entry(&chain->rules, struct nft_rule, list);
7347 diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
7348 index 8b117c90ecd7..69e3ceffa14d 100644
7349 --- a/net/netfilter/nfnetlink.c
7350 +++ b/net/netfilter/nfnetlink.c
7351 @@ -432,6 +432,7 @@ done:
7352 static void nfnetlink_rcv(struct sk_buff *skb)
7353 {
7354 struct nlmsghdr *nlh = nlmsg_hdr(skb);
7355 + u_int16_t res_id;
7356 int msglen;
7357
7358 if (nlh->nlmsg_len < NLMSG_HDRLEN ||
7359 @@ -456,7 +457,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
7360
7361 nfgenmsg = nlmsg_data(nlh);
7362 skb_pull(skb, msglen);
7363 - nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
7364 + /* Work around old nft using host byte order */
7365 + if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
7366 + res_id = NFNL_SUBSYS_NFTABLES;
7367 + else
7368 + res_id = ntohs(nfgenmsg->res_id);
7369 + nfnetlink_rcv_batch(skb, nlh, res_id);
7370 } else {
7371 netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
7372 }
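
The res_id workaround relies on the two encodings never colliding: old nft userspace put the nftables subsystem id (10) on the wire in host byte order, correct senders use network order, and on a little-endian host htons(10) is 2560. A raw value of exactly 10 is therefore taken as-is and anything else is byte-swapped. A runnable model of the disambiguation (standalone sketch, not the kernel function):

#include <arpa/inet.h>
#include <stdio.h>

#define NFNL_SUBSYS_NFTABLES 10

static unsigned short decode_res_id(unsigned short wire)
{
        /* Old nft sent host byte order; 10 and htons(10) cannot collide. */
        if (wire == NFNL_SUBSYS_NFTABLES)
                return wire;
        return ntohs(wire);
}

int main(void)
{
        printf("old sender: %u\n", decode_res_id(10));          /* 10 */
        printf("new sender: %u\n", decode_res_id(htons(10)));   /* 10 */
        return 0;
}
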
7373 diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
7374 index 11c7682fa0ea..32d0437abdd8 100644
7375 --- a/net/netfilter/nfnetlink_queue_core.c
7376 +++ b/net/netfilter/nfnetlink_queue_core.c
7377 @@ -824,6 +824,27 @@ static struct notifier_block nfqnl_dev_notifier = {
7378 .notifier_call = nfqnl_rcv_dev_event,
7379 };
7380
7381 +static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr)
7382 +{
7383 + return entry->elem == (struct nf_hook_ops *)ops_ptr;
7384 +}
7385 +
7386 +static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook)
7387 +{
7388 + struct nfnl_queue_net *q = nfnl_queue_pernet(net);
7389 + int i;
7390 +
7391 + rcu_read_lock();
7392 + for (i = 0; i < INSTANCE_BUCKETS; i++) {
7393 + struct nfqnl_instance *inst;
7394 + struct hlist_head *head = &q->instance_table[i];
7395 +
7396 + hlist_for_each_entry_rcu(inst, head, hlist)
7397 + nfqnl_flush(inst, nf_hook_cmp, (unsigned long)hook);
7398 + }
7399 + rcu_read_unlock();
7400 +}
7401 +
7402 static int
7403 nfqnl_rcv_nl_event(struct notifier_block *this,
7404 unsigned long event, void *ptr)
7405 @@ -1031,7 +1052,8 @@ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
7406 };
7407
7408 static const struct nf_queue_handler nfqh = {
7409 - .outfn = &nfqnl_enqueue_packet,
7410 + .outfn = &nfqnl_enqueue_packet,
7411 + .nf_hook_drop = &nfqnl_nf_hook_drop,
7412 };
7413
7414 static int
7415 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
7416 index 7f29cfc76349..4d05c7bf5a03 100644
7417 --- a/net/netfilter/nft_compat.c
7418 +++ b/net/netfilter/nft_compat.c
7419 @@ -617,6 +617,13 @@ struct nft_xt {
7420
7421 static struct nft_expr_type nft_match_type;
7422
7423 +static bool nft_match_cmp(const struct xt_match *match,
7424 + const char *name, u32 rev, u32 family)
7425 +{
7426 + return strcmp(match->name, name) == 0 && match->revision == rev &&
7427 + (match->family == NFPROTO_UNSPEC || match->family == family);
7428 +}
7429 +
7430 static const struct nft_expr_ops *
7431 nft_match_select_ops(const struct nft_ctx *ctx,
7432 const struct nlattr * const tb[])
7433 @@ -624,7 +631,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
7434 struct nft_xt *nft_match;
7435 struct xt_match *match;
7436 char *mt_name;
7437 - __u32 rev, family;
7438 + u32 rev, family;
7439
7440 if (tb[NFTA_MATCH_NAME] == NULL ||
7441 tb[NFTA_MATCH_REV] == NULL ||
7442 @@ -639,8 +646,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
7443 list_for_each_entry(nft_match, &nft_match_list, head) {
7444 struct xt_match *match = nft_match->ops.data;
7445
7446 - if (strcmp(match->name, mt_name) == 0 &&
7447 - match->revision == rev && match->family == family) {
7448 + if (nft_match_cmp(match, mt_name, rev, family)) {
7449 if (!try_module_get(match->me))
7450 return ERR_PTR(-ENOENT);
7451
7452 @@ -691,6 +697,13 @@ static LIST_HEAD(nft_target_list);
7453
7454 static struct nft_expr_type nft_target_type;
7455
7456 +static bool nft_target_cmp(const struct xt_target *tg,
7457 + const char *name, u32 rev, u32 family)
7458 +{
7459 + return strcmp(tg->name, name) == 0 && tg->revision == rev &&
7460 + (tg->family == NFPROTO_UNSPEC || tg->family == family);
7461 +}
7462 +
7463 static const struct nft_expr_ops *
7464 nft_target_select_ops(const struct nft_ctx *ctx,
7465 const struct nlattr * const tb[])
7466 @@ -698,7 +711,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
7467 struct nft_xt *nft_target;
7468 struct xt_target *target;
7469 char *tg_name;
7470 - __u32 rev, family;
7471 + u32 rev, family;
7472
7473 if (tb[NFTA_TARGET_NAME] == NULL ||
7474 tb[NFTA_TARGET_REV] == NULL ||
7475 @@ -713,8 +726,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
7476 list_for_each_entry(nft_target, &nft_target_list, head) {
7477 struct xt_target *target = nft_target->ops.data;
7478
7479 - if (strcmp(target->name, tg_name) == 0 &&
7480 - target->revision == rev && target->family == family) {
7481 + if (nft_target_cmp(target, tg_name, rev, family)) {
7482 if (!try_module_get(target->me))
7483 return ERR_PTR(-ENOENT);
7484
7485 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
7486 index 7de33d1af9b6..7fa6d78331ed 100644
7487 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
7488 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
7489 @@ -382,6 +382,7 @@ static int send_reply(struct svcxprt_rdma *rdma,
7490 int byte_count)
7491 {
7492 struct ib_send_wr send_wr;
7493 + u32 xdr_off;
7494 int sge_no;
7495 int sge_bytes;
7496 int page_no;
7497 @@ -416,8 +417,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
7498 ctxt->direction = DMA_TO_DEVICE;
7499
7500 /* Map the payload indicated by 'byte_count' */
7501 + xdr_off = 0;
7502 for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
7503 - int xdr_off = 0;
7504 sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
7505 byte_count -= sge_bytes;
7506 ctxt->sge[sge_no].addr =
7507 @@ -455,6 +456,13 @@ static int send_reply(struct svcxprt_rdma *rdma,
7508 }
7509 rqstp->rq_next_page = rqstp->rq_respages + 1;
7510
7511 + /* The loop above bumps sc_dma_used for each sge. The
7512 + * xdr_buf.tail gets a separate sge, but resides in the
7513 + * same page as xdr_buf.head. Don't count it twice.
7514 + */
7515 + if (sge_no > ctxt->count)
7516 + atomic_dec(&rdma->sc_dma_used);
7517 +
7518 if (sge_no > rdma->sc_max_sge) {
7519 pr_err("svcrdma: Too many sges (%d)\n", sge_no);
7520 goto err;
7521 diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
7522 index 885683a3b0bd..e0406211716b 100644
7523 --- a/sound/arm/Kconfig
7524 +++ b/sound/arm/Kconfig
7525 @@ -9,6 +9,14 @@ menuconfig SND_ARM
7526 Drivers that are implemented on ASoC can be found in
7527 "ALSA for SoC audio support" section.
7528
7529 +config SND_PXA2XX_LIB
7530 + tristate
7531 + select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
7532 + select SND_DMAENGINE_PCM
7533 +
7534 +config SND_PXA2XX_LIB_AC97
7535 + bool
7536 +
7537 if SND_ARM
7538
7539 config SND_ARMAACI
7540 @@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
7541 tristate
7542 select SND_PCM
7543
7544 -config SND_PXA2XX_LIB
7545 - tristate
7546 - select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
7547 -
7548 -config SND_PXA2XX_LIB_AC97
7549 - bool
7550 -
7551 config SND_PXA2XX_AC97
7552 tristate "AC97 driver for the Intel PXA2xx chip"
7553 depends on ARCH_PXA
7554 diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
7555 index 3a24f7739aaa..b791529bf31c 100644
7556 --- a/sound/pci/hda/patch_cirrus.c
7557 +++ b/sound/pci/hda/patch_cirrus.c
7558 @@ -634,6 +634,7 @@ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
7559 SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
7560 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
7561 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
7562 + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
7563 {} /* terminator */
7564 };
7565
7566 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7567 index 6fe862594e9b..57bb5a559f8e 100644
7568 --- a/sound/pci/hda/patch_realtek.c
7569 +++ b/sound/pci/hda/patch_realtek.c
7570 @@ -4182,6 +4182,24 @@ static void alc_fixup_disable_aamix(struct hda_codec *codec,
7571 }
7572 }
7573
7574 +/* fixup for Thinkpad docks: add dock pins, avoid HP parser fixup */
7575 +static void alc_fixup_tpt440_dock(struct hda_codec *codec,
7576 + const struct hda_fixup *fix, int action)
7577 +{
7578 + static const struct hda_pintbl pincfgs[] = {
7579 + { 0x16, 0x21211010 }, /* dock headphone */
7580 + { 0x19, 0x21a11010 }, /* dock mic */
7581 + { }
7582 + };
7583 + struct alc_spec *spec = codec->spec;
7584 +
7585 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
7586 + spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
7587 + codec->power_save_node = 0; /* avoid click noises */
7588 + snd_hda_apply_pincfgs(codec, pincfgs);
7589 + }
7590 +}
7591 +
7592 static void alc_shutup_dell_xps13(struct hda_codec *codec)
7593 {
7594 struct alc_spec *spec = codec->spec;
7595 @@ -4507,7 +4525,6 @@ enum {
7596 ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC,
7597 ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
7598 ALC292_FIXUP_TPT440_DOCK,
7599 - ALC292_FIXUP_TPT440_DOCK2,
7600 ALC283_FIXUP_BXBT2807_MIC,
7601 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
7602 ALC282_FIXUP_ASPIRE_V5_PINS,
7603 @@ -4972,17 +4989,7 @@ static const struct hda_fixup alc269_fixups[] = {
7604 },
7605 [ALC292_FIXUP_TPT440_DOCK] = {
7606 .type = HDA_FIXUP_FUNC,
7607 - .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
7608 - .chained = true,
7609 - .chain_id = ALC292_FIXUP_TPT440_DOCK2
7610 - },
7611 - [ALC292_FIXUP_TPT440_DOCK2] = {
7612 - .type = HDA_FIXUP_PINS,
7613 - .v.pins = (const struct hda_pintbl[]) {
7614 - { 0x16, 0x21211010 }, /* dock headphone */
7615 - { 0x19, 0x21a11010 }, /* dock mic */
7616 - { }
7617 - },
7618 + .v.func = alc_fixup_tpt440_dock,
7619 .chained = true,
7620 .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST
7621 },
7622 @@ -5226,6 +5233,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7623 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
7624 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
7625 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7626 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK),
7627 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
7628 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
7629 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
7630 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
7631 index 25f0f45e6640..b1bc66783974 100644
7632 --- a/sound/pci/hda/patch_sigmatel.c
7633 +++ b/sound/pci/hda/patch_sigmatel.c
7634 @@ -4522,7 +4522,11 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
7635 return err;
7636
7637 spec = codec->spec;
7638 - codec->power_save_node = 1;
7639 + /* enable power_save_node only for new 92HD89xx chips, as it causes
7640 + * click noises on old 92HD73xx chips.
7641 + */
7642 + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670)
7643 + codec->power_save_node = 1;
7644 spec->linear_tone_beep = 0;
7645 spec->gen.mixer_nid = 0x1d;
7646 spec->have_spdif_mux = 1;
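The guard added above masks off the low nibble of the codec vendor ID: any ID in 0x111d7670..0x111d767f (the older 92HD73xx parts named in the comment) keeps power_save_node disabled, while IDs outside that range enable it. A minimal standalone sketch of the same mask test, with illustrative IDs:

#include <stdio.h>

/* Mirrors the guard added in patch_stac92hd73xx(): any vendor ID in
 * 0x111d7670..0x111d767f keeps power_save_node off. */
static int power_save_node_ok(unsigned int vendor_id)
{
        return (vendor_id & 0xfffffff0) != 0x111d7670;
}

int main(void)
{
        /* illustrative IDs: one inside the masked range, one outside */
        unsigned int ids[] = { 0x111d7675, 0x111d7695 };

        for (int i = 0; i < 2; i++)
                printf("0x%08x -> power_save_node=%d\n",
                       ids[i], power_save_node_ok(ids[i]));
        return 0;
}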
7647 diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
7648 index c75995f2779c..b914a08258ea 100644
7649 --- a/sound/soc/au1x/db1200.c
7650 +++ b/sound/soc/au1x/db1200.c
7651 @@ -129,6 +129,8 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
7652 .cpu_dai_name = "au1xpsc_i2s.2",
7653 .platform_name = "au1xpsc-pcm.2",
7654 .codec_name = "wm8731.0-001b",
7655 + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
7656 + SND_SOC_DAIFMT_CBM_CFM,
7657 .ops = &db1200_i2s_wm8731_ops,
7658 };
7659
7660 @@ -146,6 +148,8 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
7661 .cpu_dai_name = "au1xpsc_i2s.3",
7662 .platform_name = "au1xpsc-pcm.3",
7663 .codec_name = "wm8731.0-001b",
7664 + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF |
7665 + SND_SOC_DAIFMT_CBM_CFM,
7666 .ops = &db1200_i2s_wm8731_ops,
7667 };
7668
7669 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
7670 index 3593a1496056..3a29c0ac5d8a 100644
7671 --- a/sound/soc/codecs/sgtl5000.c
7672 +++ b/sound/soc/codecs/sgtl5000.c
7673 @@ -1339,8 +1339,8 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
7674 sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT);
7675
7676 snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL,
7677 - SGTL5000_BIAS_R_MASK,
7678 - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT);
7679 + SGTL5000_BIAS_VOLT_MASK,
7680 + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT);
7681 /*
7682 * disable DAP
7683 * TODO:
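The fix above is a read-modify-write mix-up: an update_bits-style helper replaces only the bits selected by the mask, so pairing the resistor mask with a voltage value both drops the intended voltage bits and clears the resistor field instead. A minimal sketch of that semantics, with an illustrative register layout (not the real SGTL5000 bit positions):

#include <stdio.h>

/* Same semantics as an update_bits-style helper: only the bits
 * selected by 'mask' are replaced in the register image. */
static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                unsigned int val)
{
        return (reg & ~mask) | (val & mask);
}

int main(void)
{
        /* illustrative layout only: resistor field in bits 9:8,
         * voltage field in bits 6:4 (not the real SGTL5000 map) */
        unsigned int R_MASK    = 0x3 << 8;
        unsigned int VOLT_MASK = 0x7 << 4;
        unsigned int reg  = 0x3 << 8;   /* resistor already programmed */
        unsigned int volt = 0x5 << 4;

        /* wrong mask: clears the resistor field and drops the voltage,
         * because (volt & R_MASK) == 0 */
        printf("wrong mask:   0x%04x\n", update_bits(reg, R_MASK, volt));
        /* right mask: the resistor field survives, voltage is set */
        printf("correct mask: 0x%04x\n", update_bits(reg, VOLT_MASK, volt));
        return 0;
}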
7684 diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
7685 index a3e97b46b64e..0d28e3b356f6 100644
7686 --- a/sound/soc/dwc/designware_i2s.c
7687 +++ b/sound/soc/dwc/designware_i2s.c
7688 @@ -131,10 +131,10 @@ static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
7689
7690 if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
7691 for (i = 0; i < 4; i++)
7692 - i2s_write_reg(dev->i2s_base, TOR(i), 0);
7693 + i2s_read_reg(dev->i2s_base, TOR(i));
7694 } else {
7695 for (i = 0; i < 4; i++)
7696 - i2s_write_reg(dev->i2s_base, ROR(i), 0);
7697 + i2s_read_reg(dev->i2s_base, ROR(i));
7698 }
7699 }
7700
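The change above stops writing the TOR/ROR overrun registers and simply reads them; the apparent rationale, as the replacement code suggests, is that these are read-to-clear status registers, so the read itself acknowledges the overrun while a write is ineffective. A tiny sketch of the read-to-clear idiom against a fake latched status word:

#include <stdint.h>
#include <stdio.h>

/* A plain variable stands in for a read-to-clear status register:
 * the emulated hardware zeroes it as a side effect of the read. */
static uint32_t fake_ror = 0x1;          /* pretend an overrun is latched */

static uint32_t read_to_clear(volatile uint32_t *reg)
{
        uint32_t v = *reg;
        *reg = 0;                        /* emulates the hardware side effect */
        return v;
}

int main(void)
{
        (void)read_to_clear(&fake_ror);  /* value unused; the read is the ack */
        printf("after ack: 0x%x\n", (unsigned)fake_ror);
        return 0;
}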
7701 diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
7702 index 39cea80846c3..f2bf8661dd21 100644
7703 --- a/sound/soc/pxa/Kconfig
7704 +++ b/sound/soc/pxa/Kconfig
7705 @@ -1,7 +1,6 @@
7706 config SND_PXA2XX_SOC
7707 tristate "SoC Audio for the Intel PXA2xx chip"
7708 depends on ARCH_PXA
7709 - select SND_ARM
7710 select SND_PXA2XX_LIB
7711 help
7712 Say Y or M if you want to add support for codecs attached to
7713 @@ -25,7 +24,6 @@ config SND_PXA2XX_AC97
7714 config SND_PXA2XX_SOC_AC97
7715 tristate
7716 select AC97_BUS
7717 - select SND_ARM
7718 select SND_PXA2XX_LIB_AC97
7719 select SND_SOC_AC97_BUS
7720
7721 diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
7722 index 1f6054650991..9e4b04e0fbd1 100644
7723 --- a/sound/soc/pxa/pxa2xx-ac97.c
7724 +++ b/sound/soc/pxa/pxa2xx-ac97.c
7725 @@ -49,7 +49,7 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
7726 .reset = pxa2xx_ac97_cold_reset,
7727 };
7728
7729 -static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 12;
7730 +static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
7731 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
7732 .addr = __PREG(PCDR),
7733 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
7734 @@ -57,7 +57,7 @@ static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
7735 .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
7736 };
7737
7738 -static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 11;
7739 +static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
7740 static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
7741 .addr = __PREG(PCDR),
7742 .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
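The two hunks above swap the DMA request numbers so that capture uses request line 11 and playback uses 12; the old values were reversed, pointing each stream at the other direction's DMA channel. The corrected pairing, encoded as a tiny table for reference (the enum name is illustrative; the values come from the patch):

#include <stdio.h>

/* Illustrative enum; the request numbers come from the fixed hunks. */
enum pxa2xx_ac97_dma_req {
        AC97_PCM_STEREO_IN_REQ  = 11,    /* capture stream */
        AC97_PCM_STEREO_OUT_REQ = 12,    /* playback stream */
};

int main(void)
{
        printf("capture  -> request line %d\n", AC97_PCM_STEREO_IN_REQ);
        printf("playback -> request line %d\n", AC97_PCM_STEREO_OUT_REQ);
        return 0;
}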
7743 diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
7744 index 82e350e9501c..ac75816ada7c 100644
7745 --- a/sound/synth/emux/emux_oss.c
7746 +++ b/sound/synth/emux/emux_oss.c
7747 @@ -69,7 +69,8 @@ snd_emux_init_seq_oss(struct snd_emux *emu)
7748 struct snd_seq_oss_reg *arg;
7749 struct snd_seq_device *dev;
7750
7751 - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS,
7752 + /* use device #1 here to avoid conflicts with OPL3 */
7753 + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS,
7754 sizeof(struct snd_seq_oss_reg), &dev) < 0)
7755 return;
7756
7757 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
7758 index 29f94f6f0d9e..ed5461f065bd 100644
7759 --- a/tools/lib/traceevent/event-parse.c
7760 +++ b/tools/lib/traceevent/event-parse.c
7761 @@ -3721,7 +3721,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
7762 struct format_field *field;
7763 struct printk_map *printk;
7764 long long val, fval;
7765 - unsigned long addr;
7766 + unsigned long long addr;
7767 char *str;
7768 unsigned char *hex;
7769 int print;
7770 @@ -3754,13 +3754,30 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
7771 */
7772 if (!(field->flags & FIELD_IS_ARRAY) &&
7773 field->size == pevent->long_size) {
7774 - addr = *(unsigned long *)(data + field->offset);
7775 +
7776 + /* Handle heterogeneous recording and processing
7777 + * architectures
7778 + *
7779 + * CASE I:
7780 + * Traces recorded on 32-bit devices (32-bit
7781 + * addressing) and processed on 64-bit devices:
7782 + * In this case, only 32 bits should be read.
7783 + *
7784 + * CASE II:
7785 + * Traces recorded on 64-bit devices and processed
7786 + * on 32-bit devices:
7787 + * In this case, 64 bits must be read.
7788 + */
7789 + addr = (pevent->long_size == 8) ?
7790 + *(unsigned long long *)(data + field->offset) :
7791 + (unsigned long long)*(unsigned int *)(data + field->offset);
7792 +
7793 /* Check if it matches a print format */
7794 printk = find_printk(pevent, addr);
7795 if (printk)
7796 trace_seq_puts(s, printk->printk);
7797 else
7798 - trace_seq_printf(s, "%lx", addr);
7799 + trace_seq_printf(s, "%llx", addr);
7800 break;
7801 }
7802 str = malloc(len + 1);
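The hunk above makes pointer printing work when the trace was recorded on a machine whose word size differs from the host's: the field is read at the recorded width (pevent->long_size) and zero-extended into an unsigned long long before the printk lookup. A self-contained sketch of the same width-dispatching read (the buffer contents and little-endian output are illustrative):

#include <stdio.h>
#include <string.h>

/* Read a pointer-sized trace field where 'long_size' is the word size
 * of the recording machine, which may differ from the host's. */
static unsigned long long read_addr(const void *data, int offset,
                                    int long_size)
{
        unsigned long long addr;

        if (long_size == 8) {
                memcpy(&addr, (const char *)data + offset, 8);
        } else {
                unsigned int addr32;

                memcpy(&addr32, (const char *)data + offset, 4);
                addr = addr32;           /* zero-extend a 32-bit record */
        }
        return addr;
}

int main(void)
{
        /* illustrative payload; on a little-endian host both cases
         * print deadbeef */
        unsigned char buf[8] = { 0xef, 0xbe, 0xad, 0xde, 0, 0, 0, 0 };

        printf("32-bit record: %llx\n", read_addr(buf, 0, 4));
        printf("64-bit record: %llx\n", read_addr(buf, 0, 8));
        return 0;
}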
7803 diff --git a/tools/perf/arch/alpha/Build b/tools/perf/arch/alpha/Build
7804 new file mode 100644
7805 index 000000000000..1bb8bf6d7fd4
7806 --- /dev/null
7807 +++ b/tools/perf/arch/alpha/Build
7808 @@ -0,0 +1 @@
7809 +# empty
7810 diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
7811 new file mode 100644
7812 index 000000000000..1bb8bf6d7fd4
7813 --- /dev/null
7814 +++ b/tools/perf/arch/mips/Build
7815 @@ -0,0 +1 @@
7816 +# empty
7817 diff --git a/tools/perf/arch/parisc/Build b/tools/perf/arch/parisc/Build
7818 new file mode 100644
7819 index 000000000000..1bb8bf6d7fd4
7820 --- /dev/null
7821 +++ b/tools/perf/arch/parisc/Build
7822 @@ -0,0 +1 @@
7823 +# empty
7824 diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
7825 index f7b8218785f6..a1f3ffc2786d 100644
7826 --- a/tools/perf/builtin-stat.c
7827 +++ b/tools/perf/builtin-stat.c
7828 @@ -1227,7 +1227,7 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
7829 static void print_aggr(char *prefix)
7830 {
7831 struct perf_evsel *counter;
7832 - int cpu, cpu2, s, s2, id, nr;
7833 + int cpu, s, s2, id, nr;
7834 double uval;
7835 u64 ena, run, val;
7836
7837 @@ -1240,8 +1240,7 @@ static void print_aggr(char *prefix)
7838 val = ena = run = 0;
7839 nr = 0;
7840 for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
7841 - cpu2 = perf_evsel__cpus(counter)->map[cpu];
7842 - s2 = aggr_get_id(evsel_list->cpus, cpu2);
7843 + s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
7844 if (s2 != id)
7845 continue;
7846 val += counter->counts->cpu[cpu].val;
7847 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
7848 index 918fd8ae2d80..23eea5e7fa94 100644
7849 --- a/tools/perf/util/header.c
7850 +++ b/tools/perf/util/header.c
7851 @@ -1426,7 +1426,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
7852 if (ph->needs_swap)
7853 nr = bswap_32(nr);
7854
7855 - ph->env.nr_cpus_online = nr;
7856 + ph->env.nr_cpus_avail = nr;
7857
7858 ret = readn(fd, &nr, sizeof(nr));
7859 if (ret != sizeof(nr))
7860 @@ -1435,7 +1435,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
7861 if (ph->needs_swap)
7862 nr = bswap_32(nr);
7863
7864 - ph->env.nr_cpus_avail = nr;
7865 + ph->env.nr_cpus_online = nr;
7866 return 0;
7867 }
7868
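The two hunks above swap the assignments because, as the fix implies, the first u32 in the perf.data NRCPUS section is the number of available CPUs and the second the number online; the old code stored them the other way round. A minimal sketch of reading the pair in the corrected order (the struct name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative mirror of the section layout implied by the fix:
 * the available-CPU count comes first, the online count second. */
struct nrcpus_section {
        uint32_t nr_cpus_avail;
        uint32_t nr_cpus_online;
};

int main(void)
{
        uint32_t raw[2] = { 8, 6 };      /* as if read from perf.data */
        struct nrcpus_section s;

        memcpy(&s, raw, sizeof(s));
        printf("avail=%u online=%u\n", s.nr_cpus_avail, s.nr_cpus_online);
        return 0;
}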
7869 diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
7870 index cc22b9158b93..c7966c0fa13e 100644
7871 --- a/tools/perf/util/hist.c
7872 +++ b/tools/perf/util/hist.c
7873 @@ -151,6 +151,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
7874 hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
7875 hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
7876
7877 + if (h->srcline)
7878 + hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));
7879 +
7880 if (h->transaction)
7881 hists__new_col_len(hists, HISTC_TRANSACTION,
7882 hist_entry__transaction_len());
7883 diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
7884 index a7ab6063e038..3ddfab315e19 100644
7885 --- a/tools/perf/util/symbol-elf.c
7886 +++ b/tools/perf/util/symbol-elf.c
7887 @@ -1253,8 +1253,6 @@ out_close:
7888 static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
7889 bool temp)
7890 {
7891 - GElf_Ehdr *ehdr;
7892 -
7893 kcore->elfclass = elfclass;
7894
7895 if (temp)
7896 @@ -1271,9 +1269,7 @@ static int kcore__init(struct kcore *kcore, char *filename, int elfclass,
7897 if (!gelf_newehdr(kcore->elf, elfclass))
7898 goto out_end;
7899
7900 - ehdr = gelf_getehdr(kcore->elf, &kcore->ehdr);
7901 - if (!ehdr)
7902 - goto out_end;
7903 + memset(&kcore->ehdr, 0, sizeof(GElf_Ehdr));
7904
7905 return 0;
7906
7907 @@ -1330,23 +1326,18 @@ static int kcore__copy_hdr(struct kcore *from, struct kcore *to, size_t count)
7908 static int kcore__add_phdr(struct kcore *kcore, int idx, off_t offset,
7909 u64 addr, u64 len)
7910 {
7911 - GElf_Phdr gphdr;
7912 - GElf_Phdr *phdr;
7913 -
7914 - phdr = gelf_getphdr(kcore->elf, idx, &gphdr);
7915 - if (!phdr)
7916 - return -1;
7917 -
7918 - phdr->p_type = PT_LOAD;
7919 - phdr->p_flags = PF_R | PF_W | PF_X;
7920 - phdr->p_offset = offset;
7921 - phdr->p_vaddr = addr;
7922 - phdr->p_paddr = 0;
7923 - phdr->p_filesz = len;
7924 - phdr->p_memsz = len;
7925 - phdr->p_align = page_size;
7926 -
7927 - if (!gelf_update_phdr(kcore->elf, idx, phdr))
7928 + GElf_Phdr phdr = {
7929 + .p_type = PT_LOAD,
7930 + .p_flags = PF_R | PF_W | PF_X,
7931 + .p_offset = offset,
7932 + .p_vaddr = addr,
7933 + .p_paddr = 0,
7934 + .p_filesz = len,
7935 + .p_memsz = len,
7936 + .p_align = page_size,
7937 + };
7938 +
7939 + if (!gelf_update_phdr(kcore->elf, idx, &phdr))
7940 return -1;
7941
7942 return 0;
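The rewrite above relies on a C guarantee that makes the gelf_getphdr() round-trip unnecessary: in a designated initializer, every member not named is zero-initialized, so the on-stack header is fully defined before gelf_update_phdr() copies it in. A tiny demonstration of that rule (the struct is an illustrative stand-in, not the real GElf_Phdr):

#include <stdio.h>

/* Illustrative stand-in for GElf_Phdr; the rule demonstrated is C's,
 * not libelf's. */
struct phdr_like {
        unsigned long p_type, p_flags, p_offset, p_vaddr;
        unsigned long p_paddr, p_filesz, p_memsz, p_align;
};

int main(void)
{
        struct phdr_like p = {
                .p_type  = 1,
                .p_align = 4096,
                /* every member not named here is zero-initialized */
        };

        printf("p_paddr=%lu p_memsz=%lu\n", p.p_paddr, p.p_memsz);
        return 0;
}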
7943 diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
7944 index 9ff4193dfa49..79db45336e3a 100644
7945 --- a/virt/kvm/eventfd.c
7946 +++ b/virt/kvm/eventfd.c
7947 @@ -771,40 +771,14 @@ static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
7948 return KVM_MMIO_BUS;
7949 }
7950
7951 -static int
7952 -kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
7953 +static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
7954 + enum kvm_bus bus_idx,
7955 + struct kvm_ioeventfd *args)
7956 {
7957 - enum kvm_bus bus_idx;
7958 - struct _ioeventfd *p;
7959 - struct eventfd_ctx *eventfd;
7960 - int ret;
7961 -
7962 - bus_idx = ioeventfd_bus_from_flags(args->flags);
7963 - /* must be natural-word sized, or 0 to ignore length */
7964 - switch (args->len) {
7965 - case 0:
7966 - case 1:
7967 - case 2:
7968 - case 4:
7969 - case 8:
7970 - break;
7971 - default:
7972 - return -EINVAL;
7973 - }
7974 -
7975 - /* check for range overflow */
7976 - if (args->addr + args->len < args->addr)
7977 - return -EINVAL;
7978
7979 - /* check for extra flags that we don't understand */
7980 - if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
7981 - return -EINVAL;
7982 -
7983 - /* ioeventfd with no length can't be combined with DATAMATCH */
7984 - if (!args->len &&
7985 - args->flags & (KVM_IOEVENTFD_FLAG_PIO |
7986 - KVM_IOEVENTFD_FLAG_DATAMATCH))
7987 - return -EINVAL;
7988 + struct eventfd_ctx *eventfd;
7989 + struct _ioeventfd *p;
7990 + int ret;
7991
7992 eventfd = eventfd_ctx_fdget(args->fd);
7993 if (IS_ERR(eventfd))
7994 @@ -843,16 +817,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
7995 if (ret < 0)
7996 goto unlock_fail;
7997
7998 - /* When length is ignored, MMIO is also put on a separate bus, for
7999 - * faster lookups.
8000 - */
8001 - if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
8002 - ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
8003 - p->addr, 0, &p->dev);
8004 - if (ret < 0)
8005 - goto register_fail;
8006 - }
8007 -
8008 kvm->buses[bus_idx]->ioeventfd_count++;
8009 list_add_tail(&p->list, &kvm->ioeventfds);
8010
8011 @@ -860,8 +824,6 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8012
8013 return 0;
8014
8015 -register_fail:
8016 - kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
8017 unlock_fail:
8018 mutex_unlock(&kvm->slots_lock);
8019
8020 @@ -873,14 +835,13 @@ fail:
8021 }
8022
8023 static int
8024 -kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8025 +kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
8026 + struct kvm_ioeventfd *args)
8027 {
8028 - enum kvm_bus bus_idx;
8029 struct _ioeventfd *p, *tmp;
8030 struct eventfd_ctx *eventfd;
8031 int ret = -ENOENT;
8032
8033 - bus_idx = ioeventfd_bus_from_flags(args->flags);
8034 eventfd = eventfd_ctx_fdget(args->fd);
8035 if (IS_ERR(eventfd))
8036 return PTR_ERR(eventfd);
8037 @@ -901,10 +862,6 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8038 continue;
8039
8040 kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
8041 - if (!p->length) {
8042 - kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
8043 - &p->dev);
8044 - }
8045 kvm->buses[bus_idx]->ioeventfd_count--;
8046 ioeventfd_release(p);
8047 ret = 0;
8048 @@ -918,6 +875,71 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8049 return ret;
8050 }
8051
8052 +static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8053 +{
8054 + enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
8055 + int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
8056 +
8057 + if (!args->len && bus_idx == KVM_MMIO_BUS)
8058 + kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
8059 +
8060 + return ret;
8061 +}
8062 +
8063 +static int
8064 +kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8065 +{
8066 + enum kvm_bus bus_idx;
8067 + int ret;
8068 +
8069 + bus_idx = ioeventfd_bus_from_flags(args->flags);
8070 + /* must be natural-word sized, or 0 to ignore length */
8071 + switch (args->len) {
8072 + case 0:
8073 + case 1:
8074 + case 2:
8075 + case 4:
8076 + case 8:
8077 + break;
8078 + default:
8079 + return -EINVAL;
8080 + }
8081 +
8082 + /* check for range overflow */
8083 + if (args->addr + args->len < args->addr)
8084 + return -EINVAL;
8085 +
8086 + /* check for extra flags that we don't understand */
8087 + if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
8088 + return -EINVAL;
8089 +
8090 + /* ioeventfd with no length can't be combined with DATAMATCH */
8091 + if (!args->len &&
8092 + args->flags & (KVM_IOEVENTFD_FLAG_PIO |
8093 + KVM_IOEVENTFD_FLAG_DATAMATCH))
8094 + return -EINVAL;
8095 +
8096 + ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
8097 + if (ret)
8098 + goto fail;
8099 +
8100 + /* When length is ignored, MMIO is also put on a separate bus, for
8101 + * faster lookups.
8102 + */
8103 + if (!args->len && bus_idx == KVM_MMIO_BUS) {
8104 + ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
8105 + if (ret < 0)
8106 + goto fast_fail;
8107 + }
8108 +
8109 + return 0;
8110 +
8111 +fast_fail:
8112 + kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
8113 +fail:
8114 + return ret;
8115 +}
8116 +
8117 int
8118 kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
8119 {
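After this refactor a wildcard-length (len == 0) MMIO ioeventfd is registered twice, once on the regular MMIO bus and again on the fast MMIO bus, and the error path unwinds the first registration if the second fails; deassign mirrors the pairing. A compact sketch of that register-both-with-rollback shape (function names are placeholders, not the KVM API):

#include <stdio.h>

/* Placeholder hooks; in the patch these are kvm_assign_ioeventfd_idx()
 * and kvm_deassign_ioeventfd_idx() on two different buses. */
static int register_on(const char *bus)
{
        printf("register on %s\n", bus);
        return 0;                        /* pretend registration succeeds */
}

static void unregister_on(const char *bus)
{
        printf("unregister from %s\n", bus);
}

static int assign(int len, int is_mmio)
{
        int ret = register_on("mmio");

        if (ret)
                return ret;

        if (!len && is_mmio) {           /* wildcard length, MMIO only */
                ret = register_on("fast-mmio");
                if (ret < 0) {
                        unregister_on("mmio");   /* roll back the first */
                        return ret;
                }
        }
        return 0;
}

int main(void)
{
        return assign(0, 1);             /* registers on both buses */
}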
8120 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8121 index 90977418aeb6..85422985235f 100644
8122 --- a/virt/kvm/kvm_main.c
8123 +++ b/virt/kvm/kvm_main.c
8124 @@ -2935,10 +2935,25 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
8125 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
8126 const struct kvm_io_range *r2)
8127 {
8128 - if (r1->addr < r2->addr)
8129 + gpa_t addr1 = r1->addr;
8130 + gpa_t addr2 = r2->addr;
8131 +
8132 + if (addr1 < addr2)
8133 return -1;
8134 - if (r1->addr + r1->len > r2->addr + r2->len)
8135 +
8136 + /* If r2->len == 0, match the exact address. If r2->len != 0,
8137 + * accept any overlapping write. Any order is acceptable for
8138 + * overlapping ranges, because kvm_io_bus_get_first_dev ensures
8139 + * we process all of them.
8140 + */
8141 + if (r2->len) {
8142 + addr1 += r1->len;
8143 + addr2 += r2->len;
8144 + }
8145 +
8146 + if (addr1 > addr2)
8147 return 1;
8148 +
8149 return 0;
8150 }
8151
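The comparator above keeps ordering by start address but treats a zero-length search key specially: with r2->len == 0 only the start addresses are compared, so a wildcard-length ioeventfd matches exactly one slot; with a nonzero length the range ends are added back in, so overlapping ranges still compare equal. A standalone sketch of the same comparison rule:

#include <stdio.h>

typedef unsigned long long gpa_t;

struct io_range {
        gpa_t addr;
        int len;
};

/* Mirrors the fixed kvm_io_bus_cmp(): a zero-length key matches one
 * exact start address; a nonzero-length key compares range ends, so
 * overlapping ranges compare equal. */
static int range_cmp(const struct io_range *r1, const struct io_range *r2)
{
        gpa_t addr1 = r1->addr;
        gpa_t addr2 = r2->addr;

        if (addr1 < addr2)
                return -1;

        if (r2->len) {
                addr1 += r1->len;
                addr2 += r2->len;
        }

        if (addr1 > addr2)
                return 1;

        return 0;
}

int main(void)
{
        struct io_range dev   = { 0x1000, 4 };
        struct io_range exact = { 0x1000, 0 };  /* zero-length key */

        printf("exact-address key vs device: %d\n", range_cmp(&dev, &exact));
        return 0;
}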