Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0129-4.9.30-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (show annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 202948 byte(s)
-added kernel-alx-legacy pkg
1 diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
2 index d9995f1f51b3..a25a99e82bb1 100644
3 --- a/Documentation/arm64/tagged-pointers.txt
4 +++ b/Documentation/arm64/tagged-pointers.txt
5 @@ -11,24 +11,56 @@ in AArch64 Linux.
6 The kernel configures the translation tables so that translations made
7 via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
8 the virtual address ignored by the translation hardware. This frees up
9 -this byte for application use, with the following caveats:
10 +this byte for application use.
11
12 - (1) The kernel requires that all user addresses passed to EL1
13 - are tagged with tag 0x00. This means that any syscall
14 - parameters containing user virtual addresses *must* have
15 - their top byte cleared before trapping to the kernel.
16
17 - (2) Non-zero tags are not preserved when delivering signals.
18 - This means that signal handlers in applications making use
19 - of tags cannot rely on the tag information for user virtual
20 - addresses being maintained for fields inside siginfo_t.
21 - One exception to this rule is for signals raised in response
22 - to watchpoint debug exceptions, where the tag information
23 - will be preserved.
24 +Passing tagged addresses to the kernel
25 +--------------------------------------
26
27 - (3) Special care should be taken when using tagged pointers,
28 - since it is likely that C compilers will not hazard two
29 - virtual addresses differing only in the upper byte.
30 +All interpretation of userspace memory addresses by the kernel assumes
31 +an address tag of 0x00.
32 +
33 +This includes, but is not limited to, addresses found in:
34 +
35 + - pointer arguments to system calls, including pointers in structures
36 + passed to system calls,
37 +
38 + - the stack pointer (sp), e.g. when interpreting it to deliver a
39 + signal,
40 +
41 + - the frame pointer (x29) and frame records, e.g. when interpreting
42 + them to generate a backtrace or call graph.
43 +
44 +Using non-zero address tags in any of these locations may result in an
45 +error code being returned, a (fatal) signal being raised, or other modes
46 +of failure.
47 +
48 +For these reasons, passing non-zero address tags to the kernel via
49 +system calls is forbidden, and using a non-zero address tag for sp is
50 +strongly discouraged.
51 +
52 +Programs maintaining a frame pointer and frame records that use non-zero
53 +address tags may suffer impaired or inaccurate debug and profiling
54 +visibility.
55 +
56 +
57 +Preserving tags
58 +---------------
59 +
60 +Non-zero tags are not preserved when delivering signals. This means that
61 +signal handlers in applications making use of tags cannot rely on the
62 +tag information for user virtual addresses being maintained for fields
63 +inside siginfo_t. One exception to this rule is for signals raised in
64 +response to watchpoint debug exceptions, where the tag information will
65 +be preserved.
66
67 The architecture prevents the use of a tagged PC, so the upper byte will
68 be set to a sign-extension of bit 55 on exception return.
69 +
70 +
71 +Other considerations
72 +--------------------
73 +
74 +Special care should be taken when using tagged pointers, since it is
75 +likely that C compilers will not hazard two virtual addresses differing
76 +only in the upper byte.
77 diff --git a/Makefile b/Makefile
78 index c12e501a18b8..b78a45bcf9b1 100644
79 --- a/Makefile
80 +++ b/Makefile
81 @@ -1,6 +1,6 @@
82 VERSION = 4
83 PATCHLEVEL = 9
84 -SUBLEVEL = 29
85 +SUBLEVEL = 30
86 EXTRAVERSION =
87 NAME = Roaring Lionus
88
89 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
90 index ffb93f499c83..4f95577b0180 100644
91 --- a/arch/alpha/kernel/osf_sys.c
92 +++ b/arch/alpha/kernel/osf_sys.c
93 @@ -1188,8 +1188,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
94 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
95 return -EFAULT;
96
97 - err = 0;
98 - err |= put_user(status, ustatus);
99 + err = put_user(status, ustatus);
100 + if (ret < 0)
101 + return err ? err : ret;
102 +
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
107 index c51fc652f6c7..5a53fcf542ab 100644
108 --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
109 +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
110 @@ -162,9 +162,10 @@
111 };
112
113 adc0: adc@f8018000 {
114 + atmel,adc-vref = <3300>;
115 + atmel,adc-channels-used = <0xfe>;
116 pinctrl-0 = <
117 &pinctrl_adc0_adtrg
118 - &pinctrl_adc0_ad0
119 &pinctrl_adc0_ad1
120 &pinctrl_adc0_ad2
121 &pinctrl_adc0_ad3
122 @@ -172,8 +173,6 @@
123 &pinctrl_adc0_ad5
124 &pinctrl_adc0_ad6
125 &pinctrl_adc0_ad7
126 - &pinctrl_adc0_ad8
127 - &pinctrl_adc0_ad9
128 >;
129 status = "okay";
130 };
131 diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
132 index 5bb8fd57e7f5..d71da30c9cff 100644
133 --- a/arch/arm/boot/dts/imx6sx-sdb.dts
134 +++ b/arch/arm/boot/dts/imx6sx-sdb.dts
135 @@ -12,23 +12,6 @@
136 model = "Freescale i.MX6 SoloX SDB RevB Board";
137 };
138
139 -&cpu0 {
140 - operating-points = <
141 - /* kHz uV */
142 - 996000 1250000
143 - 792000 1175000
144 - 396000 1175000
145 - 198000 1175000
146 - >;
147 - fsl,soc-operating-points = <
148 - /* ARM kHz SOC uV */
149 - 996000 1250000
150 - 792000 1175000
151 - 396000 1175000
152 - 198000 1175000
153 - >;
154 -};
155 -
156 &i2c1 {
157 clock-frequency = <100000>;
158 pinctrl-names = "default";
159 diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
160 index 4e361a8c167e..b4bfa5586c23 100644
161 --- a/arch/arm/boot/dts/tegra20-paz00.dts
162 +++ b/arch/arm/boot/dts/tegra20-paz00.dts
163 @@ -569,6 +569,7 @@
164 regulator-name = "+3VS,vdd_pnl";
165 regulator-min-microvolt = <3300000>;
166 regulator-max-microvolt = <3300000>;
167 + regulator-boot-on;
168 gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
169 enable-active-high;
170 };
171 diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h
172 index 4917c2f7e459..e74ab0fbab79 100644
173 --- a/arch/arm/include/asm/kvm_coproc.h
174 +++ b/arch/arm/include/asm/kvm_coproc.h
175 @@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
176 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
177 int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
178 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
179 -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
180 +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
181 +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
182 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
183 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
184
185 diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
186 index 464748b9fd7d..ed2319663a1e 100644
187 --- a/arch/arm/include/asm/module.h
188 +++ b/arch/arm/include/asm/module.h
189 @@ -18,13 +18,18 @@ enum {
190 };
191 #endif
192
193 +struct mod_plt_sec {
194 + struct elf32_shdr *plt;
195 + int plt_count;
196 +};
197 +
198 struct mod_arch_specific {
199 #ifdef CONFIG_ARM_UNWIND
200 struct unwind_table *unwind[ARM_SEC_MAX];
201 #endif
202 #ifdef CONFIG_ARM_MODULE_PLTS
203 - struct elf32_shdr *plt;
204 - int plt_count;
205 + struct mod_plt_sec core;
206 + struct mod_plt_sec init;
207 #endif
208 };
209
210 diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c
211 index 3a5cba90c971..3d0c2e4dda1d 100644
212 --- a/arch/arm/kernel/module-plts.c
213 +++ b/arch/arm/kernel/module-plts.c
214 @@ -1,5 +1,5 @@
215 /*
216 - * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
217 + * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
218 *
219 * This program is free software; you can redistribute it and/or modify
220 * it under the terms of the GNU General Public License version 2 as
221 @@ -31,9 +31,17 @@ struct plt_entries {
222 u32 lit[PLT_ENT_COUNT];
223 };
224
225 +static bool in_init(const struct module *mod, unsigned long loc)
226 +{
227 + return loc - (u32)mod->init_layout.base < mod->init_layout.size;
228 +}
229 +
230 u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
231 {
232 - struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr;
233 + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
234 + &mod->arch.init;
235 +
236 + struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr;
237 int idx = 0;
238
239 /*
240 @@ -41,9 +49,9 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
241 * relocations are sorted, this will be the last entry we allocated.
242 * (if one exists).
243 */
244 - if (mod->arch.plt_count > 0) {
245 - plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT;
246 - idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT;
247 + if (pltsec->plt_count > 0) {
248 + plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
249 + idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;
250
251 if (plt->lit[idx] == val)
252 return (u32)&plt->ldr[idx];
253 @@ -53,8 +61,8 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
254 plt++;
255 }
256
257 - mod->arch.plt_count++;
258 - BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size);
259 + pltsec->plt_count++;
260 + BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);
261
262 if (!idx)
263 /* Populate a new set of entries */
264 @@ -129,7 +137,7 @@ static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
265
266 /* Count how many PLT entries we may need */
267 static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
268 - const Elf32_Rel *rel, int num)
269 + const Elf32_Rel *rel, int num, Elf32_Word dstidx)
270 {
271 unsigned int ret = 0;
272 const Elf32_Sym *s;
273 @@ -144,13 +152,17 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
274 case R_ARM_THM_JUMP24:
275 /*
276 * We only have to consider branch targets that resolve
277 - * to undefined symbols. This is not simply a heuristic,
278 - * it is a fundamental limitation, since the PLT itself
279 - * is part of the module, and needs to be within range
280 - * as well, so modules can never grow beyond that limit.
281 + * to symbols that are defined in a different section.
282 + * This is not simply a heuristic, it is a fundamental
283 + * limitation, since there is no guaranteed way to emit
284 + * PLT entries sufficiently close to the branch if the
285 + * section size exceeds the range of a branch
286 + * instruction. So ignore relocations against defined
287 + * symbols if they live in the same section as the
288 + * relocation target.
289 */
290 s = syms + ELF32_R_SYM(rel[i].r_info);
291 - if (s->st_shndx != SHN_UNDEF)
292 + if (s->st_shndx == dstidx)
293 break;
294
295 /*
296 @@ -161,7 +173,12 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
297 * So we need to support them, but there is no need to
298 * take them into consideration when trying to optimize
299 * this code. So let's only check for duplicates when
300 - * the addend is zero.
301 + * the addend is zero. (Note that calls into the core
302 + * module via init PLT entries could involve section
303 + * relative symbol references with non-zero addends, for
304 + * which we may end up emitting duplicates, but the init
305 + * PLT is released along with the rest of the .init
306 + * region as soon as module loading completes.)
307 */
308 if (!is_zero_addend_relocation(base, rel + i) ||
309 !duplicate_rel(base, rel, i))
310 @@ -174,7 +191,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
311 int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
312 char *secstrings, struct module *mod)
313 {
314 - unsigned long plts = 0;
315 + unsigned long core_plts = 0;
316 + unsigned long init_plts = 0;
317 Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
318 Elf32_Sym *syms = NULL;
319
320 @@ -184,13 +202,15 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
321 */
322 for (s = sechdrs; s < sechdrs_end; ++s) {
323 if (strcmp(".plt", secstrings + s->sh_name) == 0)
324 - mod->arch.plt = s;
325 + mod->arch.core.plt = s;
326 + else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
327 + mod->arch.init.plt = s;
328 else if (s->sh_type == SHT_SYMTAB)
329 syms = (Elf32_Sym *)s->sh_addr;
330 }
331
332 - if (!mod->arch.plt) {
333 - pr_err("%s: module PLT section missing\n", mod->name);
334 + if (!mod->arch.core.plt || !mod->arch.init.plt) {
335 + pr_err("%s: module PLT section(s) missing\n", mod->name);
336 return -ENOEXEC;
337 }
338 if (!syms) {
339 @@ -213,16 +233,29 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
340 /* sort by type and symbol index */
341 sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);
342
343 - plts += count_plts(syms, dstsec->sh_addr, rels, numrels);
344 + if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
345 + core_plts += count_plts(syms, dstsec->sh_addr, rels,
346 + numrels, s->sh_info);
347 + else
348 + init_plts += count_plts(syms, dstsec->sh_addr, rels,
349 + numrels, s->sh_info);
350 }
351
352 - mod->arch.plt->sh_type = SHT_NOBITS;
353 - mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
354 - mod->arch.plt->sh_addralign = L1_CACHE_BYTES;
355 - mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE,
356 - sizeof(struct plt_entries));
357 - mod->arch.plt_count = 0;
358 -
359 - pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size);
360 + mod->arch.core.plt->sh_type = SHT_NOBITS;
361 + mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
362 + mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
363 + mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
364 + sizeof(struct plt_entries));
365 + mod->arch.core.plt_count = 0;
366 +
367 + mod->arch.init.plt->sh_type = SHT_NOBITS;
368 + mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
369 + mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
370 + mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
371 + sizeof(struct plt_entries));
372 + mod->arch.init.plt_count = 0;
373 +
374 + pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
375 + mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
376 return 0;
377 }
378 diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds
379 index 05881e2b414c..eacb5c67f61e 100644
380 --- a/arch/arm/kernel/module.lds
381 +++ b/arch/arm/kernel/module.lds
382 @@ -1,3 +1,4 @@
383 SECTIONS {
384 .plt : { BYTE(0) }
385 + .init.plt : { BYTE(0) }
386 }
387 diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
388 index 3e5e4194ef86..c3ed6bd5ddf3 100644
389 --- a/arch/arm/kvm/coproc.c
390 +++ b/arch/arm/kvm/coproc.c
391 @@ -93,12 +93,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
392 return 1;
393 }
394
395 -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
396 -{
397 - kvm_inject_undefined(vcpu);
398 - return 1;
399 -}
400 -
401 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
402 {
403 /*
404 @@ -514,12 +508,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu,
405 return 1;
406 }
407
408 -/**
409 - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
410 - * @vcpu: The VCPU pointer
411 - * @run: The kvm_run struct
412 - */
413 -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
414 +static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
415 {
416 struct coproc_params params;
417
418 @@ -533,9 +522,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
419 params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
420 params.CRm = 0;
421
422 + return params;
423 +}
424 +
425 +/**
426 + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
427 + * @vcpu: The VCPU pointer
428 + * @run: The kvm_run struct
429 + */
430 +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
431 +{
432 + struct coproc_params params = decode_64bit_hsr(vcpu);
433 +
434 return emulate_cp15(vcpu, &params);
435 }
436
437 +/**
438 + * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
439 + * @vcpu: The VCPU pointer
440 + * @run: The kvm_run struct
441 + */
442 +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
443 +{
444 + struct coproc_params params = decode_64bit_hsr(vcpu);
445 +
446 + /* raz_wi cp14 */
447 + pm_fake(vcpu, &params, NULL);
448 +
449 + /* handled */
450 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
451 + return 1;
452 +}
453 +
454 static void reset_coproc_regs(struct kvm_vcpu *vcpu,
455 const struct coproc_reg *table, size_t num)
456 {
457 @@ -546,12 +564,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu,
458 table[i].reset(vcpu, &table[i]);
459 }
460
461 -/**
462 - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
463 - * @vcpu: The VCPU pointer
464 - * @run: The kvm_run struct
465 - */
466 -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
467 +static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
468 {
469 struct coproc_params params;
470
471 @@ -565,9 +578,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
472 params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
473 params.Rt2 = 0;
474
475 + return params;
476 +}
477 +
478 +/**
479 + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
480 + * @vcpu: The VCPU pointer
481 + * @run: The kvm_run struct
482 + */
483 +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
484 +{
485 + struct coproc_params params = decode_32bit_hsr(vcpu);
486 return emulate_cp15(vcpu, &params);
487 }
488
489 +/**
490 + * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
491 + * @vcpu: The VCPU pointer
492 + * @run: The kvm_run struct
493 + */
494 +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
495 +{
496 + struct coproc_params params = decode_32bit_hsr(vcpu);
497 +
498 + /* raz_wi cp14 */
499 + pm_fake(vcpu, &params, NULL);
500 +
501 + /* handled */
502 + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
503 + return 1;
504 +}
505 +
506 /******************************************************************************
507 * Userspace API
508 *****************************************************************************/
509 diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
510 index 4e40d1955e35..066b6d4508ce 100644
511 --- a/arch/arm/kvm/handle_exit.c
512 +++ b/arch/arm/kvm/handle_exit.c
513 @@ -83,9 +83,9 @@ static exit_handle_fn arm_exit_handlers[] = {
514 [HSR_EC_WFI] = kvm_handle_wfx,
515 [HSR_EC_CP15_32] = kvm_handle_cp15_32,
516 [HSR_EC_CP15_64] = kvm_handle_cp15_64,
517 - [HSR_EC_CP14_MR] = kvm_handle_cp14_access,
518 + [HSR_EC_CP14_MR] = kvm_handle_cp14_32,
519 [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
520 - [HSR_EC_CP14_64] = kvm_handle_cp14_access,
521 + [HSR_EC_CP14_64] = kvm_handle_cp14_64,
522 [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
523 [HSR_EC_CP10_ID] = kvm_handle_cp10_id,
524 [HSR_EC_HVC] = handle_hvc,
525 diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
526 index 3023bb530edf..8679405b0b2b 100644
527 --- a/arch/arm/kvm/hyp/Makefile
528 +++ b/arch/arm/kvm/hyp/Makefile
529 @@ -2,6 +2,8 @@
530 # Makefile for Kernel-based Virtual Machine module, HYP part
531 #
532
533 +ccflags-y += -fno-stack-protector
534 +
535 KVM=../../../../virt/kvm
536
537 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
538 diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
539 index 92678b7bd046..624a510d31df 100644
540 --- a/arch/arm/kvm/hyp/switch.c
541 +++ b/arch/arm/kvm/hyp/switch.c
542 @@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
543 write_sysreg(HSTR_T(15), HSTR);
544 write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
545 val = read_sysreg(HDCR);
546 - write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
547 + val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
548 + val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
549 + write_sysreg(val, HDCR);
550 }
551
552 static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
553 diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
554 index 8dea61640cc1..50497778c2e5 100644
555 --- a/arch/arm/mm/proc-v7m.S
556 +++ b/arch/arm/mm/proc-v7m.S
557 @@ -147,10 +147,10 @@ __v7m_setup_cont:
558
559 @ Configure caches (if implemented)
560 teq r8, #0
561 - stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
562 + stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
563 blne v7m_invalidate_l1
564 teq r8, #0 @ re-evalutae condition
565 - ldmneia r12, {r0-r6, lr}
566 + ldmneia sp, {r0-r6, lr}
567
568 @ Configure the System Control Register to ensure 8-byte stack alignment
569 @ Note the STKALIGN bit is either RW or RAO.
570 diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
571 index 17839db585d5..509a2eda2ce4 100644
572 --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
573 +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi
574 @@ -772,6 +772,7 @@
575 clocks = <&sys_ctrl 2>, <&sys_ctrl 1>;
576 clock-names = "ciu", "biu";
577 resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>;
578 + reset-names = "reset";
579 bus-width = <0x8>;
580 vmmc-supply = <&ldo19>;
581 pinctrl-names = "default";
582 @@ -795,6 +796,7 @@
583 clocks = <&sys_ctrl 4>, <&sys_ctrl 3>;
584 clock-names = "ciu", "biu";
585 resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>;
586 + reset-names = "reset";
587 vqmmc-supply = <&ldo7>;
588 vmmc-supply = <&ldo10>;
589 bus-width = <0x4>;
590 @@ -813,6 +815,7 @@
591 clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>;
592 clock-names = "ciu", "biu";
593 resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>;
594 + reset-names = "reset";
595 bus-width = <0x4>;
596 broken-cd;
597 pinctrl-names = "default", "idle";
598 diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
599 index 4e0497f581a0..0fe7e43b7fbc 100644
600 --- a/arch/arm64/include/asm/barrier.h
601 +++ b/arch/arm64/include/asm/barrier.h
602 @@ -42,25 +42,35 @@
603 #define __smp_rmb() dmb(ishld)
604 #define __smp_wmb() dmb(ishst)
605
606 -#define __smp_store_release(p, v) \
607 +#define __smp_store_release(p, v) \
608 do { \
609 + union { typeof(*p) __val; char __c[1]; } __u = \
610 + { .__val = (__force typeof(*p)) (v) }; \
611 compiletime_assert_atomic_type(*p); \
612 switch (sizeof(*p)) { \
613 case 1: \
614 asm volatile ("stlrb %w1, %0" \
615 - : "=Q" (*p) : "r" (v) : "memory"); \
616 + : "=Q" (*p) \
617 + : "r" (*(__u8 *)__u.__c) \
618 + : "memory"); \
619 break; \
620 case 2: \
621 asm volatile ("stlrh %w1, %0" \
622 - : "=Q" (*p) : "r" (v) : "memory"); \
623 + : "=Q" (*p) \
624 + : "r" (*(__u16 *)__u.__c) \
625 + : "memory"); \
626 break; \
627 case 4: \
628 asm volatile ("stlr %w1, %0" \
629 - : "=Q" (*p) : "r" (v) : "memory"); \
630 + : "=Q" (*p) \
631 + : "r" (*(__u32 *)__u.__c) \
632 + : "memory"); \
633 break; \
634 case 8: \
635 asm volatile ("stlr %1, %0" \
636 - : "=Q" (*p) : "r" (v) : "memory"); \
637 + : "=Q" (*p) \
638 + : "r" (*(__u64 *)__u.__c) \
639 + : "memory"); \
640 break; \
641 } \
642 } while (0)
643 diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
644 index 91b26d26af8a..ae852add053d 100644
645 --- a/arch/arm64/include/asm/cmpxchg.h
646 +++ b/arch/arm64/include/asm/cmpxchg.h
647 @@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \
648 " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
649 __nops(3) \
650 " " #nop_lse) \
651 - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
652 + : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
653 : "r" (x) \
654 : cl); \
655 \
656 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
657 index 55d0adbf6509..14cca10aeb4e 100644
658 --- a/arch/arm64/include/asm/uaccess.h
659 +++ b/arch/arm64/include/asm/uaccess.h
660 @@ -93,11 +93,12 @@ static inline void set_fs(mm_segment_t fs)
661 */
662 #define __range_ok(addr, size) \
663 ({ \
664 + unsigned long __addr = (unsigned long __force)(addr); \
665 unsigned long flag, roksum; \
666 __chk_user_ptr(addr); \
667 asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
668 : "=&r" (flag), "=&r" (roksum) \
669 - : "1" (addr), "Ir" (size), \
670 + : "1" (__addr), "Ir" (size), \
671 "r" (current_thread_info()->addr_limit) \
672 : "cc"); \
673 flag; \
674 diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
675 index b0988bb1bf64..c0ede237c14b 100644
676 --- a/arch/arm64/kernel/armv8_deprecated.c
677 +++ b/arch/arm64/kernel/armv8_deprecated.c
678 @@ -309,7 +309,8 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
679 ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
680 CONFIG_ARM64_PAN) \
681 : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \
682 - : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \
683 + : "r" ((unsigned long)addr), "i" (-EAGAIN), \
684 + "i" (-EFAULT), \
685 "i" (__SWP_LL_SC_LOOPS) \
686 : "memory")
687
688 diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
689 index aaf42ae8d8c3..14c4e3b14bcb 100644
690 --- a/arch/arm64/kvm/hyp/Makefile
691 +++ b/arch/arm64/kvm/hyp/Makefile
692 @@ -2,6 +2,8 @@
693 # Makefile for Kernel-based Virtual Machine module, HYP part
694 #
695
696 +ccflags-y += -fno-stack-protector
697 +
698 KVM=../../../../virt/kvm
699
700 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
701 diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
702 index 07238b39638c..3db381205928 100644
703 --- a/arch/metag/include/asm/uaccess.h
704 +++ b/arch/metag/include/asm/uaccess.h
705 @@ -28,24 +28,32 @@
706
707 #define segment_eq(a, b) ((a).seg == (b).seg)
708
709 -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
710 -/*
711 - * Explicitly allow NULL pointers here. Parts of the kernel such
712 - * as readv/writev use access_ok to validate pointers, but want
713 - * to allow NULL pointers for various reasons. NULL pointers are
714 - * safe to allow through because the first page is not mappable on
715 - * Meta.
716 - *
717 - * We also wish to avoid letting user code access the system area
718 - * and the kernel half of the address space.
719 - */
720 -#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
721 - ((addr) > PAGE_OFFSET && \
722 - (addr) < LINCORE_BASE))
723 -
724 static inline int __access_ok(unsigned long addr, unsigned long size)
725 {
726 - return __kernel_ok || !__user_bad(addr, size);
727 + /*
728 + * Allow access to the user mapped memory area, but not the system area
729 + * before it. The check extends to the top of the address space when
730 + * kernel access is allowed (there's no real reason to user copy to the
731 + * system area in any case).
732 + */
733 + if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
734 + size <= get_fs().seg - addr))
735 + return true;
736 + /*
737 + * Explicitly allow NULL pointers here. Parts of the kernel such
738 + * as readv/writev use access_ok to validate pointers, but want
739 + * to allow NULL pointers for various reasons. NULL pointers are
740 + * safe to allow through because the first page is not mappable on
741 + * Meta.
742 + */
743 + if (!addr)
744 + return true;
745 + /* Allow access to core code memory area... */
746 + if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
747 + size <= LINCORE_CODE_LIMIT + 1 - addr)
748 + return true;
749 + /* ... but no other areas. */
750 + return false;
751 }
752
753 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
754 @@ -186,8 +194,13 @@ do { \
755 extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
756 long count);
757
758 -#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
759 -
760 +static inline long
761 +strncpy_from_user(char *dst, const char __user *src, long count)
762 +{
763 + if (!access_ok(VERIFY_READ, src, 1))
764 + return -EFAULT;
765 + return __strncpy_from_user(dst, src, count);
766 +}
767 /*
768 * Return the size of a string (including the ending 0)
769 *
770 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
771 index 5a4f2eb9d0d5..5e844f68e847 100644
772 --- a/arch/mips/Kconfig
773 +++ b/arch/mips/Kconfig
774 @@ -1368,6 +1368,7 @@ config CPU_LOONGSON3
775 select WEAK_ORDERING
776 select WEAK_REORDERING_BEYOND_LLSC
777 select MIPS_PGD_C0_CONTEXT
778 + select MIPS_L1_CACHE_SHIFT_6
779 select GPIOLIB
780 help
781 The Loongson 3 processor implements the MIPS64R2 instruction
782 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
783 index b9e3f0aca261..0012f0353fd6 100644
784 --- a/arch/powerpc/include/asm/mmu_context.h
785 +++ b/arch/powerpc/include/asm/mmu_context.h
786 @@ -70,8 +70,9 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm);
787 * switch_mm is the entry point called from the architecture independent
788 * code in kernel/sched/core.c
789 */
790 -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
791 - struct task_struct *tsk)
792 +static inline void switch_mm_irqs_off(struct mm_struct *prev,
793 + struct mm_struct *next,
794 + struct task_struct *tsk)
795 {
796 /* Mark this context has been used on the new CPU */
797 if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
798 @@ -110,6 +111,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
799 switch_mmu_context(prev, next, tsk);
800 }
801
802 +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
803 + struct task_struct *tsk)
804 +{
805 + unsigned long flags;
806 +
807 + local_irq_save(flags);
808 + switch_mm_irqs_off(prev, next, tsk);
809 + local_irq_restore(flags);
810 +}
811 +#define switch_mm_irqs_off switch_mm_irqs_off
812 +
813 +
814 #define deactivate_mm(tsk,mm) do { } while (0)
815
816 /*
817 diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
818 index a5dd493670a0..6ef8f0bceacd 100644
819 --- a/arch/powerpc/kernel/eeh_driver.c
820 +++ b/arch/powerpc/kernel/eeh_driver.c
821 @@ -724,7 +724,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
822 */
823 #define MAX_WAIT_FOR_RECOVERY 300
824
825 -static void eeh_handle_normal_event(struct eeh_pe *pe)
826 +static bool eeh_handle_normal_event(struct eeh_pe *pe)
827 {
828 struct pci_bus *frozen_bus;
829 struct eeh_dev *edev, *tmp;
830 @@ -736,7 +736,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
831 if (!frozen_bus) {
832 pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
833 __func__, pe->phb->global_number, pe->addr);
834 - return;
835 + return false;
836 }
837
838 eeh_pe_update_time_stamp(pe);
839 @@ -870,7 +870,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
840 pr_info("EEH: Notify device driver to resume\n");
841 eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
842
843 - return;
844 + return false;
845
846 excess_failures:
847 /*
848 @@ -915,8 +915,12 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
849 pci_lock_rescan_remove();
850 pci_hp_remove_devices(frozen_bus);
851 pci_unlock_rescan_remove();
852 +
853 + /* The passed PE should no longer be used */
854 + return true;
855 }
856 }
857 + return false;
858 }
859
860 static void eeh_handle_special_event(void)
861 @@ -982,7 +986,14 @@ static void eeh_handle_special_event(void)
862 */
863 if (rc == EEH_NEXT_ERR_FROZEN_PE ||
864 rc == EEH_NEXT_ERR_FENCED_PHB) {
865 - eeh_handle_normal_event(pe);
866 + /*
867 + * eeh_handle_normal_event() can make the PE stale if it
868 + * determines that the PE cannot possibly be recovered.
869 + * Don't modify the PE state if that's the case.
870 + */
871 + if (eeh_handle_normal_event(pe))
872 + continue;
873 +
874 eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
875 } else {
876 pci_lock_rescan_remove();
877 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
878 index 38a1f96430e1..ca03eb229a9a 100644
879 --- a/arch/powerpc/kernel/exceptions-64e.S
880 +++ b/arch/powerpc/kernel/exceptions-64e.S
881 @@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
882 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
883 beq+ 1f
884
885 +#ifdef CONFIG_RELOCATABLE
886 + ld r15,PACATOC(r13)
887 + ld r14,interrupt_base_book3e@got(r15)
888 + ld r15,__end_interrupts@got(r15)
889 +#else
890 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
891 LOAD_REG_IMMEDIATE(r15,__end_interrupts)
892 +#endif
893 cmpld cr0,r10,r14
894 cmpld cr1,r10,r15
895 blt+ cr0,1f
896 @@ -799,8 +805,14 @@ kernel_dbg_exc:
897 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
898 beq+ 1f
899
900 +#ifdef CONFIG_RELOCATABLE
901 + ld r15,PACATOC(r13)
902 + ld r14,interrupt_base_book3e@got(r15)
903 + ld r15,__end_interrupts@got(r15)
904 +#else
905 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
906 LOAD_REG_IMMEDIATE(r15,__end_interrupts)
907 +#endif
908 cmpld cr0,r10,r14
909 cmpld cr1,r10,r15
910 blt+ cr0,1f
911 diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
912 index 5e7ece0fda9f..ea236bfd841f 100644
913 --- a/arch/powerpc/kernel/mce.c
914 +++ b/arch/powerpc/kernel/mce.c
915 @@ -205,6 +205,8 @@ static void machine_check_process_queued_event(struct irq_work *work)
916 {
917 int index;
918
919 + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
920 +
921 /*
922 * For now just print it to console.
923 * TODO: log this error event to FSP or nvram.
924 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
925 index 49a680d5ae37..c7164739dc75 100644
926 --- a/arch/powerpc/kernel/process.c
927 +++ b/arch/powerpc/kernel/process.c
928 @@ -839,6 +839,25 @@ static void tm_reclaim_thread(struct thread_struct *thr,
929 if (!MSR_TM_SUSPENDED(mfmsr()))
930 return;
931
932 + /*
933 + * If we are in a transaction and FP is off then we can't have
934 + * used FP inside that transaction. Hence the checkpointed
935 + * state is the same as the live state. We need to copy the
936 + * live state to the checkpointed state so that when the
937 + * transaction is restored, the checkpointed state is correct
938 + * and the aborted transaction sees the correct state. We use
939 + * ckpt_regs.msr here as that's what tm_reclaim will use to
940 + * determine if it's going to write the checkpointed state or
941 + * not. So either this will write the checkpointed registers,
942 + * or reclaim will. Similarly for VMX.
943 + */
944 + if ((thr->ckpt_regs.msr & MSR_FP) == 0)
945 + memcpy(&thr->ckfp_state, &thr->fp_state,
946 + sizeof(struct thread_fp_state));
947 + if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
948 + memcpy(&thr->ckvr_state, &thr->vr_state,
949 + sizeof(struct thread_vr_state));
950 +
951 giveup_all(container_of(thr, struct task_struct, thread));
952
953 tm_reclaim(thr, thr->ckpt_regs.msr, cause);
954 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
955 index 023a462725b5..43021f8e47a6 100644
956 --- a/arch/powerpc/kernel/traps.c
957 +++ b/arch/powerpc/kernel/traps.c
958 @@ -302,8 +302,6 @@ long machine_check_early(struct pt_regs *regs)
959
960 __this_cpu_inc(irq_stat.mce_exceptions);
961
962 - add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
963 -
964 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
965 handled = cur_cpu_spec->machine_check_early(regs);
966 return handled;
967 @@ -737,6 +735,8 @@ void machine_check_exception(struct pt_regs *regs)
968
969 __this_cpu_inc(irq_stat.mce_exceptions);
970
971 + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
972 +
973 /* See if any machine dependent calls. In theory, we would want
974 * to call the CPU first, and call the ppc_md. one if the CPU
975 * one returns a positive number. However there is existing code
976 diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
977 index 7de7124ac91b..fd596808ac24 100644
978 --- a/arch/powerpc/mm/mmu_context_iommu.c
979 +++ b/arch/powerpc/mm/mmu_context_iommu.c
980 @@ -81,7 +81,7 @@ struct page *new_iommu_non_cma_page(struct page *page, unsigned long private,
981 gfp_t gfp_mask = GFP_USER;
982 struct page *new_page;
983
984 - if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
985 + if (PageCompound(page))
986 return NULL;
987
988 if (PageHighMem(page))
989 @@ -100,7 +100,7 @@ static int mm_iommu_move_page_from_cma(struct page *page)
990 LIST_HEAD(cma_migrate_pages);
991
992 /* Ignore huge pages for now */
993 - if (PageHuge(page) || PageTransHuge(page) || PageCompound(page))
994 + if (PageCompound(page))
995 return -EBUSY;
996
997 lru_add_drain();
998 diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
999 index 423e450efe07..72ae2cdbcd6a 100644
1000 --- a/arch/powerpc/platforms/pseries/dlpar.c
1001 +++ b/arch/powerpc/platforms/pseries/dlpar.c
1002 @@ -288,7 +288,6 @@ int dlpar_detach_node(struct device_node *dn)
1003 if (rc)
1004 return rc;
1005
1006 - of_node_put(dn); /* Must decrement the refcount */
1007 return 0;
1008 }
1009
1010 diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
1011 index 408b4f4fda0f..598254461fb7 100644
1012 --- a/arch/s390/kernel/crash_dump.c
1013 +++ b/arch/s390/kernel/crash_dump.c
1014 @@ -427,6 +427,20 @@ static void *nt_vmcoreinfo(void *ptr)
1015 }
1016
1017 /*
1018 + * Initialize final note (needed for /proc/vmcore code)
1019 + */
1020 +static void *nt_final(void *ptr)
1021 +{
1022 + Elf64_Nhdr *note;
1023 +
1024 + note = (Elf64_Nhdr *) ptr;
1025 + note->n_namesz = 0;
1026 + note->n_descsz = 0;
1027 + note->n_type = 0;
1028 + return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
1029 +}
1030 +
1031 +/*
1032 * Initialize ELF header (new kernel)
1033 */
1034 static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
1035 @@ -513,6 +527,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset)
1036 if (sa->prefix != 0)
1037 ptr = fill_cpu_elf_notes(ptr, cpu++, sa);
1038 ptr = nt_vmcoreinfo(ptr);
1039 + ptr = nt_final(ptr);
1040 memset(phdr, 0, sizeof(*phdr));
1041 phdr->p_type = PT_NOTE;
1042 phdr->p_offset = notes_offset;
1043 diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1044 index 49a30737adde..c43816886839 100644
1045 --- a/arch/s390/kernel/entry.S
1046 +++ b/arch/s390/kernel/entry.S
1047 @@ -321,6 +321,7 @@ ENTRY(system_call)
1048 lg %r14,__LC_VDSO_PER_CPU
1049 lmg %r0,%r10,__PT_R0(%r11)
1050 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
1051 +.Lsysc_exit_timer:
1052 stpt __LC_EXIT_TIMER
1053 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1054 lmg %r11,%r15,__PT_R11(%r11)
1055 @@ -606,6 +607,7 @@ ENTRY(io_int_handler)
1056 lg %r14,__LC_VDSO_PER_CPU
1057 lmg %r0,%r10,__PT_R0(%r11)
1058 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
1059 +.Lio_exit_timer:
1060 stpt __LC_EXIT_TIMER
1061 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
1062 lmg %r11,%r15,__PT_R11(%r11)
1063 @@ -1135,15 +1137,23 @@ cleanup_critical:
1064 br %r14
1065
1066 .Lcleanup_sysc_restore:
1067 + # check if stpt has been executed
1068 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
1069 + jh 0f
1070 + mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
1071 + cghi %r11,__LC_SAVE_AREA_ASYNC
1072 je 0f
1073 + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1074 +0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
1075 + je 1f
1076 lg %r9,24(%r11) # get saved pointer to pt_regs
1077 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1078 mvc 0(64,%r11),__PT_R8(%r9)
1079 lmg %r0,%r7,__PT_R0(%r9)
1080 -0: lmg %r8,%r9,__LC_RETURN_PSW
1081 +1: lmg %r8,%r9,__LC_RETURN_PSW
1082 br %r14
1083 .Lcleanup_sysc_restore_insn:
1084 + .quad .Lsysc_exit_timer
1085 .quad .Lsysc_done - 4
1086
1087 .Lcleanup_io_tif:
1088 @@ -1151,15 +1161,20 @@ cleanup_critical:
1089 br %r14
1090
1091 .Lcleanup_io_restore:
1092 + # check if stpt has been executed
1093 clg %r9,BASED(.Lcleanup_io_restore_insn)
1094 - je 0f
1095 + jh 0f
1096 + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
1097 +0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
1098 + je 1f
1099 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
1100 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
1101 mvc 0(64,%r11),__PT_R8(%r9)
1102 lmg %r0,%r7,__PT_R0(%r9)
1103 -0: lmg %r8,%r9,__LC_RETURN_PSW
1104 +1: lmg %r8,%r9,__LC_RETURN_PSW
1105 br %r14
1106 .Lcleanup_io_restore_insn:
1107 + .quad .Lio_exit_timer
1108 .quad .Lio_done - 4
1109
1110 .Lcleanup_idle:
1111 diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c
1112 index 48bae81f8dca..6f6e7896e53f 100644
1113 --- a/arch/um/kernel/initrd.c
1114 +++ b/arch/um/kernel/initrd.c
1115 @@ -14,7 +14,7 @@
1116 static char *initrd __initdata = NULL;
1117 static int load_initrd(char *filename, void *buf, int size);
1118
1119 -static int __init read_initrd(void)
1120 +int __init read_initrd(void)
1121 {
1122 void *area;
1123 long long size;
1124 @@ -46,8 +46,6 @@ static int __init read_initrd(void)
1125 return 0;
1126 }
1127
1128 -__uml_postsetup(read_initrd);
1129 -
1130 static int __init uml_initrd_setup(char *line, int *add)
1131 {
1132 initrd = line;
1133 diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c
1134 index e8175a8aa22c..26b47deca2a0 100644
1135 --- a/arch/um/kernel/um_arch.c
1136 +++ b/arch/um/kernel/um_arch.c
1137 @@ -336,11 +336,17 @@ int __init linux_main(int argc, char **argv)
1138 return start_uml();
1139 }
1140
1141 +int __init __weak read_initrd(void)
1142 +{
1143 + return 0;
1144 +}
1145 +
1146 void __init setup_arch(char **cmdline_p)
1147 {
1148 stack_protections((unsigned long) &init_thread_info);
1149 setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem);
1150 mem_total_pages(physmem_size, iomem_size, highmem);
1151 + read_initrd();
1152
1153 paging_init();
1154 strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
1155 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
1156 index faf3687f1035..a300aa10ebc5 100644
1157 --- a/arch/x86/include/asm/uaccess.h
1158 +++ b/arch/x86/include/asm/uaccess.h
1159 @@ -315,10 +315,10 @@ do { \
1160 #define __get_user_asm_u64(x, ptr, retval, errret) \
1161 ({ \
1162 __typeof__(ptr) __ptr = (ptr); \
1163 - asm volatile(ASM_STAC "\n" \
1164 + asm volatile("\n" \
1165 "1: movl %2,%%eax\n" \
1166 "2: movl %3,%%edx\n" \
1167 - "3: " ASM_CLAC "\n" \
1168 + "3:\n" \
1169 ".section .fixup,\"ax\"\n" \
1170 "4: mov %4,%0\n" \
1171 " xorl %%eax,%%eax\n" \
1172 @@ -327,7 +327,7 @@ do { \
1173 ".previous\n" \
1174 _ASM_EXTABLE(1b, 4b) \
1175 _ASM_EXTABLE(2b, 4b) \
1176 - : "=r" (retval), "=A"(x) \
1177 + : "=r" (retval), "=&A"(x) \
1178 : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
1179 "i" (errret), "0" (retval)); \
1180 })
1181 diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
1182 index 2f2b8c7ccb85..6f0ab305dd5e 100644
1183 --- a/arch/x86/kernel/fpu/init.c
1184 +++ b/arch/x86/kernel/fpu/init.c
1185 @@ -101,6 +101,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
1186 * Boot time FPU feature detection code:
1187 */
1188 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
1189 +EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
1190
1191 static void __init fpu__init_system_mxcsr(void)
1192 {
1193 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1194 index 43c152853969..81bba3c2137d 100644
1195 --- a/arch/x86/kvm/x86.c
1196 +++ b/arch/x86/kvm/x86.c
1197 @@ -1735,6 +1735,7 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
1198 {
1199 struct kvm_arch *ka = &kvm->arch;
1200 struct pvclock_vcpu_time_info hv_clock;
1201 + u64 ret;
1202
1203 spin_lock(&ka->pvclock_gtod_sync_lock);
1204 if (!ka->use_master_clock) {
1205 @@ -1746,10 +1747,17 @@ static u64 __get_kvmclock_ns(struct kvm *kvm)
1206 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
1207 spin_unlock(&ka->pvclock_gtod_sync_lock);
1208
1209 + /* both __this_cpu_read() and rdtsc() should be on the same cpu */
1210 + get_cpu();
1211 +
1212 kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
1213 &hv_clock.tsc_shift,
1214 &hv_clock.tsc_to_system_mul);
1215 - return __pvclock_read_cycles(&hv_clock, rdtsc());
1216 + ret = __pvclock_read_cycles(&hv_clock, rdtsc());
1217 +
1218 + put_cpu();
1219 +
1220 + return ret;
1221 }
1222
1223 u64 get_kvmclock_ns(struct kvm *kvm)
1224 @@ -3231,11 +3239,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
1225 }
1226 }
1227
1228 +#define XSAVE_MXCSR_OFFSET 24
1229 +
1230 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
1231 struct kvm_xsave *guest_xsave)
1232 {
1233 u64 xstate_bv =
1234 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
1235 + u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
1236
1237 if (boot_cpu_has(X86_FEATURE_XSAVE)) {
1238 /*
1239 @@ -3243,11 +3254,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
1240 * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
1241 * with old userspace.
1242 */
1243 - if (xstate_bv & ~kvm_supported_xcr0())
1244 + if (xstate_bv & ~kvm_supported_xcr0() ||
1245 + mxcsr & ~mxcsr_feature_mask)
1246 return -EINVAL;
1247 load_xsave(vcpu, (u8 *)guest_xsave->region);
1248 } else {
1249 - if (xstate_bv & ~XFEATURE_MASK_FPSSE)
1250 + if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
1251 + mxcsr & ~mxcsr_feature_mask)
1252 return -EINVAL;
1253 memcpy(&vcpu->arch.guest_fpu.state.fxsave,
1254 guest_xsave->region, sizeof(struct fxregs_state));
1255 @@ -4750,16 +4763,20 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
1256
1257 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
1258 {
1259 - /* TODO: String I/O for in kernel device */
1260 - int r;
1261 + int r = 0, i;
1262
1263 - if (vcpu->arch.pio.in)
1264 - r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
1265 - vcpu->arch.pio.size, pd);
1266 - else
1267 - r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
1268 - vcpu->arch.pio.port, vcpu->arch.pio.size,
1269 - pd);
1270 + for (i = 0; i < vcpu->arch.pio.count; i++) {
1271 + if (vcpu->arch.pio.in)
1272 + r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
1273 + vcpu->arch.pio.size, pd);
1274 + else
1275 + r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
1276 + vcpu->arch.pio.port, vcpu->arch.pio.size,
1277 + pd);
1278 + if (r)
1279 + break;
1280 + pd += vcpu->arch.pio.size;
1281 + }
1282 return r;
1283 }
1284
1285 @@ -4797,6 +4814,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
1286 if (vcpu->arch.pio.count)
1287 goto data_avail;
1288
1289 + memset(vcpu->arch.pio_data, 0, size * count);
1290 +
1291 ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
1292 if (ret) {
1293 data_avail:
1294 diff --git a/drivers/char/lp.c b/drivers/char/lp.c
1295 index c4094c4e22c1..34ef474a3923 100644
1296 --- a/drivers/char/lp.c
1297 +++ b/drivers/char/lp.c
1298 @@ -859,7 +859,11 @@ static int __init lp_setup (char *str)
1299 } else if (!strcmp(str, "auto")) {
1300 parport_nr[0] = LP_PARPORT_AUTO;
1301 } else if (!strcmp(str, "none")) {
1302 - parport_nr[parport_ptr++] = LP_PARPORT_NONE;
1303 + if (parport_ptr < LP_NO)
1304 + parport_nr[parport_ptr++] = LP_PARPORT_NONE;
1305 + else
1306 + printk(KERN_INFO "lp: too many ports, %s ignored.\n",
1307 + str);
1308 } else if (!strcmp(str, "reset")) {
1309 reset = 1;
1310 }
1311 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
1312 index 7e4a9d1296bb..6e0cbe092220 100644
1313 --- a/drivers/char/mem.c
1314 +++ b/drivers/char/mem.c
1315 @@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = {
1316 static int mmap_mem(struct file *file, struct vm_area_struct *vma)
1317 {
1318 size_t size = vma->vm_end - vma->vm_start;
1319 + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1320 +
1321 + /* It's illegal to wrap around the end of the physical address space. */
1322 + if (offset + (phys_addr_t)size < offset)
1323 + return -EINVAL;
1324
1325 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
1326 return -EINVAL;
1327 diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
1328 index a7c870af916c..fa0f66809503 100644
1329 --- a/drivers/char/tpm/tpm_crb.c
1330 +++ b/drivers/char/tpm/tpm_crb.c
1331 @@ -111,8 +111,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count)
1332
1333 memcpy_fromio(buf, priv->rsp, 6);
1334 expected = be32_to_cpup((__be32 *) &buf[2]);
1335 -
1336 - if (expected > count)
1337 + if (expected > count || expected < 6)
1338 return -EIO;
1339
1340 memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
1341 diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
1342 index e3a9155ee671..c6428771841f 100644
1343 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c
1344 +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
1345 @@ -49,9 +49,10 @@
1346 */
1347 #define TPM_I2C_MAX_BUF_SIZE 32
1348 #define TPM_I2C_RETRY_COUNT 32
1349 -#define TPM_I2C_BUS_DELAY 1 /* msec */
1350 -#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */
1351 -#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */
1352 +#define TPM_I2C_BUS_DELAY 1000 /* usec */
1353 +#define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */
1354 +#define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */
1355 +#define TPM_I2C_DELAY_RANGE 300 /* usec */
1356
1357 #define OF_IS_TPM2 ((void *)1)
1358 #define I2C_IS_TPM2 1
1359 @@ -123,7 +124,9 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data)
1360 /* this causes the current command to be aborted */
1361 for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) {
1362 status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data);
1363 - msleep(TPM_I2C_BUS_DELAY);
1364 + if (status < 0)
1365 + usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
1366 + + TPM_I2C_DELAY_RANGE);
1367 }
1368 return status;
1369 }
1370 @@ -160,7 +163,8 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client,
1371 burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data);
1372 break;
1373 }
1374 - msleep(TPM_I2C_BUS_DELAY);
1375 + usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY
1376 + + TPM_I2C_DELAY_RANGE);
1377 } while (time_before(jiffies, stop));
1378
1379 return burst_count;
1380 @@ -203,13 +207,17 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value,
1381 return 0;
1382
1383 /* use polling to wait for the event */
1384 - ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
1385 + ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG);
1386 stop = jiffies + timeout;
1387 do {
1388 if (time_before(jiffies, ten_msec))
1389 - msleep(TPM_I2C_RETRY_DELAY_SHORT);
1390 + usleep_range(TPM_I2C_RETRY_DELAY_SHORT,
1391 + TPM_I2C_RETRY_DELAY_SHORT
1392 + + TPM_I2C_DELAY_RANGE);
1393 else
1394 - msleep(TPM_I2C_RETRY_DELAY_LONG);
1395 + usleep_range(TPM_I2C_RETRY_DELAY_LONG,
1396 + TPM_I2C_RETRY_DELAY_LONG
1397 + + TPM_I2C_DELAY_RANGE);
1398 status_valid = i2c_nuvoton_check_status(chip, mask,
1399 value);
1400 if (status_valid)
1401 diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
1402 index a1ce0607bf7b..4d24ec3d7cd6 100644
1403 --- a/drivers/char/tpm/tpm_tis_core.c
1404 +++ b/drivers/char/tpm/tpm_tis_core.c
1405 @@ -160,8 +160,10 @@ static int get_burstcount(struct tpm_chip *chip)
1406 u32 value;
1407
1408 /* wait for burstcount */
1409 - /* which timeout value, spec has 2 answers (c & d) */
1410 - stop = jiffies + chip->timeout_d;
1411 + if (chip->flags & TPM_CHIP_FLAG_TPM2)
1412 + stop = jiffies + chip->timeout_a;
1413 + else
1414 + stop = jiffies + chip->timeout_d;
1415 do {
1416 rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value);
1417 if (rc < 0)
1418 diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1419 index dbaad9c681e3..3b97b14c3417 100644
1420 --- a/drivers/char/tpm/tpm_tis_spi.c
1421 +++ b/drivers/char/tpm/tpm_tis_spi.c
1422 @@ -48,8 +48,8 @@ struct tpm_tis_spi_phy {
1423 struct tpm_tis_data priv;
1424 struct spi_device *spi_device;
1425
1426 - u8 tx_buf[MAX_SPI_FRAMESIZE + 4];
1427 - u8 rx_buf[MAX_SPI_FRAMESIZE + 4];
1428 + u8 tx_buf[4];
1429 + u8 rx_buf[4];
1430 };
1431
1432 static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
1433 @@ -57,122 +57,98 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
1434 return container_of(data, struct tpm_tis_spi_phy, priv);
1435 }
1436
1437 -static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
1438 - u16 len, u8 *result)
1439 +static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
1440 + u8 *buffer, u8 direction)
1441 {
1442 struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
1443 - int ret, i;
1444 + int ret = 0;
1445 + int i;
1446 struct spi_message m;
1447 - struct spi_transfer spi_xfer = {
1448 - .tx_buf = phy->tx_buf,
1449 - .rx_buf = phy->rx_buf,
1450 - .len = 4,
1451 - };
1452 + struct spi_transfer spi_xfer;
1453 + u8 transfer_len;
1454
1455 - if (len > MAX_SPI_FRAMESIZE)
1456 - return -ENOMEM;
1457 + spi_bus_lock(phy->spi_device->master);
1458
1459 - phy->tx_buf[0] = 0x80 | (len - 1);
1460 - phy->tx_buf[1] = 0xd4;
1461 - phy->tx_buf[2] = (addr >> 8) & 0xFF;
1462 - phy->tx_buf[3] = addr & 0xFF;
1463 + while (len) {
1464 + transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
1465
1466 - spi_xfer.cs_change = 1;
1467 - spi_message_init(&m);
1468 - spi_message_add_tail(&spi_xfer, &m);
1469 + phy->tx_buf[0] = direction | (transfer_len - 1);
1470 + phy->tx_buf[1] = 0xd4;
1471 + phy->tx_buf[2] = addr >> 8;
1472 + phy->tx_buf[3] = addr;
1473 +
1474 + memset(&spi_xfer, 0, sizeof(spi_xfer));
1475 + spi_xfer.tx_buf = phy->tx_buf;
1476 + spi_xfer.rx_buf = phy->rx_buf;
1477 + spi_xfer.len = 4;
1478 + spi_xfer.cs_change = 1;
1479
1480 - spi_bus_lock(phy->spi_device->master);
1481 - ret = spi_sync_locked(phy->spi_device, &m);
1482 - if (ret < 0)
1483 - goto exit;
1484 -
1485 - memset(phy->tx_buf, 0, len);
1486 -
1487 - /* According to TCG PTP specification, if there is no TPM present at
1488 - * all, then the design has a weak pull-up on MISO. If a TPM is not
1489 - * present, a pull-up on MISO means that the SB controller sees a 1,
1490 - * and will latch in 0xFF on the read.
1491 - */
1492 - for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
1493 - spi_xfer.len = 1;
1494 spi_message_init(&m);
1495 spi_message_add_tail(&spi_xfer, &m);
1496 ret = spi_sync_locked(phy->spi_device, &m);
1497 if (ret < 0)
1498 goto exit;
1499 - }
1500 -
1501 - spi_xfer.cs_change = 0;
1502 - spi_xfer.len = len;
1503 - spi_xfer.rx_buf = result;
1504 -
1505 - spi_message_init(&m);
1506 - spi_message_add_tail(&spi_xfer, &m);
1507 - ret = spi_sync_locked(phy->spi_device, &m);
1508 -
1509 -exit:
1510 - spi_bus_unlock(phy->spi_device->master);
1511 - return ret;
1512 -}
1513 -
1514 -static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
1515 - u16 len, u8 *value)
1516 -{
1517 - struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
1518 - int ret, i;
1519 - struct spi_message m;
1520 - struct spi_transfer spi_xfer = {
1521 - .tx_buf = phy->tx_buf,
1522 - .rx_buf = phy->rx_buf,
1523 - .len = 4,
1524 - };
1525 -
1526 - if (len > MAX_SPI_FRAMESIZE)
1527 - return -ENOMEM;
1528 -
1529 - phy->tx_buf[0] = len - 1;
1530 - phy->tx_buf[1] = 0xd4;
1531 - phy->tx_buf[2] = (addr >> 8) & 0xFF;
1532 - phy->tx_buf[3] = addr & 0xFF;
1533
1534 - spi_xfer.cs_change = 1;
1535 - spi_message_init(&m);
1536 - spi_message_add_tail(&spi_xfer, &m);
1537 + if ((phy->rx_buf[3] & 0x01) == 0) {
1538 + // handle SPI wait states
1539 + phy->tx_buf[0] = 0;
1540 +
1541 + for (i = 0; i < TPM_RETRY; i++) {
1542 + spi_xfer.len = 1;
1543 + spi_message_init(&m);
1544 + spi_message_add_tail(&spi_xfer, &m);
1545 + ret = spi_sync_locked(phy->spi_device, &m);
1546 + if (ret < 0)
1547 + goto exit;
1548 + if (phy->rx_buf[0] & 0x01)
1549 + break;
1550 + }
1551 +
1552 + if (i == TPM_RETRY) {
1553 + ret = -ETIMEDOUT;
1554 + goto exit;
1555 + }
1556 + }
1557 +
1558 + spi_xfer.cs_change = 0;
1559 + spi_xfer.len = transfer_len;
1560 + spi_xfer.delay_usecs = 5;
1561 +
1562 + if (direction) {
1563 + spi_xfer.tx_buf = NULL;
1564 + spi_xfer.rx_buf = buffer;
1565 + } else {
1566 + spi_xfer.tx_buf = buffer;
1567 + spi_xfer.rx_buf = NULL;
1568 + }
1569
1570 - spi_bus_lock(phy->spi_device->master);
1571 - ret = spi_sync_locked(phy->spi_device, &m);
1572 - if (ret < 0)
1573 - goto exit;
1574 -
1575 - memset(phy->tx_buf, 0, len);
1576 -
1577 - /* According to TCG PTP specification, if there is no TPM present at
1578 - * all, then the design has a weak pull-up on MISO. If a TPM is not
1579 - * present, a pull-up on MISO means that the SB controller sees a 1,
1580 - * and will latch in 0xFF on the read.
1581 - */
1582 - for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) {
1583 - spi_xfer.len = 1;
1584 spi_message_init(&m);
1585 spi_message_add_tail(&spi_xfer, &m);
1586 ret = spi_sync_locked(phy->spi_device, &m);
1587 if (ret < 0)
1588 goto exit;
1589 - }
1590
1591 - spi_xfer.len = len;
1592 - spi_xfer.tx_buf = value;
1593 - spi_xfer.cs_change = 0;
1594 - spi_xfer.tx_buf = value;
1595 - spi_message_init(&m);
1596 - spi_message_add_tail(&spi_xfer, &m);
1597 - ret = spi_sync_locked(phy->spi_device, &m);
1598 + len -= transfer_len;
1599 + buffer += transfer_len;
1600 + }
1601
1602 exit:
1603 spi_bus_unlock(phy->spi_device->master);
1604 return ret;
1605 }
1606
1607 +static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
1608 + u16 len, u8 *result)
1609 +{
1610 + return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
1611 +}
1612 +
1613 +static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
1614 + u16 len, u8 *value)
1615 +{
1616 + return tpm_tis_spi_transfer(data, addr, len, value, 0);
1617 +}
1618 +
1619 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
1620 {
1621 int rc;
1622 diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
1623 index b98ede78c9d8..6f9c9ac6ee70 100644
1624 --- a/drivers/gpio/gpio-omap.c
1625 +++ b/drivers/gpio/gpio-omap.c
1626 @@ -208,9 +208,11 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
1627 * OMAP's debounce time is in 31us steps
1628 * <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
1629 * so we need to convert and round up to the closest unit.
1630 + *
1631 + * Return: 0 on success, negative error otherwise.
1632 */
1633 -static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
1634 - unsigned debounce)
1635 +static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
1636 + unsigned debounce)
1637 {
1638 void __iomem *reg;
1639 u32 val;
1640 @@ -218,11 +220,12 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
1641 bool enable = !!debounce;
1642
1643 if (!bank->dbck_flag)
1644 - return;
1645 + return -ENOTSUPP;
1646
1647 if (enable) {
1648 debounce = DIV_ROUND_UP(debounce, 31) - 1;
1649 - debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK;
1650 + if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
1651 + return -EINVAL;
1652 }
1653
1654 l = BIT(offset);
1655 @@ -255,6 +258,8 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
1656 bank->context.debounce = debounce;
1657 bank->context.debounce_en = val;
1658 }
1659 +
1660 + return 0;
1661 }
1662
1663 /**
1664 @@ -964,14 +969,20 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
1665 {
1666 struct gpio_bank *bank;
1667 unsigned long flags;
1668 + int ret;
1669
1670 bank = gpiochip_get_data(chip);
1671
1672 raw_spin_lock_irqsave(&bank->lock, flags);
1673 - omap2_set_gpio_debounce(bank, offset, debounce);
1674 + ret = omap2_set_gpio_debounce(bank, offset, debounce);
1675 raw_spin_unlock_irqrestore(&bank->lock, flags);
1676
1677 - return 0;
1678 + if (ret)
1679 + dev_info(chip->parent,
1680 + "Could not set line %u debounce to %u microseconds (%d)",
1681 + offset, debounce, ret);
1682 +
1683 + return ret;
1684 }
1685
1686 static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
1687 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1688 index 882404cefbc2..42448c7c5ff5 100644
1689 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1690 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
1691 @@ -1173,23 +1173,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
1692 a.full = dfixed_const(available_bandwidth);
1693 b.full = dfixed_const(wm->num_heads);
1694 a.full = dfixed_div(a, b);
1695 + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1696 + tmp = min(dfixed_trunc(a), tmp);
1697
1698 - b.full = dfixed_const(mc_latency + 512);
1699 - c.full = dfixed_const(wm->disp_clk);
1700 - b.full = dfixed_div(b, c);
1701 -
1702 - c.full = dfixed_const(dmif_size);
1703 - b.full = dfixed_div(c, b);
1704 -
1705 - tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1706 -
1707 - b.full = dfixed_const(1000);
1708 - c.full = dfixed_const(wm->disp_clk);
1709 - b.full = dfixed_div(c, b);
1710 - c.full = dfixed_const(wm->bytes_per_pixel);
1711 - b.full = dfixed_mul(b, c);
1712 -
1713 - lb_fill_bw = min(tmp, dfixed_trunc(b));
1714 + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1715
1716 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1717 b.full = dfixed_const(1000);
1718 @@ -1297,14 +1284,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1719 {
1720 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1721 struct dce10_wm_params wm_low, wm_high;
1722 - u32 pixel_period;
1723 + u32 active_time;
1724 u32 line_time = 0;
1725 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1726 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1727
1728 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1729 - pixel_period = 1000000 / (u32)mode->clock;
1730 - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1731 + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1732 + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1733
1734 /* watermark for high clocks */
1735 if (adev->pm.dpm_enabled) {
1736 @@ -1319,7 +1306,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1737
1738 wm_high.disp_clk = mode->clock;
1739 wm_high.src_width = mode->crtc_hdisplay;
1740 - wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1741 + wm_high.active_time = active_time;
1742 wm_high.blank_time = line_time - wm_high.active_time;
1743 wm_high.interlaced = false;
1744 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1745 @@ -1358,7 +1345,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
1746
1747 wm_low.disp_clk = mode->clock;
1748 wm_low.src_width = mode->crtc_hdisplay;
1749 - wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1750 + wm_low.active_time = active_time;
1751 wm_low.blank_time = line_time - wm_low.active_time;
1752 wm_low.interlaced = false;
1753 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1754 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
1755 index 64a1df62cc65..904dabdc3a1e 100644
1756 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
1757 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
1758 @@ -1140,23 +1140,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
1759 a.full = dfixed_const(available_bandwidth);
1760 b.full = dfixed_const(wm->num_heads);
1761 a.full = dfixed_div(a, b);
1762 + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1763 + tmp = min(dfixed_trunc(a), tmp);
1764
1765 - b.full = dfixed_const(mc_latency + 512);
1766 - c.full = dfixed_const(wm->disp_clk);
1767 - b.full = dfixed_div(b, c);
1768 -
1769 - c.full = dfixed_const(dmif_size);
1770 - b.full = dfixed_div(c, b);
1771 -
1772 - tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1773 -
1774 - b.full = dfixed_const(1000);
1775 - c.full = dfixed_const(wm->disp_clk);
1776 - b.full = dfixed_div(c, b);
1777 - c.full = dfixed_const(wm->bytes_per_pixel);
1778 - b.full = dfixed_mul(b, c);
1779 -
1780 - lb_fill_bw = min(tmp, dfixed_trunc(b));
1781 + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1782
1783 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1784 b.full = dfixed_const(1000);
1785 @@ -1264,14 +1251,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1786 {
1787 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1788 struct dce10_wm_params wm_low, wm_high;
1789 - u32 pixel_period;
1790 + u32 active_time;
1791 u32 line_time = 0;
1792 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1793 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1794
1795 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1796 - pixel_period = 1000000 / (u32)mode->clock;
1797 - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1798 + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1799 + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1800
1801 /* watermark for high clocks */
1802 if (adev->pm.dpm_enabled) {
1803 @@ -1286,7 +1273,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1804
1805 wm_high.disp_clk = mode->clock;
1806 wm_high.src_width = mode->crtc_hdisplay;
1807 - wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1808 + wm_high.active_time = active_time;
1809 wm_high.blank_time = line_time - wm_high.active_time;
1810 wm_high.interlaced = false;
1811 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1812 @@ -1325,7 +1312,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1813
1814 wm_low.disp_clk = mode->clock;
1815 wm_low.src_width = mode->crtc_hdisplay;
1816 - wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1817 + wm_low.active_time = active_time;
1818 wm_low.blank_time = line_time - wm_low.active_time;
1819 wm_low.interlaced = false;
1820 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1821 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1822 index fde6ee1f6f2b..6d02bdb25d98 100644
1823 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1824 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
1825 @@ -932,23 +932,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
1826 a.full = dfixed_const(available_bandwidth);
1827 b.full = dfixed_const(wm->num_heads);
1828 a.full = dfixed_div(a, b);
1829 + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1830 + tmp = min(dfixed_trunc(a), tmp);
1831
1832 - b.full = dfixed_const(mc_latency + 512);
1833 - c.full = dfixed_const(wm->disp_clk);
1834 - b.full = dfixed_div(b, c);
1835 -
1836 - c.full = dfixed_const(dmif_size);
1837 - b.full = dfixed_div(c, b);
1838 -
1839 - tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1840 -
1841 - b.full = dfixed_const(1000);
1842 - c.full = dfixed_const(wm->disp_clk);
1843 - b.full = dfixed_div(c, b);
1844 - c.full = dfixed_const(wm->bytes_per_pixel);
1845 - b.full = dfixed_mul(b, c);
1846 -
1847 - lb_fill_bw = min(tmp, dfixed_trunc(b));
1848 + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1849
1850 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1851 b.full = dfixed_const(1000);
1852 @@ -1057,18 +1044,18 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1853 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1854 struct dce6_wm_params wm_low, wm_high;
1855 u32 dram_channels;
1856 - u32 pixel_period;
1857 + u32 active_time;
1858 u32 line_time = 0;
1859 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1860 u32 priority_a_mark = 0, priority_b_mark = 0;
1861 u32 priority_a_cnt = PRIORITY_OFF;
1862 u32 priority_b_cnt = PRIORITY_OFF;
1863 - u32 tmp, arb_control3;
1864 + u32 tmp, arb_control3, lb_vblank_lead_lines = 0;
1865 fixed20_12 a, b, c;
1866
1867 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1868 - pixel_period = 1000000 / (u32)mode->clock;
1869 - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1870 + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1871 + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1872 priority_a_cnt = 0;
1873 priority_b_cnt = 0;
1874
1875 @@ -1087,7 +1074,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1876
1877 wm_high.disp_clk = mode->clock;
1878 wm_high.src_width = mode->crtc_hdisplay;
1879 - wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1880 + wm_high.active_time = active_time;
1881 wm_high.blank_time = line_time - wm_high.active_time;
1882 wm_high.interlaced = false;
1883 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1884 @@ -1114,7 +1101,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1885
1886 wm_low.disp_clk = mode->clock;
1887 wm_low.src_width = mode->crtc_hdisplay;
1888 - wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1889 + wm_low.active_time = active_time;
1890 wm_low.blank_time = line_time - wm_low.active_time;
1891 wm_low.interlaced = false;
1892 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1893 @@ -1175,6 +1162,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1894 c.full = dfixed_div(c, a);
1895 priority_b_mark = dfixed_trunc(c);
1896 priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
1897 +
1898 + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1899 }
1900
1901 /* select wm A */
1902 @@ -1204,6 +1193,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
1903 /* save values for DPM */
1904 amdgpu_crtc->line_time = line_time;
1905 amdgpu_crtc->wm_high = latency_watermark_a;
1906 +
1907 + /* Save number of lines the linebuffer leads before the scanout */
1908 + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1909 }
1910
1911 /* watermark setup */
1912 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1913 index 7d9ffde0a628..b1fb60107cfa 100644
1914 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1915 +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
1916 @@ -1094,23 +1094,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
1917 a.full = dfixed_const(available_bandwidth);
1918 b.full = dfixed_const(wm->num_heads);
1919 a.full = dfixed_div(a, b);
1920 + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
1921 + tmp = min(dfixed_trunc(a), tmp);
1922
1923 - b.full = dfixed_const(mc_latency + 512);
1924 - c.full = dfixed_const(wm->disp_clk);
1925 - b.full = dfixed_div(b, c);
1926 -
1927 - c.full = dfixed_const(dmif_size);
1928 - b.full = dfixed_div(c, b);
1929 -
1930 - tmp = min(dfixed_trunc(a), dfixed_trunc(b));
1931 -
1932 - b.full = dfixed_const(1000);
1933 - c.full = dfixed_const(wm->disp_clk);
1934 - b.full = dfixed_div(c, b);
1935 - c.full = dfixed_const(wm->bytes_per_pixel);
1936 - b.full = dfixed_mul(b, c);
1937 -
1938 - lb_fill_bw = min(tmp, dfixed_trunc(b));
1939 + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
1940
1941 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
1942 b.full = dfixed_const(1000);
1943 @@ -1218,14 +1205,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1944 {
1945 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1946 struct dce8_wm_params wm_low, wm_high;
1947 - u32 pixel_period;
1948 + u32 active_time;
1949 u32 line_time = 0;
1950 u32 latency_watermark_a = 0, latency_watermark_b = 0;
1951 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1952
1953 if (amdgpu_crtc->base.enabled && num_heads && mode) {
1954 - pixel_period = 1000000 / (u32)mode->clock;
1955 - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
1956 + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
1957 + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
1958
1959 /* watermark for high clocks */
1960 if (adev->pm.dpm_enabled) {
1961 @@ -1240,7 +1227,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1962
1963 wm_high.disp_clk = mode->clock;
1964 wm_high.src_width = mode->crtc_hdisplay;
1965 - wm_high.active_time = mode->crtc_hdisplay * pixel_period;
1966 + wm_high.active_time = active_time;
1967 wm_high.blank_time = line_time - wm_high.active_time;
1968 wm_high.interlaced = false;
1969 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1970 @@ -1279,7 +1266,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
1971
1972 wm_low.disp_clk = mode->clock;
1973 wm_low.src_width = mode->crtc_hdisplay;
1974 - wm_low.active_time = mode->crtc_hdisplay * pixel_period;
1975 + wm_low.active_time = active_time;
1976 wm_low.blank_time = line_time - wm_low.active_time;
1977 wm_low.interlaced = false;
1978 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1979 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1980 index 0bc0afb6321e..0151ed2de770 100644
1981 --- a/drivers/gpu/drm/drm_edid.c
1982 +++ b/drivers/gpu/drm/drm_edid.c
1983 @@ -76,6 +76,8 @@
1984 #define EDID_QUIRK_FORCE_12BPC (1 << 9)
1985 /* Force 6bpc */
1986 #define EDID_QUIRK_FORCE_6BPC (1 << 10)
1987 +/* Force 10bpc */
1988 +#define EDID_QUIRK_FORCE_10BPC (1 << 11)
1989
1990 struct detailed_mode_closure {
1991 struct drm_connector *connector;
1992 @@ -118,6 +120,9 @@ static const struct edid_quirk {
1993 { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
1994 EDID_QUIRK_DETAILED_IN_CM },
1995
1996 + /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
1997 + { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
1998 +
1999 /* LG Philips LCD LP154W01-A5 */
2000 { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
2001 { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
2002 @@ -4105,6 +4110,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
2003 if (quirks & EDID_QUIRK_FORCE_8BPC)
2004 connector->display_info.bpc = 8;
2005
2006 + if (quirks & EDID_QUIRK_FORCE_10BPC)
2007 + connector->display_info.bpc = 10;
2008 +
2009 if (quirks & EDID_QUIRK_FORCE_12BPC)
2010 connector->display_info.bpc = 12;
2011
2012 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
2013 index f46aac1e85fb..c75f4bb6a4bd 100644
2014 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
2015 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
2016 @@ -420,6 +420,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
2017 return 0;
2018 }
2019
2020 + if (intel_vgpu_active(dev_priv)) {
2021 + DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
2022 + return 0;
2023 + }
2024 +
2025 #ifdef CONFIG_INTEL_IOMMU
2026 if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
2027 DRM_INFO("DMAR active, disabling use of stolen memory\n");
2028 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
2029 index 8894fee30cbc..cbacbb673e54 100644
2030 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
2031 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
2032 @@ -130,7 +130,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
2033 poll = false;
2034 }
2035
2036 - if (list_empty(&therm->alarm.head) && poll)
2037 + if (poll)
2038 nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
2039 spin_unlock_irqrestore(&therm->lock, flags);
2040
2041 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
2042 index 91198d79393a..e2feccec25f5 100644
2043 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
2044 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
2045 @@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target)
2046 spin_unlock_irqrestore(&fan->lock, flags);
2047
2048 /* schedule next fan update, if not at target speed already */
2049 - if (list_empty(&fan->alarm.head) && target != duty) {
2050 + if (target != duty) {
2051 u16 bump_period = fan->bios.bump_period;
2052 u16 slow_down_period = fan->bios.slow_down_period;
2053 u64 delay;
2054 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
2055 index 59701b7a6597..ff9fbe7950e5 100644
2056 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
2057 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
2058 @@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent)
2059 duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
2060 nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
2061
2062 - if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
2063 + if (percent != (duty * 100)) {
2064 u64 next_change = (percent * fan->period_us) / 100;
2065 if (!duty)
2066 next_change = fan->period_us - next_change;
2067 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
2068 index b9703c02d8ca..9a79e91fdfdc 100644
2069 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
2070 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
2071 @@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm)
2072 spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
2073
2074 /* schedule the next poll in one second */
2075 - if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
2076 + if (therm->func->temp_get(therm) >= 0)
2077 nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
2078 }
2079
2080 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
2081 index 07dc82bfe346..f2a86eae0a0d 100644
2082 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
2083 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
2084 @@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
2085 unsigned long flags;
2086 LIST_HEAD(exec);
2087
2088 - /* move any due alarms off the pending list */
2089 + /* Process pending alarms. */
2090 spin_lock_irqsave(&tmr->lock, flags);
2091 list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
2092 - if (alarm->timestamp <= nvkm_timer_read(tmr))
2093 - list_move_tail(&alarm->head, &exec);
2094 + /* Have we hit the earliest alarm that hasn't gone off? */
2095 + if (alarm->timestamp > nvkm_timer_read(tmr)) {
2096 + /* Schedule it. If we didn't race, we're done. */
2097 + tmr->func->alarm_init(tmr, alarm->timestamp);
2098 + if (alarm->timestamp > nvkm_timer_read(tmr))
2099 + break;
2100 + }
2101 +
2102 + /* Move to completed list. We'll drop the lock before
2103 + * executing the callback so it can reschedule itself.
2104 + */
2105 + list_move_tail(&alarm->head, &exec);
2106 }
2107
2108 - /* reschedule interrupt for next alarm time */
2109 - if (!list_empty(&tmr->alarms)) {
2110 - alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
2111 - tmr->func->alarm_init(tmr, alarm->timestamp);
2112 - } else {
2113 + /* Shut down interrupt if no more pending alarms. */
2114 + if (list_empty(&tmr->alarms))
2115 tmr->func->alarm_fini(tmr);
2116 - }
2117 spin_unlock_irqrestore(&tmr->lock, flags);
2118
2119 - /* execute any pending alarm handlers */
2120 + /* Execute completed callbacks. */
2121 list_for_each_entry_safe(alarm, atemp, &exec, head) {
2122 list_del_init(&alarm->head);
2123 alarm->func(alarm);
2124 @@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
2125 struct nvkm_alarm *list;
2126 unsigned long flags;
2127
2128 - alarm->timestamp = nvkm_timer_read(tmr) + nsec;
2129 -
2130 - /* append new alarm to list, in soonest-alarm-first order */
2131 + /* Remove alarm from pending list.
2132 + *
2133 + * This both protects against the corruption of the list,
2134 + * and implements alarm rescheduling/cancellation.
2135 + */
2136 spin_lock_irqsave(&tmr->lock, flags);
2137 - if (!nsec) {
2138 - if (!list_empty(&alarm->head))
2139 - list_del(&alarm->head);
2140 - } else {
2141 + list_del_init(&alarm->head);
2142 +
2143 + if (nsec) {
2144 + /* Insert into pending list, ordered earliest to latest. */
2145 + alarm->timestamp = nvkm_timer_read(tmr) + nsec;
2146 list_for_each_entry(list, &tmr->alarms, head) {
2147 if (list->timestamp > alarm->timestamp)
2148 break;
2149 }
2150 +
2151 list_add_tail(&alarm->head, &list->head);
2152 +
2153 + /* Update HW if this is now the earliest alarm. */
2154 + list = list_first_entry(&tmr->alarms, typeof(*list), head);
2155 + if (list == alarm) {
2156 + tmr->func->alarm_init(tmr, alarm->timestamp);
2157 + /* This shouldn't happen if callers aren't stupid.
2158 + *
2159 + * Worst case scenario is that it'll take roughly
2160 + * 4 seconds for the next alarm to trigger.
2161 + */
2162 + WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
2163 + }
2164 }
2165 spin_unlock_irqrestore(&tmr->lock, flags);
2166 -
2167 - /* process pending alarms */
2168 - nvkm_timer_alarm_trigger(tmr);
2169 }
2170
2171 void
2172 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
2173 index 7b9ce87f0617..7f48249f41de 100644
2174 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
2175 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
2176 @@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr)
2177 u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
2178
2179 if (stat & 0x00000001) {
2180 - nvkm_timer_alarm_trigger(tmr);
2181 nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
2182 + nvkm_timer_alarm_trigger(tmr);
2183 stat &= ~0x00000001;
2184 }
2185
2186 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
2187 index b5beea53d6f6..ab646a90e3da 100644
2188 --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
2189 +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c
2190 @@ -217,7 +217,15 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st,
2191 if (ret < 0 || value < 0)
2192 ret = -EINVAL;
2193
2194 - return ret;
2195 + ret = sensor_hub_get_feature(st->hsdev,
2196 + st->poll.report_id,
2197 + st->poll.index, sizeof(value), &value);
2198 + if (ret < 0 || value < 0)
2199 + return -EINVAL;
2200 +
2201 + st->poll_interval = value;
2202 +
2203 + return 0;
2204 }
2205 EXPORT_SYMBOL(hid_sensor_write_samp_freq_value);
2206
2207 @@ -259,7 +267,16 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st,
2208 if (ret < 0 || value < 0)
2209 ret = -EINVAL;
2210
2211 - return ret;
2212 + ret = sensor_hub_get_feature(st->hsdev,
2213 + st->sensitivity.report_id,
2214 + st->sensitivity.index, sizeof(value),
2215 + &value);
2216 + if (ret < 0 || value < 0)
2217 + return -EINVAL;
2218 +
2219 + st->raw_hystersis = value;
2220 +
2221 + return 0;
2222 }
2223 EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value);
2224
2225 @@ -355,6 +372,9 @@ int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev,
2226 /* Default unit of measure is milliseconds */
2227 if (st->poll.units == 0)
2228 st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND;
2229 +
2230 + st->poll_interval = -1;
2231 +
2232 return 0;
2233
2234 }
2235 @@ -377,6 +397,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev,
2236 HID_USAGE_SENSOR_PROY_POWER_STATE,
2237 &st->power_state);
2238
2239 + st->raw_hystersis = -1;
2240 +
2241 sensor_hub_input_get_attribute_info(hsdev,
2242 HID_FEATURE_REPORT, usage_id,
2243 HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS,
2244 diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
2245 index ecf592d69043..60829340a82e 100644
2246 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
2247 +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
2248 @@ -51,6 +51,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
2249 st->report_state.report_id,
2250 st->report_state.index,
2251 HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
2252 +
2253 + poll_value = hid_sensor_read_poll_value(st);
2254 } else {
2255 int val;
2256
2257 @@ -87,9 +89,7 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
2258 sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
2259 st->power_state.index,
2260 sizeof(state_val), &state_val);
2261 - if (state)
2262 - poll_value = hid_sensor_read_poll_value(st);
2263 - if (poll_value > 0)
2264 + if (state && poll_value)
2265 msleep_interruptible(poll_value * 2);
2266
2267 return 0;
2268 @@ -127,6 +127,20 @@ static void hid_sensor_set_power_work(struct work_struct *work)
2269 struct hid_sensor_common *attrb = container_of(work,
2270 struct hid_sensor_common,
2271 work);
2272 +
2273 + if (attrb->poll_interval >= 0)
2274 + sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id,
2275 + attrb->poll.index,
2276 + sizeof(attrb->poll_interval),
2277 + &attrb->poll_interval);
2278 +
2279 + if (attrb->raw_hystersis >= 0)
2280 + sensor_hub_set_feature(attrb->hsdev,
2281 + attrb->sensitivity.report_id,
2282 + attrb->sensitivity.index,
2283 + sizeof(attrb->raw_hystersis),
2284 + &attrb->raw_hystersis);
2285 +
2286 _hid_sensor_power_state(attrb, true);
2287 }
2288
2289 diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
2290 index e690dd11e99f..4b0f942b8914 100644
2291 --- a/drivers/iio/dac/ad7303.c
2292 +++ b/drivers/iio/dac/ad7303.c
2293 @@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = {
2294 .address = (chan), \
2295 .scan_type = { \
2296 .sign = 'u', \
2297 - .realbits = '8', \
2298 - .storagebits = '8', \
2299 - .shift = '0', \
2300 + .realbits = 8, \
2301 + .storagebits = 8, \
2302 + .shift = 0, \
2303 }, \
2304 .ext_info = ad7303_ext_info, \
2305 }
2306 diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
2307 index e5a533cbd53f..f762eb8b174a 100644
2308 --- a/drivers/iio/pressure/bmp280-core.c
2309 +++ b/drivers/iio/pressure/bmp280-core.c
2310 @@ -175,11 +175,12 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data,
2311 }
2312 H6 = sign_extend32(tmp, 7);
2313
2314 - var = ((s32)data->t_fine) - 76800;
2315 - var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
2316 - * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
2317 - + 2097152) * H2 + 8192) >> 14);
2318 - var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
2319 + var = ((s32)data->t_fine) - (s32)76800;
2320 + var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var))
2321 + + (s32)16384) >> 15) * (((((((var * H6) >> 10)
2322 + * (((var * (s32)H3) >> 11) + (s32)32768)) >> 10)
2323 + + (s32)2097152) * H2 + 8192) >> 14);
2324 + var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4;
2325
2326 return var >> 12;
2327 };
2328 diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
2329 index 5656deb17261..020459513384 100644
2330 --- a/drivers/iio/proximity/as3935.c
2331 +++ b/drivers/iio/proximity/as3935.c
2332 @@ -50,7 +50,6 @@
2333 #define AS3935_TUNE_CAP 0x08
2334 #define AS3935_CALIBRATE 0x3D
2335
2336 -#define AS3935_WRITE_DATA BIT(15)
2337 #define AS3935_READ_DATA BIT(14)
2338 #define AS3935_ADDRESS(x) ((x) << 8)
2339
2340 @@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st,
2341 {
2342 u8 *buf = st->buf;
2343
2344 - buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
2345 + buf[0] = AS3935_ADDRESS(reg) >> 8;
2346 buf[1] = val;
2347
2348 return spi_write(st->spi, buf, 2);
2349 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
2350 index 0f58f46dbad7..8fd108d89527 100644
2351 --- a/drivers/infiniband/core/addr.c
2352 +++ b/drivers/infiniband/core/addr.c
2353 @@ -444,8 +444,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
2354 fl6.saddr = src_in->sin6_addr;
2355 fl6.flowi6_oif = addr->bound_dev_if;
2356
2357 - dst = ip6_route_output(addr->net, NULL, &fl6);
2358 - if ((ret = dst->error))
2359 + ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
2360 + if (ret < 0)
2361 goto put;
2362
2363 rt = (struct rt6_info *)dst;
2364 diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
2365 index bd786b7bd30b..bb729764a799 100644
2366 --- a/drivers/infiniband/hw/hfi1/file_ops.c
2367 +++ b/drivers/infiniband/hw/hfi1/file_ops.c
2368 @@ -751,6 +751,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
2369 /* release the cpu */
2370 hfi1_put_proc_affinity(fdata->rec_cpu_num);
2371
2372 + /* clean up rcv side */
2373 + hfi1_user_exp_rcv_free(fdata);
2374 +
2375 /*
2376 * Clear any left over, unhandled events so the next process that
2377 * gets this context doesn't get confused.
2378 @@ -790,7 +793,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
2379
2380 dd->rcd[uctxt->ctxt] = NULL;
2381
2382 - hfi1_user_exp_rcv_free(fdata);
2383 + hfi1_user_exp_rcv_grp_free(uctxt);
2384 hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
2385
2386 uctxt->rcvwait_to = 0;
2387 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
2388 index e3b5bc93bc70..34cfd341b6d6 100644
2389 --- a/drivers/infiniband/hw/hfi1/init.c
2390 +++ b/drivers/infiniband/hw/hfi1/init.c
2391 @@ -1757,6 +1757,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
2392 !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) {
2393 dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n",
2394 rcd->ctxt);
2395 + ret = -ENOMEM;
2396 goto bail_rcvegrbuf_phys;
2397 }
2398
2399 diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2400 index 64d26525435a..db0f140e2116 100644
2401 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2402 +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
2403 @@ -250,36 +250,40 @@ int hfi1_user_exp_rcv_init(struct file *fp)
2404 return ret;
2405 }
2406
2407 +void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt)
2408 +{
2409 + struct tid_group *grp, *gptr;
2410 +
2411 + list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
2412 + list) {
2413 + list_del_init(&grp->list);
2414 + kfree(grp);
2415 + }
2416 + hfi1_clear_tids(uctxt);
2417 +}
2418 +
2419 int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
2420 {
2421 struct hfi1_ctxtdata *uctxt = fd->uctxt;
2422 - struct tid_group *grp, *gptr;
2423
2424 - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
2425 - return 0;
2426 /*
2427 * The notifier would have been removed when the process'es mm
2428 * was freed.
2429 */
2430 - if (fd->handler)
2431 + if (fd->handler) {
2432 hfi1_mmu_rb_unregister(fd->handler);
2433 -
2434 - kfree(fd->invalid_tids);
2435 -
2436 - if (!uctxt->cnt) {
2437 + } else {
2438 if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
2439 unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
2440 if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
2441 unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
2442 - list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list,
2443 - list) {
2444 - list_del_init(&grp->list);
2445 - kfree(grp);
2446 - }
2447 - hfi1_clear_tids(uctxt);
2448 }
2449
2450 + kfree(fd->invalid_tids);
2451 + fd->invalid_tids = NULL;
2452 +
2453 kfree(fd->entry_to_rb);
2454 + fd->entry_to_rb = NULL;
2455 return 0;
2456 }
2457
2458 diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2459 index 9bc8d9fba87e..d1d7d3d3bd44 100644
2460 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2461 +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
2462 @@ -70,6 +70,7 @@
2463 (tid) |= EXP_TID_SET(field, (value)); \
2464 } while (0)
2465
2466 +void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt);
2467 int hfi1_user_exp_rcv_init(struct file *);
2468 int hfi1_user_exp_rcv_free(struct hfi1_filedata *);
2469 int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *);
2470 diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
2471 index be2d02b6a6aa..1fb31a47966d 100644
2472 --- a/drivers/infiniband/hw/mlx5/mr.c
2473 +++ b/drivers/infiniband/hw/mlx5/mr.c
2474 @@ -1828,7 +1828,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2475 klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
2476 klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
2477 klms[i].key = cpu_to_be32(lkey);
2478 - mr->ibmr.length += sg_dma_len(sg);
2479 + mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2480
2481 sg_offset = 0;
2482 }
2483 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2484 index b9e50c10213b..87fcbf71b85a 100644
2485 --- a/drivers/iommu/intel-iommu.c
2486 +++ b/drivers/iommu/intel-iommu.c
2487 @@ -2049,11 +2049,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
2488 if (context_copied(context)) {
2489 u16 did_old = context_domain_id(context);
2490
2491 - if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
2492 + if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
2493 iommu->flush.flush_context(iommu, did_old,
2494 (((u16)bus) << 8) | devfn,
2495 DMA_CCMD_MASK_NOBIT,
2496 DMA_CCMD_DEVICE_INVL);
2497 + iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
2498 + DMA_TLB_DSI_FLUSH);
2499 + }
2500 }
2501
2502 pgd = domain->pgd;
2503 diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
2504 index 02a5345a44a6..197e29d1c2e6 100644
2505 --- a/drivers/md/Kconfig
2506 +++ b/drivers/md/Kconfig
2507 @@ -357,6 +357,7 @@ config DM_LOG_USERSPACE
2508 config DM_RAID
2509 tristate "RAID 1/4/5/6/10 target"
2510 depends on BLK_DEV_DM
2511 + select MD_RAID0
2512 select MD_RAID1
2513 select MD_RAID10
2514 select MD_RAID456
2515 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
2516 index 125aedc3875f..8bf9667ff46b 100644
2517 --- a/drivers/md/dm-bufio.c
2518 +++ b/drivers/md/dm-bufio.c
2519 @@ -215,7 +215,7 @@ static DEFINE_SPINLOCK(param_spinlock);
2520 * Buffers are freed after this timeout
2521 */
2522 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
2523 -static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
2524 +static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
2525
2526 static unsigned long dm_bufio_peak_allocated;
2527 static unsigned long dm_bufio_allocated_kmem_cache;
2528 @@ -923,10 +923,11 @@ static void __get_memory_limit(struct dm_bufio_client *c,
2529 {
2530 unsigned long buffers;
2531
2532 - if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
2533 - mutex_lock(&dm_bufio_clients_lock);
2534 - __cache_size_refresh();
2535 - mutex_unlock(&dm_bufio_clients_lock);
2536 + if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
2537 + if (mutex_trylock(&dm_bufio_clients_lock)) {
2538 + __cache_size_refresh();
2539 + mutex_unlock(&dm_bufio_clients_lock);
2540 + }
2541 }
2542
2543 buffers = dm_bufio_cache_size_per_client >>
2544 @@ -1540,10 +1541,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
2545 return true;
2546 }
2547
2548 -static unsigned get_retain_buffers(struct dm_bufio_client *c)
2549 +static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2550 {
2551 - unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
2552 - return retain_bytes / c->block_size;
2553 + unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
2554 + return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
2555 }
2556
2557 static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
2558 @@ -1553,7 +1554,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
2559 struct dm_buffer *b, *tmp;
2560 unsigned long freed = 0;
2561 unsigned long count = nr_to_scan;
2562 - unsigned retain_target = get_retain_buffers(c);
2563 + unsigned long retain_target = get_retain_buffers(c);
2564
2565 for (l = 0; l < LIST_SIZE; l++) {
2566 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
2567 @@ -1779,11 +1780,19 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2568 static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2569 {
2570 struct dm_buffer *b, *tmp;
2571 - unsigned retain_target = get_retain_buffers(c);
2572 - unsigned count;
2573 + unsigned long retain_target = get_retain_buffers(c);
2574 + unsigned long count;
2575 + LIST_HEAD(write_list);
2576
2577 dm_bufio_lock(c);
2578
2579 + __check_watermark(c, &write_list);
2580 + if (unlikely(!list_empty(&write_list))) {
2581 + dm_bufio_unlock(c);
2582 + __flush_write_list(&write_list);
2583 + dm_bufio_lock(c);
2584 + }
2585 +
2586 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
2587 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
2588 if (count <= retain_target)
2589 @@ -1808,6 +1817,8 @@ static void cleanup_old_buffers(void)
2590
2591 mutex_lock(&dm_bufio_clients_lock);
2592
2593 + __cache_size_refresh();
2594 +
2595 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2596 __evict_old_buffers(c, max_age_hz);
2597
2598 @@ -1930,7 +1941,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2599 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
2600 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2601
2602 -module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
2603 +module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
2604 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2605
2606 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
2607 diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2608 index 695577812cf6..6937ca42be8c 100644
2609 --- a/drivers/md/dm-cache-metadata.c
2610 +++ b/drivers/md/dm-cache-metadata.c
2611 @@ -1383,17 +1383,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
2612
2613 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
2614 {
2615 - int r;
2616 + int r = -EINVAL;
2617 flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
2618 clear_clean_shutdown);
2619
2620 WRITE_LOCK(cmd);
2621 + if (cmd->fail_io)
2622 + goto out;
2623 +
2624 r = __commit_transaction(cmd, mutator);
2625 if (r)
2626 goto out;
2627
2628 r = __begin_transaction(cmd);
2629 -
2630 out:
2631 WRITE_UNLOCK(cmd);
2632 return r;
2633 @@ -1405,7 +1407,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
2634 int r = -EINVAL;
2635
2636 READ_LOCK(cmd);
2637 - r = dm_sm_get_nr_free(cmd->metadata_sm, result);
2638 + if (!cmd->fail_io)
2639 + r = dm_sm_get_nr_free(cmd->metadata_sm, result);
2640 READ_UNLOCK(cmd);
2641
2642 return r;
2643 @@ -1417,7 +1420,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
2644 int r = -EINVAL;
2645
2646 READ_LOCK(cmd);
2647 - r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
2648 + if (!cmd->fail_io)
2649 + r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
2650 READ_UNLOCK(cmd);
2651
2652 return r;
2653 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
2654 index e477af8596e2..ac8235bda61b 100644
2655 --- a/drivers/md/dm-mpath.c
2656 +++ b/drivers/md/dm-mpath.c
2657 @@ -119,7 +119,8 @@ static struct kmem_cache *_mpio_cache;
2658
2659 static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
2660 static void trigger_event(struct work_struct *work);
2661 -static void activate_path(struct work_struct *work);
2662 +static void activate_or_offline_path(struct pgpath *pgpath);
2663 +static void activate_path_work(struct work_struct *work);
2664 static void process_queued_bios(struct work_struct *work);
2665
2666 /*-----------------------------------------------
2667 @@ -144,7 +145,7 @@ static struct pgpath *alloc_pgpath(void)
2668
2669 if (pgpath) {
2670 pgpath->is_active = true;
2671 - INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
2672 + INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work);
2673 }
2674
2675 return pgpath;
2676 @@ -1515,10 +1516,8 @@ static void pg_init_done(void *data, int errors)
2677 spin_unlock_irqrestore(&m->lock, flags);
2678 }
2679
2680 -static void activate_path(struct work_struct *work)
2681 +static void activate_or_offline_path(struct pgpath *pgpath)
2682 {
2683 - struct pgpath *pgpath =
2684 - container_of(work, struct pgpath, activate_path.work);
2685 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
2686
2687 if (pgpath->is_active && !blk_queue_dying(q))
2688 @@ -1527,6 +1526,14 @@ static void activate_path(struct work_struct *work)
2689 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
2690 }
2691
2692 +static void activate_path_work(struct work_struct *work)
2693 +{
2694 + struct pgpath *pgpath =
2695 + container_of(work, struct pgpath, activate_path.work);
2696 +
2697 + activate_or_offline_path(pgpath);
2698 +}
2699 +
2700 static int noretry_error(int error)
2701 {
2702 switch (error) {
2703 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
2704 index a15091a0d40c..4477bf930cf4 100644
2705 --- a/drivers/md/dm-thin-metadata.c
2706 +++ b/drivers/md/dm-thin-metadata.c
2707 @@ -485,11 +485,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
2708 if (r < 0)
2709 return r;
2710
2711 - r = save_sm_roots(pmd);
2712 + r = dm_tm_pre_commit(pmd->tm);
2713 if (r < 0)
2714 return r;
2715
2716 - r = dm_tm_pre_commit(pmd->tm);
2717 + r = save_sm_roots(pmd);
2718 if (r < 0)
2719 return r;
2720
2721 diff --git a/drivers/md/md.c b/drivers/md/md.c
2722 index 24925f2aa235..eddd360624a1 100644
2723 --- a/drivers/md/md.c
2724 +++ b/drivers/md/md.c
2725 @@ -6752,6 +6752,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2726 void __user *argp = (void __user *)arg;
2727 struct mddev *mddev = NULL;
2728 int ro;
2729 + bool did_set_md_closing = false;
2730
2731 if (!md_ioctl_valid(cmd))
2732 return -ENOTTY;
2733 @@ -6841,7 +6842,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2734 err = -EBUSY;
2735 goto out;
2736 }
2737 + WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
2738 set_bit(MD_CLOSING, &mddev->flags);
2739 + did_set_md_closing = true;
2740 mutex_unlock(&mddev->open_mutex);
2741 sync_blockdev(bdev);
2742 }
2743 @@ -7041,6 +7044,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
2744 mddev->hold_active = 0;
2745 mddev_unlock(mddev);
2746 out:
2747 + if(did_set_md_closing)
2748 + clear_bit(MD_CLOSING, &mddev->flags);
2749 return err;
2750 }
2751 #ifdef CONFIG_COMPAT
2752 diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
2753 index 20a40329d84a..7a75b5010f73 100644
2754 --- a/drivers/md/persistent-data/dm-btree.c
2755 +++ b/drivers/md/persistent-data/dm-btree.c
2756 @@ -897,8 +897,12 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
2757 else
2758 *result_key = le64_to_cpu(ro_node(s)->keys[0]);
2759
2760 - if (next_block || flags & INTERNAL_NODE)
2761 - block = value64(ro_node(s), i);
2762 + if (next_block || flags & INTERNAL_NODE) {
2763 + if (find_highest)
2764 + block = value64(ro_node(s), i);
2765 + else
2766 + block = value64(ro_node(s), 0);
2767 + }
2768
2769 } while (flags & INTERNAL_NODE);
2770
2771 diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
2772 index ebb280a14325..32adf6b4a9c7 100644
2773 --- a/drivers/md/persistent-data/dm-space-map-disk.c
2774 +++ b/drivers/md/persistent-data/dm-space-map-disk.c
2775 @@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b)
2776
2777 static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
2778 {
2779 + int r;
2780 + uint32_t old_count;
2781 enum allocation_event ev;
2782 struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
2783
2784 - return sm_ll_dec(&smd->ll, b, &ev);
2785 + r = sm_ll_dec(&smd->ll, b, &ev);
2786 + if (!r && (ev == SM_FREE)) {
2787 + /*
2788 + * It's only free if it's also free in the last
2789 + * transaction.
2790 + */
2791 + r = sm_ll_lookup(&smd->old_ll, b, &old_count);
2792 + if (!r && !old_count)
2793 + smd->nr_allocated_this_transaction--;
2794 + }
2795 +
2796 + return r;
2797 }
2798
2799 static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
2800 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2801 index cce6057b9aca..f34ad2be66a1 100644
2802 --- a/drivers/md/raid5.c
2803 +++ b/drivers/md/raid5.c
2804 @@ -2253,6 +2253,10 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2805 err = -ENOMEM;
2806
2807 mutex_unlock(&conf->cache_size_mutex);
2808 +
2809 + conf->slab_cache = sc;
2810 + conf->active_name = 1-conf->active_name;
2811 +
2812 /* Step 4, return new stripes to service */
2813 while(!list_empty(&newstripes)) {
2814 nsh = list_entry(newstripes.next, struct stripe_head, lru);
2815 @@ -2270,8 +2274,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2816 }
2817 /* critical section pass, GFP_NOIO no longer needed */
2818
2819 - conf->slab_cache = sc;
2820 - conf->active_name = 1-conf->active_name;
2821 if (!err)
2822 conf->pool_size = newsize;
2823 return err;
2824 diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
2825 index 5afb9c508f65..fd0f25ee251f 100644
2826 --- a/drivers/media/dvb-frontends/cxd2841er.c
2827 +++ b/drivers/media/dvb-frontends/cxd2841er.c
2828 @@ -3852,7 +3852,9 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = {
2829 FE_CAN_MUTE_TS |
2830 FE_CAN_2G_MODULATION,
2831 .frequency_min = 42000000,
2832 - .frequency_max = 1002000000
2833 + .frequency_max = 1002000000,
2834 + .symbol_rate_min = 870000,
2835 + .symbol_rate_max = 11700000
2836 },
2837 .init = cxd2841er_init_tc,
2838 .sleep = cxd2841er_sleep_tc,
2839 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2840 index 27e7cf65c2a7..7c24da51626c 100644
2841 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
2842 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2843 @@ -206,6 +206,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work)
2844 }
2845 s5p_mfc_clock_on();
2846 ret = s5p_mfc_init_hw(dev);
2847 + s5p_mfc_clock_off();
2848 if (ret)
2849 mfc_err("Failed to reinit FW\n");
2850 }
2851 @@ -663,9 +664,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
2852 break;
2853 }
2854 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
2855 - wake_up_ctx(ctx, reason, err);
2856 WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);
2857 s5p_mfc_clock_off();
2858 + wake_up_ctx(ctx, reason, err);
2859 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
2860 } else {
2861 s5p_mfc_handle_frame(ctx, reason, err);
2862 @@ -679,15 +680,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
2863 case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
2864 ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
2865 ctx->state = MFCINST_GOT_INST;
2866 - clear_work_bit(ctx);
2867 - wake_up(&ctx->queue);
2868 goto irq_cleanup_hw;
2869
2870 case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
2871 - clear_work_bit(ctx);
2872 ctx->inst_no = MFC_NO_INSTANCE_SET;
2873 ctx->state = MFCINST_FREE;
2874 - wake_up(&ctx->queue);
2875 goto irq_cleanup_hw;
2876
2877 case S5P_MFC_R2H_CMD_SYS_INIT_RET:
2878 @@ -697,9 +694,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
2879 if (ctx)
2880 clear_work_bit(ctx);
2881 s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
2882 - wake_up_dev(dev, reason, err);
2883 clear_bit(0, &dev->hw_lock);
2884 clear_bit(0, &dev->enter_suspend);
2885 + wake_up_dev(dev, reason, err);
2886 break;
2887
2888 case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
2889 @@ -714,9 +711,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
2890 break;
2891
2892 case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
2893 - clear_work_bit(ctx);
2894 ctx->state = MFCINST_RUNNING;
2895 - wake_up(&ctx->queue);
2896 goto irq_cleanup_hw;
2897
2898 default:
2899 @@ -735,6 +730,8 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
2900 mfc_err("Failed to unlock hw\n");
2901
2902 s5p_mfc_clock_off();
2903 + clear_work_bit(ctx);
2904 + wake_up(&ctx->queue);
2905
2906 s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
2907 spin_unlock(&dev->irqlock);
2908 diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
2909 index 4f8c7effdcee..db525cdfac88 100644
2910 --- a/drivers/media/rc/mceusb.c
2911 +++ b/drivers/media/rc/mceusb.c
2912 @@ -1332,8 +1332,8 @@ static int mceusb_dev_probe(struct usb_interface *intf,
2913 }
2914 }
2915 }
2916 - if (ep_in == NULL) {
2917 - dev_dbg(&intf->dev, "inbound and/or endpoint not found");
2918 + if (!ep_in || !ep_out) {
2919 + dev_dbg(&intf->dev, "required endpoints not found\n");
2920 return -ENODEV;
2921 }
2922
2923 diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
2924 index 8263c4b0610b..bf4b3ca2abf9 100644
2925 --- a/drivers/media/usb/cx231xx/cx231xx-audio.c
2926 +++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
2927 @@ -674,10 +674,8 @@ static int cx231xx_audio_init(struct cx231xx *dev)
2928
2929 spin_lock_init(&adev->slock);
2930 err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
2931 - if (err < 0) {
2932 - snd_card_free(card);
2933 - return err;
2934 - }
2935 + if (err < 0)
2936 + goto err_free_card;
2937
2938 snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
2939 &snd_cx231xx_pcm_capture);
2940 @@ -691,10 +689,9 @@ static int cx231xx_audio_init(struct cx231xx *dev)
2941 INIT_WORK(&dev->wq_trigger, audio_trigger);
2942
2943 err = snd_card_register(card);
2944 - if (err < 0) {
2945 - snd_card_free(card);
2946 - return err;
2947 - }
2948 + if (err < 0)
2949 + goto err_free_card;
2950 +
2951 adev->sndcard = card;
2952 adev->udev = dev->udev;
2953
2954 @@ -704,6 +701,11 @@ static int cx231xx_audio_init(struct cx231xx *dev)
2955 hs_config_info[0].interface_info.
2956 audio_index + 1];
2957
2958 + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
2959 + err = -ENODEV;
2960 + goto err_free_card;
2961 + }
2962 +
2963 adev->end_point_addr =
2964 uif->altsetting[0].endpoint[isoc_pipe].desc.
2965 bEndpointAddress;
2966 @@ -713,13 +715,20 @@ static int cx231xx_audio_init(struct cx231xx *dev)
2967 "audio EndPoint Addr 0x%x, Alternate settings: %i\n",
2968 adev->end_point_addr, adev->num_alt);
2969 adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
2970 -
2971 - if (adev->alt_max_pkt_size == NULL)
2972 - return -ENOMEM;
2973 + if (!adev->alt_max_pkt_size) {
2974 + err = -ENOMEM;
2975 + goto err_free_card;
2976 + }
2977
2978 for (i = 0; i < adev->num_alt; i++) {
2979 - u16 tmp =
2980 - le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
2981 + u16 tmp;
2982 +
2983 + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
2984 + err = -ENODEV;
2985 + goto err_free_pkt_size;
2986 + }
2987 +
2988 + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
2989 wMaxPacketSize);
2990 adev->alt_max_pkt_size[i] =
2991 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
2992 @@ -729,6 +738,13 @@ static int cx231xx_audio_init(struct cx231xx *dev)
2993 }
2994
2995 return 0;
2996 +
2997 +err_free_pkt_size:
2998 + kfree(adev->alt_max_pkt_size);
2999 +err_free_card:
3000 + snd_card_free(card);
3001 +
3002 + return err;
3003 }
3004
3005 static int cx231xx_audio_fini(struct cx231xx *dev)
3006 diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
3007 index 36bc25494319..be9e3335dcb7 100644
3008 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
3009 +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
3010 @@ -1397,6 +1397,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3011
3012 uif = udev->actconfig->interface[idx];
3013
3014 + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
3015 + return -ENODEV;
3016 +
3017 dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
3018 dev->video_mode.num_alt = uif->num_altsetting;
3019
3020 @@ -1410,7 +1413,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3021 return -ENOMEM;
3022
3023 for (i = 0; i < dev->video_mode.num_alt; i++) {
3024 - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
3025 + u16 tmp;
3026 +
3027 + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
3028 + return -ENODEV;
3029 +
3030 + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
3031 dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
3032 dev_dbg(dev->dev,
3033 "Alternate setting %i, max size= %i\n", i,
3034 @@ -1427,6 +1435,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3035 }
3036 uif = udev->actconfig->interface[idx];
3037
3038 + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
3039 + return -ENODEV;
3040 +
3041 dev->vbi_mode.end_point_addr =
3042 uif->altsetting[0].endpoint[isoc_pipe].desc.
3043 bEndpointAddress;
3044 @@ -1443,8 +1454,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3045 return -ENOMEM;
3046
3047 for (i = 0; i < dev->vbi_mode.num_alt; i++) {
3048 - u16 tmp =
3049 - le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
3050 + u16 tmp;
3051 +
3052 + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
3053 + return -ENODEV;
3054 +
3055 + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
3056 desc.wMaxPacketSize);
3057 dev->vbi_mode.alt_max_pkt_size[i] =
3058 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
3059 @@ -1464,6 +1479,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3060 }
3061 uif = udev->actconfig->interface[idx];
3062
3063 + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
3064 + return -ENODEV;
3065 +
3066 dev->sliced_cc_mode.end_point_addr =
3067 uif->altsetting[0].endpoint[isoc_pipe].desc.
3068 bEndpointAddress;
3069 @@ -1478,7 +1496,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev,
3070 return -ENOMEM;
3071
3072 for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
3073 - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
3074 + u16 tmp;
3075 +
3076 + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
3077 + return -ENODEV;
3078 +
3079 + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
3080 desc.wMaxPacketSize);
3081 dev->sliced_cc_mode.alt_max_pkt_size[i] =
3082 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
3083 @@ -1647,6 +1670,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
3084 }
3085 uif = udev->actconfig->interface[idx];
3086
3087 + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
3088 + retval = -ENODEV;
3089 + goto err_video_alt;
3090 + }
3091 +
3092 dev->ts1_mode.end_point_addr =
3093 uif->altsetting[0].endpoint[isoc_pipe].
3094 desc.bEndpointAddress;
3095 @@ -1664,7 +1692,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
3096 }
3097
3098 for (i = 0; i < dev->ts1_mode.num_alt; i++) {
3099 - u16 tmp = le16_to_cpu(uif->altsetting[i].
3100 + u16 tmp;
3101 +
3102 + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
3103 + retval = -ENODEV;
3104 + goto err_video_alt;
3105 + }
3106 +
3107 + tmp = le16_to_cpu(uif->altsetting[i].
3108 endpoint[isoc_pipe].desc.
3109 wMaxPacketSize);
3110 dev->ts1_mode.alt_max_pkt_size[i] =
3111 diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
3112 index 47ce9d5de4c6..563f690cd978 100644
3113 --- a/drivers/media/usb/dvb-usb/dib0700_core.c
3114 +++ b/drivers/media/usb/dvb-usb/dib0700_core.c
3115 @@ -812,6 +812,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
3116
3117 /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
3118
3119 + if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
3120 + return -ENODEV;
3121 +
3122 purb = usb_alloc_urb(0, GFP_KERNEL);
3123 if (purb == NULL)
3124 return -ENOMEM;
3125 diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
3126 index d66f56cc46a5..1f7bce64777f 100644
3127 --- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c
3128 +++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c
3129 @@ -12,6 +12,8 @@
3130 #include <linux/kconfig.h>
3131 #include "dibusb.h"
3132
3133 +MODULE_LICENSE("GPL");
3134 +
3135 /* 3000MC/P stuff */
3136 // Config Adjacent channels Perf -cal22
3137 static struct dibx000_agc_config dib3000p_mt2060_agc_config = {
3138 diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c
3139 index 4284f6984dc1..475a3c0cdee7 100644
3140 --- a/drivers/media/usb/dvb-usb/digitv.c
3141 +++ b/drivers/media/usb/dvb-usb/digitv.c
3142 @@ -33,6 +33,9 @@ static int digitv_ctrl_msg(struct dvb_usb_device *d,
3143
3144 wo = (rbuf == NULL || rlen == 0); /* write-only */
3145
3146 + if (wlen > 4 || rlen > 4)
3147 + return -EIO;
3148 +
3149 memset(st->sndbuf, 0, 7);
3150 memset(st->rcvbuf, 0, 7);
3151
3152 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
3153 index c3e67347a977..4a0cc54870c7 100644
3154 --- a/drivers/media/usb/dvb-usb/dw2102.c
3155 +++ b/drivers/media/usb/dvb-usb/dw2102.c
3156 @@ -205,6 +205,20 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
3157
3158 switch (num) {
3159 case 2:
3160 + if (msg[0].len != 1) {
3161 + warn("i2c rd: len=%d is not 1!\n",
3162 + msg[0].len);
3163 + num = -EOPNOTSUPP;
3164 + break;
3165 + }
3166 +
3167 + if (2 + msg[1].len > sizeof(buf6)) {
3168 + warn("i2c rd: len=%d is too big!\n",
3169 + msg[1].len);
3170 + num = -EOPNOTSUPP;
3171 + break;
3172 + }
3173 +
3174 /* read si2109 register by number */
3175 buf6[0] = msg[0].addr << 1;
3176 buf6[1] = msg[0].len;
3177 @@ -220,6 +234,13 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap,
3178 case 1:
3179 switch (msg[0].addr) {
3180 case 0x68:
3181 + if (2 + msg[0].len > sizeof(buf6)) {
3182 + warn("i2c wr: len=%d is too big!\n",
3183 + msg[0].len);
3184 + num = -EOPNOTSUPP;
3185 + break;
3186 + }
3187 +
3188 /* write to si2109 register */
3189 buf6[0] = msg[0].addr << 1;
3190 buf6[1] = msg[0].len;
3191 @@ -263,6 +284,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms
3192 /* first write first register number */
3193 u8 ibuf[MAX_XFER_SIZE], obuf[3];
3194
3195 + if (2 + msg[0].len != sizeof(obuf)) {
3196 + warn("i2c rd: len=%d is not 1!\n",
3197 + msg[0].len);
3198 + ret = -EOPNOTSUPP;
3199 + goto unlock;
3200 + }
3201 +
3202 if (2 + msg[1].len > sizeof(ibuf)) {
3203 warn("i2c rd: len=%d is too big!\n",
3204 msg[1].len);
3205 @@ -463,6 +491,12 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
3206 /* first write first register number */
3207 u8 ibuf[MAX_XFER_SIZE], obuf[3];
3208
3209 + if (2 + msg[0].len != sizeof(obuf)) {
3210 + warn("i2c rd: len=%d is not 1!\n",
3211 + msg[0].len);
3212 + ret = -EOPNOTSUPP;
3213 + goto unlock;
3214 + }
3215 if (2 + msg[1].len > sizeof(ibuf)) {
3216 warn("i2c rd: len=%d is too big!\n",
3217 msg[1].len);
3218 @@ -697,6 +731,13 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
3219 msg[0].buf[0] = state->data[1];
3220 break;
3221 default:
3222 + if (3 + msg[0].len > sizeof(state->data)) {
3223 + warn("i2c wr: len=%d is too big!\n",
3224 + msg[0].len);
3225 + num = -EOPNOTSUPP;
3226 + break;
3227 + }
3228 +
3229 /* always i2c write*/
3230 state->data[0] = 0x08;
3231 state->data[1] = msg[0].addr;
3232 @@ -712,6 +753,19 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
3233 break;
3234 case 2:
3235 /* always i2c read */
3236 + if (4 + msg[0].len > sizeof(state->data)) {
3237 + warn("i2c rd: len=%d is too big!\n",
3238 + msg[0].len);
3239 + num = -EOPNOTSUPP;
3240 + break;
3241 + }
3242 + if (1 + msg[1].len > sizeof(state->data)) {
3243 + warn("i2c rd: len=%d is too big!\n",
3244 + msg[1].len);
3245 + num = -EOPNOTSUPP;
3246 + break;
3247 + }
3248 +
3249 state->data[0] = 0x09;
3250 state->data[1] = msg[0].len;
3251 state->data[2] = msg[1].len;
3252 diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
3253 index ecc207fbaf3c..9e0d6a4166d2 100644
3254 --- a/drivers/media/usb/dvb-usb/ttusb2.c
3255 +++ b/drivers/media/usb/dvb-usb/ttusb2.c
3256 @@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd,
3257 u8 *s, *r = NULL;
3258 int ret = 0;
3259
3260 + if (4 + rlen > 64)
3261 + return -EIO;
3262 +
3263 s = kzalloc(wlen+4, GFP_KERNEL);
3264 if (!s)
3265 return -ENOMEM;
3266 @@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
3267 write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
3268 read = msg[i].flags & I2C_M_RD;
3269
3270 + if (3 + msg[i].len > sizeof(obuf)) {
3271 + err("i2c wr len=%d too high", msg[i].len);
3272 + break;
3273 + }
3274 + if (write_read) {
3275 + if (3 + msg[i+1].len > sizeof(ibuf)) {
3276 + err("i2c rd len=%d too high", msg[i+1].len);
3277 + break;
3278 + }
3279 + } else if (read) {
3280 + if (3 + msg[i].len > sizeof(ibuf)) {
3281 + err("i2c rd len=%d too high", msg[i].len);
3282 + break;
3283 + }
3284 + }
3285 +
3286 obuf[0] = (msg[i].addr << 1) | (write_read | read);
3287 if (read)
3288 obuf[1] = 0;
3289 diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
3290 index 40aaaa9c5f30..78542fff403f 100644
3291 --- a/drivers/media/usb/gspca/konica.c
3292 +++ b/drivers/media/usb/gspca/konica.c
3293 @@ -188,6 +188,9 @@ static int sd_start(struct gspca_dev *gspca_dev)
3294 return -EIO;
3295 }
3296
3297 + if (alt->desc.bNumEndpoints < 2)
3298 + return -ENODEV;
3299 +
3300 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
3301
3302 n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
3303 diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
3304 index c8b4eb2ee7a2..bfdf72355332 100644
3305 --- a/drivers/media/usb/usbvision/usbvision-video.c
3306 +++ b/drivers/media/usb/usbvision/usbvision-video.c
3307 @@ -1506,7 +1506,14 @@ static int usbvision_probe(struct usb_interface *intf,
3308 }
3309
3310 for (i = 0; i < usbvision->num_alt; i++) {
3311 - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
3312 + u16 tmp;
3313 +
3314 + if (uif->altsetting[i].desc.bNumEndpoints < 2) {
3315 + ret = -ENODEV;
3316 + goto err_pkt;
3317 + }
3318 +
3319 + tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
3320 wMaxPacketSize);
3321 usbvision->alt_max_pkt_size[i] =
3322 (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
3323 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
3324 index cc128db85723..e3735bfcc02f 100644
3325 --- a/drivers/media/usb/zr364xx/zr364xx.c
3326 +++ b/drivers/media/usb/zr364xx/zr364xx.c
3327 @@ -604,6 +604,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam,
3328 ptr = pdest = frm->lpvbits;
3329
3330 if (frm->ulState == ZR364XX_READ_IDLE) {
3331 + if (purb->actual_length < 128) {
3332 + /* header incomplete */
3333 + dev_info(&cam->udev->dev,
3334 + "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
3335 + __func__, purb->actual_length);
3336 + return -EINVAL;
3337 + }
3338 +
3339 frm->ulState = ZR364XX_READ_FRAME;
3340 frm->cur_size = 0;
3341
3342 diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
3343 index dd99b06e121a..fa4fe02cfef4 100644
3344 --- a/drivers/misc/cxl/pci.c
3345 +++ b/drivers/misc/cxl/pci.c
3346 @@ -1496,8 +1496,6 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
3347 if ((rc = cxl_native_register_psl_err_irq(adapter)))
3348 goto err;
3349
3350 - /* Release the context lock as adapter is configured */
3351 - cxl_adapter_context_unlock(adapter);
3352 return 0;
3353
3354 err:
3355 @@ -1596,6 +1594,9 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
3356 if ((rc = cxl_sysfs_adapter_add(adapter)))
3357 goto err_put1;
3358
3359 + /* Release the context lock as adapter is configured */
3360 + cxl_adapter_context_unlock(adapter);
3361 +
3362 return adapter;
3363
3364 err_put1:
3365 @@ -1778,7 +1779,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
3366 {
3367 struct cxl *adapter = pci_get_drvdata(pdev);
3368 struct cxl_afu *afu;
3369 - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
3370 + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result;
3371 int i;
3372
3373 /* At this point, we could still have an interrupt pending.
3374 @@ -1883,16 +1884,26 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
3375 for (i = 0; i < adapter->slices; i++) {
3376 afu = adapter->afu[i];
3377
3378 - result = cxl_vphb_error_detected(afu, state);
3379 -
3380 - /* Only continue if everyone agrees on NEED_RESET */
3381 - if (result != PCI_ERS_RESULT_NEED_RESET)
3382 - return result;
3383 + afu_result = cxl_vphb_error_detected(afu, state);
3384
3385 cxl_context_detach_all(afu);
3386 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
3387 pci_deconfigure_afu(afu);
3388 +
3389 + /* Disconnect trumps all, NONE trumps NEED_RESET */
3390 + if (afu_result == PCI_ERS_RESULT_DISCONNECT)
3391 + result = PCI_ERS_RESULT_DISCONNECT;
3392 + else if ((afu_result == PCI_ERS_RESULT_NONE) &&
3393 + (result == PCI_ERS_RESULT_NEED_RESET))
3394 + result = PCI_ERS_RESULT_NONE;
3395 }
3396 +
3397 + /* should take the context lock here */
3398 + if (cxl_adapter_context_lock(adapter) != 0)
3399 + dev_warn(&adapter->dev,
3400 + "Couldn't take context lock with %d active-contexts\n",
3401 + atomic_read(&adapter->contexts_num));
3402 +
3403 cxl_deconfigure_adapter(adapter);
3404
3405 return result;
3406 @@ -1911,6 +1922,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
3407 if (cxl_configure_adapter(adapter, pdev))
3408 goto err;
3409
3410 + /*
3411 + * Unlock context activation for the adapter. Ideally this should be
3412 + * done in cxl_pci_resume but cxlflash module tries to activate the
3413 + * master context as part of slot_reset callback.
3414 + */
3415 + cxl_adapter_context_unlock(adapter);
3416 +
3417 for (i = 0; i < adapter->slices; i++) {
3418 afu = adapter->afu[i];
3419
3420 diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3421 index 3bde96a3f7bf..f222f8a7ba52 100644
3422 --- a/drivers/mtd/nand/nand_base.c
3423 +++ b/drivers/mtd/nand/nand_base.c
3424 @@ -138,6 +138,74 @@ const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
3425 };
3426 EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
3427
3428 +/*
3429 + * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
3430 + * are placed at a fixed offset.
3431 + */
3432 +static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
3433 + struct mtd_oob_region *oobregion)
3434 +{
3435 + struct nand_chip *chip = mtd_to_nand(mtd);
3436 + struct nand_ecc_ctrl *ecc = &chip->ecc;
3437 +
3438 + if (section)
3439 + return -ERANGE;
3440 +
3441 + switch (mtd->oobsize) {
3442 + case 64:
3443 + oobregion->offset = 40;
3444 + break;
3445 + case 128:
3446 + oobregion->offset = 80;
3447 + break;
3448 + default:
3449 + return -EINVAL;
3450 + }
3451 +
3452 + oobregion->length = ecc->total;
3453 + if (oobregion->offset + oobregion->length > mtd->oobsize)
3454 + return -ERANGE;
3455 +
3456 + return 0;
3457 +}
3458 +
3459 +static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
3460 + struct mtd_oob_region *oobregion)
3461 +{
3462 + struct nand_chip *chip = mtd_to_nand(mtd);
3463 + struct nand_ecc_ctrl *ecc = &chip->ecc;
3464 + int ecc_offset = 0;
3465 +
3466 + if (section < 0 || section > 1)
3467 + return -ERANGE;
3468 +
3469 + switch (mtd->oobsize) {
3470 + case 64:
3471 + ecc_offset = 40;
3472 + break;
3473 + case 128:
3474 + ecc_offset = 80;
3475 + break;
3476 + default:
3477 + return -EINVAL;
3478 + }
3479 +
3480 + if (section == 0) {
3481 + oobregion->offset = 2;
3482 + oobregion->length = ecc_offset - 2;
3483 + } else {
3484 + oobregion->offset = ecc_offset + ecc->total;
3485 + oobregion->length = mtd->oobsize - oobregion->offset;
3486 + }
3487 +
3488 + return 0;
3489 +}
3490 +
3491 +const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
3492 + .ecc = nand_ooblayout_ecc_lp_hamming,
3493 + .free = nand_ooblayout_free_lp_hamming,
3494 +};
3495 +
3496 static int check_offs_len(struct mtd_info *mtd,
3497 loff_t ofs, uint64_t len)
3498 {
3499 @@ -4565,7 +4633,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3500 break;
3501 case 64:
3502 case 128:
3503 - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
3504 + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
3505 break;
3506 default:
3507 WARN(1, "No oob scheme defined for oobsize %d\n",
3508 diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
3509 index 5513bfd9cdc9..c178cb0dd219 100644
3510 --- a/drivers/mtd/nand/omap2.c
3511 +++ b/drivers/mtd/nand/omap2.c
3512 @@ -1856,6 +1856,15 @@ static int omap_nand_probe(struct platform_device *pdev)
3513 nand_chip->ecc.priv = NULL;
3514 nand_set_flash_node(nand_chip, dev->of_node);
3515
3516 + if (!mtd->name) {
3517 + mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
3518 + "omap2-nand.%d", info->gpmc_cs);
3519 + if (!mtd->name) {
3520 + dev_err(&pdev->dev, "Failed to set MTD name\n");
3521 + return -ENOMEM;
3522 + }
3523 + }
3524 +
3525 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3526 nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
3527 if (IS_ERR(nand_chip->IO_ADDR_R))
3528 diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
3529 index 40a7c4a2cf0d..af2f09135fb0 100644
3530 --- a/drivers/mtd/nand/orion_nand.c
3531 +++ b/drivers/mtd/nand/orion_nand.c
3532 @@ -23,6 +23,11 @@
3533 #include <asm/sizes.h>
3534 #include <linux/platform_data/mtd-orion_nand.h>
3535
3536 +struct orion_nand_info {
3537 + struct nand_chip chip;
3538 + struct clk *clk;
3539 +};
3540 +
3541 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
3542 {
3543 struct nand_chip *nc = mtd_to_nand(mtd);
3544 @@ -75,20 +80,21 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
3545
3546 static int __init orion_nand_probe(struct platform_device *pdev)
3547 {
3548 + struct orion_nand_info *info;
3549 struct mtd_info *mtd;
3550 struct nand_chip *nc;
3551 struct orion_nand_data *board;
3552 struct resource *res;
3553 - struct clk *clk;
3554 void __iomem *io_base;
3555 int ret = 0;
3556 u32 val = 0;
3557
3558 - nc = devm_kzalloc(&pdev->dev,
3559 - sizeof(struct nand_chip),
3560 + info = devm_kzalloc(&pdev->dev,
3561 + sizeof(struct orion_nand_info),
3562 GFP_KERNEL);
3563 - if (!nc)
3564 + if (!info)
3565 return -ENOMEM;
3566 + nc = &info->chip;
3567 mtd = nand_to_mtd(nc);
3568
3569 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3570 @@ -145,15 +151,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
3571 if (board->dev_ready)
3572 nc->dev_ready = board->dev_ready;
3573
3574 - platform_set_drvdata(pdev, mtd);
3575 + platform_set_drvdata(pdev, info);
3576
3577 /* Not all platforms can gate the clock, so it is not
3578 an error if the clock does not exists. */
3579 - clk = clk_get(&pdev->dev, NULL);
3580 - if (!IS_ERR(clk)) {
3581 - clk_prepare_enable(clk);
3582 - clk_put(clk);
3583 - }
3584 + info->clk = devm_clk_get(&pdev->dev, NULL);
3585 + if (!IS_ERR(info->clk))
3586 + clk_prepare_enable(info->clk);
3587
3588 if (nand_scan(mtd, 1)) {
3589 ret = -ENXIO;
3590 @@ -170,26 +174,22 @@ static int __init orion_nand_probe(struct platform_device *pdev)
3591 return 0;
3592
3593 no_dev:
3594 - if (!IS_ERR(clk)) {
3595 - clk_disable_unprepare(clk);
3596 - clk_put(clk);
3597 - }
3598 + if (!IS_ERR(info->clk))
3599 + clk_disable_unprepare(info->clk);
3600
3601 return ret;
3602 }
3603
3604 static int orion_nand_remove(struct platform_device *pdev)
3605 {
3606 - struct mtd_info *mtd = platform_get_drvdata(pdev);
3607 - struct clk *clk;
3608 + struct orion_nand_info *info = platform_get_drvdata(pdev);
3609 + struct nand_chip *chip = &info->chip;
3610 + struct mtd_info *mtd = nand_to_mtd(chip);
3611
3612 nand_release(mtd);
3613
3614 - clk = clk_get(&pdev->dev, NULL);
3615 - if (!IS_ERR(clk)) {
3616 - clk_disable_unprepare(clk);
3617 - clk_put(clk);
3618 - }
3619 + if (!IS_ERR(info->clk))
3620 + clk_disable_unprepare(info->clk);
3621
3622 return 0;
3623 }
3624 diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
3625 index 8716b8c07feb..6f3c805f7211 100644
3626 --- a/drivers/net/irda/irda-usb.c
3627 +++ b/drivers/net/irda/irda-usb.c
3628 @@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
3629 * are "42101001.sb" or "42101002.sb"
3630 */
3631 sprintf(stir421x_fw_name, "4210%4X.sb",
3632 - self->usbdev->descriptor.bcdDevice);
3633 + le16_to_cpu(self->usbdev->descriptor.bcdDevice));
3634 ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
3635 if (ret < 0)
3636 return ret;
3637 diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
3638 index e1c338cb9cb5..f15589c70284 100644
3639 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c
3640 +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
3641 @@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
3642 { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
3643 { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
3644 { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
3645 + { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
3646
3647 { USB_DEVICE(0x0cf3, 0x7015),
3648 .driver_info = AR9287_USB }, /* Atheros */
3649 @@ -1218,6 +1219,9 @@ static int send_eject_command(struct usb_interface *interface)
3650 u8 bulk_out_ep;
3651 int r;
3652
3653 + if (iface_desc->desc.bNumEndpoints < 2)
3654 + return -ENODEV;
3655 +
3656 /* Find bulk out endpoint */
3657 for (r = 1; r >= 0; r--) {
3658 endpoint = &iface_desc->endpoint[r].desc;
3659 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
3660 index 16241d21727b..afdbbf59a278 100644
3661 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
3662 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
3663 @@ -2512,9 +2512,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
3664 priv->random_mac[i] |= get_random_int() &
3665 ~(request->mac_addr_mask[i]);
3666 }
3667 + ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
3668 + } else {
3669 + eth_zero_addr(priv->random_mac);
3670 }
3671
3672 - ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac);
3673 user_scan_cfg->num_ssids = request->n_ssids;
3674 user_scan_cfg->ssid_list = request->ssids;
3675
3676 diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
3677 index 7a310c491ea5..1fdb86cd4734 100644
3678 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c
3679 +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
3680 @@ -995,6 +995,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
3681 if (card && card->cmd_buf) {
3682 mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
3683 PCI_DMA_TODEVICE);
3684 + dev_kfree_skb_any(card->cmd_buf);
3685 }
3686 return 0;
3687 }
3688 @@ -1561,6 +1562,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
3689 return -1;
3690
3691 card->cmd_buf = skb;
3692 + /*
3693 + * Need to keep a reference, since core driver might free up this
3694 + * buffer before we've unmapped it.
3695 + */
3696 + skb_get(skb);
3697
3698 /* To send a command, the driver will:
3699 1. Write the 64bit physical address of the data buffer to
3700 @@ -1658,6 +1664,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
3701 if (card->cmd_buf) {
3702 mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
3703 PCI_DMA_TODEVICE);
3704 + dev_kfree_skb_any(card->cmd_buf);
3705 card->cmd_buf = NULL;
3706 }
3707
3708 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
3709 index 5dad402171c2..a74fad6e53c1 100644
3710 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
3711 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
3712 @@ -359,6 +359,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw)
3713 return rtl8821ae_phy_rf6052_config(hw);
3714 }
3715
3716 +static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
3717 +{
3718 + struct rtl_priv *rtlpriv = rtl_priv(hw);
3719 + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
3720 + u8 tmp;
3721 +
3722 + switch (rtlhal->rfe_type) {
3723 + case 3:
3724 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
3725 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
3726 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
3727 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
3728 + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
3729 + break;
3730 + case 4:
3731 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
3732 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
3733 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
3734 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
3735 + break;
3736 + case 5:
3737 + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
3738 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
3739 + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
3740 + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
3741 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
3742 + break;
3743 + case 1:
3744 + if (rtlpriv->btcoexist.bt_coexistence) {
3745 + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
3746 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
3747 + 0x77777777);
3748 + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
3749 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
3750 + break;
3751 + }
3752 + case 0:
3753 + case 2:
3754 + default:
3755 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
3756 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
3757 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
3758 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
3759 + break;
3760 + }
3761 +}
3762 +
3763 +static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
3764 +{
3765 + struct rtl_priv *rtlpriv = rtl_priv(hw);
3766 + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
3767 + u8 tmp;
3768 +
3769 + switch (rtlhal->rfe_type) {
3770 + case 0:
3771 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
3772 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
3773 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
3774 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
3775 + break;
3776 + case 1:
3777 + if (rtlpriv->btcoexist.bt_coexistence) {
3778 + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
3779 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
3780 + 0x77337717);
3781 + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
3782 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
3783 + } else {
3784 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
3785 + 0x77337717);
3786 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
3787 + 0x77337717);
3788 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
3789 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
3790 + }
3791 + break;
3792 + case 3:
3793 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
3794 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
3795 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
3796 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
3797 + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
3798 + break;
3799 + case 5:
3800 + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
3801 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
3802 + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
3803 + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
3804 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
3805 + break;
3806 + case 2:
3807 + case 4:
3808 + default:
3809 + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
3810 + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
3811 + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
3812 + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
3813 + break;
3814 + }
3815 +}
3816 +
3817 u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
3818 u8 rf_path)
3819 {
3820 @@ -553,14 +654,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
3821 /* 0x82C[1:0] = 2b'00 */
3822 rtl_set_bbreg(hw, 0x82c, 0x3, 0);
3823 }
3824 - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
3825 - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
3826 - 0x77777777);
3827 - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
3828 - 0x77777777);
3829 - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
3830 - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
3831 - }
3832 +
3833 + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
3834 + _rtl8812ae_phy_set_rfe_reg_24g(hw);
3835
3836 rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
3837 rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
3838 @@ -615,14 +711,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
3839 /* 0x82C[1:0] = 2'b00 */
3840 rtl_set_bbreg(hw, 0x82c, 0x3, 1);
3841
3842 - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
3843 - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
3844 - 0x77337777);
3845 - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
3846 - 0x77337777);
3847 - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
3848 - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
3849 - }
3850 + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
3851 + _rtl8812ae_phy_set_rfe_reg_5g(hw);
3852
3853 rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
3854 rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
3855 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
3856 index 1d6110f9c1fb..ed69dbe178ff 100644
3857 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
3858 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
3859 @@ -2424,6 +2424,7 @@
3860 #define BMASKH4BITS 0xf0000000
3861 #define BMASKOFDM_D 0xffc00000
3862 #define BMASKCCK 0x3f3f3f3f
3863 +#define BMASKRFEINV 0x3ff00000
3864
3865 #define BRFREGOFFSETMASK 0xfffff
3866
3867 diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
3868 index 351bac8f6503..0392eb8a0dea 100644
3869 --- a/drivers/nvdimm/bus.c
3870 +++ b/drivers/nvdimm/bus.c
3871 @@ -218,7 +218,10 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
3872 if (cmd_rc < 0)
3873 return cmd_rc;
3874
3875 - nvdimm_clear_from_poison_list(nvdimm_bus, phys, len);
3876 + if (clear_err.cleared > 0)
3877 + nvdimm_clear_from_poison_list(nvdimm_bus, phys,
3878 + clear_err.cleared);
3879 +
3880 return clear_err.cleared;
3881 }
3882 EXPORT_SYMBOL_GPL(nvdimm_clear_poison);
3883 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
3884 index 8a9c186898c7..14eac73e8dbc 100644
3885 --- a/drivers/nvme/host/pci.c
3886 +++ b/drivers/nvme/host/pci.c
3887 @@ -1384,6 +1384,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
3888 if (dev->cmb) {
3889 iounmap(dev->cmb);
3890 dev->cmb = NULL;
3891 + if (dev->cmbsz) {
3892 + sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
3893 + &dev_attr_cmb.attr, NULL);
3894 + dev->cmbsz = 0;
3895 + }
3896 }
3897 }
3898
3899 @@ -1655,6 +1660,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
3900 {
3901 struct pci_dev *pdev = to_pci_dev(dev->dev);
3902
3903 + nvme_release_cmb(dev);
3904 pci_free_irq_vectors(pdev);
3905
3906 if (pci_is_enabled(pdev)) {
3907 @@ -1993,7 +1999,6 @@ static void nvme_remove(struct pci_dev *pdev)
3908 nvme_dev_disable(dev, true);
3909 nvme_dev_remove_admin(dev);
3910 nvme_free_queues(dev, 0);
3911 - nvme_release_cmb(dev);
3912 nvme_release_prp_pools(dev);
3913 nvme_dev_unmap(dev);
3914 nvme_put_ctrl(&dev->ctrl);
3915 diff --git a/drivers/of/address.c b/drivers/of/address.c
3916 index 02b2903fe9d2..72914cdfce2a 100644
3917 --- a/drivers/of/address.c
3918 +++ b/drivers/of/address.c
3919 @@ -263,7 +263,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
3920 if (!parser->range || parser->range + parser->np > parser->end)
3921 return NULL;
3922
3923 - range->pci_space = parser->range[0];
3924 + range->pci_space = be32_to_cpup(parser->range);
3925 range->flags = of_bus_pci_get_flags(parser->range);
3926 range->pci_addr = of_read_number(parser->range + 1, ns);
3927 range->cpu_addr = of_translate_address(parser->node,
3928 diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
3929 index c89d5d231a0e..6a43fd3d0576 100644
3930 --- a/drivers/of/fdt.c
3931 +++ b/drivers/of/fdt.c
3932 @@ -505,6 +505,9 @@ static void *__unflatten_device_tree(const void *blob,
3933
3934 /* Allocate memory for the expanded device tree */
3935 mem = dt_alloc(size + 4, __alignof__(struct device_node));
3936 + if (!mem)
3937 + return NULL;
3938 +
3939 memset(mem, 0, size);
3940
3941 *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
3942 diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
3943 index a53982a330ea..2db1f7a04baf 100644
3944 --- a/drivers/of/of_numa.c
3945 +++ b/drivers/of/of_numa.c
3946 @@ -57,6 +57,8 @@ static void __init of_numa_parse_cpu_nodes(void)
3947 else
3948 node_set(nid, numa_nodes_parsed);
3949 }
3950 +
3951 + of_node_put(cpus);
3952 }
3953
3954 static int __init of_numa_parse_memory_nodes(void)
3955 diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
3956 index 61fc349c96d4..dafb4cdb2b7f 100644
3957 --- a/drivers/pci/host/pci-hyperv.c
3958 +++ b/drivers/pci/host/pci-hyperv.c
3959 @@ -72,6 +72,7 @@ enum {
3960 PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1
3961 };
3962
3963 +#define CPU_AFFINITY_ALL -1ULL
3964 #define PCI_CONFIG_MMIO_LENGTH 0x2000
3965 #define CFG_PAGE_OFFSET 0x1000
3966 #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)
3967 @@ -868,7 +869,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
3968 hv_int_desc_free(hpdev, int_desc);
3969 }
3970
3971 - int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL);
3972 + int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
3973 if (!int_desc)
3974 goto drop_reference;
3975
3976 @@ -889,9 +890,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
3977 * processors because Hyper-V only supports 64 in a guest.
3978 */
3979 affinity = irq_data_get_affinity_mask(data);
3980 - for_each_cpu_and(cpu, affinity, cpu_online_mask) {
3981 - int_pkt->int_desc.cpu_mask |=
3982 - (1ULL << vmbus_cpu_number_to_vp_number(cpu));
3983 + if (cpumask_weight(affinity) >= 32) {
3984 + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
3985 + } else {
3986 + for_each_cpu_and(cpu, affinity, cpu_online_mask) {
3987 + int_pkt->int_desc.cpu_mask |=
3988 + (1ULL << vmbus_cpu_number_to_vp_number(cpu));
3989 + }
3990 }
3991
3992 ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt,
3993 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
3994 index bcd10c795284..1b0786555394 100644
3995 --- a/drivers/pci/pci-sysfs.c
3996 +++ b/drivers/pci/pci-sysfs.c
3997 @@ -974,15 +974,19 @@ void pci_remove_legacy_files(struct pci_bus *b)
3998 int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
3999 enum pci_mmap_api mmap_api)
4000 {
4001 - unsigned long nr, start, size, pci_start;
4002 + unsigned long nr, start, size;
4003 + resource_size_t pci_start = 0, pci_end;
4004
4005 if (pci_resource_len(pdev, resno) == 0)
4006 return 0;
4007 nr = vma_pages(vma);
4008 start = vma->vm_pgoff;
4009 size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
4010 - pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
4011 - pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
4012 + if (mmap_api == PCI_MMAP_PROCFS) {
4013 + pci_resource_to_user(pdev, resno, &pdev->resource[resno],
4014 + &pci_start, &pci_end);
4015 + pci_start >>= PAGE_SHIFT;
4016 + }
4017 if (start >= pci_start && start < pci_start + size &&
4018 start + nr <= pci_start + size)
4019 return 1;
4020 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
4021 index 6922964e3dff..579c4946dc6e 100644
4022 --- a/drivers/pci/pci.c
4023 +++ b/drivers/pci/pci.c
4024 @@ -1782,8 +1782,8 @@ static void pci_pme_list_scan(struct work_struct *work)
4025 }
4026 }
4027 if (!list_empty(&pci_pme_list))
4028 - schedule_delayed_work(&pci_pme_work,
4029 - msecs_to_jiffies(PME_TIMEOUT));
4030 + queue_delayed_work(system_freezable_wq, &pci_pme_work,
4031 + msecs_to_jiffies(PME_TIMEOUT));
4032 mutex_unlock(&pci_pme_list_mutex);
4033 }
4034
4035 @@ -1848,8 +1848,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
4036 mutex_lock(&pci_pme_list_mutex);
4037 list_add(&pme_dev->list, &pci_pme_list);
4038 if (list_is_singular(&pci_pme_list))
4039 - schedule_delayed_work(&pci_pme_work,
4040 - msecs_to_jiffies(PME_TIMEOUT));
4041 + queue_delayed_work(system_freezable_wq,
4042 + &pci_pme_work,
4043 + msecs_to_jiffies(PME_TIMEOUT));
4044 mutex_unlock(&pci_pme_list_mutex);
4045 } else {
4046 mutex_lock(&pci_pme_list_mutex);
4047 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
4048 index 2408abe4ee8c..66c886365fbc 100644
4049 --- a/drivers/pci/proc.c
4050 +++ b/drivers/pci/proc.c
4051 @@ -231,24 +231,33 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
4052 {
4053 struct pci_dev *dev = PDE_DATA(file_inode(file));
4054 struct pci_filp_private *fpriv = file->private_data;
4055 - int i, ret, write_combine;
4056 + int i, ret, write_combine = 0, res_bit;
4057
4058 if (!capable(CAP_SYS_RAWIO))
4059 return -EPERM;
4060
4061 + if (fpriv->mmap_state == pci_mmap_io)
4062 + res_bit = IORESOURCE_IO;
4063 + else
4064 + res_bit = IORESOURCE_MEM;
4065 +
4066 /* Make sure the caller is mapping a real resource for this device */
4067 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
4068 - if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
4069 + if (dev->resource[i].flags & res_bit &&
4070 + pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
4071 break;
4072 }
4073
4074 if (i >= PCI_ROM_RESOURCE)
4075 return -ENODEV;
4076
4077 - if (fpriv->mmap_state == pci_mmap_mem)
4078 - write_combine = fpriv->write_combine;
4079 - else
4080 - write_combine = 0;
4081 + if (fpriv->mmap_state == pci_mmap_mem &&
4082 + fpriv->write_combine) {
4083 + if (dev->resource[i].flags & IORESOURCE_PREFETCH)
4084 + write_combine = 1;
4085 + else
4086 + return -EINVAL;
4087 + }
4088 ret = pci_mmap_page_range(dev, vma,
4089 fpriv->mmap_state, write_combine);
4090 if (ret < 0)
4091 diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
4092 index 3314bf299a51..dfa8d50a5d74 100644
4093 --- a/drivers/regulator/rk808-regulator.c
4094 +++ b/drivers/regulator/rk808-regulator.c
4095 @@ -520,7 +520,7 @@ static const struct regulator_desc rk818_reg[] = {
4096 RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
4097 BIT(0), 400),
4098 RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100,
4099 - RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
4100 + RK818_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG,
4101 BIT(1), 400),
4102 {
4103 .name = "LDO_REG3",
4104 diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
4105 index d2c3d7cc35f5..5ca6d2130593 100644
4106 --- a/drivers/regulator/tps65023-regulator.c
4107 +++ b/drivers/regulator/tps65023-regulator.c
4108 @@ -311,8 +311,7 @@ static int tps_65023_probe(struct i2c_client *client,
4109
4110 /* Enable setting output voltage by I2C */
4111 regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
4112 - TPS65023_REG_CTRL2_CORE_ADJ,
4113 - TPS65023_REG_CTRL2_CORE_ADJ);
4114 + TPS65023_REG_CTRL2_CORE_ADJ, 0);
4115
4116 return 0;
4117 }
4118 diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
4119 index c4fe95a25621..904422f5b62f 100644
4120 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
4121 +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
4122 @@ -1169,6 +1169,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
4123 cmd = list_first_entry_or_null(&vscsi->free_cmd,
4124 struct ibmvscsis_cmd, list);
4125 if (cmd) {
4126 + cmd->flags &= ~(DELAY_SEND);
4127 list_del(&cmd->list);
4128 cmd->iue = iue;
4129 cmd->type = UNSET_TYPE;
4130 @@ -1748,45 +1749,79 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
4131 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
4132 {
4133 u64 msg_hi = 0;
4134 - /* note do not attmempt to access the IU_data_ptr with this pointer
4135 + /* note do not attempt to access the IU_data_ptr with this pointer
4136 * it is not valid
4137 */
4138 struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
4139 struct ibmvscsis_cmd *cmd, *nxt;
4140 struct iu_entry *iue;
4141 long rc = ADAPT_SUCCESS;
4142 + bool retry = false;
4143
4144 if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
4145 - list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
4146 - iue = cmd->iue;
4147 + do {
4148 + retry = false;
4149 + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
4150 + list) {
4151 + /*
4152 + * Check to make sure abort cmd gets processed
4153 + * prior to the abort tmr cmd
4154 + */
4155 + if (cmd->flags & DELAY_SEND)
4156 + continue;
4157
4158 - crq->valid = VALID_CMD_RESP_EL;
4159 - crq->format = cmd->rsp.format;
4160 + if (cmd->abort_cmd) {
4161 + retry = true;
4162 + cmd->abort_cmd->flags &= ~(DELAY_SEND);
4163 + }
4164
4165 - if (cmd->flags & CMD_FAST_FAIL)
4166 - crq->status = VIOSRP_ADAPTER_FAIL;
4167 + /*
4168 + * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
4169 + * the case where LIO issued a
4170 + * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
4171 + * case then we dont send a response, since it
4172 + * was already done.
4173 + */
4174 + if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
4175 + !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
4176 + list_del(&cmd->list);
4177 + ibmvscsis_free_cmd_resources(vscsi,
4178 + cmd);
4179 + } else {
4180 + iue = cmd->iue;
4181
4182 - crq->IU_length = cpu_to_be16(cmd->rsp.len);
4183 + crq->valid = VALID_CMD_RESP_EL;
4184 + crq->format = cmd->rsp.format;
4185
4186 - rc = h_send_crq(vscsi->dma_dev->unit_address,
4187 - be64_to_cpu(msg_hi),
4188 - be64_to_cpu(cmd->rsp.tag));
4189 + if (cmd->flags & CMD_FAST_FAIL)
4190 + crq->status = VIOSRP_ADAPTER_FAIL;
4191
4192 - pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
4193 - cmd, be64_to_cpu(cmd->rsp.tag), rc);
4194 + crq->IU_length = cpu_to_be16(cmd->rsp.len);
4195
4196 - /* if all ok free up the command element resources */
4197 - if (rc == H_SUCCESS) {
4198 - /* some movement has occurred */
4199 - vscsi->rsp_q_timer.timer_pops = 0;
4200 - list_del(&cmd->list);
4201 + rc = h_send_crq(vscsi->dma_dev->unit_address,
4202 + be64_to_cpu(msg_hi),
4203 + be64_to_cpu(cmd->rsp.tag));
4204
4205 - ibmvscsis_free_cmd_resources(vscsi, cmd);
4206 - } else {
4207 - srp_snd_msg_failed(vscsi, rc);
4208 - break;
4209 + pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
4210 + cmd, be64_to_cpu(cmd->rsp.tag), rc);
4211 +
4212 + /* if all ok free up the command
4213 + * element resources
4214 + */
4215 + if (rc == H_SUCCESS) {
4216 + /* some movement has occurred */
4217 + vscsi->rsp_q_timer.timer_pops = 0;
4218 + list_del(&cmd->list);
4219 +
4220 + ibmvscsis_free_cmd_resources(vscsi,
4221 + cmd);
4222 + } else {
4223 + srp_snd_msg_failed(vscsi, rc);
4224 + break;
4225 + }
4226 + }
4227 }
4228 - }
4229 + } while (retry);
4230
4231 if (!rc) {
4232 /*
4233 @@ -2707,6 +2742,7 @@ static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
4234
4235 for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
4236 i++, cmd++) {
4237 + cmd->abort_cmd = NULL;
4238 cmd->adapter = vscsi;
4239 INIT_WORK(&cmd->work, ibmvscsis_scheduler);
4240 list_add_tail(&cmd->list, &vscsi->free_cmd);
4241 @@ -3578,9 +3614,20 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
4242 {
4243 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
4244 se_cmd);
4245 + struct scsi_info *vscsi = cmd->adapter;
4246 struct iu_entry *iue = cmd->iue;
4247 int rc;
4248
4249 + /*
4250 + * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
4251 + * since LIO can't do anything about it, and we dont want to
4252 + * attempt an srp_transfer_data.
4253 + */
4254 + if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
4255 + pr_err("write_pending failed since: %d\n", vscsi->flags);
4256 + return 0;
4257 + }
4258 +
4259 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
4260 1, 1);
4261 if (rc) {
4262 @@ -3659,11 +3706,28 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
4263 struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
4264 se_cmd);
4265 struct scsi_info *vscsi = cmd->adapter;
4266 + struct ibmvscsis_cmd *cmd_itr;
4267 + struct iu_entry *iue = iue = cmd->iue;
4268 + struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
4269 + u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
4270 uint len;
4271
4272 pr_debug("queue_tm_rsp %p, status %d\n",
4273 se_cmd, (int)se_cmd->se_tmr_req->response);
4274
4275 + if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
4276 + cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
4277 + spin_lock_bh(&vscsi->intr_lock);
4278 + list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
4279 + if (tag_to_abort == cmd_itr->se_cmd.tag) {
4280 + cmd_itr->abort_cmd = cmd;
4281 + cmd->flags |= DELAY_SEND;
4282 + break;
4283 + }
4284 + }
4285 + spin_unlock_bh(&vscsi->intr_lock);
4286 + }
4287 +
4288 srp_build_response(vscsi, cmd, &len);
4289 cmd->rsp.format = SRP_FORMAT;
4290 cmd->rsp.len = len;
4291 @@ -3671,8 +3735,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
4292
4293 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
4294 {
4295 - /* TBD: What (if anything) should we do here? */
4296 - pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
4297 + pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
4298 + se_cmd, se_cmd->tag);
4299 }
4300
4301 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
4302 diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
4303 index 98b0ca79a5c5..f5683affeff3 100644
4304 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
4305 +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h
4306 @@ -167,10 +167,12 @@ struct ibmvscsis_cmd {
4307 struct iu_rsp rsp;
4308 struct work_struct work;
4309 struct scsi_info *adapter;
4310 + struct ibmvscsis_cmd *abort_cmd;
4311 /* Sense buffer that will be mapped into outgoing status */
4312 unsigned char sense_buf[TRANSPORT_SENSE_BUFFER];
4313 u64 init_time;
4314 #define CMD_FAST_FAIL BIT(0)
4315 +#define DELAY_SEND BIT(1)
4316 u32 flags;
4317 char type;
4318 };
4319 diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
4320 index 8d6bca61e7aa..591f2740e5af 100644
4321 --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
4322 +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
4323 @@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
4324
4325 switch (variable) {
4326 case HW_VAR_BSSID:
4327 - rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
4328 - rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
4329 + /* BSSIDR 2 byte alignment */
4330 + rtl92e_writew(dev, BSSIDR, *(u16 *)val);
4331 + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
4332 break;
4333
4334 case HW_VAR_MEDIA_STATUS:
4335 @@ -626,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev)
4336 struct r8192_priv *priv = rtllib_priv(dev);
4337
4338 RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
4339 - curCR = rtl92e_readl(dev, EPROM_CMD);
4340 + curCR = rtl92e_readw(dev, EPROM_CMD);
4341 RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
4342 curCR);
4343 priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
4344 @@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct net_device *dev)
4345 rtl92e_config_rate(dev, &rate_config);
4346 priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
4347 priv->basic_rate = rate_config &= 0x15f;
4348 - rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
4349 - rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
4350 + rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
4351 + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
4352
4353 if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
4354 rtl92e_writew(dev, ATIMWND, 2);
4355 @@ -1184,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
4356 struct cb_desc *cb_desc, struct sk_buff *skb)
4357 {
4358 struct r8192_priv *priv = rtllib_priv(dev);
4359 - dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
4360 - PCI_DMA_TODEVICE);
4361 + dma_addr_t mapping;
4362 struct tx_fwinfo_8190pci *pTxFwInfo;
4363
4364 pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
4365 @@ -1196,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
4366 pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
4367 pTxFwInfo->TxRate, cb_desc);
4368
4369 - if (pci_dma_mapping_error(priv->pdev, mapping))
4370 - netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
4371 if (cb_desc->bAMPDUEnable) {
4372 pTxFwInfo->AllowAggregation = 1;
4373 pTxFwInfo->RxMF = cb_desc->ampdu_factor;
4374 @@ -1232,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
4375 }
4376
4377 memset((u8 *)pdesc, 0, 12);
4378 +
4379 + mapping = pci_map_single(priv->pdev, skb->data, skb->len,
4380 + PCI_DMA_TODEVICE);
4381 + if (pci_dma_mapping_error(priv->pdev, mapping)) {
4382 + netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
4383 + return;
4384 + }
4385 +
4386 pdesc->LINIP = 0;
4387 pdesc->CmdInit = 1;
4388 pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
4389 diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
4390 index a966a8e490ab..4615a6f0128e 100644
4391 --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
4392 +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
4393 @@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr,
4394 pTsCommonInfo->TClasNum = TCLAS_Num;
4395 }
4396
4397 -static bool IsACValid(unsigned int tid)
4398 -{
4399 - return tid < 7;
4400 -}
4401 -
4402 bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
4403 u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
4404 {
4405 @@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
4406 if (ieee->current_network.qos_data.supported == 0) {
4407 UP = 0;
4408 } else {
4409 - if (!IsACValid(TID)) {
4410 - netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
4411 - __func__, TID);
4412 - return false;
4413 - }
4414 -
4415 switch (TID) {
4416 case 0:
4417 case 3:
4418 @@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS,
4419 case 7:
4420 UP = 7;
4421 break;
4422 + default:
4423 + netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n",
4424 + __func__, TID);
4425 + return false;
4426 }
4427 }
4428
4429 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
4430 index c5ff13f22b24..a876d47246dc 100644
4431 --- a/drivers/usb/class/cdc-acm.c
4432 +++ b/drivers/usb/class/cdc-acm.c
4433 @@ -311,6 +311,12 @@ static void acm_ctrl_irq(struct urb *urb)
4434 break;
4435
4436 case USB_CDC_NOTIFY_SERIAL_STATE:
4437 + if (le16_to_cpu(dr->wLength) != 2) {
4438 + dev_dbg(&acm->control->dev,
4439 + "%s - malformed serial state\n", __func__);
4440 + break;
4441 + }
4442 +
4443 newctrl = get_unaligned_le16(data);
4444
4445 if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
4446 @@ -347,11 +353,10 @@ static void acm_ctrl_irq(struct urb *urb)
4447
4448 default:
4449 dev_dbg(&acm->control->dev,
4450 - "%s - unknown notification %d received: index %d "
4451 - "len %d data0 %d data1 %d\n",
4452 + "%s - unknown notification %d received: index %d len %d\n",
4453 __func__,
4454 - dr->bNotificationType, dr->wIndex,
4455 - dr->wLength, data[0], data[1]);
4456 + dr->bNotificationType, dr->wIndex, dr->wLength);
4457 +
4458 break;
4459 }
4460 exit:
4461 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
4462 index 4016dae7433b..840930b014f6 100644
4463 --- a/drivers/usb/core/devio.c
4464 +++ b/drivers/usb/core/devio.c
4465 @@ -481,11 +481,11 @@ static void snoop_urb(struct usb_device *udev,
4466
4467 if (userurb) { /* Async */
4468 if (when == SUBMIT)
4469 - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
4470 + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
4471 "length %u\n",
4472 userurb, ep, t, d, length);
4473 else
4474 - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
4475 + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, "
4476 "actual_length %u status %d\n",
4477 userurb, ep, t, d, length,
4478 timeout_or_status);
4479 @@ -1905,7 +1905,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg)
4480 if (as) {
4481 int retval;
4482
4483 - snoop(&ps->dev->dev, "reap %p\n", as->userurb);
4484 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
4485 retval = processcompl(as, (void __user * __user *)arg);
4486 free_async(as);
4487 return retval;
4488 @@ -1922,7 +1922,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg)
4489
4490 as = async_getcompleted(ps);
4491 if (as) {
4492 - snoop(&ps->dev->dev, "reap %p\n", as->userurb);
4493 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
4494 retval = processcompl(as, (void __user * __user *)arg);
4495 free_async(as);
4496 } else {
4497 @@ -2053,7 +2053,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg)
4498 if (as) {
4499 int retval;
4500
4501 - snoop(&ps->dev->dev, "reap %p\n", as->userurb);
4502 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
4503 retval = processcompl_compat(as, (void __user * __user *)arg);
4504 free_async(as);
4505 return retval;
4506 @@ -2070,7 +2070,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar
4507
4508 as = async_getcompleted(ps);
4509 if (as) {
4510 - snoop(&ps->dev->dev, "reap %p\n", as->userurb);
4511 + snoop(&ps->dev->dev, "reap %pK\n", as->userurb);
4512 retval = processcompl_compat(as, (void __user * __user *)arg);
4513 free_async(as);
4514 } else {
4515 @@ -2499,7 +2499,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
4516 #endif
4517
4518 case USBDEVFS_DISCARDURB:
4519 - snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p);
4520 + snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p);
4521 ret = proc_unlinkurb(ps, p);
4522 break;
4523
4524 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4525 index f029aad67183..3b9735abf2e0 100644
4526 --- a/drivers/usb/core/hcd.c
4527 +++ b/drivers/usb/core/hcd.c
4528 @@ -1722,7 +1722,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
4529 if (retval == 0)
4530 retval = -EINPROGRESS;
4531 else if (retval != -EIDRM && retval != -EBUSY)
4532 - dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
4533 + dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n",
4534 urb, retval);
4535 usb_put_dev(udev);
4536 }
4537 @@ -1889,7 +1889,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev,
4538 /* kick hcd */
4539 unlink1(hcd, urb, -ESHUTDOWN);
4540 dev_dbg (hcd->self.controller,
4541 - "shutdown urb %p ep%d%s%s\n",
4542 + "shutdown urb %pK ep%d%s%s\n",
4543 urb, usb_endpoint_num(&ep->desc),
4544 is_in ? "in" : "out",
4545 ({ char *s;
4546 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
4547 index 579900640faa..8714b352e57f 100644
4548 --- a/drivers/usb/core/hub.c
4549 +++ b/drivers/usb/core/hub.c
4550 @@ -360,7 +360,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
4551 }
4552
4553 /* USB 2.0 spec Section 11.24.4.5 */
4554 -static int get_hub_descriptor(struct usb_device *hdev, void *data)
4555 +static int get_hub_descriptor(struct usb_device *hdev,
4556 + struct usb_hub_descriptor *desc)
4557 {
4558 int i, ret, size;
4559 unsigned dtype;
4560 @@ -376,10 +377,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data)
4561 for (i = 0; i < 3; i++) {
4562 ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
4563 USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
4564 - dtype << 8, 0, data, size,
4565 + dtype << 8, 0, desc, size,
4566 USB_CTRL_GET_TIMEOUT);
4567 - if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
4568 + if (hub_is_superspeed(hdev)) {
4569 + if (ret == size)
4570 + return ret;
4571 + } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
4572 + /* Make sure we have the DeviceRemovable field. */
4573 + size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
4574 + if (ret < size)
4575 + return -EMSGSIZE;
4576 return ret;
4577 + }
4578 }
4579 return -EINVAL;
4580 }
4581 @@ -1311,7 +1320,7 @@ static int hub_configure(struct usb_hub *hub,
4582 }
4583 mutex_init(&hub->status_mutex);
4584
4585 - hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
4586 + hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
4587 if (!hub->descriptor) {
4588 ret = -ENOMEM;
4589 goto fail;
4590 @@ -1319,7 +1328,7 @@ static int hub_configure(struct usb_hub *hub,
4591
4592 /* Request the entire hub descriptor.
4593 * hub->descriptor can handle USB_MAXCHILDREN ports,
4594 - * but the hub can/will return fewer bytes here.
4595 + * but a (non-SS) hub can/will return fewer bytes here.
4596 */
4597 ret = get_hub_descriptor(hdev, hub->descriptor);
4598 if (ret < 0) {
4599 diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
4600 index a9039696476e..5133ab965229 100644
4601 --- a/drivers/usb/core/urb.c
4602 +++ b/drivers/usb/core/urb.c
4603 @@ -333,7 +333,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
4604 if (!urb || !urb->complete)
4605 return -EINVAL;
4606 if (urb->hcpriv) {
4607 - WARN_ONCE(1, "URB %p submitted while active\n", urb);
4608 + WARN_ONCE(1, "URB %pK submitted while active\n", urb);
4609 return -EBUSY;
4610 }
4611
4612 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
4613 index ce603dcbd493..a7e28095026d 100644
4614 --- a/drivers/usb/dwc3/gadget.c
4615 +++ b/drivers/usb/dwc3/gadget.c
4616 @@ -2856,6 +2856,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
4617 return IRQ_HANDLED;
4618 }
4619
4620 + /*
4621 + * With PCIe legacy interrupt, test shows that top-half irq handler can
4622 + * be called again after HW interrupt deassertion. Check if bottom-half
4623 + * irq event handler completes before caching new event to prevent
4624 + * losing events.
4625 + */
4626 + if (evt->flags & DWC3_EVENT_PENDING)
4627 + return IRQ_HANDLED;
4628 +
4629 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
4630 count &= DWC3_GEVNTCOUNT_MASK;
4631 if (!count)
4632 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
4633 index 86612ac3fda2..f6c7a2744e5c 100644
4634 --- a/drivers/usb/host/ohci-hcd.c
4635 +++ b/drivers/usb/host/ohci-hcd.c
4636 @@ -231,7 +231,8 @@ static int ohci_urb_enqueue (
4637
4638 /* Start up the I/O watchdog timer, if it's not running */
4639 if (!timer_pending(&ohci->io_watchdog) &&
4640 - list_empty(&ohci->eds_in_use)) {
4641 + list_empty(&ohci->eds_in_use) &&
4642 + !(ohci->flags & OHCI_QUIRK_QEMU)) {
4643 ohci->prev_frame_no = ohci_frame_no(ohci);
4644 mod_timer(&ohci->io_watchdog,
4645 jiffies + IO_WATCHDOG_DELAY);
4646 diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
4647 index bb1509675727..a84aebe9b0a9 100644
4648 --- a/drivers/usb/host/ohci-pci.c
4649 +++ b/drivers/usb/host/ohci-pci.c
4650 @@ -164,6 +164,15 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
4651 return 0;
4652 }
4653
4654 +static int ohci_quirk_qemu(struct usb_hcd *hcd)
4655 +{
4656 + struct ohci_hcd *ohci = hcd_to_ohci(hcd);
4657 +
4658 + ohci->flags |= OHCI_QUIRK_QEMU;
4659 + ohci_dbg(ohci, "enabled qemu quirk\n");
4660 + return 0;
4661 +}
4662 +
4663 /* List of quirks for OHCI */
4664 static const struct pci_device_id ohci_pci_quirks[] = {
4665 {
4666 @@ -214,6 +223,13 @@ static const struct pci_device_id ohci_pci_quirks[] = {
4667 PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399),
4668 .driver_data = (unsigned long)ohci_quirk_amd700,
4669 },
4670 + {
4671 + .vendor = PCI_VENDOR_ID_APPLE,
4672 + .device = 0x003f,
4673 + .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
4674 + .subdevice = PCI_SUBDEVICE_ID_QEMU,
4675 + .driver_data = (unsigned long)ohci_quirk_qemu,
4676 + },
4677
4678 /* FIXME for some of the early AMD 760 southbridges, OHCI
4679 * won't work at all. blacklist them.
4680 diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
4681 index 37f1725e7a46..a51b189bdbd8 100644
4682 --- a/drivers/usb/host/ohci.h
4683 +++ b/drivers/usb/host/ohci.h
4684 @@ -418,6 +418,7 @@ struct ohci_hcd {
4685 #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/
4686 #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */
4687 #define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */
4688 +#define OHCI_QUIRK_QEMU 0x1000 /* relax timing expectations */
4689
4690 // there are also chip quirks/bugs in init logic
4691
4692 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
4693 index 709b52841c87..8c6eafe8966c 100644
4694 --- a/drivers/usb/host/xhci-mem.c
4695 +++ b/drivers/usb/host/xhci-mem.c
4696 @@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
4697 }
4698
4699 if (max_packet) {
4700 - seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
4701 + seg->bounce_buf = kzalloc(max_packet, flags);
4702 if (!seg->bounce_buf) {
4703 dma_pool_free(xhci->segment_pool, seg->trbs, dma);
4704 kfree(seg);
4705 @@ -1721,7 +1721,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
4706 xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
4707 for (i = 0; i < num_sp; i++) {
4708 dma_addr_t dma;
4709 - void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
4710 + void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
4711 flags);
4712 if (!buf)
4713 goto fail_sp5;
4714 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
4715 index 93f566cb968b..e7d6752eff32 100644
4716 --- a/drivers/usb/host/xhci-pci.c
4717 +++ b/drivers/usb/host/xhci-pci.c
4718 @@ -52,6 +52,7 @@
4719 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
4720 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
4721 #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
4722 +#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
4723
4724 static const char hcd_name[] = "xhci_hcd";
4725
4726 @@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4727 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
4728 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
4729 pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
4730 - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
4731 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
4732 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
4733 xhci->quirks |= XHCI_PME_STUCK_QUIRK;
4734 }
4735 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
4736 @@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
4737 }
4738 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
4739 (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
4740 - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
4741 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
4742 + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
4743 xhci->quirks |= XHCI_MISSING_CAS;
4744
4745 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
4746 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
4747 index be1572331a64..ca8b0b1ae37d 100644
4748 --- a/drivers/usb/host/xhci-plat.c
4749 +++ b/drivers/usb/host/xhci-plat.c
4750 @@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
4751
4752 irq = platform_get_irq(pdev, 0);
4753 if (irq < 0)
4754 - return -ENODEV;
4755 + return irq;
4756
4757 /* Try to set 64-bit DMA first */
4758 if (WARN_ON(!pdev->dev.dma_mask))
4759 diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c
4760 index 6ddd08a32777..efecb87428b1 100644
4761 --- a/drivers/usb/misc/chaoskey.c
4762 +++ b/drivers/usb/misc/chaoskey.c
4763 @@ -194,7 +194,7 @@ static int chaoskey_probe(struct usb_interface *interface,
4764
4765 dev->in_ep = in_ep;
4766
4767 - if (udev->descriptor.idVendor != ALEA_VENDOR_ID)
4768 + if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
4769 dev->reads_started = 1;
4770
4771 dev->size = size;
4772 diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
4773 index 37c63cb39714..0ef29d202263 100644
4774 --- a/drivers/usb/misc/iowarrior.c
4775 +++ b/drivers/usb/misc/iowarrior.c
4776 @@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
4777 info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
4778
4779 /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
4780 - info.speed = le16_to_cpu(dev->udev->speed);
4781 + info.speed = dev->udev->speed;
4782 info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
4783 info.report_size = dev->report_size;
4784
4785 diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
4786 index c8fbe7b739a0..c2e2b2ea32d8 100644
4787 --- a/drivers/usb/misc/legousbtower.c
4788 +++ b/drivers/usb/misc/legousbtower.c
4789 @@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file)
4790 int subminor;
4791 int retval = 0;
4792 struct usb_interface *interface;
4793 - struct tower_reset_reply reset_reply;
4794 + struct tower_reset_reply *reset_reply;
4795 int result;
4796
4797 + reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
4798 +
4799 + if (!reset_reply) {
4800 + retval = -ENOMEM;
4801 + goto exit;
4802 + }
4803 +
4804 nonseekable_open(inode, file);
4805 subminor = iminor(inode);
4806
4807 @@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file)
4808 USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
4809 0,
4810 0,
4811 - &reset_reply,
4812 - sizeof(reset_reply),
4813 + reset_reply,
4814 + sizeof(*reset_reply),
4815 1000);
4816 if (result < 0) {
4817 dev_err(&dev->udev->dev,
4818 @@ -406,6 +413,7 @@ static int tower_open (struct inode *inode, struct file *file)
4819 mutex_unlock(&dev->lock);
4820
4821 exit:
4822 + kfree(reset_reply);
4823 return retval;
4824 }
4825
4826 @@ -808,7 +816,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
4827 struct lego_usb_tower *dev = NULL;
4828 struct usb_host_interface *iface_desc;
4829 struct usb_endpoint_descriptor* endpoint;
4830 - struct tower_get_version_reply get_version_reply;
4831 + struct tower_get_version_reply *get_version_reply = NULL;
4832 int i;
4833 int retval = -ENOMEM;
4834 int result;
4835 @@ -886,6 +894,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
4836 dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
4837 dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
4838
4839 + get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
4840 +
4841 + if (!get_version_reply) {
4842 + retval = -ENOMEM;
4843 + goto error;
4844 + }
4845 +
4846 /* get the firmware version and log it */
4847 result = usb_control_msg (udev,
4848 usb_rcvctrlpipe(udev, 0),
4849 @@ -893,18 +908,19 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
4850 USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
4851 0,
4852 0,
4853 - &get_version_reply,
4854 - sizeof(get_version_reply),
4855 + get_version_reply,
4856 + sizeof(*get_version_reply),
4857 1000);
4858 if (result < 0) {
4859 dev_err(idev, "LEGO USB Tower get version control request failed\n");
4860 retval = result;
4861 goto error;
4862 }
4863 - dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
4864 - "build %d\n", get_version_reply.major,
4865 - get_version_reply.minor,
4866 - le16_to_cpu(get_version_reply.build_no));
4867 + dev_info(&interface->dev,
4868 + "LEGO USB Tower firmware version is %d.%d build %d\n",
4869 + get_version_reply->major,
4870 + get_version_reply->minor,
4871 + le16_to_cpu(get_version_reply->build_no));
4872
4873 /* we can register the device now, as it is ready */
4874 usb_set_intfdata (interface, dev);
4875 @@ -925,9 +941,11 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
4876 USB_MAJOR, dev->minor);
4877
4878 exit:
4879 + kfree(get_version_reply);
4880 return retval;
4881
4882 error:
4883 + kfree(get_version_reply);
4884 tower_delete(dev);
4885 return retval;
4886 }
4887 diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
4888 index 806451418cfe..99beda9e241d 100644
4889 --- a/drivers/usb/musb/musb_host.c
4890 +++ b/drivers/usb/musb/musb_host.c
4891 @@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget)
4892 int ret;
4893 struct usb_hcd *hcd = musb->hcd;
4894
4895 - MUSB_HST_MODE(musb);
4896 - musb->xceiv->otg->default_a = 1;
4897 - musb->xceiv->otg->state = OTG_STATE_A_IDLE;
4898 -
4899 + if (musb->port_mode == MUSB_PORT_MODE_HOST) {
4900 + MUSB_HST_MODE(musb);
4901 + musb->xceiv->otg->default_a = 1;
4902 + musb->xceiv->otg->state = OTG_STATE_A_IDLE;
4903 + }
4904 otg_set_host(musb->xceiv->otg, &hcd->self);
4905 hcd->self.otg_port = 1;
4906 musb->xceiv->otg->host = &hcd->self;
4907 diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
4908 index e6959ccb4453..404742672658 100644
4909 --- a/drivers/usb/musb/tusb6010_omap.c
4910 +++ b/drivers/usb/musb/tusb6010_omap.c
4911 @@ -220,6 +220,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
4912 u32 dma_remaining;
4913 int src_burst, dst_burst;
4914 u16 csr;
4915 + u32 psize;
4916 int ch;
4917 s8 dmareq;
4918 s8 sync_dev;
4919 @@ -391,15 +392,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
4920
4921 if (chdat->tx) {
4922 /* Send transfer_packet_sz packets at a time */
4923 - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
4924 - chdat->transfer_packet_sz);
4925 + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
4926 + psize &= ~0x7ff;
4927 + psize |= chdat->transfer_packet_sz;
4928 + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
4929
4930 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
4931 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
4932 } else {
4933 /* Receive transfer_packet_sz packets at a time */
4934 - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
4935 - chdat->transfer_packet_sz << 16);
4936 + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
4937 + psize &= ~(0x7ff << 16);
4938 + psize |= (chdat->transfer_packet_sz << 16);
4939 + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
4940
4941 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
4942 TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
4943 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
4944 index 1dc75db16cbd..19394963f675 100644
4945 --- a/drivers/usb/serial/ftdi_sio.c
4946 +++ b/drivers/usb/serial/ftdi_sio.c
4947 @@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = {
4948 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
4949 { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
4950 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4951 - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
4952 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4953 - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
4954 - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4955 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
4956 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
4957 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
4958 + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
4959 { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
4960 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
4961 { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
4962 @@ -1508,9 +1508,9 @@ static int set_serial_info(struct tty_struct *tty,
4963 (new_serial.flags & ASYNC_FLAGS));
4964 priv->custom_divisor = new_serial.custom_divisor;
4965
4966 +check_and_exit:
4967 write_latency_timer(port);
4968
4969 -check_and_exit:
4970 if ((old_priv.flags & ASYNC_SPD_MASK) !=
4971 (priv->flags & ASYNC_SPD_MASK)) {
4972 if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
4973 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
4974 index 71fb9e59db71..4fcf1cecb6d7 100644
4975 --- a/drivers/usb/serial/ftdi_sio_ids.h
4976 +++ b/drivers/usb/serial/ftdi_sio_ids.h
4977 @@ -882,6 +882,8 @@
4978 /* Olimex */
4979 #define OLIMEX_VID 0x15BA
4980 #define OLIMEX_ARM_USB_OCD_PID 0x0003
4981 +#define OLIMEX_ARM_USB_TINY_PID 0x0004
4982 +#define OLIMEX_ARM_USB_TINY_H_PID 0x002a
4983 #define OLIMEX_ARM_USB_OCD_H_PID 0x002b
4984
4985 /*
4986 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
4987 index f1a8fdcd8674..e98532feb0cc 100644
4988 --- a/drivers/usb/serial/io_ti.c
4989 +++ b/drivers/usb/serial/io_ti.c
4990 @@ -2349,8 +2349,11 @@ static void change_port_settings(struct tty_struct *tty,
4991 if (!baud) {
4992 /* pick a default, any default... */
4993 baud = 9600;
4994 - } else
4995 + } else {
4996 + /* Avoid a zero divisor. */
4997 + baud = min(baud, 461550);
4998 tty_encode_baud_rate(tty, baud, baud);
4999 + }
5000
5001 edge_port->baud_rate = baud;
5002 config->wBaudRate = (__u16)((461550L + baud/2) / baud);
5003 diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
5004 index edbc81f205c2..70f346f1aa86 100644
5005 --- a/drivers/usb/serial/mct_u232.c
5006 +++ b/drivers/usb/serial/mct_u232.c
5007 @@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
5008 return -ENOMEM;
5009
5010 divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
5011 - put_unaligned_le32(cpu_to_le32(divisor), buf);
5012 + put_unaligned_le32(divisor, buf);
5013 rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
5014 MCT_U232_SET_BAUD_RATE_REQUEST,
5015 MCT_U232_SET_REQUEST_TYPE,
5016 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
5017 index af67a0de6b5d..3bf61acfc26b 100644
5018 --- a/drivers/usb/serial/option.c
5019 +++ b/drivers/usb/serial/option.c
5020 @@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb);
5021 #define TELIT_PRODUCT_LE922_USBCFG0 0x1042
5022 #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
5023 #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
5024 +#define TELIT_PRODUCT_ME910 0x1100
5025 #define TELIT_PRODUCT_LE920 0x1200
5026 #define TELIT_PRODUCT_LE910 0x1201
5027 #define TELIT_PRODUCT_LE910_USBCFG4 0x1206
5028 @@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = {
5029 .reserved = BIT(5) | BIT(6),
5030 };
5031
5032 +static const struct option_blacklist_info telit_me910_blacklist = {
5033 + .sendsetup = BIT(0),
5034 + .reserved = BIT(1) | BIT(3),
5035 +};
5036 +
5037 static const struct option_blacklist_info telit_le910_blacklist = {
5038 .sendsetup = BIT(0),
5039 .reserved = BIT(1) | BIT(2),
5040 @@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = {
5041 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
5042 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
5043 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
5044 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
5045 + .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
5046 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
5047 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
5048 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
5049 diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
5050 index 38b3f0d8cd58..fd509ed6cf70 100644
5051 --- a/drivers/usb/serial/qcserial.c
5052 +++ b/drivers/usb/serial/qcserial.c
5053 @@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
5054 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
5055 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
5056 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
5057 + {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
5058 + {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
5059 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
5060 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
5061 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
5062 diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
5063 index 02bdaa912164..4340b4925daa 100644
5064 --- a/drivers/usb/storage/ene_ub6250.c
5065 +++ b/drivers/usb/storage/ene_ub6250.c
5066 @@ -446,6 +446,10 @@ struct ms_lib_ctrl {
5067 #define SD_BLOCK_LEN 9
5068
5069 struct ene_ub6250_info {
5070 +
5071 + /* I/O bounce buffer */
5072 + u8 *bbuf;
5073 +
5074 /* for 6250 code */
5075 struct SD_STATUS SD_Status;
5076 struct MS_STATUS MS_Status;
5077 @@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag);
5078
5079 static void ene_ub6250_info_destructor(void *extra)
5080 {
5081 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
5082 +
5083 if (!extra)
5084 return;
5085 + kfree(info->bbuf);
5086 }
5087
5088 static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
5089 @@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
5090 u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
5091 {
5092 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
5093 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
5094 + u8 *bbuf = info->bbuf;
5095 int result;
5096 - u8 ExtBuf[4];
5097 u32 bn = PhyBlockAddr * 0x20 + PageNum;
5098
5099 result = ene_load_bincode(us, MS_RW_PATTERN);
5100 @@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
5101 bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
5102 bcb->CDB[6] = 0x01;
5103
5104 - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
5105 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
5106 if (result != USB_STOR_XFER_GOOD)
5107 return USB_STOR_TRANSPORT_ERROR;
5108
5109 @@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr,
5110 ExtraDat->status0 = 0x10; /* Not yet,fireware support */
5111
5112 ExtraDat->status1 = 0x00; /* Not yet,fireware support */
5113 - ExtraDat->ovrflg = ExtBuf[0];
5114 - ExtraDat->mngflg = ExtBuf[1];
5115 - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
5116 + ExtraDat->ovrflg = bbuf[0];
5117 + ExtraDat->mngflg = bbuf[1];
5118 + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
5119
5120 return USB_STOR_TRANSPORT_GOOD;
5121 }
5122 @@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
5123 u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
5124 {
5125 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
5126 + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
5127 + u8 *bbuf = info->bbuf;
5128 int result;
5129 - u8 ExtBuf[4];
5130
5131 memset(bcb, 0, sizeof(struct bulk_cb_wrap));
5132 bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
5133 @@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
5134 bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
5135 bcb->CDB[6] = 0x01;
5136
5137 - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
5138 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
5139 if (result != USB_STOR_XFER_GOOD)
5140 return USB_STOR_TRANSPORT_ERROR;
5141
5142 @@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock,
5143 ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */
5144 ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */
5145 ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */
5146 - ExtraDat->ovrflg = ExtBuf[0];
5147 - ExtraDat->mngflg = ExtBuf[1];
5148 - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
5149 + ExtraDat->ovrflg = bbuf[0];
5150 + ExtraDat->mngflg = bbuf[1];
5151 + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
5152
5153 return USB_STOR_TRANSPORT_GOOD;
5154 }
5155 @@ -1558,9 +1567,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
5156 u16 PhyBlock, newblk, i;
5157 u16 LogStart, LogEnde;
5158 struct ms_lib_type_extdat extdat;
5159 - u8 buf[0x200];
5160 u32 count = 0, index = 0;
5161 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
5162 + u8 *bbuf = info->bbuf;
5163
5164 for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
5165 ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
5166 @@ -1574,14 +1583,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st)
5167 }
5168
5169 if (count == PhyBlock) {
5170 - ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
5171 + ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
5172 + bbuf);
5173 count += 0x80;
5174 }
5175 index = (PhyBlock % 0x80) * 4;
5176
5177 - extdat.ovrflg = buf[index];
5178 - extdat.mngflg = buf[index+1];
5179 - extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
5180 + extdat.ovrflg = bbuf[index];
5181 + extdat.mngflg = bbuf[index+1];
5182 + extdat.logadr = memstick_logaddr(bbuf[index+2],
5183 + bbuf[index+3]);
5184
5185 if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
5186 ms_lib_setacquired_errorblock(us, PhyBlock);
5187 @@ -2064,9 +2075,9 @@ static int ene_ms_init(struct us_data *us)
5188 {
5189 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
5190 int result;
5191 - u8 buf[0x200];
5192 u16 MSP_BlockSize, MSP_UserAreaBlocks;
5193 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
5194 + u8 *bbuf = info->bbuf;
5195
5196 printk(KERN_INFO "transport --- ENE_MSInit\n");
5197
5198 @@ -2085,13 +2096,13 @@ static int ene_ms_init(struct us_data *us)
5199 bcb->CDB[0] = 0xF1;
5200 bcb->CDB[1] = 0x01;
5201
5202 - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
5203 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
5204 if (result != USB_STOR_XFER_GOOD) {
5205 printk(KERN_ERR "Execution MS Init Code Fail !!\n");
5206 return USB_STOR_TRANSPORT_ERROR;
5207 }
5208 /* the same part to test ENE */
5209 - info->MS_Status = *(struct MS_STATUS *)&buf[0];
5210 + info->MS_Status = *(struct MS_STATUS *) bbuf;
5211
5212 if (info->MS_Status.Insert && info->MS_Status.Ready) {
5213 printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
5214 @@ -2100,15 +2111,15 @@ static int ene_ms_init(struct us_data *us)
5215 printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
5216 printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
5217 if (info->MS_Status.IsMSPro) {
5218 - MSP_BlockSize = (buf[6] << 8) | buf[7];
5219 - MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
5220 + MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
5221 + MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
5222 info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
5223 } else {
5224 ms_card_init(us); /* Card is MS (to ms.c)*/
5225 }
5226 usb_stor_dbg(us, "MS Init Code OK !!\n");
5227 } else {
5228 - usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]);
5229 + usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
5230 return USB_STOR_TRANSPORT_ERROR;
5231 }
5232
5233 @@ -2118,9 +2129,9 @@ static int ene_ms_init(struct us_data *us)
5234 static int ene_sd_init(struct us_data *us)
5235 {
5236 int result;
5237 - u8 buf[0x200];
5238 struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
5239 struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
5240 + u8 *bbuf = info->bbuf;
5241
5242 usb_stor_dbg(us, "transport --- ENE_SDInit\n");
5243 /* SD Init Part-1 */
5244 @@ -2154,17 +2165,17 @@ static int ene_sd_init(struct us_data *us)
5245 bcb->Flags = US_BULK_FLAG_IN;
5246 bcb->CDB[0] = 0xF1;
5247
5248 - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
5249 + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
5250 if (result != USB_STOR_XFER_GOOD) {
5251 usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
5252 return USB_STOR_TRANSPORT_ERROR;
5253 }
5254
5255 - info->SD_Status = *(struct SD_STATUS *)&buf[0];
5256 + info->SD_Status = *(struct SD_STATUS *) bbuf;
5257 if (info->SD_Status.Insert && info->SD_Status.Ready) {
5258 struct SD_STATUS *s = &info->SD_Status;
5259
5260 - ene_get_card_status(us, (unsigned char *)&buf);
5261 + ene_get_card_status(us, bbuf);
5262 usb_stor_dbg(us, "Insert = %x\n", s->Insert);
5263 usb_stor_dbg(us, "Ready = %x\n", s->Ready);
5264 usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
5265 @@ -2172,7 +2183,7 @@ static int ene_sd_init(struct us_data *us)
5266 usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
5267 usb_stor_dbg(us, "WtP = %x\n", s->WtP);
5268 } else {
5269 - usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]);
5270 + usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
5271 return USB_STOR_TRANSPORT_ERROR;
5272 }
5273 return USB_STOR_TRANSPORT_GOOD;
5274 @@ -2182,13 +2193,15 @@ static int ene_sd_init(struct us_data *us)
5275 static int ene_init(struct us_data *us)
5276 {
5277 int result;
5278 - u8 misc_reg03 = 0;
5279 + u8 misc_reg03;
5280 struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
5281 + u8 *bbuf = info->bbuf;
5282
5283 - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
5284 + result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
5285 if (result != USB_STOR_XFER_GOOD)
5286 return USB_STOR_TRANSPORT_ERROR;
5287
5288 + misc_reg03 = bbuf[0];
5289 if (misc_reg03 & 0x01) {
5290 if (!info->SD_Status.Ready) {
5291 result = ene_sd_init(us);
5292 @@ -2305,8 +2318,9 @@ static int ene_ub6250_probe(struct usb_interface *intf,
5293 const struct usb_device_id *id)
5294 {
5295 int result;
5296 - u8 misc_reg03 = 0;
5297 + u8 misc_reg03;
5298 struct us_data *us;
5299 + struct ene_ub6250_info *info;
5300
5301 result = usb_stor_probe1(&us, intf, id,
5302 (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
5303 @@ -2315,11 +2329,16 @@ static int ene_ub6250_probe(struct usb_interface *intf,
5304 return result;
5305
5306 /* FIXME: where should the code alloc extra buf ? */
5307 - if (!us->extra) {
5308 - us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
5309 - if (!us->extra)
5310 - return -ENOMEM;
5311 - us->extra_destructor = ene_ub6250_info_destructor;
5312 + us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
5313 + if (!us->extra)
5314 + return -ENOMEM;
5315 + us->extra_destructor = ene_ub6250_info_destructor;
5316 +
5317 + info = (struct ene_ub6250_info *)(us->extra);
5318 + info->bbuf = kmalloc(512, GFP_KERNEL);
5319 + if (!info->bbuf) {
5320 + kfree(us->extra);
5321 + return -ENOMEM;
5322 }
5323
5324 us->transport_name = "ene_ub6250";
5325 @@ -2331,12 +2350,13 @@ static int ene_ub6250_probe(struct usb_interface *intf,
5326 return result;
5327
5328 /* probe card type */
5329 - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
5330 + result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
5331 if (result != USB_STOR_XFER_GOOD) {
5332 usb_stor_disconnect(intf);
5333 return USB_STOR_TRANSPORT_ERROR;
5334 }
5335
5336 + misc_reg03 = info->bbuf[0];
5337 if (!(misc_reg03 & 0x01)) {
5338 pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
5339 "It does not support SM cards.\n");
5340 diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
5341 index 6345e85822a4..a50cf45e530f 100644
5342 --- a/drivers/uwb/i1480/dfu/usb.c
5343 +++ b/drivers/uwb/i1480/dfu/usb.c
5344 @@ -341,6 +341,7 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size)
5345 static
5346 int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
5347 {
5348 + struct usb_device *udev = interface_to_usbdev(iface);
5349 struct i1480_usb *i1480_usb;
5350 struct i1480 *i1480;
5351 struct device *dev = &iface->dev;
5352 @@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
5353 iface->cur_altsetting->desc.bInterfaceNumber);
5354 goto error;
5355 }
5356 - if (iface->num_altsetting > 1
5357 - && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
5358 + if (iface->num_altsetting > 1 &&
5359 + le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
5360 /* Need altsetting #1 [HW QUIRK] or EP1 won't work */
5361 result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
5362 if (result < 0)
5363 diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
5364 index 99ebf6ea3de6..5615f4013924 100644
5365 --- a/drivers/watchdog/pcwd_usb.c
5366 +++ b/drivers/watchdog/pcwd_usb.c
5367 @@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface,
5368 return -ENODEV;
5369 }
5370
5371 + if (iface_desc->desc.bNumEndpoints < 1)
5372 + return -ENODEV;
5373 +
5374 /* check out the endpoint: it has to be Interrupt & IN */
5375 endpoint = &iface_desc->endpoint[0].desc;
5376
5377 diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
5378 index 80bb956e14e5..d1bbdc9dda76 100644
5379 --- a/fs/crypto/fname.c
5380 +++ b/fs/crypto/fname.c
5381 @@ -300,7 +300,7 @@ int fscrypt_fname_disk_to_usr(struct inode *inode,
5382 } else {
5383 memset(buf, 0, 8);
5384 }
5385 - memcpy(buf + 8, iname->name + iname->len - 16, 16);
5386 + memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
5387 oname->name[0] = '_';
5388 oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
5389 return 0;
5390 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
5391 index c4a389a6027b..423a21cd077c 100644
5392 --- a/fs/ext4/namei.c
5393 +++ b/fs/ext4/namei.c
5394 @@ -1255,9 +1255,9 @@ static inline int ext4_match(struct ext4_filename *fname,
5395 if (unlikely(!name)) {
5396 if (fname->usr_fname->name[0] == '_') {
5397 int ret;
5398 - if (de->name_len < 16)
5399 + if (de->name_len <= 32)
5400 return 0;
5401 - ret = memcmp(de->name + de->name_len - 16,
5402 + ret = memcmp(de->name + ((de->name_len - 17) & ~15),
5403 fname->crypto_buf.name + 8, 16);
5404 return (ret == 0) ? 1 : 0;
5405 }
5406 diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
5407 index ebdc90fc71b7..11f3717ce481 100644
5408 --- a/fs/f2fs/dir.c
5409 +++ b/fs/f2fs/dir.c
5410 @@ -130,19 +130,29 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname,
5411 continue;
5412 }
5413
5414 - /* encrypted case */
5415 + if (de->hash_code != namehash)
5416 + goto not_match;
5417 +
5418 de_name.name = d->filename[bit_pos];
5419 de_name.len = le16_to_cpu(de->name_len);
5420
5421 - /* show encrypted name */
5422 - if (fname->hash) {
5423 - if (de->hash_code == fname->hash)
5424 - goto found;
5425 - } else if (de_name.len == name->len &&
5426 - de->hash_code == namehash &&
5427 - !memcmp(de_name.name, name->name, name->len))
5428 +#ifdef CONFIG_F2FS_FS_ENCRYPTION
5429 + if (unlikely(!name->name)) {
5430 + if (fname->usr_fname->name[0] == '_') {
5431 + if (de_name.len > 32 &&
5432 + !memcmp(de_name.name + ((de_name.len - 17) & ~15),
5433 + fname->crypto_buf.name + 8, 16))
5434 + goto found;
5435 + goto not_match;
5436 + }
5437 + name->name = fname->crypto_buf.name;
5438 + name->len = fname->crypto_buf.len;
5439 + }
5440 +#endif
5441 + if (de_name.len == name->len &&
5442 + !memcmp(de_name.name, name->name, name->len))
5443 goto found;
5444 -
5445 +not_match:
5446 if (max_slots && max_len > *max_slots)
5447 *max_slots = max_len;
5448 max_len = 0;
5449 @@ -170,12 +180,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
5450 struct f2fs_dir_entry *de = NULL;
5451 bool room = false;
5452 int max_slots;
5453 - f2fs_hash_t namehash;
5454 -
5455 - if(fname->hash)
5456 - namehash = cpu_to_le32(fname->hash);
5457 - else
5458 - namehash = f2fs_dentry_hash(&name);
5459 + f2fs_hash_t namehash = f2fs_dentry_hash(&name, fname);
5460
5461 nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
5462 nblock = bucket_blocks(level);
5463 @@ -539,7 +544,7 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
5464
5465 level = 0;
5466 slots = GET_DENTRY_SLOTS(new_name->len);
5467 - dentry_hash = f2fs_dentry_hash(new_name);
5468 + dentry_hash = f2fs_dentry_hash(new_name, NULL);
5469
5470 current_depth = F2FS_I(dir)->i_current_depth;
5471 if (F2FS_I(dir)->chash == dentry_hash) {
5472 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
5473 index 3a1640be7ffc..c12f695923b6 100644
5474 --- a/fs/f2fs/f2fs.h
5475 +++ b/fs/f2fs/f2fs.h
5476 @@ -2016,7 +2016,8 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi);
5477 /*
5478 * hash.c
5479 */
5480 -f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
5481 +f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
5482 + struct fscrypt_name *fname);
5483
5484 /*
5485 * node.c
5486 diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
5487 index 71b7206c431e..eb2e031ea887 100644
5488 --- a/fs/f2fs/hash.c
5489 +++ b/fs/f2fs/hash.c
5490 @@ -70,7 +70,8 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
5491 *buf++ = pad;
5492 }
5493
5494 -f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
5495 +f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
5496 + struct fscrypt_name *fname)
5497 {
5498 __u32 hash;
5499 f2fs_hash_t f2fs_hash;
5500 @@ -79,6 +80,10 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
5501 const unsigned char *name = name_info->name;
5502 size_t len = name_info->len;
5503
5504 + /* encrypted bigname case */
5505 + if (fname && !fname->disk_name.name)
5506 + return cpu_to_le32(fname->hash);
5507 +
5508 if (is_dot_dotdot(name_info))
5509 return 0;
5510
5511 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
5512 index 5f1a67f756af..a21faa1c6817 100644
5513 --- a/fs/f2fs/inline.c
5514 +++ b/fs/f2fs/inline.c
5515 @@ -294,7 +294,7 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
5516 return NULL;
5517 }
5518
5519 - namehash = f2fs_dentry_hash(&name);
5520 + namehash = f2fs_dentry_hash(&name, fname);
5521
5522 inline_dentry = inline_data_addr(ipage);
5523
5524 @@ -531,7 +531,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
5525
5526 f2fs_wait_on_page_writeback(ipage, NODE, true);
5527
5528 - name_hash = f2fs_dentry_hash(new_name);
5529 + name_hash = f2fs_dentry_hash(new_name, NULL);
5530 make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
5531 f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
5532
5533 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
5534 index 4e894d301c88..fc9b04941739 100644
5535 --- a/fs/nfs/nfs4proc.c
5536 +++ b/fs/nfs/nfs4proc.c
5537 @@ -2385,8 +2385,10 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
5538 if (status != 0)
5539 return status;
5540 }
5541 - if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
5542 + if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
5543 + nfs4_sequence_free_slot(&o_res->seq_res);
5544 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
5545 + }
5546 return 0;
5547 }
5548
5549 diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
5550 index 965db474f4b0..142a74f3c59b 100644
5551 --- a/fs/nfs/pagelist.c
5552 +++ b/fs/nfs/pagelist.c
5553 @@ -29,13 +29,14 @@
5554 static struct kmem_cache *nfs_page_cachep;
5555 static const struct rpc_call_ops nfs_pgio_common_ops;
5556
5557 -static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
5558 +static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount,
5559 + gfp_t gfp_flags)
5560 {
5561 p->npages = pagecount;
5562 if (pagecount <= ARRAY_SIZE(p->page_array))
5563 p->pagevec = p->page_array;
5564 else {
5565 - p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
5566 + p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
5567 if (!p->pagevec)
5568 p->npages = 0;
5569 }
5570 @@ -681,6 +682,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
5571 {
5572 struct nfs_pgio_mirror *new;
5573 int i;
5574 + gfp_t gfp_flags = GFP_KERNEL;
5575
5576 desc->pg_moreio = 0;
5577 desc->pg_inode = inode;
5578 @@ -700,8 +702,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
5579 if (pg_ops->pg_get_mirror_count) {
5580 /* until we have a request, we don't have an lseg and no
5581 * idea how many mirrors there will be */
5582 + if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
5583 + gfp_flags = GFP_NOIO;
5584 new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
5585 - sizeof(struct nfs_pgio_mirror), GFP_KERNEL);
5586 + sizeof(struct nfs_pgio_mirror), gfp_flags);
5587 desc->pg_mirrors_dynamic = new;
5588 desc->pg_mirrors = new;
5589
5590 @@ -755,9 +759,12 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
5591 struct list_head *head = &mirror->pg_list;
5592 struct nfs_commit_info cinfo;
5593 unsigned int pagecount, pageused;
5594 + gfp_t gfp_flags = GFP_KERNEL;
5595
5596 pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
5597 - if (!nfs_pgarray_set(&hdr->page_array, pagecount)) {
5598 + if (desc->pg_rw_ops->rw_mode == FMODE_WRITE)
5599 + gfp_flags = GFP_NOIO;
5600 + if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) {
5601 nfs_pgio_error(hdr);
5602 desc->pg_error = -ENOMEM;
5603 return desc->pg_error;
5604 diff --git a/fs/nfs/write.c b/fs/nfs/write.c
5605 index 53211838f72a..e4772a8340f8 100644
5606 --- a/fs/nfs/write.c
5607 +++ b/fs/nfs/write.c
5608 @@ -548,9 +548,9 @@ static void nfs_write_error_remove_page(struct nfs_page *req)
5609 {
5610 nfs_unlock_request(req);
5611 nfs_end_page_writeback(req);
5612 - nfs_release_request(req);
5613 generic_error_remove_page(page_file_mapping(req->wb_page),
5614 req->wb_page);
5615 + nfs_release_request(req);
5616 }
5617
5618 /*
5619 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
5620 index abb09b580389..650226f33298 100644
5621 --- a/fs/nfsd/nfs4proc.c
5622 +++ b/fs/nfsd/nfs4proc.c
5623 @@ -1273,7 +1273,8 @@ nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
5624 return NULL;
5625 }
5626
5627 - if (!(exp->ex_layout_types & (1 << layout_type))) {
5628 + if (layout_type >= LAYOUT_TYPE_MAX ||
5629 + !(exp->ex_layout_types & (1 << layout_type))) {
5630 dprintk("%s: layout type %d not supported\n",
5631 __func__, layout_type);
5632 return NULL;
5633 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
5634 index c2d2895a1ec1..2ee80e1f5230 100644
5635 --- a/fs/nfsd/nfs4xdr.c
5636 +++ b/fs/nfsd/nfs4xdr.c
5637 @@ -4081,8 +4081,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
5638 struct nfsd4_getdeviceinfo *gdev)
5639 {
5640 struct xdr_stream *xdr = &resp->xdr;
5641 - const struct nfsd4_layout_ops *ops =
5642 - nfsd4_layout_ops[gdev->gd_layout_type];
5643 + const struct nfsd4_layout_ops *ops;
5644 u32 starting_len = xdr->buf->len, needed_len;
5645 __be32 *p;
5646
5647 @@ -4099,6 +4098,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr,
5648
5649 /* If maxcount is 0 then just update notifications */
5650 if (gdev->gd_maxcount != 0) {
5651 + ops = nfsd4_layout_ops[gdev->gd_layout_type];
5652 nfserr = ops->encode_getdeviceinfo(xdr, gdev);
5653 if (nfserr) {
5654 /*
5655 @@ -4151,8 +4151,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
5656 struct nfsd4_layoutget *lgp)
5657 {
5658 struct xdr_stream *xdr = &resp->xdr;
5659 - const struct nfsd4_layout_ops *ops =
5660 - nfsd4_layout_ops[lgp->lg_layout_type];
5661 + const struct nfsd4_layout_ops *ops;
5662 __be32 *p;
5663
5664 dprintk("%s: err %d\n", __func__, nfserr);
5665 @@ -4175,6 +4174,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr,
5666 *p++ = cpu_to_be32(lgp->lg_seg.iomode);
5667 *p++ = cpu_to_be32(lgp->lg_layout_type);
5668
5669 + ops = nfsd4_layout_ops[lgp->lg_layout_type];
5670 nfserr = ops->encode_layoutget(xdr, lgp);
5671 out:
5672 kfree(lgp->lg_content);
5673 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
5674 index 7ebfca6a1427..258e8f635148 100644
5675 --- a/fs/notify/fanotify/fanotify_user.c
5676 +++ b/fs/notify/fanotify/fanotify_user.c
5677 @@ -294,27 +294,37 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
5678 }
5679
5680 ret = copy_event_to_user(group, kevent, buf);
5681 + if (unlikely(ret == -EOPENSTALE)) {
5682 + /*
5683 + * We cannot report events with stale fd so drop it.
5684 + * Setting ret to 0 will continue the event loop and
5685 + * do the right thing if there are no more events to
5686 + * read (i.e. return bytes read, -EAGAIN or wait).
5687 + */
5688 + ret = 0;
5689 + }
5690 +
5691 /*
5692 * Permission events get queued to wait for response. Other
5693 * events can be destroyed now.
5694 */
5695 if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
5696 fsnotify_destroy_event(group, kevent);
5697 - if (ret < 0)
5698 - break;
5699 } else {
5700 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
5701 - if (ret < 0) {
5702 + if (ret <= 0) {
5703 FANOTIFY_PE(kevent)->response = FAN_DENY;
5704 wake_up(&group->fanotify_data.access_waitq);
5705 - break;
5706 + } else {
5707 + spin_lock(&group->notification_lock);
5708 + list_add_tail(&kevent->list,
5709 + &group->fanotify_data.access_list);
5710 + spin_unlock(&group->notification_lock);
5711 }
5712 - spin_lock(&group->notification_lock);
5713 - list_add_tail(&kevent->list,
5714 - &group->fanotify_data.access_list);
5715 - spin_unlock(&group->notification_lock);
5716 #endif
5717 }
5718 + if (ret < 0)
5719 + break;
5720 buf += ret;
5721 count -= ret;
5722 }
5723 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
5724 index 5f2dc2032c79..6047471575bb 100644
5725 --- a/fs/proc/generic.c
5726 +++ b/fs/proc/generic.c
5727 @@ -471,6 +471,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
5728 ent->data = NULL;
5729 ent->proc_fops = NULL;
5730 ent->proc_iops = NULL;
5731 + parent->nlink++;
5732 if (proc_register(parent, ent) < 0) {
5733 kfree(ent);
5734 parent->nlink--;
5735 diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
5736 index dd85f3503410..039e6ababb67 100644
5737 --- a/include/linux/hid-sensor-hub.h
5738 +++ b/include/linux/hid-sensor-hub.h
5739 @@ -231,6 +231,8 @@ struct hid_sensor_common {
5740 unsigned usage_id;
5741 atomic_t data_ready;
5742 atomic_t user_requested_state;
5743 + int poll_interval;
5744 + int raw_hystersis;
5745 struct iio_trigger *trigger;
5746 struct hid_sensor_hub_attribute_info poll;
5747 struct hid_sensor_hub_attribute_info report_state;
5748 diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
5749 index 8f6849084248..e23392517db9 100644
5750 --- a/include/linux/kprobes.h
5751 +++ b/include/linux/kprobes.h
5752 @@ -330,7 +330,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
5753 int write, void __user *buffer,
5754 size_t *length, loff_t *ppos);
5755 #endif
5756 -
5757 +extern void wait_for_kprobe_optimizer(void);
5758 +#else
5759 +static inline void wait_for_kprobe_optimizer(void) { }
5760 #endif /* CONFIG_OPTPROBES */
5761 #ifdef CONFIG_KPROBES_ON_FTRACE
5762 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
5763 diff --git a/kernel/fork.c b/kernel/fork.c
5764 index ba8a01564985..59faac4de181 100644
5765 --- a/kernel/fork.c
5766 +++ b/kernel/fork.c
5767 @@ -521,7 +521,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
5768 set_task_stack_end_magic(tsk);
5769
5770 #ifdef CONFIG_CC_STACKPROTECTOR
5771 - tsk->stack_canary = get_random_int();
5772 + tsk->stack_canary = get_random_long();
5773 #endif
5774
5775 /*
5776 @@ -1773,11 +1773,13 @@ static __latent_entropy struct task_struct *copy_process(
5777 */
5778 recalc_sigpending();
5779 if (signal_pending(current)) {
5780 - spin_unlock(&current->sighand->siglock);
5781 - write_unlock_irq(&tasklist_lock);
5782 retval = -ERESTARTNOINTR;
5783 goto bad_fork_cancel_cgroup;
5784 }
5785 + if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
5786 + retval = -ENOMEM;
5787 + goto bad_fork_cancel_cgroup;
5788 + }
5789
5790 if (likely(p->pid)) {
5791 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
5792 @@ -1828,6 +1830,8 @@ static __latent_entropy struct task_struct *copy_process(
5793 return p;
5794
5795 bad_fork_cancel_cgroup:
5796 + spin_unlock(&current->sighand->siglock);
5797 + write_unlock_irq(&tasklist_lock);
5798 cgroup_cancel_fork(p);
5799 bad_fork_free_pid:
5800 threadgroup_change_end(current);
5801 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
5802 index be3c34e4f2ac..077c87f40f4d 100644
5803 --- a/kernel/irq/chip.c
5804 +++ b/kernel/irq/chip.c
5805 @@ -877,8 +877,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
5806 if (!desc)
5807 return;
5808
5809 - __irq_do_set_handler(desc, handle, 1, NULL);
5810 desc->irq_common_data.handler_data = data;
5811 + __irq_do_set_handler(desc, handle, 1, NULL);
5812
5813 irq_put_desc_busunlock(desc, flags);
5814 }
5815 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5816 index d63095472ea9..a1a07cf1101f 100644
5817 --- a/kernel/kprobes.c
5818 +++ b/kernel/kprobes.c
5819 @@ -563,7 +563,7 @@ static void kprobe_optimizer(struct work_struct *work)
5820 }
5821
5822 /* Wait for completing optimization and unoptimization */
5823 -static void wait_for_kprobe_optimizer(void)
5824 +void wait_for_kprobe_optimizer(void)
5825 {
5826 mutex_lock(&kprobe_mutex);
5827
5828 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
5829 index eef2ce968636..3976dd57db78 100644
5830 --- a/kernel/pid_namespace.c
5831 +++ b/kernel/pid_namespace.c
5832 @@ -274,7 +274,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
5833 * if reparented.
5834 */
5835 for (;;) {
5836 - set_current_state(TASK_UNINTERRUPTIBLE);
5837 + set_current_state(TASK_INTERRUPTIBLE);
5838 if (pid_ns->nr_hashed == init_pids)
5839 break;
5840 schedule();
5841 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
5842 index eb6c9f1d3a93..8d2b4d8fd714 100644
5843 --- a/kernel/trace/trace_kprobe.c
5844 +++ b/kernel/trace/trace_kprobe.c
5845 @@ -1484,6 +1484,11 @@ static __init int kprobe_trace_self_tests_init(void)
5846
5847 end:
5848 release_all_trace_kprobes();
5849 + /*
5850 + * Wait for the optimizer work to finish. Otherwise it might fiddle
5851 + * with probes in already freed __init text.
5852 + */
5853 + wait_for_kprobe_optimizer();
5854 if (warn)
5855 pr_cont("NG: Some tests are failed. Please check them.\n");
5856 else
5857 diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
5858 index 48d0dc89b58d..e735f781e4f3 100644
5859 --- a/net/ipx/af_ipx.c
5860 +++ b/net/ipx/af_ipx.c
5861 @@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg)
5862 sipx->sipx_network = ipxif->if_netnum;
5863 memcpy(sipx->sipx_node, ipxif->if_node,
5864 sizeof(sipx->sipx_node));
5865 - rc = -EFAULT;
5866 + rc = 0;
5867 if (copy_to_user(arg, &ifr, sizeof(ifr)))
5868 - break;
5869 + rc = -EFAULT;
5870 ipxitf_put(ipxif);
5871 - rc = 0;
5872 break;
5873 }
5874 case SIOCAIPXITFCRT:
5875 diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
5876 index 389325ac6067..097459830454 100644
5877 --- a/security/integrity/ima/ima_appraise.c
5878 +++ b/security/integrity/ima/ima_appraise.c
5879 @@ -204,10 +204,11 @@ int ima_appraise_measurement(enum ima_hooks func,
5880
5881 cause = "missing-hash";
5882 status = INTEGRITY_NOLABEL;
5883 - if (opened & FILE_CREATED) {
5884 + if (opened & FILE_CREATED)
5885 iint->flags |= IMA_NEW_FILE;
5886 + if ((iint->flags & IMA_NEW_FILE) &&
5887 + !(iint->flags & IMA_DIGSIG_REQUIRED))
5888 status = INTEGRITY_PASS;
5889 - }
5890 goto out;
5891 }
5892
5893 diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
5894 index 043065867656..0f41257d339e 100644
5895 --- a/sound/hda/hdac_controller.c
5896 +++ b/sound/hda/hdac_controller.c
5897 @@ -106,7 +106,11 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus)
5898 /* disable ringbuffer DMAs */
5899 snd_hdac_chip_writeb(bus, RIRBCTL, 0);
5900 snd_hdac_chip_writeb(bus, CORBCTL, 0);
5901 + spin_unlock_irq(&bus->reg_lock);
5902 +
5903 hdac_wait_for_cmd_dmas(bus);
5904 +
5905 + spin_lock_irq(&bus->reg_lock);
5906 /* disable unsolicited responses */
5907 snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0);
5908 spin_unlock_irq(&bus->reg_lock);
5909 diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
5910 index 8c0f3b89b5bc..e78b5f055f25 100644
5911 --- a/sound/soc/codecs/cs4271.c
5912 +++ b/sound/soc/codecs/cs4271.c
5913 @@ -498,7 +498,7 @@ static int cs4271_reset(struct snd_soc_codec *codec)
5914 struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
5915
5916 if (gpio_is_valid(cs4271->gpio_nreset)) {
5917 - gpio_set_value(cs4271->gpio_nreset, 0);
5918 + gpio_direction_output(cs4271->gpio_nreset, 0);
5919 mdelay(1);
5920 gpio_set_value(cs4271->gpio_nreset, 1);
5921 mdelay(1);