Contents of /trunk/kernel-magellan/patches-4.11/0102-4.11.3-all-fixes.patch
Parent Directory | Revision Log
Revision 2932 -
(show annotations)
(download)
Fri May 26 08:57:51 2017 UTC (7 years, 4 months ago) by niro
File size: 245233 byte(s)
-linux-4.11.3
1 | diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt |
2 | index d9995f1f51b3..a25a99e82bb1 100644 |
3 | --- a/Documentation/arm64/tagged-pointers.txt |
4 | +++ b/Documentation/arm64/tagged-pointers.txt |
5 | @@ -11,24 +11,56 @@ in AArch64 Linux. |
6 | The kernel configures the translation tables so that translations made |
7 | via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of |
8 | the virtual address ignored by the translation hardware. This frees up |
9 | -this byte for application use, with the following caveats: |
10 | +this byte for application use. |
11 | |
12 | - (1) The kernel requires that all user addresses passed to EL1 |
13 | - are tagged with tag 0x00. This means that any syscall |
14 | - parameters containing user virtual addresses *must* have |
15 | - their top byte cleared before trapping to the kernel. |
16 | |
17 | - (2) Non-zero tags are not preserved when delivering signals. |
18 | - This means that signal handlers in applications making use |
19 | - of tags cannot rely on the tag information for user virtual |
20 | - addresses being maintained for fields inside siginfo_t. |
21 | - One exception to this rule is for signals raised in response |
22 | - to watchpoint debug exceptions, where the tag information |
23 | - will be preserved. |
24 | +Passing tagged addresses to the kernel |
25 | +-------------------------------------- |
26 | |
27 | - (3) Special care should be taken when using tagged pointers, |
28 | - since it is likely that C compilers will not hazard two |
29 | - virtual addresses differing only in the upper byte. |
30 | +All interpretation of userspace memory addresses by the kernel assumes |
31 | +an address tag of 0x00. |
32 | + |
33 | +This includes, but is not limited to, addresses found in: |
34 | + |
35 | + - pointer arguments to system calls, including pointers in structures |
36 | + passed to system calls, |
37 | + |
38 | + - the stack pointer (sp), e.g. when interpreting it to deliver a |
39 | + signal, |
40 | + |
41 | + - the frame pointer (x29) and frame records, e.g. when interpreting |
42 | + them to generate a backtrace or call graph. |
43 | + |
44 | +Using non-zero address tags in any of these locations may result in an |
45 | +error code being returned, a (fatal) signal being raised, or other modes |
46 | +of failure. |
47 | + |
48 | +For these reasons, passing non-zero address tags to the kernel via |
49 | +system calls is forbidden, and using a non-zero address tag for sp is |
50 | +strongly discouraged. |
51 | + |
52 | +Programs maintaining a frame pointer and frame records that use non-zero |
53 | +address tags may suffer impaired or inaccurate debug and profiling |
54 | +visibility. |
55 | + |
56 | + |
57 | +Preserving tags |
58 | +--------------- |
59 | + |
60 | +Non-zero tags are not preserved when delivering signals. This means that |
61 | +signal handlers in applications making use of tags cannot rely on the |
62 | +tag information for user virtual addresses being maintained for fields |
63 | +inside siginfo_t. One exception to this rule is for signals raised in |
64 | +response to watchpoint debug exceptions, where the tag information will |
65 | +be preserved. |
66 | |
67 | The architecture prevents the use of a tagged PC, so the upper byte will |
68 | be set to a sign-extension of bit 55 on exception return. |
69 | + |
70 | + |
71 | +Other considerations |
72 | +-------------------- |
73 | + |
74 | +Special care should be taken when using tagged pointers, since it is |
75 | +likely that C compilers will not hazard two virtual addresses differing |
76 | +only in the upper byte. |
77 | diff --git a/Makefile b/Makefile |
78 | index d7b64830a7b7..7bab1279d0b5 100644 |
79 | --- a/Makefile |
80 | +++ b/Makefile |
81 | @@ -1,6 +1,6 @@ |
82 | VERSION = 4 |
83 | PATCHLEVEL = 11 |
84 | -SUBLEVEL = 2 |
85 | +SUBLEVEL = 3 |
86 | EXTRAVERSION = |
87 | NAME = Fearless Coyote |
88 | |
89 | diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c |
90 | index 6d76e528ab8f..af9189e8965b 100644 |
91 | --- a/arch/alpha/kernel/osf_sys.c |
92 | +++ b/arch/alpha/kernel/osf_sys.c |
93 | @@ -1199,8 +1199,10 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options, |
94 | if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) |
95 | return -EFAULT; |
96 | |
97 | - err = 0; |
98 | - err |= put_user(status, ustatus); |
99 | + err = put_user(status, ustatus); |
100 | + if (ret < 0) |
101 | + return err ? err : ret; |
102 | + |
103 | err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); |
104 | err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); |
105 | err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); |
106 | diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
107 | index c51fc652f6c7..5a53fcf542ab 100644 |
108 | --- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
109 | +++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts |
110 | @@ -162,9 +162,10 @@ |
111 | }; |
112 | |
113 | adc0: adc@f8018000 { |
114 | + atmel,adc-vref = <3300>; |
115 | + atmel,adc-channels-used = <0xfe>; |
116 | pinctrl-0 = < |
117 | &pinctrl_adc0_adtrg |
118 | - &pinctrl_adc0_ad0 |
119 | &pinctrl_adc0_ad1 |
120 | &pinctrl_adc0_ad2 |
121 | &pinctrl_adc0_ad3 |
122 | @@ -172,8 +173,6 @@ |
123 | &pinctrl_adc0_ad5 |
124 | &pinctrl_adc0_ad6 |
125 | &pinctrl_adc0_ad7 |
126 | - &pinctrl_adc0_ad8 |
127 | - &pinctrl_adc0_ad9 |
128 | >; |
129 | status = "okay"; |
130 | }; |
131 | diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts |
132 | index 5bb8fd57e7f5..d71da30c9cff 100644 |
133 | --- a/arch/arm/boot/dts/imx6sx-sdb.dts |
134 | +++ b/arch/arm/boot/dts/imx6sx-sdb.dts |
135 | @@ -12,23 +12,6 @@ |
136 | model = "Freescale i.MX6 SoloX SDB RevB Board"; |
137 | }; |
138 | |
139 | -&cpu0 { |
140 | - operating-points = < |
141 | - /* kHz uV */ |
142 | - 996000 1250000 |
143 | - 792000 1175000 |
144 | - 396000 1175000 |
145 | - 198000 1175000 |
146 | - >; |
147 | - fsl,soc-operating-points = < |
148 | - /* ARM kHz SOC uV */ |
149 | - 996000 1250000 |
150 | - 792000 1175000 |
151 | - 396000 1175000 |
152 | - 198000 1175000 |
153 | - >; |
154 | -}; |
155 | - |
156 | &i2c1 { |
157 | clock-frequency = <100000>; |
158 | pinctrl-names = "default"; |
159 | diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h |
160 | index 5c17d2dec777..8f967d1373f6 100644 |
161 | --- a/arch/arm/include/asm/fixmap.h |
162 | +++ b/arch/arm/include/asm/fixmap.h |
163 | @@ -41,7 +41,7 @@ static const enum fixed_addresses __end_of_fixed_addresses = |
164 | |
165 | #define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY) |
166 | |
167 | -#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK) |
168 | +#define FIXMAP_PAGE_NORMAL (pgprot_kernel | L_PTE_XN) |
169 | #define FIXMAP_PAGE_RO (FIXMAP_PAGE_NORMAL | L_PTE_RDONLY) |
170 | |
171 | /* Used by set_fixmap_(io|nocache), both meant for mapping a device */ |
172 | diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h |
173 | index 4917c2f7e459..e74ab0fbab79 100644 |
174 | --- a/arch/arm/include/asm/kvm_coproc.h |
175 | +++ b/arch/arm/include/asm/kvm_coproc.h |
176 | @@ -31,7 +31,8 @@ void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); |
177 | int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); |
178 | int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); |
179 | int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); |
180 | -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); |
181 | +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run); |
182 | +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run); |
183 | int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); |
184 | int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); |
185 | |
186 | diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h |
187 | index 464748b9fd7d..ed2319663a1e 100644 |
188 | --- a/arch/arm/include/asm/module.h |
189 | +++ b/arch/arm/include/asm/module.h |
190 | @@ -18,13 +18,18 @@ enum { |
191 | }; |
192 | #endif |
193 | |
194 | +struct mod_plt_sec { |
195 | + struct elf32_shdr *plt; |
196 | + int plt_count; |
197 | +}; |
198 | + |
199 | struct mod_arch_specific { |
200 | #ifdef CONFIG_ARM_UNWIND |
201 | struct unwind_table *unwind[ARM_SEC_MAX]; |
202 | #endif |
203 | #ifdef CONFIG_ARM_MODULE_PLTS |
204 | - struct elf32_shdr *plt; |
205 | - int plt_count; |
206 | + struct mod_plt_sec core; |
207 | + struct mod_plt_sec init; |
208 | #endif |
209 | }; |
210 | |
211 | diff --git a/arch/arm/kernel/module-plts.c b/arch/arm/kernel/module-plts.c |
212 | index 3a5cba90c971..3d0c2e4dda1d 100644 |
213 | --- a/arch/arm/kernel/module-plts.c |
214 | +++ b/arch/arm/kernel/module-plts.c |
215 | @@ -1,5 +1,5 @@ |
216 | /* |
217 | - * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org> |
218 | + * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org> |
219 | * |
220 | * This program is free software; you can redistribute it and/or modify |
221 | * it under the terms of the GNU General Public License version 2 as |
222 | @@ -31,9 +31,17 @@ struct plt_entries { |
223 | u32 lit[PLT_ENT_COUNT]; |
224 | }; |
225 | |
226 | +static bool in_init(const struct module *mod, unsigned long loc) |
227 | +{ |
228 | + return loc - (u32)mod->init_layout.base < mod->init_layout.size; |
229 | +} |
230 | + |
231 | u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) |
232 | { |
233 | - struct plt_entries *plt = (struct plt_entries *)mod->arch.plt->sh_addr; |
234 | + struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core : |
235 | + &mod->arch.init; |
236 | + |
237 | + struct plt_entries *plt = (struct plt_entries *)pltsec->plt->sh_addr; |
238 | int idx = 0; |
239 | |
240 | /* |
241 | @@ -41,9 +49,9 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) |
242 | * relocations are sorted, this will be the last entry we allocated. |
243 | * (if one exists). |
244 | */ |
245 | - if (mod->arch.plt_count > 0) { |
246 | - plt += (mod->arch.plt_count - 1) / PLT_ENT_COUNT; |
247 | - idx = (mod->arch.plt_count - 1) % PLT_ENT_COUNT; |
248 | + if (pltsec->plt_count > 0) { |
249 | + plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT; |
250 | + idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT; |
251 | |
252 | if (plt->lit[idx] == val) |
253 | return (u32)&plt->ldr[idx]; |
254 | @@ -53,8 +61,8 @@ u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val) |
255 | plt++; |
256 | } |
257 | |
258 | - mod->arch.plt_count++; |
259 | - BUG_ON(mod->arch.plt_count * PLT_ENT_SIZE > mod->arch.plt->sh_size); |
260 | + pltsec->plt_count++; |
261 | + BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size); |
262 | |
263 | if (!idx) |
264 | /* Populate a new set of entries */ |
265 | @@ -129,7 +137,7 @@ static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num) |
266 | |
267 | /* Count how many PLT entries we may need */ |
268 | static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, |
269 | - const Elf32_Rel *rel, int num) |
270 | + const Elf32_Rel *rel, int num, Elf32_Word dstidx) |
271 | { |
272 | unsigned int ret = 0; |
273 | const Elf32_Sym *s; |
274 | @@ -144,13 +152,17 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, |
275 | case R_ARM_THM_JUMP24: |
276 | /* |
277 | * We only have to consider branch targets that resolve |
278 | - * to undefined symbols. This is not simply a heuristic, |
279 | - * it is a fundamental limitation, since the PLT itself |
280 | - * is part of the module, and needs to be within range |
281 | - * as well, so modules can never grow beyond that limit. |
282 | + * to symbols that are defined in a different section. |
283 | + * This is not simply a heuristic, it is a fundamental |
284 | + * limitation, since there is no guaranteed way to emit |
285 | + * PLT entries sufficiently close to the branch if the |
286 | + * section size exceeds the range of a branch |
287 | + * instruction. So ignore relocations against defined |
288 | + * symbols if they live in the same section as the |
289 | + * relocation target. |
290 | */ |
291 | s = syms + ELF32_R_SYM(rel[i].r_info); |
292 | - if (s->st_shndx != SHN_UNDEF) |
293 | + if (s->st_shndx == dstidx) |
294 | break; |
295 | |
296 | /* |
297 | @@ -161,7 +173,12 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, |
298 | * So we need to support them, but there is no need to |
299 | * take them into consideration when trying to optimize |
300 | * this code. So let's only check for duplicates when |
301 | - * the addend is zero. |
302 | + * the addend is zero. (Note that calls into the core |
303 | + * module via init PLT entries could involve section |
304 | + * relative symbol references with non-zero addends, for |
305 | + * which we may end up emitting duplicates, but the init |
306 | + * PLT is released along with the rest of the .init |
307 | + * region as soon as module loading completes.) |
308 | */ |
309 | if (!is_zero_addend_relocation(base, rel + i) || |
310 | !duplicate_rel(base, rel, i)) |
311 | @@ -174,7 +191,8 @@ static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base, |
312 | int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, |
313 | char *secstrings, struct module *mod) |
314 | { |
315 | - unsigned long plts = 0; |
316 | + unsigned long core_plts = 0; |
317 | + unsigned long init_plts = 0; |
318 | Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum; |
319 | Elf32_Sym *syms = NULL; |
320 | |
321 | @@ -184,13 +202,15 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, |
322 | */ |
323 | for (s = sechdrs; s < sechdrs_end; ++s) { |
324 | if (strcmp(".plt", secstrings + s->sh_name) == 0) |
325 | - mod->arch.plt = s; |
326 | + mod->arch.core.plt = s; |
327 | + else if (strcmp(".init.plt", secstrings + s->sh_name) == 0) |
328 | + mod->arch.init.plt = s; |
329 | else if (s->sh_type == SHT_SYMTAB) |
330 | syms = (Elf32_Sym *)s->sh_addr; |
331 | } |
332 | |
333 | - if (!mod->arch.plt) { |
334 | - pr_err("%s: module PLT section missing\n", mod->name); |
335 | + if (!mod->arch.core.plt || !mod->arch.init.plt) { |
336 | + pr_err("%s: module PLT section(s) missing\n", mod->name); |
337 | return -ENOEXEC; |
338 | } |
339 | if (!syms) { |
340 | @@ -213,16 +233,29 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, |
341 | /* sort by type and symbol index */ |
342 | sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL); |
343 | |
344 | - plts += count_plts(syms, dstsec->sh_addr, rels, numrels); |
345 | + if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) |
346 | + core_plts += count_plts(syms, dstsec->sh_addr, rels, |
347 | + numrels, s->sh_info); |
348 | + else |
349 | + init_plts += count_plts(syms, dstsec->sh_addr, rels, |
350 | + numrels, s->sh_info); |
351 | } |
352 | |
353 | - mod->arch.plt->sh_type = SHT_NOBITS; |
354 | - mod->arch.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; |
355 | - mod->arch.plt->sh_addralign = L1_CACHE_BYTES; |
356 | - mod->arch.plt->sh_size = round_up(plts * PLT_ENT_SIZE, |
357 | - sizeof(struct plt_entries)); |
358 | - mod->arch.plt_count = 0; |
359 | - |
360 | - pr_debug("%s: plt=%x\n", __func__, mod->arch.plt->sh_size); |
361 | + mod->arch.core.plt->sh_type = SHT_NOBITS; |
362 | + mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; |
363 | + mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES; |
364 | + mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE, |
365 | + sizeof(struct plt_entries)); |
366 | + mod->arch.core.plt_count = 0; |
367 | + |
368 | + mod->arch.init.plt->sh_type = SHT_NOBITS; |
369 | + mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC; |
370 | + mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES; |
371 | + mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE, |
372 | + sizeof(struct plt_entries)); |
373 | + mod->arch.init.plt_count = 0; |
374 | + |
375 | + pr_debug("%s: plt=%x, init.plt=%x\n", __func__, |
376 | + mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size); |
377 | return 0; |
378 | } |
379 | diff --git a/arch/arm/kernel/module.lds b/arch/arm/kernel/module.lds |
380 | index 05881e2b414c..eacb5c67f61e 100644 |
381 | --- a/arch/arm/kernel/module.lds |
382 | +++ b/arch/arm/kernel/module.lds |
383 | @@ -1,3 +1,4 @@ |
384 | SECTIONS { |
385 | .plt : { BYTE(0) } |
386 | + .init.plt : { BYTE(0) } |
387 | } |
388 | diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c |
389 | index f4e54503afa9..32e1a9513dc7 100644 |
390 | --- a/arch/arm/kernel/setup.c |
391 | +++ b/arch/arm/kernel/setup.c |
392 | @@ -80,7 +80,7 @@ __setup("fpe=", fpe_setup); |
393 | |
394 | extern void init_default_cache_policy(unsigned long); |
395 | extern void paging_init(const struct machine_desc *desc); |
396 | -extern void early_paging_init(const struct machine_desc *); |
397 | +extern void early_mm_init(const struct machine_desc *); |
398 | extern void adjust_lowmem_bounds(void); |
399 | extern enum reboot_mode reboot_mode; |
400 | extern void setup_dma_zone(const struct machine_desc *desc); |
401 | @@ -1088,7 +1088,7 @@ void __init setup_arch(char **cmdline_p) |
402 | parse_early_param(); |
403 | |
404 | #ifdef CONFIG_MMU |
405 | - early_paging_init(mdesc); |
406 | + early_mm_init(mdesc); |
407 | #endif |
408 | setup_dma_zone(mdesc); |
409 | xen_early_init(); |
410 | diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c |
411 | index 3e5e4194ef86..c3ed6bd5ddf3 100644 |
412 | --- a/arch/arm/kvm/coproc.c |
413 | +++ b/arch/arm/kvm/coproc.c |
414 | @@ -93,12 +93,6 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) |
415 | return 1; |
416 | } |
417 | |
418 | -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) |
419 | -{ |
420 | - kvm_inject_undefined(vcpu); |
421 | - return 1; |
422 | -} |
423 | - |
424 | static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) |
425 | { |
426 | /* |
427 | @@ -514,12 +508,7 @@ static int emulate_cp15(struct kvm_vcpu *vcpu, |
428 | return 1; |
429 | } |
430 | |
431 | -/** |
432 | - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access |
433 | - * @vcpu: The VCPU pointer |
434 | - * @run: The kvm_run struct |
435 | - */ |
436 | -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
437 | +static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu) |
438 | { |
439 | struct coproc_params params; |
440 | |
441 | @@ -533,9 +522,38 @@ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
442 | params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; |
443 | params.CRm = 0; |
444 | |
445 | + return params; |
446 | +} |
447 | + |
448 | +/** |
449 | + * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access |
450 | + * @vcpu: The VCPU pointer |
451 | + * @run: The kvm_run struct |
452 | + */ |
453 | +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
454 | +{ |
455 | + struct coproc_params params = decode_64bit_hsr(vcpu); |
456 | + |
457 | return emulate_cp15(vcpu, ¶ms); |
458 | } |
459 | |
460 | +/** |
461 | + * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access |
462 | + * @vcpu: The VCPU pointer |
463 | + * @run: The kvm_run struct |
464 | + */ |
465 | +int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) |
466 | +{ |
467 | + struct coproc_params params = decode_64bit_hsr(vcpu); |
468 | + |
469 | + /* raz_wi cp14 */ |
470 | + pm_fake(vcpu, ¶ms, NULL); |
471 | + |
472 | + /* handled */ |
473 | + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
474 | + return 1; |
475 | +} |
476 | + |
477 | static void reset_coproc_regs(struct kvm_vcpu *vcpu, |
478 | const struct coproc_reg *table, size_t num) |
479 | { |
480 | @@ -546,12 +564,7 @@ static void reset_coproc_regs(struct kvm_vcpu *vcpu, |
481 | table[i].reset(vcpu, &table[i]); |
482 | } |
483 | |
484 | -/** |
485 | - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access |
486 | - * @vcpu: The VCPU pointer |
487 | - * @run: The kvm_run struct |
488 | - */ |
489 | -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
490 | +static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu) |
491 | { |
492 | struct coproc_params params; |
493 | |
494 | @@ -565,9 +578,37 @@ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
495 | params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; |
496 | params.Rt2 = 0; |
497 | |
498 | + return params; |
499 | +} |
500 | + |
501 | +/** |
502 | + * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access |
503 | + * @vcpu: The VCPU pointer |
504 | + * @run: The kvm_run struct |
505 | + */ |
506 | +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
507 | +{ |
508 | + struct coproc_params params = decode_32bit_hsr(vcpu); |
509 | return emulate_cp15(vcpu, ¶ms); |
510 | } |
511 | |
512 | +/** |
513 | + * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access |
514 | + * @vcpu: The VCPU pointer |
515 | + * @run: The kvm_run struct |
516 | + */ |
517 | +int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) |
518 | +{ |
519 | + struct coproc_params params = decode_32bit_hsr(vcpu); |
520 | + |
521 | + /* raz_wi cp14 */ |
522 | + pm_fake(vcpu, ¶ms, NULL); |
523 | + |
524 | + /* handled */ |
525 | + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); |
526 | + return 1; |
527 | +} |
528 | + |
529 | /****************************************************************************** |
530 | * Userspace API |
531 | *****************************************************************************/ |
532 | diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c |
533 | index 96af65a30d78..42f5daf715d0 100644 |
534 | --- a/arch/arm/kvm/handle_exit.c |
535 | +++ b/arch/arm/kvm/handle_exit.c |
536 | @@ -95,9 +95,9 @@ static exit_handle_fn arm_exit_handlers[] = { |
537 | [HSR_EC_WFI] = kvm_handle_wfx, |
538 | [HSR_EC_CP15_32] = kvm_handle_cp15_32, |
539 | [HSR_EC_CP15_64] = kvm_handle_cp15_64, |
540 | - [HSR_EC_CP14_MR] = kvm_handle_cp14_access, |
541 | + [HSR_EC_CP14_MR] = kvm_handle_cp14_32, |
542 | [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, |
543 | - [HSR_EC_CP14_64] = kvm_handle_cp14_access, |
544 | + [HSR_EC_CP14_64] = kvm_handle_cp14_64, |
545 | [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, |
546 | [HSR_EC_CP10_ID] = kvm_handle_cp10_id, |
547 | [HSR_EC_HVC] = handle_hvc, |
548 | diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile |
549 | index 3023bb530edf..8679405b0b2b 100644 |
550 | --- a/arch/arm/kvm/hyp/Makefile |
551 | +++ b/arch/arm/kvm/hyp/Makefile |
552 | @@ -2,6 +2,8 @@ |
553 | # Makefile for Kernel-based Virtual Machine module, HYP part |
554 | # |
555 | |
556 | +ccflags-y += -fno-stack-protector |
557 | + |
558 | KVM=../../../../virt/kvm |
559 | |
560 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
561 | diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c |
562 | index 92678b7bd046..624a510d31df 100644 |
563 | --- a/arch/arm/kvm/hyp/switch.c |
564 | +++ b/arch/arm/kvm/hyp/switch.c |
565 | @@ -48,7 +48,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) |
566 | write_sysreg(HSTR_T(15), HSTR); |
567 | write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); |
568 | val = read_sysreg(HDCR); |
569 | - write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); |
570 | + val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */ |
571 | + val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */ |
572 | + write_sysreg(val, HDCR); |
573 | } |
574 | |
575 | static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) |
576 | diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c |
577 | index 582a972371cf..3837b096e1a6 100644 |
578 | --- a/arch/arm/kvm/mmu.c |
579 | +++ b/arch/arm/kvm/mmu.c |
580 | @@ -295,6 +295,13 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) |
581 | assert_spin_locked(&kvm->mmu_lock); |
582 | pgd = kvm->arch.pgd + stage2_pgd_index(addr); |
583 | do { |
584 | + /* |
585 | + * Make sure the page table is still active, as another thread |
586 | + * could have possibly freed the page table, while we released |
587 | + * the lock. |
588 | + */ |
589 | + if (!READ_ONCE(kvm->arch.pgd)) |
590 | + break; |
591 | next = stage2_pgd_addr_end(addr, end); |
592 | if (!stage2_pgd_none(*pgd)) |
593 | unmap_stage2_puds(kvm, pgd, addr, next); |
594 | @@ -829,22 +836,22 @@ void stage2_unmap_vm(struct kvm *kvm) |
595 | * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all |
596 | * underlying level-2 and level-3 tables before freeing the actual level-1 table |
597 | * and setting the struct pointer to NULL. |
598 | - * |
599 | - * Note we don't need locking here as this is only called when the VM is |
600 | - * destroyed, which can only be done once. |
601 | */ |
602 | void kvm_free_stage2_pgd(struct kvm *kvm) |
603 | { |
604 | - if (kvm->arch.pgd == NULL) |
605 | - return; |
606 | + void *pgd = NULL; |
607 | |
608 | spin_lock(&kvm->mmu_lock); |
609 | - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
610 | + if (kvm->arch.pgd) { |
611 | + unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); |
612 | + pgd = READ_ONCE(kvm->arch.pgd); |
613 | + kvm->arch.pgd = NULL; |
614 | + } |
615 | spin_unlock(&kvm->mmu_lock); |
616 | |
617 | /* Free the HW pgd, one page at a time */ |
618 | - free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE); |
619 | - kvm->arch.pgd = NULL; |
620 | + if (pgd) |
621 | + free_pages_exact(pgd, S2_PGD_SIZE); |
622 | } |
623 | |
624 | static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, |
625 | @@ -1170,11 +1177,13 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end) |
626 | * large. Otherwise, we may see kernel panics with |
627 | * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR, |
628 | * CONFIG_LOCKDEP. Additionally, holding the lock too long |
629 | - * will also starve other vCPUs. |
630 | + * will also starve other vCPUs. We have to also make sure |
631 | + * that the page tables are not freed while we released |
632 | + * the lock. |
633 | */ |
634 | - if (need_resched() || spin_needbreak(&kvm->mmu_lock)) |
635 | - cond_resched_lock(&kvm->mmu_lock); |
636 | - |
637 | + cond_resched_lock(&kvm->mmu_lock); |
638 | + if (!READ_ONCE(kvm->arch.pgd)) |
639 | + break; |
640 | next = stage2_pgd_addr_end(addr, end); |
641 | if (stage2_pgd_present(*pgd)) |
642 | stage2_wp_puds(pgd, addr, next); |
643 | diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c |
644 | index 4e016d7f37b3..347cca965783 100644 |
645 | --- a/arch/arm/mm/mmu.c |
646 | +++ b/arch/arm/mm/mmu.c |
647 | @@ -414,6 +414,11 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot) |
648 | FIXADDR_END); |
649 | BUG_ON(idx >= __end_of_fixed_addresses); |
650 | |
651 | + /* we only support device mappings until pgprot_kernel has been set */ |
652 | + if (WARN_ON(pgprot_val(prot) != pgprot_val(FIXMAP_PAGE_IO) && |
653 | + pgprot_val(pgprot_kernel) == 0)) |
654 | + return; |
655 | + |
656 | if (pgprot_val(prot)) |
657 | set_pte_at(NULL, vaddr, pte, |
658 | pfn_pte(phys >> PAGE_SHIFT, prot)); |
659 | @@ -1492,7 +1497,7 @@ pgtables_remap lpae_pgtables_remap_asm; |
660 | * early_paging_init() recreates boot time page table setup, allowing machines |
661 | * to switch over to a high (>4G) address space on LPAE systems |
662 | */ |
663 | -void __init early_paging_init(const struct machine_desc *mdesc) |
664 | +static void __init early_paging_init(const struct machine_desc *mdesc) |
665 | { |
666 | pgtables_remap *lpae_pgtables_remap; |
667 | unsigned long pa_pgd; |
668 | @@ -1560,7 +1565,7 @@ void __init early_paging_init(const struct machine_desc *mdesc) |
669 | |
670 | #else |
671 | |
672 | -void __init early_paging_init(const struct machine_desc *mdesc) |
673 | +static void __init early_paging_init(const struct machine_desc *mdesc) |
674 | { |
675 | long long offset; |
676 | |
677 | @@ -1616,7 +1621,6 @@ void __init paging_init(const struct machine_desc *mdesc) |
678 | { |
679 | void *zero_page; |
680 | |
681 | - build_mem_type_table(); |
682 | prepare_page_table(); |
683 | map_lowmem(); |
684 | memblock_set_current_limit(arm_lowmem_limit); |
685 | @@ -1636,3 +1640,9 @@ void __init paging_init(const struct machine_desc *mdesc) |
686 | empty_zero_page = virt_to_page(zero_page); |
687 | __flush_dcache_page(NULL, empty_zero_page); |
688 | } |
689 | + |
690 | +void __init early_mm_init(const struct machine_desc *mdesc) |
691 | +{ |
692 | + build_mem_type_table(); |
693 | + early_paging_init(mdesc); |
694 | +} |
695 | diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S |
696 | index 8dea61640cc1..50497778c2e5 100644 |
697 | --- a/arch/arm/mm/proc-v7m.S |
698 | +++ b/arch/arm/mm/proc-v7m.S |
699 | @@ -147,10 +147,10 @@ __v7m_setup_cont: |
700 | |
701 | @ Configure caches (if implemented) |
702 | teq r8, #0 |
703 | - stmneia r12, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 |
704 | + stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6 |
705 | blne v7m_invalidate_l1 |
706 | teq r8, #0 @ re-evalutae condition |
707 | - ldmneia r12, {r0-r6, lr} |
708 | + ldmneia sp, {r0-r6, lr} |
709 | |
710 | @ Configure the System Control Register to ensure 8-byte stack alignment |
711 | @ Note the STKALIGN bit is either RW or RAO. |
712 | diff --git a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi |
713 | index 470461ddd427..1e5129b19280 100644 |
714 | --- a/arch/arm64/boot/dts/hisilicon/hi6220.dtsi |
715 | +++ b/arch/arm64/boot/dts/hisilicon/hi6220.dtsi |
716 | @@ -774,6 +774,7 @@ |
717 | clocks = <&sys_ctrl 2>, <&sys_ctrl 1>; |
718 | clock-names = "ciu", "biu"; |
719 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC0>; |
720 | + reset-names = "reset"; |
721 | bus-width = <0x8>; |
722 | vmmc-supply = <&ldo19>; |
723 | pinctrl-names = "default"; |
724 | @@ -797,6 +798,7 @@ |
725 | clocks = <&sys_ctrl 4>, <&sys_ctrl 3>; |
726 | clock-names = "ciu", "biu"; |
727 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC1>; |
728 | + reset-names = "reset"; |
729 | vqmmc-supply = <&ldo7>; |
730 | vmmc-supply = <&ldo10>; |
731 | bus-width = <0x4>; |
732 | @@ -815,6 +817,7 @@ |
733 | clocks = <&sys_ctrl HI6220_MMC2_CIUCLK>, <&sys_ctrl HI6220_MMC2_CLK>; |
734 | clock-names = "ciu", "biu"; |
735 | resets = <&sys_ctrl PERIPH_RSTDIS0_MMC2>; |
736 | + reset-names = "reset"; |
737 | bus-width = <0x4>; |
738 | broken-cd; |
739 | pinctrl-names = "default", "idle"; |
740 | diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h |
741 | index df411f3e083c..ecd9788cd298 100644 |
742 | --- a/arch/arm64/include/asm/asm-uaccess.h |
743 | +++ b/arch/arm64/include/asm/asm-uaccess.h |
744 | @@ -62,4 +62,13 @@ alternative_if ARM64_ALT_PAN_NOT_UAO |
745 | alternative_else_nop_endif |
746 | .endm |
747 | |
748 | +/* |
749 | + * Remove the address tag from a virtual address, if present. |
750 | + */ |
751 | + .macro clear_address_tag, dst, addr |
752 | + tst \addr, #(1 << 55) |
753 | + bic \dst, \addr, #(0xff << 56) |
754 | + csel \dst, \dst, \addr, eq |
755 | + .endm |
756 | + |
757 | #endif |
758 | diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h |
759 | index 4e0497f581a0..0fe7e43b7fbc 100644 |
760 | --- a/arch/arm64/include/asm/barrier.h |
761 | +++ b/arch/arm64/include/asm/barrier.h |
762 | @@ -42,25 +42,35 @@ |
763 | #define __smp_rmb() dmb(ishld) |
764 | #define __smp_wmb() dmb(ishst) |
765 | |
766 | -#define __smp_store_release(p, v) \ |
767 | +#define __smp_store_release(p, v) \ |
768 | do { \ |
769 | + union { typeof(*p) __val; char __c[1]; } __u = \ |
770 | + { .__val = (__force typeof(*p)) (v) }; \ |
771 | compiletime_assert_atomic_type(*p); \ |
772 | switch (sizeof(*p)) { \ |
773 | case 1: \ |
774 | asm volatile ("stlrb %w1, %0" \ |
775 | - : "=Q" (*p) : "r" (v) : "memory"); \ |
776 | + : "=Q" (*p) \ |
777 | + : "r" (*(__u8 *)__u.__c) \ |
778 | + : "memory"); \ |
779 | break; \ |
780 | case 2: \ |
781 | asm volatile ("stlrh %w1, %0" \ |
782 | - : "=Q" (*p) : "r" (v) : "memory"); \ |
783 | + : "=Q" (*p) \ |
784 | + : "r" (*(__u16 *)__u.__c) \ |
785 | + : "memory"); \ |
786 | break; \ |
787 | case 4: \ |
788 | asm volatile ("stlr %w1, %0" \ |
789 | - : "=Q" (*p) : "r" (v) : "memory"); \ |
790 | + : "=Q" (*p) \ |
791 | + : "r" (*(__u32 *)__u.__c) \ |
792 | + : "memory"); \ |
793 | break; \ |
794 | case 8: \ |
795 | asm volatile ("stlr %1, %0" \ |
796 | - : "=Q" (*p) : "r" (v) : "memory"); \ |
797 | + : "=Q" (*p) \ |
798 | + : "r" (*(__u64 *)__u.__c) \ |
799 | + : "memory"); \ |
800 | break; \ |
801 | } \ |
802 | } while (0) |
803 | diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h |
804 | index 91b26d26af8a..ae852add053d 100644 |
805 | --- a/arch/arm64/include/asm/cmpxchg.h |
806 | +++ b/arch/arm64/include/asm/cmpxchg.h |
807 | @@ -46,7 +46,7 @@ static inline unsigned long __xchg_case_##name(unsigned long x, \ |
808 | " swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \ |
809 | __nops(3) \ |
810 | " " #nop_lse) \ |
811 | - : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \ |
812 | + : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \ |
813 | : "r" (x) \ |
814 | : cl); \ |
815 | \ |
816 | diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h |
817 | index 5308d696311b..e6540471dcda 100644 |
818 | --- a/arch/arm64/include/asm/uaccess.h |
819 | +++ b/arch/arm64/include/asm/uaccess.h |
820 | @@ -95,20 +95,21 @@ static inline void set_fs(mm_segment_t fs) |
821 | */ |
822 | #define __range_ok(addr, size) \ |
823 | ({ \ |
824 | + unsigned long __addr = (unsigned long __force)(addr); \ |
825 | unsigned long flag, roksum; \ |
826 | __chk_user_ptr(addr); \ |
827 | asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \ |
828 | : "=&r" (flag), "=&r" (roksum) \ |
829 | - : "1" (addr), "Ir" (size), \ |
830 | + : "1" (__addr), "Ir" (size), \ |
831 | "r" (current_thread_info()->addr_limit) \ |
832 | : "cc"); \ |
833 | flag; \ |
834 | }) |
835 | |
836 | /* |
837 | - * When dealing with data aborts or instruction traps we may end up with |
838 | - * a tagged userland pointer. Clear the tag to get a sane pointer to pass |
839 | - * on to access_ok(), for instance. |
840 | + * When dealing with data aborts, watchpoints, or instruction traps we may end |
841 | + * up with a tagged userland pointer. Clear the tag to get a sane pointer to |
842 | + * pass on to access_ok(), for instance. |
843 | */ |
844 | #define untagged_addr(addr) sign_extend64(addr, 55) |
845 | |
846 | diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c |
847 | index 657977e77ec8..f0e6d717885b 100644 |
848 | --- a/arch/arm64/kernel/armv8_deprecated.c |
849 | +++ b/arch/arm64/kernel/armv8_deprecated.c |
850 | @@ -306,7 +306,8 @@ do { \ |
851 | _ASM_EXTABLE(0b, 4b) \ |
852 | _ASM_EXTABLE(1b, 4b) \ |
853 | : "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2) \ |
854 | - : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT), \ |
855 | + : "r" ((unsigned long)addr), "i" (-EAGAIN), \ |
856 | + "i" (-EFAULT), \ |
857 | "i" (__SWP_LL_SC_LOOPS) \ |
858 | : "memory"); \ |
859 | uaccess_disable(); \ |
860 | diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
861 | index 43512d4d7df2..b738880350f9 100644 |
862 | --- a/arch/arm64/kernel/entry.S |
863 | +++ b/arch/arm64/kernel/entry.S |
864 | @@ -428,12 +428,13 @@ el1_da: |
865 | /* |
866 | * Data abort handling |
867 | */ |
868 | - mrs x0, far_el1 |
869 | + mrs x3, far_el1 |
870 | enable_dbg |
871 | // re-enable interrupts if they were enabled in the aborted context |
872 | tbnz x23, #7, 1f // PSR_I_BIT |
873 | enable_irq |
874 | 1: |
875 | + clear_address_tag x0, x3 |
876 | mov x2, sp // struct pt_regs |
877 | bl do_mem_abort |
878 | |
879 | @@ -594,7 +595,7 @@ el0_da: |
880 | // enable interrupts before calling the main handler |
881 | enable_dbg_and_irq |
882 | ct_user_exit |
883 | - bic x0, x26, #(0xff << 56) |
884 | + clear_address_tag x0, x26 |
885 | mov x1, x25 |
886 | mov x2, sp |
887 | bl do_mem_abort |
888 | diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c |
889 | index 0296e7924240..749f81779420 100644 |
890 | --- a/arch/arm64/kernel/hw_breakpoint.c |
891 | +++ b/arch/arm64/kernel/hw_breakpoint.c |
892 | @@ -36,6 +36,7 @@ |
893 | #include <asm/traps.h> |
894 | #include <asm/cputype.h> |
895 | #include <asm/system_misc.h> |
896 | +#include <asm/uaccess.h> |
897 | |
898 | /* Breakpoint currently in use for each BRP. */ |
899 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); |
900 | @@ -721,6 +722,8 @@ static u64 get_distance_from_watchpoint(unsigned long addr, u64 val, |
901 | u64 wp_low, wp_high; |
902 | u32 lens, lene; |
903 | |
904 | + addr = untagged_addr(addr); |
905 | + |
906 | lens = __ffs(ctrl->len); |
907 | lene = __fls(ctrl->len); |
908 | |
909 | diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c |
910 | index e52be6aa44ee..45c8eca951bc 100644 |
911 | --- a/arch/arm64/kernel/traps.c |
912 | +++ b/arch/arm64/kernel/traps.c |
913 | @@ -443,7 +443,7 @@ int cpu_enable_cache_maint_trap(void *__unused) |
914 | } |
915 | |
916 | #define __user_cache_maint(insn, address, res) \ |
917 | - if (untagged_addr(address) >= user_addr_max()) { \ |
918 | + if (address >= user_addr_max()) { \ |
919 | res = -EFAULT; \ |
920 | } else { \ |
921 | uaccess_ttbr0_enable(); \ |
922 | @@ -469,7 +469,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) |
923 | int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; |
924 | int ret = 0; |
925 | |
926 | - address = pt_regs_read_reg(regs, rt); |
927 | + address = untagged_addr(pt_regs_read_reg(regs, rt)); |
928 | |
929 | switch (crm) { |
930 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ |
931 | diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile |
932 | index aaf42ae8d8c3..14c4e3b14bcb 100644 |
933 | --- a/arch/arm64/kvm/hyp/Makefile |
934 | +++ b/arch/arm64/kvm/hyp/Makefile |
935 | @@ -2,6 +2,8 @@ |
936 | # Makefile for Kernel-based Virtual Machine module, HYP part |
937 | # |
938 | |
939 | +ccflags-y += -fno-stack-protector |
940 | + |
941 | KVM=../../../../virt/kvm |
942 | |
943 | obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o |
944 | diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h |
945 | index 07238b39638c..3db381205928 100644 |
946 | --- a/arch/metag/include/asm/uaccess.h |
947 | +++ b/arch/metag/include/asm/uaccess.h |
948 | @@ -28,24 +28,32 @@ |
949 | |
950 | #define segment_eq(a, b) ((a).seg == (b).seg) |
951 | |
952 | -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
953 | -/* |
954 | - * Explicitly allow NULL pointers here. Parts of the kernel such |
955 | - * as readv/writev use access_ok to validate pointers, but want |
956 | - * to allow NULL pointers for various reasons. NULL pointers are |
957 | - * safe to allow through because the first page is not mappable on |
958 | - * Meta. |
959 | - * |
960 | - * We also wish to avoid letting user code access the system area |
961 | - * and the kernel half of the address space. |
962 | - */ |
963 | -#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \ |
964 | - ((addr) > PAGE_OFFSET && \ |
965 | - (addr) < LINCORE_BASE)) |
966 | - |
967 | static inline int __access_ok(unsigned long addr, unsigned long size) |
968 | { |
969 | - return __kernel_ok || !__user_bad(addr, size); |
970 | + /* |
971 | + * Allow access to the user mapped memory area, but not the system area |
972 | + * before it. The check extends to the top of the address space when |
973 | + * kernel access is allowed (there's no real reason to user copy to the |
974 | + * system area in any case). |
975 | + */ |
976 | + if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg && |
977 | + size <= get_fs().seg - addr)) |
978 | + return true; |
979 | + /* |
980 | + * Explicitly allow NULL pointers here. Parts of the kernel such |
981 | + * as readv/writev use access_ok to validate pointers, but want |
982 | + * to allow NULL pointers for various reasons. NULL pointers are |
983 | + * safe to allow through because the first page is not mappable on |
984 | + * Meta. |
985 | + */ |
986 | + if (!addr) |
987 | + return true; |
988 | + /* Allow access to core code memory area... */ |
989 | + if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT && |
990 | + size <= LINCORE_CODE_LIMIT + 1 - addr) |
991 | + return true; |
992 | + /* ... but no other areas. */ |
993 | + return false; |
994 | } |
995 | |
996 | #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ |
997 | @@ -186,8 +194,13 @@ do { \ |
998 | extern long __must_check __strncpy_from_user(char *dst, const char __user *src, |
999 | long count); |
1000 | |
1001 | -#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) |
1002 | - |
1003 | +static inline long |
1004 | +strncpy_from_user(char *dst, const char __user *src, long count) |
1005 | +{ |
1006 | + if (!access_ok(VERIFY_READ, src, 1)) |
1007 | + return -EFAULT; |
1008 | + return __strncpy_from_user(dst, src, count); |
1009 | +} |
1010 | /* |
1011 | * Return the size of a string (including the ending 0) |
1012 | * |
1013 | diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig |
1014 | index e0bb576410bb..c3c7d8aa3283 100644 |
1015 | --- a/arch/mips/Kconfig |
1016 | +++ b/arch/mips/Kconfig |
1017 | @@ -1373,6 +1373,7 @@ config CPU_LOONGSON3 |
1018 | select WEAK_ORDERING |
1019 | select WEAK_REORDERING_BEYOND_LLSC |
1020 | select MIPS_PGD_C0_CONTEXT |
1021 | + select MIPS_L1_CACHE_SHIFT_6 |
1022 | select GPIOLIB |
1023 | help |
1024 | The Loongson 3 processor implements the MIPS64R2 instruction |
1025 | diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h |
1026 | index b9e3f0aca261..0012f0353fd6 100644 |
1027 | --- a/arch/powerpc/include/asm/mmu_context.h |
1028 | +++ b/arch/powerpc/include/asm/mmu_context.h |
1029 | @@ -70,8 +70,9 @@ extern void drop_cop(unsigned long acop, struct mm_struct *mm); |
1030 | * switch_mm is the entry point called from the architecture independent |
1031 | * code in kernel/sched/core.c |
1032 | */ |
1033 | -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1034 | - struct task_struct *tsk) |
1035 | +static inline void switch_mm_irqs_off(struct mm_struct *prev, |
1036 | + struct mm_struct *next, |
1037 | + struct task_struct *tsk) |
1038 | { |
1039 | /* Mark this context has been used on the new CPU */ |
1040 | if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) |
1041 | @@ -110,6 +111,18 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1042 | switch_mmu_context(prev, next, tsk); |
1043 | } |
1044 | |
1045 | +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
1046 | + struct task_struct *tsk) |
1047 | +{ |
1048 | + unsigned long flags; |
1049 | + |
1050 | + local_irq_save(flags); |
1051 | + switch_mm_irqs_off(prev, next, tsk); |
1052 | + local_irq_restore(flags); |
1053 | +} |
1054 | +#define switch_mm_irqs_off switch_mm_irqs_off |
1055 | + |
1056 | + |
1057 | #define deactivate_mm(tsk,mm) do { } while (0) |
1058 | |
1059 | /* |
1060 | diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c |
1061 | index b94887165a10..e50d1470714f 100644 |
1062 | --- a/arch/powerpc/kernel/eeh_driver.c |
1063 | +++ b/arch/powerpc/kernel/eeh_driver.c |
1064 | @@ -724,7 +724,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus, |
1065 | */ |
1066 | #define MAX_WAIT_FOR_RECOVERY 300 |
1067 | |
1068 | -static void eeh_handle_normal_event(struct eeh_pe *pe) |
1069 | +static bool eeh_handle_normal_event(struct eeh_pe *pe) |
1070 | { |
1071 | struct pci_bus *frozen_bus; |
1072 | struct eeh_dev *edev, *tmp; |
1073 | @@ -736,7 +736,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) |
1074 | if (!frozen_bus) { |
1075 | pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n", |
1076 | __func__, pe->phb->global_number, pe->addr); |
1077 | - return; |
1078 | + return false; |
1079 | } |
1080 | |
1081 | eeh_pe_update_time_stamp(pe); |
1082 | @@ -870,7 +870,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) |
1083 | pr_info("EEH: Notify device driver to resume\n"); |
1084 | eeh_pe_dev_traverse(pe, eeh_report_resume, NULL); |
1085 | |
1086 | - return; |
1087 | + return false; |
1088 | |
1089 | excess_failures: |
1090 | /* |
1091 | @@ -915,8 +915,12 @@ static void eeh_handle_normal_event(struct eeh_pe *pe) |
1092 | pci_lock_rescan_remove(); |
1093 | pci_hp_remove_devices(frozen_bus); |
1094 | pci_unlock_rescan_remove(); |
1095 | + |
1096 | + /* The passed PE should no longer be used */ |
1097 | + return true; |
1098 | } |
1099 | } |
1100 | + return false; |
1101 | } |
1102 | |
1103 | static void eeh_handle_special_event(void) |
1104 | @@ -982,7 +986,14 @@ static void eeh_handle_special_event(void) |
1105 | */ |
1106 | if (rc == EEH_NEXT_ERR_FROZEN_PE || |
1107 | rc == EEH_NEXT_ERR_FENCED_PHB) { |
1108 | - eeh_handle_normal_event(pe); |
1109 | + /* |
1110 | + * eeh_handle_normal_event() can make the PE stale if it |
1111 | + * determines that the PE cannot possibly be recovered. |
1112 | + * Don't modify the PE state if that's the case. |
1113 | + */ |
1114 | + if (eeh_handle_normal_event(pe)) |
1115 | + continue; |
1116 | + |
1117 | eeh_pe_state_clear(pe, EEH_PE_RECOVERING); |
1118 | } else { |
1119 | pci_lock_rescan_remove(); |
1120 | diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S |
1121 | index 45b453e4d0c8..acd8ca76233e 100644 |
1122 | --- a/arch/powerpc/kernel/exceptions-64e.S |
1123 | +++ b/arch/powerpc/kernel/exceptions-64e.S |
1124 | @@ -735,8 +735,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
1125 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
1126 | beq+ 1f |
1127 | |
1128 | +#ifdef CONFIG_RELOCATABLE |
1129 | + ld r15,PACATOC(r13) |
1130 | + ld r14,interrupt_base_book3e@got(r15) |
1131 | + ld r15,__end_interrupts@got(r15) |
1132 | +#else |
1133 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
1134 | LOAD_REG_IMMEDIATE(r15,__end_interrupts) |
1135 | +#endif |
1136 | cmpld cr0,r10,r14 |
1137 | cmpld cr1,r10,r15 |
1138 | blt+ cr0,1f |
1139 | @@ -799,8 +805,14 @@ kernel_dbg_exc: |
1140 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
1141 | beq+ 1f |
1142 | |
1143 | +#ifdef CONFIG_RELOCATABLE |
1144 | + ld r15,PACATOC(r13) |
1145 | + ld r14,interrupt_base_book3e@got(r15) |
1146 | + ld r15,__end_interrupts@got(r15) |
1147 | +#else |
1148 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
1149 | LOAD_REG_IMMEDIATE(r15,__end_interrupts) |
1150 | +#endif |
1151 | cmpld cr0,r10,r14 |
1152 | cmpld cr1,r10,r15 |
1153 | blt+ cr0,1f |
1154 | diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c |
1155 | index a1475e6aef3a..b23b32385583 100644 |
1156 | --- a/arch/powerpc/kernel/mce.c |
1157 | +++ b/arch/powerpc/kernel/mce.c |
1158 | @@ -221,6 +221,8 @@ static void machine_check_process_queued_event(struct irq_work *work) |
1159 | { |
1160 | int index; |
1161 | |
1162 | + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
1163 | + |
1164 | /* |
1165 | * For now just print it to console. |
1166 | * TODO: log this error event to FSP or nvram. |
1167 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
1168 | index d645da302bf2..baae104b16c7 100644 |
1169 | --- a/arch/powerpc/kernel/process.c |
1170 | +++ b/arch/powerpc/kernel/process.c |
1171 | @@ -864,6 +864,25 @@ static void tm_reclaim_thread(struct thread_struct *thr, |
1172 | if (!MSR_TM_SUSPENDED(mfmsr())) |
1173 | return; |
1174 | |
1175 | + /* |
1176 | + * If we are in a transaction and FP is off then we can't have |
1177 | + * used FP inside that transaction. Hence the checkpointed |
1178 | + * state is the same as the live state. We need to copy the |
1179 | + * live state to the checkpointed state so that when the |
1180 | + * transaction is restored, the checkpointed state is correct |
1181 | + * and the aborted transaction sees the correct state. We use |
1182 | + * ckpt_regs.msr here as that's what tm_reclaim will use to |
1183 | + * determine if it's going to write the checkpointed state or |
1184 | + * not. So either this will write the checkpointed registers, |
1185 | + * or reclaim will. Similarly for VMX. |
1186 | + */ |
1187 | + if ((thr->ckpt_regs.msr & MSR_FP) == 0) |
1188 | + memcpy(&thr->ckfp_state, &thr->fp_state, |
1189 | + sizeof(struct thread_fp_state)); |
1190 | + if ((thr->ckpt_regs.msr & MSR_VEC) == 0) |
1191 | + memcpy(&thr->ckvr_state, &thr->vr_state, |
1192 | + sizeof(struct thread_vr_state)); |
1193 | + |
1194 | giveup_all(container_of(thr, struct task_struct, thread)); |
1195 | |
1196 | tm_reclaim(thr, thr->ckpt_regs.msr, cause); |
1197 | diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c |
1198 | index c1fb255a60d6..949957b97ead 100644 |
1199 | --- a/arch/powerpc/kernel/sysfs.c |
1200 | +++ b/arch/powerpc/kernel/sysfs.c |
1201 | @@ -710,6 +710,10 @@ static int register_cpu_online(unsigned int cpu) |
1202 | struct device_attribute *attrs, *pmc_attrs; |
1203 | int i, nattrs; |
1204 | |
1205 | + /* For cpus present at boot a reference was already grabbed in register_cpu() */ |
1206 | + if (!s->of_node) |
1207 | + s->of_node = of_get_cpu_node(cpu, NULL); |
1208 | + |
1209 | #ifdef CONFIG_PPC64 |
1210 | if (cpu_has_feature(CPU_FTR_SMT)) |
1211 | device_create_file(s, &dev_attr_smt_snooze_delay); |
1212 | @@ -864,6 +868,8 @@ static int unregister_cpu_online(unsigned int cpu) |
1213 | } |
1214 | #endif |
1215 | cacheinfo_cpu_offline(cpu); |
1216 | + of_node_put(s->of_node); |
1217 | + s->of_node = NULL; |
1218 | #endif /* CONFIG_HOTPLUG_CPU */ |
1219 | return 0; |
1220 | } |
1221 | diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c |
1222 | index ff365f9de27a..af97e8135939 100644 |
1223 | --- a/arch/powerpc/kernel/traps.c |
1224 | +++ b/arch/powerpc/kernel/traps.c |
1225 | @@ -306,8 +306,6 @@ long machine_check_early(struct pt_regs *regs) |
1226 | |
1227 | __this_cpu_inc(irq_stat.mce_exceptions); |
1228 | |
1229 | - add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
1230 | - |
1231 | if (cur_cpu_spec && cur_cpu_spec->machine_check_early) |
1232 | handled = cur_cpu_spec->machine_check_early(regs); |
1233 | return handled; |
1234 | @@ -741,6 +739,8 @@ void machine_check_exception(struct pt_regs *regs) |
1235 | |
1236 | __this_cpu_inc(irq_stat.mce_exceptions); |
1237 | |
1238 | + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); |
1239 | + |
1240 | /* See if any machine dependent calls. In theory, we would want |
1241 | * to call the CPU first, and call the ppc_md. one if the CPU |
1242 | * one returns a positive number. However there is existing code |
1243 | diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c |
1244 | index 49abaf4dc8e3..292214afe0f1 100644 |
1245 | --- a/arch/powerpc/mm/dump_linuxpagetables.c |
1246 | +++ b/arch/powerpc/mm/dump_linuxpagetables.c |
1247 | @@ -16,6 +16,7 @@ |
1248 | */ |
1249 | #include <linux/debugfs.h> |
1250 | #include <linux/fs.h> |
1251 | +#include <linux/hugetlb.h> |
1252 | #include <linux/io.h> |
1253 | #include <linux/mm.h> |
1254 | #include <linux/sched.h> |
1255 | @@ -331,7 +332,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) |
1256 | |
1257 | for (i = 0; i < PTRS_PER_PMD; i++, pmd++) { |
1258 | addr = start + i * PMD_SIZE; |
1259 | - if (!pmd_none(*pmd)) |
1260 | + if (!pmd_none(*pmd) && !pmd_huge(*pmd)) |
1261 | /* pmd exists */ |
1262 | walk_pte(st, pmd, addr); |
1263 | else |
1264 | @@ -347,7 +348,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) |
1265 | |
1266 | for (i = 0; i < PTRS_PER_PUD; i++, pud++) { |
1267 | addr = start + i * PUD_SIZE; |
1268 | - if (!pud_none(*pud)) |
1269 | + if (!pud_none(*pud) && !pud_huge(*pud)) |
1270 | /* pud exists */ |
1271 | walk_pmd(st, pud, addr); |
1272 | else |
1273 | @@ -367,7 +368,7 @@ static void walk_pagetables(struct pg_state *st) |
1274 | */ |
1275 | for (i = 0; i < PTRS_PER_PGD; i++, pgd++) { |
1276 | addr = KERN_VIRT_START + i * PGDIR_SIZE; |
1277 | - if (!pgd_none(*pgd)) |
1278 | + if (!pgd_none(*pgd) && !pgd_huge(*pgd)) |
1279 | /* pgd exists */ |
1280 | walk_pud(st, pgd, addr); |
1281 | else |
1282 | diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c |
1283 | index 497130c5c742..96f835cbf212 100644 |
1284 | --- a/arch/powerpc/mm/mmu_context_iommu.c |
1285 | +++ b/arch/powerpc/mm/mmu_context_iommu.c |
1286 | @@ -81,7 +81,7 @@ struct page *new_iommu_non_cma_page(struct page *page, unsigned long private, |
1287 | gfp_t gfp_mask = GFP_USER; |
1288 | struct page *new_page; |
1289 | |
1290 | - if (PageHuge(page) || PageTransHuge(page) || PageCompound(page)) |
1291 | + if (PageCompound(page)) |
1292 | return NULL; |
1293 | |
1294 | if (PageHighMem(page)) |
1295 | @@ -100,7 +100,7 @@ static int mm_iommu_move_page_from_cma(struct page *page) |
1296 | LIST_HEAD(cma_migrate_pages); |
1297 | |
1298 | /* Ignore huge pages for now */ |
1299 | - if (PageHuge(page) || PageTransHuge(page) || PageCompound(page)) |
1300 | + if (PageCompound(page)) |
1301 | return -EBUSY; |
1302 | |
1303 | lru_add_drain(); |
1304 | diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c |
1305 | index 1c383f38031d..d5b73eb1fae5 100644 |
1306 | --- a/arch/powerpc/platforms/powernv/npu-dma.c |
1307 | +++ b/arch/powerpc/platforms/powernv/npu-dma.c |
1308 | @@ -180,7 +180,7 @@ long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num, |
1309 | pe_err(npe, "Failed to configure TCE table, err %lld\n", rc); |
1310 | return rc; |
1311 | } |
1312 | - pnv_pci_phb3_tce_invalidate_entire(phb, false); |
1313 | + pnv_pci_ioda2_tce_invalidate_entire(phb, false); |
1314 | |
1315 | /* Add the table to the list so its TCE cache will get invalidated */ |
1316 | pnv_pci_link_table_and_group(phb->hose->node, num, |
1317 | @@ -204,7 +204,7 @@ long pnv_npu_unset_window(struct pnv_ioda_pe *npe, int num) |
1318 | pe_err(npe, "Unmapping failed, ret = %lld\n", rc); |
1319 | return rc; |
1320 | } |
1321 | - pnv_pci_phb3_tce_invalidate_entire(phb, false); |
1322 | + pnv_pci_ioda2_tce_invalidate_entire(phb, false); |
1323 | |
1324 | pnv_pci_unlink_table_and_group(npe->table_group.tables[num], |
1325 | &npe->table_group); |
1326 | @@ -270,7 +270,7 @@ static int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe) |
1327 | 0 /* bypass base */, top); |
1328 | |
1329 | if (rc == OPAL_SUCCESS) |
1330 | - pnv_pci_phb3_tce_invalidate_entire(phb, false); |
1331 | + pnv_pci_ioda2_tce_invalidate_entire(phb, false); |
1332 | |
1333 | return rc; |
1334 | } |
1335 | @@ -334,7 +334,7 @@ void pnv_npu_take_ownership(struct pnv_ioda_pe *npe) |
1336 | pe_err(npe, "Failed to disable bypass, err %lld\n", rc); |
1337 | return; |
1338 | } |
1339 | - pnv_pci_phb3_tce_invalidate_entire(npe->phb, false); |
1340 | + pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false); |
1341 | } |
1342 | |
1343 | struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe) |
1344 | diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c |
1345 | index e36738291c32..ecfbf66ca040 100644 |
1346 | --- a/arch/powerpc/platforms/powernv/pci-ioda.c |
1347 | +++ b/arch/powerpc/platforms/powernv/pci-ioda.c |
1348 | @@ -1883,7 +1883,7 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = { |
1349 | #define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1) |
1350 | #define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2) |
1351 | |
1352 | -void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm) |
1353 | +static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm) |
1354 | { |
1355 | __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm); |
1356 | const unsigned long val = PHB3_TCE_KILL_INVAL_ALL; |
1357 | @@ -1979,6 +1979,14 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl, |
1358 | } |
1359 | } |
1360 | |
1361 | +void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm) |
1362 | +{ |
1363 | + if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3) |
1364 | + pnv_pci_phb3_tce_invalidate_entire(phb, rm); |
1365 | + else |
1366 | + opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0); |
1367 | +} |
1368 | + |
1369 | static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index, |
1370 | long npages, unsigned long uaddr, |
1371 | enum dma_data_direction direction, |
1372 | diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h |
1373 | index e1d3e5526b54..5d6599373f32 100644 |
1374 | --- a/arch/powerpc/platforms/powernv/pci.h |
1375 | +++ b/arch/powerpc/platforms/powernv/pci.h |
1376 | @@ -229,7 +229,7 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, |
1377 | |
1378 | /* Nvlink functions */ |
1379 | extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass); |
1380 | -extern void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm); |
1381 | +extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm); |
1382 | extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe); |
1383 | extern long pnv_npu_set_window(struct pnv_ioda_pe *npe, int num, |
1384 | struct iommu_table *tbl); |
1385 | diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c |
1386 | index 193e052fa0dd..bda18d8e1674 100644 |
1387 | --- a/arch/powerpc/platforms/pseries/dlpar.c |
1388 | +++ b/arch/powerpc/platforms/pseries/dlpar.c |
1389 | @@ -288,7 +288,6 @@ int dlpar_detach_node(struct device_node *dn) |
1390 | if (rc) |
1391 | return rc; |
1392 | |
1393 | - of_node_put(dn); /* Must decrement the refcount */ |
1394 | return 0; |
1395 | } |
1396 | |
1397 | diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c |
1398 | index dd1d5c62c374..d628afc26708 100644 |
1399 | --- a/arch/s390/kernel/crash_dump.c |
1400 | +++ b/arch/s390/kernel/crash_dump.c |
1401 | @@ -429,6 +429,20 @@ static void *nt_vmcoreinfo(void *ptr) |
1402 | } |
1403 | |
1404 | /* |
1405 | + * Initialize final note (needed for /proc/vmcore code) |
1406 | + */ |
1407 | +static void *nt_final(void *ptr) |
1408 | +{ |
1409 | + Elf64_Nhdr *note; |
1410 | + |
1411 | + note = (Elf64_Nhdr *) ptr; |
1412 | + note->n_namesz = 0; |
1413 | + note->n_descsz = 0; |
1414 | + note->n_type = 0; |
1415 | + return PTR_ADD(ptr, sizeof(Elf64_Nhdr)); |
1416 | +} |
1417 | + |
1418 | +/* |
1419 | * Initialize ELF header (new kernel) |
1420 | */ |
1421 | static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt) |
1422 | @@ -515,6 +529,7 @@ static void *notes_init(Elf64_Phdr *phdr, void *ptr, u64 notes_offset) |
1423 | if (sa->prefix != 0) |
1424 | ptr = fill_cpu_elf_notes(ptr, cpu++, sa); |
1425 | ptr = nt_vmcoreinfo(ptr); |
1426 | + ptr = nt_final(ptr); |
1427 | memset(phdr, 0, sizeof(*phdr)); |
1428 | phdr->p_type = PT_NOTE; |
1429 | phdr->p_offset = notes_offset; |
1430 | diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S |
1431 | index 6a7d737d514c..f3920d684b0e 100644 |
1432 | --- a/arch/s390/kernel/entry.S |
1433 | +++ b/arch/s390/kernel/entry.S |
1434 | @@ -314,6 +314,7 @@ ENTRY(system_call) |
1435 | lg %r14,__LC_VDSO_PER_CPU |
1436 | lmg %r0,%r10,__PT_R0(%r11) |
1437 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
1438 | +.Lsysc_exit_timer: |
1439 | stpt __LC_EXIT_TIMER |
1440 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
1441 | lmg %r11,%r15,__PT_R11(%r11) |
1442 | @@ -601,6 +602,7 @@ ENTRY(io_int_handler) |
1443 | lg %r14,__LC_VDSO_PER_CPU |
1444 | lmg %r0,%r10,__PT_R0(%r11) |
1445 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) |
1446 | +.Lio_exit_timer: |
1447 | stpt __LC_EXIT_TIMER |
1448 | mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER |
1449 | lmg %r11,%r15,__PT_R11(%r11) |
1450 | @@ -1124,15 +1126,23 @@ cleanup_critical: |
1451 | br %r14 |
1452 | |
1453 | .Lcleanup_sysc_restore: |
1454 | + # check if stpt has been executed |
1455 | clg %r9,BASED(.Lcleanup_sysc_restore_insn) |
1456 | + jh 0f |
1457 | + mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER |
1458 | + cghi %r11,__LC_SAVE_AREA_ASYNC |
1459 | je 0f |
1460 | + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER |
1461 | +0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8) |
1462 | + je 1f |
1463 | lg %r9,24(%r11) # get saved pointer to pt_regs |
1464 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1465 | mvc 0(64,%r11),__PT_R8(%r9) |
1466 | lmg %r0,%r7,__PT_R0(%r9) |
1467 | -0: lmg %r8,%r9,__LC_RETURN_PSW |
1468 | +1: lmg %r8,%r9,__LC_RETURN_PSW |
1469 | br %r14 |
1470 | .Lcleanup_sysc_restore_insn: |
1471 | + .quad .Lsysc_exit_timer |
1472 | .quad .Lsysc_done - 4 |
1473 | |
1474 | .Lcleanup_io_tif: |
1475 | @@ -1140,15 +1150,20 @@ cleanup_critical: |
1476 | br %r14 |
1477 | |
1478 | .Lcleanup_io_restore: |
1479 | + # check if stpt has been executed |
1480 | clg %r9,BASED(.Lcleanup_io_restore_insn) |
1481 | - je 0f |
1482 | + jh 0f |
1483 | + mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER |
1484 | +0: clg %r9,BASED(.Lcleanup_io_restore_insn+8) |
1485 | + je 1f |
1486 | lg %r9,24(%r11) # get saved r11 pointer to pt_regs |
1487 | mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) |
1488 | mvc 0(64,%r11),__PT_R8(%r9) |
1489 | lmg %r0,%r7,__PT_R0(%r9) |
1490 | -0: lmg %r8,%r9,__LC_RETURN_PSW |
1491 | +1: lmg %r8,%r9,__LC_RETURN_PSW |
1492 | br %r14 |
1493 | .Lcleanup_io_restore_insn: |
1494 | + .quad .Lio_exit_timer |
1495 | .quad .Lio_done - 4 |
1496 | |
1497 | .Lcleanup_idle: |
1498 | diff --git a/arch/um/kernel/initrd.c b/arch/um/kernel/initrd.c |
1499 | index 48bae81f8dca..6f6e7896e53f 100644 |
1500 | --- a/arch/um/kernel/initrd.c |
1501 | +++ b/arch/um/kernel/initrd.c |
1502 | @@ -14,7 +14,7 @@ |
1503 | static char *initrd __initdata = NULL; |
1504 | static int load_initrd(char *filename, void *buf, int size); |
1505 | |
1506 | -static int __init read_initrd(void) |
1507 | +int __init read_initrd(void) |
1508 | { |
1509 | void *area; |
1510 | long long size; |
1511 | @@ -46,8 +46,6 @@ static int __init read_initrd(void) |
1512 | return 0; |
1513 | } |
1514 | |
1515 | -__uml_postsetup(read_initrd); |
1516 | - |
1517 | static int __init uml_initrd_setup(char *line, int *add) |
1518 | { |
1519 | initrd = line; |
1520 | diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c |
1521 | index 4b85acd4020c..64a1fd06f3fd 100644 |
1522 | --- a/arch/um/kernel/um_arch.c |
1523 | +++ b/arch/um/kernel/um_arch.c |
1524 | @@ -338,11 +338,17 @@ int __init linux_main(int argc, char **argv) |
1525 | return start_uml(); |
1526 | } |
1527 | |
1528 | +int __init __weak read_initrd(void) |
1529 | +{ |
1530 | + return 0; |
1531 | +} |
1532 | + |
1533 | void __init setup_arch(char **cmdline_p) |
1534 | { |
1535 | stack_protections((unsigned long) &init_thread_info); |
1536 | setup_physmem(uml_physmem, uml_reserved, physmem_size, highmem); |
1537 | mem_total_pages(physmem_size, iomem_size, highmem); |
1538 | + read_initrd(); |
1539 | |
1540 | paging_init(); |
1541 | strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); |
1542 | diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h |
1543 | index ea148313570f..dead0f3921f3 100644 |
1544 | --- a/arch/x86/include/asm/uaccess.h |
1545 | +++ b/arch/x86/include/asm/uaccess.h |
1546 | @@ -324,10 +324,10 @@ do { \ |
1547 | #define __get_user_asm_u64(x, ptr, retval, errret) \ |
1548 | ({ \ |
1549 | __typeof__(ptr) __ptr = (ptr); \ |
1550 | - asm volatile(ASM_STAC "\n" \ |
1551 | + asm volatile("\n" \ |
1552 | "1: movl %2,%%eax\n" \ |
1553 | "2: movl %3,%%edx\n" \ |
1554 | - "3: " ASM_CLAC "\n" \ |
1555 | + "3:\n" \ |
1556 | ".section .fixup,\"ax\"\n" \ |
1557 | "4: mov %4,%0\n" \ |
1558 | " xorl %%eax,%%eax\n" \ |
1559 | @@ -336,7 +336,7 @@ do { \ |
1560 | ".previous\n" \ |
1561 | _ASM_EXTABLE(1b, 4b) \ |
1562 | _ASM_EXTABLE(2b, 4b) \ |
1563 | - : "=r" (retval), "=A"(x) \ |
1564 | + : "=r" (retval), "=&A"(x) \ |
1565 | : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \ |
1566 | "i" (errret), "0" (retval)); \ |
1567 | }) |
1568 | diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c |
1569 | index c2f8dde3255c..d5d44c452624 100644 |
1570 | --- a/arch/x86/kernel/fpu/init.c |
1571 | +++ b/arch/x86/kernel/fpu/init.c |
1572 | @@ -90,6 +90,7 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) |
1573 | * Boot time FPU feature detection code: |
1574 | */ |
1575 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
1576 | +EXPORT_SYMBOL_GPL(mxcsr_feature_mask); |
1577 | |
1578 | static void __init fpu__init_system_mxcsr(void) |
1579 | { |
1580 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
1581 | index 421a069b5429..a4a2bae7c274 100644 |
1582 | --- a/arch/x86/kvm/x86.c |
1583 | +++ b/arch/x86/kvm/x86.c |
1584 | @@ -1779,6 +1779,7 @@ static u64 __get_kvmclock_ns(struct kvm *kvm) |
1585 | { |
1586 | struct kvm_arch *ka = &kvm->arch; |
1587 | struct pvclock_vcpu_time_info hv_clock; |
1588 | + u64 ret; |
1589 | |
1590 | spin_lock(&ka->pvclock_gtod_sync_lock); |
1591 | if (!ka->use_master_clock) { |
1592 | @@ -1790,10 +1791,17 @@ static u64 __get_kvmclock_ns(struct kvm *kvm) |
1593 | hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; |
1594 | spin_unlock(&ka->pvclock_gtod_sync_lock); |
1595 | |
1596 | + /* both __this_cpu_read() and rdtsc() should be on the same cpu */ |
1597 | + get_cpu(); |
1598 | + |
1599 | kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, |
1600 | &hv_clock.tsc_shift, |
1601 | &hv_clock.tsc_to_system_mul); |
1602 | - return __pvclock_read_cycles(&hv_clock, rdtsc()); |
1603 | + ret = __pvclock_read_cycles(&hv_clock, rdtsc()); |
1604 | + |
1605 | + put_cpu(); |
1606 | + |
1607 | + return ret; |
1608 | } |
1609 | |
1610 | u64 get_kvmclock_ns(struct kvm *kvm) |
1611 | @@ -3307,11 +3315,14 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, |
1612 | } |
1613 | } |
1614 | |
1615 | +#define XSAVE_MXCSR_OFFSET 24 |
1616 | + |
1617 | static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, |
1618 | struct kvm_xsave *guest_xsave) |
1619 | { |
1620 | u64 xstate_bv = |
1621 | *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; |
1622 | + u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; |
1623 | |
1624 | if (boot_cpu_has(X86_FEATURE_XSAVE)) { |
1625 | /* |
1626 | @@ -3319,11 +3330,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, |
1627 | * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility |
1628 | * with old userspace. |
1629 | */ |
1630 | - if (xstate_bv & ~kvm_supported_xcr0()) |
1631 | + if (xstate_bv & ~kvm_supported_xcr0() || |
1632 | + mxcsr & ~mxcsr_feature_mask) |
1633 | return -EINVAL; |
1634 | load_xsave(vcpu, (u8 *)guest_xsave->region); |
1635 | } else { |
1636 | - if (xstate_bv & ~XFEATURE_MASK_FPSSE) |
1637 | + if (xstate_bv & ~XFEATURE_MASK_FPSSE || |
1638 | + mxcsr & ~mxcsr_feature_mask) |
1639 | return -EINVAL; |
1640 | memcpy(&vcpu->arch.guest_fpu.state.fxsave, |
1641 | guest_xsave->region, sizeof(struct fxregs_state)); |
1642 | @@ -4849,16 +4862,20 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, |
1643 | |
1644 | static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) |
1645 | { |
1646 | - /* TODO: String I/O for in kernel device */ |
1647 | - int r; |
1648 | + int r = 0, i; |
1649 | |
1650 | - if (vcpu->arch.pio.in) |
1651 | - r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, |
1652 | - vcpu->arch.pio.size, pd); |
1653 | - else |
1654 | - r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, |
1655 | - vcpu->arch.pio.port, vcpu->arch.pio.size, |
1656 | - pd); |
1657 | + for (i = 0; i < vcpu->arch.pio.count; i++) { |
1658 | + if (vcpu->arch.pio.in) |
1659 | + r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, |
1660 | + vcpu->arch.pio.size, pd); |
1661 | + else |
1662 | + r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, |
1663 | + vcpu->arch.pio.port, vcpu->arch.pio.size, |
1664 | + pd); |
1665 | + if (r) |
1666 | + break; |
1667 | + pd += vcpu->arch.pio.size; |
1668 | + } |
1669 | return r; |
1670 | } |
1671 | |
1672 | @@ -4896,6 +4913,8 @@ static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt, |
1673 | if (vcpu->arch.pio.count) |
1674 | goto data_avail; |
1675 | |
1676 | + memset(vcpu->arch.pio_data, 0, size * count); |
1677 | + |
1678 | ret = emulator_pio_in_out(vcpu, size, port, val, count, true); |
1679 | if (ret) { |
1680 | data_avail: |
1681 | diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c |
1682 | index 2944353253ed..a4e8432fc2fb 100644 |
1683 | --- a/drivers/acpi/pci_mcfg.c |
1684 | +++ b/drivers/acpi/pci_mcfg.c |
1685 | @@ -54,6 +54,7 @@ static struct mcfg_fixup mcfg_quirks[] = { |
1686 | |
1687 | #define QCOM_ECAM32(seg) \ |
1688 | { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } |
1689 | + |
1690 | QCOM_ECAM32(0), |
1691 | QCOM_ECAM32(1), |
1692 | QCOM_ECAM32(2), |
1693 | @@ -68,6 +69,7 @@ static struct mcfg_fixup mcfg_quirks[] = { |
1694 | { "HISI ", table_id, 0, (seg) + 1, MCFG_BUS_ANY, ops }, \ |
1695 | { "HISI ", table_id, 0, (seg) + 2, MCFG_BUS_ANY, ops }, \ |
1696 | { "HISI ", table_id, 0, (seg) + 3, MCFG_BUS_ANY, ops } |
1697 | + |
1698 | HISI_QUAD_DOM("HIP05 ", 0, &hisi_pcie_ops), |
1699 | HISI_QUAD_DOM("HIP06 ", 0, &hisi_pcie_ops), |
1700 | HISI_QUAD_DOM("HIP07 ", 0, &hisi_pcie_ops), |
1701 | @@ -77,6 +79,7 @@ static struct mcfg_fixup mcfg_quirks[] = { |
1702 | |
1703 | #define THUNDER_PEM_RES(addr, node) \ |
1704 | DEFINE_RES_MEM((addr) + ((u64) (node) << 44), 0x39 * SZ_16M) |
1705 | + |
1706 | #define THUNDER_PEM_QUIRK(rev, node) \ |
1707 | { "CAVIUM", "THUNDERX", rev, 4 + (10 * (node)), MCFG_BUS_ANY, \ |
1708 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x88001f000000UL, node) }, \ |
1709 | @@ -90,13 +93,16 @@ static struct mcfg_fixup mcfg_quirks[] = { |
1710 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x894057000000UL, node) }, \ |
1711 | { "CAVIUM", "THUNDERX", rev, 9 + (10 * (node)), MCFG_BUS_ANY, \ |
1712 | &thunder_pem_ecam_ops, THUNDER_PEM_RES(0x89808f000000UL, node) } |
1713 | - /* SoC pass2.x */ |
1714 | - THUNDER_PEM_QUIRK(1, 0), |
1715 | - THUNDER_PEM_QUIRK(1, 1), |
1716 | |
1717 | #define THUNDER_ECAM_QUIRK(rev, seg) \ |
1718 | { "CAVIUM", "THUNDERX", rev, seg, MCFG_BUS_ANY, \ |
1719 | &pci_thunder_ecam_ops } |
1720 | + |
1721 | + /* SoC pass2.x */ |
1722 | + THUNDER_PEM_QUIRK(1, 0), |
1723 | + THUNDER_PEM_QUIRK(1, 1), |
1724 | + THUNDER_ECAM_QUIRK(1, 10), |
1725 | + |
1726 | /* SoC pass1.x */ |
1727 | THUNDER_PEM_QUIRK(2, 0), /* off-chip devices */ |
1728 | THUNDER_PEM_QUIRK(2, 1), /* off-chip devices */ |
1729 | @@ -112,9 +118,11 @@ static struct mcfg_fixup mcfg_quirks[] = { |
1730 | #define XGENE_V1_ECAM_MCFG(rev, seg) \ |
1731 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ |
1732 | &xgene_v1_pcie_ecam_ops } |
1733 | + |
1734 | #define XGENE_V2_ECAM_MCFG(rev, seg) \ |
1735 | {"APM ", "XGENE ", rev, seg, MCFG_BUS_ANY, \ |
1736 | &xgene_v2_pcie_ecam_ops } |
1737 | + |
1738 | /* X-Gene SoC with v1 PCIe controller */ |
1739 | XGENE_V1_ECAM_MCFG(1, 0), |
1740 | XGENE_V1_ECAM_MCFG(1, 1), |
1741 | diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c |
1742 | index 652114ae1a8a..1fc8a671aabe 100644 |
1743 | --- a/drivers/block/drbd/drbd_req.c |
1744 | +++ b/drivers/block/drbd/drbd_req.c |
1745 | @@ -314,24 +314,32 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) |
1746 | } |
1747 | |
1748 | /* still holds resource->req_lock */ |
1749 | -static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) |
1750 | +static void drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) |
1751 | { |
1752 | struct drbd_device *device = req->device; |
1753 | D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); |
1754 | |
1755 | + if (!put) |
1756 | + return; |
1757 | + |
1758 | if (!atomic_sub_and_test(put, &req->completion_ref)) |
1759 | - return 0; |
1760 | + return; |
1761 | |
1762 | drbd_req_complete(req, m); |
1763 | |
1764 | + /* local completion may still come in later, |
1765 | + * we need to keep the req object around. */ |
1766 | + if (req->rq_state & RQ_LOCAL_ABORTED) |
1767 | + return; |
1768 | + |
1769 | if (req->rq_state & RQ_POSTPONED) { |
1770 | /* don't destroy the req object just yet, |
1771 | * but queue it for retry */ |
1772 | drbd_restart_request(req); |
1773 | - return 0; |
1774 | + return; |
1775 | } |
1776 | |
1777 | - return 1; |
1778 | + kref_put(&req->kref, drbd_req_destroy); |
1779 | } |
1780 | |
1781 | static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) |
1782 | @@ -518,12 +526,8 @@ static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, |
1783 | if (req->i.waiting) |
1784 | wake_up(&device->misc_wait); |
1785 | |
1786 | - if (c_put) { |
1787 | - if (drbd_req_put_completion_ref(req, m, c_put)) |
1788 | - kref_put(&req->kref, drbd_req_destroy); |
1789 | - } else { |
1790 | - kref_put(&req->kref, drbd_req_destroy); |
1791 | - } |
1792 | + drbd_req_put_completion_ref(req, m, c_put); |
1793 | + kref_put(&req->kref, drbd_req_destroy); |
1794 | } |
1795 | |
1796 | static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) |
1797 | @@ -1363,8 +1367,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request |
1798 | } |
1799 | |
1800 | out: |
1801 | - if (drbd_req_put_completion_ref(req, &m, 1)) |
1802 | - kref_put(&req->kref, drbd_req_destroy); |
1803 | + drbd_req_put_completion_ref(req, &m, 1); |
1804 | spin_unlock_irq(&resource->req_lock); |
1805 | |
1806 | /* Even though above is a kref_put(), this is safe. |
1807 | diff --git a/drivers/char/lp.c b/drivers/char/lp.c |
1808 | index 565e4cf04a02..8249762192d5 100644 |
1809 | --- a/drivers/char/lp.c |
1810 | +++ b/drivers/char/lp.c |
1811 | @@ -859,7 +859,11 @@ static int __init lp_setup (char *str) |
1812 | } else if (!strcmp(str, "auto")) { |
1813 | parport_nr[0] = LP_PARPORT_AUTO; |
1814 | } else if (!strcmp(str, "none")) { |
1815 | - parport_nr[parport_ptr++] = LP_PARPORT_NONE; |
1816 | + if (parport_ptr < LP_NO) |
1817 | + parport_nr[parport_ptr++] = LP_PARPORT_NONE; |
1818 | + else |
1819 | + printk(KERN_INFO "lp: too many ports, %s ignored.\n", |
1820 | + str); |
1821 | } else if (!strcmp(str, "reset")) { |
1822 | reset = 1; |
1823 | } |
1824 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c |
1825 | index 7e4a9d1296bb..6e0cbe092220 100644 |
1826 | --- a/drivers/char/mem.c |
1827 | +++ b/drivers/char/mem.c |
1828 | @@ -340,6 +340,11 @@ static const struct vm_operations_struct mmap_mem_ops = { |
1829 | static int mmap_mem(struct file *file, struct vm_area_struct *vma) |
1830 | { |
1831 | size_t size = vma->vm_end - vma->vm_start; |
1832 | + phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT; |
1833 | + |
1834 | + /* It's illegal to wrap around the end of the physical address space. */ |
1835 | + if (offset + (phys_addr_t)size < offset) |
1836 | + return -EINVAL; |
1837 | |
1838 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size)) |
1839 | return -EINVAL; |
1840 | diff --git a/drivers/char/tpm/tpm2_eventlog.c b/drivers/char/tpm/tpm2_eventlog.c |
1841 | index 513897cf9c4b..34a8afa69138 100644 |
1842 | --- a/drivers/char/tpm/tpm2_eventlog.c |
1843 | +++ b/drivers/char/tpm/tpm2_eventlog.c |
1844 | @@ -56,18 +56,24 @@ static int calc_tpm2_event_size(struct tcg_pcr_event2 *event, |
1845 | |
1846 | efispecid = (struct tcg_efi_specid_event *)event_header->event; |
1847 | |
1848 | - for (i = 0; (i < event->count) && (i < TPM2_ACTIVE_PCR_BANKS); |
1849 | - i++) { |
1850 | + /* Check if event is malformed. */ |
1851 | + if (event->count > efispecid->num_algs) |
1852 | + return 0; |
1853 | + |
1854 | + for (i = 0; i < event->count; i++) { |
1855 | halg_size = sizeof(event->digests[i].alg_id); |
1856 | memcpy(&halg, marker, halg_size); |
1857 | marker = marker + halg_size; |
1858 | - for (j = 0; (j < efispecid->num_algs); j++) { |
1859 | + for (j = 0; j < efispecid->num_algs; j++) { |
1860 | if (halg == efispecid->digest_sizes[j].alg_id) { |
1861 | - marker = marker + |
1862 | + marker += |
1863 | efispecid->digest_sizes[j].digest_size; |
1864 | break; |
1865 | } |
1866 | } |
1867 | + /* Algorithm without known length. Such event is unparseable. */ |
1868 | + if (j == efispecid->num_algs) |
1869 | + return 0; |
1870 | } |
1871 | |
1872 | event_field = (struct tcg_event_field *)marker; |
1873 | diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c |
1874 | index 86f355b6df1d..dc760117f5ad 100644 |
1875 | --- a/drivers/char/tpm/tpm_crb.c |
1876 | +++ b/drivers/char/tpm/tpm_crb.c |
1877 | @@ -176,8 +176,7 @@ static int crb_recv(struct tpm_chip *chip, u8 *buf, size_t count) |
1878 | |
1879 | memcpy_fromio(buf, priv->rsp, 6); |
1880 | expected = be32_to_cpup((__be32 *) &buf[2]); |
1881 | - |
1882 | - if (expected > count) |
1883 | + if (expected > count || expected < 6) |
1884 | return -EIO; |
1885 | |
1886 | memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6); |
1887 | diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c |
1888 | index e3a9155ee671..c6428771841f 100644 |
1889 | --- a/drivers/char/tpm/tpm_i2c_nuvoton.c |
1890 | +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c |
1891 | @@ -49,9 +49,10 @@ |
1892 | */ |
1893 | #define TPM_I2C_MAX_BUF_SIZE 32 |
1894 | #define TPM_I2C_RETRY_COUNT 32 |
1895 | -#define TPM_I2C_BUS_DELAY 1 /* msec */ |
1896 | -#define TPM_I2C_RETRY_DELAY_SHORT 2 /* msec */ |
1897 | -#define TPM_I2C_RETRY_DELAY_LONG 10 /* msec */ |
1898 | +#define TPM_I2C_BUS_DELAY 1000 /* usec */ |
1899 | +#define TPM_I2C_RETRY_DELAY_SHORT (2 * 1000) /* usec */ |
1900 | +#define TPM_I2C_RETRY_DELAY_LONG (10 * 1000) /* usec */ |
1901 | +#define TPM_I2C_DELAY_RANGE 300 /* usec */ |
1902 | |
1903 | #define OF_IS_TPM2 ((void *)1) |
1904 | #define I2C_IS_TPM2 1 |
1905 | @@ -123,7 +124,9 @@ static s32 i2c_nuvoton_write_status(struct i2c_client *client, u8 data) |
1906 | /* this causes the current command to be aborted */ |
1907 | for (i = 0, status = -1; i < TPM_I2C_RETRY_COUNT && status < 0; i++) { |
1908 | status = i2c_nuvoton_write_buf(client, TPM_STS, 1, &data); |
1909 | - msleep(TPM_I2C_BUS_DELAY); |
1910 | + if (status < 0) |
1911 | + usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY |
1912 | + + TPM_I2C_DELAY_RANGE); |
1913 | } |
1914 | return status; |
1915 | } |
1916 | @@ -160,7 +163,8 @@ static int i2c_nuvoton_get_burstcount(struct i2c_client *client, |
1917 | burst_count = min_t(u8, TPM_I2C_MAX_BUF_SIZE, data); |
1918 | break; |
1919 | } |
1920 | - msleep(TPM_I2C_BUS_DELAY); |
1921 | + usleep_range(TPM_I2C_BUS_DELAY, TPM_I2C_BUS_DELAY |
1922 | + + TPM_I2C_DELAY_RANGE); |
1923 | } while (time_before(jiffies, stop)); |
1924 | |
1925 | return burst_count; |
1926 | @@ -203,13 +207,17 @@ static int i2c_nuvoton_wait_for_stat(struct tpm_chip *chip, u8 mask, u8 value, |
1927 | return 0; |
1928 | |
1929 | /* use polling to wait for the event */ |
1930 | - ten_msec = jiffies + msecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); |
1931 | + ten_msec = jiffies + usecs_to_jiffies(TPM_I2C_RETRY_DELAY_LONG); |
1932 | stop = jiffies + timeout; |
1933 | do { |
1934 | if (time_before(jiffies, ten_msec)) |
1935 | - msleep(TPM_I2C_RETRY_DELAY_SHORT); |
1936 | + usleep_range(TPM_I2C_RETRY_DELAY_SHORT, |
1937 | + TPM_I2C_RETRY_DELAY_SHORT |
1938 | + + TPM_I2C_DELAY_RANGE); |
1939 | else |
1940 | - msleep(TPM_I2C_RETRY_DELAY_LONG); |
1941 | + usleep_range(TPM_I2C_RETRY_DELAY_LONG, |
1942 | + TPM_I2C_RETRY_DELAY_LONG |
1943 | + + TPM_I2C_DELAY_RANGE); |
1944 | status_valid = i2c_nuvoton_check_status(chip, mask, |
1945 | value); |
1946 | if (status_valid) |
1947 | diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c |
1948 | index 1b9d61ffe991..f01d083eced2 100644 |
1949 | --- a/drivers/char/tpm/tpm_ibmvtpm.c |
1950 | +++ b/drivers/char/tpm/tpm_ibmvtpm.c |
1951 | @@ -299,6 +299,8 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) |
1952 | } |
1953 | |
1954 | kfree(ibmvtpm); |
1955 | + /* For tpm_ibmvtpm_get_desired_dma */ |
1956 | + dev_set_drvdata(&vdev->dev, NULL); |
1957 | |
1958 | return 0; |
1959 | } |
1960 | @@ -313,14 +315,16 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev) |
1961 | static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev) |
1962 | { |
1963 | struct tpm_chip *chip = dev_get_drvdata(&vdev->dev); |
1964 | - struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); |
1965 | + struct ibmvtpm_dev *ibmvtpm; |
1966 | |
1967 | /* |
1968 | * ibmvtpm initializes at probe time, so the data we are |
1969 | * asking for may not be set yet. Estimate that 4K required |
1970 | * for TCE-mapped buffer in addition to CRQ. |
1971 | */ |
1972 | - if (!ibmvtpm) |
1973 | + if (chip) |
1974 | + ibmvtpm = dev_get_drvdata(&chip->dev); |
1975 | + else |
1976 | return CRQ_RES_BUF_SIZE + PAGE_SIZE; |
1977 | |
1978 | return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size; |
1979 | diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c |
1980 | index c0f296b5d413..fc0e9a2734ed 100644 |
1981 | --- a/drivers/char/tpm/tpm_tis_core.c |
1982 | +++ b/drivers/char/tpm/tpm_tis_core.c |
1983 | @@ -160,8 +160,10 @@ static int get_burstcount(struct tpm_chip *chip) |
1984 | u32 value; |
1985 | |
1986 | /* wait for burstcount */ |
1987 | - /* which timeout value, spec has 2 answers (c & d) */ |
1988 | - stop = jiffies + chip->timeout_d; |
1989 | + if (chip->flags & TPM_CHIP_FLAG_TPM2) |
1990 | + stop = jiffies + chip->timeout_a; |
1991 | + else |
1992 | + stop = jiffies + chip->timeout_d; |
1993 | do { |
1994 | rc = tpm_tis_read32(priv, TPM_STS(priv->locality), &value); |
1995 | if (rc < 0) |
1996 | diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c |
1997 | index 5292e5768a7e..88fe72ae967f 100644 |
1998 | --- a/drivers/char/tpm/tpm_tis_spi.c |
1999 | +++ b/drivers/char/tpm/tpm_tis_spi.c |
2000 | @@ -47,8 +47,8 @@ struct tpm_tis_spi_phy { |
2001 | struct tpm_tis_data priv; |
2002 | struct spi_device *spi_device; |
2003 | |
2004 | - u8 tx_buf[MAX_SPI_FRAMESIZE + 4]; |
2005 | - u8 rx_buf[MAX_SPI_FRAMESIZE + 4]; |
2006 | + u8 tx_buf[4]; |
2007 | + u8 rx_buf[4]; |
2008 | }; |
2009 | |
2010 | static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) |
2011 | @@ -56,122 +56,98 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da |
2012 | return container_of(data, struct tpm_tis_spi_phy, priv); |
2013 | } |
2014 | |
2015 | -static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, |
2016 | - u16 len, u8 *result) |
2017 | +static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, |
2018 | + u8 *buffer, u8 direction) |
2019 | { |
2020 | struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); |
2021 | - int ret, i; |
2022 | + int ret = 0; |
2023 | + int i; |
2024 | struct spi_message m; |
2025 | - struct spi_transfer spi_xfer = { |
2026 | - .tx_buf = phy->tx_buf, |
2027 | - .rx_buf = phy->rx_buf, |
2028 | - .len = 4, |
2029 | - }; |
2030 | + struct spi_transfer spi_xfer; |
2031 | + u8 transfer_len; |
2032 | |
2033 | - if (len > MAX_SPI_FRAMESIZE) |
2034 | - return -ENOMEM; |
2035 | + spi_bus_lock(phy->spi_device->master); |
2036 | |
2037 | - phy->tx_buf[0] = 0x80 | (len - 1); |
2038 | - phy->tx_buf[1] = 0xd4; |
2039 | - phy->tx_buf[2] = (addr >> 8) & 0xFF; |
2040 | - phy->tx_buf[3] = addr & 0xFF; |
2041 | + while (len) { |
2042 | + transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); |
2043 | |
2044 | - spi_xfer.cs_change = 1; |
2045 | - spi_message_init(&m); |
2046 | - spi_message_add_tail(&spi_xfer, &m); |
2047 | + phy->tx_buf[0] = direction | (transfer_len - 1); |
2048 | + phy->tx_buf[1] = 0xd4; |
2049 | + phy->tx_buf[2] = addr >> 8; |
2050 | + phy->tx_buf[3] = addr; |
2051 | + |
2052 | + memset(&spi_xfer, 0, sizeof(spi_xfer)); |
2053 | + spi_xfer.tx_buf = phy->tx_buf; |
2054 | + spi_xfer.rx_buf = phy->rx_buf; |
2055 | + spi_xfer.len = 4; |
2056 | + spi_xfer.cs_change = 1; |
2057 | |
2058 | - spi_bus_lock(phy->spi_device->master); |
2059 | - ret = spi_sync_locked(phy->spi_device, &m); |
2060 | - if (ret < 0) |
2061 | - goto exit; |
2062 | - |
2063 | - memset(phy->tx_buf, 0, len); |
2064 | - |
2065 | - /* According to TCG PTP specification, if there is no TPM present at |
2066 | - * all, then the design has a weak pull-up on MISO. If a TPM is not |
2067 | - * present, a pull-up on MISO means that the SB controller sees a 1, |
2068 | - * and will latch in 0xFF on the read. |
2069 | - */ |
2070 | - for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { |
2071 | - spi_xfer.len = 1; |
2072 | spi_message_init(&m); |
2073 | spi_message_add_tail(&spi_xfer, &m); |
2074 | ret = spi_sync_locked(phy->spi_device, &m); |
2075 | if (ret < 0) |
2076 | goto exit; |
2077 | - } |
2078 | - |
2079 | - spi_xfer.cs_change = 0; |
2080 | - spi_xfer.len = len; |
2081 | - spi_xfer.rx_buf = result; |
2082 | - |
2083 | - spi_message_init(&m); |
2084 | - spi_message_add_tail(&spi_xfer, &m); |
2085 | - ret = spi_sync_locked(phy->spi_device, &m); |
2086 | - |
2087 | -exit: |
2088 | - spi_bus_unlock(phy->spi_device->master); |
2089 | - return ret; |
2090 | -} |
2091 | - |
2092 | -static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, |
2093 | - u16 len, u8 *value) |
2094 | -{ |
2095 | - struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); |
2096 | - int ret, i; |
2097 | - struct spi_message m; |
2098 | - struct spi_transfer spi_xfer = { |
2099 | - .tx_buf = phy->tx_buf, |
2100 | - .rx_buf = phy->rx_buf, |
2101 | - .len = 4, |
2102 | - }; |
2103 | - |
2104 | - if (len > MAX_SPI_FRAMESIZE) |
2105 | - return -ENOMEM; |
2106 | - |
2107 | - phy->tx_buf[0] = len - 1; |
2108 | - phy->tx_buf[1] = 0xd4; |
2109 | - phy->tx_buf[2] = (addr >> 8) & 0xFF; |
2110 | - phy->tx_buf[3] = addr & 0xFF; |
2111 | |
2112 | - spi_xfer.cs_change = 1; |
2113 | - spi_message_init(&m); |
2114 | - spi_message_add_tail(&spi_xfer, &m); |
2115 | + if ((phy->rx_buf[3] & 0x01) == 0) { |
2116 | + // handle SPI wait states |
2117 | + phy->tx_buf[0] = 0; |
2118 | + |
2119 | + for (i = 0; i < TPM_RETRY; i++) { |
2120 | + spi_xfer.len = 1; |
2121 | + spi_message_init(&m); |
2122 | + spi_message_add_tail(&spi_xfer, &m); |
2123 | + ret = spi_sync_locked(phy->spi_device, &m); |
2124 | + if (ret < 0) |
2125 | + goto exit; |
2126 | + if (phy->rx_buf[0] & 0x01) |
2127 | + break; |
2128 | + } |
2129 | + |
2130 | + if (i == TPM_RETRY) { |
2131 | + ret = -ETIMEDOUT; |
2132 | + goto exit; |
2133 | + } |
2134 | + } |
2135 | + |
2136 | + spi_xfer.cs_change = 0; |
2137 | + spi_xfer.len = transfer_len; |
2138 | + spi_xfer.delay_usecs = 5; |
2139 | + |
2140 | + if (direction) { |
2141 | + spi_xfer.tx_buf = NULL; |
2142 | + spi_xfer.rx_buf = buffer; |
2143 | + } else { |
2144 | + spi_xfer.tx_buf = buffer; |
2145 | + spi_xfer.rx_buf = NULL; |
2146 | + } |
2147 | |
2148 | - spi_bus_lock(phy->spi_device->master); |
2149 | - ret = spi_sync_locked(phy->spi_device, &m); |
2150 | - if (ret < 0) |
2151 | - goto exit; |
2152 | - |
2153 | - memset(phy->tx_buf, 0, len); |
2154 | - |
2155 | - /* According to TCG PTP specification, if there is no TPM present at |
2156 | - * all, then the design has a weak pull-up on MISO. If a TPM is not |
2157 | - * present, a pull-up on MISO means that the SB controller sees a 1, |
2158 | - * and will latch in 0xFF on the read. |
2159 | - */ |
2160 | - for (i = 0; (phy->rx_buf[0] & 0x01) == 0 && i < TPM_RETRY; i++) { |
2161 | - spi_xfer.len = 1; |
2162 | spi_message_init(&m); |
2163 | spi_message_add_tail(&spi_xfer, &m); |
2164 | ret = spi_sync_locked(phy->spi_device, &m); |
2165 | if (ret < 0) |
2166 | goto exit; |
2167 | - } |
2168 | |
2169 | - spi_xfer.len = len; |
2170 | - spi_xfer.tx_buf = value; |
2171 | - spi_xfer.cs_change = 0; |
2172 | - spi_xfer.tx_buf = value; |
2173 | - spi_message_init(&m); |
2174 | - spi_message_add_tail(&spi_xfer, &m); |
2175 | - ret = spi_sync_locked(phy->spi_device, &m); |
2176 | + len -= transfer_len; |
2177 | + buffer += transfer_len; |
2178 | + } |
2179 | |
2180 | exit: |
2181 | spi_bus_unlock(phy->spi_device->master); |
2182 | return ret; |
2183 | } |
2184 | |
2185 | +static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, |
2186 | + u16 len, u8 *result) |
2187 | +{ |
2188 | + return tpm_tis_spi_transfer(data, addr, len, result, 0x80); |
2189 | +} |
2190 | + |
2191 | +static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, |
2192 | + u16 len, u8 *value) |
2193 | +{ |
2194 | + return tpm_tis_spi_transfer(data, addr, len, value, 0); |
2195 | +} |
2196 | + |
2197 | static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) |
2198 | { |
2199 | int rc; |
2200 | diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c |
2201 | index 548b90be7685..2706be7ed334 100644 |
2202 | --- a/drivers/cpuidle/cpuidle.c |
2203 | +++ b/drivers/cpuidle/cpuidle.c |
2204 | @@ -111,7 +111,8 @@ void cpuidle_use_deepest_state(bool enable) |
2205 | |
2206 | preempt_disable(); |
2207 | dev = cpuidle_get_device(); |
2208 | - dev->use_deepest_state = enable; |
2209 | + if (dev) |
2210 | + dev->use_deepest_state = enable; |
2211 | preempt_enable(); |
2212 | } |
2213 | |
2214 | diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c |
2215 | index 82dab1692264..3aea55698165 100644 |
2216 | --- a/drivers/edac/amd64_edac.c |
2217 | +++ b/drivers/edac/amd64_edac.c |
2218 | @@ -782,24 +782,26 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan) |
2219 | |
2220 | static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl) |
2221 | { |
2222 | - u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases; |
2223 | - int dimm, size0, size1; |
2224 | + int dimm, size0, size1, cs0, cs1; |
2225 | |
2226 | edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl); |
2227 | |
2228 | for (dimm = 0; dimm < 4; dimm++) { |
2229 | size0 = 0; |
2230 | + cs0 = dimm * 2; |
2231 | |
2232 | - if (dcsb[dimm*2] & DCSB_CS_ENABLE) |
2233 | - size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); |
2234 | + if (csrow_enabled(cs0, ctrl, pvt)) |
2235 | + size0 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs0); |
2236 | |
2237 | size1 = 0; |
2238 | - if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE) |
2239 | - size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, dimm); |
2240 | + cs1 = dimm * 2 + 1; |
2241 | + |
2242 | + if (csrow_enabled(cs1, ctrl, pvt)) |
2243 | + size1 = pvt->ops->dbam_to_cs(pvt, ctrl, 0, cs1); |
2244 | |
2245 | amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n", |
2246 | - dimm * 2, size0, |
2247 | - dimm * 2 + 1, size1); |
2248 | + cs0, size0, |
2249 | + cs1, size1); |
2250 | } |
2251 | } |
2252 | |
2253 | @@ -2756,26 +2758,22 @@ static void read_mc_regs(struct amd64_pvt *pvt) |
2254 | * encompasses |
2255 | * |
2256 | */ |
2257 | -static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr) |
2258 | +static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig) |
2259 | { |
2260 | - u32 cs_mode, nr_pages; |
2261 | u32 dbam = dct ? pvt->dbam1 : pvt->dbam0; |
2262 | + int csrow_nr = csrow_nr_orig; |
2263 | + u32 cs_mode, nr_pages; |
2264 | |
2265 | + if (!pvt->umc) |
2266 | + csrow_nr >>= 1; |
2267 | |
2268 | - /* |
2269 | - * The math on this doesn't look right on the surface because x/2*4 can |
2270 | - * be simplified to x*2 but this expression makes use of the fact that |
2271 | - * it is integral math where 1/2=0. This intermediate value becomes the |
2272 | - * number of bits to shift the DBAM register to extract the proper CSROW |
2273 | - * field. |
2274 | - */ |
2275 | - cs_mode = DBAM_DIMM(csrow_nr / 2, dbam); |
2276 | + cs_mode = DBAM_DIMM(csrow_nr, dbam); |
2277 | |
2278 | - nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, (csrow_nr / 2)) |
2279 | - << (20 - PAGE_SHIFT); |
2280 | + nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr); |
2281 | + nr_pages <<= 20 - PAGE_SHIFT; |
2282 | |
2283 | edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n", |
2284 | - csrow_nr, dct, cs_mode); |
2285 | + csrow_nr_orig, dct, cs_mode); |
2286 | edac_dbg(0, "nr_pages/channel: %u\n", nr_pages); |
2287 | |
2288 | return nr_pages; |
2289 | diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c |
2290 | index 874ff32db366..00cfed3c3e1a 100644 |
2291 | --- a/drivers/firmware/ti_sci.c |
2292 | +++ b/drivers/firmware/ti_sci.c |
2293 | @@ -202,7 +202,8 @@ static int ti_sci_debugfs_create(struct platform_device *pdev, |
2294 | info->debug_buffer[info->debug_region_size] = 0; |
2295 | |
2296 | info->d = debugfs_create_file(strncat(debug_name, dev_name(dev), |
2297 | - sizeof(debug_name)), |
2298 | + sizeof(debug_name) - |
2299 | + sizeof("ti_sci_debug@")), |
2300 | 0444, NULL, info, &ti_sci_debug_fops); |
2301 | if (IS_ERR(info->d)) |
2302 | return PTR_ERR(info->d); |
2303 | diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c |
2304 | index efc85a279d54..fd9bce9889d0 100644 |
2305 | --- a/drivers/gpio/gpio-omap.c |
2306 | +++ b/drivers/gpio/gpio-omap.c |
2307 | @@ -208,9 +208,11 @@ static inline void omap_gpio_dbck_disable(struct gpio_bank *bank) |
2308 | * OMAP's debounce time is in 31us steps |
2309 | * <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31 |
2310 | * so we need to convert and round up to the closest unit. |
2311 | + * |
2312 | + * Return: 0 on success, negative error otherwise. |
2313 | */ |
2314 | -static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, |
2315 | - unsigned debounce) |
2316 | +static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, |
2317 | + unsigned debounce) |
2318 | { |
2319 | void __iomem *reg; |
2320 | u32 val; |
2321 | @@ -218,11 +220,12 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, |
2322 | bool enable = !!debounce; |
2323 | |
2324 | if (!bank->dbck_flag) |
2325 | - return; |
2326 | + return -ENOTSUPP; |
2327 | |
2328 | if (enable) { |
2329 | debounce = DIV_ROUND_UP(debounce, 31) - 1; |
2330 | - debounce &= OMAP4_GPIO_DEBOUNCINGTIME_MASK; |
2331 | + if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce) |
2332 | + return -EINVAL; |
2333 | } |
2334 | |
2335 | l = BIT(offset); |
2336 | @@ -255,6 +258,8 @@ static void omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset, |
2337 | bank->context.debounce = debounce; |
2338 | bank->context.debounce_en = val; |
2339 | } |
2340 | + |
2341 | + return 0; |
2342 | } |
2343 | |
2344 | /** |
2345 | @@ -964,14 +969,20 @@ static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset, |
2346 | { |
2347 | struct gpio_bank *bank; |
2348 | unsigned long flags; |
2349 | + int ret; |
2350 | |
2351 | bank = gpiochip_get_data(chip); |
2352 | |
2353 | raw_spin_lock_irqsave(&bank->lock, flags); |
2354 | - omap2_set_gpio_debounce(bank, offset, debounce); |
2355 | + ret = omap2_set_gpio_debounce(bank, offset, debounce); |
2356 | raw_spin_unlock_irqrestore(&bank->lock, flags); |
2357 | |
2358 | - return 0; |
2359 | + if (ret) |
2360 | + dev_info(chip->parent, |
2361 | + "Could not set line %u debounce to %u microseconds (%d)", |
2362 | + offset, debounce, ret); |
2363 | + |
2364 | + return ret; |
2365 | } |
2366 | |
2367 | static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset, |
2368 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
2369 | index d4452d8f76ca..33541acdf329 100644 |
2370 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
2371 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c |
2372 | @@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm) |
2373 | a.full = dfixed_const(available_bandwidth); |
2374 | b.full = dfixed_const(wm->num_heads); |
2375 | a.full = dfixed_div(a, b); |
2376 | + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); |
2377 | + tmp = min(dfixed_trunc(a), tmp); |
2378 | |
2379 | - b.full = dfixed_const(mc_latency + 512); |
2380 | - c.full = dfixed_const(wm->disp_clk); |
2381 | - b.full = dfixed_div(b, c); |
2382 | - |
2383 | - c.full = dfixed_const(dmif_size); |
2384 | - b.full = dfixed_div(c, b); |
2385 | - |
2386 | - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); |
2387 | - |
2388 | - b.full = dfixed_const(1000); |
2389 | - c.full = dfixed_const(wm->disp_clk); |
2390 | - b.full = dfixed_div(c, b); |
2391 | - c.full = dfixed_const(wm->bytes_per_pixel); |
2392 | - b.full = dfixed_mul(b, c); |
2393 | - |
2394 | - lb_fill_bw = min(tmp, dfixed_trunc(b)); |
2395 | + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); |
2396 | |
2397 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); |
2398 | b.full = dfixed_const(1000); |
2399 | @@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, |
2400 | { |
2401 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; |
2402 | struct dce10_wm_params wm_low, wm_high; |
2403 | - u32 pixel_period; |
2404 | + u32 active_time; |
2405 | u32 line_time = 0; |
2406 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
2407 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
2408 | |
2409 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
2410 | - pixel_period = 1000000 / (u32)mode->clock; |
2411 | - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); |
2412 | + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; |
2413 | + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); |
2414 | |
2415 | /* watermark for high clocks */ |
2416 | if (adev->pm.dpm_enabled) { |
2417 | @@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, |
2418 | |
2419 | wm_high.disp_clk = mode->clock; |
2420 | wm_high.src_width = mode->crtc_hdisplay; |
2421 | - wm_high.active_time = mode->crtc_hdisplay * pixel_period; |
2422 | + wm_high.active_time = active_time; |
2423 | wm_high.blank_time = line_time - wm_high.active_time; |
2424 | wm_high.interlaced = false; |
2425 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2426 | @@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev, |
2427 | |
2428 | wm_low.disp_clk = mode->clock; |
2429 | wm_low.src_width = mode->crtc_hdisplay; |
2430 | - wm_low.active_time = mode->crtc_hdisplay * pixel_period; |
2431 | + wm_low.active_time = active_time; |
2432 | wm_low.blank_time = line_time - wm_low.active_time; |
2433 | wm_low.interlaced = false; |
2434 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2435 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
2436 | index 5b24e89552ec..1388f8a44a2b 100644 |
2437 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
2438 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c |
2439 | @@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) |
2440 | a.full = dfixed_const(available_bandwidth); |
2441 | b.full = dfixed_const(wm->num_heads); |
2442 | a.full = dfixed_div(a, b); |
2443 | + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); |
2444 | + tmp = min(dfixed_trunc(a), tmp); |
2445 | |
2446 | - b.full = dfixed_const(mc_latency + 512); |
2447 | - c.full = dfixed_const(wm->disp_clk); |
2448 | - b.full = dfixed_div(b, c); |
2449 | - |
2450 | - c.full = dfixed_const(dmif_size); |
2451 | - b.full = dfixed_div(c, b); |
2452 | - |
2453 | - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); |
2454 | - |
2455 | - b.full = dfixed_const(1000); |
2456 | - c.full = dfixed_const(wm->disp_clk); |
2457 | - b.full = dfixed_div(c, b); |
2458 | - c.full = dfixed_const(wm->bytes_per_pixel); |
2459 | - b.full = dfixed_mul(b, c); |
2460 | - |
2461 | - lb_fill_bw = min(tmp, dfixed_trunc(b)); |
2462 | + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); |
2463 | |
2464 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); |
2465 | b.full = dfixed_const(1000); |
2466 | @@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, |
2467 | { |
2468 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; |
2469 | struct dce10_wm_params wm_low, wm_high; |
2470 | - u32 pixel_period; |
2471 | + u32 active_time; |
2472 | u32 line_time = 0; |
2473 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
2474 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
2475 | |
2476 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
2477 | - pixel_period = 1000000 / (u32)mode->clock; |
2478 | - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); |
2479 | + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; |
2480 | + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); |
2481 | |
2482 | /* watermark for high clocks */ |
2483 | if (adev->pm.dpm_enabled) { |
2484 | @@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, |
2485 | |
2486 | wm_high.disp_clk = mode->clock; |
2487 | wm_high.src_width = mode->crtc_hdisplay; |
2488 | - wm_high.active_time = mode->crtc_hdisplay * pixel_period; |
2489 | + wm_high.active_time = active_time; |
2490 | wm_high.blank_time = line_time - wm_high.active_time; |
2491 | wm_high.interlaced = false; |
2492 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2493 | @@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, |
2494 | |
2495 | wm_low.disp_clk = mode->clock; |
2496 | wm_low.src_width = mode->crtc_hdisplay; |
2497 | - wm_low.active_time = mode->crtc_hdisplay * pixel_period; |
2498 | + wm_low.active_time = active_time; |
2499 | wm_low.blank_time = line_time - wm_low.active_time; |
2500 | wm_low.interlaced = false; |
2501 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2502 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |
2503 | index 809aa94a0cc1..ea5365580b2d 100644 |
2504 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |
2505 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c |
2506 | @@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm) |
2507 | a.full = dfixed_const(available_bandwidth); |
2508 | b.full = dfixed_const(wm->num_heads); |
2509 | a.full = dfixed_div(a, b); |
2510 | + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); |
2511 | + tmp = min(dfixed_trunc(a), tmp); |
2512 | |
2513 | - b.full = dfixed_const(mc_latency + 512); |
2514 | - c.full = dfixed_const(wm->disp_clk); |
2515 | - b.full = dfixed_div(b, c); |
2516 | - |
2517 | - c.full = dfixed_const(dmif_size); |
2518 | - b.full = dfixed_div(c, b); |
2519 | - |
2520 | - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); |
2521 | - |
2522 | - b.full = dfixed_const(1000); |
2523 | - c.full = dfixed_const(wm->disp_clk); |
2524 | - b.full = dfixed_div(c, b); |
2525 | - c.full = dfixed_const(wm->bytes_per_pixel); |
2526 | - b.full = dfixed_mul(b, c); |
2527 | - |
2528 | - lb_fill_bw = min(tmp, dfixed_trunc(b)); |
2529 | + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); |
2530 | |
2531 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); |
2532 | b.full = dfixed_const(1000); |
2533 | @@ -986,18 +973,18 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, |
2534 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; |
2535 | struct dce6_wm_params wm_low, wm_high; |
2536 | u32 dram_channels; |
2537 | - u32 pixel_period; |
2538 | + u32 active_time; |
2539 | u32 line_time = 0; |
2540 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
2541 | u32 priority_a_mark = 0, priority_b_mark = 0; |
2542 | u32 priority_a_cnt = PRIORITY_OFF; |
2543 | u32 priority_b_cnt = PRIORITY_OFF; |
2544 | - u32 tmp, arb_control3; |
2545 | + u32 tmp, arb_control3, lb_vblank_lead_lines = 0; |
2546 | fixed20_12 a, b, c; |
2547 | |
2548 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
2549 | - pixel_period = 1000000 / (u32)mode->clock; |
2550 | - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); |
2551 | + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; |
2552 | + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); |
2553 | priority_a_cnt = 0; |
2554 | priority_b_cnt = 0; |
2555 | |
2556 | @@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, |
2557 | |
2558 | wm_high.disp_clk = mode->clock; |
2559 | wm_high.src_width = mode->crtc_hdisplay; |
2560 | - wm_high.active_time = mode->crtc_hdisplay * pixel_period; |
2561 | + wm_high.active_time = active_time; |
2562 | wm_high.blank_time = line_time - wm_high.active_time; |
2563 | wm_high.interlaced = false; |
2564 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2565 | @@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, |
2566 | |
2567 | wm_low.disp_clk = mode->clock; |
2568 | wm_low.src_width = mode->crtc_hdisplay; |
2569 | - wm_low.active_time = mode->crtc_hdisplay * pixel_period; |
2570 | + wm_low.active_time = active_time; |
2571 | wm_low.blank_time = line_time - wm_low.active_time; |
2572 | wm_low.interlaced = false; |
2573 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2574 | @@ -1104,6 +1091,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, |
2575 | c.full = dfixed_div(c, a); |
2576 | priority_b_mark = dfixed_trunc(c); |
2577 | priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK; |
2578 | + |
2579 | + lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); |
2580 | } |
2581 | |
2582 | /* select wm A */ |
2583 | @@ -1133,6 +1122,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev, |
2584 | /* save values for DPM */ |
2585 | amdgpu_crtc->line_time = line_time; |
2586 | amdgpu_crtc->wm_high = latency_watermark_a; |
2587 | + |
2588 | + /* Save number of lines the linebuffer leads before the scanout */ |
2589 | + amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; |
2590 | } |
2591 | |
2592 | /* watermark setup */ |
2593 | diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
2594 | index d2590d75aa11..e52fc925b414 100644 |
2595 | --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
2596 | +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c |
2597 | @@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm) |
2598 | a.full = dfixed_const(available_bandwidth); |
2599 | b.full = dfixed_const(wm->num_heads); |
2600 | a.full = dfixed_div(a, b); |
2601 | + tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); |
2602 | + tmp = min(dfixed_trunc(a), tmp); |
2603 | |
2604 | - b.full = dfixed_const(mc_latency + 512); |
2605 | - c.full = dfixed_const(wm->disp_clk); |
2606 | - b.full = dfixed_div(b, c); |
2607 | - |
2608 | - c.full = dfixed_const(dmif_size); |
2609 | - b.full = dfixed_div(c, b); |
2610 | - |
2611 | - tmp = min(dfixed_trunc(a), dfixed_trunc(b)); |
2612 | - |
2613 | - b.full = dfixed_const(1000); |
2614 | - c.full = dfixed_const(wm->disp_clk); |
2615 | - b.full = dfixed_div(c, b); |
2616 | - c.full = dfixed_const(wm->bytes_per_pixel); |
2617 | - b.full = dfixed_mul(b, c); |
2618 | - |
2619 | - lb_fill_bw = min(tmp, dfixed_trunc(b)); |
2620 | + lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); |
2621 | |
2622 | a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); |
2623 | b.full = dfixed_const(1000); |
2624 | @@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, |
2625 | { |
2626 | struct drm_display_mode *mode = &amdgpu_crtc->base.mode; |
2627 | struct dce8_wm_params wm_low, wm_high; |
2628 | - u32 pixel_period; |
2629 | + u32 active_time; |
2630 | u32 line_time = 0; |
2631 | u32 latency_watermark_a = 0, latency_watermark_b = 0; |
2632 | u32 tmp, wm_mask, lb_vblank_lead_lines = 0; |
2633 | |
2634 | if (amdgpu_crtc->base.enabled && num_heads && mode) { |
2635 | - pixel_period = 1000000 / (u32)mode->clock; |
2636 | - line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535); |
2637 | + active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock; |
2638 | + line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535); |
2639 | |
2640 | /* watermark for high clocks */ |
2641 | if (adev->pm.dpm_enabled) { |
2642 | @@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, |
2643 | |
2644 | wm_high.disp_clk = mode->clock; |
2645 | wm_high.src_width = mode->crtc_hdisplay; |
2646 | - wm_high.active_time = mode->crtc_hdisplay * pixel_period; |
2647 | + wm_high.active_time = active_time; |
2648 | wm_high.blank_time = line_time - wm_high.active_time; |
2649 | wm_high.interlaced = false; |
2650 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2651 | @@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev, |
2652 | |
2653 | wm_low.disp_clk = mode->clock; |
2654 | wm_low.src_width = mode->crtc_hdisplay; |
2655 | - wm_low.active_time = mode->crtc_hdisplay * pixel_period; |
2656 | + wm_low.active_time = active_time; |
2657 | wm_low.blank_time = line_time - wm_low.active_time; |
2658 | wm_low.interlaced = false; |
2659 | if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
2660 | diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c |
2661 | index ba58f1b11d1e..883b5cfe9504 100644 |
2662 | --- a/drivers/gpu/drm/drm_edid.c |
2663 | +++ b/drivers/gpu/drm/drm_edid.c |
2664 | @@ -79,6 +79,8 @@ |
2665 | #define EDID_QUIRK_FORCE_12BPC (1 << 9) |
2666 | /* Force 6bpc */ |
2667 | #define EDID_QUIRK_FORCE_6BPC (1 << 10) |
2668 | +/* Force 10bpc */ |
2669 | +#define EDID_QUIRK_FORCE_10BPC (1 << 11) |
2670 | |
2671 | struct detailed_mode_closure { |
2672 | struct drm_connector *connector; |
2673 | @@ -121,6 +123,9 @@ static const struct edid_quirk { |
2674 | { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 | |
2675 | EDID_QUIRK_DETAILED_IN_CM }, |
2676 | |
2677 | + /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */ |
2678 | + { "LGD", 764, EDID_QUIRK_FORCE_10BPC }, |
2679 | + |
2680 | /* LG Philips LCD LP154W01-A5 */ |
2681 | { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, |
2682 | { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE }, |
2683 | @@ -4174,6 +4179,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) |
2684 | if (quirks & EDID_QUIRK_FORCE_8BPC) |
2685 | connector->display_info.bpc = 8; |
2686 | |
2687 | + if (quirks & EDID_QUIRK_FORCE_10BPC) |
2688 | + connector->display_info.bpc = 10; |
2689 | + |
2690 | if (quirks & EDID_QUIRK_FORCE_12BPC) |
2691 | connector->display_info.bpc = 12; |
2692 | |
2693 | diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c |
2694 | index 9673bcc3b6ad..ed9a8e934d9a 100644 |
2695 | --- a/drivers/gpu/drm/i915/i915_gem_stolen.c |
2696 | +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c |
2697 | @@ -410,6 +410,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) |
2698 | return 0; |
2699 | } |
2700 | |
2701 | + if (intel_vgpu_active(dev_priv)) { |
2702 | + DRM_INFO("iGVT-g active, disabling use of stolen memory\n"); |
2703 | + return 0; |
2704 | + } |
2705 | + |
2706 | #ifdef CONFIG_INTEL_IOMMU |
2707 | if (intel_iommu_gfx_mapped && INTEL_GEN(dev_priv) < 8) { |
2708 | DRM_INFO("DMAR active, disabling use of stolen memory\n"); |
2709 | diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c |
2710 | index a9182d5e6011..ce37cbd274da 100644 |
2711 | --- a/drivers/gpu/drm/nouveau/nv50_display.c |
2712 | +++ b/drivers/gpu/drm/nouveau/nv50_display.c |
2713 | @@ -906,11 +906,9 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) |
2714 | if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point))) |
2715 | asyw->set.point = true; |
2716 | |
2717 | - if (!varm || asym || armw->state.fb != asyw->state.fb) { |
2718 | - ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh); |
2719 | - if (ret) |
2720 | - return ret; |
2721 | - } |
2722 | + ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh); |
2723 | + if (ret) |
2724 | + return ret; |
2725 | } else |
2726 | if (varm) { |
2727 | nv50_wndw_atomic_check_release(wndw, asyw, harm); |
2728 | @@ -1115,9 +1113,13 @@ static void |
2729 | nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh, |
2730 | struct nv50_wndw_atom *asyw) |
2731 | { |
2732 | - asyh->curs.handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle; |
2733 | - asyh->curs.offset = asyw->image.offset; |
2734 | - asyh->set.curs = asyh->curs.visible; |
2735 | + u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle; |
2736 | + u32 offset = asyw->image.offset; |
2737 | + if (asyh->curs.handle != handle || asyh->curs.offset != offset) { |
2738 | + asyh->curs.handle = handle; |
2739 | + asyh->curs.offset = offset; |
2740 | + asyh->set.curs = asyh->curs.visible; |
2741 | + } |
2742 | } |
2743 | |
2744 | static void |
2745 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c |
2746 | index df949fa7d05d..be691a7b972f 100644 |
2747 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c |
2748 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c |
2749 | @@ -146,7 +146,7 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode) |
2750 | poll = false; |
2751 | } |
2752 | |
2753 | - if (list_empty(&therm->alarm.head) && poll) |
2754 | + if (poll) |
2755 | nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm); |
2756 | spin_unlock_irqrestore(&therm->lock, flags); |
2757 | |
2758 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c |
2759 | index 91198d79393a..e2feccec25f5 100644 |
2760 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c |
2761 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c |
2762 | @@ -83,7 +83,7 @@ nvkm_fan_update(struct nvkm_fan *fan, bool immediate, int target) |
2763 | spin_unlock_irqrestore(&fan->lock, flags); |
2764 | |
2765 | /* schedule next fan update, if not at target speed already */ |
2766 | - if (list_empty(&fan->alarm.head) && target != duty) { |
2767 | + if (target != duty) { |
2768 | u16 bump_period = fan->bios.bump_period; |
2769 | u16 slow_down_period = fan->bios.slow_down_period; |
2770 | u64 delay; |
2771 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c |
2772 | index 59701b7a6597..ff9fbe7950e5 100644 |
2773 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c |
2774 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c |
2775 | @@ -53,7 +53,7 @@ nvkm_fantog_update(struct nvkm_fantog *fan, int percent) |
2776 | duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff); |
2777 | nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty); |
2778 | |
2779 | - if (list_empty(&fan->alarm.head) && percent != (duty * 100)) { |
2780 | + if (percent != (duty * 100)) { |
2781 | u64 next_change = (percent * fan->period_us) / 100; |
2782 | if (!duty) |
2783 | next_change = fan->period_us - next_change; |
2784 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c |
2785 | index b9703c02d8ca..9a79e91fdfdc 100644 |
2786 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c |
2787 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c |
2788 | @@ -185,7 +185,7 @@ alarm_timer_callback(struct nvkm_alarm *alarm) |
2789 | spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags); |
2790 | |
2791 | /* schedule the next poll in one second */ |
2792 | - if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head)) |
2793 | + if (therm->func->temp_get(therm) >= 0) |
2794 | nvkm_timer_alarm(tmr, 1000000000ULL, alarm); |
2795 | } |
2796 | |
2797 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2798 | index 07dc82bfe346..f2a86eae0a0d 100644 |
2799 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2800 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c |
2801 | @@ -36,23 +36,29 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr) |
2802 | unsigned long flags; |
2803 | LIST_HEAD(exec); |
2804 | |
2805 | - /* move any due alarms off the pending list */ |
2806 | + /* Process pending alarms. */ |
2807 | spin_lock_irqsave(&tmr->lock, flags); |
2808 | list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) { |
2809 | - if (alarm->timestamp <= nvkm_timer_read(tmr)) |
2810 | - list_move_tail(&alarm->head, &exec); |
2811 | + /* Have we hit the earliest alarm that hasn't gone off? */ |
2812 | + if (alarm->timestamp > nvkm_timer_read(tmr)) { |
2813 | + /* Schedule it. If we didn't race, we're done. */ |
2814 | + tmr->func->alarm_init(tmr, alarm->timestamp); |
2815 | + if (alarm->timestamp > nvkm_timer_read(tmr)) |
2816 | + break; |
2817 | + } |
2818 | + |
2819 | + /* Move to completed list. We'll drop the lock before |
2820 | + * executing the callback so it can reschedule itself. |
2821 | + */ |
2822 | + list_move_tail(&alarm->head, &exec); |
2823 | } |
2824 | |
2825 | - /* reschedule interrupt for next alarm time */ |
2826 | - if (!list_empty(&tmr->alarms)) { |
2827 | - alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head); |
2828 | - tmr->func->alarm_init(tmr, alarm->timestamp); |
2829 | - } else { |
2830 | + /* Shut down interrupt if no more pending alarms. */ |
2831 | + if (list_empty(&tmr->alarms)) |
2832 | tmr->func->alarm_fini(tmr); |
2833 | - } |
2834 | spin_unlock_irqrestore(&tmr->lock, flags); |
2835 | |
2836 | - /* execute any pending alarm handlers */ |
2837 | + /* Execute completed callbacks. */ |
2838 | list_for_each_entry_safe(alarm, atemp, &exec, head) { |
2839 | list_del_init(&alarm->head); |
2840 | alarm->func(alarm); |
2841 | @@ -65,24 +71,37 @@ nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm) |
2842 | struct nvkm_alarm *list; |
2843 | unsigned long flags; |
2844 | |
2845 | - alarm->timestamp = nvkm_timer_read(tmr) + nsec; |
2846 | - |
2847 | - /* append new alarm to list, in soonest-alarm-first order */ |
2848 | + /* Remove alarm from pending list. |
2849 | + * |
2850 | + * This both protects against the corruption of the list, |
2851 | + * and implements alarm rescheduling/cancellation. |
2852 | + */ |
2853 | spin_lock_irqsave(&tmr->lock, flags); |
2854 | - if (!nsec) { |
2855 | - if (!list_empty(&alarm->head)) |
2856 | - list_del(&alarm->head); |
2857 | - } else { |
2858 | + list_del_init(&alarm->head); |
2859 | + |
2860 | + if (nsec) { |
2861 | + /* Insert into pending list, ordered earliest to latest. */ |
2862 | + alarm->timestamp = nvkm_timer_read(tmr) + nsec; |
2863 | list_for_each_entry(list, &tmr->alarms, head) { |
2864 | if (list->timestamp > alarm->timestamp) |
2865 | break; |
2866 | } |
2867 | + |
2868 | list_add_tail(&alarm->head, &list->head); |
2869 | + |
2870 | + /* Update HW if this is now the earliest alarm. */ |
2871 | + list = list_first_entry(&tmr->alarms, typeof(*list), head); |
2872 | + if (list == alarm) { |
2873 | + tmr->func->alarm_init(tmr, alarm->timestamp); |
2874 | + /* This shouldn't happen if callers aren't stupid. |
2875 | + * |
2876 | + * Worst case scenario is that it'll take roughly |
2877 | + * 4 seconds for the next alarm to trigger. |
2878 | + */ |
2879 | + WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr)); |
2880 | + } |
2881 | } |
2882 | spin_unlock_irqrestore(&tmr->lock, flags); |
2883 | - |
2884 | - /* process pending alarms */ |
2885 | - nvkm_timer_alarm_trigger(tmr); |
2886 | } |
2887 | |
2888 | void |
2889 | diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c |
2890 | index 7b9ce87f0617..7f48249f41de 100644 |
2891 | --- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c |
2892 | +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c |
2893 | @@ -76,8 +76,8 @@ nv04_timer_intr(struct nvkm_timer *tmr) |
2894 | u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); |
2895 | |
2896 | if (stat & 0x00000001) { |
2897 | - nvkm_timer_alarm_trigger(tmr); |
2898 | nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); |
2899 | + nvkm_timer_alarm_trigger(tmr); |
2900 | stat &= ~0x00000001; |
2901 | } |
2902 | |
2903 | diff --git a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
2904 | index 01e02b9926d4..ca742ac8f128 100644 |
2905 | --- a/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
2906 | +++ b/drivers/iio/common/hid-sensors/hid-sensor-attributes.c |
2907 | @@ -221,7 +221,15 @@ int hid_sensor_write_samp_freq_value(struct hid_sensor_common *st, |
2908 | if (ret < 0 || value < 0) |
2909 | ret = -EINVAL; |
2910 | |
2911 | - return ret; |
2912 | + ret = sensor_hub_get_feature(st->hsdev, |
2913 | + st->poll.report_id, |
2914 | + st->poll.index, sizeof(value), &value); |
2915 | + if (ret < 0 || value < 0) |
2916 | + return -EINVAL; |
2917 | + |
2918 | + st->poll_interval = value; |
2919 | + |
2920 | + return 0; |
2921 | } |
2922 | EXPORT_SYMBOL(hid_sensor_write_samp_freq_value); |
2923 | |
2924 | @@ -266,7 +274,16 @@ int hid_sensor_write_raw_hyst_value(struct hid_sensor_common *st, |
2925 | if (ret < 0 || value < 0) |
2926 | ret = -EINVAL; |
2927 | |
2928 | - return ret; |
2929 | + ret = sensor_hub_get_feature(st->hsdev, |
2930 | + st->sensitivity.report_id, |
2931 | + st->sensitivity.index, sizeof(value), |
2932 | + &value); |
2933 | + if (ret < 0 || value < 0) |
2934 | + return -EINVAL; |
2935 | + |
2936 | + st->raw_hystersis = value; |
2937 | + |
2938 | + return 0; |
2939 | } |
2940 | EXPORT_SYMBOL(hid_sensor_write_raw_hyst_value); |
2941 | |
2942 | @@ -369,6 +386,9 @@ int hid_sensor_get_reporting_interval(struct hid_sensor_hub_device *hsdev, |
2943 | /* Default unit of measure is milliseconds */ |
2944 | if (st->poll.units == 0) |
2945 | st->poll.units = HID_USAGE_SENSOR_UNITS_MILLISECOND; |
2946 | + |
2947 | + st->poll_interval = -1; |
2948 | + |
2949 | return 0; |
2950 | |
2951 | } |
2952 | @@ -399,6 +419,8 @@ int hid_sensor_parse_common_attributes(struct hid_sensor_hub_device *hsdev, |
2953 | HID_USAGE_SENSOR_PROP_SENSITIVITY_ABS, |
2954 | &st->sensitivity); |
2955 | |
2956 | + st->raw_hystersis = -1; |
2957 | + |
2958 | sensor_hub_input_get_attribute_info(hsdev, |
2959 | HID_INPUT_REPORT, usage_id, |
2960 | HID_USAGE_SENSOR_TIME_TIMESTAMP, |
2961 | diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c |
2962 | index ecf592d69043..60829340a82e 100644 |
2963 | --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c |
2964 | +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c |
2965 | @@ -51,6 +51,8 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) |
2966 | st->report_state.report_id, |
2967 | st->report_state.index, |
2968 | HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); |
2969 | + |
2970 | + poll_value = hid_sensor_read_poll_value(st); |
2971 | } else { |
2972 | int val; |
2973 | |
2974 | @@ -87,9 +89,7 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) |
2975 | sensor_hub_get_feature(st->hsdev, st->power_state.report_id, |
2976 | st->power_state.index, |
2977 | sizeof(state_val), &state_val); |
2978 | - if (state) |
2979 | - poll_value = hid_sensor_read_poll_value(st); |
2980 | - if (poll_value > 0) |
2981 | + if (state && poll_value) |
2982 | msleep_interruptible(poll_value * 2); |
2983 | |
2984 | return 0; |
2985 | @@ -127,6 +127,20 @@ static void hid_sensor_set_power_work(struct work_struct *work) |
2986 | struct hid_sensor_common *attrb = container_of(work, |
2987 | struct hid_sensor_common, |
2988 | work); |
2989 | + |
2990 | + if (attrb->poll_interval >= 0) |
2991 | + sensor_hub_set_feature(attrb->hsdev, attrb->poll.report_id, |
2992 | + attrb->poll.index, |
2993 | + sizeof(attrb->poll_interval), |
2994 | + &attrb->poll_interval); |
2995 | + |
2996 | + if (attrb->raw_hystersis >= 0) |
2997 | + sensor_hub_set_feature(attrb->hsdev, |
2998 | + attrb->sensitivity.report_id, |
2999 | + attrb->sensitivity.index, |
3000 | + sizeof(attrb->raw_hystersis), |
3001 | + &attrb->raw_hystersis); |
3002 | + |
3003 | _hid_sensor_power_state(attrb, true); |
3004 | } |
3005 | |
3006 | diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c |
3007 | index e690dd11e99f..4b0f942b8914 100644 |
3008 | --- a/drivers/iio/dac/ad7303.c |
3009 | +++ b/drivers/iio/dac/ad7303.c |
3010 | @@ -184,9 +184,9 @@ static const struct iio_chan_spec_ext_info ad7303_ext_info[] = { |
3011 | .address = (chan), \ |
3012 | .scan_type = { \ |
3013 | .sign = 'u', \ |
3014 | - .realbits = '8', \ |
3015 | - .storagebits = '8', \ |
3016 | - .shift = '0', \ |
3017 | + .realbits = 8, \ |
3018 | + .storagebits = 8, \ |
3019 | + .shift = 0, \ |
3020 | }, \ |
3021 | .ext_info = ad7303_ext_info, \ |
3022 | } |
3023 | diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c |
3024 | index 4d18826ac63c..d82b788374b6 100644 |
3025 | --- a/drivers/iio/pressure/bmp280-core.c |
3026 | +++ b/drivers/iio/pressure/bmp280-core.c |
3027 | @@ -175,11 +175,12 @@ static u32 bmp280_compensate_humidity(struct bmp280_data *data, |
3028 | } |
3029 | H6 = sign_extend32(tmp, 7); |
3030 | |
3031 | - var = ((s32)data->t_fine) - 76800; |
3032 | - var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15) |
3033 | - * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10) |
3034 | - + 2097152) * H2 + 8192) >> 14); |
3035 | - var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4; |
3036 | + var = ((s32)data->t_fine) - (s32)76800; |
3037 | + var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) |
3038 | + + (s32)16384) >> 15) * (((((((var * H6) >> 10) |
3039 | + * (((var * (s32)H3) >> 11) + (s32)32768)) >> 10) |
3040 | + + (s32)2097152) * H2 + 8192) >> 14); |
3041 | + var -= ((((var >> 15) * (var >> 15)) >> 7) * (s32)H1) >> 4; |
3042 | |
3043 | return var >> 12; |
3044 | }; |
3045 | diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c |
3046 | index 5656deb17261..020459513384 100644 |
3047 | --- a/drivers/iio/proximity/as3935.c |
3048 | +++ b/drivers/iio/proximity/as3935.c |
3049 | @@ -50,7 +50,6 @@ |
3050 | #define AS3935_TUNE_CAP 0x08 |
3051 | #define AS3935_CALIBRATE 0x3D |
3052 | |
3053 | -#define AS3935_WRITE_DATA BIT(15) |
3054 | #define AS3935_READ_DATA BIT(14) |
3055 | #define AS3935_ADDRESS(x) ((x) << 8) |
3056 | |
3057 | @@ -105,7 +104,7 @@ static int as3935_write(struct as3935_state *st, |
3058 | { |
3059 | u8 *buf = st->buf; |
3060 | |
3061 | - buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8; |
3062 | + buf[0] = AS3935_ADDRESS(reg) >> 8; |
3063 | buf[1] = val; |
3064 | |
3065 | return spi_write(st->spi, buf, 2); |
3066 | diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c |
3067 | index 994b96d19750..5ee362bc618c 100644 |
3068 | --- a/drivers/iio/trigger/stm32-timer-trigger.c |
3069 | +++ b/drivers/iio/trigger/stm32-timer-trigger.c |
3070 | @@ -152,10 +152,10 @@ static ssize_t stm32_tt_read_frequency(struct device *dev, |
3071 | regmap_read(priv->regmap, TIM_PSC, &psc); |
3072 | regmap_read(priv->regmap, TIM_ARR, &arr); |
3073 | |
3074 | - if (psc && arr && (cr1 & TIM_CR1_CEN)) { |
3075 | + if (cr1 & TIM_CR1_CEN) { |
3076 | freq = (unsigned long long)clk_get_rate(priv->clk); |
3077 | - do_div(freq, psc); |
3078 | - do_div(freq, arr); |
3079 | + do_div(freq, psc + 1); |
3080 | + do_div(freq, arr + 1); |
3081 | } |
3082 | |
3083 | return sprintf(buf, "%d\n", (unsigned int)freq); |
3084 | diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c |
3085 | index 0f58f46dbad7..8fd108d89527 100644 |
3086 | --- a/drivers/infiniband/core/addr.c |
3087 | +++ b/drivers/infiniband/core/addr.c |
3088 | @@ -444,8 +444,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in, |
3089 | fl6.saddr = src_in->sin6_addr; |
3090 | fl6.flowi6_oif = addr->bound_dev_if; |
3091 | |
3092 | - dst = ip6_route_output(addr->net, NULL, &fl6); |
3093 | - if ((ret = dst->error)) |
3094 | + ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6); |
3095 | + if (ret < 0) |
3096 | goto put; |
3097 | |
3098 | rt = (struct rt6_info *)dst; |
3099 | diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c |
3100 | index f78c739b330a..db02517b763b 100644 |
3101 | --- a/drivers/infiniband/hw/hfi1/file_ops.c |
3102 | +++ b/drivers/infiniband/hw/hfi1/file_ops.c |
3103 | @@ -752,6 +752,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) |
3104 | /* release the cpu */ |
3105 | hfi1_put_proc_affinity(fdata->rec_cpu_num); |
3106 | |
3107 | + /* clean up rcv side */ |
3108 | + hfi1_user_exp_rcv_free(fdata); |
3109 | + |
3110 | /* |
3111 | * Clear any left over, unhandled events so the next process that |
3112 | * gets this context doesn't get confused. |
3113 | @@ -791,7 +794,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) |
3114 | |
3115 | dd->rcd[uctxt->ctxt] = NULL; |
3116 | |
3117 | - hfi1_user_exp_rcv_free(fdata); |
3118 | + hfi1_user_exp_rcv_grp_free(uctxt); |
3119 | hfi1_clear_ctxt_pkey(dd, uctxt->ctxt); |
3120 | |
3121 | uctxt->rcvwait_to = 0; |
3122 | diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c |
3123 | index f40864e9a3b2..a3dd27b1305d 100644 |
3124 | --- a/drivers/infiniband/hw/hfi1/init.c |
3125 | +++ b/drivers/infiniband/hw/hfi1/init.c |
3126 | @@ -1758,6 +1758,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd) |
3127 | !HFI1_CAP_KGET_MASK(rcd->flags, MULTI_PKT_EGR)) { |
3128 | dd_dev_err(dd, "ctxt%u: Failed to allocate eager buffers\n", |
3129 | rcd->ctxt); |
3130 | + ret = -ENOMEM; |
3131 | goto bail_rcvegrbuf_phys; |
3132 | } |
3133 | |
3134 | diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
3135 | index 4a8295399e71..ffe6ca12b48d 100644 |
3136 | --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
3137 | +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
3138 | @@ -251,36 +251,40 @@ int hfi1_user_exp_rcv_init(struct file *fp) |
3139 | return ret; |
3140 | } |
3141 | |
3142 | +void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt) |
3143 | +{ |
3144 | + struct tid_group *grp, *gptr; |
3145 | + |
3146 | + list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, |
3147 | + list) { |
3148 | + list_del_init(&grp->list); |
3149 | + kfree(grp); |
3150 | + } |
3151 | + hfi1_clear_tids(uctxt); |
3152 | +} |
3153 | + |
3154 | int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) |
3155 | { |
3156 | struct hfi1_ctxtdata *uctxt = fd->uctxt; |
3157 | - struct tid_group *grp, *gptr; |
3158 | |
3159 | - if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) |
3160 | - return 0; |
3161 | /* |
3162 | * The notifier would have been removed when the process'es mm |
3163 | * was freed. |
3164 | */ |
3165 | - if (fd->handler) |
3166 | + if (fd->handler) { |
3167 | hfi1_mmu_rb_unregister(fd->handler); |
3168 | - |
3169 | - kfree(fd->invalid_tids); |
3170 | - |
3171 | - if (!uctxt->cnt) { |
3172 | + } else { |
3173 | if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) |
3174 | unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); |
3175 | if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) |
3176 | unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); |
3177 | - list_for_each_entry_safe(grp, gptr, &uctxt->tid_group_list.list, |
3178 | - list) { |
3179 | - list_del_init(&grp->list); |
3180 | - kfree(grp); |
3181 | - } |
3182 | - hfi1_clear_tids(uctxt); |
3183 | } |
3184 | |
3185 | + kfree(fd->invalid_tids); |
3186 | + fd->invalid_tids = NULL; |
3187 | + |
3188 | kfree(fd->entry_to_rb); |
3189 | + fd->entry_to_rb = NULL; |
3190 | return 0; |
3191 | } |
3192 | |
3193 | diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h |
3194 | index 9bc8d9fba87e..d1d7d3d3bd44 100644 |
3195 | --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.h |
3196 | +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h |
3197 | @@ -70,6 +70,7 @@ |
3198 | (tid) |= EXP_TID_SET(field, (value)); \ |
3199 | } while (0) |
3200 | |
3201 | +void hfi1_user_exp_rcv_grp_free(struct hfi1_ctxtdata *uctxt); |
3202 | int hfi1_user_exp_rcv_init(struct file *); |
3203 | int hfi1_user_exp_rcv_free(struct hfi1_filedata *); |
3204 | int hfi1_user_exp_rcv_setup(struct file *, struct hfi1_tid_info *); |
3205 | diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c |
3206 | index 16ef7b12b0b8..1c67745a04f6 100644 |
3207 | --- a/drivers/infiniband/hw/hfi1/verbs.c |
3208 | +++ b/drivers/infiniband/hw/hfi1/verbs.c |
3209 | @@ -1524,6 +1524,7 @@ static const char * const driver_cntr_names[] = { |
3210 | "DRIVER_EgrHdrFull" |
3211 | }; |
3212 | |
3213 | +static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ |
3214 | static const char **dev_cntr_names; |
3215 | static const char **port_cntr_names; |
3216 | static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); |
3217 | @@ -1578,6 +1579,7 @@ static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev, |
3218 | { |
3219 | int i, err; |
3220 | |
3221 | + mutex_lock(&cntr_names_lock); |
3222 | if (!cntr_names_initialized) { |
3223 | struct hfi1_devdata *dd = dd_from_ibdev(ibdev); |
3224 | |
3225 | @@ -1586,8 +1588,10 @@ static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev, |
3226 | num_driver_cntrs, |
3227 | &num_dev_cntrs, |
3228 | &dev_cntr_names); |
3229 | - if (err) |
3230 | + if (err) { |
3231 | + mutex_unlock(&cntr_names_lock); |
3232 | return NULL; |
3233 | + } |
3234 | |
3235 | for (i = 0; i < num_driver_cntrs; i++) |
3236 | dev_cntr_names[num_dev_cntrs + i] = |
3237 | @@ -1601,10 +1605,12 @@ static struct rdma_hw_stats *alloc_hw_stats(struct ib_device *ibdev, |
3238 | if (err) { |
3239 | kfree(dev_cntr_names); |
3240 | dev_cntr_names = NULL; |
3241 | + mutex_unlock(&cntr_names_lock); |
3242 | return NULL; |
3243 | } |
3244 | cntr_names_initialized = 1; |
3245 | } |
3246 | + mutex_unlock(&cntr_names_lock); |
3247 | |
3248 | if (!port_num) |
3249 | return rdma_alloc_hw_stats_struct( |
3250 | @@ -1823,9 +1829,13 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) |
3251 | del_timer_sync(&dev->mem_timer); |
3252 | verbs_txreq_exit(dev); |
3253 | |
3254 | + mutex_lock(&cntr_names_lock); |
3255 | kfree(dev_cntr_names); |
3256 | kfree(port_cntr_names); |
3257 | + dev_cntr_names = NULL; |
3258 | + port_cntr_names = NULL; |
3259 | cntr_names_initialized = 0; |
3260 | + mutex_unlock(&cntr_names_lock); |
3261 | } |
3262 | |
3263 | void hfi1_cnp_rcv(struct hfi1_packet *packet) |
3264 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
3265 | index b8f9382a8b7d..d9c6c0ea750b 100644 |
3266 | --- a/drivers/infiniband/hw/mlx5/mr.c |
3267 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
3268 | @@ -1782,7 +1782,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, |
3269 | klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); |
3270 | klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); |
3271 | klms[i].key = cpu_to_be32(lkey); |
3272 | - mr->ibmr.length += sg_dma_len(sg); |
3273 | + mr->ibmr.length += sg_dma_len(sg) - sg_offset; |
3274 | |
3275 | sg_offset = 0; |
3276 | } |
3277 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
3278 | index d412a313a372..478130d0a495 100644 |
3279 | --- a/drivers/iommu/intel-iommu.c |
3280 | +++ b/drivers/iommu/intel-iommu.c |
3281 | @@ -2050,11 +2050,14 @@ static int domain_context_mapping_one(struct dmar_domain *domain, |
3282 | if (context_copied(context)) { |
3283 | u16 did_old = context_domain_id(context); |
3284 | |
3285 | - if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) |
3286 | + if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) { |
3287 | iommu->flush.flush_context(iommu, did_old, |
3288 | (((u16)bus) << 8) | devfn, |
3289 | DMA_CCMD_MASK_NOBIT, |
3290 | DMA_CCMD_DEVICE_INVL); |
3291 | + iommu->flush.flush_iotlb(iommu, did_old, 0, 0, |
3292 | + DMA_TLB_DSI_FLUSH); |
3293 | + } |
3294 | } |
3295 | |
3296 | pgd = domain->pgd; |
3297 | diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig |
3298 | index b7767da50c26..b5372fd7d441 100644 |
3299 | --- a/drivers/md/Kconfig |
3300 | +++ b/drivers/md/Kconfig |
3301 | @@ -365,6 +365,7 @@ config DM_LOG_USERSPACE |
3302 | config DM_RAID |
3303 | tristate "RAID 1/4/5/6/10 target" |
3304 | depends on BLK_DEV_DM |
3305 | + select MD_RAID0 |
3306 | select MD_RAID1 |
3307 | select MD_RAID10 |
3308 | select MD_RAID456 |
3309 | diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c |
3310 | index df4859f6ac6a..2ac24180505f 100644 |
3311 | --- a/drivers/md/dm-bufio.c |
3312 | +++ b/drivers/md/dm-bufio.c |
3313 | @@ -216,7 +216,7 @@ static DEFINE_SPINLOCK(param_spinlock); |
3314 | * Buffers are freed after this timeout |
3315 | */ |
3316 | static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS; |
3317 | -static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; |
3318 | +static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES; |
3319 | |
3320 | static unsigned long dm_bufio_peak_allocated; |
3321 | static unsigned long dm_bufio_allocated_kmem_cache; |
3322 | @@ -933,10 +933,11 @@ static void __get_memory_limit(struct dm_bufio_client *c, |
3323 | { |
3324 | unsigned long buffers; |
3325 | |
3326 | - if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) { |
3327 | - mutex_lock(&dm_bufio_clients_lock); |
3328 | - __cache_size_refresh(); |
3329 | - mutex_unlock(&dm_bufio_clients_lock); |
3330 | + if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { |
3331 | + if (mutex_trylock(&dm_bufio_clients_lock)) { |
3332 | + __cache_size_refresh(); |
3333 | + mutex_unlock(&dm_bufio_clients_lock); |
3334 | + } |
3335 | } |
3336 | |
3337 | buffers = dm_bufio_cache_size_per_client >> |
3338 | @@ -1550,10 +1551,10 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) |
3339 | return true; |
3340 | } |
3341 | |
3342 | -static unsigned get_retain_buffers(struct dm_bufio_client *c) |
3343 | +static unsigned long get_retain_buffers(struct dm_bufio_client *c) |
3344 | { |
3345 | - unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); |
3346 | - return retain_bytes / c->block_size; |
3347 | + unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); |
3348 | + return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); |
3349 | } |
3350 | |
3351 | static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, |
3352 | @@ -1563,7 +1564,7 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, |
3353 | struct dm_buffer *b, *tmp; |
3354 | unsigned long freed = 0; |
3355 | unsigned long count = nr_to_scan; |
3356 | - unsigned retain_target = get_retain_buffers(c); |
3357 | + unsigned long retain_target = get_retain_buffers(c); |
3358 | |
3359 | for (l = 0; l < LIST_SIZE; l++) { |
3360 | list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { |
3361 | @@ -1780,11 +1781,19 @@ static bool older_than(struct dm_buffer *b, unsigned long age_hz) |
3362 | static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz) |
3363 | { |
3364 | struct dm_buffer *b, *tmp; |
3365 | - unsigned retain_target = get_retain_buffers(c); |
3366 | - unsigned count; |
3367 | + unsigned long retain_target = get_retain_buffers(c); |
3368 | + unsigned long count; |
3369 | + LIST_HEAD(write_list); |
3370 | |
3371 | dm_bufio_lock(c); |
3372 | |
3373 | + __check_watermark(c, &write_list); |
3374 | + if (unlikely(!list_empty(&write_list))) { |
3375 | + dm_bufio_unlock(c); |
3376 | + __flush_write_list(&write_list); |
3377 | + dm_bufio_lock(c); |
3378 | + } |
3379 | + |
3380 | count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; |
3381 | list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) { |
3382 | if (count <= retain_target) |
3383 | @@ -1809,6 +1818,8 @@ static void cleanup_old_buffers(void) |
3384 | |
3385 | mutex_lock(&dm_bufio_clients_lock); |
3386 | |
3387 | + __cache_size_refresh(); |
3388 | + |
3389 | list_for_each_entry(c, &dm_bufio_all_clients, client_list) |
3390 | __evict_old_buffers(c, max_age_hz); |
3391 | |
3392 | @@ -1931,7 +1942,7 @@ MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache"); |
3393 | module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR); |
3394 | MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds"); |
3395 | |
3396 | -module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR); |
3397 | +module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR); |
3398 | MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory"); |
3399 | |
3400 | module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR); |
3401 | diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c |
3402 | index 6735c8d6a445..ee05d19f8bbc 100644 |
3403 | --- a/drivers/md/dm-cache-metadata.c |
3404 | +++ b/drivers/md/dm-cache-metadata.c |
3405 | @@ -1627,17 +1627,19 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, |
3406 | |
3407 | int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown) |
3408 | { |
3409 | - int r; |
3410 | + int r = -EINVAL; |
3411 | flags_mutator mutator = (clean_shutdown ? set_clean_shutdown : |
3412 | clear_clean_shutdown); |
3413 | |
3414 | WRITE_LOCK(cmd); |
3415 | + if (cmd->fail_io) |
3416 | + goto out; |
3417 | + |
3418 | r = __commit_transaction(cmd, mutator); |
3419 | if (r) |
3420 | goto out; |
3421 | |
3422 | r = __begin_transaction(cmd); |
3423 | - |
3424 | out: |
3425 | WRITE_UNLOCK(cmd); |
3426 | return r; |
3427 | @@ -1649,7 +1651,8 @@ int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd, |
3428 | int r = -EINVAL; |
3429 | |
3430 | READ_LOCK(cmd); |
3431 | - r = dm_sm_get_nr_free(cmd->metadata_sm, result); |
3432 | + if (!cmd->fail_io) |
3433 | + r = dm_sm_get_nr_free(cmd->metadata_sm, result); |
3434 | READ_UNLOCK(cmd); |
3435 | |
3436 | return r; |
3437 | @@ -1661,7 +1664,8 @@ int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd, |
3438 | int r = -EINVAL; |
3439 | |
3440 | READ_LOCK(cmd); |
3441 | - r = dm_sm_get_nr_blocks(cmd->metadata_sm, result); |
3442 | + if (!cmd->fail_io) |
3443 | + r = dm_sm_get_nr_blocks(cmd->metadata_sm, result); |
3444 | READ_UNLOCK(cmd); |
3445 | |
3446 | return r; |
3447 | diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c |
3448 | index 7f223dbed49f..d85baffa3377 100644 |
3449 | --- a/drivers/md/dm-mpath.c |
3450 | +++ b/drivers/md/dm-mpath.c |
3451 | @@ -111,7 +111,8 @@ typedef int (*action_fn) (struct pgpath *pgpath); |
3452 | |
3453 | static struct workqueue_struct *kmultipathd, *kmpath_handlerd; |
3454 | static void trigger_event(struct work_struct *work); |
3455 | -static void activate_path(struct work_struct *work); |
3456 | +static void activate_or_offline_path(struct pgpath *pgpath); |
3457 | +static void activate_path_work(struct work_struct *work); |
3458 | static void process_queued_bios(struct work_struct *work); |
3459 | |
3460 | /*----------------------------------------------- |
3461 | @@ -136,7 +137,7 @@ static struct pgpath *alloc_pgpath(void) |
3462 | |
3463 | if (pgpath) { |
3464 | pgpath->is_active = true; |
3465 | - INIT_DELAYED_WORK(&pgpath->activate_path, activate_path); |
3466 | + INIT_DELAYED_WORK(&pgpath->activate_path, activate_path_work); |
3467 | } |
3468 | |
3469 | return pgpath; |
3470 | @@ -321,13 +322,16 @@ static int __pg_init_all_paths(struct multipath *m) |
3471 | return atomic_read(&m->pg_init_in_progress); |
3472 | } |
3473 | |
3474 | -static void pg_init_all_paths(struct multipath *m) |
3475 | +static int pg_init_all_paths(struct multipath *m) |
3476 | { |
3477 | + int ret; |
3478 | unsigned long flags; |
3479 | |
3480 | spin_lock_irqsave(&m->lock, flags); |
3481 | - __pg_init_all_paths(m); |
3482 | + ret = __pg_init_all_paths(m); |
3483 | spin_unlock_irqrestore(&m->lock, flags); |
3484 | + |
3485 | + return ret; |
3486 | } |
3487 | |
3488 | static void __switch_pg(struct multipath *m, struct priority_group *pg) |
3489 | @@ -484,11 +488,11 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, |
3490 | struct request **__clone) |
3491 | { |
3492 | struct multipath *m = ti->private; |
3493 | - int r = DM_MAPIO_REQUEUE; |
3494 | size_t nr_bytes = blk_rq_bytes(rq); |
3495 | struct pgpath *pgpath; |
3496 | struct block_device *bdev; |
3497 | struct dm_mpath_io *mpio = get_mpio(map_context); |
3498 | + struct request_queue *q; |
3499 | struct request *clone; |
3500 | |
3501 | /* Do we need to select a new pgpath? */ |
3502 | @@ -502,8 +506,9 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, |
3503 | return -EIO; /* Failed */ |
3504 | } else if (test_bit(MPATHF_QUEUE_IO, &m->flags) || |
3505 | test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { |
3506 | - pg_init_all_paths(m); |
3507 | - return r; |
3508 | + if (pg_init_all_paths(m)) |
3509 | + return DM_MAPIO_DELAY_REQUEUE; |
3510 | + return DM_MAPIO_REQUEUE; |
3511 | } |
3512 | |
3513 | memset(mpio, 0, sizeof(*mpio)); |
3514 | @@ -511,13 +516,19 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, |
3515 | mpio->nr_bytes = nr_bytes; |
3516 | |
3517 | bdev = pgpath->path.dev->bdev; |
3518 | - |
3519 | - clone = blk_get_request(bdev_get_queue(bdev), |
3520 | - rq->cmd_flags | REQ_NOMERGE, |
3521 | - GFP_ATOMIC); |
3522 | + q = bdev_get_queue(bdev); |
3523 | + clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, GFP_ATOMIC); |
3524 | if (IS_ERR(clone)) { |
3525 | /* EBUSY, ENODEV or EWOULDBLOCK: requeue */ |
3526 | - return r; |
3527 | + bool queue_dying = blk_queue_dying(q); |
3528 | + DMERR_LIMIT("blk_get_request() returned %ld%s - requeuing", |
3529 | + PTR_ERR(clone), queue_dying ? " (path offline)" : ""); |
3530 | + if (queue_dying) { |
3531 | + atomic_inc(&m->pg_init_in_progress); |
3532 | + activate_or_offline_path(pgpath); |
3533 | + return DM_MAPIO_REQUEUE; |
3534 | + } |
3535 | + return DM_MAPIO_DELAY_REQUEUE; |
3536 | } |
3537 | clone->bio = clone->biotail = NULL; |
3538 | clone->rq_disk = bdev->bd_disk; |
3539 | @@ -1437,10 +1448,8 @@ static void pg_init_done(void *data, int errors) |
3540 | spin_unlock_irqrestore(&m->lock, flags); |
3541 | } |
3542 | |
3543 | -static void activate_path(struct work_struct *work) |
3544 | +static void activate_or_offline_path(struct pgpath *pgpath) |
3545 | { |
3546 | - struct pgpath *pgpath = |
3547 | - container_of(work, struct pgpath, activate_path.work); |
3548 | struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); |
3549 | |
3550 | if (pgpath->is_active && !blk_queue_dying(q)) |
3551 | @@ -1449,6 +1458,14 @@ static void activate_path(struct work_struct *work) |
3552 | pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); |
3553 | } |
3554 | |
3555 | +static void activate_path_work(struct work_struct *work) |
3556 | +{ |
3557 | + struct pgpath *pgpath = |
3558 | + container_of(work, struct pgpath, activate_path.work); |
3559 | + |
3560 | + activate_or_offline_path(pgpath); |
3561 | +} |
3562 | + |
3563 | static int noretry_error(int error) |
3564 | { |
3565 | switch (error) { |
3566 | diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c |
3567 | index 505b9f6b4a47..93310edb3e09 100644 |
3568 | --- a/drivers/md/dm-rq.c |
3569 | +++ b/drivers/md/dm-rq.c |
3570 | @@ -280,7 +280,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_ |
3571 | if (!rq->q->mq_ops) |
3572 | dm_old_requeue_request(rq); |
3573 | else |
3574 | - dm_mq_delay_requeue_request(rq, delay_requeue ? 5000 : 0); |
3575 | + dm_mq_delay_requeue_request(rq, delay_requeue ? 100/*ms*/ : 0); |
3576 | |
3577 | rq_completed(md, rw, false); |
3578 | } |
3579 | diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c |
3580 | index a15091a0d40c..4477bf930cf4 100644 |
3581 | --- a/drivers/md/dm-thin-metadata.c |
3582 | +++ b/drivers/md/dm-thin-metadata.c |
3583 | @@ -485,11 +485,11 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd) |
3584 | if (r < 0) |
3585 | return r; |
3586 | |
3587 | - r = save_sm_roots(pmd); |
3588 | + r = dm_tm_pre_commit(pmd->tm); |
3589 | if (r < 0) |
3590 | return r; |
3591 | |
3592 | - r = dm_tm_pre_commit(pmd->tm); |
3593 | + r = save_sm_roots(pmd); |
3594 | if (r < 0) |
3595 | return r; |
3596 | |
3597 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
3598 | index f6ae1d67bcd0..906a4bf600ac 100644 |
3599 | --- a/drivers/md/md.c |
3600 | +++ b/drivers/md/md.c |
3601 | @@ -6776,6 +6776,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
3602 | void __user *argp = (void __user *)arg; |
3603 | struct mddev *mddev = NULL; |
3604 | int ro; |
3605 | + bool did_set_md_closing = false; |
3606 | |
3607 | if (!md_ioctl_valid(cmd)) |
3608 | return -ENOTTY; |
3609 | @@ -6865,7 +6866,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
3610 | err = -EBUSY; |
3611 | goto out; |
3612 | } |
3613 | + WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags)); |
3614 | set_bit(MD_CLOSING, &mddev->flags); |
3615 | + did_set_md_closing = true; |
3616 | mutex_unlock(&mddev->open_mutex); |
3617 | sync_blockdev(bdev); |
3618 | } |
3619 | @@ -7058,6 +7061,8 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
3620 | mddev->hold_active = 0; |
3621 | mddev_unlock(mddev); |
3622 | out: |
3623 | + if(did_set_md_closing) |
3624 | + clear_bit(MD_CLOSING, &mddev->flags); |
3625 | return err; |
3626 | } |
3627 | #ifdef CONFIG_COMPAT |
3628 | diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c |
3629 | index 02e2ee0d8a00..f21ce6a3d4cf 100644 |
3630 | --- a/drivers/md/persistent-data/dm-btree.c |
3631 | +++ b/drivers/md/persistent-data/dm-btree.c |
3632 | @@ -902,8 +902,12 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest, |
3633 | else |
3634 | *result_key = le64_to_cpu(ro_node(s)->keys[0]); |
3635 | |
3636 | - if (next_block || flags & INTERNAL_NODE) |
3637 | - block = value64(ro_node(s), i); |
3638 | + if (next_block || flags & INTERNAL_NODE) { |
3639 | + if (find_highest) |
3640 | + block = value64(ro_node(s), i); |
3641 | + else |
3642 | + block = value64(ro_node(s), 0); |
3643 | + } |
3644 | |
3645 | } while (flags & INTERNAL_NODE); |
3646 | |
3647 | diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c |
3648 | index ebb280a14325..32adf6b4a9c7 100644 |
3649 | --- a/drivers/md/persistent-data/dm-space-map-disk.c |
3650 | +++ b/drivers/md/persistent-data/dm-space-map-disk.c |
3651 | @@ -142,10 +142,23 @@ static int sm_disk_inc_block(struct dm_space_map *sm, dm_block_t b) |
3652 | |
3653 | static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b) |
3654 | { |
3655 | + int r; |
3656 | + uint32_t old_count; |
3657 | enum allocation_event ev; |
3658 | struct sm_disk *smd = container_of(sm, struct sm_disk, sm); |
3659 | |
3660 | - return sm_ll_dec(&smd->ll, b, &ev); |
3661 | + r = sm_ll_dec(&smd->ll, b, &ev); |
3662 | + if (!r && (ev == SM_FREE)) { |
3663 | + /* |
3664 | + * It's only free if it's also free in the last |
3665 | + * transaction. |
3666 | + */ |
3667 | + r = sm_ll_lookup(&smd->old_ll, b, &old_count); |
3668 | + if (!r && !old_count) |
3669 | + smd->nr_allocated_this_transaction--; |
3670 | + } |
3671 | + |
3672 | + return r; |
3673 | } |
3674 | |
3675 | static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b) |
3676 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
3677 | index ed5cd705b985..b095bd296da7 100644 |
3678 | --- a/drivers/md/raid5.c |
3679 | +++ b/drivers/md/raid5.c |
3680 | @@ -2323,6 +2323,10 @@ static int resize_stripes(struct r5conf *conf, int newsize) |
3681 | err = -ENOMEM; |
3682 | |
3683 | mutex_unlock(&conf->cache_size_mutex); |
3684 | + |
3685 | + conf->slab_cache = sc; |
3686 | + conf->active_name = 1-conf->active_name; |
3687 | + |
3688 | /* Step 4, return new stripes to service */ |
3689 | while(!list_empty(&newstripes)) { |
3690 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
3691 | @@ -2340,8 +2344,6 @@ static int resize_stripes(struct r5conf *conf, int newsize) |
3692 | } |
3693 | /* critical section pass, GFP_NOIO no longer needed */ |
3694 | |
3695 | - conf->slab_cache = sc; |
3696 | - conf->active_name = 1-conf->active_name; |
3697 | if (!err) |
3698 | conf->pool_size = newsize; |
3699 | return err; |
3700 | diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c |
3701 | index 37217e205040..cfe414aa0915 100644 |
3702 | --- a/drivers/media/cec/cec-core.c |
3703 | +++ b/drivers/media/cec/cec-core.c |
3704 | @@ -286,8 +286,8 @@ int cec_register_adapter(struct cec_adapter *adap, |
3705 | adap->devnode.dev.parent = parent; |
3706 | |
3707 | #if IS_REACHABLE(CONFIG_RC_CORE) |
3708 | - adap->rc->dev.parent = parent; |
3709 | if (adap->capabilities & CEC_CAP_RC) { |
3710 | + adap->rc->dev.parent = parent; |
3711 | res = rc_register_device(adap->rc); |
3712 | |
3713 | if (res) { |
3714 | diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c |
3715 | index 614bfb3740f1..ce37dc2e89c7 100644 |
3716 | --- a/drivers/media/dvb-frontends/cxd2841er.c |
3717 | +++ b/drivers/media/dvb-frontends/cxd2841er.c |
3718 | @@ -3852,7 +3852,9 @@ static struct dvb_frontend_ops cxd2841er_t_c_ops = { |
3719 | FE_CAN_MUTE_TS | |
3720 | FE_CAN_2G_MODULATION, |
3721 | .frequency_min = 42000000, |
3722 | - .frequency_max = 1002000000 |
3723 | + .frequency_max = 1002000000, |
3724 | + .symbol_rate_min = 870000, |
3725 | + .symbol_rate_max = 11700000 |
3726 | }, |
3727 | .init = cxd2841er_init_tc, |
3728 | .sleep = cxd2841er_sleep_tc, |
3729 | diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
3730 | index bb0a5887c9a9..6152a0587723 100644 |
3731 | --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c |
3732 | +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
3733 | @@ -206,6 +206,7 @@ static void s5p_mfc_watchdog_worker(struct work_struct *work) |
3734 | } |
3735 | s5p_mfc_clock_on(); |
3736 | ret = s5p_mfc_init_hw(dev); |
3737 | + s5p_mfc_clock_off(); |
3738 | if (ret) |
3739 | mfc_err("Failed to reinit FW\n"); |
3740 | } |
3741 | @@ -666,9 +667,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) |
3742 | break; |
3743 | } |
3744 | s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); |
3745 | - wake_up_ctx(ctx, reason, err); |
3746 | WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0); |
3747 | s5p_mfc_clock_off(); |
3748 | + wake_up_ctx(ctx, reason, err); |
3749 | s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); |
3750 | } else { |
3751 | s5p_mfc_handle_frame(ctx, reason, err); |
3752 | @@ -682,15 +683,11 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) |
3753 | case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET: |
3754 | ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev); |
3755 | ctx->state = MFCINST_GOT_INST; |
3756 | - clear_work_bit(ctx); |
3757 | - wake_up(&ctx->queue); |
3758 | goto irq_cleanup_hw; |
3759 | |
3760 | case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET: |
3761 | - clear_work_bit(ctx); |
3762 | ctx->inst_no = MFC_NO_INSTANCE_SET; |
3763 | ctx->state = MFCINST_FREE; |
3764 | - wake_up(&ctx->queue); |
3765 | goto irq_cleanup_hw; |
3766 | |
3767 | case S5P_MFC_R2H_CMD_SYS_INIT_RET: |
3768 | @@ -700,9 +697,9 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) |
3769 | if (ctx) |
3770 | clear_work_bit(ctx); |
3771 | s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev); |
3772 | - wake_up_dev(dev, reason, err); |
3773 | clear_bit(0, &dev->hw_lock); |
3774 | clear_bit(0, &dev->enter_suspend); |
3775 | + wake_up_dev(dev, reason, err); |
3776 | break; |
3777 | |
3778 | case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET: |
3779 | @@ -717,9 +714,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) |
3780 | break; |
3781 | |
3782 | case S5P_MFC_R2H_CMD_DPB_FLUSH_RET: |
3783 | - clear_work_bit(ctx); |
3784 | ctx->state = MFCINST_RUNNING; |
3785 | - wake_up(&ctx->queue); |
3786 | goto irq_cleanup_hw; |
3787 | |
3788 | default: |
3789 | @@ -738,6 +733,8 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv) |
3790 | mfc_err("Failed to unlock hw\n"); |
3791 | |
3792 | s5p_mfc_clock_off(); |
3793 | + clear_work_bit(ctx); |
3794 | + wake_up(&ctx->queue); |
3795 | |
3796 | s5p_mfc_hw_call(dev->mfc_ops, try_run, dev); |
3797 | spin_unlock(&dev->irqlock); |
3798 | diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c |
3799 | index 238d8eaf7d94..93b16fe3ab38 100644 |
3800 | --- a/drivers/media/rc/mceusb.c |
3801 | +++ b/drivers/media/rc/mceusb.c |
3802 | @@ -1288,8 +1288,8 @@ static int mceusb_dev_probe(struct usb_interface *intf, |
3803 | } |
3804 | } |
3805 | } |
3806 | - if (ep_in == NULL) { |
3807 | - dev_dbg(&intf->dev, "inbound and/or endpoint not found"); |
3808 | + if (!ep_in || !ep_out) { |
3809 | + dev_dbg(&intf->dev, "required endpoints not found\n"); |
3810 | return -ENODEV; |
3811 | } |
3812 | |
3813 | diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c |
3814 | index cf80842dfa08..a050d125934c 100644 |
3815 | --- a/drivers/media/usb/cx231xx/cx231xx-audio.c |
3816 | +++ b/drivers/media/usb/cx231xx/cx231xx-audio.c |
3817 | @@ -670,10 +670,8 @@ static int cx231xx_audio_init(struct cx231xx *dev) |
3818 | |
3819 | spin_lock_init(&adev->slock); |
3820 | err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm); |
3821 | - if (err < 0) { |
3822 | - snd_card_free(card); |
3823 | - return err; |
3824 | - } |
3825 | + if (err < 0) |
3826 | + goto err_free_card; |
3827 | |
3828 | snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, |
3829 | &snd_cx231xx_pcm_capture); |
3830 | @@ -687,10 +685,9 @@ static int cx231xx_audio_init(struct cx231xx *dev) |
3831 | INIT_WORK(&dev->wq_trigger, audio_trigger); |
3832 | |
3833 | err = snd_card_register(card); |
3834 | - if (err < 0) { |
3835 | - snd_card_free(card); |
3836 | - return err; |
3837 | - } |
3838 | + if (err < 0) |
3839 | + goto err_free_card; |
3840 | + |
3841 | adev->sndcard = card; |
3842 | adev->udev = dev->udev; |
3843 | |
3844 | @@ -700,6 +697,11 @@ static int cx231xx_audio_init(struct cx231xx *dev) |
3845 | hs_config_info[0].interface_info. |
3846 | audio_index + 1]; |
3847 | |
3848 | + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) { |
3849 | + err = -ENODEV; |
3850 | + goto err_free_card; |
3851 | + } |
3852 | + |
3853 | adev->end_point_addr = |
3854 | uif->altsetting[0].endpoint[isoc_pipe].desc. |
3855 | bEndpointAddress; |
3856 | @@ -709,13 +711,20 @@ static int cx231xx_audio_init(struct cx231xx *dev) |
3857 | "audio EndPoint Addr 0x%x, Alternate settings: %i\n", |
3858 | adev->end_point_addr, adev->num_alt); |
3859 | adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL); |
3860 | - |
3861 | - if (adev->alt_max_pkt_size == NULL) |
3862 | - return -ENOMEM; |
3863 | + if (!adev->alt_max_pkt_size) { |
3864 | + err = -ENOMEM; |
3865 | + goto err_free_card; |
3866 | + } |
3867 | |
3868 | for (i = 0; i < adev->num_alt; i++) { |
3869 | - u16 tmp = |
3870 | - le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc. |
3871 | + u16 tmp; |
3872 | + |
3873 | + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) { |
3874 | + err = -ENODEV; |
3875 | + goto err_free_pkt_size; |
3876 | + } |
3877 | + |
3878 | + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc. |
3879 | wMaxPacketSize); |
3880 | adev->alt_max_pkt_size[i] = |
3881 | (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); |
3882 | @@ -725,6 +734,13 @@ static int cx231xx_audio_init(struct cx231xx *dev) |
3883 | } |
3884 | |
3885 | return 0; |
3886 | + |
3887 | +err_free_pkt_size: |
3888 | + kfree(adev->alt_max_pkt_size); |
3889 | +err_free_card: |
3890 | + snd_card_free(card); |
3891 | + |
3892 | + return err; |
3893 | } |
3894 | |
3895 | static int cx231xx_audio_fini(struct cx231xx *dev) |
3896 | diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c |
3897 | index f730fdbc9156..f850267a0095 100644 |
3898 | --- a/drivers/media/usb/cx231xx/cx231xx-cards.c |
3899 | +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c |
3900 | @@ -1426,6 +1426,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3901 | |
3902 | uif = udev->actconfig->interface[idx]; |
3903 | |
3904 | + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) |
3905 | + return -ENODEV; |
3906 | + |
3907 | dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress; |
3908 | dev->video_mode.num_alt = uif->num_altsetting; |
3909 | |
3910 | @@ -1439,7 +1442,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3911 | return -ENOMEM; |
3912 | |
3913 | for (i = 0; i < dev->video_mode.num_alt; i++) { |
3914 | - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); |
3915 | + u16 tmp; |
3916 | + |
3917 | + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) |
3918 | + return -ENODEV; |
3919 | + |
3920 | + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize); |
3921 | dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); |
3922 | dev_dbg(dev->dev, |
3923 | "Alternate setting %i, max size= %i\n", i, |
3924 | @@ -1456,6 +1464,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3925 | } |
3926 | uif = udev->actconfig->interface[idx]; |
3927 | |
3928 | + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) |
3929 | + return -ENODEV; |
3930 | + |
3931 | dev->vbi_mode.end_point_addr = |
3932 | uif->altsetting[0].endpoint[isoc_pipe].desc. |
3933 | bEndpointAddress; |
3934 | @@ -1472,8 +1483,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3935 | return -ENOMEM; |
3936 | |
3937 | for (i = 0; i < dev->vbi_mode.num_alt; i++) { |
3938 | - u16 tmp = |
3939 | - le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. |
3940 | + u16 tmp; |
3941 | + |
3942 | + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) |
3943 | + return -ENODEV; |
3944 | + |
3945 | + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. |
3946 | desc.wMaxPacketSize); |
3947 | dev->vbi_mode.alt_max_pkt_size[i] = |
3948 | (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); |
3949 | @@ -1493,6 +1508,9 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3950 | } |
3951 | uif = udev->actconfig->interface[idx]; |
3952 | |
3953 | + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) |
3954 | + return -ENODEV; |
3955 | + |
3956 | dev->sliced_cc_mode.end_point_addr = |
3957 | uif->altsetting[0].endpoint[isoc_pipe].desc. |
3958 | bEndpointAddress; |
3959 | @@ -1507,7 +1525,12 @@ static int cx231xx_init_v4l2(struct cx231xx *dev, |
3960 | return -ENOMEM; |
3961 | |
3962 | for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) { |
3963 | - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. |
3964 | + u16 tmp; |
3965 | + |
3966 | + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) |
3967 | + return -ENODEV; |
3968 | + |
3969 | + tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe]. |
3970 | desc.wMaxPacketSize); |
3971 | dev->sliced_cc_mode.alt_max_pkt_size[i] = |
3972 | (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); |
3973 | @@ -1676,6 +1699,11 @@ static int cx231xx_usb_probe(struct usb_interface *interface, |
3974 | } |
3975 | uif = udev->actconfig->interface[idx]; |
3976 | |
3977 | + if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) { |
3978 | + retval = -ENODEV; |
3979 | + goto err_video_alt; |
3980 | + } |
3981 | + |
3982 | dev->ts1_mode.end_point_addr = |
3983 | uif->altsetting[0].endpoint[isoc_pipe]. |
3984 | desc.bEndpointAddress; |
3985 | @@ -1693,7 +1721,14 @@ static int cx231xx_usb_probe(struct usb_interface *interface, |
3986 | } |
3987 | |
3988 | for (i = 0; i < dev->ts1_mode.num_alt; i++) { |
3989 | - u16 tmp = le16_to_cpu(uif->altsetting[i]. |
3990 | + u16 tmp; |
3991 | + |
3992 | + if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) { |
3993 | + retval = -ENODEV; |
3994 | + goto err_video_alt; |
3995 | + } |
3996 | + |
3997 | + tmp = le16_to_cpu(uif->altsetting[i]. |
3998 | endpoint[isoc_pipe].desc. |
3999 | wMaxPacketSize); |
4000 | dev->ts1_mode.alt_max_pkt_size[i] = |
4001 | diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c |
4002 | index dd5edd3a17ee..08acdd32e412 100644 |
4003 | --- a/drivers/media/usb/dvb-usb/dib0700_core.c |
4004 | +++ b/drivers/media/usb/dvb-usb/dib0700_core.c |
4005 | @@ -809,6 +809,9 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf) |
4006 | |
4007 | /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */ |
4008 | |
4009 | + if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1) |
4010 | + return -ENODEV; |
4011 | + |
4012 | purb = usb_alloc_urb(0, GFP_KERNEL); |
4013 | if (purb == NULL) |
4014 | return -ENOMEM; |
4015 | diff --git a/drivers/media/usb/dvb-usb/dibusb-mc-common.c b/drivers/media/usb/dvb-usb/dibusb-mc-common.c |
4016 | index c989cac9343d..0c2bc97436d5 100644 |
4017 | --- a/drivers/media/usb/dvb-usb/dibusb-mc-common.c |
4018 | +++ b/drivers/media/usb/dvb-usb/dibusb-mc-common.c |
4019 | @@ -11,6 +11,8 @@ |
4020 | |
4021 | #include "dibusb.h" |
4022 | |
4023 | +MODULE_LICENSE("GPL"); |
4024 | + |
4025 | /* 3000MC/P stuff */ |
4026 | // Config Adjacent channels Perf -cal22 |
4027 | static struct dibx000_agc_config dib3000p_mt2060_agc_config = { |
4028 | diff --git a/drivers/media/usb/dvb-usb/digitv.c b/drivers/media/usb/dvb-usb/digitv.c |
4029 | index 4284f6984dc1..475a3c0cdee7 100644 |
4030 | --- a/drivers/media/usb/dvb-usb/digitv.c |
4031 | +++ b/drivers/media/usb/dvb-usb/digitv.c |
4032 | @@ -33,6 +33,9 @@ static int digitv_ctrl_msg(struct dvb_usb_device *d, |
4033 | |
4034 | wo = (rbuf == NULL || rlen == 0); /* write-only */ |
4035 | |
4036 | + if (wlen > 4 || rlen > 4) |
4037 | + return -EIO; |
4038 | + |
4039 | memset(st->sndbuf, 0, 7); |
4040 | memset(st->rcvbuf, 0, 7); |
4041 | |
4042 | diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c |
4043 | index 4f42d57f81d9..6e654e5026dd 100644 |
4044 | --- a/drivers/media/usb/dvb-usb/dw2102.c |
4045 | +++ b/drivers/media/usb/dvb-usb/dw2102.c |
4046 | @@ -204,6 +204,20 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, |
4047 | |
4048 | switch (num) { |
4049 | case 2: |
4050 | + if (msg[0].len != 1) { |
4051 | + warn("i2c rd: len=%d is not 1!\n", |
4052 | + msg[0].len); |
4053 | + num = -EOPNOTSUPP; |
4054 | + break; |
4055 | + } |
4056 | + |
4057 | + if (2 + msg[1].len > sizeof(buf6)) { |
4058 | + warn("i2c rd: len=%d is too big!\n", |
4059 | + msg[1].len); |
4060 | + num = -EOPNOTSUPP; |
4061 | + break; |
4062 | + } |
4063 | + |
4064 | /* read si2109 register by number */ |
4065 | buf6[0] = msg[0].addr << 1; |
4066 | buf6[1] = msg[0].len; |
4067 | @@ -219,6 +233,13 @@ static int dw2102_serit_i2c_transfer(struct i2c_adapter *adap, |
4068 | case 1: |
4069 | switch (msg[0].addr) { |
4070 | case 0x68: |
4071 | + if (2 + msg[0].len > sizeof(buf6)) { |
4072 | + warn("i2c wr: len=%d is too big!\n", |
4073 | + msg[0].len); |
4074 | + num = -EOPNOTSUPP; |
4075 | + break; |
4076 | + } |
4077 | + |
4078 | /* write to si2109 register */ |
4079 | buf6[0] = msg[0].addr << 1; |
4080 | buf6[1] = msg[0].len; |
4081 | @@ -262,6 +283,13 @@ static int dw2102_earda_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg ms |
4082 | /* first write first register number */ |
4083 | u8 ibuf[MAX_XFER_SIZE], obuf[3]; |
4084 | |
4085 | + if (2 + msg[0].len != sizeof(obuf)) { |
4086 | + warn("i2c rd: len=%d is not 1!\n", |
4087 | + msg[0].len); |
4088 | + ret = -EOPNOTSUPP; |
4089 | + goto unlock; |
4090 | + } |
4091 | + |
4092 | if (2 + msg[1].len > sizeof(ibuf)) { |
4093 | warn("i2c rd: len=%d is too big!\n", |
4094 | msg[1].len); |
4095 | @@ -462,6 +490,12 @@ static int dw3101_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], |
4096 | /* first write first register number */ |
4097 | u8 ibuf[MAX_XFER_SIZE], obuf[3]; |
4098 | |
4099 | + if (2 + msg[0].len != sizeof(obuf)) { |
4100 | + warn("i2c rd: len=%d is not 1!\n", |
4101 | + msg[0].len); |
4102 | + ret = -EOPNOTSUPP; |
4103 | + goto unlock; |
4104 | + } |
4105 | if (2 + msg[1].len > sizeof(ibuf)) { |
4106 | warn("i2c rd: len=%d is too big!\n", |
4107 | msg[1].len); |
4108 | @@ -696,6 +730,13 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], |
4109 | msg[0].buf[0] = state->data[1]; |
4110 | break; |
4111 | default: |
4112 | + if (3 + msg[0].len > sizeof(state->data)) { |
4113 | + warn("i2c wr: len=%d is too big!\n", |
4114 | + msg[0].len); |
4115 | + num = -EOPNOTSUPP; |
4116 | + break; |
4117 | + } |
4118 | + |
4119 | /* always i2c write*/ |
4120 | state->data[0] = 0x08; |
4121 | state->data[1] = msg[0].addr; |
4122 | @@ -711,6 +752,19 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[], |
4123 | break; |
4124 | case 2: |
4125 | /* always i2c read */ |
4126 | + if (4 + msg[0].len > sizeof(state->data)) { |
4127 | + warn("i2c rd: len=%d is too big!\n", |
4128 | + msg[0].len); |
4129 | + num = -EOPNOTSUPP; |
4130 | + break; |
4131 | + } |
4132 | + if (1 + msg[1].len > sizeof(state->data)) { |
4133 | + warn("i2c rd: len=%d is too big!\n", |
4134 | + msg[1].len); |
4135 | + num = -EOPNOTSUPP; |
4136 | + break; |
4137 | + } |
4138 | + |
4139 | state->data[0] = 0x09; |
4140 | state->data[1] = msg[0].len; |
4141 | state->data[2] = msg[1].len; |
4142 | diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c |
4143 | index ecc207fbaf3c..9e0d6a4166d2 100644 |
4144 | --- a/drivers/media/usb/dvb-usb/ttusb2.c |
4145 | +++ b/drivers/media/usb/dvb-usb/ttusb2.c |
4146 | @@ -78,6 +78,9 @@ static int ttusb2_msg(struct dvb_usb_device *d, u8 cmd, |
4147 | u8 *s, *r = NULL; |
4148 | int ret = 0; |
4149 | |
4150 | + if (4 + rlen > 64) |
4151 | + return -EIO; |
4152 | + |
4153 | s = kzalloc(wlen+4, GFP_KERNEL); |
4154 | if (!s) |
4155 | return -ENOMEM; |
4156 | @@ -381,6 +384,22 @@ static int ttusb2_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num |
4157 | write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD); |
4158 | read = msg[i].flags & I2C_M_RD; |
4159 | |
4160 | + if (3 + msg[i].len > sizeof(obuf)) { |
4161 | + err("i2c wr len=%d too high", msg[i].len); |
4162 | + break; |
4163 | + } |
4164 | + if (write_read) { |
4165 | + if (3 + msg[i+1].len > sizeof(ibuf)) { |
4166 | + err("i2c rd len=%d too high", msg[i+1].len); |
4167 | + break; |
4168 | + } |
4169 | + } else if (read) { |
4170 | + if (3 + msg[i].len > sizeof(ibuf)) { |
4171 | + err("i2c rd len=%d too high", msg[i].len); |
4172 | + break; |
4173 | + } |
4174 | + } |
4175 | + |
4176 | obuf[0] = (msg[i].addr << 1) | (write_read | read); |
4177 | if (read) |
4178 | obuf[1] = 0; |
4179 | diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c |
4180 | index 71f273377f83..31b2117e8f1d 100644 |
4181 | --- a/drivers/media/usb/gspca/konica.c |
4182 | +++ b/drivers/media/usb/gspca/konica.c |
4183 | @@ -184,6 +184,9 @@ static int sd_start(struct gspca_dev *gspca_dev) |
4184 | return -EIO; |
4185 | } |
4186 | |
4187 | + if (alt->desc.bNumEndpoints < 2) |
4188 | + return -ENODEV; |
4189 | + |
4190 | packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); |
4191 | |
4192 | n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; |
4193 | diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c |
4194 | index f5c635a67d74..f9c3325aa4d4 100644 |
4195 | --- a/drivers/media/usb/usbvision/usbvision-video.c |
4196 | +++ b/drivers/media/usb/usbvision/usbvision-video.c |
4197 | @@ -1501,7 +1501,14 @@ static int usbvision_probe(struct usb_interface *intf, |
4198 | } |
4199 | |
4200 | for (i = 0; i < usbvision->num_alt; i++) { |
4201 | - u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. |
4202 | + u16 tmp; |
4203 | + |
4204 | + if (uif->altsetting[i].desc.bNumEndpoints < 2) { |
4205 | + ret = -ENODEV; |
4206 | + goto err_pkt; |
4207 | + } |
4208 | + |
4209 | + tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. |
4210 | wMaxPacketSize); |
4211 | usbvision->alt_max_pkt_size[i] = |
4212 | (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); |
4213 | diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c |
4214 | index f2d6fc03dda0..efdcd5bd6a4c 100644 |
4215 | --- a/drivers/media/usb/zr364xx/zr364xx.c |
4216 | +++ b/drivers/media/usb/zr364xx/zr364xx.c |
4217 | @@ -600,6 +600,14 @@ static int zr364xx_read_video_callback(struct zr364xx_camera *cam, |
4218 | ptr = pdest = frm->lpvbits; |
4219 | |
4220 | if (frm->ulState == ZR364XX_READ_IDLE) { |
4221 | + if (purb->actual_length < 128) { |
4222 | + /* header incomplete */ |
4223 | + dev_info(&cam->udev->dev, |
4224 | + "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n", |
4225 | + __func__, purb->actual_length); |
4226 | + return -EINVAL; |
4227 | + } |
4228 | + |
4229 | frm->ulState = ZR364XX_READ_FRAME; |
4230 | frm->cur_size = 0; |
4231 | |
4232 | diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c |
4233 | index b27ea98b781f..f98d64963022 100644 |
4234 | --- a/drivers/misc/cxl/pci.c |
4235 | +++ b/drivers/misc/cxl/pci.c |
4236 | @@ -1496,8 +1496,6 @@ static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev) |
4237 | if ((rc = cxl_native_register_psl_err_irq(adapter))) |
4238 | goto err; |
4239 | |
4240 | - /* Release the context lock as adapter is configured */ |
4241 | - cxl_adapter_context_unlock(adapter); |
4242 | return 0; |
4243 | |
4244 | err: |
4245 | @@ -1596,6 +1594,9 @@ static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev) |
4246 | if ((rc = cxl_sysfs_adapter_add(adapter))) |
4247 | goto err_put1; |
4248 | |
4249 | + /* Release the context lock as adapter is configured */ |
4250 | + cxl_adapter_context_unlock(adapter); |
4251 | + |
4252 | return adapter; |
4253 | |
4254 | err_put1: |
4255 | @@ -1781,7 +1782,7 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, |
4256 | { |
4257 | struct cxl *adapter = pci_get_drvdata(pdev); |
4258 | struct cxl_afu *afu; |
4259 | - pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET; |
4260 | + pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET, afu_result; |
4261 | int i; |
4262 | |
4263 | /* At this point, we could still have an interrupt pending. |
4264 | @@ -1885,16 +1886,26 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, |
4265 | for (i = 0; i < adapter->slices; i++) { |
4266 | afu = adapter->afu[i]; |
4267 | |
4268 | - result = cxl_vphb_error_detected(afu, state); |
4269 | - |
4270 | - /* Only continue if everyone agrees on NEED_RESET */ |
4271 | - if (result != PCI_ERS_RESULT_NEED_RESET) |
4272 | - return result; |
4273 | + afu_result = cxl_vphb_error_detected(afu, state); |
4274 | |
4275 | cxl_context_detach_all(afu); |
4276 | cxl_ops->afu_deactivate_mode(afu, afu->current_mode); |
4277 | pci_deconfigure_afu(afu); |
4278 | + |
4279 | + /* Disconnect trumps all, NONE trumps NEED_RESET */ |
4280 | + if (afu_result == PCI_ERS_RESULT_DISCONNECT) |
4281 | + result = PCI_ERS_RESULT_DISCONNECT; |
4282 | + else if ((afu_result == PCI_ERS_RESULT_NONE) && |
4283 | + (result == PCI_ERS_RESULT_NEED_RESET)) |
4284 | + result = PCI_ERS_RESULT_NONE; |
4285 | } |
4286 | + |
4287 | + /* should take the context lock here */ |
4288 | + if (cxl_adapter_context_lock(adapter) != 0) |
4289 | + dev_warn(&adapter->dev, |
4290 | + "Couldn't take context lock with %d active-contexts\n", |
4291 | + atomic_read(&adapter->contexts_num)); |
4292 | + |
4293 | cxl_deconfigure_adapter(adapter); |
4294 | |
4295 | return result; |
4296 | @@ -1913,6 +1924,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev) |
4297 | if (cxl_configure_adapter(adapter, pdev)) |
4298 | goto err; |
4299 | |
4300 | + /* |
4301 | + * Unlock context activation for the adapter. Ideally this should be |
4302 | + * done in cxl_pci_resume but cxlflash module tries to activate the |
4303 | + * master context as part of slot_reset callback. |
4304 | + */ |
4305 | + cxl_adapter_context_unlock(adapter); |
4306 | + |
4307 | for (i = 0; i < adapter->slices; i++) { |
4308 | afu = adapter->afu[i]; |
4309 | |
4310 | diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c |
4311 | index b0524f8accb6..44c0faf84426 100644 |
4312 | --- a/drivers/mtd/nand/nand_base.c |
4313 | +++ b/drivers/mtd/nand/nand_base.c |
4314 | @@ -139,6 +139,74 @@ const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = { |
4315 | }; |
4316 | EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops); |
4317 | |
4318 | +/* |
4319 | + * Support the old "large page" layout used for 1-bit Hamming ECC where ECC |
4320 | + * are placed at a fixed offset. |
4321 | + */ |
4322 | +static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section, |
4323 | + struct mtd_oob_region *oobregion) |
4324 | +{ |
4325 | + struct nand_chip *chip = mtd_to_nand(mtd); |
4326 | + struct nand_ecc_ctrl *ecc = &chip->ecc; |
4327 | + |
4328 | + if (section) |
4329 | + return -ERANGE; |
4330 | + |
4331 | + switch (mtd->oobsize) { |
4332 | + case 64: |
4333 | + oobregion->offset = 40; |
4334 | + break; |
4335 | + case 128: |
4336 | + oobregion->offset = 80; |
4337 | + break; |
4338 | + default: |
4339 | + return -EINVAL; |
4340 | + } |
4341 | + |
4342 | + oobregion->length = ecc->total; |
4343 | + if (oobregion->offset + oobregion->length > mtd->oobsize) |
4344 | + return -ERANGE; |
4345 | + |
4346 | + return 0; |
4347 | +} |
4348 | + |
4349 | +static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section, |
4350 | + struct mtd_oob_region *oobregion) |
4351 | +{ |
4352 | + struct nand_chip *chip = mtd_to_nand(mtd); |
4353 | + struct nand_ecc_ctrl *ecc = &chip->ecc; |
4354 | + int ecc_offset = 0; |
4355 | + |
4356 | + if (section < 0 || section > 1) |
4357 | + return -ERANGE; |
4358 | + |
4359 | + switch (mtd->oobsize) { |
4360 | + case 64: |
4361 | + ecc_offset = 40; |
4362 | + break; |
4363 | + case 128: |
4364 | + ecc_offset = 80; |
4365 | + break; |
4366 | + default: |
4367 | + return -EINVAL; |
4368 | + } |
4369 | + |
4370 | + if (section == 0) { |
4371 | + oobregion->offset = 2; |
4372 | + oobregion->length = ecc_offset - 2; |
4373 | + } else { |
4374 | + oobregion->offset = ecc_offset + ecc->total; |
4375 | + oobregion->length = mtd->oobsize - oobregion->offset; |
4376 | + } |
4377 | + |
4378 | + return 0; |
4379 | +} |
4380 | + |
4381 | +const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { |
4382 | + .ecc = nand_ooblayout_ecc_lp_hamming, |
4383 | + .free = nand_ooblayout_free_lp_hamming, |
4384 | +}; |
4385 | + |
4386 | static int check_offs_len(struct mtd_info *mtd, |
4387 | loff_t ofs, uint64_t len) |
4388 | { |
4389 | @@ -4653,7 +4721,7 @@ int nand_scan_tail(struct mtd_info *mtd) |
4390 | break; |
4391 | case 64: |
4392 | case 128: |
4393 | - mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); |
4394 | + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); |
4395 | break; |
4396 | default: |
4397 | WARN(1, "No oob scheme defined for oobsize %d\n", |
4398 | diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c |
4399 | index 2a52101120d4..084934a9f19c 100644 |
4400 | --- a/drivers/mtd/nand/omap2.c |
4401 | +++ b/drivers/mtd/nand/omap2.c |
4402 | @@ -1856,6 +1856,15 @@ static int omap_nand_probe(struct platform_device *pdev) |
4403 | nand_chip->ecc.priv = NULL; |
4404 | nand_set_flash_node(nand_chip, dev->of_node); |
4405 | |
4406 | + if (!mtd->name) { |
4407 | + mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, |
4408 | + "omap2-nand.%d", info->gpmc_cs); |
4409 | + if (!mtd->name) { |
4410 | + dev_err(&pdev->dev, "Failed to set MTD name\n"); |
4411 | + return -ENOMEM; |
4412 | + } |
4413 | + } |
4414 | + |
4415 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
4416 | nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); |
4417 | if (IS_ERR(nand_chip->IO_ADDR_R)) |
4418 | diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c |
4419 | index 4a91c5d000be..3acdc20485f1 100644 |
4420 | --- a/drivers/mtd/nand/orion_nand.c |
4421 | +++ b/drivers/mtd/nand/orion_nand.c |
4422 | @@ -23,6 +23,11 @@ |
4423 | #include <asm/sizes.h> |
4424 | #include <linux/platform_data/mtd-orion_nand.h> |
4425 | |
4426 | +struct orion_nand_info { |
4427 | + struct nand_chip chip; |
4428 | + struct clk *clk; |
4429 | +}; |
4430 | + |
4431 | static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
4432 | { |
4433 | struct nand_chip *nc = mtd_to_nand(mtd); |
4434 | @@ -75,20 +80,21 @@ static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) |
4435 | |
4436 | static int __init orion_nand_probe(struct platform_device *pdev) |
4437 | { |
4438 | + struct orion_nand_info *info; |
4439 | struct mtd_info *mtd; |
4440 | struct nand_chip *nc; |
4441 | struct orion_nand_data *board; |
4442 | struct resource *res; |
4443 | - struct clk *clk; |
4444 | void __iomem *io_base; |
4445 | int ret = 0; |
4446 | u32 val = 0; |
4447 | |
4448 | - nc = devm_kzalloc(&pdev->dev, |
4449 | - sizeof(struct nand_chip), |
4450 | + info = devm_kzalloc(&pdev->dev, |
4451 | + sizeof(struct orion_nand_info), |
4452 | GFP_KERNEL); |
4453 | - if (!nc) |
4454 | + if (!info) |
4455 | return -ENOMEM; |
4456 | + nc = &info->chip; |
4457 | mtd = nand_to_mtd(nc); |
4458 | |
4459 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
4460 | @@ -145,15 +151,13 @@ static int __init orion_nand_probe(struct platform_device *pdev) |
4461 | if (board->dev_ready) |
4462 | nc->dev_ready = board->dev_ready; |
4463 | |
4464 | - platform_set_drvdata(pdev, mtd); |
4465 | + platform_set_drvdata(pdev, info); |
4466 | |
4467 | /* Not all platforms can gate the clock, so it is not |
4468 | an error if the clock does not exists. */ |
4469 | - clk = clk_get(&pdev->dev, NULL); |
4470 | - if (!IS_ERR(clk)) { |
4471 | - clk_prepare_enable(clk); |
4472 | - clk_put(clk); |
4473 | - } |
4474 | + info->clk = devm_clk_get(&pdev->dev, NULL); |
4475 | + if (!IS_ERR(info->clk)) |
4476 | + clk_prepare_enable(info->clk); |
4477 | |
4478 | ret = nand_scan(mtd, 1); |
4479 | if (ret) |
4480 | @@ -169,26 +173,22 @@ static int __init orion_nand_probe(struct platform_device *pdev) |
4481 | return 0; |
4482 | |
4483 | no_dev: |
4484 | - if (!IS_ERR(clk)) { |
4485 | - clk_disable_unprepare(clk); |
4486 | - clk_put(clk); |
4487 | - } |
4488 | + if (!IS_ERR(info->clk)) |
4489 | + clk_disable_unprepare(info->clk); |
4490 | |
4491 | return ret; |
4492 | } |
4493 | |
4494 | static int orion_nand_remove(struct platform_device *pdev) |
4495 | { |
4496 | - struct mtd_info *mtd = platform_get_drvdata(pdev); |
4497 | - struct clk *clk; |
4498 | + struct orion_nand_info *info = platform_get_drvdata(pdev); |
4499 | + struct nand_chip *chip = &info->chip; |
4500 | + struct mtd_info *mtd = nand_to_mtd(chip); |
4501 | |
4502 | nand_release(mtd); |
4503 | |
4504 | - clk = clk_get(&pdev->dev, NULL); |
4505 | - if (!IS_ERR(clk)) { |
4506 | - clk_disable_unprepare(clk); |
4507 | - clk_put(clk); |
4508 | - } |
4509 | + if (!IS_ERR(info->clk)) |
4510 | + clk_disable_unprepare(info->clk); |
4511 | |
4512 | return 0; |
4513 | } |
4514 | diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c |
4515 | index 8716b8c07feb..6f3c805f7211 100644 |
4516 | --- a/drivers/net/irda/irda-usb.c |
4517 | +++ b/drivers/net/irda/irda-usb.c |
4518 | @@ -1077,7 +1077,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self) |
4519 | * are "42101001.sb" or "42101002.sb" |
4520 | */ |
4521 | sprintf(stir421x_fw_name, "4210%4X.sb", |
4522 | - self->usbdev->descriptor.bcdDevice); |
4523 | + le16_to_cpu(self->usbdev->descriptor.bcdDevice)); |
4524 | ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev); |
4525 | if (ret < 0) |
4526 | return ret; |
4527 | diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c |
4528 | index de2d212f39ec..12aa8abbcba4 100644 |
4529 | --- a/drivers/net/wireless/ath/ath9k/hif_usb.c |
4530 | +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c |
4531 | @@ -37,6 +37,7 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { |
4532 | { USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */ |
4533 | { USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */ |
4534 | { USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */ |
4535 | + { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */ |
4536 | |
4537 | { USB_DEVICE(0x0cf3, 0x7015), |
4538 | .driver_info = AR9287_USB }, /* Atheros */ |
4539 | @@ -1219,6 +1220,9 @@ static int send_eject_command(struct usb_interface *interface) |
4540 | u8 bulk_out_ep; |
4541 | int r; |
4542 | |
4543 | + if (iface_desc->desc.bNumEndpoints < 2) |
4544 | + return -ENODEV; |
4545 | + |
4546 | /* Find bulk out endpoint */ |
4547 | for (r = 1; r >= 0; r--) { |
4548 | endpoint = &iface_desc->endpoint[r].desc; |
4549 | diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c |
4550 | index 1e3bd435a694..2d7e8a372bf1 100644 |
4551 | --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c |
4552 | +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c |
4553 | @@ -2528,9 +2528,11 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, |
4554 | priv->random_mac[i] |= get_random_int() & |
4555 | ~(request->mac_addr_mask[i]); |
4556 | } |
4557 | + ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); |
4558 | + } else { |
4559 | + eth_zero_addr(priv->random_mac); |
4560 | } |
4561 | |
4562 | - ether_addr_copy(user_scan_cfg->random_mac, priv->random_mac); |
4563 | user_scan_cfg->num_ssids = request->n_ssids; |
4564 | user_scan_cfg->ssid_list = request->ssids; |
4565 | |
4566 | diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c |
4567 | index b8c990d10d6e..3045764372b0 100644 |
4568 | --- a/drivers/net/wireless/marvell/mwifiex/pcie.c |
4569 | +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c |
4570 | @@ -1039,6 +1039,7 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter) |
4571 | if (card && card->cmd_buf) { |
4572 | mwifiex_unmap_pci_memory(adapter, card->cmd_buf, |
4573 | PCI_DMA_TODEVICE); |
4574 | + dev_kfree_skb_any(card->cmd_buf); |
4575 | } |
4576 | return 0; |
4577 | } |
4578 | @@ -1608,6 +1609,11 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) |
4579 | return -1; |
4580 | |
4581 | card->cmd_buf = skb; |
4582 | + /* |
4583 | + * Need to keep a reference, since core driver might free up this |
4584 | + * buffer before we've unmapped it. |
4585 | + */ |
4586 | + skb_get(skb); |
4587 | |
4588 | /* To send a command, the driver will: |
4589 | 1. Write the 64bit physical address of the data buffer to |
4590 | @@ -1711,6 +1717,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) |
4591 | if (card->cmd_buf) { |
4592 | mwifiex_unmap_pci_memory(adapter, card->cmd_buf, |
4593 | PCI_DMA_TODEVICE); |
4594 | + dev_kfree_skb_any(card->cmd_buf); |
4595 | card->cmd_buf = NULL; |
4596 | } |
4597 | |
4598 | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c |
4599 | index 8da874cbec1a..d8254bca0b02 100644 |
4600 | --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c |
4601 | +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c |
4602 | @@ -358,6 +358,107 @@ bool rtl8821ae_phy_rf_config(struct ieee80211_hw *hw) |
4603 | return rtl8821ae_phy_rf6052_config(hw); |
4604 | } |
4605 | |
4606 | +static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw) |
4607 | +{ |
4608 | + struct rtl_priv *rtlpriv = rtl_priv(hw); |
4609 | + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
4610 | + u8 tmp; |
4611 | + |
4612 | + switch (rtlhal->rfe_type) { |
4613 | + case 3: |
4614 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770); |
4615 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770); |
4616 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); |
4617 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); |
4618 | + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); |
4619 | + break; |
4620 | + case 4: |
4621 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); |
4622 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); |
4623 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001); |
4624 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001); |
4625 | + break; |
4626 | + case 5: |
4627 | + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77); |
4628 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); |
4629 | + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); |
4630 | + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1); |
4631 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); |
4632 | + break; |
4633 | + case 1: |
4634 | + if (rtlpriv->btcoexist.bt_coexistence) { |
4635 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777); |
4636 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, |
4637 | + 0x77777777); |
4638 | + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); |
4639 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); |
4640 | + break; |
4641 | + } |
4642 | + case 0: |
4643 | + case 2: |
4644 | + default: |
4645 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777); |
4646 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777); |
4647 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); |
4648 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); |
4649 | + break; |
4650 | + } |
4651 | +} |
4652 | + |
4653 | +static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw) |
4654 | +{ |
4655 | + struct rtl_priv *rtlpriv = rtl_priv(hw); |
4656 | + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); |
4657 | + u8 tmp; |
4658 | + |
4659 | + switch (rtlhal->rfe_type) { |
4660 | + case 0: |
4661 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717); |
4662 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717); |
4663 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); |
4664 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); |
4665 | + break; |
4666 | + case 1: |
4667 | + if (rtlpriv->btcoexist.bt_coexistence) { |
4668 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717); |
4669 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, |
4670 | + 0x77337717); |
4671 | + rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000); |
4672 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); |
4673 | + } else { |
4674 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, |
4675 | + 0x77337717); |
4676 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, |
4677 | + 0x77337717); |
4678 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000); |
4679 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000); |
4680 | + } |
4681 | + break; |
4682 | + case 3: |
4683 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717); |
4684 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717); |
4685 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); |
4686 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); |
4687 | + rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1); |
4688 | + break; |
4689 | + case 5: |
4690 | + rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33); |
4691 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); |
4692 | + tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3); |
4693 | + rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1); |
4694 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); |
4695 | + break; |
4696 | + case 2: |
4697 | + case 4: |
4698 | + default: |
4699 | + rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777); |
4700 | + rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777); |
4701 | + rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010); |
4702 | + rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010); |
4703 | + break; |
4704 | + } |
4705 | +} |
4706 | + |
4707 | u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band, |
4708 | u8 rf_path) |
4709 | { |
4710 | @@ -552,14 +653,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) |
4711 | /* 0x82C[1:0] = 2b'00 */ |
4712 | rtl_set_bbreg(hw, 0x82c, 0x3, 0); |
4713 | } |
4714 | - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { |
4715 | - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, |
4716 | - 0x77777777); |
4717 | - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, |
4718 | - 0x77777777); |
4719 | - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000); |
4720 | - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000); |
4721 | - } |
4722 | + |
4723 | + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) |
4724 | + _rtl8812ae_phy_set_rfe_reg_24g(hw); |
4725 | |
4726 | rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1); |
4727 | rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1); |
4728 | @@ -614,14 +710,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band) |
4729 | /* 0x82C[1:0] = 2'b00 */ |
4730 | rtl_set_bbreg(hw, 0x82c, 0x3, 1); |
4731 | |
4732 | - if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) { |
4733 | - rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, |
4734 | - 0x77337777); |
4735 | - rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, |
4736 | - 0x77337777); |
4737 | - rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010); |
4738 | - rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010); |
4739 | - } |
4740 | + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) |
4741 | + _rtl8812ae_phy_set_rfe_reg_5g(hw); |
4742 | |
4743 | rtl_set_bbreg(hw, RTXPATH, 0xf0, 0); |
4744 | rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf); |
4745 | diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h |
4746 | index 1d6110f9c1fb..ed69dbe178ff 100644 |
4747 | --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h |
4748 | +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h |
4749 | @@ -2424,6 +2424,7 @@ |
4750 | #define BMASKH4BITS 0xf0000000 |
4751 | #define BMASKOFDM_D 0xffc00000 |
4752 | #define BMASKCCK 0x3f3f3f3f |
4753 | +#define BMASKRFEINV 0x3ff00000 |
4754 | |
4755 | #define BRFREGOFFSETMASK 0xfffff |
4756 | |
4757 | diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c |
4758 | index 351bac8f6503..0392eb8a0dea 100644 |
4759 | --- a/drivers/nvdimm/bus.c |
4760 | +++ b/drivers/nvdimm/bus.c |
4761 | @@ -218,7 +218,10 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, |
4762 | if (cmd_rc < 0) |
4763 | return cmd_rc; |
4764 | |
4765 | - nvdimm_clear_from_poison_list(nvdimm_bus, phys, len); |
4766 | + if (clear_err.cleared > 0) |
4767 | + nvdimm_clear_from_poison_list(nvdimm_bus, phys, |
4768 | + clear_err.cleared); |
4769 | + |
4770 | return clear_err.cleared; |
4771 | } |
4772 | EXPORT_SYMBOL_GPL(nvdimm_clear_poison); |
4773 | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
4774 | index 5d309535abbd..d8a17078dd57 100644 |
4775 | --- a/drivers/nvme/host/pci.c |
4776 | +++ b/drivers/nvme/host/pci.c |
4777 | @@ -1394,6 +1394,11 @@ static inline void nvme_release_cmb(struct nvme_dev *dev) |
4778 | if (dev->cmb) { |
4779 | iounmap(dev->cmb); |
4780 | dev->cmb = NULL; |
4781 | + if (dev->cmbsz) { |
4782 | + sysfs_remove_file_from_group(&dev->ctrl.device->kobj, |
4783 | + &dev_attr_cmb.attr, NULL); |
4784 | + dev->cmbsz = 0; |
4785 | + } |
4786 | } |
4787 | } |
4788 | |
4789 | @@ -1665,6 +1670,7 @@ static void nvme_pci_disable(struct nvme_dev *dev) |
4790 | { |
4791 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
4792 | |
4793 | + nvme_release_cmb(dev); |
4794 | pci_free_irq_vectors(pdev); |
4795 | |
4796 | if (pci_is_enabled(pdev)) { |
4797 | @@ -2062,7 +2068,6 @@ static void nvme_remove(struct pci_dev *pdev) |
4798 | nvme_dev_disable(dev, true); |
4799 | nvme_dev_remove_admin(dev); |
4800 | nvme_free_queues(dev, 0); |
4801 | - nvme_release_cmb(dev); |
4802 | nvme_release_prp_pools(dev); |
4803 | nvme_dev_unmap(dev); |
4804 | nvme_put_ctrl(&dev->ctrl); |
4805 | diff --git a/drivers/of/address.c b/drivers/of/address.c |
4806 | index 02b2903fe9d2..72914cdfce2a 100644 |
4807 | --- a/drivers/of/address.c |
4808 | +++ b/drivers/of/address.c |
4809 | @@ -263,7 +263,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, |
4810 | if (!parser->range || parser->range + parser->np > parser->end) |
4811 | return NULL; |
4812 | |
4813 | - range->pci_space = parser->range[0]; |
4814 | + range->pci_space = be32_to_cpup(parser->range); |
4815 | range->flags = of_bus_pci_get_flags(parser->range); |
4816 | range->pci_addr = of_read_number(parser->range + 1, ns); |
4817 | range->cpu_addr = of_translate_address(parser->node, |
4818 | diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c |
4819 | index e5ce4b59e162..2e09b8ed03ac 100644 |
4820 | --- a/drivers/of/fdt.c |
4821 | +++ b/drivers/of/fdt.c |
4822 | @@ -505,6 +505,9 @@ static void *__unflatten_device_tree(const void *blob, |
4823 | |
4824 | /* Allocate memory for the expanded device tree */ |
4825 | mem = dt_alloc(size + 4, __alignof__(struct device_node)); |
4826 | + if (!mem) |
4827 | + return NULL; |
4828 | + |
4829 | memset(mem, 0, size); |
4830 | |
4831 | *(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef); |
4832 | diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c |
4833 | index a53982a330ea..2db1f7a04baf 100644 |
4834 | --- a/drivers/of/of_numa.c |
4835 | +++ b/drivers/of/of_numa.c |
4836 | @@ -57,6 +57,8 @@ static void __init of_numa_parse_cpu_nodes(void) |
4837 | else |
4838 | node_set(nid, numa_nodes_parsed); |
4839 | } |
4840 | + |
4841 | + of_node_put(cpus); |
4842 | } |
4843 | |
4844 | static int __init of_numa_parse_memory_nodes(void) |
4845 | diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c |
4846 | index ada98569b78e..85088a11d9a6 100644 |
4847 | --- a/drivers/pci/host/pci-hyperv.c |
4848 | +++ b/drivers/pci/host/pci-hyperv.c |
4849 | @@ -72,6 +72,7 @@ enum { |
4850 | PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 |
4851 | }; |
4852 | |
4853 | +#define CPU_AFFINITY_ALL -1ULL |
4854 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 |
4855 | #define CFG_PAGE_OFFSET 0x1000 |
4856 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) |
4857 | @@ -876,7 +877,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4858 | hv_int_desc_free(hpdev, int_desc); |
4859 | } |
4860 | |
4861 | - int_desc = kzalloc(sizeof(*int_desc), GFP_KERNEL); |
4862 | + int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); |
4863 | if (!int_desc) |
4864 | goto drop_reference; |
4865 | |
4866 | @@ -897,9 +898,13 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
4867 | * processors because Hyper-V only supports 64 in a guest. |
4868 | */ |
4869 | affinity = irq_data_get_affinity_mask(data); |
4870 | - for_each_cpu_and(cpu, affinity, cpu_online_mask) { |
4871 | - int_pkt->int_desc.cpu_mask |= |
4872 | - (1ULL << vmbus_cpu_number_to_vp_number(cpu)); |
4873 | + if (cpumask_weight(affinity) >= 32) { |
4874 | + int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; |
4875 | + } else { |
4876 | + for_each_cpu_and(cpu, affinity, cpu_online_mask) { |
4877 | + int_pkt->int_desc.cpu_mask |= |
4878 | + (1ULL << vmbus_cpu_number_to_vp_number(cpu)); |
4879 | + } |
4880 | } |
4881 | |
4882 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, |
4883 | diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c |
4884 | index 25d010d449a3..7ac258fd3c5c 100644 |
4885 | --- a/drivers/pci/pci-sysfs.c |
4886 | +++ b/drivers/pci/pci-sysfs.c |
4887 | @@ -985,15 +985,19 @@ void pci_remove_legacy_files(struct pci_bus *b) |
4888 | int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, |
4889 | enum pci_mmap_api mmap_api) |
4890 | { |
4891 | - unsigned long nr, start, size, pci_start; |
4892 | + unsigned long nr, start, size; |
4893 | + resource_size_t pci_start = 0, pci_end; |
4894 | |
4895 | if (pci_resource_len(pdev, resno) == 0) |
4896 | return 0; |
4897 | nr = vma_pages(vma); |
4898 | start = vma->vm_pgoff; |
4899 | size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; |
4900 | - pci_start = (mmap_api == PCI_MMAP_PROCFS) ? |
4901 | - pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; |
4902 | + if (mmap_api == PCI_MMAP_PROCFS) { |
4903 | + pci_resource_to_user(pdev, resno, &pdev->resource[resno], |
4904 | + &pci_start, &pci_end); |
4905 | + pci_start >>= PAGE_SHIFT; |
4906 | + } |
4907 | if (start >= pci_start && start < pci_start + size && |
4908 | start + nr <= pci_start + size) |
4909 | return 1; |
4910 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
4911 | index 7904d02ffdb9..d35c0162f9f2 100644 |
4912 | --- a/drivers/pci/pci.c |
4913 | +++ b/drivers/pci/pci.c |
4914 | @@ -1782,8 +1782,8 @@ static void pci_pme_list_scan(struct work_struct *work) |
4915 | } |
4916 | } |
4917 | if (!list_empty(&pci_pme_list)) |
4918 | - schedule_delayed_work(&pci_pme_work, |
4919 | - msecs_to_jiffies(PME_TIMEOUT)); |
4920 | + queue_delayed_work(system_freezable_wq, &pci_pme_work, |
4921 | + msecs_to_jiffies(PME_TIMEOUT)); |
4922 | mutex_unlock(&pci_pme_list_mutex); |
4923 | } |
4924 | |
4925 | @@ -1848,8 +1848,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable) |
4926 | mutex_lock(&pci_pme_list_mutex); |
4927 | list_add(&pme_dev->list, &pci_pme_list); |
4928 | if (list_is_singular(&pci_pme_list)) |
4929 | - schedule_delayed_work(&pci_pme_work, |
4930 | - msecs_to_jiffies(PME_TIMEOUT)); |
4931 | + queue_delayed_work(system_freezable_wq, |
4932 | + &pci_pme_work, |
4933 | + msecs_to_jiffies(PME_TIMEOUT)); |
4934 | mutex_unlock(&pci_pme_list_mutex); |
4935 | } else { |
4936 | mutex_lock(&pci_pme_list_mutex); |
4937 | diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c |
4938 | index f82710a8694d..dc8912e2d4a1 100644 |
4939 | --- a/drivers/pci/proc.c |
4940 | +++ b/drivers/pci/proc.c |
4941 | @@ -231,24 +231,33 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) |
4942 | { |
4943 | struct pci_dev *dev = PDE_DATA(file_inode(file)); |
4944 | struct pci_filp_private *fpriv = file->private_data; |
4945 | - int i, ret, write_combine; |
4946 | + int i, ret, write_combine = 0, res_bit; |
4947 | |
4948 | if (!capable(CAP_SYS_RAWIO)) |
4949 | return -EPERM; |
4950 | |
4951 | + if (fpriv->mmap_state == pci_mmap_io) |
4952 | + res_bit = IORESOURCE_IO; |
4953 | + else |
4954 | + res_bit = IORESOURCE_MEM; |
4955 | + |
4956 | /* Make sure the caller is mapping a real resource for this device */ |
4957 | for (i = 0; i < PCI_ROM_RESOURCE; i++) { |
4958 | - if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
4959 | + if (dev->resource[i].flags & res_bit && |
4960 | + pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) |
4961 | break; |
4962 | } |
4963 | |
4964 | if (i >= PCI_ROM_RESOURCE) |
4965 | return -ENODEV; |
4966 | |
4967 | - if (fpriv->mmap_state == pci_mmap_mem) |
4968 | - write_combine = fpriv->write_combine; |
4969 | - else |
4970 | - write_combine = 0; |
4971 | + if (fpriv->mmap_state == pci_mmap_mem && |
4972 | + fpriv->write_combine) { |
4973 | + if (dev->resource[i].flags & IORESOURCE_PREFETCH) |
4974 | + write_combine = 1; |
4975 | + else |
4976 | + return -EINVAL; |
4977 | + } |
4978 | ret = pci_mmap_page_range(dev, vma, |
4979 | fpriv->mmap_state, write_combine); |
4980 | if (ret < 0) |
4981 | diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c |
4982 | index fb44d5215e30..a16d81420612 100644 |
4983 | --- a/drivers/regulator/rk808-regulator.c |
4984 | +++ b/drivers/regulator/rk808-regulator.c |
4985 | @@ -519,7 +519,7 @@ static const struct regulator_desc rk818_reg[] = { |
4986 | RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, |
4987 | BIT(0), 400), |
4988 | RK8XX_DESC(RK818_ID_LDO2, "LDO_REG2", "vcc6", 1800, 3400, 100, |
4989 | - RK818_LDO1_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, |
4990 | + RK818_LDO2_ON_VSEL_REG, RK818_LDO_VSEL_MASK, RK818_LDO_EN_REG, |
4991 | BIT(1), 400), |
4992 | { |
4993 | .name = "LDO_REG3", |
4994 | diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c |
4995 | index d2c3d7cc35f5..5ca6d2130593 100644 |
4996 | --- a/drivers/regulator/tps65023-regulator.c |
4997 | +++ b/drivers/regulator/tps65023-regulator.c |
4998 | @@ -311,8 +311,7 @@ static int tps_65023_probe(struct i2c_client *client, |
4999 | |
5000 | /* Enable setting output voltage by I2C */ |
5001 | regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2, |
5002 | - TPS65023_REG_CTRL2_CORE_ADJ, |
5003 | - TPS65023_REG_CTRL2_CORE_ADJ); |
5004 | + TPS65023_REG_CTRL2_CORE_ADJ, 0); |
5005 | |
5006 | return 0; |
5007 | } |
5008 | diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
5009 | index 0f807798c624..d390325c99ec 100644 |
5010 | --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
5011 | +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c |
5012 | @@ -1170,6 +1170,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) |
5013 | cmd = list_first_entry_or_null(&vscsi->free_cmd, |
5014 | struct ibmvscsis_cmd, list); |
5015 | if (cmd) { |
5016 | + cmd->flags &= ~(DELAY_SEND); |
5017 | list_del(&cmd->list); |
5018 | cmd->iue = iue; |
5019 | cmd->type = UNSET_TYPE; |
5020 | @@ -1749,45 +1750,79 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc) |
5021 | static void ibmvscsis_send_messages(struct scsi_info *vscsi) |
5022 | { |
5023 | u64 msg_hi = 0; |
5024 | - /* note do not attmempt to access the IU_data_ptr with this pointer |
5025 | + /* note do not attempt to access the IU_data_ptr with this pointer |
5026 | * it is not valid |
5027 | */ |
5028 | struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi; |
5029 | struct ibmvscsis_cmd *cmd, *nxt; |
5030 | struct iu_entry *iue; |
5031 | long rc = ADAPT_SUCCESS; |
5032 | + bool retry = false; |
5033 | |
5034 | if (!(vscsi->flags & RESPONSE_Q_DOWN)) { |
5035 | - list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) { |
5036 | - iue = cmd->iue; |
5037 | + do { |
5038 | + retry = false; |
5039 | + list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, |
5040 | + list) { |
5041 | + /* |
5042 | + * Check to make sure abort cmd gets processed |
5043 | + * prior to the abort tmr cmd |
5044 | + */ |
5045 | + if (cmd->flags & DELAY_SEND) |
5046 | + continue; |
5047 | |
5048 | - crq->valid = VALID_CMD_RESP_EL; |
5049 | - crq->format = cmd->rsp.format; |
5050 | + if (cmd->abort_cmd) { |
5051 | + retry = true; |
5052 | + cmd->abort_cmd->flags &= ~(DELAY_SEND); |
5053 | + } |
5054 | |
5055 | - if (cmd->flags & CMD_FAST_FAIL) |
5056 | - crq->status = VIOSRP_ADAPTER_FAIL; |
5057 | + /* |
5058 | + * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and |
5059 | + * the case where LIO issued a |
5060 | + * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST |
5061 | + * case then we dont send a response, since it |
5062 | + * was already done. |
5063 | + */ |
5064 | + if (cmd->se_cmd.transport_state & CMD_T_ABORTED && |
5065 | + !(cmd->se_cmd.transport_state & CMD_T_TAS)) { |
5066 | + list_del(&cmd->list); |
5067 | + ibmvscsis_free_cmd_resources(vscsi, |
5068 | + cmd); |
5069 | + } else { |
5070 | + iue = cmd->iue; |
5071 | |
5072 | - crq->IU_length = cpu_to_be16(cmd->rsp.len); |
5073 | + crq->valid = VALID_CMD_RESP_EL; |
5074 | + crq->format = cmd->rsp.format; |
5075 | |
5076 | - rc = h_send_crq(vscsi->dma_dev->unit_address, |
5077 | - be64_to_cpu(msg_hi), |
5078 | - be64_to_cpu(cmd->rsp.tag)); |
5079 | + if (cmd->flags & CMD_FAST_FAIL) |
5080 | + crq->status = VIOSRP_ADAPTER_FAIL; |
5081 | |
5082 | - pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", |
5083 | - cmd, be64_to_cpu(cmd->rsp.tag), rc); |
5084 | + crq->IU_length = cpu_to_be16(cmd->rsp.len); |
5085 | |
5086 | - /* if all ok free up the command element resources */ |
5087 | - if (rc == H_SUCCESS) { |
5088 | - /* some movement has occurred */ |
5089 | - vscsi->rsp_q_timer.timer_pops = 0; |
5090 | - list_del(&cmd->list); |
5091 | + rc = h_send_crq(vscsi->dma_dev->unit_address, |
5092 | + be64_to_cpu(msg_hi), |
5093 | + be64_to_cpu(cmd->rsp.tag)); |
5094 | |
5095 | - ibmvscsis_free_cmd_resources(vscsi, cmd); |
5096 | - } else { |
5097 | - srp_snd_msg_failed(vscsi, rc); |
5098 | - break; |
5099 | + pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n", |
5100 | + cmd, be64_to_cpu(cmd->rsp.tag), rc); |
5101 | + |
5102 | + /* if all ok free up the command |
5103 | + * element resources |
5104 | + */ |
5105 | + if (rc == H_SUCCESS) { |
5106 | + /* some movement has occurred */ |
5107 | + vscsi->rsp_q_timer.timer_pops = 0; |
5108 | + list_del(&cmd->list); |
5109 | + |
5110 | + ibmvscsis_free_cmd_resources(vscsi, |
5111 | + cmd); |
5112 | + } else { |
5113 | + srp_snd_msg_failed(vscsi, rc); |
5114 | + break; |
5115 | + } |
5116 | + } |
5117 | } |
5118 | - } |
5119 | + } while (retry); |
5120 | |
5121 | if (!rc) { |
5122 | /* |
5123 | @@ -2708,6 +2743,7 @@ static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num) |
5124 | |
5125 | for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num; |
5126 | i++, cmd++) { |
5127 | + cmd->abort_cmd = NULL; |
5128 | cmd->adapter = vscsi; |
5129 | INIT_WORK(&cmd->work, ibmvscsis_scheduler); |
5130 | list_add_tail(&cmd->list, &vscsi->free_cmd); |
5131 | @@ -3579,9 +3615,20 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) |
5132 | { |
5133 | struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, |
5134 | se_cmd); |
5135 | + struct scsi_info *vscsi = cmd->adapter; |
5136 | struct iu_entry *iue = cmd->iue; |
5137 | int rc; |
5138 | |
5139 | + /* |
5140 | + * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success |
5141 | + * since LIO can't do anything about it, and we dont want to |
5142 | + * attempt an srp_transfer_data. |
5143 | + */ |
5144 | + if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { |
5145 | + pr_err("write_pending failed since: %d\n", vscsi->flags); |
5146 | + return 0; |
5147 | + } |
5148 | + |
5149 | rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, |
5150 | 1, 1); |
5151 | if (rc) { |
5152 | @@ -3660,11 +3707,28 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) |
5153 | struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd, |
5154 | se_cmd); |
5155 | struct scsi_info *vscsi = cmd->adapter; |
5156 | + struct ibmvscsis_cmd *cmd_itr; |
5157 | + struct iu_entry *iue = iue = cmd->iue; |
5158 | + struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt; |
5159 | + u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag); |
5160 | uint len; |
5161 | |
5162 | pr_debug("queue_tm_rsp %p, status %d\n", |
5163 | se_cmd, (int)se_cmd->se_tmr_req->response); |
5164 | |
5165 | + if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK && |
5166 | + cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) { |
5167 | + spin_lock_bh(&vscsi->intr_lock); |
5168 | + list_for_each_entry(cmd_itr, &vscsi->active_q, list) { |
5169 | + if (tag_to_abort == cmd_itr->se_cmd.tag) { |
5170 | + cmd_itr->abort_cmd = cmd; |
5171 | + cmd->flags |= DELAY_SEND; |
5172 | + break; |
5173 | + } |
5174 | + } |
5175 | + spin_unlock_bh(&vscsi->intr_lock); |
5176 | + } |
5177 | + |
5178 | srp_build_response(vscsi, cmd, &len); |
5179 | cmd->rsp.format = SRP_FORMAT; |
5180 | cmd->rsp.len = len; |
5181 | @@ -3672,8 +3736,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd) |
5182 | |
5183 | static void ibmvscsis_aborted_task(struct se_cmd *se_cmd) |
5184 | { |
5185 | - /* TBD: What (if anything) should we do here? */ |
5186 | - pr_debug("ibmvscsis_aborted_task %p\n", se_cmd); |
5187 | + pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n", |
5188 | + se_cmd, se_cmd->tag); |
5189 | } |
5190 | |
5191 | static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf, |
5192 | diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5193 | index 65c6189885ab..b4391a8de456 100644 |
5194 | --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5195 | +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h |
5196 | @@ -168,10 +168,12 @@ struct ibmvscsis_cmd { |
5197 | struct iu_rsp rsp; |
5198 | struct work_struct work; |
5199 | struct scsi_info *adapter; |
5200 | + struct ibmvscsis_cmd *abort_cmd; |
5201 | /* Sense buffer that will be mapped into outgoing status */ |
5202 | unsigned char sense_buf[TRANSPORT_SENSE_BUFFER]; |
5203 | u64 init_time; |
5204 | #define CMD_FAST_FAIL BIT(0) |
5205 | +#define DELAY_SEND BIT(1) |
5206 | u32 flags; |
5207 | char type; |
5208 | }; |
5209 | diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h |
5210 | index 54e6ac42fbcd..e1081c491edc 100644 |
5211 | --- a/drivers/scsi/lpfc/lpfc_crtn.h |
5212 | +++ b/drivers/scsi/lpfc/lpfc_crtn.h |
5213 | @@ -289,6 +289,7 @@ int lpfc_selective_reset(struct lpfc_hba *); |
5214 | void lpfc_reset_barrier(struct lpfc_hba *); |
5215 | int lpfc_sli_brdready(struct lpfc_hba *, uint32_t); |
5216 | int lpfc_sli_brdkill(struct lpfc_hba *); |
5217 | +int lpfc_sli_chipset_init(struct lpfc_hba *phba); |
5218 | int lpfc_sli_brdreset(struct lpfc_hba *); |
5219 | int lpfc_sli_brdrestart(struct lpfc_hba *); |
5220 | int lpfc_sli_hba_setup(struct lpfc_hba *); |
5221 | diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c |
5222 | index 6cc561b04211..5ec0f4f32c17 100644 |
5223 | --- a/drivers/scsi/lpfc/lpfc_init.c |
5224 | +++ b/drivers/scsi/lpfc/lpfc_init.c |
5225 | @@ -3563,6 +3563,13 @@ lpfc_get_wwpn(struct lpfc_hba *phba) |
5226 | LPFC_MBOXQ_t *mboxq; |
5227 | MAILBOX_t *mb; |
5228 | |
5229 | + if (phba->sli_rev < LPFC_SLI_REV4) { |
5230 | + /* Reset the port first */ |
5231 | + lpfc_sli_brdrestart(phba); |
5232 | + rc = lpfc_sli_chipset_init(phba); |
5233 | + if (rc) |
5234 | + return (uint64_t)-1; |
5235 | + } |
5236 | |
5237 | mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, |
5238 | GFP_KERNEL); |
5239 | diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c |
5240 | index 1c9fa45df7eb..4e2e675a624f 100644 |
5241 | --- a/drivers/scsi/lpfc/lpfc_sli.c |
5242 | +++ b/drivers/scsi/lpfc/lpfc_sli.c |
5243 | @@ -4204,13 +4204,16 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) |
5244 | /* Reset HBA */ |
5245 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
5246 | "0325 Reset HBA Data: x%x x%x\n", |
5247 | - phba->pport->port_state, psli->sli_flag); |
5248 | + (phba->pport) ? phba->pport->port_state : 0, |
5249 | + psli->sli_flag); |
5250 | |
5251 | /* perform board reset */ |
5252 | phba->fc_eventTag = 0; |
5253 | phba->link_events = 0; |
5254 | - phba->pport->fc_myDID = 0; |
5255 | - phba->pport->fc_prevDID = 0; |
5256 | + if (phba->pport) { |
5257 | + phba->pport->fc_myDID = 0; |
5258 | + phba->pport->fc_prevDID = 0; |
5259 | + } |
5260 | |
5261 | /* Turn off parity checking and serr during the physical reset */ |
5262 | pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value); |
5263 | @@ -4336,7 +4339,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) |
5264 | /* Restart HBA */ |
5265 | lpfc_printf_log(phba, KERN_INFO, LOG_SLI, |
5266 | "0337 Restart HBA Data: x%x x%x\n", |
5267 | - phba->pport->port_state, psli->sli_flag); |
5268 | + (phba->pport) ? phba->pport->port_state : 0, |
5269 | + psli->sli_flag); |
5270 | |
5271 | word0 = 0; |
5272 | mb = (MAILBOX_t *) &word0; |
5273 | @@ -4350,7 +4354,7 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) |
5274 | readl(to_slim); /* flush */ |
5275 | |
5276 | /* Only skip post after fc_ffinit is completed */ |
5277 | - if (phba->pport->port_state) |
5278 | + if (phba->pport && phba->pport->port_state) |
5279 | word0 = 1; /* This is really setting up word1 */ |
5280 | else |
5281 | word0 = 0; /* This is really setting up word1 */ |
5282 | @@ -4359,7 +4363,8 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba) |
5283 | readl(to_slim); /* flush */ |
5284 | |
5285 | lpfc_sli_brdreset(phba); |
5286 | - phba->pport->stopped = 0; |
5287 | + if (phba->pport) |
5288 | + phba->pport->stopped = 0; |
5289 | phba->link_state = LPFC_INIT_START; |
5290 | phba->hba_flag = 0; |
5291 | spin_unlock_irq(&phba->hbalock); |
5292 | @@ -4446,7 +4451,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba) |
5293 | * iteration, the function will restart the HBA again. The function returns |
5294 | * zero if HBA successfully restarted else returns negative error code. |
5295 | **/ |
5296 | -static int |
5297 | +int |
5298 | lpfc_sli_chipset_init(struct lpfc_hba *phba) |
5299 | { |
5300 | uint32_t status, i = 0; |
5301 | diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c |
5302 | index 8d6bca61e7aa..591f2740e5af 100644 |
5303 | --- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c |
5304 | +++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c |
5305 | @@ -97,8 +97,9 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val) |
5306 | |
5307 | switch (variable) { |
5308 | case HW_VAR_BSSID: |
5309 | - rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]); |
5310 | - rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]); |
5311 | + /* BSSIDR 2 byte alignment */ |
5312 | + rtl92e_writew(dev, BSSIDR, *(u16 *)val); |
5313 | + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2)); |
5314 | break; |
5315 | |
5316 | case HW_VAR_MEDIA_STATUS: |
5317 | @@ -626,7 +627,7 @@ void rtl92e_get_eeprom_size(struct net_device *dev) |
5318 | struct r8192_priv *priv = rtllib_priv(dev); |
5319 | |
5320 | RT_TRACE(COMP_INIT, "===========>%s()\n", __func__); |
5321 | - curCR = rtl92e_readl(dev, EPROM_CMD); |
5322 | + curCR = rtl92e_readw(dev, EPROM_CMD); |
5323 | RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD, |
5324 | curCR); |
5325 | priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 : |
5326 | @@ -963,8 +964,8 @@ static void _rtl92e_net_update(struct net_device *dev) |
5327 | rtl92e_config_rate(dev, &rate_config); |
5328 | priv->dot11CurrentPreambleMode = PREAMBLE_AUTO; |
5329 | priv->basic_rate = rate_config &= 0x15f; |
5330 | - rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]); |
5331 | - rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]); |
5332 | + rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid); |
5333 | + rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2)); |
5334 | |
5335 | if (priv->rtllib->iw_mode == IW_MODE_ADHOC) { |
5336 | rtl92e_writew(dev, ATIMWND, 2); |
5337 | @@ -1184,8 +1185,7 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, |
5338 | struct cb_desc *cb_desc, struct sk_buff *skb) |
5339 | { |
5340 | struct r8192_priv *priv = rtllib_priv(dev); |
5341 | - dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, |
5342 | - PCI_DMA_TODEVICE); |
5343 | + dma_addr_t mapping; |
5344 | struct tx_fwinfo_8190pci *pTxFwInfo; |
5345 | |
5346 | pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data; |
5347 | @@ -1196,8 +1196,6 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, |
5348 | pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT, |
5349 | pTxFwInfo->TxRate, cb_desc); |
5350 | |
5351 | - if (pci_dma_mapping_error(priv->pdev, mapping)) |
5352 | - netdev_err(dev, "%s(): DMA Mapping error\n", __func__); |
5353 | if (cb_desc->bAMPDUEnable) { |
5354 | pTxFwInfo->AllowAggregation = 1; |
5355 | pTxFwInfo->RxMF = cb_desc->ampdu_factor; |
5356 | @@ -1232,6 +1230,14 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc, |
5357 | } |
5358 | |
5359 | memset((u8 *)pdesc, 0, 12); |
5360 | + |
5361 | + mapping = pci_map_single(priv->pdev, skb->data, skb->len, |
5362 | + PCI_DMA_TODEVICE); |
5363 | + if (pci_dma_mapping_error(priv->pdev, mapping)) { |
5364 | + netdev_err(dev, "%s(): DMA Mapping error\n", __func__); |
5365 | + return; |
5366 | + } |
5367 | + |
5368 | pdesc->LINIP = 0; |
5369 | pdesc->CmdInit = 1; |
5370 | pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8; |
5371 | diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c |
5372 | index 48bbd9e8a52f..dcc4eb691889 100644 |
5373 | --- a/drivers/staging/rtl8192e/rtl819x_TSProc.c |
5374 | +++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c |
5375 | @@ -306,11 +306,6 @@ static void MakeTSEntry(struct ts_common_info *pTsCommonInfo, u8 *Addr, |
5376 | pTsCommonInfo->TClasNum = TCLAS_Num; |
5377 | } |
5378 | |
5379 | -static bool IsACValid(unsigned int tid) |
5380 | -{ |
5381 | - return tid < 7; |
5382 | -} |
5383 | - |
5384 | bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, |
5385 | u8 *Addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs) |
5386 | { |
5387 | @@ -328,12 +323,6 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, |
5388 | if (ieee->current_network.qos_data.supported == 0) { |
5389 | UP = 0; |
5390 | } else { |
5391 | - if (!IsACValid(TID)) { |
5392 | - netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", |
5393 | - __func__, TID); |
5394 | - return false; |
5395 | - } |
5396 | - |
5397 | switch (TID) { |
5398 | case 0: |
5399 | case 3: |
5400 | @@ -351,6 +340,10 @@ bool GetTs(struct rtllib_device *ieee, struct ts_common_info **ppTS, |
5401 | case 7: |
5402 | UP = 7; |
5403 | break; |
5404 | + default: |
5405 | + netdev_warn(ieee->dev, "%s(): TID(%d) is not valid\n", |
5406 | + __func__, TID); |
5407 | + return false; |
5408 | } |
5409 | } |
5410 | |
5411 | diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c |
5412 | index 3aeffcb9c87e..02e97367cb88 100644 |
5413 | --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c |
5414 | +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c |
5415 | @@ -501,8 +501,15 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, |
5416 | */ |
5417 | sg_init_table(scatterlist, num_pages); |
5418 | /* Now set the pages for each scatterlist */ |
5419 | - for (i = 0; i < num_pages; i++) |
5420 | - sg_set_page(scatterlist + i, pages[i], PAGE_SIZE, 0); |
5421 | + for (i = 0; i < num_pages; i++) { |
5422 | + unsigned int len = PAGE_SIZE - offset; |
5423 | + |
5424 | + if (len > count) |
5425 | + len = count; |
5426 | + sg_set_page(scatterlist + i, pages[i], len, offset); |
5427 | + offset = 0; |
5428 | + count -= len; |
5429 | + } |
5430 | |
5431 | dma_buffers = dma_map_sg(g_dev, |
5432 | scatterlist, |
5433 | @@ -523,20 +530,20 @@ create_pagelist(char __user *buf, size_t count, unsigned short type, |
5434 | u32 addr = sg_dma_address(sg); |
5435 | |
5436 | /* Note: addrs is the address + page_count - 1 |
5437 | - * The firmware expects the block to be page |
5438 | + * The firmware expects blocks after the first to be page- |
5439 | * aligned and a multiple of the page size |
5440 | */ |
5441 | WARN_ON(len == 0); |
5442 | - WARN_ON(len & ~PAGE_MASK); |
5443 | - WARN_ON(addr & ~PAGE_MASK); |
5444 | + WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK)); |
5445 | + WARN_ON(i && (addr & ~PAGE_MASK)); |
5446 | if (k > 0 && |
5447 | - ((addrs[k - 1] & PAGE_MASK) | |
5448 | - ((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT) |
5449 | - == addr) { |
5450 | - addrs[k - 1] += (len >> PAGE_SHIFT); |
5451 | - } else { |
5452 | - addrs[k++] = addr | ((len >> PAGE_SHIFT) - 1); |
5453 | - } |
5454 | + ((addrs[k - 1] & PAGE_MASK) + |
5455 | + (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT)) |
5456 | + == (addr & PAGE_MASK)) |
5457 | + addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT); |
5458 | + else |
5459 | + addrs[k++] = (addr & PAGE_MASK) | |
5460 | + (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1); |
5461 | } |
5462 | |
5463 | /* Partial cache lines (fragments) require special measures */ |
5464 | diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c |
5465 | index 1aff7fde54b1..7737f14846f9 100644 |
5466 | --- a/drivers/thermal/mtk_thermal.c |
5467 | +++ b/drivers/thermal/mtk_thermal.c |
5468 | @@ -191,7 +191,7 @@ static const int mt8173_bank_data[MT8173_NUM_ZONES][3] = { |
5469 | }; |
5470 | |
5471 | static const int mt8173_msr[MT8173_NUM_SENSORS_PER_ZONE] = { |
5472 | - TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR2 |
5473 | + TEMP_MSR0, TEMP_MSR1, TEMP_MSR2, TEMP_MSR3 |
5474 | }; |
5475 | |
5476 | static const int mt8173_adcpnp[MT8173_NUM_SENSORS_PER_ZONE] = { |
5477 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
5478 | index d5388938bc7a..1c9533b74a55 100644 |
5479 | --- a/drivers/usb/class/cdc-acm.c |
5480 | +++ b/drivers/usb/class/cdc-acm.c |
5481 | @@ -323,6 +323,12 @@ static void acm_ctrl_irq(struct urb *urb) |
5482 | break; |
5483 | |
5484 | case USB_CDC_NOTIFY_SERIAL_STATE: |
5485 | + if (le16_to_cpu(dr->wLength) != 2) { |
5486 | + dev_dbg(&acm->control->dev, |
5487 | + "%s - malformed serial state\n", __func__); |
5488 | + break; |
5489 | + } |
5490 | + |
5491 | newctrl = get_unaligned_le16(data); |
5492 | |
5493 | if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { |
5494 | @@ -359,11 +365,10 @@ static void acm_ctrl_irq(struct urb *urb) |
5495 | |
5496 | default: |
5497 | dev_dbg(&acm->control->dev, |
5498 | - "%s - unknown notification %d received: index %d " |
5499 | - "len %d data0 %d data1 %d\n", |
5500 | + "%s - unknown notification %d received: index %d len %d\n", |
5501 | __func__, |
5502 | - dr->bNotificationType, dr->wIndex, |
5503 | - dr->wLength, data[0], data[1]); |
5504 | + dr->bNotificationType, dr->wIndex, dr->wLength); |
5505 | + |
5506 | break; |
5507 | } |
5508 | exit: |
5509 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c |
5510 | index cfc3cff6e8d5..8e6ef671be9b 100644 |
5511 | --- a/drivers/usb/core/devio.c |
5512 | +++ b/drivers/usb/core/devio.c |
5513 | @@ -475,11 +475,11 @@ static void snoop_urb(struct usb_device *udev, |
5514 | |
5515 | if (userurb) { /* Async */ |
5516 | if (when == SUBMIT) |
5517 | - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " |
5518 | + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " |
5519 | "length %u\n", |
5520 | userurb, ep, t, d, length); |
5521 | else |
5522 | - dev_info(&udev->dev, "userurb %p, ep%d %s-%s, " |
5523 | + dev_info(&udev->dev, "userurb %pK, ep%d %s-%s, " |
5524 | "actual_length %u status %d\n", |
5525 | userurb, ep, t, d, length, |
5526 | timeout_or_status); |
5527 | @@ -1895,7 +1895,7 @@ static int proc_reapurb(struct usb_dev_state *ps, void __user *arg) |
5528 | if (as) { |
5529 | int retval; |
5530 | |
5531 | - snoop(&ps->dev->dev, "reap %p\n", as->userurb); |
5532 | + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
5533 | retval = processcompl(as, (void __user * __user *)arg); |
5534 | free_async(as); |
5535 | return retval; |
5536 | @@ -1912,7 +1912,7 @@ static int proc_reapurbnonblock(struct usb_dev_state *ps, void __user *arg) |
5537 | |
5538 | as = async_getcompleted(ps); |
5539 | if (as) { |
5540 | - snoop(&ps->dev->dev, "reap %p\n", as->userurb); |
5541 | + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
5542 | retval = processcompl(as, (void __user * __user *)arg); |
5543 | free_async(as); |
5544 | } else { |
5545 | @@ -2043,7 +2043,7 @@ static int proc_reapurb_compat(struct usb_dev_state *ps, void __user *arg) |
5546 | if (as) { |
5547 | int retval; |
5548 | |
5549 | - snoop(&ps->dev->dev, "reap %p\n", as->userurb); |
5550 | + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
5551 | retval = processcompl_compat(as, (void __user * __user *)arg); |
5552 | free_async(as); |
5553 | return retval; |
5554 | @@ -2060,7 +2060,7 @@ static int proc_reapurbnonblock_compat(struct usb_dev_state *ps, void __user *ar |
5555 | |
5556 | as = async_getcompleted(ps); |
5557 | if (as) { |
5558 | - snoop(&ps->dev->dev, "reap %p\n", as->userurb); |
5559 | + snoop(&ps->dev->dev, "reap %pK\n", as->userurb); |
5560 | retval = processcompl_compat(as, (void __user * __user *)arg); |
5561 | free_async(as); |
5562 | } else { |
5563 | @@ -2489,7 +2489,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, |
5564 | #endif |
5565 | |
5566 | case USBDEVFS_DISCARDURB: |
5567 | - snoop(&dev->dev, "%s: DISCARDURB %p\n", __func__, p); |
5568 | + snoop(&dev->dev, "%s: DISCARDURB %pK\n", __func__, p); |
5569 | ret = proc_unlinkurb(ps, p); |
5570 | break; |
5571 | |
5572 | diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c |
5573 | index 79bdca5cb9c7..6a857e875633 100644 |
5574 | --- a/drivers/usb/core/hcd.c |
5575 | +++ b/drivers/usb/core/hcd.c |
5576 | @@ -1722,7 +1722,7 @@ int usb_hcd_unlink_urb (struct urb *urb, int status) |
5577 | if (retval == 0) |
5578 | retval = -EINPROGRESS; |
5579 | else if (retval != -EIDRM && retval != -EBUSY) |
5580 | - dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n", |
5581 | + dev_dbg(&udev->dev, "hcd_unlink_urb %pK fail %d\n", |
5582 | urb, retval); |
5583 | usb_put_dev(udev); |
5584 | } |
5585 | @@ -1889,7 +1889,7 @@ void usb_hcd_flush_endpoint(struct usb_device *udev, |
5586 | /* kick hcd */ |
5587 | unlink1(hcd, urb, -ESHUTDOWN); |
5588 | dev_dbg (hcd->self.controller, |
5589 | - "shutdown urb %p ep%d%s%s\n", |
5590 | + "shutdown urb %pK ep%d%s%s\n", |
5591 | urb, usb_endpoint_num(&ep->desc), |
5592 | is_in ? "in" : "out", |
5593 | ({ char *s; |
5594 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
5595 | index 9dca59ef18b3..f77a4ebde7d5 100644 |
5596 | --- a/drivers/usb/core/hub.c |
5597 | +++ b/drivers/usb/core/hub.c |
5598 | @@ -362,7 +362,8 @@ static void usb_set_lpm_parameters(struct usb_device *udev) |
5599 | } |
5600 | |
5601 | /* USB 2.0 spec Section 11.24.4.5 */ |
5602 | -static int get_hub_descriptor(struct usb_device *hdev, void *data) |
5603 | +static int get_hub_descriptor(struct usb_device *hdev, |
5604 | + struct usb_hub_descriptor *desc) |
5605 | { |
5606 | int i, ret, size; |
5607 | unsigned dtype; |
5608 | @@ -378,10 +379,18 @@ static int get_hub_descriptor(struct usb_device *hdev, void *data) |
5609 | for (i = 0; i < 3; i++) { |
5610 | ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), |
5611 | USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB, |
5612 | - dtype << 8, 0, data, size, |
5613 | + dtype << 8, 0, desc, size, |
5614 | USB_CTRL_GET_TIMEOUT); |
5615 | - if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2)) |
5616 | + if (hub_is_superspeed(hdev)) { |
5617 | + if (ret == size) |
5618 | + return ret; |
5619 | + } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) { |
5620 | + /* Make sure we have the DeviceRemovable field. */ |
5621 | + size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1; |
5622 | + if (ret < size) |
5623 | + return -EMSGSIZE; |
5624 | return ret; |
5625 | + } |
5626 | } |
5627 | return -EINVAL; |
5628 | } |
5629 | @@ -1313,7 +1322,7 @@ static int hub_configure(struct usb_hub *hub, |
5630 | } |
5631 | mutex_init(&hub->status_mutex); |
5632 | |
5633 | - hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL); |
5634 | + hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL); |
5635 | if (!hub->descriptor) { |
5636 | ret = -ENOMEM; |
5637 | goto fail; |
5638 | @@ -1321,7 +1330,7 @@ static int hub_configure(struct usb_hub *hub, |
5639 | |
5640 | /* Request the entire hub descriptor. |
5641 | * hub->descriptor can handle USB_MAXCHILDREN ports, |
5642 | - * but the hub can/will return fewer bytes here. |
5643 | + * but a (non-SS) hub can/will return fewer bytes here. |
5644 | */ |
5645 | ret = get_hub_descriptor(hdev, hub->descriptor); |
5646 | if (ret < 0) { |
5647 | diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c |
5648 | index d75cb8c0f7df..47903d510955 100644 |
5649 | --- a/drivers/usb/core/urb.c |
5650 | +++ b/drivers/usb/core/urb.c |
5651 | @@ -338,7 +338,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) |
5652 | if (!urb || !urb->complete) |
5653 | return -EINVAL; |
5654 | if (urb->hcpriv) { |
5655 | - WARN_ONCE(1, "URB %p submitted while active\n", urb); |
5656 | + WARN_ONCE(1, "URB %pK submitted while active\n", urb); |
5657 | return -EBUSY; |
5658 | } |
5659 | |
5660 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
5661 | index 79e7a3480d51..81199f6ee3bc 100644 |
5662 | --- a/drivers/usb/dwc3/gadget.c |
5663 | +++ b/drivers/usb/dwc3/gadget.c |
5664 | @@ -3078,6 +3078,15 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) |
5665 | return IRQ_HANDLED; |
5666 | } |
5667 | |
5668 | + /* |
5669 | + * With PCIe legacy interrupt, test shows that top-half irq handler can |
5670 | + * be called again after HW interrupt deassertion. Check if bottom-half |
5671 | + * irq event handler completes before caching new event to prevent |
5672 | + * losing events. |
5673 | + */ |
5674 | + if (evt->flags & DWC3_EVENT_PENDING) |
5675 | + return IRQ_HANDLED; |
5676 | + |
5677 | count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); |
5678 | count &= DWC3_GEVNTCOUNT_MASK; |
5679 | if (!count) |
5680 | diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c |
5681 | index b6daf2e69989..5f81a2b69054 100644 |
5682 | --- a/drivers/usb/host/ohci-hcd.c |
5683 | +++ b/drivers/usb/host/ohci-hcd.c |
5684 | @@ -231,7 +231,8 @@ static int ohci_urb_enqueue ( |
5685 | |
5686 | /* Start up the I/O watchdog timer, if it's not running */ |
5687 | if (!timer_pending(&ohci->io_watchdog) && |
5688 | - list_empty(&ohci->eds_in_use)) { |
5689 | + list_empty(&ohci->eds_in_use) && |
5690 | + !(ohci->flags & OHCI_QUIRK_QEMU)) { |
5691 | ohci->prev_frame_no = ohci_frame_no(ohci); |
5692 | mod_timer(&ohci->io_watchdog, |
5693 | jiffies + IO_WATCHDOG_DELAY); |
5694 | diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c |
5695 | index bb1509675727..a84aebe9b0a9 100644 |
5696 | --- a/drivers/usb/host/ohci-pci.c |
5697 | +++ b/drivers/usb/host/ohci-pci.c |
5698 | @@ -164,6 +164,15 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) |
5699 | return 0; |
5700 | } |
5701 | |
5702 | +static int ohci_quirk_qemu(struct usb_hcd *hcd) |
5703 | +{ |
5704 | + struct ohci_hcd *ohci = hcd_to_ohci(hcd); |
5705 | + |
5706 | + ohci->flags |= OHCI_QUIRK_QEMU; |
5707 | + ohci_dbg(ohci, "enabled qemu quirk\n"); |
5708 | + return 0; |
5709 | +} |
5710 | + |
5711 | /* List of quirks for OHCI */ |
5712 | static const struct pci_device_id ohci_pci_quirks[] = { |
5713 | { |
5714 | @@ -214,6 +223,13 @@ static const struct pci_device_id ohci_pci_quirks[] = { |
5715 | PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), |
5716 | .driver_data = (unsigned long)ohci_quirk_amd700, |
5717 | }, |
5718 | + { |
5719 | + .vendor = PCI_VENDOR_ID_APPLE, |
5720 | + .device = 0x003f, |
5721 | + .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, |
5722 | + .subdevice = PCI_SUBDEVICE_ID_QEMU, |
5723 | + .driver_data = (unsigned long)ohci_quirk_qemu, |
5724 | + }, |
5725 | |
5726 | /* FIXME for some of the early AMD 760 southbridges, OHCI |
5727 | * won't work at all. blacklist them. |
5728 | diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h |
5729 | index 37f1725e7a46..a51b189bdbd8 100644 |
5730 | --- a/drivers/usb/host/ohci.h |
5731 | +++ b/drivers/usb/host/ohci.h |
5732 | @@ -418,6 +418,7 @@ struct ohci_hcd { |
5733 | #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ |
5734 | #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ |
5735 | #define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */ |
5736 | +#define OHCI_QUIRK_QEMU 0x1000 /* relax timing expectations */ |
5737 | |
5738 | // there are also chip quirks/bugs in init logic |
5739 | |
5740 | diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c |
5741 | index 3bddeaa1e2d7..144f5fbd6aa3 100644 |
5742 | --- a/drivers/usb/host/xhci-hub.c |
5743 | +++ b/drivers/usb/host/xhci-hub.c |
5744 | @@ -421,7 +421,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) |
5745 | wait_for_completion(cmd->completion); |
5746 | |
5747 | if (cmd->status == COMP_COMMAND_ABORTED || |
5748 | - cmd->status == COMP_STOPPED) { |
5749 | + cmd->status == COMP_COMMAND_RING_STOPPED) { |
5750 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); |
5751 | ret = -ETIME; |
5752 | } |
5753 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
5754 | index 3f8f28f6fa94..96e133f4f7e7 100644 |
5755 | --- a/drivers/usb/host/xhci-mem.c |
5756 | +++ b/drivers/usb/host/xhci-mem.c |
5757 | @@ -56,7 +56,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, |
5758 | } |
5759 | |
5760 | if (max_packet) { |
5761 | - seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA); |
5762 | + seg->bounce_buf = kzalloc(max_packet, flags); |
5763 | if (!seg->bounce_buf) { |
5764 | dma_pool_free(xhci->segment_pool, seg->trbs, dma); |
5765 | kfree(seg); |
5766 | @@ -1729,7 +1729,7 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) |
5767 | xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma); |
5768 | for (i = 0; i < num_sp; i++) { |
5769 | dma_addr_t dma; |
5770 | - void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma, |
5771 | + void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma, |
5772 | flags); |
5773 | if (!buf) |
5774 | goto fail_sp5; |
5775 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
5776 | index 7b86508ac8cf..fcf1f3f63e7a 100644 |
5777 | --- a/drivers/usb/host/xhci-pci.c |
5778 | +++ b/drivers/usb/host/xhci-pci.c |
5779 | @@ -52,6 +52,7 @@ |
5780 | #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 |
5781 | #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 |
5782 | #define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 |
5783 | +#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 |
5784 | |
5785 | static const char hcd_name[] = "xhci_hcd"; |
5786 | |
5787 | @@ -166,7 +167,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
5788 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
5789 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || |
5790 | pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || |
5791 | - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { |
5792 | + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || |
5793 | + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) { |
5794 | xhci->quirks |= XHCI_PME_STUCK_QUIRK; |
5795 | } |
5796 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
5797 | @@ -175,7 +177,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
5798 | } |
5799 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && |
5800 | (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || |
5801 | - pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) |
5802 | + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI || |
5803 | + pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) |
5804 | xhci->quirks |= XHCI_MISSING_CAS; |
5805 | |
5806 | if (pdev->vendor == PCI_VENDOR_ID_ETRON && |
5807 | diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c |
5808 | index 6ed468fa7d5e..66ddd080a2a8 100644 |
5809 | --- a/drivers/usb/host/xhci-plat.c |
5810 | +++ b/drivers/usb/host/xhci-plat.c |
5811 | @@ -162,7 +162,7 @@ static int xhci_plat_probe(struct platform_device *pdev) |
5812 | |
5813 | irq = platform_get_irq(pdev, 0); |
5814 | if (irq < 0) |
5815 | - return -ENODEV; |
5816 | + return irq; |
5817 | |
5818 | /* Try to set 64-bit DMA first */ |
5819 | if (!pdev->dev.dma_mask) |
5820 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
5821 | index a3309aa02993..c5b7b7bdffba 100644 |
5822 | --- a/drivers/usb/host/xhci-ring.c |
5823 | +++ b/drivers/usb/host/xhci-ring.c |
5824 | @@ -321,7 +321,7 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, |
5825 | if (i_cmd->status != COMP_COMMAND_ABORTED) |
5826 | continue; |
5827 | |
5828 | - i_cmd->status = COMP_STOPPED; |
5829 | + i_cmd->status = COMP_COMMAND_RING_STOPPED; |
5830 | |
5831 | xhci_dbg(xhci, "Turn aborted command %p to no-op\n", |
5832 | i_cmd->command_trb); |
5833 | @@ -1342,7 +1342,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
5834 | cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); |
5835 | |
5836 | /* If CMD ring stopped we own the trbs between enqueue and dequeue */ |
5837 | - if (cmd_comp_code == COMP_STOPPED) { |
5838 | + if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { |
5839 | complete_all(&xhci->cmd_ring_stop_completion); |
5840 | return; |
5841 | } |
5842 | @@ -1397,8 +1397,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, |
5843 | break; |
5844 | case TRB_CMD_NOOP: |
5845 | /* Is this an aborted command turned to NO-OP? */ |
5846 | - if (cmd->status == COMP_STOPPED) |
5847 | - cmd_comp_code = COMP_STOPPED; |
5848 | + if (cmd->status == COMP_COMMAND_RING_STOPPED) |
5849 | + cmd_comp_code = COMP_COMMAND_RING_STOPPED; |
5850 | break; |
5851 | case TRB_RESET_EP: |
5852 | WARN_ON(slot_id != TRB_TO_SLOT_ID( |
5853 | @@ -2616,11 +2616,12 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) |
5854 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
5855 | union xhci_trb *event_ring_deq; |
5856 | irqreturn_t ret = IRQ_NONE; |
5857 | + unsigned long flags; |
5858 | dma_addr_t deq; |
5859 | u64 temp_64; |
5860 | u32 status; |
5861 | |
5862 | - spin_lock(&xhci->lock); |
5863 | + spin_lock_irqsave(&xhci->lock, flags); |
5864 | /* Check if the xHC generated the interrupt, or the irq is shared */ |
5865 | status = readl(&xhci->op_regs->status); |
5866 | if (status == 0xffffffff) { |
5867 | @@ -2695,7 +2696,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) |
5868 | ret = IRQ_HANDLED; |
5869 | |
5870 | out: |
5871 | - spin_unlock(&xhci->lock); |
5872 | + spin_unlock_irqrestore(&xhci->lock, flags); |
5873 | |
5874 | return ret; |
5875 | } |
5876 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
5877 | index 953fd8f62df0..b1f779817ffe 100644 |
5878 | --- a/drivers/usb/host/xhci.c |
5879 | +++ b/drivers/usb/host/xhci.c |
5880 | @@ -1805,7 +1805,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci, |
5881 | |
5882 | switch (*cmd_status) { |
5883 | case COMP_COMMAND_ABORTED: |
5884 | - case COMP_STOPPED: |
5885 | + case COMP_COMMAND_RING_STOPPED: |
5886 | xhci_warn(xhci, "Timeout while waiting for configure endpoint command\n"); |
5887 | ret = -ETIME; |
5888 | break; |
5889 | @@ -1856,7 +1856,7 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci, |
5890 | |
5891 | switch (*cmd_status) { |
5892 | case COMP_COMMAND_ABORTED: |
5893 | - case COMP_STOPPED: |
5894 | + case COMP_COMMAND_RING_STOPPED: |
5895 | xhci_warn(xhci, "Timeout while waiting for evaluate context command\n"); |
5896 | ret = -ETIME; |
5897 | break; |
5898 | @@ -3478,7 +3478,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) |
5899 | ret = reset_device_cmd->status; |
5900 | switch (ret) { |
5901 | case COMP_COMMAND_ABORTED: |
5902 | - case COMP_STOPPED: |
5903 | + case COMP_COMMAND_RING_STOPPED: |
5904 | xhci_warn(xhci, "Timeout waiting for reset device command\n"); |
5905 | ret = -ETIME; |
5906 | goto command_cleanup; |
5907 | @@ -3845,7 +3845,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, |
5908 | */ |
5909 | switch (command->status) { |
5910 | case COMP_COMMAND_ABORTED: |
5911 | - case COMP_STOPPED: |
5912 | + case COMP_COMMAND_RING_STOPPED: |
5913 | xhci_warn(xhci, "Timeout while waiting for setup device command\n"); |
5914 | ret = -ETIME; |
5915 | break; |
5916 | diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c |
5917 | index aa350dc9eb25..142acf1e00f7 100644 |
5918 | --- a/drivers/usb/misc/chaoskey.c |
5919 | +++ b/drivers/usb/misc/chaoskey.c |
5920 | @@ -194,7 +194,7 @@ static int chaoskey_probe(struct usb_interface *interface, |
5921 | |
5922 | dev->in_ep = in_ep; |
5923 | |
5924 | - if (udev->descriptor.idVendor != ALEA_VENDOR_ID) |
5925 | + if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID) |
5926 | dev->reads_started = 1; |
5927 | |
5928 | dev->size = size; |
5929 | diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c |
5930 | index 37c63cb39714..0ef29d202263 100644 |
5931 | --- a/drivers/usb/misc/iowarrior.c |
5932 | +++ b/drivers/usb/misc/iowarrior.c |
5933 | @@ -554,7 +554,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, |
5934 | info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice); |
5935 | |
5936 | /* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */ |
5937 | - info.speed = le16_to_cpu(dev->udev->speed); |
5938 | + info.speed = dev->udev->speed; |
5939 | info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber; |
5940 | info.report_size = dev->report_size; |
5941 | |
5942 | diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c |
5943 | index 322a042d6e59..c74f0a6499b0 100644 |
5944 | --- a/drivers/usb/misc/legousbtower.c |
5945 | +++ b/drivers/usb/misc/legousbtower.c |
5946 | @@ -317,9 +317,16 @@ static int tower_open (struct inode *inode, struct file *file) |
5947 | int subminor; |
5948 | int retval = 0; |
5949 | struct usb_interface *interface; |
5950 | - struct tower_reset_reply reset_reply; |
5951 | + struct tower_reset_reply *reset_reply; |
5952 | int result; |
5953 | |
5954 | + reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL); |
5955 | + |
5956 | + if (!reset_reply) { |
5957 | + retval = -ENOMEM; |
5958 | + goto exit; |
5959 | + } |
5960 | + |
5961 | nonseekable_open(inode, file); |
5962 | subminor = iminor(inode); |
5963 | |
5964 | @@ -364,8 +371,8 @@ static int tower_open (struct inode *inode, struct file *file) |
5965 | USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, |
5966 | 0, |
5967 | 0, |
5968 | - &reset_reply, |
5969 | - sizeof(reset_reply), |
5970 | + reset_reply, |
5971 | + sizeof(*reset_reply), |
5972 | 1000); |
5973 | if (result < 0) { |
5974 | dev_err(&dev->udev->dev, |
5975 | @@ -406,6 +413,7 @@ static int tower_open (struct inode *inode, struct file *file) |
5976 | mutex_unlock(&dev->lock); |
5977 | |
5978 | exit: |
5979 | + kfree(reset_reply); |
5980 | return retval; |
5981 | } |
5982 | |
5983 | @@ -808,7 +816,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device |
5984 | struct lego_usb_tower *dev = NULL; |
5985 | struct usb_host_interface *iface_desc; |
5986 | struct usb_endpoint_descriptor* endpoint; |
5987 | - struct tower_get_version_reply get_version_reply; |
5988 | + struct tower_get_version_reply *get_version_reply = NULL; |
5989 | int i; |
5990 | int retval = -ENOMEM; |
5991 | int result; |
5992 | @@ -886,6 +894,13 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device |
5993 | dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; |
5994 | dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; |
5995 | |
5996 | + get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL); |
5997 | + |
5998 | + if (!get_version_reply) { |
5999 | + retval = -ENOMEM; |
6000 | + goto error; |
6001 | + } |
6002 | + |
6003 | /* get the firmware version and log it */ |
6004 | result = usb_control_msg (udev, |
6005 | usb_rcvctrlpipe(udev, 0), |
6006 | @@ -893,18 +908,19 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device |
6007 | USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, |
6008 | 0, |
6009 | 0, |
6010 | - &get_version_reply, |
6011 | - sizeof(get_version_reply), |
6012 | + get_version_reply, |
6013 | + sizeof(*get_version_reply), |
6014 | 1000); |
6015 | if (result < 0) { |
6016 | dev_err(idev, "LEGO USB Tower get version control request failed\n"); |
6017 | retval = result; |
6018 | goto error; |
6019 | } |
6020 | - dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d " |
6021 | - "build %d\n", get_version_reply.major, |
6022 | - get_version_reply.minor, |
6023 | - le16_to_cpu(get_version_reply.build_no)); |
6024 | + dev_info(&interface->dev, |
6025 | + "LEGO USB Tower firmware version is %d.%d build %d\n", |
6026 | + get_version_reply->major, |
6027 | + get_version_reply->minor, |
6028 | + le16_to_cpu(get_version_reply->build_no)); |
6029 | |
6030 | /* we can register the device now, as it is ready */ |
6031 | usb_set_intfdata (interface, dev); |
6032 | @@ -925,9 +941,11 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device |
6033 | USB_MAJOR, dev->minor); |
6034 | |
6035 | exit: |
6036 | + kfree(get_version_reply); |
6037 | return retval; |
6038 | |
6039 | error: |
6040 | + kfree(get_version_reply); |
6041 | tower_delete(dev); |
6042 | return retval; |
6043 | } |
6044 | diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c |
6045 | index ac3a4952abb4..dbe617a735d8 100644 |
6046 | --- a/drivers/usb/musb/musb_host.c |
6047 | +++ b/drivers/usb/musb/musb_host.c |
6048 | @@ -2780,10 +2780,11 @@ int musb_host_setup(struct musb *musb, int power_budget) |
6049 | int ret; |
6050 | struct usb_hcd *hcd = musb->hcd; |
6051 | |
6052 | - MUSB_HST_MODE(musb); |
6053 | - musb->xceiv->otg->default_a = 1; |
6054 | - musb->xceiv->otg->state = OTG_STATE_A_IDLE; |
6055 | - |
6056 | + if (musb->port_mode == MUSB_PORT_MODE_HOST) { |
6057 | + MUSB_HST_MODE(musb); |
6058 | + musb->xceiv->otg->default_a = 1; |
6059 | + musb->xceiv->otg->state = OTG_STATE_A_IDLE; |
6060 | + } |
6061 | otg_set_host(musb->xceiv->otg, &hcd->self); |
6062 | hcd->self.otg_port = 1; |
6063 | musb->xceiv->otg->host = &hcd->self; |
6064 | diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c |
6065 | index 8b43c4b99f04..7870b37e0ea5 100644 |
6066 | --- a/drivers/usb/musb/tusb6010_omap.c |
6067 | +++ b/drivers/usb/musb/tusb6010_omap.c |
6068 | @@ -219,6 +219,7 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, |
6069 | u32 dma_remaining; |
6070 | int src_burst, dst_burst; |
6071 | u16 csr; |
6072 | + u32 psize; |
6073 | int ch; |
6074 | s8 dmareq; |
6075 | s8 sync_dev; |
6076 | @@ -390,15 +391,19 @@ static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz, |
6077 | |
6078 | if (chdat->tx) { |
6079 | /* Send transfer_packet_sz packets at a time */ |
6080 | - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, |
6081 | - chdat->transfer_packet_sz); |
6082 | + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); |
6083 | + psize &= ~0x7ff; |
6084 | + psize |= chdat->transfer_packet_sz; |
6085 | + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); |
6086 | |
6087 | musb_writel(ep_conf, TUSB_EP_TX_OFFSET, |
6088 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); |
6089 | } else { |
6090 | /* Receive transfer_packet_sz packets at a time */ |
6091 | - musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, |
6092 | - chdat->transfer_packet_sz << 16); |
6093 | + psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET); |
6094 | + psize &= ~(0x7ff << 16); |
6095 | + psize |= (chdat->transfer_packet_sz << 16); |
6096 | + musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize); |
6097 | |
6098 | musb_writel(ep_conf, TUSB_EP_RX_OFFSET, |
6099 | TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len)); |
6100 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
6101 | index 03e6319b6d1c..f187e13c47e8 100644 |
6102 | --- a/drivers/usb/serial/ftdi_sio.c |
6103 | +++ b/drivers/usb/serial/ftdi_sio.c |
6104 | @@ -809,10 +809,10 @@ static const struct usb_device_id id_table_combined[] = { |
6105 | { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) }, |
6106 | { USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID), |
6107 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
6108 | - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID), |
6109 | - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
6110 | - { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID), |
6111 | - .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
6112 | + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) }, |
6113 | + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) }, |
6114 | + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) }, |
6115 | + { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) }, |
6116 | { USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID), |
6117 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
6118 | { USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID), |
6119 | @@ -1506,9 +1506,9 @@ static int set_serial_info(struct tty_struct *tty, |
6120 | (new_serial.flags & ASYNC_FLAGS)); |
6121 | priv->custom_divisor = new_serial.custom_divisor; |
6122 | |
6123 | +check_and_exit: |
6124 | write_latency_timer(port); |
6125 | |
6126 | -check_and_exit: |
6127 | if ((old_priv.flags & ASYNC_SPD_MASK) != |
6128 | (priv->flags & ASYNC_SPD_MASK)) { |
6129 | if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) |
6130 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
6131 | index 71fb9e59db71..4fcf1cecb6d7 100644 |
6132 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
6133 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
6134 | @@ -882,6 +882,8 @@ |
6135 | /* Olimex */ |
6136 | #define OLIMEX_VID 0x15BA |
6137 | #define OLIMEX_ARM_USB_OCD_PID 0x0003 |
6138 | +#define OLIMEX_ARM_USB_TINY_PID 0x0004 |
6139 | +#define OLIMEX_ARM_USB_TINY_H_PID 0x002a |
6140 | #define OLIMEX_ARM_USB_OCD_H_PID 0x002b |
6141 | |
6142 | /* |
6143 | diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c |
6144 | index a76b95d32157..428ae42dd29e 100644 |
6145 | --- a/drivers/usb/serial/io_ti.c |
6146 | +++ b/drivers/usb/serial/io_ti.c |
6147 | @@ -2349,8 +2349,11 @@ static void change_port_settings(struct tty_struct *tty, |
6148 | if (!baud) { |
6149 | /* pick a default, any default... */ |
6150 | baud = 9600; |
6151 | - } else |
6152 | + } else { |
6153 | + /* Avoid a zero divisor. */ |
6154 | + baud = min(baud, 461550); |
6155 | tty_encode_baud_rate(tty, baud, baud); |
6156 | + } |
6157 | |
6158 | edge_port->baud_rate = baud; |
6159 | config->wBaudRate = (__u16)((461550L + baud/2) / baud); |
6160 | diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c |
6161 | index edbc81f205c2..70f346f1aa86 100644 |
6162 | --- a/drivers/usb/serial/mct_u232.c |
6163 | +++ b/drivers/usb/serial/mct_u232.c |
6164 | @@ -189,7 +189,7 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty, |
6165 | return -ENOMEM; |
6166 | |
6167 | divisor = mct_u232_calculate_baud_rate(serial, value, &speed); |
6168 | - put_unaligned_le32(cpu_to_le32(divisor), buf); |
6169 | + put_unaligned_le32(divisor, buf); |
6170 | rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), |
6171 | MCT_U232_SET_BAUD_RATE_REQUEST, |
6172 | MCT_U232_SET_REQUEST_TYPE, |
6173 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
6174 | index af67a0de6b5d..3bf61acfc26b 100644 |
6175 | --- a/drivers/usb/serial/option.c |
6176 | +++ b/drivers/usb/serial/option.c |
6177 | @@ -281,6 +281,7 @@ static void option_instat_callback(struct urb *urb); |
6178 | #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 |
6179 | #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 |
6180 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 |
6181 | +#define TELIT_PRODUCT_ME910 0x1100 |
6182 | #define TELIT_PRODUCT_LE920 0x1200 |
6183 | #define TELIT_PRODUCT_LE910 0x1201 |
6184 | #define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
6185 | @@ -640,6 +641,11 @@ static const struct option_blacklist_info simcom_sim7100e_blacklist = { |
6186 | .reserved = BIT(5) | BIT(6), |
6187 | }; |
6188 | |
6189 | +static const struct option_blacklist_info telit_me910_blacklist = { |
6190 | + .sendsetup = BIT(0), |
6191 | + .reserved = BIT(1) | BIT(3), |
6192 | +}; |
6193 | + |
6194 | static const struct option_blacklist_info telit_le910_blacklist = { |
6195 | .sendsetup = BIT(0), |
6196 | .reserved = BIT(1) | BIT(2), |
6197 | @@ -1235,6 +1241,8 @@ static const struct usb_device_id option_ids[] = { |
6198 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
6199 | { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), |
6200 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, |
6201 | + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), |
6202 | + .driver_info = (kernel_ulong_t)&telit_me910_blacklist }, |
6203 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), |
6204 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, |
6205 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), |
6206 | diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
6207 | index 38b3f0d8cd58..fd509ed6cf70 100644 |
6208 | --- a/drivers/usb/serial/qcserial.c |
6209 | +++ b/drivers/usb/serial/qcserial.c |
6210 | @@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = { |
6211 | {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */ |
6212 | {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */ |
6213 | {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */ |
6214 | + {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */ |
6215 | + {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */ |
6216 | {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ |
6217 | {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ |
6218 | {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
6219 | diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c |
6220 | index 369f3c24815a..44af719194b2 100644 |
6221 | --- a/drivers/usb/storage/ene_ub6250.c |
6222 | +++ b/drivers/usb/storage/ene_ub6250.c |
6223 | @@ -446,6 +446,10 @@ struct ms_lib_ctrl { |
6224 | #define SD_BLOCK_LEN 9 |
6225 | |
6226 | struct ene_ub6250_info { |
6227 | + |
6228 | + /* I/O bounce buffer */ |
6229 | + u8 *bbuf; |
6230 | + |
6231 | /* for 6250 code */ |
6232 | struct SD_STATUS SD_Status; |
6233 | struct MS_STATUS MS_Status; |
6234 | @@ -493,8 +497,11 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag); |
6235 | |
6236 | static void ene_ub6250_info_destructor(void *extra) |
6237 | { |
6238 | + struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra; |
6239 | + |
6240 | if (!extra) |
6241 | return; |
6242 | + kfree(info->bbuf); |
6243 | } |
6244 | |
6245 | static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg) |
6246 | @@ -860,8 +867,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, |
6247 | u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat) |
6248 | { |
6249 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
6250 | + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
6251 | + u8 *bbuf = info->bbuf; |
6252 | int result; |
6253 | - u8 ExtBuf[4]; |
6254 | u32 bn = PhyBlockAddr * 0x20 + PageNum; |
6255 | |
6256 | result = ene_load_bincode(us, MS_RW_PATTERN); |
6257 | @@ -901,7 +909,7 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, |
6258 | bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16); |
6259 | bcb->CDB[6] = 0x01; |
6260 | |
6261 | - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); |
6262 | + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
6263 | if (result != USB_STOR_XFER_GOOD) |
6264 | return USB_STOR_TRANSPORT_ERROR; |
6265 | |
6266 | @@ -910,9 +918,9 @@ static int ms_read_readpage(struct us_data *us, u32 PhyBlockAddr, |
6267 | ExtraDat->status0 = 0x10; /* Not yet,fireware support */ |
6268 | |
6269 | ExtraDat->status1 = 0x00; /* Not yet,fireware support */ |
6270 | - ExtraDat->ovrflg = ExtBuf[0]; |
6271 | - ExtraDat->mngflg = ExtBuf[1]; |
6272 | - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); |
6273 | + ExtraDat->ovrflg = bbuf[0]; |
6274 | + ExtraDat->mngflg = bbuf[1]; |
6275 | + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); |
6276 | |
6277 | return USB_STOR_TRANSPORT_GOOD; |
6278 | } |
6279 | @@ -1332,8 +1340,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, |
6280 | u8 PageNum, struct ms_lib_type_extdat *ExtraDat) |
6281 | { |
6282 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
6283 | + struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
6284 | + u8 *bbuf = info->bbuf; |
6285 | int result; |
6286 | - u8 ExtBuf[4]; |
6287 | |
6288 | memset(bcb, 0, sizeof(struct bulk_cb_wrap)); |
6289 | bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); |
6290 | @@ -1347,7 +1356,7 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, |
6291 | bcb->CDB[2] = (unsigned char)(PhyBlock>>16); |
6292 | bcb->CDB[6] = 0x01; |
6293 | |
6294 | - result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0); |
6295 | + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
6296 | if (result != USB_STOR_XFER_GOOD) |
6297 | return USB_STOR_TRANSPORT_ERROR; |
6298 | |
6299 | @@ -1355,9 +1364,9 @@ static int ms_lib_read_extra(struct us_data *us, u32 PhyBlock, |
6300 | ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */ |
6301 | ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */ |
6302 | ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */ |
6303 | - ExtraDat->ovrflg = ExtBuf[0]; |
6304 | - ExtraDat->mngflg = ExtBuf[1]; |
6305 | - ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]); |
6306 | + ExtraDat->ovrflg = bbuf[0]; |
6307 | + ExtraDat->mngflg = bbuf[1]; |
6308 | + ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]); |
6309 | |
6310 | return USB_STOR_TRANSPORT_GOOD; |
6311 | } |
6312 | @@ -1556,9 +1565,9 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) |
6313 | u16 PhyBlock, newblk, i; |
6314 | u16 LogStart, LogEnde; |
6315 | struct ms_lib_type_extdat extdat; |
6316 | - u8 buf[0x200]; |
6317 | u32 count = 0, index = 0; |
6318 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
6319 | + u8 *bbuf = info->bbuf; |
6320 | |
6321 | for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) { |
6322 | ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde); |
6323 | @@ -1572,14 +1581,16 @@ static int ms_lib_scan_logicalblocknumber(struct us_data *us, u16 btBlk1st) |
6324 | } |
6325 | |
6326 | if (count == PhyBlock) { |
6327 | - ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf); |
6328 | + ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, |
6329 | + bbuf); |
6330 | count += 0x80; |
6331 | } |
6332 | index = (PhyBlock % 0x80) * 4; |
6333 | |
6334 | - extdat.ovrflg = buf[index]; |
6335 | - extdat.mngflg = buf[index+1]; |
6336 | - extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]); |
6337 | + extdat.ovrflg = bbuf[index]; |
6338 | + extdat.mngflg = bbuf[index+1]; |
6339 | + extdat.logadr = memstick_logaddr(bbuf[index+2], |
6340 | + bbuf[index+3]); |
6341 | |
6342 | if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) { |
6343 | ms_lib_setacquired_errorblock(us, PhyBlock); |
6344 | @@ -2062,9 +2073,9 @@ static int ene_ms_init(struct us_data *us) |
6345 | { |
6346 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
6347 | int result; |
6348 | - u8 buf[0x200]; |
6349 | u16 MSP_BlockSize, MSP_UserAreaBlocks; |
6350 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
6351 | + u8 *bbuf = info->bbuf; |
6352 | |
6353 | printk(KERN_INFO "transport --- ENE_MSInit\n"); |
6354 | |
6355 | @@ -2083,13 +2094,13 @@ static int ene_ms_init(struct us_data *us) |
6356 | bcb->CDB[0] = 0xF1; |
6357 | bcb->CDB[1] = 0x01; |
6358 | |
6359 | - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); |
6360 | + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
6361 | if (result != USB_STOR_XFER_GOOD) { |
6362 | printk(KERN_ERR "Execution MS Init Code Fail !!\n"); |
6363 | return USB_STOR_TRANSPORT_ERROR; |
6364 | } |
6365 | /* the same part to test ENE */ |
6366 | - info->MS_Status = *(struct MS_STATUS *)&buf[0]; |
6367 | + info->MS_Status = *(struct MS_STATUS *) bbuf; |
6368 | |
6369 | if (info->MS_Status.Insert && info->MS_Status.Ready) { |
6370 | printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert); |
6371 | @@ -2098,15 +2109,15 @@ static int ene_ms_init(struct us_data *us) |
6372 | printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG); |
6373 | printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP); |
6374 | if (info->MS_Status.IsMSPro) { |
6375 | - MSP_BlockSize = (buf[6] << 8) | buf[7]; |
6376 | - MSP_UserAreaBlocks = (buf[10] << 8) | buf[11]; |
6377 | + MSP_BlockSize = (bbuf[6] << 8) | bbuf[7]; |
6378 | + MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11]; |
6379 | info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks; |
6380 | } else { |
6381 | ms_card_init(us); /* Card is MS (to ms.c)*/ |
6382 | } |
6383 | usb_stor_dbg(us, "MS Init Code OK !!\n"); |
6384 | } else { |
6385 | - usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]); |
6386 | + usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]); |
6387 | return USB_STOR_TRANSPORT_ERROR; |
6388 | } |
6389 | |
6390 | @@ -2116,9 +2127,9 @@ static int ene_ms_init(struct us_data *us) |
6391 | static int ene_sd_init(struct us_data *us) |
6392 | { |
6393 | int result; |
6394 | - u8 buf[0x200]; |
6395 | struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; |
6396 | struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra; |
6397 | + u8 *bbuf = info->bbuf; |
6398 | |
6399 | usb_stor_dbg(us, "transport --- ENE_SDInit\n"); |
6400 | /* SD Init Part-1 */ |
6401 | @@ -2152,17 +2163,17 @@ static int ene_sd_init(struct us_data *us) |
6402 | bcb->Flags = US_BULK_FLAG_IN; |
6403 | bcb->CDB[0] = 0xF1; |
6404 | |
6405 | - result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0); |
6406 | + result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0); |
6407 | if (result != USB_STOR_XFER_GOOD) { |
6408 | usb_stor_dbg(us, "Execution SD Init Code Fail !!\n"); |
6409 | return USB_STOR_TRANSPORT_ERROR; |
6410 | } |
6411 | |
6412 | - info->SD_Status = *(struct SD_STATUS *)&buf[0]; |
6413 | + info->SD_Status = *(struct SD_STATUS *) bbuf; |
6414 | if (info->SD_Status.Insert && info->SD_Status.Ready) { |
6415 | struct SD_STATUS *s = &info->SD_Status; |
6416 | |
6417 | - ene_get_card_status(us, (unsigned char *)&buf); |
6418 | + ene_get_card_status(us, bbuf); |
6419 | usb_stor_dbg(us, "Insert = %x\n", s->Insert); |
6420 | usb_stor_dbg(us, "Ready = %x\n", s->Ready); |
6421 | usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC); |
6422 | @@ -2170,7 +2181,7 @@ static int ene_sd_init(struct us_data *us) |
6423 | usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed); |
6424 | usb_stor_dbg(us, "WtP = %x\n", s->WtP); |
6425 | } else { |
6426 | - usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]); |
6427 | + usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]); |
6428 | return USB_STOR_TRANSPORT_ERROR; |
6429 | } |
6430 | return USB_STOR_TRANSPORT_GOOD; |
6431 | @@ -2180,13 +2191,15 @@ static int ene_sd_init(struct us_data *us) |
6432 | static int ene_init(struct us_data *us) |
6433 | { |
6434 | int result; |
6435 | - u8 misc_reg03 = 0; |
6436 | + u8 misc_reg03; |
6437 | struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); |
6438 | + u8 *bbuf = info->bbuf; |
6439 | |
6440 | - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); |
6441 | + result = ene_get_card_type(us, REG_CARD_STATUS, bbuf); |
6442 | if (result != USB_STOR_XFER_GOOD) |
6443 | return USB_STOR_TRANSPORT_ERROR; |
6444 | |
6445 | + misc_reg03 = bbuf[0]; |
6446 | if (misc_reg03 & 0x01) { |
6447 | if (!info->SD_Status.Ready) { |
6448 | result = ene_sd_init(us); |
6449 | @@ -2303,8 +2316,9 @@ static int ene_ub6250_probe(struct usb_interface *intf, |
6450 | const struct usb_device_id *id) |
6451 | { |
6452 | int result; |
6453 | - u8 misc_reg03 = 0; |
6454 | + u8 misc_reg03; |
6455 | struct us_data *us; |
6456 | + struct ene_ub6250_info *info; |
6457 | |
6458 | result = usb_stor_probe1(&us, intf, id, |
6459 | (id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list, |
6460 | @@ -2313,11 +2327,16 @@ static int ene_ub6250_probe(struct usb_interface *intf, |
6461 | return result; |
6462 | |
6463 | /* FIXME: where should the code alloc extra buf ? */ |
6464 | - if (!us->extra) { |
6465 | - us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); |
6466 | - if (!us->extra) |
6467 | - return -ENOMEM; |
6468 | - us->extra_destructor = ene_ub6250_info_destructor; |
6469 | + us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL); |
6470 | + if (!us->extra) |
6471 | + return -ENOMEM; |
6472 | + us->extra_destructor = ene_ub6250_info_destructor; |
6473 | + |
6474 | + info = (struct ene_ub6250_info *)(us->extra); |
6475 | + info->bbuf = kmalloc(512, GFP_KERNEL); |
6476 | + if (!info->bbuf) { |
6477 | + kfree(us->extra); |
6478 | + return -ENOMEM; |
6479 | } |
6480 | |
6481 | us->transport_name = "ene_ub6250"; |
6482 | @@ -2329,12 +2348,13 @@ static int ene_ub6250_probe(struct usb_interface *intf, |
6483 | return result; |
6484 | |
6485 | /* probe card type */ |
6486 | - result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03); |
6487 | + result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf); |
6488 | if (result != USB_STOR_XFER_GOOD) { |
6489 | usb_stor_disconnect(intf); |
6490 | return USB_STOR_TRANSPORT_ERROR; |
6491 | } |
6492 | |
6493 | + misc_reg03 = info->bbuf[0]; |
6494 | if (!(misc_reg03 & 0x01)) { |
6495 | pr_info("ums_eneub6250: This driver only supports SD/MS cards. " |
6496 | "It does not support SM cards.\n"); |
6497 | diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c |
6498 | index 6345e85822a4..a50cf45e530f 100644 |
6499 | --- a/drivers/uwb/i1480/dfu/usb.c |
6500 | +++ b/drivers/uwb/i1480/dfu/usb.c |
6501 | @@ -341,6 +341,7 @@ int i1480_usb_cmd(struct i1480 *i1480, const char *cmd_name, size_t cmd_size) |
6502 | static |
6503 | int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) |
6504 | { |
6505 | + struct usb_device *udev = interface_to_usbdev(iface); |
6506 | struct i1480_usb *i1480_usb; |
6507 | struct i1480 *i1480; |
6508 | struct device *dev = &iface->dev; |
6509 | @@ -352,8 +353,8 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) |
6510 | iface->cur_altsetting->desc.bInterfaceNumber); |
6511 | goto error; |
6512 | } |
6513 | - if (iface->num_altsetting > 1 |
6514 | - && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) { |
6515 | + if (iface->num_altsetting > 1 && |
6516 | + le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) { |
6517 | /* Need altsetting #1 [HW QUIRK] or EP1 won't work */ |
6518 | result = usb_set_interface(interface_to_usbdev(iface), 0, 1); |
6519 | if (result < 0) |
6520 | diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c |
6521 | index 99ebf6ea3de6..5615f4013924 100644 |
6522 | --- a/drivers/watchdog/pcwd_usb.c |
6523 | +++ b/drivers/watchdog/pcwd_usb.c |
6524 | @@ -630,6 +630,9 @@ static int usb_pcwd_probe(struct usb_interface *interface, |
6525 | return -ENODEV; |
6526 | } |
6527 | |
6528 | + if (iface_desc->desc.bNumEndpoints < 1) |
6529 | + return -ENODEV; |
6530 | + |
6531 | /* check out the endpoint: it has to be Interrupt & IN */ |
6532 | endpoint = &iface_desc->endpoint[0].desc; |
6533 | |
6534 | diff --git a/fs/dax.c b/fs/dax.c |
6535 | index b87f3ab742ba..db0cc52eb22e 100644 |
6536 | --- a/fs/dax.c |
6537 | +++ b/fs/dax.c |
6538 | @@ -1124,23 +1124,23 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, |
6539 | if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) |
6540 | flags |= IOMAP_WRITE; |
6541 | |
6542 | + entry = grab_mapping_entry(mapping, vmf->pgoff, 0); |
6543 | + if (IS_ERR(entry)) |
6544 | + return dax_fault_return(PTR_ERR(entry)); |
6545 | + |
6546 | /* |
6547 | * Note that we don't bother to use iomap_apply here: DAX required |
6548 | * the file system block size to be equal the page size, which means |
6549 | * that we never have to deal with more than a single extent here. |
6550 | */ |
6551 | error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); |
6552 | - if (error) |
6553 | - return dax_fault_return(error); |
6554 | - if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { |
6555 | - vmf_ret = dax_fault_return(-EIO); /* fs corruption? */ |
6556 | - goto finish_iomap; |
6557 | + if (error) { |
6558 | + vmf_ret = dax_fault_return(error); |
6559 | + goto unlock_entry; |
6560 | } |
6561 | - |
6562 | - entry = grab_mapping_entry(mapping, vmf->pgoff, 0); |
6563 | - if (IS_ERR(entry)) { |
6564 | - vmf_ret = dax_fault_return(PTR_ERR(entry)); |
6565 | - goto finish_iomap; |
6566 | + if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { |
6567 | + error = -EIO; /* fs corruption? */ |
6568 | + goto error_finish_iomap; |
6569 | } |
6570 | |
6571 | sector = dax_iomap_sector(&iomap, pos); |
6572 | @@ -1162,13 +1162,13 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, |
6573 | } |
6574 | |
6575 | if (error) |
6576 | - goto error_unlock_entry; |
6577 | + goto error_finish_iomap; |
6578 | |
6579 | __SetPageUptodate(vmf->cow_page); |
6580 | vmf_ret = finish_fault(vmf); |
6581 | if (!vmf_ret) |
6582 | vmf_ret = VM_FAULT_DONE_COW; |
6583 | - goto unlock_entry; |
6584 | + goto finish_iomap; |
6585 | } |
6586 | |
6587 | switch (iomap.type) { |
6588 | @@ -1188,7 +1188,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, |
6589 | case IOMAP_HOLE: |
6590 | if (!(vmf->flags & FAULT_FLAG_WRITE)) { |
6591 | vmf_ret = dax_load_hole(mapping, &entry, vmf); |
6592 | - goto unlock_entry; |
6593 | + goto finish_iomap; |
6594 | } |
6595 | /*FALLTHRU*/ |
6596 | default: |
6597 | @@ -1197,10 +1197,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, |
6598 | break; |
6599 | } |
6600 | |
6601 | - error_unlock_entry: |
6602 | + error_finish_iomap: |
6603 | vmf_ret = dax_fault_return(error) | major; |
6604 | - unlock_entry: |
6605 | - put_locked_mapping_entry(mapping, vmf->pgoff, entry); |
6606 | finish_iomap: |
6607 | if (ops->iomap_end) { |
6608 | int copied = PAGE_SIZE; |
6609 | @@ -1215,6 +1213,8 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, |
6610 | */ |
6611 | ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); |
6612 | } |
6613 | + unlock_entry: |
6614 | + put_locked_mapping_entry(mapping, vmf->pgoff, entry); |
6615 | return vmf_ret; |
6616 | } |
6617 | |
6618 | diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c |
6619 | index f073a6d2c6a5..d582d443c21a 100644 |
6620 | --- a/fs/nfs/callback_proc.c |
6621 | +++ b/fs/nfs/callback_proc.c |
6622 | @@ -131,10 +131,11 @@ static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp, |
6623 | if (!inode) |
6624 | continue; |
6625 | if (!nfs_sb_active(inode->i_sb)) { |
6626 | - rcu_read_lock(); |
6627 | + rcu_read_unlock(); |
6628 | spin_unlock(&clp->cl_lock); |
6629 | iput(inode); |
6630 | spin_lock(&clp->cl_lock); |
6631 | + rcu_read_lock(); |
6632 | goto restart; |
6633 | } |
6634 | return inode; |
6635 | @@ -170,10 +171,11 @@ static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp, |
6636 | if (!inode) |
6637 | continue; |
6638 | if (!nfs_sb_active(inode->i_sb)) { |
6639 | - rcu_read_lock(); |
6640 | + rcu_read_unlock(); |
6641 | spin_unlock(&clp->cl_lock); |
6642 | iput(inode); |
6643 | spin_lock(&clp->cl_lock); |
6644 | + rcu_read_lock(); |
6645 | goto restart; |
6646 | } |
6647 | return inode; |
6648 | diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
6649 | index 457cfeb1d5c1..9e0b24a192cc 100644 |
6650 | --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
6651 | +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
6652 | @@ -415,7 +415,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, |
6653 | mirror->mirror_ds->ds_versions[0].minor_version); |
6654 | |
6655 | /* connect success, check rsize/wsize limit */ |
6656 | - if (ds->ds_clp) { |
6657 | + if (!status) { |
6658 | max_payload = |
6659 | nfs_block_size(rpc_max_payload(ds->ds_clp->cl_rpcclient), |
6660 | NULL); |
6661 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
6662 | index 201ca3f2c4ba..8ba6c0d4d499 100644 |
6663 | --- a/fs/nfs/nfs4proc.c |
6664 | +++ b/fs/nfs/nfs4proc.c |
6665 | @@ -2300,8 +2300,10 @@ static int _nfs4_proc_open(struct nfs4_opendata *data) |
6666 | if (status != 0) |
6667 | return status; |
6668 | } |
6669 | - if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) |
6670 | + if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) { |
6671 | + nfs4_sequence_free_slot(&o_res->seq_res); |
6672 | nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label); |
6673 | + } |
6674 | return 0; |
6675 | } |
6676 | |
6677 | diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c |
6678 | index 6e629b856a00..cd6ec9bdd1c7 100644 |
6679 | --- a/fs/nfs/pagelist.c |
6680 | +++ b/fs/nfs/pagelist.c |
6681 | @@ -29,13 +29,14 @@ |
6682 | static struct kmem_cache *nfs_page_cachep; |
6683 | static const struct rpc_call_ops nfs_pgio_common_ops; |
6684 | |
6685 | -static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) |
6686 | +static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount, |
6687 | + gfp_t gfp_flags) |
6688 | { |
6689 | p->npages = pagecount; |
6690 | if (pagecount <= ARRAY_SIZE(p->page_array)) |
6691 | p->pagevec = p->page_array; |
6692 | else { |
6693 | - p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL); |
6694 | + p->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags); |
6695 | if (!p->pagevec) |
6696 | p->npages = 0; |
6697 | } |
6698 | @@ -681,6 +682,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, |
6699 | { |
6700 | struct nfs_pgio_mirror *new; |
6701 | int i; |
6702 | + gfp_t gfp_flags = GFP_KERNEL; |
6703 | |
6704 | desc->pg_moreio = 0; |
6705 | desc->pg_inode = inode; |
6706 | @@ -700,8 +702,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, |
6707 | if (pg_ops->pg_get_mirror_count) { |
6708 | /* until we have a request, we don't have an lseg and no |
6709 | * idea how many mirrors there will be */ |
6710 | + if (desc->pg_rw_ops->rw_mode == FMODE_WRITE) |
6711 | + gfp_flags = GFP_NOIO; |
6712 | new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX, |
6713 | - sizeof(struct nfs_pgio_mirror), GFP_KERNEL); |
6714 | + sizeof(struct nfs_pgio_mirror), gfp_flags); |
6715 | desc->pg_mirrors_dynamic = new; |
6716 | desc->pg_mirrors = new; |
6717 | |
6718 | @@ -755,9 +759,12 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, |
6719 | struct list_head *head = &mirror->pg_list; |
6720 | struct nfs_commit_info cinfo; |
6721 | unsigned int pagecount, pageused; |
6722 | + gfp_t gfp_flags = GFP_KERNEL; |
6723 | |
6724 | pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count); |
6725 | - if (!nfs_pgarray_set(&hdr->page_array, pagecount)) { |
6726 | + if (desc->pg_rw_ops->rw_mode == FMODE_WRITE) |
6727 | + gfp_flags = GFP_NOIO; |
6728 | + if (!nfs_pgarray_set(&hdr->page_array, pagecount, gfp_flags)) { |
6729 | nfs_pgio_error(hdr); |
6730 | desc->pg_error = -ENOMEM; |
6731 | return desc->pg_error; |
6732 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
6733 | index abb2c8a3be42..3d1c2842bb9d 100644 |
6734 | --- a/fs/nfs/write.c |
6735 | +++ b/fs/nfs/write.c |
6736 | @@ -548,9 +548,9 @@ static void nfs_write_error_remove_page(struct nfs_page *req) |
6737 | { |
6738 | nfs_unlock_request(req); |
6739 | nfs_end_page_writeback(req); |
6740 | - nfs_release_request(req); |
6741 | generic_error_remove_page(page_file_mapping(req->wb_page), |
6742 | req->wb_page); |
6743 | + nfs_release_request(req); |
6744 | } |
6745 | |
6746 | /* |
6747 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
6748 | index d86031b6ad79..c453a1998e00 100644 |
6749 | --- a/fs/nfsd/nfs4proc.c |
6750 | +++ b/fs/nfsd/nfs4proc.c |
6751 | @@ -1259,7 +1259,8 @@ nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type) |
6752 | return NULL; |
6753 | } |
6754 | |
6755 | - if (!(exp->ex_layout_types & (1 << layout_type))) { |
6756 | + if (layout_type >= LAYOUT_TYPE_MAX || |
6757 | + !(exp->ex_layout_types & (1 << layout_type))) { |
6758 | dprintk("%s: layout type %d not supported\n", |
6759 | __func__, layout_type); |
6760 | return NULL; |
6761 | diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c |
6762 | index 33017d652b1d..26780d53a6f9 100644 |
6763 | --- a/fs/nfsd/nfs4xdr.c |
6764 | +++ b/fs/nfsd/nfs4xdr.c |
6765 | @@ -2831,9 +2831,14 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp, |
6766 | } |
6767 | #endif /* CONFIG_NFSD_PNFS */ |
6768 | if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) { |
6769 | - status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0, |
6770 | - NFSD_SUPPATTR_EXCLCREAT_WORD1, |
6771 | - NFSD_SUPPATTR_EXCLCREAT_WORD2); |
6772 | + u32 supp[3]; |
6773 | + |
6774 | + memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp)); |
6775 | + supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0; |
6776 | + supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1; |
6777 | + supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2; |
6778 | + |
6779 | + status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]); |
6780 | if (status) |
6781 | goto out; |
6782 | } |
6783 | @@ -4119,8 +4124,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, |
6784 | struct nfsd4_getdeviceinfo *gdev) |
6785 | { |
6786 | struct xdr_stream *xdr = &resp->xdr; |
6787 | - const struct nfsd4_layout_ops *ops = |
6788 | - nfsd4_layout_ops[gdev->gd_layout_type]; |
6789 | + const struct nfsd4_layout_ops *ops; |
6790 | u32 starting_len = xdr->buf->len, needed_len; |
6791 | __be32 *p; |
6792 | |
6793 | @@ -4137,6 +4141,7 @@ nfsd4_encode_getdeviceinfo(struct nfsd4_compoundres *resp, __be32 nfserr, |
6794 | |
6795 | /* If maxcount is 0 then just update notifications */ |
6796 | if (gdev->gd_maxcount != 0) { |
6797 | + ops = nfsd4_layout_ops[gdev->gd_layout_type]; |
6798 | nfserr = ops->encode_getdeviceinfo(xdr, gdev); |
6799 | if (nfserr) { |
6800 | /* |
6801 | @@ -4189,8 +4194,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr, |
6802 | struct nfsd4_layoutget *lgp) |
6803 | { |
6804 | struct xdr_stream *xdr = &resp->xdr; |
6805 | - const struct nfsd4_layout_ops *ops = |
6806 | - nfsd4_layout_ops[lgp->lg_layout_type]; |
6807 | + const struct nfsd4_layout_ops *ops; |
6808 | __be32 *p; |
6809 | |
6810 | dprintk("%s: err %d\n", __func__, nfserr); |
6811 | @@ -4213,6 +4217,7 @@ nfsd4_encode_layoutget(struct nfsd4_compoundres *resp, __be32 nfserr, |
6812 | *p++ = cpu_to_be32(lgp->lg_seg.iomode); |
6813 | *p++ = cpu_to_be32(lgp->lg_layout_type); |
6814 | |
6815 | + ops = nfsd4_layout_ops[lgp->lg_layout_type]; |
6816 | nfserr = ops->encode_layoutget(xdr, lgp); |
6817 | out: |
6818 | kfree(lgp->lg_content); |
6819 | diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c |
6820 | index 2b37f2785834..4b3437d70e7e 100644 |
6821 | --- a/fs/notify/fanotify/fanotify_user.c |
6822 | +++ b/fs/notify/fanotify/fanotify_user.c |
6823 | @@ -295,27 +295,37 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, |
6824 | } |
6825 | |
6826 | ret = copy_event_to_user(group, kevent, buf); |
6827 | + if (unlikely(ret == -EOPENSTALE)) { |
6828 | + /* |
6829 | + * We cannot report events with stale fd so drop it. |
6830 | + * Setting ret to 0 will continue the event loop and |
6831 | + * do the right thing if there are no more events to |
6832 | + * read (i.e. return bytes read, -EAGAIN or wait). |
6833 | + */ |
6834 | + ret = 0; |
6835 | + } |
6836 | + |
6837 | /* |
6838 | * Permission events get queued to wait for response. Other |
6839 | * events can be destroyed now. |
6840 | */ |
6841 | if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) { |
6842 | fsnotify_destroy_event(group, kevent); |
6843 | - if (ret < 0) |
6844 | - break; |
6845 | } else { |
6846 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS |
6847 | - if (ret < 0) { |
6848 | + if (ret <= 0) { |
6849 | FANOTIFY_PE(kevent)->response = FAN_DENY; |
6850 | wake_up(&group->fanotify_data.access_waitq); |
6851 | - break; |
6852 | + } else { |
6853 | + spin_lock(&group->notification_lock); |
6854 | + list_add_tail(&kevent->list, |
6855 | + &group->fanotify_data.access_list); |
6856 | + spin_unlock(&group->notification_lock); |
6857 | } |
6858 | - spin_lock(&group->notification_lock); |
6859 | - list_add_tail(&kevent->list, |
6860 | - &group->fanotify_data.access_list); |
6861 | - spin_unlock(&group->notification_lock); |
6862 | #endif |
6863 | } |
6864 | + if (ret < 0) |
6865 | + break; |
6866 | buf += ret; |
6867 | count -= ret; |
6868 | } |
6869 | diff --git a/fs/proc/generic.c b/fs/proc/generic.c |
6870 | index ee27feb34cf4..9425c0d97262 100644 |
6871 | --- a/fs/proc/generic.c |
6872 | +++ b/fs/proc/generic.c |
6873 | @@ -472,6 +472,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name) |
6874 | ent->data = NULL; |
6875 | ent->proc_fops = NULL; |
6876 | ent->proc_iops = NULL; |
6877 | + parent->nlink++; |
6878 | if (proc_register(parent, ent) < 0) { |
6879 | kfree(ent); |
6880 | parent->nlink--; |
6881 | diff --git a/include/linux/fs.h b/include/linux/fs.h |
6882 | index 7251f7bb45e8..741563098d98 100644 |
6883 | --- a/include/linux/fs.h |
6884 | +++ b/include/linux/fs.h |
6885 | @@ -2921,17 +2921,19 @@ extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int); |
6886 | |
6887 | static inline int vfs_stat(const char __user *filename, struct kstat *stat) |
6888 | { |
6889 | - return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS); |
6890 | + return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT, |
6891 | + stat, STATX_BASIC_STATS); |
6892 | } |
6893 | static inline int vfs_lstat(const char __user *name, struct kstat *stat) |
6894 | { |
6895 | - return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW, |
6896 | + return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT, |
6897 | stat, STATX_BASIC_STATS); |
6898 | } |
6899 | static inline int vfs_fstatat(int dfd, const char __user *filename, |
6900 | struct kstat *stat, int flags) |
6901 | { |
6902 | - return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS); |
6903 | + return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT, |
6904 | + stat, STATX_BASIC_STATS); |
6905 | } |
6906 | static inline int vfs_fstat(int fd, struct kstat *stat) |
6907 | { |
6908 | diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h |
6909 | index 7ef111d3ecc5..f32d7c392c1e 100644 |
6910 | --- a/include/linux/hid-sensor-hub.h |
6911 | +++ b/include/linux/hid-sensor-hub.h |
6912 | @@ -231,6 +231,8 @@ struct hid_sensor_common { |
6913 | unsigned usage_id; |
6914 | atomic_t data_ready; |
6915 | atomic_t user_requested_state; |
6916 | + int poll_interval; |
6917 | + int raw_hystersis; |
6918 | struct iio_trigger *trigger; |
6919 | int timestamp_ns_scale; |
6920 | struct hid_sensor_hub_attribute_info poll; |
6921 | diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h |
6922 | index c328e4f7dcad..d0bdb986f759 100644 |
6923 | --- a/include/linux/kprobes.h |
6924 | +++ b/include/linux/kprobes.h |
6925 | @@ -347,6 +347,9 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, |
6926 | int write, void __user *buffer, |
6927 | size_t *length, loff_t *ppos); |
6928 | #endif |
6929 | +extern void wait_for_kprobe_optimizer(void); |
6930 | +#else |
6931 | +static inline void wait_for_kprobe_optimizer(void) { } |
6932 | #endif /* CONFIG_OPTPROBES */ |
6933 | #ifdef CONFIG_KPROBES_ON_FTRACE |
6934 | extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, |
6935 | diff --git a/kernel/fork.c b/kernel/fork.c |
6936 | index 6c463c80e93d..4cc564ece2cf 100644 |
6937 | --- a/kernel/fork.c |
6938 | +++ b/kernel/fork.c |
6939 | @@ -536,7 +536,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) |
6940 | set_task_stack_end_magic(tsk); |
6941 | |
6942 | #ifdef CONFIG_CC_STACKPROTECTOR |
6943 | - tsk->stack_canary = get_random_int(); |
6944 | + tsk->stack_canary = get_random_long(); |
6945 | #endif |
6946 | |
6947 | /* |
6948 | @@ -1815,11 +1815,13 @@ static __latent_entropy struct task_struct *copy_process( |
6949 | */ |
6950 | recalc_sigpending(); |
6951 | if (signal_pending(current)) { |
6952 | - spin_unlock(¤t->sighand->siglock); |
6953 | - write_unlock_irq(&tasklist_lock); |
6954 | retval = -ERESTARTNOINTR; |
6955 | goto bad_fork_cancel_cgroup; |
6956 | } |
6957 | + if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) { |
6958 | + retval = -ENOMEM; |
6959 | + goto bad_fork_cancel_cgroup; |
6960 | + } |
6961 | |
6962 | if (likely(p->pid)) { |
6963 | ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace); |
6964 | @@ -1877,6 +1879,8 @@ static __latent_entropy struct task_struct *copy_process( |
6965 | return p; |
6966 | |
6967 | bad_fork_cancel_cgroup: |
6968 | + spin_unlock(¤t->sighand->siglock); |
6969 | + write_unlock_irq(&tasklist_lock); |
6970 | cgroup_cancel_fork(p); |
6971 | bad_fork_free_pid: |
6972 | cgroup_threadgroup_change_end(current); |
6973 | diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c |
6974 | index be3c34e4f2ac..077c87f40f4d 100644 |
6975 | --- a/kernel/irq/chip.c |
6976 | +++ b/kernel/irq/chip.c |
6977 | @@ -877,8 +877,8 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, |
6978 | if (!desc) |
6979 | return; |
6980 | |
6981 | - __irq_do_set_handler(desc, handle, 1, NULL); |
6982 | desc->irq_common_data.handler_data = data; |
6983 | + __irq_do_set_handler(desc, handle, 1, NULL); |
6984 | |
6985 | irq_put_desc_busunlock(desc, flags); |
6986 | } |
6987 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
6988 | index 699c5bc51a92..74522da4d7cd 100644 |
6989 | --- a/kernel/kprobes.c |
6990 | +++ b/kernel/kprobes.c |
6991 | @@ -598,7 +598,7 @@ static void kprobe_optimizer(struct work_struct *work) |
6992 | } |
6993 | |
6994 | /* Wait for completing optimization and unoptimization */ |
6995 | -static void wait_for_kprobe_optimizer(void) |
6996 | +void wait_for_kprobe_optimizer(void) |
6997 | { |
6998 | mutex_lock(&kprobe_mutex); |
6999 | |
7000 | diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c |
7001 | index de461aa0bf9a..6e51b8820495 100644 |
7002 | --- a/kernel/pid_namespace.c |
7003 | +++ b/kernel/pid_namespace.c |
7004 | @@ -277,7 +277,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) |
7005 | * if reparented. |
7006 | */ |
7007 | for (;;) { |
7008 | - set_current_state(TASK_UNINTERRUPTIBLE); |
7009 | + set_current_state(TASK_INTERRUPTIBLE); |
7010 | if (pid_ns->nr_hashed == init_pids) |
7011 | break; |
7012 | schedule(); |
7013 | diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c |
7014 | index 5f688cc724f0..49cdda852165 100644 |
7015 | --- a/kernel/trace/trace_kprobe.c |
7016 | +++ b/kernel/trace/trace_kprobe.c |
7017 | @@ -1511,6 +1511,11 @@ static __init int kprobe_trace_self_tests_init(void) |
7018 | |
7019 | end: |
7020 | release_all_trace_kprobes(); |
7021 | + /* |
7022 | + * Wait for the optimizer work to finish. Otherwise it might fiddle |
7023 | + * with probes in already freed __init text. |
7024 | + */ |
7025 | + wait_for_kprobe_optimizer(); |
7026 | if (warn) |
7027 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
7028 | else |
7029 | diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c |
7030 | index 8a9219ff2e77..fa31ef29e3fa 100644 |
7031 | --- a/net/ipx/af_ipx.c |
7032 | +++ b/net/ipx/af_ipx.c |
7033 | @@ -1168,11 +1168,10 @@ static int ipxitf_ioctl(unsigned int cmd, void __user *arg) |
7034 | sipx->sipx_network = ipxif->if_netnum; |
7035 | memcpy(sipx->sipx_node, ipxif->if_node, |
7036 | sizeof(sipx->sipx_node)); |
7037 | - rc = -EFAULT; |
7038 | + rc = 0; |
7039 | if (copy_to_user(arg, &ifr, sizeof(ifr))) |
7040 | - break; |
7041 | + rc = -EFAULT; |
7042 | ipxitf_put(ipxif); |
7043 | - rc = 0; |
7044 | break; |
7045 | } |
7046 | case SIOCAIPXITFCRT: |
7047 | diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c |
7048 | index 1fd9539a969d..5d0785cfe063 100644 |
7049 | --- a/security/integrity/ima/ima_appraise.c |
7050 | +++ b/security/integrity/ima/ima_appraise.c |
7051 | @@ -207,10 +207,11 @@ int ima_appraise_measurement(enum ima_hooks func, |
7052 | |
7053 | cause = "missing-hash"; |
7054 | status = INTEGRITY_NOLABEL; |
7055 | - if (opened & FILE_CREATED) { |
7056 | + if (opened & FILE_CREATED) |
7057 | iint->flags |= IMA_NEW_FILE; |
7058 | + if ((iint->flags & IMA_NEW_FILE) && |
7059 | + !(iint->flags & IMA_DIGSIG_REQUIRED)) |
7060 | status = INTEGRITY_PASS; |
7061 | - } |
7062 | goto out; |
7063 | } |
7064 | |
7065 | diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c |
7066 | index 043065867656..0f41257d339e 100644 |
7067 | --- a/sound/hda/hdac_controller.c |
7068 | +++ b/sound/hda/hdac_controller.c |
7069 | @@ -106,7 +106,11 @@ void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus) |
7070 | /* disable ringbuffer DMAs */ |
7071 | snd_hdac_chip_writeb(bus, RIRBCTL, 0); |
7072 | snd_hdac_chip_writeb(bus, CORBCTL, 0); |
7073 | + spin_unlock_irq(&bus->reg_lock); |
7074 | + |
7075 | hdac_wait_for_cmd_dmas(bus); |
7076 | + |
7077 | + spin_lock_irq(&bus->reg_lock); |
7078 | /* disable unsolicited responses */ |
7079 | snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_UNSOL, 0); |
7080 | spin_unlock_irq(&bus->reg_lock); |
7081 | diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c |
7082 | index 8c0f3b89b5bc..e78b5f055f25 100644 |
7083 | --- a/sound/soc/codecs/cs4271.c |
7084 | +++ b/sound/soc/codecs/cs4271.c |
7085 | @@ -498,7 +498,7 @@ static int cs4271_reset(struct snd_soc_codec *codec) |
7086 | struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec); |
7087 | |
7088 | if (gpio_is_valid(cs4271->gpio_nreset)) { |
7089 | - gpio_set_value(cs4271->gpio_nreset, 0); |
7090 | + gpio_direction_output(cs4271->gpio_nreset, 0); |
7091 | mdelay(1); |
7092 | gpio_set_value(cs4271->gpio_nreset, 1); |
7093 | mdelay(1); |
7094 | diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c |
7095 | index b637d9c7afe3..2f54ce6d3a51 100644 |
7096 | --- a/virt/kvm/arm/vgic/vgic-v2.c |
7097 | +++ b/virt/kvm/arm/vgic/vgic-v2.c |
7098 | @@ -181,6 +181,13 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) |
7099 | if (irq->hw) { |
7100 | val |= GICH_LR_HW; |
7101 | val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT; |
7102 | + /* |
7103 | + * Never set pending+active on a HW interrupt, as the |
7104 | + * pending state is kept at the physical distributor |
7105 | + * level. |
7106 | + */ |
7107 | + if (irq->active && irq_is_pending(irq)) |
7108 | + val &= ~GICH_LR_PENDING_BIT; |
7109 | } else { |
7110 | if (irq->config == VGIC_CONFIG_LEVEL) |
7111 | val |= GICH_LR_EOI; |
7112 | diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c |
7113 | index be0f4c3e0142..abd3d4a1cb49 100644 |
7114 | --- a/virt/kvm/arm/vgic/vgic-v3.c |
7115 | +++ b/virt/kvm/arm/vgic/vgic-v3.c |
7116 | @@ -149,6 +149,13 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr) |
7117 | if (irq->hw) { |
7118 | val |= ICH_LR_HW; |
7119 | val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT; |
7120 | + /* |
7121 | + * Never set pending+active on a HW interrupt, as the |
7122 | + * pending state is kept at the physical distributor |
7123 | + * level. |
7124 | + */ |
7125 | + if (irq->active && irq_is_pending(irq)) |
7126 | + val &= ~ICH_LR_PENDING_BIT; |
7127 | } else { |
7128 | if (irq->config == VGIC_CONFIG_LEVEL) |
7129 | val |= ICH_LR_EOI; |