Contents of /trunk/kernel-alx/patches-5.4/0146-5.4.47-all-fixes.patch
Parent Directory | Revision Log
Revision 3527 -
(show annotations)
(download)
Thu Jun 25 11:14:58 2020 UTC (4 years, 3 months ago) by niro
File size: 159659 byte(s)
-linux-5.4.47
1 | diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt |
2 | index ca983328976b..f65b51523014 100644 |
3 | --- a/Documentation/lzo.txt |
4 | +++ b/Documentation/lzo.txt |
5 | @@ -159,11 +159,15 @@ Byte sequences |
6 | distance = 16384 + (H << 14) + D |
7 | state = S (copy S literals after this block) |
8 | End of stream is reached if distance == 16384 |
9 | + In version 1 only, to prevent ambiguity with the RLE case when |
10 | + ((distance & 0x803f) == 0x803f) && (261 <= length <= 264), the |
11 | + compressor must not emit block copies where distance and length |
12 | + meet these conditions. |
13 | |
14 | In version 1 only, this instruction is also used to encode a run of |
15 | - zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. |
16 | + zeros if distance = 0xbfff, i.e. H = 1 and the D bits are all 1. |
17 | In this case, it is followed by a fourth byte, X. |
18 | - run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4. |
19 | + run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4 |
20 | |
21 | 0 0 1 L L L L L (32..63) |
22 | Copy of small block within 16kB distance (preferably less than 34B) |
23 | diff --git a/Makefile b/Makefile |
24 | index 4835d6734c3f..1da2944b842e 100644 |
25 | --- a/Makefile |
26 | +++ b/Makefile |
27 | @@ -1,7 +1,7 @@ |
28 | # SPDX-License-Identifier: GPL-2.0 |
29 | VERSION = 5 |
30 | PATCHLEVEL = 4 |
31 | -SUBLEVEL = 46 |
32 | +SUBLEVEL = 47 |
33 | EXTRAVERSION = |
34 | NAME = Kleptomaniac Octopus |
35 | |
36 | diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts |
37 | index ba7f3e646c26..1333a68b9373 100644 |
38 | --- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts |
39 | +++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts |
40 | @@ -125,8 +125,6 @@ |
41 | bus-width = <8>; |
42 | pinctrl-names = "default"; |
43 | pinctrl-0 = <&pinctrl_sdmmc0_default>; |
44 | - non-removable; |
45 | - mmc-ddr-1_8v; |
46 | status = "okay"; |
47 | }; |
48 | |
49 | diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h |
50 | index 8e995ec796c8..cbde9fa15792 100644 |
51 | --- a/arch/arm/include/asm/kvm_emulate.h |
52 | +++ b/arch/arm/include/asm/kvm_emulate.h |
53 | @@ -363,6 +363,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, |
54 | } |
55 | } |
56 | |
57 | -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {} |
58 | +static inline bool vcpu_has_ptrauth(struct kvm_vcpu *vcpu) { return false; } |
59 | +static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) { } |
60 | |
61 | #endif /* __ARM_KVM_EMULATE_H__ */ |
62 | diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h |
63 | index 8a37c8e89777..1b179b1f46bc 100644 |
64 | --- a/arch/arm/include/asm/kvm_host.h |
65 | +++ b/arch/arm/include/asm/kvm_host.h |
66 | @@ -421,4 +421,6 @@ static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) |
67 | return true; |
68 | } |
69 | |
70 | +#define kvm_arm_vcpu_loaded(vcpu) (false) |
71 | + |
72 | #endif /* __ARM_KVM_HOST_H__ */ |
73 | diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c |
74 | index 324352787aea..db9401581cd2 100644 |
75 | --- a/arch/arm/kernel/ptrace.c |
76 | +++ b/arch/arm/kernel/ptrace.c |
77 | @@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = { |
78 | }; |
79 | |
80 | static struct undef_hook thumb_break_hook = { |
81 | - .instr_mask = 0xffff, |
82 | - .instr_val = 0xde01, |
83 | + .instr_mask = 0xffffffff, |
84 | + .instr_val = 0x0000de01, |
85 | .cpsr_mask = PSR_T_BIT, |
86 | .cpsr_val = PSR_T_BIT, |
87 | .fn = break_trap, |
88 | diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h |
89 | index b263e239cb59..a45366c3909b 100644 |
90 | --- a/arch/arm64/include/asm/acpi.h |
91 | +++ b/arch/arm64/include/asm/acpi.h |
92 | @@ -12,6 +12,7 @@ |
93 | #include <linux/efi.h> |
94 | #include <linux/memblock.h> |
95 | #include <linux/psci.h> |
96 | +#include <linux/stddef.h> |
97 | |
98 | #include <asm/cputype.h> |
99 | #include <asm/io.h> |
100 | @@ -31,14 +32,14 @@ |
101 | * is therefore used to delimit the MADT GICC structure minimum length |
102 | * appropriately. |
103 | */ |
104 | -#define ACPI_MADT_GICC_MIN_LENGTH ACPI_OFFSET( \ |
105 | +#define ACPI_MADT_GICC_MIN_LENGTH offsetof( \ |
106 | struct acpi_madt_generic_interrupt, efficiency_class) |
107 | |
108 | #define BAD_MADT_GICC_ENTRY(entry, end) \ |
109 | (!(entry) || (entry)->header.length < ACPI_MADT_GICC_MIN_LENGTH || \ |
110 | (unsigned long)(entry) + (entry)->header.length > (end)) |
111 | |
112 | -#define ACPI_MADT_GICC_SPE (ACPI_OFFSET(struct acpi_madt_generic_interrupt, \ |
113 | +#define ACPI_MADT_GICC_SPE (offsetof(struct acpi_madt_generic_interrupt, \ |
114 | spe_interrupt) + sizeof(u16)) |
115 | |
116 | /* Basic configuration for ACPI */ |
117 | diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h |
118 | index 6ff84f1f3b4c..f47081b40523 100644 |
119 | --- a/arch/arm64/include/asm/kvm_emulate.h |
120 | +++ b/arch/arm64/include/asm/kvm_emulate.h |
121 | @@ -97,12 +97,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu) |
122 | vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); |
123 | } |
124 | |
125 | -static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) |
126 | -{ |
127 | - if (vcpu_has_ptrauth(vcpu)) |
128 | - vcpu_ptrauth_disable(vcpu); |
129 | -} |
130 | - |
131 | static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) |
132 | { |
133 | return vcpu->arch.vsesr_el2; |
134 | diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h |
135 | index f656169db8c3..0c3bd6aff6e9 100644 |
136 | --- a/arch/arm64/include/asm/kvm_host.h |
137 | +++ b/arch/arm64/include/asm/kvm_host.h |
138 | @@ -392,8 +392,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg); |
139 | * CP14 and CP15 live in the same array, as they are backed by the |
140 | * same system registers. |
141 | */ |
142 | -#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r)]) |
143 | -#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r)]) |
144 | +#define CPx_BIAS IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) |
145 | + |
146 | +#define vcpu_cp14(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) |
147 | +#define vcpu_cp15(v,r) ((v)->arch.ctxt.copro[(r) ^ CPx_BIAS]) |
148 | |
149 | struct kvm_vm_stat { |
150 | ulong remote_tlb_flush; |
151 | @@ -677,4 +679,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu); |
152 | #define kvm_arm_vcpu_sve_finalized(vcpu) \ |
153 | ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED) |
154 | |
155 | +#define kvm_arm_vcpu_loaded(vcpu) ((vcpu)->arch.sysregs_loaded_on_cpu) |
156 | + |
157 | #endif /* __ARM64_KVM_HOST_H__ */ |
158 | diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c |
159 | index 706cca23f0d2..1249f68a9418 100644 |
160 | --- a/arch/arm64/kvm/handle_exit.c |
161 | +++ b/arch/arm64/kvm/handle_exit.c |
162 | @@ -162,31 +162,16 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run) |
163 | return 1; |
164 | } |
165 | |
166 | -#define __ptrauth_save_key(regs, key) \ |
167 | -({ \ |
168 | - regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ |
169 | - regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ |
170 | -}) |
171 | - |
172 | /* |
173 | * Handle the guest trying to use a ptrauth instruction, or trying to access a |
174 | * ptrauth register. |
175 | */ |
176 | void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu) |
177 | { |
178 | - struct kvm_cpu_context *ctxt; |
179 | - |
180 | - if (vcpu_has_ptrauth(vcpu)) { |
181 | + if (vcpu_has_ptrauth(vcpu)) |
182 | vcpu_ptrauth_enable(vcpu); |
183 | - ctxt = vcpu->arch.host_cpu_context; |
184 | - __ptrauth_save_key(ctxt->sys_regs, APIA); |
185 | - __ptrauth_save_key(ctxt->sys_regs, APIB); |
186 | - __ptrauth_save_key(ctxt->sys_regs, APDA); |
187 | - __ptrauth_save_key(ctxt->sys_regs, APDB); |
188 | - __ptrauth_save_key(ctxt->sys_regs, APGA); |
189 | - } else { |
190 | + else |
191 | kvm_inject_undefined(vcpu); |
192 | - } |
193 | } |
194 | |
195 | /* |
196 | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c |
197 | index 01a515e0171e..d43f44b3377e 100644 |
198 | --- a/arch/arm64/kvm/sys_regs.c |
199 | +++ b/arch/arm64/kvm/sys_regs.c |
200 | @@ -1280,10 +1280,16 @@ static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
201 | static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, |
202 | const struct sys_reg_desc *r) |
203 | { |
204 | + int reg = r->reg; |
205 | + |
206 | + /* See the 32bit mapping in kvm_host.h */ |
207 | + if (p->is_aarch32) |
208 | + reg = r->reg / 2; |
209 | + |
210 | if (p->is_write) |
211 | - vcpu_write_sys_reg(vcpu, p->regval, r->reg); |
212 | + vcpu_write_sys_reg(vcpu, p->regval, reg); |
213 | else |
214 | - p->regval = vcpu_read_sys_reg(vcpu, r->reg); |
215 | + p->regval = vcpu_read_sys_reg(vcpu, reg); |
216 | return true; |
217 | } |
218 | |
219 | diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h |
220 | index 9023828ede97..ac8f65a3e75a 100644 |
221 | --- a/arch/csky/abiv2/inc/abi/entry.h |
222 | +++ b/arch/csky/abiv2/inc/abi/entry.h |
223 | @@ -13,6 +13,8 @@ |
224 | #define LSAVE_A1 28 |
225 | #define LSAVE_A2 32 |
226 | #define LSAVE_A3 36 |
227 | +#define LSAVE_A4 40 |
228 | +#define LSAVE_A5 44 |
229 | |
230 | #define KSPTOUSP |
231 | #define USPTOKSP |
232 | diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S |
233 | index 65c55f22532a..4349528fbf38 100644 |
234 | --- a/arch/csky/kernel/entry.S |
235 | +++ b/arch/csky/kernel/entry.S |
236 | @@ -170,8 +170,10 @@ csky_syscall_trace: |
237 | ldw a3, (sp, LSAVE_A3) |
238 | #if defined(__CSKYABIV2__) |
239 | subi sp, 8 |
240 | - stw r5, (sp, 0x4) |
241 | - stw r4, (sp, 0x0) |
242 | + ldw r9, (sp, LSAVE_A4) |
243 | + stw r9, (sp, 0x0) |
244 | + ldw r9, (sp, LSAVE_A5) |
245 | + stw r9, (sp, 0x4) |
246 | #else |
247 | ldw r6, (sp, LSAVE_A4) |
248 | ldw r7, (sp, LSAVE_A5) |
249 | diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h |
250 | index 41204a49cf95..7b47a323dc23 100644 |
251 | --- a/arch/mips/include/asm/kvm_host.h |
252 | +++ b/arch/mips/include/asm/kvm_host.h |
253 | @@ -274,8 +274,12 @@ enum emulation_result { |
254 | #define MIPS3_PG_SHIFT 6 |
255 | #define MIPS3_PG_FRAME 0x3fffffc0 |
256 | |
257 | +#if defined(CONFIG_64BIT) |
258 | +#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13) |
259 | +#else |
260 | #define VPN2_MASK 0xffffe000 |
261 | -#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID |
262 | +#endif |
263 | +#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data) |
264 | #define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G) |
265 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) |
266 | #define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID) |
267 | diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S |
268 | index 4638d2863388..060a1acd7c6d 100644 |
269 | --- a/arch/powerpc/kernel/vmlinux.lds.S |
270 | +++ b/arch/powerpc/kernel/vmlinux.lds.S |
271 | @@ -326,12 +326,6 @@ SECTIONS |
272 | *(.branch_lt) |
273 | } |
274 | |
275 | -#ifdef CONFIG_DEBUG_INFO_BTF |
276 | - .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { |
277 | - *(.BTF) |
278 | - } |
279 | -#endif |
280 | - |
281 | .opd : AT(ADDR(.opd) - LOAD_OFFSET) { |
282 | __start_opd = .; |
283 | KEEP(*(.opd)) |
284 | diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c |
285 | index c73205172447..633711bf1cae 100644 |
286 | --- a/arch/powerpc/mm/ptdump/ptdump.c |
287 | +++ b/arch/powerpc/mm/ptdump/ptdump.c |
288 | @@ -58,6 +58,7 @@ struct pg_state { |
289 | unsigned long start_address; |
290 | unsigned long start_pa; |
291 | unsigned long last_pa; |
292 | + unsigned long page_size; |
293 | unsigned int level; |
294 | u64 current_flags; |
295 | bool check_wx; |
296 | @@ -155,9 +156,9 @@ static void dump_addr(struct pg_state *st, unsigned long addr) |
297 | #endif |
298 | |
299 | pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1); |
300 | - if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) { |
301 | + if (st->start_pa == st->last_pa && st->start_address + st->page_size != addr) { |
302 | pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa); |
303 | - delta = PAGE_SIZE >> 10; |
304 | + delta = st->page_size >> 10; |
305 | } else { |
306 | pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa); |
307 | delta = (addr - st->start_address) >> 10; |
308 | @@ -188,7 +189,7 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr) |
309 | } |
310 | |
311 | static void note_page(struct pg_state *st, unsigned long addr, |
312 | - unsigned int level, u64 val) |
313 | + unsigned int level, u64 val, unsigned long page_size) |
314 | { |
315 | u64 flag = val & pg_level[level].mask; |
316 | u64 pa = val & PTE_RPN_MASK; |
317 | @@ -200,6 +201,7 @@ static void note_page(struct pg_state *st, unsigned long addr, |
318 | st->start_address = addr; |
319 | st->start_pa = pa; |
320 | st->last_pa = pa; |
321 | + st->page_size = page_size; |
322 | pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); |
323 | /* |
324 | * Dump the section of virtual memory when: |
325 | @@ -211,7 +213,7 @@ static void note_page(struct pg_state *st, unsigned long addr, |
326 | */ |
327 | } else if (flag != st->current_flags || level != st->level || |
328 | addr >= st->marker[1].start_address || |
329 | - (pa != st->last_pa + PAGE_SIZE && |
330 | + (pa != st->last_pa + st->page_size && |
331 | (pa != st->start_pa || st->start_pa != st->last_pa))) { |
332 | |
333 | /* Check the PTE flags */ |
334 | @@ -239,6 +241,7 @@ static void note_page(struct pg_state *st, unsigned long addr, |
335 | st->start_address = addr; |
336 | st->start_pa = pa; |
337 | st->last_pa = pa; |
338 | + st->page_size = page_size; |
339 | st->current_flags = flag; |
340 | st->level = level; |
341 | } else { |
342 | @@ -254,7 +257,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start) |
343 | |
344 | for (i = 0; i < PTRS_PER_PTE; i++, pte++) { |
345 | addr = start + i * PAGE_SIZE; |
346 | - note_page(st, addr, 4, pte_val(*pte)); |
347 | + note_page(st, addr, 4, pte_val(*pte), PAGE_SIZE); |
348 | |
349 | } |
350 | } |
351 | @@ -271,7 +274,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) |
352 | /* pmd exists */ |
353 | walk_pte(st, pmd, addr); |
354 | else |
355 | - note_page(st, addr, 3, pmd_val(*pmd)); |
356 | + note_page(st, addr, 3, pmd_val(*pmd), PMD_SIZE); |
357 | } |
358 | } |
359 | |
360 | @@ -287,7 +290,7 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start) |
361 | /* pud exists */ |
362 | walk_pmd(st, pud, addr); |
363 | else |
364 | - note_page(st, addr, 2, pud_val(*pud)); |
365 | + note_page(st, addr, 2, pud_val(*pud), PUD_SIZE); |
366 | } |
367 | } |
368 | |
369 | @@ -306,7 +309,7 @@ static void walk_pagetables(struct pg_state *st) |
370 | /* pgd exists */ |
371 | walk_pud(st, pgd, addr); |
372 | else |
373 | - note_page(st, addr, 1, pgd_val(*pgd)); |
374 | + note_page(st, addr, 1, pgd_val(*pgd), PGDIR_SIZE); |
375 | } |
376 | } |
377 | |
378 | @@ -361,7 +364,7 @@ static int ptdump_show(struct seq_file *m, void *v) |
379 | |
380 | /* Traverse kernel page tables */ |
381 | walk_pagetables(&st); |
382 | - note_page(&st, 0, 0, 0); |
383 | + note_page(&st, 0, 0, 0, 0); |
384 | return 0; |
385 | } |
386 | |
387 | diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c |
388 | index fe8d396e2301..16df9cc8f360 100644 |
389 | --- a/arch/powerpc/sysdev/xive/common.c |
390 | +++ b/arch/powerpc/sysdev/xive/common.c |
391 | @@ -19,6 +19,7 @@ |
392 | #include <linux/slab.h> |
393 | #include <linux/spinlock.h> |
394 | #include <linux/msi.h> |
395 | +#include <linux/vmalloc.h> |
396 | |
397 | #include <asm/prom.h> |
398 | #include <asm/io.h> |
399 | @@ -1013,12 +1014,16 @@ EXPORT_SYMBOL_GPL(is_xive_irq); |
400 | void xive_cleanup_irq_data(struct xive_irq_data *xd) |
401 | { |
402 | if (xd->eoi_mmio) { |
403 | + unmap_kernel_range((unsigned long)xd->eoi_mmio, |
404 | + 1u << xd->esb_shift); |
405 | iounmap(xd->eoi_mmio); |
406 | if (xd->eoi_mmio == xd->trig_mmio) |
407 | xd->trig_mmio = NULL; |
408 | xd->eoi_mmio = NULL; |
409 | } |
410 | if (xd->trig_mmio) { |
411 | + unmap_kernel_range((unsigned long)xd->trig_mmio, |
412 | + 1u << xd->esb_shift); |
413 | iounmap(xd->trig_mmio); |
414 | xd->trig_mmio = NULL; |
415 | } |
416 | diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c |
417 | index 281e0dd4c614..20e093f86329 100644 |
418 | --- a/arch/s390/pci/pci_clp.c |
419 | +++ b/arch/s390/pci/pci_clp.c |
420 | @@ -309,14 +309,13 @@ out: |
421 | |
422 | int clp_disable_fh(struct zpci_dev *zdev) |
423 | { |
424 | - u32 fh = zdev->fh; |
425 | int rc; |
426 | |
427 | if (!zdev_enabled(zdev)) |
428 | return 0; |
429 | |
430 | rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN); |
431 | - zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, fh, rc); |
432 | + zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc); |
433 | return rc; |
434 | } |
435 | |
436 | diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c |
437 | index c531e3f3269e..0461ab257df6 100644 |
438 | --- a/arch/x86/events/intel/core.c |
439 | +++ b/arch/x86/events/intel/core.c |
440 | @@ -1892,8 +1892,8 @@ static __initconst const u64 tnt_hw_cache_extra_regs |
441 | |
442 | static struct extra_reg intel_tnt_extra_regs[] __read_mostly = { |
443 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ |
444 | - INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffffff9fffull, RSP_0), |
445 | - INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xffffff9fffull, RSP_1), |
446 | + INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0), |
447 | + INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1), |
448 | EVENT_EXTRA_END |
449 | }; |
450 | |
451 | diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h |
452 | index 2ee8e469dcf5..162128cdfbf2 100644 |
453 | --- a/arch/x86/include/asm/set_memory.h |
454 | +++ b/arch/x86/include/asm/set_memory.h |
455 | @@ -85,28 +85,35 @@ void set_kernel_text_rw(void); |
456 | void set_kernel_text_ro(void); |
457 | |
458 | #ifdef CONFIG_X86_64 |
459 | -static inline int set_mce_nospec(unsigned long pfn) |
460 | +/* |
461 | + * Prevent speculative access to the page by either unmapping |
462 | + * it (if we do not require access to any part of the page) or |
463 | + * marking it uncacheable (if we want to try to retrieve data |
464 | + * from non-poisoned lines in the page). |
465 | + */ |
466 | +static inline int set_mce_nospec(unsigned long pfn, bool unmap) |
467 | { |
468 | unsigned long decoy_addr; |
469 | int rc; |
470 | |
471 | /* |
472 | - * Mark the linear address as UC to make sure we don't log more |
473 | - * errors because of speculative access to the page. |
474 | * We would like to just call: |
475 | - * set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1); |
476 | + * set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1); |
477 | * but doing that would radically increase the odds of a |
478 | * speculative access to the poison page because we'd have |
479 | * the virtual address of the kernel 1:1 mapping sitting |
480 | * around in registers. |
481 | * Instead we get tricky. We create a non-canonical address |
482 | * that looks just like the one we want, but has bit 63 flipped. |
483 | - * This relies on set_memory_uc() properly sanitizing any __pa() |
484 | + * This relies on set_memory_XX() properly sanitizing any __pa() |
485 | * results with __PHYSICAL_MASK or PTE_PFN_MASK. |
486 | */ |
487 | decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); |
488 | |
489 | - rc = set_memory_uc(decoy_addr, 1); |
490 | + if (unmap) |
491 | + rc = set_memory_np(decoy_addr, 1); |
492 | + else |
493 | + rc = set_memory_uc(decoy_addr, 1); |
494 | if (rc) |
495 | pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); |
496 | return rc; |
497 | diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
498 | index c3f4dd4ae155..c553cafd0736 100644 |
499 | --- a/arch/x86/kernel/cpu/amd.c |
500 | +++ b/arch/x86/kernel/cpu/amd.c |
501 | @@ -1117,8 +1117,7 @@ static const int amd_erratum_383[] = |
502 | |
503 | /* #1054: Instructions Retired Performance Counter May Be Inaccurate */ |
504 | static const int amd_erratum_1054[] = |
505 | - AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); |
506 | - |
507 | + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf)); |
508 | |
509 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) |
510 | { |
511 | diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c |
512 | index 3c3f3e02683a..acbf3dbb8bf2 100644 |
513 | --- a/arch/x86/kernel/cpu/bugs.c |
514 | +++ b/arch/x86/kernel/cpu/bugs.c |
515 | @@ -581,7 +581,9 @@ early_param("nospectre_v1", nospectre_v1_cmdline); |
516 | static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = |
517 | SPECTRE_V2_NONE; |
518 | |
519 | -static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = |
520 | +static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = |
521 | + SPECTRE_V2_USER_NONE; |
522 | +static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = |
523 | SPECTRE_V2_USER_NONE; |
524 | |
525 | #ifdef CONFIG_RETPOLINE |
526 | @@ -727,15 +729,6 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) |
527 | break; |
528 | } |
529 | |
530 | - /* |
531 | - * At this point, an STIBP mode other than "off" has been set. |
532 | - * If STIBP support is not being forced, check if STIBP always-on |
533 | - * is preferred. |
534 | - */ |
535 | - if (mode != SPECTRE_V2_USER_STRICT && |
536 | - boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) |
537 | - mode = SPECTRE_V2_USER_STRICT_PREFERRED; |
538 | - |
539 | /* Initialize Indirect Branch Prediction Barrier */ |
540 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
541 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); |
542 | @@ -758,23 +751,36 @@ spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) |
543 | pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", |
544 | static_key_enabled(&switch_mm_always_ibpb) ? |
545 | "always-on" : "conditional"); |
546 | + |
547 | + spectre_v2_user_ibpb = mode; |
548 | } |
549 | |
550 | - /* If enhanced IBRS is enabled no STIBP required */ |
551 | - if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) |
552 | + /* |
553 | + * If enhanced IBRS is enabled or SMT impossible, STIBP is not |
554 | + * required. |
555 | + */ |
556 | + if (!smt_possible || spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) |
557 | return; |
558 | |
559 | /* |
560 | - * If SMT is not possible or STIBP is not available clear the STIBP |
561 | - * mode. |
562 | + * At this point, an STIBP mode other than "off" has been set. |
563 | + * If STIBP support is not being forced, check if STIBP always-on |
564 | + * is preferred. |
565 | */ |
566 | - if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) |
567 | + if (mode != SPECTRE_V2_USER_STRICT && |
568 | + boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) |
569 | + mode = SPECTRE_V2_USER_STRICT_PREFERRED; |
570 | + |
571 | + /* |
572 | + * If STIBP is not available, clear the STIBP mode. |
573 | + */ |
574 | + if (!boot_cpu_has(X86_FEATURE_STIBP)) |
575 | mode = SPECTRE_V2_USER_NONE; |
576 | + |
577 | + spectre_v2_user_stibp = mode; |
578 | + |
579 | set_mode: |
580 | - spectre_v2_user = mode; |
581 | - /* Only print the STIBP mode when SMT possible */ |
582 | - if (smt_possible) |
583 | - pr_info("%s\n", spectre_v2_user_strings[mode]); |
584 | + pr_info("%s\n", spectre_v2_user_strings[mode]); |
585 | } |
586 | |
587 | static const char * const spectre_v2_strings[] = { |
588 | @@ -1007,7 +1013,7 @@ void cpu_bugs_smt_update(void) |
589 | { |
590 | mutex_lock(&spec_ctrl_mutex); |
591 | |
592 | - switch (spectre_v2_user) { |
593 | + switch (spectre_v2_user_stibp) { |
594 | case SPECTRE_V2_USER_NONE: |
595 | break; |
596 | case SPECTRE_V2_USER_STRICT: |
597 | @@ -1250,14 +1256,19 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
598 | { |
599 | switch (ctrl) { |
600 | case PR_SPEC_ENABLE: |
601 | - if (spectre_v2_user == SPECTRE_V2_USER_NONE) |
602 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
603 | + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
604 | return 0; |
605 | /* |
606 | * Indirect branch speculation is always disabled in strict |
607 | - * mode. |
608 | + * mode. It can neither be enabled if it was force-disabled |
609 | + * by a previous prctl call. |
610 | + |
611 | */ |
612 | - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || |
613 | - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) |
614 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
615 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
616 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || |
617 | + task_spec_ib_force_disable(task)) |
618 | return -EPERM; |
619 | task_clear_spec_ib_disable(task); |
620 | task_update_spec_tif(task); |
621 | @@ -1268,10 +1279,12 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
622 | * Indirect branch speculation is always allowed when |
623 | * mitigation is force disabled. |
624 | */ |
625 | - if (spectre_v2_user == SPECTRE_V2_USER_NONE) |
626 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
627 | + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
628 | return -EPERM; |
629 | - if (spectre_v2_user == SPECTRE_V2_USER_STRICT || |
630 | - spectre_v2_user == SPECTRE_V2_USER_STRICT_PREFERRED) |
631 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
632 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
633 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) |
634 | return 0; |
635 | task_set_spec_ib_disable(task); |
636 | if (ctrl == PR_SPEC_FORCE_DISABLE) |
637 | @@ -1302,7 +1315,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) |
638 | { |
639 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
640 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
641 | - if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) |
642 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
643 | + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) |
644 | ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
645 | } |
646 | #endif |
647 | @@ -1333,22 +1347,24 @@ static int ib_prctl_get(struct task_struct *task) |
648 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) |
649 | return PR_SPEC_NOT_AFFECTED; |
650 | |
651 | - switch (spectre_v2_user) { |
652 | - case SPECTRE_V2_USER_NONE: |
653 | + if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
654 | + spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) |
655 | return PR_SPEC_ENABLE; |
656 | - case SPECTRE_V2_USER_PRCTL: |
657 | - case SPECTRE_V2_USER_SECCOMP: |
658 | + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
659 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || |
660 | + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) |
661 | + return PR_SPEC_DISABLE; |
662 | + else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || |
663 | + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
664 | + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || |
665 | + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { |
666 | if (task_spec_ib_force_disable(task)) |
667 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; |
668 | if (task_spec_ib_disable(task)) |
669 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
670 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; |
671 | - case SPECTRE_V2_USER_STRICT: |
672 | - case SPECTRE_V2_USER_STRICT_PREFERRED: |
673 | - return PR_SPEC_DISABLE; |
674 | - default: |
675 | + } else |
676 | return PR_SPEC_NOT_AFFECTED; |
677 | - } |
678 | } |
679 | |
680 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
681 | @@ -1587,7 +1603,7 @@ static char *stibp_state(void) |
682 | if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) |
683 | return ""; |
684 | |
685 | - switch (spectre_v2_user) { |
686 | + switch (spectre_v2_user_stibp) { |
687 | case SPECTRE_V2_USER_NONE: |
688 | return ", STIBP: disabled"; |
689 | case SPECTRE_V2_USER_STRICT: |
690 | diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c |
691 | index aecb15ba66cd..fd76e3733dd3 100644 |
692 | --- a/arch/x86/kernel/cpu/mce/core.c |
693 | +++ b/arch/x86/kernel/cpu/mce/core.c |
694 | @@ -533,6 +533,13 @@ bool mce_is_memory_error(struct mce *m) |
695 | } |
696 | EXPORT_SYMBOL_GPL(mce_is_memory_error); |
697 | |
698 | +static bool whole_page(struct mce *m) |
699 | +{ |
700 | + if (!mca_cfg.ser || !(m->status & MCI_STATUS_MISCV)) |
701 | + return true; |
702 | + return MCI_MISC_ADDR_LSB(m->misc) >= PAGE_SHIFT; |
703 | +} |
704 | + |
705 | bool mce_is_correctable(struct mce *m) |
706 | { |
707 | if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) |
708 | @@ -601,7 +608,7 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, |
709 | if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { |
710 | pfn = mce->addr >> PAGE_SHIFT; |
711 | if (!memory_failure(pfn, 0)) |
712 | - set_mce_nospec(pfn); |
713 | + set_mce_nospec(pfn, whole_page(mce)); |
714 | } |
715 | |
716 | return NOTIFY_OK; |
717 | @@ -1103,7 +1110,7 @@ static int do_memory_failure(struct mce *m) |
718 | if (ret) |
719 | pr_err("Memory error not recovered"); |
720 | else |
721 | - set_mce_nospec(m->addr >> PAGE_SHIFT); |
722 | + set_mce_nospec(m->addr >> PAGE_SHIFT, whole_page(m)); |
723 | return ret; |
724 | } |
725 | |
726 | diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c |
727 | index 5e94c4354d4e..571e38c9ee1d 100644 |
728 | --- a/arch/x86/kernel/process.c |
729 | +++ b/arch/x86/kernel/process.c |
730 | @@ -428,28 +428,20 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp, |
731 | |
732 | lockdep_assert_irqs_disabled(); |
733 | |
734 | - /* |
735 | - * If TIF_SSBD is different, select the proper mitigation |
736 | - * method. Note that if SSBD mitigation is disabled or permanentely |
737 | - * enabled this branch can't be taken because nothing can set |
738 | - * TIF_SSBD. |
739 | - */ |
740 | - if (tif_diff & _TIF_SSBD) { |
741 | - if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { |
742 | + /* Handle change of TIF_SSBD depending on the mitigation method. */ |
743 | + if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { |
744 | + if (tif_diff & _TIF_SSBD) |
745 | amd_set_ssb_virt_state(tifn); |
746 | - } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { |
747 | + } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { |
748 | + if (tif_diff & _TIF_SSBD) |
749 | amd_set_core_ssb_state(tifn); |
750 | - } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || |
751 | - static_cpu_has(X86_FEATURE_AMD_SSBD)) { |
752 | - msr |= ssbd_tif_to_spec_ctrl(tifn); |
753 | - updmsr = true; |
754 | - } |
755 | + } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || |
756 | + static_cpu_has(X86_FEATURE_AMD_SSBD)) { |
757 | + updmsr |= !!(tif_diff & _TIF_SSBD); |
758 | + msr |= ssbd_tif_to_spec_ctrl(tifn); |
759 | } |
760 | |
761 | - /* |
762 | - * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, |
763 | - * otherwise avoid the MSR write. |
764 | - */ |
765 | + /* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */ |
766 | if (IS_ENABLED(CONFIG_SMP) && |
767 | static_branch_unlikely(&switch_to_cond_stibp)) { |
768 | updmsr |= !!(tif_diff & _TIF_SPEC_IB); |
769 | diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c |
770 | index 0cc7c0b106bb..762f5c1465a6 100644 |
771 | --- a/arch/x86/kernel/reboot.c |
772 | +++ b/arch/x86/kernel/reboot.c |
773 | @@ -197,6 +197,14 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = { |
774 | DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), |
775 | }, |
776 | }, |
777 | + { /* Handle problems with rebooting on Apple MacBook6,1 */ |
778 | + .callback = set_pci_reboot, |
779 | + .ident = "Apple MacBook6,1", |
780 | + .matches = { |
781 | + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), |
782 | + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook6,1"), |
783 | + }, |
784 | + }, |
785 | { /* Handle problems with rebooting on Apple MacBookPro5 */ |
786 | .callback = set_pci_reboot, |
787 | .ident = "Apple MacBookPro5", |
788 | diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c |
789 | index d8673d8a779b..36a585b80d9e 100644 |
790 | --- a/arch/x86/kernel/time.c |
791 | +++ b/arch/x86/kernel/time.c |
792 | @@ -25,10 +25,6 @@ |
793 | #include <asm/hpet.h> |
794 | #include <asm/time.h> |
795 | |
796 | -#ifdef CONFIG_X86_64 |
797 | -__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
798 | -#endif |
799 | - |
800 | unsigned long profile_pc(struct pt_regs *regs) |
801 | { |
802 | unsigned long pc = instruction_pointer(regs); |
803 | diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S |
804 | index e2feacf921a0..bac1a65a9d39 100644 |
805 | --- a/arch/x86/kernel/vmlinux.lds.S |
806 | +++ b/arch/x86/kernel/vmlinux.lds.S |
807 | @@ -36,13 +36,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT) |
808 | #ifdef CONFIG_X86_32 |
809 | OUTPUT_ARCH(i386) |
810 | ENTRY(phys_startup_32) |
811 | -jiffies = jiffies_64; |
812 | #else |
813 | OUTPUT_ARCH(i386:x86-64) |
814 | ENTRY(phys_startup_64) |
815 | -jiffies_64 = jiffies; |
816 | #endif |
817 | |
818 | +jiffies = jiffies_64; |
819 | + |
820 | #if defined(CONFIG_X86_64) |
821 | /* |
822 | * On 64-bit, align RODATA to 2MB so we retain large page mappings for |
823 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
824 | index 518100ea5ef4..a3824ae9a634 100644 |
825 | --- a/arch/x86/kvm/mmu.c |
826 | +++ b/arch/x86/kvm/mmu.c |
827 | @@ -343,6 +343,8 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask) |
828 | { |
829 | BUG_ON((u64)(unsigned)access_mask != access_mask); |
830 | BUG_ON((mmio_mask & mmio_value) != mmio_value); |
831 | + WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len)); |
832 | + WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask); |
833 | shadow_mmio_value = mmio_value | SPTE_MMIO_MASK; |
834 | shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK; |
835 | shadow_mmio_access_mask = access_mask; |
836 | @@ -580,16 +582,15 @@ static void kvm_mmu_reset_all_pte_masks(void) |
837 | * the most significant bits of legal physical address space. |
838 | */ |
839 | shadow_nonpresent_or_rsvd_mask = 0; |
840 | - low_phys_bits = boot_cpu_data.x86_cache_bits; |
841 | - if (boot_cpu_data.x86_cache_bits < |
842 | - 52 - shadow_nonpresent_or_rsvd_mask_len) { |
843 | + low_phys_bits = boot_cpu_data.x86_phys_bits; |
844 | + if (boot_cpu_has_bug(X86_BUG_L1TF) && |
845 | + !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >= |
846 | + 52 - shadow_nonpresent_or_rsvd_mask_len)) { |
847 | + low_phys_bits = boot_cpu_data.x86_cache_bits |
848 | + - shadow_nonpresent_or_rsvd_mask_len; |
849 | shadow_nonpresent_or_rsvd_mask = |
850 | - rsvd_bits(boot_cpu_data.x86_cache_bits - |
851 | - shadow_nonpresent_or_rsvd_mask_len, |
852 | - boot_cpu_data.x86_cache_bits - 1); |
853 | - low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len; |
854 | - } else |
855 | - WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF)); |
856 | + rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1); |
857 | + } |
858 | |
859 | shadow_nonpresent_or_rsvd_lower_gfn_mask = |
860 | GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT); |
861 | @@ -6247,25 +6248,16 @@ static void kvm_set_mmio_spte_mask(void) |
862 | u64 mask; |
863 | |
864 | /* |
865 | - * Set the reserved bits and the present bit of an paging-structure |
866 | - * entry to generate page fault with PFER.RSV = 1. |
867 | - */ |
868 | - |
869 | - /* |
870 | - * Mask the uppermost physical address bit, which would be reserved as |
871 | - * long as the supported physical address width is less than 52. |
872 | + * Set a reserved PA bit in MMIO SPTEs to generate page faults with |
873 | + * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT |
874 | + * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports |
875 | + * 52-bit physical addresses then there are no reserved PA bits in the |
876 | + * PTEs and so the reserved PA approach must be disabled. |
877 | */ |
878 | - mask = 1ull << 51; |
879 | - |
880 | - /* Set the present bit. */ |
881 | - mask |= 1ull; |
882 | - |
883 | - /* |
884 | - * If reserved bit is not supported, clear the present bit to disable |
885 | - * mmio page fault. |
886 | - */ |
887 | - if (shadow_phys_bits == 52) |
888 | - mask &= ~1ull; |
889 | + if (shadow_phys_bits < 52) |
890 | + mask = BIT_ULL(51) | PT_PRESENT_MASK; |
891 | + else |
892 | + mask = 0; |
893 | |
894 | kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK); |
895 | } |
896 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
897 | index cc7da664fd39..3243a80ea32c 100644 |
898 | --- a/arch/x86/kvm/svm.c |
899 | +++ b/arch/x86/kvm/svm.c |
900 | @@ -3237,8 +3237,8 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) |
901 | return NESTED_EXIT_HOST; |
902 | break; |
903 | case SVM_EXIT_EXCP_BASE + PF_VECTOR: |
904 | - /* When we're shadowing, trap PFs, but not async PF */ |
905 | - if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) |
906 | + /* Trap async PF even if not shadowing */ |
907 | + if (!npt_enabled || svm->vcpu.arch.apf.host_apf_reason) |
908 | return NESTED_EXIT_HOST; |
909 | break; |
910 | default: |
911 | @@ -3327,7 +3327,7 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr |
912 | dst->iopm_base_pa = from->iopm_base_pa; |
913 | dst->msrpm_base_pa = from->msrpm_base_pa; |
914 | dst->tsc_offset = from->tsc_offset; |
915 | - dst->asid = from->asid; |
916 | + /* asid not copied, it is handled manually for svm->vmcb. */ |
917 | dst->tlb_ctl = from->tlb_ctl; |
918 | dst->int_ctl = from->int_ctl; |
919 | dst->int_vector = from->int_vector; |
920 | diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c |
921 | index 4a09f40b24dc..a460ddf04d60 100644 |
922 | --- a/arch/x86/kvm/vmx/nested.c |
923 | +++ b/arch/x86/kvm/vmx/nested.c |
924 | @@ -302,7 +302,7 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs) |
925 | cpu = get_cpu(); |
926 | prev = vmx->loaded_vmcs; |
927 | vmx->loaded_vmcs = vmcs; |
928 | - vmx_vcpu_load_vmcs(vcpu, cpu); |
929 | + vmx_vcpu_load_vmcs(vcpu, cpu, prev); |
930 | vmx_sync_vmcs_host_state(vmx, prev); |
931 | put_cpu(); |
932 | |
933 | @@ -5357,7 +5357,7 @@ bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason) |
934 | vmcs_read32(VM_EXIT_INTR_ERROR_CODE), |
935 | KVM_ISA_VMX); |
936 | |
937 | - switch (exit_reason) { |
938 | + switch ((u16)exit_reason) { |
939 | case EXIT_REASON_EXCEPTION_NMI: |
940 | if (is_nmi(intr_info)) |
941 | return false; |
942 | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c |
943 | index 7a2c05277f4c..5fac01865a2d 100644 |
944 | --- a/arch/x86/kvm/vmx/vmx.c |
945 | +++ b/arch/x86/kvm/vmx/vmx.c |
946 | @@ -1286,10 +1286,12 @@ after_clear_sn: |
947 | pi_set_on(pi_desc); |
948 | } |
949 | |
950 | -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) |
951 | +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, |
952 | + struct loaded_vmcs *buddy) |
953 | { |
954 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
955 | bool already_loaded = vmx->loaded_vmcs->cpu == cpu; |
956 | + struct vmcs *prev; |
957 | |
958 | if (!already_loaded) { |
959 | loaded_vmcs_clear(vmx->loaded_vmcs); |
960 | @@ -1308,10 +1310,18 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu) |
961 | local_irq_enable(); |
962 | } |
963 | |
964 | - if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { |
965 | + prev = per_cpu(current_vmcs, cpu); |
966 | + if (prev != vmx->loaded_vmcs->vmcs) { |
967 | per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; |
968 | vmcs_load(vmx->loaded_vmcs->vmcs); |
969 | - indirect_branch_prediction_barrier(); |
970 | + |
971 | + /* |
972 | + * No indirect branch prediction barrier needed when switching |
973 | + * the active VMCS within a guest, e.g. on nested VM-Enter. |
974 | + * The L1 VMM can protect itself with retpolines, IBPB or IBRS. |
975 | + */ |
976 | + if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) |
977 | + indirect_branch_prediction_barrier(); |
978 | } |
979 | |
980 | if (!already_loaded) { |
981 | @@ -1356,7 +1366,7 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
982 | { |
983 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
984 | |
985 | - vmx_vcpu_load_vmcs(vcpu, cpu); |
986 | + vmx_vcpu_load_vmcs(vcpu, cpu, NULL); |
987 | |
988 | vmx_vcpu_pi_load(vcpu, cpu); |
989 | |
990 | diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h |
991 | index 5a0f34b1e226..295c5f83842e 100644 |
992 | --- a/arch/x86/kvm/vmx/vmx.h |
993 | +++ b/arch/x86/kvm/vmx/vmx.h |
994 | @@ -304,7 +304,8 @@ struct kvm_vmx { |
995 | }; |
996 | |
997 | bool nested_vmx_allowed(struct kvm_vcpu *vcpu); |
998 | -void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu); |
999 | +void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu, |
1000 | + struct loaded_vmcs *buddy); |
1001 | void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu); |
1002 | int allocate_vpid(void); |
1003 | void free_vpid(int vpid); |
1004 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
1005 | index c6d9e363dfc0..fff279fb173b 100644 |
1006 | --- a/arch/x86/kvm/x86.c |
1007 | +++ b/arch/x86/kvm/x86.c |
1008 | @@ -6833,7 +6833,7 @@ restart: |
1009 | if (!ctxt->have_exception || |
1010 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) { |
1011 | kvm_rip_write(vcpu, ctxt->eip); |
1012 | - if (r && ctxt->tf) |
1013 | + if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) |
1014 | r = kvm_vcpu_do_singlestep(vcpu); |
1015 | __kvm_set_rflags(vcpu, ctxt->eflags); |
1016 | } |
1017 | @@ -7978,9 +7978,8 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) |
1018 | kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); |
1019 | } |
1020 | |
1021 | -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
1022 | - unsigned long start, unsigned long end, |
1023 | - bool blockable) |
1024 | +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
1025 | + unsigned long start, unsigned long end) |
1026 | { |
1027 | unsigned long apic_address; |
1028 | |
1029 | @@ -7991,8 +7990,6 @@ int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
1030 | apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); |
1031 | if (start <= apic_address && apic_address < end) |
1032 | kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD); |
1033 | - |
1034 | - return 0; |
1035 | } |
1036 | |
1037 | void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) |
1038 | diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c |
1039 | index e723559c386a..0c67a5a94de3 100644 |
1040 | --- a/arch/x86/pci/fixup.c |
1041 | +++ b/arch/x86/pci/fixup.c |
1042 | @@ -572,6 +572,10 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar); |
1043 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar); |
1044 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar); |
1045 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar); |
1046 | +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar); |
1047 | +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar); |
1048 | +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar); |
1049 | +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar); |
1050 | |
1051 | /* |
1052 | * Device [1022:7808] |
1053 | diff --git a/crypto/algapi.c b/crypto/algapi.c |
1054 | index bb8329e49956..fff52bc9d97d 100644 |
1055 | --- a/crypto/algapi.c |
1056 | +++ b/crypto/algapi.c |
1057 | @@ -374,7 +374,7 @@ static void crypto_wait_for_test(struct crypto_larval *larval) |
1058 | err = wait_for_completion_killable(&larval->completion); |
1059 | WARN_ON(err); |
1060 | if (!err) |
1061 | - crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval); |
1062 | + crypto_notify(CRYPTO_MSG_ALG_LOADED, larval); |
1063 | |
1064 | out: |
1065 | crypto_larval_kill(&larval->alg); |
1066 | diff --git a/crypto/drbg.c b/crypto/drbg.c |
1067 | index b6929eb5f565..04379ca624cd 100644 |
1068 | --- a/crypto/drbg.c |
1069 | +++ b/crypto/drbg.c |
1070 | @@ -1294,8 +1294,10 @@ static inline int drbg_alloc_state(struct drbg_state *drbg) |
1071 | if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) { |
1072 | drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags), |
1073 | GFP_KERNEL); |
1074 | - if (!drbg->prev) |
1075 | + if (!drbg->prev) { |
1076 | + ret = -ENOMEM; |
1077 | goto fini; |
1078 | + } |
1079 | drbg->fips_primed = false; |
1080 | } |
1081 | |
1082 | diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c |
1083 | index a1a858ad4d18..f9b1a2abdbe2 100644 |
1084 | --- a/drivers/acpi/cppc_acpi.c |
1085 | +++ b/drivers/acpi/cppc_acpi.c |
1086 | @@ -865,6 +865,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) |
1087 | "acpi_cppc"); |
1088 | if (ret) { |
1089 | per_cpu(cpc_desc_ptr, pr->id) = NULL; |
1090 | + kobject_put(&cpc_ptr->kobj); |
1091 | goto out_free; |
1092 | } |
1093 | |
1094 | diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c |
1095 | index ea9ecf3d70c2..1a5956fb2cbc 100644 |
1096 | --- a/drivers/acpi/device_pm.c |
1097 | +++ b/drivers/acpi/device_pm.c |
1098 | @@ -186,7 +186,7 @@ int acpi_device_set_power(struct acpi_device *device, int state) |
1099 | * possibly drop references to the power resources in use. |
1100 | */ |
1101 | state = ACPI_STATE_D3_HOT; |
1102 | - /* If _PR3 is not available, use D3hot as the target state. */ |
1103 | + /* If D3cold is not supported, use D3hot as the target state. */ |
1104 | if (!device->power.states[ACPI_STATE_D3_COLD].flags.valid) |
1105 | target_state = state; |
1106 | } else if (!device->power.states[state].flags.valid) { |
1107 | diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c |
1108 | index aba0d0027586..6d7a522952bf 100644 |
1109 | --- a/drivers/acpi/evged.c |
1110 | +++ b/drivers/acpi/evged.c |
1111 | @@ -79,6 +79,8 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, |
1112 | struct resource r; |
1113 | struct acpi_resource_irq *p = &ares->data.irq; |
1114 | struct acpi_resource_extended_irq *pext = &ares->data.extended_irq; |
1115 | + char ev_name[5]; |
1116 | + u8 trigger; |
1117 | |
1118 | if (ares->type == ACPI_RESOURCE_TYPE_END_TAG) |
1119 | return AE_OK; |
1120 | @@ -87,14 +89,28 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares, |
1121 | dev_err(dev, "unable to parse IRQ resource\n"); |
1122 | return AE_ERROR; |
1123 | } |
1124 | - if (ares->type == ACPI_RESOURCE_TYPE_IRQ) |
1125 | + if (ares->type == ACPI_RESOURCE_TYPE_IRQ) { |
1126 | gsi = p->interrupts[0]; |
1127 | - else |
1128 | + trigger = p->triggering; |
1129 | + } else { |
1130 | gsi = pext->interrupts[0]; |
1131 | + trigger = p->triggering; |
1132 | + } |
1133 | |
1134 | irq = r.start; |
1135 | |
1136 | - if (ACPI_FAILURE(acpi_get_handle(handle, "_EVT", &evt_handle))) { |
1137 | + switch (gsi) { |
1138 | + case 0 ... 255: |
1139 | + sprintf(ev_name, "_%c%02hhX", |
1140 | + trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi); |
1141 | + |
1142 | + if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) |
1143 | + break; |
1144 | + /* fall through */ |
1145 | + default: |
1146 | + if (ACPI_SUCCESS(acpi_get_handle(handle, "_EVT", &evt_handle))) |
1147 | + break; |
1148 | + |
1149 | dev_err(dev, "cannot locate _EVT method\n"); |
1150 | return AE_ERROR; |
1151 | } |
1152 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c |
1153 | index 915650bf519f..2527938a30b5 100644 |
1154 | --- a/drivers/acpi/scan.c |
1155 | +++ b/drivers/acpi/scan.c |
1156 | @@ -919,12 +919,9 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state) |
1157 | |
1158 | if (buffer.length && package |
1159 | && package->type == ACPI_TYPE_PACKAGE |
1160 | - && package->package.count) { |
1161 | - int err = acpi_extract_power_resources(package, 0, |
1162 | - &ps->resources); |
1163 | - if (!err) |
1164 | - device->power.flags.power_resources = 1; |
1165 | - } |
1166 | + && package->package.count) |
1167 | + acpi_extract_power_resources(package, 0, &ps->resources); |
1168 | + |
1169 | ACPI_FREE(buffer.pointer); |
1170 | } |
1171 | |
1172 | @@ -971,14 +968,27 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) |
1173 | acpi_bus_init_power_state(device, i); |
1174 | |
1175 | INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources); |
1176 | - if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) |
1177 | - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; |
1178 | |
1179 | - /* Set defaults for D0 and D3hot states (always valid) */ |
1180 | + /* Set the defaults for D0 and D3hot (always supported). */ |
1181 | device->power.states[ACPI_STATE_D0].flags.valid = 1; |
1182 | device->power.states[ACPI_STATE_D0].power = 100; |
1183 | device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1; |
1184 | |
1185 | + /* |
1186 | + * Use power resources only if the D0 list of them is populated, because |
1187 | + * some platforms may provide _PR3 only to indicate D3cold support and |
1188 | + * in those cases the power resources list returned by it may be bogus. |
1189 | + */ |
1190 | + if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) { |
1191 | + device->power.flags.power_resources = 1; |
1192 | + /* |
1193 | + * D3cold is supported if the D3hot list of power resources is |
1194 | + * not empty. |
1195 | + */ |
1196 | + if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources)) |
1197 | + device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; |
1198 | + } |
1199 | + |
1200 | if (acpi_bus_init_power(device)) |
1201 | device->flags.power_manageable = 0; |
1202 | } |
1203 | diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c |
1204 | index c60d2c6d31d6..3a89909b50a6 100644 |
1205 | --- a/drivers/acpi/sysfs.c |
1206 | +++ b/drivers/acpi/sysfs.c |
1207 | @@ -993,8 +993,10 @@ void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, |
1208 | |
1209 | error = kobject_init_and_add(&hotplug->kobj, |
1210 | &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name); |
1211 | - if (error) |
1212 | + if (error) { |
1213 | + kobject_put(&hotplug->kobj); |
1214 | goto err_out; |
1215 | + } |
1216 | |
1217 | kobject_uevent(&hotplug->kobj, KOBJ_ADD); |
1218 | return; |
1219 | diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c |
1220 | index f19a03b62365..ac97a1e2e5dd 100644 |
1221 | --- a/drivers/block/floppy.c |
1222 | +++ b/drivers/block/floppy.c |
1223 | @@ -2902,17 +2902,17 @@ static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx, |
1224 | (unsigned long long) current_req->cmd_flags)) |
1225 | return BLK_STS_IOERR; |
1226 | |
1227 | - spin_lock_irq(&floppy_lock); |
1228 | - list_add_tail(&bd->rq->queuelist, &floppy_reqs); |
1229 | - spin_unlock_irq(&floppy_lock); |
1230 | - |
1231 | if (test_and_set_bit(0, &fdc_busy)) { |
1232 | /* fdc busy, this new request will be treated when the |
1233 | current one is done */ |
1234 | is_alive(__func__, "old request running"); |
1235 | - return BLK_STS_OK; |
1236 | + return BLK_STS_RESOURCE; |
1237 | } |
1238 | |
1239 | + spin_lock_irq(&floppy_lock); |
1240 | + list_add_tail(&bd->rq->queuelist, &floppy_reqs); |
1241 | + spin_unlock_irq(&floppy_lock); |
1242 | + |
1243 | command_status = FD_COMMAND_NONE; |
1244 | __reschedule_timeout(MAXTIMEOUT, "fd_request"); |
1245 | set_fdc(0); |
1246 | diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c |
1247 | index c6271ce250b3..b161bdf60000 100644 |
1248 | --- a/drivers/char/agp/intel-gtt.c |
1249 | +++ b/drivers/char/agp/intel-gtt.c |
1250 | @@ -846,6 +846,7 @@ void intel_gtt_insert_page(dma_addr_t addr, |
1251 | unsigned int flags) |
1252 | { |
1253 | intel_private.driver->write_entry(addr, pg, flags); |
1254 | + readl(intel_private.gtt + pg); |
1255 | if (intel_private.driver->chipset_flush) |
1256 | intel_private.driver->chipset_flush(); |
1257 | } |
1258 | @@ -871,7 +872,7 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, |
1259 | j++; |
1260 | } |
1261 | } |
1262 | - wmb(); |
1263 | + readl(intel_private.gtt + j - 1); |
1264 | if (intel_private.driver->chipset_flush) |
1265 | intel_private.driver->chipset_flush(); |
1266 | } |
1267 | @@ -1105,6 +1106,7 @@ static void i9xx_cleanup(void) |
1268 | |
1269 | static void i9xx_chipset_flush(void) |
1270 | { |
1271 | + wmb(); |
1272 | if (intel_private.i9xx_flush_page) |
1273 | writel(1, intel_private.i9xx_flush_page); |
1274 | } |
1275 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
1276 | index 9728d1282e43..36e9f38a3882 100644 |
1277 | --- a/drivers/clk/clk.c |
1278 | +++ b/drivers/clk/clk.c |
1279 | @@ -114,7 +114,11 @@ static int clk_pm_runtime_get(struct clk_core *core) |
1280 | return 0; |
1281 | |
1282 | ret = pm_runtime_get_sync(core->dev); |
1283 | - return ret < 0 ? ret : 0; |
1284 | + if (ret < 0) { |
1285 | + pm_runtime_put_noidle(core->dev); |
1286 | + return ret; |
1287 | + } |
1288 | + return 0; |
1289 | } |
1290 | |
1291 | static void clk_pm_runtime_put(struct clk_core *core) |
1292 | diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c |
1293 | index 35f8e098e9fa..fa988bd1e606 100644 |
1294 | --- a/drivers/cpufreq/cpufreq.c |
1295 | +++ b/drivers/cpufreq/cpufreq.c |
1296 | @@ -2507,26 +2507,27 @@ EXPORT_SYMBOL_GPL(cpufreq_update_limits); |
1297 | static int cpufreq_boost_set_sw(int state) |
1298 | { |
1299 | struct cpufreq_policy *policy; |
1300 | - int ret = -EINVAL; |
1301 | |
1302 | for_each_active_policy(policy) { |
1303 | + int ret; |
1304 | + |
1305 | if (!policy->freq_table) |
1306 | - continue; |
1307 | + return -ENXIO; |
1308 | |
1309 | ret = cpufreq_frequency_table_cpuinfo(policy, |
1310 | policy->freq_table); |
1311 | if (ret) { |
1312 | pr_err("%s: Policy frequency update failed\n", |
1313 | __func__); |
1314 | - break; |
1315 | + return ret; |
1316 | } |
1317 | |
1318 | ret = freq_qos_update_request(policy->max_freq_req, policy->max); |
1319 | if (ret < 0) |
1320 | - break; |
1321 | + return ret; |
1322 | } |
1323 | |
1324 | - return ret; |
1325 | + return 0; |
1326 | } |
1327 | |
1328 | int cpufreq_boost_trigger_state(int state) |
1329 | diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c |
1330 | index c4632d84c9a1..637be2f903d3 100644 |
1331 | --- a/drivers/crypto/cavium/nitrox/nitrox_main.c |
1332 | +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c |
1333 | @@ -278,7 +278,7 @@ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) |
1334 | |
1335 | struct nitrox_device *nitrox_get_first_device(void) |
1336 | { |
1337 | - struct nitrox_device *ndev = NULL; |
1338 | + struct nitrox_device *ndev; |
1339 | |
1340 | mutex_lock(&devlist_lock); |
1341 | list_for_each_entry(ndev, &ndevlist, list) { |
1342 | @@ -286,7 +286,7 @@ struct nitrox_device *nitrox_get_first_device(void) |
1343 | break; |
1344 | } |
1345 | mutex_unlock(&devlist_lock); |
1346 | - if (!ndev) |
1347 | + if (&ndev->list == &ndevlist) |
1348 | return NULL; |
1349 | |
1350 | refcount_inc(&ndev->refcnt); |
1351 | diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c |
1352 | index 82b316b2f537..ac420b201dd8 100644 |
1353 | --- a/drivers/crypto/virtio/virtio_crypto_algs.c |
1354 | +++ b/drivers/crypto/virtio/virtio_crypto_algs.c |
1355 | @@ -353,13 +353,18 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, |
1356 | int err; |
1357 | unsigned long flags; |
1358 | struct scatterlist outhdr, iv_sg, status_sg, **sgs; |
1359 | - int i; |
1360 | u64 dst_len; |
1361 | unsigned int num_out = 0, num_in = 0; |
1362 | int sg_total; |
1363 | uint8_t *iv; |
1364 | + struct scatterlist *sg; |
1365 | |
1366 | src_nents = sg_nents_for_len(req->src, req->nbytes); |
1367 | + if (src_nents < 0) { |
1368 | + pr_err("Invalid number of src SG.\n"); |
1369 | + return src_nents; |
1370 | + } |
1371 | + |
1372 | dst_nents = sg_nents(req->dst); |
1373 | |
1374 | pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n", |
1375 | @@ -405,6 +410,7 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, |
1376 | goto free; |
1377 | } |
1378 | |
1379 | + dst_len = min_t(unsigned int, req->nbytes, dst_len); |
1380 | pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n", |
1381 | req->nbytes, dst_len); |
1382 | |
1383 | @@ -445,12 +451,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, |
1384 | vc_sym_req->iv = iv; |
1385 | |
1386 | /* Source data */ |
1387 | - for (i = 0; i < src_nents; i++) |
1388 | - sgs[num_out++] = &req->src[i]; |
1389 | + for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--) |
1390 | + sgs[num_out++] = sg; |
1391 | |
1392 | /* Destination data */ |
1393 | - for (i = 0; i < dst_nents; i++) |
1394 | - sgs[num_out + num_in++] = &req->dst[i]; |
1395 | + for (sg = req->dst; sg; sg = sg_next(sg)) |
1396 | + sgs[num_out + num_in++] = sg; |
1397 | |
1398 | /* Status */ |
1399 | sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status)); |
1400 | @@ -580,10 +586,11 @@ static void virtio_crypto_ablkcipher_finalize_req( |
1401 | scatterwalk_map_and_copy(req->info, req->dst, |
1402 | req->nbytes - AES_BLOCK_SIZE, |
1403 | AES_BLOCK_SIZE, 0); |
1404 | - crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, |
1405 | - req, err); |
1406 | kzfree(vc_sym_req->iv); |
1407 | virtcrypto_clear_request(&vc_sym_req->base); |
1408 | + |
1409 | + crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, |
1410 | + req, err); |
1411 | } |
1412 | |
1413 | static struct virtio_crypto_algo virtio_crypto_algs[] = { { |
1414 | diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c |
1415 | index c370d5457e6b..c0c5b6ecdb2e 100644 |
1416 | --- a/drivers/edac/i10nm_base.c |
1417 | +++ b/drivers/edac/i10nm_base.c |
1418 | @@ -162,7 +162,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci) |
1419 | mtr, mcddrtcfg, imc->mc, i, j); |
1420 | |
1421 | if (IS_DIMM_PRESENT(mtr)) |
1422 | - ndimms += skx_get_dimm_info(mtr, 0, dimm, |
1423 | + ndimms += skx_get_dimm_info(mtr, 0, 0, dimm, |
1424 | imc, i, j); |
1425 | else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) |
1426 | ndimms += skx_get_nvdimm_info(dimm, imc, i, j, |
1427 | diff --git a/drivers/edac/skx_base.c b/drivers/edac/skx_base.c |
1428 | index 0fcf3785e8f3..77cd370bd62f 100644 |
1429 | --- a/drivers/edac/skx_base.c |
1430 | +++ b/drivers/edac/skx_base.c |
1431 | @@ -151,27 +151,23 @@ static const struct x86_cpu_id skx_cpuids[] = { |
1432 | }; |
1433 | MODULE_DEVICE_TABLE(x86cpu, skx_cpuids); |
1434 | |
1435 | -#define SKX_GET_MTMTR(dev, reg) \ |
1436 | - pci_read_config_dword((dev), 0x87c, &(reg)) |
1437 | - |
1438 | -static bool skx_check_ecc(struct pci_dev *pdev) |
1439 | +static bool skx_check_ecc(u32 mcmtr) |
1440 | { |
1441 | - u32 mtmtr; |
1442 | - |
1443 | - SKX_GET_MTMTR(pdev, mtmtr); |
1444 | - |
1445 | - return !!GET_BITFIELD(mtmtr, 2, 2); |
1446 | + return !!GET_BITFIELD(mcmtr, 2, 2); |
1447 | } |
1448 | |
1449 | static int skx_get_dimm_config(struct mem_ctl_info *mci) |
1450 | { |
1451 | struct skx_pvt *pvt = mci->pvt_info; |
1452 | + u32 mtr, mcmtr, amap, mcddrtcfg; |
1453 | struct skx_imc *imc = pvt->imc; |
1454 | - u32 mtr, amap, mcddrtcfg; |
1455 | struct dimm_info *dimm; |
1456 | int i, j; |
1457 | int ndimms; |
1458 | |
1459 | + /* Only the mcmtr on the first channel is effective */ |
1460 | + pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr); |
1461 | + |
1462 | for (i = 0; i < SKX_NUM_CHANNELS; i++) { |
1463 | ndimms = 0; |
1464 | pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap); |
1465 | @@ -182,14 +178,14 @@ static int skx_get_dimm_config(struct mem_ctl_info *mci) |
1466 | pci_read_config_dword(imc->chan[i].cdev, |
1467 | 0x80 + 4 * j, &mtr); |
1468 | if (IS_DIMM_PRESENT(mtr)) { |
1469 | - ndimms += skx_get_dimm_info(mtr, amap, dimm, imc, i, j); |
1470 | + ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j); |
1471 | } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) { |
1472 | ndimms += skx_get_nvdimm_info(dimm, imc, i, j, |
1473 | EDAC_MOD_STR); |
1474 | nvdimm_count++; |
1475 | } |
1476 | } |
1477 | - if (ndimms && !skx_check_ecc(imc->chan[0].cdev)) { |
1478 | + if (ndimms && !skx_check_ecc(mcmtr)) { |
1479 | skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc); |
1480 | return -ENODEV; |
1481 | } |
1482 | diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c |
1483 | index a04349c6d17e..2177ad765bd1 100644 |
1484 | --- a/drivers/edac/skx_common.c |
1485 | +++ b/drivers/edac/skx_common.c |
1486 | @@ -283,7 +283,7 @@ static int skx_get_dimm_attr(u32 reg, int lobit, int hibit, int add, |
1487 | #define numrow(reg) skx_get_dimm_attr(reg, 2, 4, 12, 1, 6, "rows") |
1488 | #define numcol(reg) skx_get_dimm_attr(reg, 0, 1, 10, 0, 2, "cols") |
1489 | |
1490 | -int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, |
1491 | +int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, |
1492 | struct skx_imc *imc, int chan, int dimmno) |
1493 | { |
1494 | int banks = 16, ranks, rows, cols, npages; |
1495 | @@ -303,8 +303,8 @@ int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, |
1496 | imc->mc, chan, dimmno, size, npages, |
1497 | banks, 1 << ranks, rows, cols); |
1498 | |
1499 | - imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); |
1500 | - imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); |
1501 | + imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mcmtr, 0, 0); |
1502 | + imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mcmtr, 9, 9); |
1503 | imc->chan[chan].dimms[dimmno].fine_grain_bank = GET_BITFIELD(amap, 0, 0); |
1504 | imc->chan[chan].dimms[dimmno].rowbits = rows; |
1505 | imc->chan[chan].dimms[dimmno].colbits = cols; |
1506 | diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h |
1507 | index 08cc971a50ea..fed337c12954 100644 |
1508 | --- a/drivers/edac/skx_common.h |
1509 | +++ b/drivers/edac/skx_common.h |
1510 | @@ -126,7 +126,7 @@ int skx_get_all_bus_mappings(unsigned int did, int off, enum type, |
1511 | |
1512 | int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm); |
1513 | |
1514 | -int skx_get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, |
1515 | +int skx_get_dimm_info(u32 mtr, u32 mcmtr, u32 amap, struct dimm_info *dimm, |
1516 | struct skx_imc *imc, int chan, int dimmno); |
1517 | |
1518 | int skx_get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc, |
1519 | diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c |
1520 | index aff3dfb4d7ba..d187585db97a 100644 |
1521 | --- a/drivers/firmware/efi/efivars.c |
1522 | +++ b/drivers/firmware/efi/efivars.c |
1523 | @@ -522,8 +522,10 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var) |
1524 | ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype, |
1525 | NULL, "%s", short_name); |
1526 | kfree(short_name); |
1527 | - if (ret) |
1528 | + if (ret) { |
1529 | + kobject_put(&new_var->kobj); |
1530 | return ret; |
1531 | + } |
1532 | |
1533 | kobject_uevent(&new_var->kobj, KOBJ_ADD); |
1534 | if (efivar_entry_add(new_var, &efivar_sysfs_list)) { |
1535 | diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c |
1536 | index 35a5f8f8eea5..e48d971ffb61 100644 |
1537 | --- a/drivers/firmware/imx/imx-scu.c |
1538 | +++ b/drivers/firmware/imx/imx-scu.c |
1539 | @@ -38,6 +38,7 @@ struct imx_sc_ipc { |
1540 | struct device *dev; |
1541 | struct mutex lock; |
1542 | struct completion done; |
1543 | + bool fast_ipc; |
1544 | |
1545 | /* temporarily store the SCU msg */ |
1546 | u32 *msg; |
1547 | @@ -115,6 +116,26 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) |
1548 | struct imx_sc_ipc *sc_ipc = sc_chan->sc_ipc; |
1549 | struct imx_sc_rpc_msg *hdr; |
1550 | u32 *data = msg; |
1551 | + int i; |
1552 | + |
1553 | + if (!sc_ipc->msg) { |
1554 | + dev_warn(sc_ipc->dev, "unexpected rx idx %d 0x%08x, ignore!\n", |
1555 | + sc_chan->idx, *data); |
1556 | + return; |
1557 | + } |
1558 | + |
1559 | + if (sc_ipc->fast_ipc) { |
1560 | + hdr = msg; |
1561 | + sc_ipc->rx_size = hdr->size; |
1562 | + sc_ipc->msg[0] = *data++; |
1563 | + |
1564 | + for (i = 1; i < sc_ipc->rx_size; i++) |
1565 | + sc_ipc->msg[i] = *data++; |
1566 | + |
1567 | + complete(&sc_ipc->done); |
1568 | + |
1569 | + return; |
1570 | + } |
1571 | |
1572 | if (sc_chan->idx == 0) { |
1573 | hdr = msg; |
1574 | @@ -137,20 +158,22 @@ static void imx_scu_rx_callback(struct mbox_client *c, void *msg) |
1575 | |
1576 | static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) |
1577 | { |
1578 | - struct imx_sc_rpc_msg *hdr = msg; |
1579 | + struct imx_sc_rpc_msg hdr = *(struct imx_sc_rpc_msg *)msg; |
1580 | struct imx_sc_chan *sc_chan; |
1581 | u32 *data = msg; |
1582 | int ret; |
1583 | + int size; |
1584 | int i; |
1585 | |
1586 | /* Check size */ |
1587 | - if (hdr->size > IMX_SC_RPC_MAX_MSG) |
1588 | + if (hdr.size > IMX_SC_RPC_MAX_MSG) |
1589 | return -EINVAL; |
1590 | |
1591 | - dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr->svc, |
1592 | - hdr->func, hdr->size); |
1593 | + dev_dbg(sc_ipc->dev, "RPC SVC %u FUNC %u SIZE %u\n", hdr.svc, |
1594 | + hdr.func, hdr.size); |
1595 | |
1596 | - for (i = 0; i < hdr->size; i++) { |
1597 | + size = sc_ipc->fast_ipc ? 1 : hdr.size; |
1598 | + for (i = 0; i < size; i++) { |
1599 | sc_chan = &sc_ipc->chans[i % 4]; |
1600 | |
1601 | /* |
1602 | @@ -162,8 +185,10 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg) |
1603 | * Wait for tx_done before every send to ensure that no |
1604 | * queueing happens at the mailbox channel level. |
1605 | */ |
1606 | - wait_for_completion(&sc_chan->tx_done); |
1607 | - reinit_completion(&sc_chan->tx_done); |
1608 | + if (!sc_ipc->fast_ipc) { |
1609 | + wait_for_completion(&sc_chan->tx_done); |
1610 | + reinit_completion(&sc_chan->tx_done); |
1611 | + } |
1612 | |
1613 | ret = mbox_send_message(sc_chan->ch, &data[i]); |
1614 | if (ret < 0) |
1615 | @@ -187,7 +212,8 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) |
1616 | mutex_lock(&sc_ipc->lock); |
1617 | reinit_completion(&sc_ipc->done); |
1618 | |
1619 | - sc_ipc->msg = msg; |
1620 | + if (have_resp) |
1621 | + sc_ipc->msg = msg; |
1622 | sc_ipc->count = 0; |
1623 | ret = imx_scu_ipc_write(sc_ipc, msg); |
1624 | if (ret < 0) { |
1625 | @@ -209,6 +235,7 @@ int imx_scu_call_rpc(struct imx_sc_ipc *sc_ipc, void *msg, bool have_resp) |
1626 | } |
1627 | |
1628 | out: |
1629 | + sc_ipc->msg = NULL; |
1630 | mutex_unlock(&sc_ipc->lock); |
1631 | |
1632 | dev_dbg(sc_ipc->dev, "RPC SVC done\n"); |
1633 | @@ -224,6 +251,8 @@ static int imx_scu_probe(struct platform_device *pdev) |
1634 | struct imx_sc_chan *sc_chan; |
1635 | struct mbox_client *cl; |
1636 | char *chan_name; |
1637 | + struct of_phandle_args args; |
1638 | + int num_channel; |
1639 | int ret; |
1640 | int i; |
1641 | |
1642 | @@ -231,11 +260,20 @@ static int imx_scu_probe(struct platform_device *pdev) |
1643 | if (!sc_ipc) |
1644 | return -ENOMEM; |
1645 | |
1646 | - for (i = 0; i < SCU_MU_CHAN_NUM; i++) { |
1647 | - if (i < 4) |
1648 | + ret = of_parse_phandle_with_args(pdev->dev.of_node, "mboxes", |
1649 | + "#mbox-cells", 0, &args); |
1650 | + if (ret) |
1651 | + return ret; |
1652 | + |
1653 | + sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); |
1654 | + |
1655 | + num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM; |
1656 | + for (i = 0; i < num_channel; i++) { |
1657 | + if (i < num_channel / 2) |
1658 | chan_name = kasprintf(GFP_KERNEL, "tx%d", i); |
1659 | else |
1660 | - chan_name = kasprintf(GFP_KERNEL, "rx%d", i - 4); |
1661 | + chan_name = kasprintf(GFP_KERNEL, "rx%d", |
1662 | + i - num_channel / 2); |
1663 | |
1664 | if (!chan_name) |
1665 | return -ENOMEM; |
1666 | @@ -247,13 +285,15 @@ static int imx_scu_probe(struct platform_device *pdev) |
1667 | cl->knows_txdone = true; |
1668 | cl->rx_callback = imx_scu_rx_callback; |
1669 | |
1670 | - /* Initial tx_done completion as "done" */ |
1671 | - cl->tx_done = imx_scu_tx_done; |
1672 | - init_completion(&sc_chan->tx_done); |
1673 | - complete(&sc_chan->tx_done); |
1674 | + if (!sc_ipc->fast_ipc) { |
1675 | + /* Initial tx_done completion as "done" */ |
1676 | + cl->tx_done = imx_scu_tx_done; |
1677 | + init_completion(&sc_chan->tx_done); |
1678 | + complete(&sc_chan->tx_done); |
1679 | + } |
1680 | |
1681 | sc_chan->sc_ipc = sc_ipc; |
1682 | - sc_chan->idx = i % 4; |
1683 | + sc_chan->idx = i % (num_channel / 2); |
1684 | sc_chan->ch = mbox_request_channel_byname(cl, chan_name); |
1685 | if (IS_ERR(sc_chan->ch)) { |
1686 | ret = PTR_ERR(sc_chan->ch); |
1687 | diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
1688 | index 968d9b2705d0..6d0cc90401c0 100644 |
1689 | --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
1690 | +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c |
1691 | @@ -619,6 +619,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) |
1692 | GFP_KERNEL | |
1693 | __GFP_NORETRY | |
1694 | __GFP_NOWARN); |
1695 | + /* |
1696 | + * Using __get_user_pages_fast() with a read-only |
1697 | + * access is questionable. A read-only page may be |
1698 | + * COW-broken, and then this might end up giving |
1699 | + * the wrong side of the COW.. |
1700 | + * |
1701 | + * We may or may not care. |
1702 | + */ |
1703 | if (pvec) /* defer to worker if malloc fails */ |
1704 | pinned = __get_user_pages_fast(obj->userptr.ptr, |
1705 | num_pages, |
1706 | diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h |
1707 | index 5a95100fa18b..03b05c54722d 100644 |
1708 | --- a/drivers/gpu/drm/vkms/vkms_drv.h |
1709 | +++ b/drivers/gpu/drm/vkms/vkms_drv.h |
1710 | @@ -121,11 +121,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev, |
1711 | enum drm_plane_type type, int index); |
1712 | |
1713 | /* Gem stuff */ |
1714 | -struct drm_gem_object *vkms_gem_create(struct drm_device *dev, |
1715 | - struct drm_file *file, |
1716 | - u32 *handle, |
1717 | - u64 size); |
1718 | - |
1719 | vm_fault_t vkms_gem_fault(struct vm_fault *vmf); |
1720 | |
1721 | int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, |
1722 | diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c |
1723 | index 6489bfe0a149..8ba8b87d0c99 100644 |
1724 | --- a/drivers/gpu/drm/vkms/vkms_gem.c |
1725 | +++ b/drivers/gpu/drm/vkms/vkms_gem.c |
1726 | @@ -95,10 +95,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf) |
1727 | return ret; |
1728 | } |
1729 | |
1730 | -struct drm_gem_object *vkms_gem_create(struct drm_device *dev, |
1731 | - struct drm_file *file, |
1732 | - u32 *handle, |
1733 | - u64 size) |
1734 | +static struct drm_gem_object *vkms_gem_create(struct drm_device *dev, |
1735 | + struct drm_file *file, |
1736 | + u32 *handle, |
1737 | + u64 size) |
1738 | { |
1739 | struct vkms_gem_object *obj; |
1740 | int ret; |
1741 | @@ -111,7 +111,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev, |
1742 | return ERR_CAST(obj); |
1743 | |
1744 | ret = drm_gem_handle_create(file, &obj->gem, handle); |
1745 | - drm_gem_object_put_unlocked(&obj->gem); |
1746 | if (ret) |
1747 | return ERR_PTR(ret); |
1748 | |
1749 | @@ -140,6 +139,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev, |
1750 | args->size = gem_obj->size; |
1751 | args->pitch = pitch; |
1752 | |
1753 | + drm_gem_object_put_unlocked(gem_obj); |
1754 | + |
1755 | DRM_DEBUG_DRIVER("Created object of size %lld\n", size); |
1756 | |
1757 | return 0; |
1758 | diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c |
1759 | index f2a2d1246c19..adb08c3fc085 100644 |
1760 | --- a/drivers/infiniband/core/uverbs_main.c |
1761 | +++ b/drivers/infiniband/core/uverbs_main.c |
1762 | @@ -307,6 +307,8 @@ static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue, |
1763 | spin_lock_irq(&ev_queue->lock); |
1764 | if (!list_empty(&ev_queue->event_list)) |
1765 | pollflags = EPOLLIN | EPOLLRDNORM; |
1766 | + else if (ev_queue->is_closed) |
1767 | + pollflags = EPOLLERR; |
1768 | spin_unlock_irq(&ev_queue->lock); |
1769 | |
1770 | return pollflags; |
1771 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
1772 | index 4d2036209b45..758dae8d6500 100644 |
1773 | --- a/drivers/input/mouse/synaptics.c |
1774 | +++ b/drivers/input/mouse/synaptics.c |
1775 | @@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = { |
1776 | "LEN005b", /* P50 */ |
1777 | "LEN005e", /* T560 */ |
1778 | "LEN006c", /* T470s */ |
1779 | + "LEN007a", /* T470s */ |
1780 | "LEN0071", /* T480 */ |
1781 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ |
1782 | "LEN0073", /* X1 Carbon G5 (Elantech) */ |
1783 | diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c |
1784 | index a5ab774da4cc..fca908ba4841 100644 |
1785 | --- a/drivers/input/touchscreen/mms114.c |
1786 | +++ b/drivers/input/touchscreen/mms114.c |
1787 | @@ -91,15 +91,15 @@ static int __mms114_read_reg(struct mms114_data *data, unsigned int reg, |
1788 | if (reg <= MMS114_MODE_CONTROL && reg + len > MMS114_MODE_CONTROL) |
1789 | BUG(); |
1790 | |
1791 | - /* Write register: use repeated start */ |
1792 | + /* Write register */ |
1793 | xfer[0].addr = client->addr; |
1794 | - xfer[0].flags = I2C_M_TEN | I2C_M_NOSTART; |
1795 | + xfer[0].flags = client->flags & I2C_M_TEN; |
1796 | xfer[0].len = 1; |
1797 | xfer[0].buf = &buf; |
1798 | |
1799 | /* Read data */ |
1800 | xfer[1].addr = client->addr; |
1801 | - xfer[1].flags = I2C_M_RD; |
1802 | + xfer[1].flags = (client->flags & I2C_M_TEN) | I2C_M_RD; |
1803 | xfer[1].len = len; |
1804 | xfer[1].buf = val; |
1805 | |
1806 | @@ -428,10 +428,8 @@ static int mms114_probe(struct i2c_client *client, |
1807 | const void *match_data; |
1808 | int error; |
1809 | |
1810 | - if (!i2c_check_functionality(client->adapter, |
1811 | - I2C_FUNC_PROTOCOL_MANGLING)) { |
1812 | - dev_err(&client->dev, |
1813 | - "Need i2c bus that supports protocol mangling\n"); |
1814 | + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { |
1815 | + dev_err(&client->dev, "Not supported I2C adapter\n"); |
1816 | return -ENODEV; |
1817 | } |
1818 | |
1819 | diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c |
1820 | index ebb387aa5158..20eed28ea60d 100644 |
1821 | --- a/drivers/mmc/core/sdio.c |
1822 | +++ b/drivers/mmc/core/sdio.c |
1823 | @@ -584,7 +584,7 @@ try_again: |
1824 | */ |
1825 | err = mmc_send_io_op_cond(host, ocr, &rocr); |
1826 | if (err) |
1827 | - goto err; |
1828 | + return err; |
1829 | |
1830 | /* |
1831 | * For SPI, enable CRC as appropriate. |
1832 | @@ -592,17 +592,15 @@ try_again: |
1833 | if (mmc_host_is_spi(host)) { |
1834 | err = mmc_spi_set_crc(host, use_spi_crc); |
1835 | if (err) |
1836 | - goto err; |
1837 | + return err; |
1838 | } |
1839 | |
1840 | /* |
1841 | * Allocate card structure. |
1842 | */ |
1843 | card = mmc_alloc_card(host, NULL); |
1844 | - if (IS_ERR(card)) { |
1845 | - err = PTR_ERR(card); |
1846 | - goto err; |
1847 | - } |
1848 | + if (IS_ERR(card)) |
1849 | + return PTR_ERR(card); |
1850 | |
1851 | if ((rocr & R4_MEMORY_PRESENT) && |
1852 | mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { |
1853 | @@ -610,19 +608,15 @@ try_again: |
1854 | |
1855 | if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || |
1856 | memcmp(card->raw_cid, oldcard->raw_cid, sizeof(card->raw_cid)) != 0)) { |
1857 | - mmc_remove_card(card); |
1858 | - pr_debug("%s: Perhaps the card was replaced\n", |
1859 | - mmc_hostname(host)); |
1860 | - return -ENOENT; |
1861 | + err = -ENOENT; |
1862 | + goto mismatch; |
1863 | } |
1864 | } else { |
1865 | card->type = MMC_TYPE_SDIO; |
1866 | |
1867 | if (oldcard && oldcard->type != MMC_TYPE_SDIO) { |
1868 | - mmc_remove_card(card); |
1869 | - pr_debug("%s: Perhaps the card was replaced\n", |
1870 | - mmc_hostname(host)); |
1871 | - return -ENOENT; |
1872 | + err = -ENOENT; |
1873 | + goto mismatch; |
1874 | } |
1875 | } |
1876 | |
1877 | @@ -677,7 +671,7 @@ try_again: |
1878 | if (!oldcard && card->type == MMC_TYPE_SD_COMBO) { |
1879 | err = mmc_sd_get_csd(host, card); |
1880 | if (err) |
1881 | - return err; |
1882 | + goto remove; |
1883 | |
1884 | mmc_decode_cid(card); |
1885 | } |
1886 | @@ -704,7 +698,12 @@ try_again: |
1887 | mmc_set_timing(card->host, MMC_TIMING_SD_HS); |
1888 | } |
1889 | |
1890 | - goto finish; |
1891 | + if (oldcard) |
1892 | + mmc_remove_card(card); |
1893 | + else |
1894 | + host->card = card; |
1895 | + |
1896 | + return 0; |
1897 | } |
1898 | |
1899 | /* |
1900 | @@ -718,9 +717,8 @@ try_again: |
1901 | /* Retry init sequence, but without R4_18V_PRESENT. */ |
1902 | retries = 0; |
1903 | goto try_again; |
1904 | - } else { |
1905 | - goto remove; |
1906 | } |
1907 | + return err; |
1908 | } |
1909 | |
1910 | /* |
1911 | @@ -731,16 +729,14 @@ try_again: |
1912 | goto remove; |
1913 | |
1914 | if (oldcard) { |
1915 | - int same = (card->cis.vendor == oldcard->cis.vendor && |
1916 | - card->cis.device == oldcard->cis.device); |
1917 | - mmc_remove_card(card); |
1918 | - if (!same) { |
1919 | - pr_debug("%s: Perhaps the card was replaced\n", |
1920 | - mmc_hostname(host)); |
1921 | - return -ENOENT; |
1922 | + if (card->cis.vendor == oldcard->cis.vendor && |
1923 | + card->cis.device == oldcard->cis.device) { |
1924 | + mmc_remove_card(card); |
1925 | + card = oldcard; |
1926 | + } else { |
1927 | + err = -ENOENT; |
1928 | + goto mismatch; |
1929 | } |
1930 | - |
1931 | - card = oldcard; |
1932 | } |
1933 | card->ocr = ocr_card; |
1934 | mmc_fixup_device(card, sdio_fixup_methods); |
1935 | @@ -801,16 +797,15 @@ try_again: |
1936 | err = -EINVAL; |
1937 | goto remove; |
1938 | } |
1939 | -finish: |
1940 | - if (!oldcard) |
1941 | - host->card = card; |
1942 | + |
1943 | + host->card = card; |
1944 | return 0; |
1945 | |
1946 | +mismatch: |
1947 | + pr_debug("%s: Perhaps the card was replaced\n", mmc_hostname(host)); |
1948 | remove: |
1949 | - if (!oldcard) |
1950 | + if (oldcard != card) |
1951 | mmc_remove_card(card); |
1952 | - |
1953 | -err: |
1954 | return err; |
1955 | } |
1956 | |
1957 | diff --git a/drivers/mmc/host/mmci_stm32_sdmmc.c b/drivers/mmc/host/mmci_stm32_sdmmc.c |
1958 | index 8e83ae6920ae..0953bd8a4f79 100644 |
1959 | --- a/drivers/mmc/host/mmci_stm32_sdmmc.c |
1960 | +++ b/drivers/mmc/host/mmci_stm32_sdmmc.c |
1961 | @@ -162,6 +162,9 @@ static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl) |
1962 | static void sdmmc_idma_finalize(struct mmci_host *host, struct mmc_data *data) |
1963 | { |
1964 | writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR); |
1965 | + |
1966 | + if (!data->host_cookie) |
1967 | + sdmmc_idma_unprep_data(host, data, 0); |
1968 | } |
1969 | |
1970 | static void mmci_sdmmc_set_clkreg(struct mmci_host *host, unsigned int desired) |
1971 | diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c |
1972 | index 0148f8e6bb37..8b2a6a362c60 100644 |
1973 | --- a/drivers/mmc/host/sdhci-msm.c |
1974 | +++ b/drivers/mmc/host/sdhci-msm.c |
1975 | @@ -1112,6 +1112,12 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode) |
1976 | /* Clock-Data-Recovery used to dynamically adjust RX sampling point */ |
1977 | msm_host->use_cdr = true; |
1978 | |
1979 | + /* |
1980 | + * Clear tuning_done flag before tuning to ensure proper |
1981 | + * HS400 settings. |
1982 | + */ |
1983 | + msm_host->tuning_done = 0; |
1984 | + |
1985 | /* |
1986 | * For HS400 tuning in HS200 timing requires: |
1987 | * - select MCLK/2 in VENDOR_SPEC |
1988 | diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c |
1989 | index dec5a99f52cf..25083f010a7a 100644 |
1990 | --- a/drivers/mmc/host/tmio_mmc_core.c |
1991 | +++ b/drivers/mmc/host/tmio_mmc_core.c |
1992 | @@ -1285,12 +1285,14 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) |
1993 | cancel_work_sync(&host->done); |
1994 | cancel_delayed_work_sync(&host->delayed_reset_work); |
1995 | tmio_mmc_release_dma(host); |
1996 | + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); |
1997 | |
1998 | - pm_runtime_dont_use_autosuspend(&pdev->dev); |
1999 | if (host->native_hotplug) |
2000 | pm_runtime_put_noidle(&pdev->dev); |
2001 | - pm_runtime_put_sync(&pdev->dev); |
2002 | + |
2003 | pm_runtime_disable(&pdev->dev); |
2004 | + pm_runtime_dont_use_autosuspend(&pdev->dev); |
2005 | + pm_runtime_put_noidle(&pdev->dev); |
2006 | } |
2007 | EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); |
2008 | |
2009 | diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c |
2010 | index 0c72ec5546c3..aec9c8ae694c 100644 |
2011 | --- a/drivers/mmc/host/uniphier-sd.c |
2012 | +++ b/drivers/mmc/host/uniphier-sd.c |
2013 | @@ -614,11 +614,6 @@ static int uniphier_sd_probe(struct platform_device *pdev) |
2014 | } |
2015 | } |
2016 | |
2017 | - ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, |
2018 | - dev_name(dev), host); |
2019 | - if (ret) |
2020 | - goto free_host; |
2021 | - |
2022 | if (priv->caps & UNIPHIER_SD_CAP_EXTENDED_IP) |
2023 | host->dma_ops = &uniphier_sd_internal_dma_ops; |
2024 | else |
2025 | @@ -646,8 +641,15 @@ static int uniphier_sd_probe(struct platform_device *pdev) |
2026 | if (ret) |
2027 | goto free_host; |
2028 | |
2029 | + ret = devm_request_irq(dev, irq, tmio_mmc_irq, IRQF_SHARED, |
2030 | + dev_name(dev), host); |
2031 | + if (ret) |
2032 | + goto remove_host; |
2033 | + |
2034 | return 0; |
2035 | |
2036 | +remove_host: |
2037 | + tmio_mmc_host_remove(host); |
2038 | free_host: |
2039 | tmio_mmc_host_free(host); |
2040 | |
2041 | diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c |
2042 | index aaa03ce5796f..5a42ddeecfe5 100644 |
2043 | --- a/drivers/net/ethernet/ibm/ibmvnic.c |
2044 | +++ b/drivers/net/ethernet/ibm/ibmvnic.c |
2045 | @@ -4536,12 +4536,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, |
2046 | dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); |
2047 | break; |
2048 | } |
2049 | - dev_info(dev, "Partner protocol version is %d\n", |
2050 | - crq->version_exchange_rsp.version); |
2051 | - if (be16_to_cpu(crq->version_exchange_rsp.version) < |
2052 | - ibmvnic_version) |
2053 | - ibmvnic_version = |
2054 | + ibmvnic_version = |
2055 | be16_to_cpu(crq->version_exchange_rsp.version); |
2056 | + dev_info(dev, "Partner protocol version is %d\n", |
2057 | + ibmvnic_version); |
2058 | send_cap_queries(adapter); |
2059 | break; |
2060 | case QUERY_CAPABILITY_RSP: |
2061 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c |
2062 | index c28cbae42331..2c80205dc939 100644 |
2063 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c |
2064 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c |
2065 | @@ -152,6 +152,10 @@ void mlx5e_close_xsk(struct mlx5e_channel *c) |
2066 | mlx5e_close_cq(&c->xskicosq.cq); |
2067 | mlx5e_close_xdpsq(&c->xsksq); |
2068 | mlx5e_close_cq(&c->xsksq.cq); |
2069 | + |
2070 | + memset(&c->xskrq, 0, sizeof(c->xskrq)); |
2071 | + memset(&c->xsksq, 0, sizeof(c->xsksq)); |
2072 | + memset(&c->xskicosq, 0, sizeof(c->xskicosq)); |
2073 | } |
2074 | |
2075 | void mlx5e_activate_xsk(struct mlx5e_channel *c) |
2076 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c |
2077 | index f63beb399837..f628887d8af8 100644 |
2078 | --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c |
2079 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c |
2080 | @@ -193,15 +193,23 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) |
2081 | |
2082 | void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) |
2083 | { |
2084 | + bool err_detected = false; |
2085 | + |
2086 | + /* Mark the device as fatal in order to abort FW commands */ |
2087 | + if ((check_fatal_sensors(dev) || force) && |
2088 | + dev->state == MLX5_DEVICE_STATE_UP) { |
2089 | + dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
2090 | + err_detected = true; |
2091 | + } |
2092 | mutex_lock(&dev->intf_state_mutex); |
2093 | - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
2094 | - goto unlock; |
2095 | + if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) |
2096 | + goto unlock;/* a previous error is still being handled */ |
2097 | if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { |
2098 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
2099 | goto unlock; |
2100 | } |
2101 | |
2102 | - if (check_fatal_sensors(dev) || force) { |
2103 | + if (check_fatal_sensors(dev) || force) { /* protected state setting */ |
2104 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
2105 | mlx5_cmd_flush(dev); |
2106 | } |
2107 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2108 | index e4a690128b3a..7c0a726277b0 100644 |
2109 | --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2110 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
2111 | @@ -794,6 +794,11 @@ err_disable: |
2112 | |
2113 | static void mlx5_pci_close(struct mlx5_core_dev *dev) |
2114 | { |
2115 | + /* health work might still be active, and it needs pci bar in |
2116 | + * order to know the NIC state. Therefore, drain the health WQ |
2117 | + * before removing the pci bars |
2118 | + */ |
2119 | + mlx5_drain_health_wq(dev); |
2120 | iounmap(dev->iseg); |
2121 | pci_clear_master(dev->pdev); |
2122 | release_bar(dev->pdev); |
2123 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c |
2124 | index 35a1dc89c28a..71c90c8a9e94 100644 |
2125 | --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c |
2126 | +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c |
2127 | @@ -390,8 +390,7 @@ static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev, |
2128 | static int mlxsw_thermal_trend_get(struct thermal_zone_device *tzdev, |
2129 | int trip, enum thermal_trend *trend) |
2130 | { |
2131 | - struct mlxsw_thermal_module *tz = tzdev->devdata; |
2132 | - struct mlxsw_thermal *thermal = tz->parent; |
2133 | + struct mlxsw_thermal *thermal = tzdev->devdata; |
2134 | |
2135 | if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) |
2136 | return -EINVAL; |
2137 | @@ -592,6 +591,22 @@ mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip, |
2138 | return 0; |
2139 | } |
2140 | |
2141 | +static int mlxsw_thermal_module_trend_get(struct thermal_zone_device *tzdev, |
2142 | + int trip, enum thermal_trend *trend) |
2143 | +{ |
2144 | + struct mlxsw_thermal_module *tz = tzdev->devdata; |
2145 | + struct mlxsw_thermal *thermal = tz->parent; |
2146 | + |
2147 | + if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS) |
2148 | + return -EINVAL; |
2149 | + |
2150 | + if (tzdev == thermal->tz_highest_dev) |
2151 | + return 1; |
2152 | + |
2153 | + *trend = THERMAL_TREND_STABLE; |
2154 | + return 0; |
2155 | +} |
2156 | + |
2157 | static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { |
2158 | .bind = mlxsw_thermal_module_bind, |
2159 | .unbind = mlxsw_thermal_module_unbind, |
2160 | @@ -603,7 +618,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_module_ops = { |
2161 | .set_trip_temp = mlxsw_thermal_module_trip_temp_set, |
2162 | .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, |
2163 | .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, |
2164 | - .get_trend = mlxsw_thermal_trend_get, |
2165 | + .get_trend = mlxsw_thermal_module_trend_get, |
2166 | }; |
2167 | |
2168 | static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev, |
2169 | @@ -642,7 +657,7 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = { |
2170 | .set_trip_temp = mlxsw_thermal_module_trip_temp_set, |
2171 | .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get, |
2172 | .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set, |
2173 | - .get_trend = mlxsw_thermal_trend_get, |
2174 | + .get_trend = mlxsw_thermal_module_trend_get, |
2175 | }; |
2176 | |
2177 | static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev, |
2178 | diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c |
2179 | index b16a1221d19b..fb182bec8f06 100644 |
2180 | --- a/drivers/net/net_failover.c |
2181 | +++ b/drivers/net/net_failover.c |
2182 | @@ -61,7 +61,8 @@ static int net_failover_open(struct net_device *dev) |
2183 | return 0; |
2184 | |
2185 | err_standby_open: |
2186 | - dev_close(primary_dev); |
2187 | + if (primary_dev) |
2188 | + dev_close(primary_dev); |
2189 | err_primary_open: |
2190 | netif_tx_disable(dev); |
2191 | return err; |
2192 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
2193 | index 6e9a59e3d822..46bdd0df2eb8 100644 |
2194 | --- a/drivers/net/tun.c |
2195 | +++ b/drivers/net/tun.c |
2196 | @@ -1908,8 +1908,11 @@ drop: |
2197 | skb->dev = tun->dev; |
2198 | break; |
2199 | case IFF_TAP: |
2200 | - if (!frags) |
2201 | - skb->protocol = eth_type_trans(skb, tun->dev); |
2202 | + if (frags && !pskb_may_pull(skb, ETH_HLEN)) { |
2203 | + err = -ENOMEM; |
2204 | + goto drop; |
2205 | + } |
2206 | + skb->protocol = eth_type_trans(skb, tun->dev); |
2207 | break; |
2208 | } |
2209 | |
2210 | @@ -1966,9 +1969,12 @@ drop: |
2211 | } |
2212 | |
2213 | if (frags) { |
2214 | + u32 headlen; |
2215 | + |
2216 | /* Exercise flow dissector code path. */ |
2217 | - u32 headlen = eth_get_headlen(tun->dev, skb->data, |
2218 | - skb_headlen(skb)); |
2219 | + skb_push(skb, ETH_HLEN); |
2220 | + headlen = eth_get_headlen(tun->dev, skb->data, |
2221 | + skb_headlen(skb)); |
2222 | |
2223 | if (unlikely(headlen > skb_headlen(skb))) { |
2224 | this_cpu_inc(tun->pcpu_stats->rx_dropped); |
2225 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
2226 | index ae59fca96032..03434db36b5c 100644 |
2227 | --- a/drivers/net/vxlan.c |
2228 | +++ b/drivers/net/vxlan.c |
2229 | @@ -1924,6 +1924,10 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, |
2230 | ns_olen = request->len - skb_network_offset(request) - |
2231 | sizeof(struct ipv6hdr) - sizeof(*ns); |
2232 | for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) { |
2233 | + if (!ns->opt[i + 1]) { |
2234 | + kfree_skb(reply); |
2235 | + return NULL; |
2236 | + } |
2237 | if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { |
2238 | daddr = ns->opt + i + sizeof(struct nd_opt_hdr); |
2239 | break; |
2240 | diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c |
2241 | index dd0c32379375..4ed21dad6a8e 100644 |
2242 | --- a/drivers/net/wireless/ath/ath9k/hif_usb.c |
2243 | +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c |
2244 | @@ -612,6 +612,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, |
2245 | hif_dev->remain_skb = nskb; |
2246 | spin_unlock(&hif_dev->rx_lock); |
2247 | } else { |
2248 | + if (pool_index == MAX_PKT_NUM_IN_TRANSFER) { |
2249 | + dev_err(&hif_dev->udev->dev, |
2250 | + "ath9k_htc: over RX MAX_PKT_NUM\n"); |
2251 | + goto err; |
2252 | + } |
2253 | nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); |
2254 | if (!nskb) { |
2255 | dev_err(&hif_dev->udev->dev, |
2256 | @@ -638,9 +643,9 @@ err: |
2257 | |
2258 | static void ath9k_hif_usb_rx_cb(struct urb *urb) |
2259 | { |
2260 | - struct sk_buff *skb = (struct sk_buff *) urb->context; |
2261 | - struct hif_device_usb *hif_dev = |
2262 | - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); |
2263 | + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; |
2264 | + struct hif_device_usb *hif_dev = rx_buf->hif_dev; |
2265 | + struct sk_buff *skb = rx_buf->skb; |
2266 | int ret; |
2267 | |
2268 | if (!skb) |
2269 | @@ -680,14 +685,15 @@ resubmit: |
2270 | return; |
2271 | free: |
2272 | kfree_skb(skb); |
2273 | + kfree(rx_buf); |
2274 | } |
2275 | |
2276 | static void ath9k_hif_usb_reg_in_cb(struct urb *urb) |
2277 | { |
2278 | - struct sk_buff *skb = (struct sk_buff *) urb->context; |
2279 | + struct rx_buf *rx_buf = (struct rx_buf *)urb->context; |
2280 | + struct hif_device_usb *hif_dev = rx_buf->hif_dev; |
2281 | + struct sk_buff *skb = rx_buf->skb; |
2282 | struct sk_buff *nskb; |
2283 | - struct hif_device_usb *hif_dev = |
2284 | - usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); |
2285 | int ret; |
2286 | |
2287 | if (!skb) |
2288 | @@ -745,6 +751,7 @@ resubmit: |
2289 | return; |
2290 | free: |
2291 | kfree_skb(skb); |
2292 | + kfree(rx_buf); |
2293 | urb->context = NULL; |
2294 | } |
2295 | |
2296 | @@ -790,7 +797,7 @@ static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev) |
2297 | init_usb_anchor(&hif_dev->mgmt_submitted); |
2298 | |
2299 | for (i = 0; i < MAX_TX_URB_NUM; i++) { |
2300 | - tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL); |
2301 | + tx_buf = kzalloc(sizeof(*tx_buf), GFP_KERNEL); |
2302 | if (!tx_buf) |
2303 | goto err; |
2304 | |
2305 | @@ -827,8 +834,9 @@ static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev) |
2306 | |
2307 | static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) |
2308 | { |
2309 | - struct urb *urb = NULL; |
2310 | + struct rx_buf *rx_buf = NULL; |
2311 | struct sk_buff *skb = NULL; |
2312 | + struct urb *urb = NULL; |
2313 | int i, ret; |
2314 | |
2315 | init_usb_anchor(&hif_dev->rx_submitted); |
2316 | @@ -836,6 +844,12 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) |
2317 | |
2318 | for (i = 0; i < MAX_RX_URB_NUM; i++) { |
2319 | |
2320 | + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); |
2321 | + if (!rx_buf) { |
2322 | + ret = -ENOMEM; |
2323 | + goto err_rxb; |
2324 | + } |
2325 | + |
2326 | /* Allocate URB */ |
2327 | urb = usb_alloc_urb(0, GFP_KERNEL); |
2328 | if (urb == NULL) { |
2329 | @@ -850,11 +864,14 @@ static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev) |
2330 | goto err_skb; |
2331 | } |
2332 | |
2333 | + rx_buf->hif_dev = hif_dev; |
2334 | + rx_buf->skb = skb; |
2335 | + |
2336 | usb_fill_bulk_urb(urb, hif_dev->udev, |
2337 | usb_rcvbulkpipe(hif_dev->udev, |
2338 | USB_WLAN_RX_PIPE), |
2339 | skb->data, MAX_RX_BUF_SIZE, |
2340 | - ath9k_hif_usb_rx_cb, skb); |
2341 | + ath9k_hif_usb_rx_cb, rx_buf); |
2342 | |
2343 | /* Anchor URB */ |
2344 | usb_anchor_urb(urb, &hif_dev->rx_submitted); |
2345 | @@ -880,6 +897,8 @@ err_submit: |
2346 | err_skb: |
2347 | usb_free_urb(urb); |
2348 | err_urb: |
2349 | + kfree(rx_buf); |
2350 | +err_rxb: |
2351 | ath9k_hif_usb_dealloc_rx_urbs(hif_dev); |
2352 | return ret; |
2353 | } |
2354 | @@ -891,14 +910,21 @@ static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev) |
2355 | |
2356 | static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) |
2357 | { |
2358 | - struct urb *urb = NULL; |
2359 | + struct rx_buf *rx_buf = NULL; |
2360 | struct sk_buff *skb = NULL; |
2361 | + struct urb *urb = NULL; |
2362 | int i, ret; |
2363 | |
2364 | init_usb_anchor(&hif_dev->reg_in_submitted); |
2365 | |
2366 | for (i = 0; i < MAX_REG_IN_URB_NUM; i++) { |
2367 | |
2368 | + rx_buf = kzalloc(sizeof(*rx_buf), GFP_KERNEL); |
2369 | + if (!rx_buf) { |
2370 | + ret = -ENOMEM; |
2371 | + goto err_rxb; |
2372 | + } |
2373 | + |
2374 | /* Allocate URB */ |
2375 | urb = usb_alloc_urb(0, GFP_KERNEL); |
2376 | if (urb == NULL) { |
2377 | @@ -913,11 +939,14 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev) |
2378 | goto err_skb; |
2379 | } |
2380 | |
2381 | + rx_buf->hif_dev = hif_dev; |
2382 | + rx_buf->skb = skb; |
2383 | + |
2384 | usb_fill_int_urb(urb, hif_dev->udev, |
2385 | usb_rcvintpipe(hif_dev->udev, |
2386 | USB_REG_IN_PIPE), |
2387 | skb->data, MAX_REG_IN_BUF_SIZE, |
2388 | - ath9k_hif_usb_reg_in_cb, skb, 1); |
2389 | + ath9k_hif_usb_reg_in_cb, rx_buf, 1); |
2390 | |
2391 | /* Anchor URB */ |
2392 | usb_anchor_urb(urb, &hif_dev->reg_in_submitted); |
2393 | @@ -943,6 +972,8 @@ err_submit: |
2394 | err_skb: |
2395 | usb_free_urb(urb); |
2396 | err_urb: |
2397 | + kfree(rx_buf); |
2398 | +err_rxb: |
2399 | ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); |
2400 | return ret; |
2401 | } |
2402 | @@ -973,7 +1004,7 @@ err: |
2403 | return -ENOMEM; |
2404 | } |
2405 | |
2406 | -static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) |
2407 | +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev) |
2408 | { |
2409 | usb_kill_anchored_urbs(&hif_dev->regout_submitted); |
2410 | ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev); |
2411 | @@ -1341,8 +1372,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface) |
2412 | |
2413 | if (hif_dev->flags & HIF_USB_READY) { |
2414 | ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged); |
2415 | - ath9k_htc_hw_free(hif_dev->htc_handle); |
2416 | ath9k_hif_usb_dev_deinit(hif_dev); |
2417 | + ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv); |
2418 | + ath9k_htc_hw_free(hif_dev->htc_handle); |
2419 | } |
2420 | |
2421 | usb_set_intfdata(interface, NULL); |
2422 | diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h |
2423 | index 7846916aa01d..5985aa15ca93 100644 |
2424 | --- a/drivers/net/wireless/ath/ath9k/hif_usb.h |
2425 | +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h |
2426 | @@ -86,6 +86,11 @@ struct tx_buf { |
2427 | struct list_head list; |
2428 | }; |
2429 | |
2430 | +struct rx_buf { |
2431 | + struct sk_buff *skb; |
2432 | + struct hif_device_usb *hif_dev; |
2433 | +}; |
2434 | + |
2435 | #define HIF_USB_TX_STOP BIT(0) |
2436 | #define HIF_USB_TX_FLUSH BIT(1) |
2437 | |
2438 | @@ -133,5 +138,6 @@ struct hif_device_usb { |
2439 | |
2440 | int ath9k_hif_usb_init(void); |
2441 | void ath9k_hif_usb_exit(void); |
2442 | +void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev); |
2443 | |
2444 | #endif /* HTC_USB_H */ |
2445 | diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c |
2446 | index d961095ab01f..40a065028ebe 100644 |
2447 | --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c |
2448 | +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c |
2449 | @@ -931,8 +931,9 @@ err_init: |
2450 | int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, |
2451 | u16 devid, char *product, u32 drv_info) |
2452 | { |
2453 | - struct ieee80211_hw *hw; |
2454 | + struct hif_device_usb *hif_dev; |
2455 | struct ath9k_htc_priv *priv; |
2456 | + struct ieee80211_hw *hw; |
2457 | int ret; |
2458 | |
2459 | hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops); |
2460 | @@ -967,7 +968,10 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, |
2461 | return 0; |
2462 | |
2463 | err_init: |
2464 | - ath9k_deinit_wmi(priv); |
2465 | + ath9k_stop_wmi(priv); |
2466 | + hif_dev = (struct hif_device_usb *)htc_handle->hif_dev; |
2467 | + ath9k_hif_usb_dealloc_urbs(hif_dev); |
2468 | + ath9k_destoy_wmi(priv); |
2469 | err_free: |
2470 | ieee80211_free_hw(hw); |
2471 | return ret; |
2472 | @@ -982,7 +986,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) |
2473 | htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED; |
2474 | |
2475 | ath9k_deinit_device(htc_handle->drv_priv); |
2476 | - ath9k_deinit_wmi(htc_handle->drv_priv); |
2477 | + ath9k_stop_wmi(htc_handle->drv_priv); |
2478 | ieee80211_free_hw(htc_handle->drv_priv->hw); |
2479 | } |
2480 | } |
2481 | diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2482 | index 9cec5c216e1f..118e5550b10c 100644 |
2483 | --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2484 | +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2485 | @@ -999,9 +999,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv, |
2486 | * which are not PHY_ERROR (short radar pulses have a length of 3) |
2487 | */ |
2488 | if (unlikely(!rs_datalen || (rs_datalen < 10 && !is_phyerr))) { |
2489 | - ath_warn(common, |
2490 | - "Short RX data len, dropping (dlen: %d)\n", |
2491 | - rs_datalen); |
2492 | + ath_dbg(common, ANY, |
2493 | + "Short RX data len, dropping (dlen: %d)\n", |
2494 | + rs_datalen); |
2495 | goto rx_next; |
2496 | } |
2497 | |
2498 | diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c |
2499 | index d091c8ebdcf0..d2e062eaf561 100644 |
2500 | --- a/drivers/net/wireless/ath/ath9k/htc_hst.c |
2501 | +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c |
2502 | @@ -113,6 +113,9 @@ static void htc_process_conn_rsp(struct htc_target *target, |
2503 | |
2504 | if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) { |
2505 | epid = svc_rspmsg->endpoint_id; |
2506 | + if (epid < 0 || epid >= ENDPOINT_MAX) |
2507 | + return; |
2508 | + |
2509 | service_id = be16_to_cpu(svc_rspmsg->service_id); |
2510 | max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len); |
2511 | endpoint = &target->endpoint[epid]; |
2512 | @@ -170,7 +173,6 @@ static int htc_config_pipe_credits(struct htc_target *target) |
2513 | time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); |
2514 | if (!time_left) { |
2515 | dev_err(target->dev, "HTC credit config timeout\n"); |
2516 | - kfree_skb(skb); |
2517 | return -ETIMEDOUT; |
2518 | } |
2519 | |
2520 | @@ -206,7 +208,6 @@ static int htc_setup_complete(struct htc_target *target) |
2521 | time_left = wait_for_completion_timeout(&target->cmd_wait, HZ); |
2522 | if (!time_left) { |
2523 | dev_err(target->dev, "HTC start timeout\n"); |
2524 | - kfree_skb(skb); |
2525 | return -ETIMEDOUT; |
2526 | } |
2527 | |
2528 | @@ -279,7 +280,6 @@ int htc_connect_service(struct htc_target *target, |
2529 | if (!time_left) { |
2530 | dev_err(target->dev, "Service connection timeout for: %d\n", |
2531 | service_connreq->service_id); |
2532 | - kfree_skb(skb); |
2533 | return -ETIMEDOUT; |
2534 | } |
2535 | |
2536 | diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c |
2537 | index cdc146091194..e7a3127395be 100644 |
2538 | --- a/drivers/net/wireless/ath/ath9k/wmi.c |
2539 | +++ b/drivers/net/wireless/ath/ath9k/wmi.c |
2540 | @@ -112,14 +112,17 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv) |
2541 | return wmi; |
2542 | } |
2543 | |
2544 | -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv) |
2545 | +void ath9k_stop_wmi(struct ath9k_htc_priv *priv) |
2546 | { |
2547 | struct wmi *wmi = priv->wmi; |
2548 | |
2549 | mutex_lock(&wmi->op_mutex); |
2550 | wmi->stopped = true; |
2551 | mutex_unlock(&wmi->op_mutex); |
2552 | +} |
2553 | |
2554 | +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv) |
2555 | +{ |
2556 | kfree(priv->wmi); |
2557 | } |
2558 | |
2559 | @@ -336,7 +339,6 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, |
2560 | ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", |
2561 | wmi_cmd_to_name(cmd_id)); |
2562 | mutex_unlock(&wmi->op_mutex); |
2563 | - kfree_skb(skb); |
2564 | return -ETIMEDOUT; |
2565 | } |
2566 | |
2567 | diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h |
2568 | index 380175d5ecd7..d8b912206232 100644 |
2569 | --- a/drivers/net/wireless/ath/ath9k/wmi.h |
2570 | +++ b/drivers/net/wireless/ath/ath9k/wmi.h |
2571 | @@ -179,7 +179,6 @@ struct wmi { |
2572 | }; |
2573 | |
2574 | struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv); |
2575 | -void ath9k_deinit_wmi(struct ath9k_htc_priv *priv); |
2576 | int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi, |
2577 | enum htc_endpoint_id *wmi_ctrl_epid); |
2578 | int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, |
2579 | @@ -189,6 +188,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, |
2580 | void ath9k_wmi_event_tasklet(unsigned long data); |
2581 | void ath9k_fatal_work(struct work_struct *work); |
2582 | void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv); |
2583 | +void ath9k_stop_wmi(struct ath9k_htc_priv *priv); |
2584 | +void ath9k_destoy_wmi(struct ath9k_htc_priv *priv); |
2585 | |
2586 | #define WMI_CMD(_wmi_cmd) \ |
2587 | do { \ |
2588 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
2589 | index ed367b0a185c..f49887379c43 100644 |
2590 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
2591 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c |
2592 | @@ -281,7 +281,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) |
2593 | int regulatory_type; |
2594 | |
2595 | /* Checking for required sections */ |
2596 | - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { |
2597 | + if (mvm->trans->cfg->nvm_type == IWL_NVM) { |
2598 | if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || |
2599 | !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { |
2600 | IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); |
2601 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
2602 | index 779132aef0fb..c73e8095a849 100644 |
2603 | --- a/drivers/pci/pci.c |
2604 | +++ b/drivers/pci/pci.c |
2605 | @@ -4621,10 +4621,10 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, |
2606 | |
2607 | /* |
2608 | * Some controllers might not implement link active reporting. In this |
2609 | - * case, we wait for 1000 + 100 ms. |
2610 | + * case, we wait for 1000 ms + any delay requested by the caller. |
2611 | */ |
2612 | if (!pdev->link_active_reporting) { |
2613 | - msleep(1100); |
2614 | + msleep(timeout + delay); |
2615 | return true; |
2616 | } |
2617 | |
2618 | diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c |
2619 | index 010f541a5002..0896b3614eb1 100644 |
2620 | --- a/drivers/remoteproc/remoteproc_core.c |
2621 | +++ b/drivers/remoteproc/remoteproc_core.c |
2622 | @@ -511,7 +511,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, |
2623 | |
2624 | /* Initialise vdev subdevice */ |
2625 | snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index); |
2626 | - rvdev->dev.parent = rproc->dev.parent; |
2627 | + rvdev->dev.parent = &rproc->dev; |
2628 | rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset; |
2629 | rvdev->dev.release = rproc_rvdev_release; |
2630 | dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name); |
2631 | diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c |
2632 | index 31a62a0b470e..380d52672035 100644 |
2633 | --- a/drivers/remoteproc/remoteproc_virtio.c |
2634 | +++ b/drivers/remoteproc/remoteproc_virtio.c |
2635 | @@ -375,6 +375,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id) |
2636 | goto out; |
2637 | } |
2638 | } |
2639 | + } else { |
2640 | + struct device_node *np = rproc->dev.parent->of_node; |
2641 | + |
2642 | + /* |
2643 | + * If we don't have dedicated buffer, just attempt to re-assign |
2644 | + * the reserved memory from our parent. A default memory-region |
2645 | + * at index 0 from the parent's memory-regions is assigned for |
2646 | + * the rvdev dev to allocate from. Failure is non-critical and |
2647 | + * the allocations will fall back to global pools, so don't |
2648 | + * check return value either. |
2649 | + */ |
2650 | + of_reserved_mem_device_init_by_idx(dev, np, 0); |
2651 | } |
2652 | |
2653 | /* Allocate virtio device */ |
2654 | diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c |
2655 | index 85f77c1ed23c..4a09f21cb235 100644 |
2656 | --- a/drivers/scsi/lpfc/lpfc_ct.c |
2657 | +++ b/drivers/scsi/lpfc/lpfc_ct.c |
2658 | @@ -462,7 +462,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) |
2659 | struct lpfc_nodelist *ndlp; |
2660 | |
2661 | if ((vport->port_type != LPFC_NPIV_PORT) || |
2662 | - (fc4_type == FC_TYPE_FCP) || |
2663 | !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { |
2664 | |
2665 | ndlp = lpfc_setup_disc_node(vport, Did); |
2666 | diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2667 | index d86838801805..3d48024082ba 100644 |
2668 | --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2669 | +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c |
2670 | @@ -4227,6 +4227,7 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) |
2671 | struct fusion_context *fusion; |
2672 | struct megasas_cmd *cmd_mfi; |
2673 | union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; |
2674 | + struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req; |
2675 | u16 smid; |
2676 | bool refire_cmd = 0; |
2677 | u8 result; |
2678 | @@ -4284,6 +4285,11 @@ static void megasas_refire_mgmt_cmd(struct megasas_instance *instance) |
2679 | break; |
2680 | } |
2681 | |
2682 | + scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) |
2683 | + cmd_fusion->io_request; |
2684 | + if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) |
2685 | + result = RETURN_CMD; |
2686 | + |
2687 | switch (result) { |
2688 | case REFIRE_CMD: |
2689 | megasas_fire_cmd_fusion(instance, req_desc); |
2690 | @@ -4481,7 +4487,6 @@ megasas_issue_tm(struct megasas_instance *instance, u16 device_handle, |
2691 | if (!timeleft) { |
2692 | dev_err(&instance->pdev->dev, |
2693 | "task mgmt type 0x%x timed out\n", type); |
2694 | - cmd_mfi->flags |= DRV_DCMD_SKIP_REFIRE; |
2695 | mutex_unlock(&instance->reset_mutex); |
2696 | rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR); |
2697 | mutex_lock(&instance->reset_mutex); |
2698 | diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c |
2699 | index 7a3531856491..d0afe0b1599f 100644 |
2700 | --- a/drivers/spi/spi-bcm-qspi.c |
2701 | +++ b/drivers/spi/spi-bcm-qspi.c |
2702 | @@ -670,7 +670,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) |
2703 | if (buf) |
2704 | buf[tp.byte] = read_rxram_slot_u8(qspi, slot); |
2705 | dev_dbg(&qspi->pdev->dev, "RD %02x\n", |
2706 | - buf ? buf[tp.byte] : 0xff); |
2707 | + buf ? buf[tp.byte] : 0x0); |
2708 | } else { |
2709 | u16 *buf = tp.trans->rx_buf; |
2710 | |
2711 | @@ -678,7 +678,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots) |
2712 | buf[tp.byte / 2] = read_rxram_slot_u16(qspi, |
2713 | slot); |
2714 | dev_dbg(&qspi->pdev->dev, "RD %04x\n", |
2715 | - buf ? buf[tp.byte] : 0xffff); |
2716 | + buf ? buf[tp.byte / 2] : 0x0); |
2717 | } |
2718 | |
2719 | update_qspi_trans_byte_count(qspi, &tp, |
2720 | @@ -733,13 +733,13 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi) |
2721 | while (!tstatus && slot < MSPI_NUM_CDRAM) { |
2722 | if (tp.trans->bits_per_word <= 8) { |
2723 | const u8 *buf = tp.trans->tx_buf; |
2724 | - u8 val = buf ? buf[tp.byte] : 0xff; |
2725 | + u8 val = buf ? buf[tp.byte] : 0x00; |
2726 | |
2727 | write_txram_slot_u8(qspi, slot, val); |
2728 | dev_dbg(&qspi->pdev->dev, "WR %02x\n", val); |
2729 | } else { |
2730 | const u16 *buf = tp.trans->tx_buf; |
2731 | - u16 val = buf ? buf[tp.byte / 2] : 0xffff; |
2732 | + u16 val = buf ? buf[tp.byte / 2] : 0x0000; |
2733 | |
2734 | write_txram_slot_u16(qspi, slot, val); |
2735 | dev_dbg(&qspi->pdev->dev, "WR %04x\n", val); |
2736 | @@ -1220,6 +1220,11 @@ int bcm_qspi_probe(struct platform_device *pdev, |
2737 | } |
2738 | |
2739 | qspi = spi_master_get_devdata(master); |
2740 | + |
2741 | + qspi->clk = devm_clk_get_optional(&pdev->dev, NULL); |
2742 | + if (IS_ERR(qspi->clk)) |
2743 | + return PTR_ERR(qspi->clk); |
2744 | + |
2745 | qspi->pdev = pdev; |
2746 | qspi->trans_pos.trans = NULL; |
2747 | qspi->trans_pos.byte = 0; |
2748 | @@ -1332,13 +1337,6 @@ int bcm_qspi_probe(struct platform_device *pdev, |
2749 | qspi->soc_intc = NULL; |
2750 | } |
2751 | |
2752 | - qspi->clk = devm_clk_get(&pdev->dev, NULL); |
2753 | - if (IS_ERR(qspi->clk)) { |
2754 | - dev_warn(dev, "unable to get clock\n"); |
2755 | - ret = PTR_ERR(qspi->clk); |
2756 | - goto qspi_probe_err; |
2757 | - } |
2758 | - |
2759 | ret = clk_prepare_enable(qspi->clk); |
2760 | if (ret) { |
2761 | dev_err(dev, "failed to prepare clock\n"); |
2762 | diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c |
2763 | index b4070c0de3df..c88f5d99c906 100644 |
2764 | --- a/drivers/spi/spi-bcm2835.c |
2765 | +++ b/drivers/spi/spi-bcm2835.c |
2766 | @@ -1330,7 +1330,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev) |
2767 | goto out_clk_disable; |
2768 | } |
2769 | |
2770 | - err = devm_spi_register_controller(&pdev->dev, ctlr); |
2771 | + err = spi_register_controller(ctlr); |
2772 | if (err) { |
2773 | dev_err(&pdev->dev, "could not register SPI controller: %d\n", |
2774 | err); |
2775 | @@ -1355,6 +1355,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev) |
2776 | |
2777 | bcm2835_debugfs_remove(bs); |
2778 | |
2779 | + spi_unregister_controller(ctlr); |
2780 | + |
2781 | /* Clear FIFOs, and disable the HW block */ |
2782 | bcm2835_wr(bs, BCM2835_SPI_CS, |
2783 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); |
2784 | diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c |
2785 | index a2162ff56a12..c331efd6e86b 100644 |
2786 | --- a/drivers/spi/spi-bcm2835aux.c |
2787 | +++ b/drivers/spi/spi-bcm2835aux.c |
2788 | @@ -569,7 +569,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev) |
2789 | goto out_clk_disable; |
2790 | } |
2791 | |
2792 | - err = devm_spi_register_master(&pdev->dev, master); |
2793 | + err = spi_register_master(master); |
2794 | if (err) { |
2795 | dev_err(&pdev->dev, "could not register SPI master: %d\n", err); |
2796 | goto out_clk_disable; |
2797 | @@ -593,6 +593,8 @@ static int bcm2835aux_spi_remove(struct platform_device *pdev) |
2798 | |
2799 | bcm2835aux_debugfs_remove(bs); |
2800 | |
2801 | + spi_unregister_master(master); |
2802 | + |
2803 | bcm2835aux_spi_reset_hw(bs); |
2804 | |
2805 | /* disable the HW block by releasing the clock */ |
2806 | diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c |
2807 | index d2ca3b357cfe..82c5c027ec4c 100644 |
2808 | --- a/drivers/spi/spi-dw.c |
2809 | +++ b/drivers/spi/spi-dw.c |
2810 | @@ -128,12 +128,20 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable) |
2811 | { |
2812 | struct dw_spi *dws = spi_controller_get_devdata(spi->controller); |
2813 | struct chip_data *chip = spi_get_ctldata(spi); |
2814 | + bool cs_high = !!(spi->mode & SPI_CS_HIGH); |
2815 | |
2816 | /* Chip select logic is inverted from spi_set_cs() */ |
2817 | if (chip && chip->cs_control) |
2818 | chip->cs_control(!enable); |
2819 | |
2820 | - if (!enable) |
2821 | + /* |
2822 | + * DW SPI controller demands any native CS being set in order to |
2823 | + * proceed with data transfer. So in order to activate the SPI |
2824 | + * communications we must set a corresponding bit in the Slave |
2825 | + * Enable register no matter whether the SPI core is configured to |
2826 | + * support active-high or active-low CS level. |
2827 | + */ |
2828 | + if (cs_high == enable) |
2829 | dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select)); |
2830 | else if (dws->cs_override) |
2831 | dw_writel(dws, DW_SPI_SER, 0); |
2832 | @@ -524,7 +532,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) |
2833 | } |
2834 | } |
2835 | |
2836 | - ret = devm_spi_register_controller(dev, master); |
2837 | + ret = spi_register_controller(master); |
2838 | if (ret) { |
2839 | dev_err(&master->dev, "problem registering spi master\n"); |
2840 | goto err_dma_exit; |
2841 | @@ -548,6 +556,8 @@ void dw_spi_remove_host(struct dw_spi *dws) |
2842 | { |
2843 | dw_spi_debugfs_remove(dws); |
2844 | |
2845 | + spi_unregister_controller(dws->master); |
2846 | + |
2847 | if (dws->dma_ops && dws->dma_ops->dma_exit) |
2848 | dws->dma_ops->dma_exit(dws); |
2849 | |
2850 | diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
2851 | index 723145673206..d0d6f1bda1b6 100644 |
2852 | --- a/drivers/spi/spi-pxa2xx.c |
2853 | +++ b/drivers/spi/spi-pxa2xx.c |
2854 | @@ -1880,7 +1880,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) |
2855 | |
2856 | /* Register with the SPI framework */ |
2857 | platform_set_drvdata(pdev, drv_data); |
2858 | - status = devm_spi_register_controller(&pdev->dev, controller); |
2859 | + status = spi_register_controller(controller); |
2860 | if (status != 0) { |
2861 | dev_err(&pdev->dev, "problem registering spi controller\n"); |
2862 | goto out_error_pm_runtime_enabled; |
2863 | @@ -1889,7 +1889,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) |
2864 | return status; |
2865 | |
2866 | out_error_pm_runtime_enabled: |
2867 | - pm_runtime_put_noidle(&pdev->dev); |
2868 | pm_runtime_disable(&pdev->dev); |
2869 | |
2870 | out_error_clock_enabled: |
2871 | @@ -1916,6 +1915,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) |
2872 | |
2873 | pm_runtime_get_sync(&pdev->dev); |
2874 | |
2875 | + spi_unregister_controller(drv_data->controller); |
2876 | + |
2877 | /* Disable the SSP at the peripheral and SOC level */ |
2878 | pxa2xx_spi_write(drv_data, SSCR0, 0); |
2879 | clk_disable_unprepare(ssp->clk); |
2880 | diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c |
2881 | index c186d3a944cd..6bfbf0cfcf63 100644 |
2882 | --- a/drivers/spi/spi.c |
2883 | +++ b/drivers/spi/spi.c |
2884 | @@ -2581,6 +2581,8 @@ void spi_unregister_controller(struct spi_controller *ctlr) |
2885 | struct spi_controller *found; |
2886 | int id = ctlr->bus_num; |
2887 | |
2888 | + device_for_each_child(&ctlr->dev, NULL, __unregister); |
2889 | + |
2890 | /* First make sure that this controller was ever added */ |
2891 | mutex_lock(&board_lock); |
2892 | found = idr_find(&spi_master_idr, id); |
2893 | @@ -2593,7 +2595,6 @@ void spi_unregister_controller(struct spi_controller *ctlr) |
2894 | list_del(&ctlr->list); |
2895 | mutex_unlock(&board_lock); |
2896 | |
2897 | - device_for_each_child(&ctlr->dev, NULL, __unregister); |
2898 | device_unregister(&ctlr->dev); |
2899 | /* free bus id */ |
2900 | mutex_lock(&board_lock); |
2901 | diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c |
2902 | index be8d9702cbb2..4b84fd4483e1 100644 |
2903 | --- a/drivers/video/fbdev/vt8500lcdfb.c |
2904 | +++ b/drivers/video/fbdev/vt8500lcdfb.c |
2905 | @@ -230,6 +230,7 @@ static int vt8500lcd_blank(int blank, struct fb_info *info) |
2906 | info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) |
2907 | for (i = 0; i < 256; i++) |
2908 | vt8500lcd_setcolreg(i, 0, 0, 0, 0, info); |
2909 | + fallthrough; |
2910 | case FB_BLANK_UNBLANK: |
2911 | if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR || |
2912 | info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) |
2913 | diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c |
2914 | index 3be07807edcd..e30f9427b335 100644 |
2915 | --- a/drivers/video/fbdev/w100fb.c |
2916 | +++ b/drivers/video/fbdev/w100fb.c |
2917 | @@ -588,6 +588,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) |
2918 | memsize=par->mach->mem->size; |
2919 | memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize); |
2920 | vfree(par->saved_extmem); |
2921 | + par->saved_extmem = NULL; |
2922 | } |
2923 | if (par->saved_intmem) { |
2924 | memsize=MEM_INT_SIZE; |
2925 | @@ -596,6 +597,7 @@ static void w100fb_restore_vidmem(struct w100fb_par *par) |
2926 | else |
2927 | memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize); |
2928 | vfree(par->saved_intmem); |
2929 | + par->saved_intmem = NULL; |
2930 | } |
2931 | } |
2932 | |
2933 | diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c |
2934 | index 8ed89f032ebf..e0e62149a6f4 100644 |
2935 | --- a/drivers/watchdog/imx_sc_wdt.c |
2936 | +++ b/drivers/watchdog/imx_sc_wdt.c |
2937 | @@ -177,6 +177,11 @@ static int imx_sc_wdt_probe(struct platform_device *pdev) |
2938 | wdog->timeout = DEFAULT_TIMEOUT; |
2939 | |
2940 | watchdog_init_timeout(wdog, 0, dev); |
2941 | + |
2942 | + ret = imx_sc_wdt_set_timeout(wdog, wdog->timeout); |
2943 | + if (ret) |
2944 | + return ret; |
2945 | + |
2946 | watchdog_stop_on_reboot(wdog); |
2947 | watchdog_stop_on_unregister(wdog); |
2948 | |
2949 | diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c |
2950 | index c57c71b7d53d..ffe9bd843922 100644 |
2951 | --- a/drivers/xen/pvcalls-back.c |
2952 | +++ b/drivers/xen/pvcalls-back.c |
2953 | @@ -1087,7 +1087,8 @@ static void set_backend_state(struct xenbus_device *dev, |
2954 | case XenbusStateInitialised: |
2955 | switch (state) { |
2956 | case XenbusStateConnected: |
2957 | - backend_connect(dev); |
2958 | + if (backend_connect(dev)) |
2959 | + return; |
2960 | xenbus_switch_state(dev, XenbusStateConnected); |
2961 | break; |
2962 | case XenbusStateClosing: |
2963 | diff --git a/fs/aio.c b/fs/aio.c |
2964 | index 4115d5ad6b90..47bb7b5685ba 100644 |
2965 | --- a/fs/aio.c |
2966 | +++ b/fs/aio.c |
2967 | @@ -176,6 +176,7 @@ struct fsync_iocb { |
2968 | struct file *file; |
2969 | struct work_struct work; |
2970 | bool datasync; |
2971 | + struct cred *creds; |
2972 | }; |
2973 | |
2974 | struct poll_iocb { |
2975 | @@ -1589,8 +1590,11 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, |
2976 | static void aio_fsync_work(struct work_struct *work) |
2977 | { |
2978 | struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); |
2979 | + const struct cred *old_cred = override_creds(iocb->fsync.creds); |
2980 | |
2981 | iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); |
2982 | + revert_creds(old_cred); |
2983 | + put_cred(iocb->fsync.creds); |
2984 | iocb_put(iocb); |
2985 | } |
2986 | |
2987 | @@ -1604,6 +1608,10 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, |
2988 | if (unlikely(!req->file->f_op->fsync)) |
2989 | return -EINVAL; |
2990 | |
2991 | + req->creds = prepare_creds(); |
2992 | + if (!req->creds) |
2993 | + return -ENOMEM; |
2994 | + |
2995 | req->datasync = datasync; |
2996 | INIT_WORK(&req->work, aio_fsync_work); |
2997 | schedule_work(&req->work); |
2998 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
2999 | index c8f304cae3f3..06b1a86d76b1 100644 |
3000 | --- a/fs/cifs/smb2pdu.c |
3001 | +++ b/fs/cifs/smb2pdu.c |
3002 | @@ -2747,7 +2747,9 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, |
3003 | * response size smaller. |
3004 | */ |
3005 | req->MaxOutputResponse = cpu_to_le32(max_response_size); |
3006 | - |
3007 | + req->sync_hdr.CreditCharge = |
3008 | + cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size), |
3009 | + SMB2_MAX_BUFFER_SIZE)); |
3010 | if (is_fsctl) |
3011 | req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); |
3012 | else |
3013 | diff --git a/fs/fat/inode.c b/fs/fat/inode.c |
3014 | index d40cbad16659..3d5ad11aacc5 100644 |
3015 | --- a/fs/fat/inode.c |
3016 | +++ b/fs/fat/inode.c |
3017 | @@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b, |
3018 | goto out; |
3019 | } |
3020 | |
3021 | + if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) { |
3022 | + if (!silent) |
3023 | + fat_msg(sb, KERN_ERR, "bogus number of FAT sectors"); |
3024 | + goto out; |
3025 | + } |
3026 | + |
3027 | error = 0; |
3028 | |
3029 | out: |
3030 | diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c |
3031 | index 8303b44a5068..d2ed4dc4434c 100644 |
3032 | --- a/fs/gfs2/lops.c |
3033 | +++ b/fs/gfs2/lops.c |
3034 | @@ -504,12 +504,12 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, |
3035 | unsigned int bsize = sdp->sd_sb.sb_bsize, off; |
3036 | unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift; |
3037 | unsigned int shift = PAGE_SHIFT - bsize_shift; |
3038 | - unsigned int max_bio_size = 2 * 1024 * 1024; |
3039 | + unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift; |
3040 | struct gfs2_journal_extent *je; |
3041 | int sz, ret = 0; |
3042 | struct bio *bio = NULL; |
3043 | struct page *page = NULL; |
3044 | - bool bio_chained = false, done = false; |
3045 | + bool done = false; |
3046 | errseq_t since; |
3047 | |
3048 | memset(head, 0, sizeof(*head)); |
3049 | @@ -532,10 +532,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, |
3050 | off = 0; |
3051 | } |
3052 | |
3053 | - if (!bio || (bio_chained && !off) || |
3054 | - bio->bi_iter.bi_size >= max_bio_size) { |
3055 | - /* start new bio */ |
3056 | - } else { |
3057 | + if (bio && (off || block < blocks_submitted + max_blocks)) { |
3058 | sector_t sector = dblock << sdp->sd_fsb2bb_shift; |
3059 | |
3060 | if (bio_end_sector(bio) == sector) { |
3061 | @@ -548,19 +545,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, |
3062 | (PAGE_SIZE - off) >> bsize_shift; |
3063 | |
3064 | bio = gfs2_chain_bio(bio, blocks); |
3065 | - bio_chained = true; |
3066 | goto add_block_to_new_bio; |
3067 | } |
3068 | } |
3069 | |
3070 | if (bio) { |
3071 | - blocks_submitted = block + 1; |
3072 | + blocks_submitted = block; |
3073 | submit_bio(bio); |
3074 | } |
3075 | |
3076 | bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read); |
3077 | bio->bi_opf = REQ_OP_READ; |
3078 | - bio_chained = false; |
3079 | add_block_to_new_bio: |
3080 | sz = bio_add_page(bio, page, bsize, off); |
3081 | BUG_ON(sz != bsize); |
3082 | @@ -568,7 +563,7 @@ block_added: |
3083 | off += bsize; |
3084 | if (off == PAGE_SIZE) |
3085 | page = NULL; |
3086 | - if (blocks_submitted < 2 * max_bio_size >> bsize_shift) { |
3087 | + if (blocks_submitted <= blocks_read + max_blocks) { |
3088 | /* Keep at least one bio in flight */ |
3089 | continue; |
3090 | } |
3091 | diff --git a/fs/io_uring.c b/fs/io_uring.c |
3092 | index 2050100e6e84..7fa3cd3fff4d 100644 |
3093 | --- a/fs/io_uring.c |
3094 | +++ b/fs/io_uring.c |
3095 | @@ -3498,8 +3498,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, |
3096 | |
3097 | ret = 0; |
3098 | if (!pages || nr_pages > got_pages) { |
3099 | - kfree(vmas); |
3100 | - kfree(pages); |
3101 | + kvfree(vmas); |
3102 | + kvfree(pages); |
3103 | pages = kvmalloc_array(nr_pages, sizeof(struct page *), |
3104 | GFP_KERNEL); |
3105 | vmas = kvmalloc_array(nr_pages, |
3106 | diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c |
3107 | index 445eef41bfaf..91b58c897f92 100644 |
3108 | --- a/fs/nilfs2/segment.c |
3109 | +++ b/fs/nilfs2/segment.c |
3110 | @@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root) |
3111 | if (!nilfs->ns_writer) |
3112 | return -ENOMEM; |
3113 | |
3114 | + inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL); |
3115 | + |
3116 | err = nilfs_segctor_start_thread(nilfs->ns_writer); |
3117 | if (err) { |
3118 | kfree(nilfs->ns_writer); |
3119 | diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c |
3120 | index deb13f0a0f7d..d24548ed31b9 100644 |
3121 | --- a/fs/notify/fanotify/fanotify.c |
3122 | +++ b/fs/notify/fanotify/fanotify.c |
3123 | @@ -171,6 +171,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, |
3124 | if (!fsnotify_iter_should_report_type(iter_info, type)) |
3125 | continue; |
3126 | mark = iter_info->marks[type]; |
3127 | + |
3128 | + /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */ |
3129 | + marks_ignored_mask |= mark->ignored_mask; |
3130 | + |
3131 | /* |
3132 | * If the event is on dir and this mark doesn't care about |
3133 | * events on dir, don't send it! |
3134 | @@ -188,7 +192,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, |
3135 | continue; |
3136 | |
3137 | marks_mask |= mark->mask; |
3138 | - marks_ignored_mask |= mark->ignored_mask; |
3139 | } |
3140 | |
3141 | test_mask = event_mask & marks_mask & ~marks_ignored_mask; |
3142 | diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c |
3143 | index b801c6353100..ec5eca5a96f4 100644 |
3144 | --- a/fs/overlayfs/copy_up.c |
3145 | +++ b/fs/overlayfs/copy_up.c |
3146 | @@ -40,7 +40,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new) |
3147 | { |
3148 | ssize_t list_size, size, value_size = 0; |
3149 | char *buf, *name, *value = NULL; |
3150 | - int uninitialized_var(error); |
3151 | + int error = 0; |
3152 | size_t slen; |
3153 | |
3154 | if (!(old->d_inode->i_opflags & IOP_XATTR) || |
3155 | diff --git a/fs/proc/inode.c b/fs/proc/inode.c |
3156 | index dbe43a50caf2..3f0c89001fcf 100644 |
3157 | --- a/fs/proc/inode.c |
3158 | +++ b/fs/proc/inode.c |
3159 | @@ -448,7 +448,7 @@ const struct inode_operations proc_link_inode_operations = { |
3160 | |
3161 | struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) |
3162 | { |
3163 | - struct inode *inode = new_inode_pseudo(sb); |
3164 | + struct inode *inode = new_inode(sb); |
3165 | |
3166 | if (inode) { |
3167 | inode->i_ino = de->low_ino; |
3168 | diff --git a/fs/proc/self.c b/fs/proc/self.c |
3169 | index 57c0a1047250..32af065397f8 100644 |
3170 | --- a/fs/proc/self.c |
3171 | +++ b/fs/proc/self.c |
3172 | @@ -43,7 +43,7 @@ int proc_setup_self(struct super_block *s) |
3173 | inode_lock(root_inode); |
3174 | self = d_alloc_name(s->s_root, "self"); |
3175 | if (self) { |
3176 | - struct inode *inode = new_inode_pseudo(s); |
3177 | + struct inode *inode = new_inode(s); |
3178 | if (inode) { |
3179 | inode->i_ino = self_inum; |
3180 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); |
3181 | diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c |
3182 | index f61ae53533f5..fac9e50b33a6 100644 |
3183 | --- a/fs/proc/thread_self.c |
3184 | +++ b/fs/proc/thread_self.c |
3185 | @@ -43,7 +43,7 @@ int proc_setup_thread_self(struct super_block *s) |
3186 | inode_lock(root_inode); |
3187 | thread_self = d_alloc_name(s->s_root, "thread-self"); |
3188 | if (thread_self) { |
3189 | - struct inode *inode = new_inode_pseudo(s); |
3190 | + struct inode *inode = new_inode(s); |
3191 | if (inode) { |
3192 | inode->i_ino = thread_self_inum; |
3193 | inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode); |
3194 | diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h |
3195 | index dae64600ccbf..b6d7347ccda7 100644 |
3196 | --- a/include/asm-generic/vmlinux.lds.h |
3197 | +++ b/include/asm-generic/vmlinux.lds.h |
3198 | @@ -496,10 +496,12 @@ |
3199 | __start___modver = .; \ |
3200 | KEEP(*(__modver)) \ |
3201 | __stop___modver = .; \ |
3202 | - . = ALIGN((align)); \ |
3203 | - __end_rodata = .; \ |
3204 | } \ |
3205 | - . = ALIGN((align)); |
3206 | + \ |
3207 | + BTF \ |
3208 | + \ |
3209 | + . = ALIGN((align)); \ |
3210 | + __end_rodata = .; |
3211 | |
3212 | /* RODATA & RO_DATA provided for backward compatibility. |
3213 | * All archs are supposed to use RO_DATA() */ |
3214 | @@ -588,6 +590,20 @@ |
3215 | __stop___ex_table = .; \ |
3216 | } |
3217 | |
3218 | +/* |
3219 | + * .BTF |
3220 | + */ |
3221 | +#ifdef CONFIG_DEBUG_INFO_BTF |
3222 | +#define BTF \ |
3223 | + .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \ |
3224 | + __start_BTF = .; \ |
3225 | + *(.BTF) \ |
3226 | + __stop_BTF = .; \ |
3227 | + } |
3228 | +#else |
3229 | +#define BTF |
3230 | +#endif |
3231 | + |
3232 | /* |
3233 | * Init task |
3234 | */ |
3235 | diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h |
3236 | index f236f5b931b2..7fdd7f355b52 100644 |
3237 | --- a/include/linux/elfnote.h |
3238 | +++ b/include/linux/elfnote.h |
3239 | @@ -54,7 +54,7 @@ |
3240 | .popsection ; |
3241 | |
3242 | #define ELFNOTE(name, type, desc) \ |
3243 | - ELFNOTE_START(name, type, "") \ |
3244 | + ELFNOTE_START(name, type, "a") \ |
3245 | desc ; \ |
3246 | ELFNOTE_END |
3247 | |
3248 | diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h |
3249 | index 678b0a5797a0..21aa6d736e99 100644 |
3250 | --- a/include/linux/kvm_host.h |
3251 | +++ b/include/linux/kvm_host.h |
3252 | @@ -1376,8 +1376,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp, |
3253 | } |
3254 | #endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */ |
3255 | |
3256 | -int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
3257 | - unsigned long start, unsigned long end, bool blockable); |
3258 | +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
3259 | + unsigned long start, unsigned long end); |
3260 | |
3261 | #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE |
3262 | int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu); |
3263 | diff --git a/include/linux/mm.h b/include/linux/mm.h |
3264 | index 53bad834adf5..3285dae06c03 100644 |
3265 | --- a/include/linux/mm.h |
3266 | +++ b/include/linux/mm.h |
3267 | @@ -694,6 +694,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) |
3268 | } |
3269 | |
3270 | extern void kvfree(const void *addr); |
3271 | +extern void kvfree_sensitive(const void *addr, size_t len); |
3272 | |
3273 | /* |
3274 | * Mapcount of compound page as a whole, does not include mapped sub-pages. |
3275 | diff --git a/include/linux/padata.h b/include/linux/padata.h |
3276 | index cccab7a59787..fa35dcfbd13f 100644 |
3277 | --- a/include/linux/padata.h |
3278 | +++ b/include/linux/padata.h |
3279 | @@ -145,7 +145,8 @@ struct padata_shell { |
3280 | /** |
3281 | * struct padata_instance - The overall control structure. |
3282 | * |
3283 | - * @cpu_notifier: cpu hotplug notifier. |
3284 | + * @cpu_online_node: Linkage for CPU online callback. |
3285 | + * @cpu_dead_node: Linkage for CPU offline callback. |
3286 | * @parallel_wq: The workqueue used for parallel work. |
3287 | * @serial_wq: The workqueue used for serial work. |
3288 | * @pslist: List of padata_shell objects attached to this instance. |
3289 | @@ -160,7 +161,8 @@ struct padata_shell { |
3290 | * @flags: padata flags. |
3291 | */ |
3292 | struct padata_instance { |
3293 | - struct hlist_node node; |
3294 | + struct hlist_node cpu_online_node; |
3295 | + struct hlist_node cpu_dead_node; |
3296 | struct workqueue_struct *parallel_wq; |
3297 | struct workqueue_struct *serial_wq; |
3298 | struct list_head pslist; |
3299 | diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h |
3300 | index 86281ac7c305..860e0f843c12 100644 |
3301 | --- a/include/linux/set_memory.h |
3302 | +++ b/include/linux/set_memory.h |
3303 | @@ -26,7 +26,7 @@ static inline int set_direct_map_default_noflush(struct page *page) |
3304 | #endif |
3305 | |
3306 | #ifndef set_mce_nospec |
3307 | -static inline int set_mce_nospec(unsigned long pfn) |
3308 | +static inline int set_mce_nospec(unsigned long pfn, bool unmap) |
3309 | { |
3310 | return 0; |
3311 | } |
3312 | diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h |
3313 | index d0019d3395cf..59802eb8d2cc 100644 |
3314 | --- a/include/net/inet_hashtables.h |
3315 | +++ b/include/net/inet_hashtables.h |
3316 | @@ -185,6 +185,12 @@ static inline spinlock_t *inet_ehash_lockp( |
3317 | |
3318 | int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo); |
3319 | |
3320 | +static inline void inet_hashinfo2_free_mod(struct inet_hashinfo *h) |
3321 | +{ |
3322 | + kfree(h->lhash2); |
3323 | + h->lhash2 = NULL; |
3324 | +} |
3325 | + |
3326 | static inline void inet_ehash_locks_free(struct inet_hashinfo *hashinfo) |
3327 | { |
3328 | kvfree(hashinfo->ehash_locks); |
3329 | diff --git a/kernel/bpf/sysfs_btf.c b/kernel/bpf/sysfs_btf.c |
3330 | index 7ae5dddd1fe6..3b495773de5a 100644 |
3331 | --- a/kernel/bpf/sysfs_btf.c |
3332 | +++ b/kernel/bpf/sysfs_btf.c |
3333 | @@ -9,15 +9,15 @@ |
3334 | #include <linux/sysfs.h> |
3335 | |
3336 | /* See scripts/link-vmlinux.sh, gen_btf() func for details */ |
3337 | -extern char __weak _binary__btf_vmlinux_bin_start[]; |
3338 | -extern char __weak _binary__btf_vmlinux_bin_end[]; |
3339 | +extern char __weak __start_BTF[]; |
3340 | +extern char __weak __stop_BTF[]; |
3341 | |
3342 | static ssize_t |
3343 | btf_vmlinux_read(struct file *file, struct kobject *kobj, |
3344 | struct bin_attribute *bin_attr, |
3345 | char *buf, loff_t off, size_t len) |
3346 | { |
3347 | - memcpy(buf, _binary__btf_vmlinux_bin_start + off, len); |
3348 | + memcpy(buf, __start_BTF + off, len); |
3349 | return len; |
3350 | } |
3351 | |
3352 | @@ -30,15 +30,14 @@ static struct kobject *btf_kobj; |
3353 | |
3354 | static int __init btf_vmlinux_init(void) |
3355 | { |
3356 | - if (!_binary__btf_vmlinux_bin_start) |
3357 | + if (!__start_BTF) |
3358 | return 0; |
3359 | |
3360 | btf_kobj = kobject_create_and_add("btf", kernel_kobj); |
3361 | if (!btf_kobj) |
3362 | return -ENOMEM; |
3363 | |
3364 | - bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end - |
3365 | - _binary__btf_vmlinux_bin_start; |
3366 | + bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF; |
3367 | |
3368 | return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux); |
3369 | } |
3370 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
3371 | index 7382fc95d41e..aaaf50b25cc9 100644 |
3372 | --- a/kernel/events/core.c |
3373 | +++ b/kernel/events/core.c |
3374 | @@ -93,11 +93,11 @@ static void remote_function(void *data) |
3375 | * @info: the function call argument |
3376 | * |
3377 | * Calls the function @func when the task is currently running. This might |
3378 | - * be on the current CPU, which just calls the function directly |
3379 | + * be on the current CPU, which just calls the function directly. This will |
3380 | + * retry due to any failures in smp_call_function_single(), such as if the |
3381 | + * task_cpu() goes offline concurrently. |
3382 | * |
3383 | - * returns: @func return value, or |
3384 | - * -ESRCH - when the process isn't running |
3385 | - * -EAGAIN - when the process moved away |
3386 | + * returns @func return value or -ESRCH when the process isn't running |
3387 | */ |
3388 | static int |
3389 | task_function_call(struct task_struct *p, remote_function_f func, void *info) |
3390 | @@ -110,11 +110,16 @@ task_function_call(struct task_struct *p, remote_function_f func, void *info) |
3391 | }; |
3392 | int ret; |
3393 | |
3394 | - do { |
3395 | - ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1); |
3396 | - if (!ret) |
3397 | - ret = data.ret; |
3398 | - } while (ret == -EAGAIN); |
3399 | + for (;;) { |
3400 | + ret = smp_call_function_single(task_cpu(p), remote_function, |
3401 | + &data, 1); |
3402 | + ret = !ret ? data.ret : -EAGAIN; |
3403 | + |
3404 | + if (ret != -EAGAIN) |
3405 | + break; |
3406 | + |
3407 | + cond_resched(); |
3408 | + } |
3409 | |
3410 | return ret; |
3411 | } |
3412 | diff --git a/kernel/padata.c b/kernel/padata.c |
3413 | index c4b774331e46..92a4867e8adc 100644 |
3414 | --- a/kernel/padata.c |
3415 | +++ b/kernel/padata.c |
3416 | @@ -782,7 +782,7 @@ static int padata_cpu_online(unsigned int cpu, struct hlist_node *node) |
3417 | struct padata_instance *pinst; |
3418 | int ret; |
3419 | |
3420 | - pinst = hlist_entry_safe(node, struct padata_instance, node); |
3421 | + pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node); |
3422 | if (!pinst_has_cpu(pinst, cpu)) |
3423 | return 0; |
3424 | |
3425 | @@ -797,7 +797,7 @@ static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node) |
3426 | struct padata_instance *pinst; |
3427 | int ret; |
3428 | |
3429 | - pinst = hlist_entry_safe(node, struct padata_instance, node); |
3430 | + pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node); |
3431 | if (!pinst_has_cpu(pinst, cpu)) |
3432 | return 0; |
3433 | |
3434 | @@ -813,8 +813,9 @@ static enum cpuhp_state hp_online; |
3435 | static void __padata_free(struct padata_instance *pinst) |
3436 | { |
3437 | #ifdef CONFIG_HOTPLUG_CPU |
3438 | - cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node); |
3439 | - cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node); |
3440 | + cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, |
3441 | + &pinst->cpu_dead_node); |
3442 | + cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node); |
3443 | #endif |
3444 | |
3445 | WARN_ON(!list_empty(&pinst->pslist)); |
3446 | @@ -1020,9 +1021,10 @@ static struct padata_instance *padata_alloc(const char *name, |
3447 | mutex_init(&pinst->lock); |
3448 | |
3449 | #ifdef CONFIG_HOTPLUG_CPU |
3450 | - cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node); |
3451 | + cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, |
3452 | + &pinst->cpu_online_node); |
3453 | cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD, |
3454 | - &pinst->node); |
3455 | + &pinst->cpu_dead_node); |
3456 | #endif |
3457 | |
3458 | put_online_cpus(); |
3459 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
3460 | index 193b6ab74d7f..8a0e6bdba50d 100644 |
3461 | --- a/kernel/sched/fair.c |
3462 | +++ b/kernel/sched/fair.c |
3463 | @@ -2678,7 +2678,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr) |
3464 | /* |
3465 | * We don't care about NUMA placement if we don't have memory. |
3466 | */ |
3467 | - if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) |
3468 | + if ((curr->flags & (PF_EXITING | PF_KTHREAD)) || work->next != work) |
3469 | return; |
3470 | |
3471 | /* |
3472 | diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c |
3473 | index 717c940112f9..8ad5ba2b86e2 100644 |
3474 | --- a/lib/lzo/lzo1x_compress.c |
3475 | +++ b/lib/lzo/lzo1x_compress.c |
3476 | @@ -268,6 +268,19 @@ m_len_done: |
3477 | *op++ = (M4_MARKER | ((m_off >> 11) & 8) |
3478 | | (m_len - 2)); |
3479 | else { |
3480 | + if (unlikely(((m_off & 0x403f) == 0x403f) |
3481 | + && (m_len >= 261) |
3482 | + && (m_len <= 264)) |
3483 | + && likely(bitstream_version)) { |
3484 | + // Under lzo-rle, block copies |
3485 | + // for 261 <= length <= 264 and |
3486 | + // (distance & 0x80f3) == 0x80f3 |
3487 | + // can result in ambiguous |
3488 | + // output. Adjust length |
3489 | + // to 260 to prevent ambiguity. |
3490 | + ip -= m_len - 260; |
3491 | + m_len = 260; |
3492 | + } |
3493 | m_len -= M4_MAX_LEN; |
3494 | *op++ = (M4_MARKER | ((m_off >> 11) & 8)); |
3495 | while (unlikely(m_len > 255)) { |
3496 | diff --git a/mm/gup.c b/mm/gup.c |
3497 | index 745b4036cdfd..4a8e969a6e59 100644 |
3498 | --- a/mm/gup.c |
3499 | +++ b/mm/gup.c |
3500 | @@ -161,13 +161,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, |
3501 | } |
3502 | |
3503 | /* |
3504 | - * FOLL_FORCE can write to even unwritable pte's, but only |
3505 | - * after we've gone through a COW cycle and they are dirty. |
3506 | + * FOLL_FORCE or a forced COW break can write even to unwritable pte's, |
3507 | + * but only after we've gone through a COW cycle and they are dirty. |
3508 | */ |
3509 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) |
3510 | { |
3511 | - return pte_write(pte) || |
3512 | - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); |
3513 | + return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); |
3514 | +} |
3515 | + |
3516 | +/* |
3517 | + * A (separate) COW fault might break the page the other way and |
3518 | + * get_user_pages() would return the page from what is now the wrong |
3519 | + * VM. So we need to force a COW break at GUP time even for reads. |
3520 | + */ |
3521 | +static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) |
3522 | +{ |
3523 | + return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); |
3524 | } |
3525 | |
3526 | static struct page *follow_page_pte(struct vm_area_struct *vma, |
3527 | @@ -823,12 +832,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, |
3528 | goto out; |
3529 | } |
3530 | if (is_vm_hugetlb_page(vma)) { |
3531 | + if (should_force_cow_break(vma, foll_flags)) |
3532 | + foll_flags |= FOLL_WRITE; |
3533 | i = follow_hugetlb_page(mm, vma, pages, vmas, |
3534 | &start, &nr_pages, i, |
3535 | - gup_flags, nonblocking); |
3536 | + foll_flags, nonblocking); |
3537 | continue; |
3538 | } |
3539 | } |
3540 | + |
3541 | + if (should_force_cow_break(vma, foll_flags)) |
3542 | + foll_flags |= FOLL_WRITE; |
3543 | + |
3544 | retry: |
3545 | /* |
3546 | * If we have a pending SIGKILL, don't keep faulting pages and |
3547 | @@ -2316,6 +2331,10 @@ static bool gup_fast_permitted(unsigned long start, unsigned long end) |
3548 | * |
3549 | * If the architecture does not support this function, simply return with no |
3550 | * pages pinned. |
3551 | + * |
3552 | + * Careful, careful! COW breaking can go either way, so a non-write |
3553 | + * access can get ambiguous page results. If you call this function without |
3554 | + * 'write' set, you'd better be sure that you're ok with that ambiguity. |
3555 | */ |
3556 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, |
3557 | struct page **pages) |
3558 | @@ -2343,6 +2362,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, |
3559 | * |
3560 | * We do not adopt an rcu_read_lock(.) here as we also want to |
3561 | * block IPIs that come from THPs splitting. |
3562 | + * |
3563 | + * NOTE! We allow read-only gup_fast() here, but you'd better be |
3564 | + * careful about possible COW pages. You'll get _a_ COW page, but |
3565 | + * not necessarily the one you intended to get depending on what |
3566 | + * COW event happens after this. COW may break the page copy in a |
3567 | + * random direction. |
3568 | */ |
3569 | |
3570 | if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && |
3571 | @@ -2415,10 +2440,17 @@ int get_user_pages_fast(unsigned long start, int nr_pages, |
3572 | if (unlikely(!access_ok((void __user *)start, len))) |
3573 | return -EFAULT; |
3574 | |
3575 | + /* |
3576 | + * The FAST_GUP case requires FOLL_WRITE even for pure reads, |
3577 | + * because get_user_pages() may need to cause an early COW in |
3578 | + * order to avoid confusing the normal COW routines. So only |
3579 | + * targets that are already writable are safe to do by just |
3580 | + * looking at the page tables. |
3581 | + */ |
3582 | if (IS_ENABLED(CONFIG_HAVE_FAST_GUP) && |
3583 | gup_fast_permitted(start, end)) { |
3584 | local_irq_disable(); |
3585 | - gup_pgd_range(addr, end, gup_flags, pages, &nr); |
3586 | + gup_pgd_range(addr, end, gup_flags | FOLL_WRITE, pages, &nr); |
3587 | local_irq_enable(); |
3588 | ret = nr; |
3589 | } |
3590 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
3591 | index 0d96831b6ded..7ec5710afc99 100644 |
3592 | --- a/mm/huge_memory.c |
3593 | +++ b/mm/huge_memory.c |
3594 | @@ -1454,13 +1454,12 @@ out_unlock: |
3595 | } |
3596 | |
3597 | /* |
3598 | - * FOLL_FORCE can write to even unwritable pmd's, but only |
3599 | - * after we've gone through a COW cycle and they are dirty. |
3600 | + * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, |
3601 | + * but only after we've gone through a COW cycle and they are dirty. |
3602 | */ |
3603 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) |
3604 | { |
3605 | - return pmd_write(pmd) || |
3606 | - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); |
3607 | + return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); |
3608 | } |
3609 | |
3610 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
3611 | diff --git a/mm/slub.c b/mm/slub.c |
3612 | index af44807d5b05..fca33abd6c42 100644 |
3613 | --- a/mm/slub.c |
3614 | +++ b/mm/slub.c |
3615 | @@ -5776,8 +5776,10 @@ static int sysfs_slab_add(struct kmem_cache *s) |
3616 | |
3617 | s->kobj.kset = kset; |
3618 | err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); |
3619 | - if (err) |
3620 | + if (err) { |
3621 | + kobject_put(&s->kobj); |
3622 | goto out; |
3623 | + } |
3624 | |
3625 | err = sysfs_create_group(&s->kobj, &slab_attr_group); |
3626 | if (err) |
3627 | diff --git a/mm/util.c b/mm/util.c |
3628 | index 3ad6db9a722e..ab358c64bbd3 100644 |
3629 | --- a/mm/util.c |
3630 | +++ b/mm/util.c |
3631 | @@ -594,6 +594,24 @@ void kvfree(const void *addr) |
3632 | } |
3633 | EXPORT_SYMBOL(kvfree); |
3634 | |
3635 | +/** |
3636 | + * kvfree_sensitive - Free a data object containing sensitive information. |
3637 | + * @addr: address of the data object to be freed. |
3638 | + * @len: length of the data object. |
3639 | + * |
3640 | + * Use the special memzero_explicit() function to clear the content of a |
3641 | + * kvmalloc'ed object containing sensitive data to make sure that the |
3642 | + * compiler won't optimize out the data clearing. |
3643 | + */ |
3644 | +void kvfree_sensitive(const void *addr, size_t len) |
3645 | +{ |
3646 | + if (likely(!ZERO_OR_NULL_PTR(addr))) { |
3647 | + memzero_explicit((void *)addr, len); |
3648 | + kvfree(addr); |
3649 | + } |
3650 | +} |
3651 | +EXPORT_SYMBOL(kvfree_sensitive); |
3652 | + |
3653 | static inline void *__page_rmapping(struct page *page) |
3654 | { |
3655 | unsigned long mapping; |
3656 | diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c |
3657 | index 37908561a64b..b18cdf03edb3 100644 |
3658 | --- a/net/bridge/br_arp_nd_proxy.c |
3659 | +++ b/net/bridge/br_arp_nd_proxy.c |
3660 | @@ -276,6 +276,10 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p, |
3661 | ns_olen = request->len - (skb_network_offset(request) + |
3662 | sizeof(struct ipv6hdr)) - sizeof(*ns); |
3663 | for (i = 0; i < ns_olen - 1; i += (ns->opt[i + 1] << 3)) { |
3664 | + if (!ns->opt[i + 1]) { |
3665 | + kfree_skb(reply); |
3666 | + return; |
3667 | + } |
3668 | if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) { |
3669 | daddr = ns->opt + i + sizeof(struct nd_opt_hdr); |
3670 | break; |
3671 | diff --git a/net/dccp/proto.c b/net/dccp/proto.c |
3672 | index 5bad08dc4316..cb61a9d281f6 100644 |
3673 | --- a/net/dccp/proto.c |
3674 | +++ b/net/dccp/proto.c |
3675 | @@ -1139,14 +1139,14 @@ static int __init dccp_init(void) |
3676 | inet_hashinfo_init(&dccp_hashinfo); |
3677 | rc = inet_hashinfo2_init_mod(&dccp_hashinfo); |
3678 | if (rc) |
3679 | - goto out_fail; |
3680 | + goto out_free_percpu; |
3681 | rc = -ENOBUFS; |
3682 | dccp_hashinfo.bind_bucket_cachep = |
3683 | kmem_cache_create("dccp_bind_bucket", |
3684 | sizeof(struct inet_bind_bucket), 0, |
3685 | SLAB_HWCACHE_ALIGN, NULL); |
3686 | if (!dccp_hashinfo.bind_bucket_cachep) |
3687 | - goto out_free_percpu; |
3688 | + goto out_free_hashinfo2; |
3689 | |
3690 | /* |
3691 | * Size and allocate the main established and bind bucket |
3692 | @@ -1242,6 +1242,8 @@ out_free_dccp_ehash: |
3693 | free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order); |
3694 | out_free_bind_bucket_cachep: |
3695 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
3696 | +out_free_hashinfo2: |
3697 | + inet_hashinfo2_free_mod(&dccp_hashinfo); |
3698 | out_free_percpu: |
3699 | percpu_counter_destroy(&dccp_orphan_count); |
3700 | out_fail: |
3701 | @@ -1265,6 +1267,7 @@ static void __exit dccp_fini(void) |
3702 | kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep); |
3703 | dccp_ackvec_exit(); |
3704 | dccp_sysctl_exit(); |
3705 | + inet_hashinfo2_free_mod(&dccp_hashinfo); |
3706 | percpu_counter_destroy(&dccp_orphan_count); |
3707 | } |
3708 | |
3709 | diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c |
3710 | index f12fa8da6127..1b851fd82613 100644 |
3711 | --- a/net/ipv4/fib_trie.c |
3712 | +++ b/net/ipv4/fib_trie.c |
3713 | @@ -2455,6 +2455,7 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) |
3714 | " %zd bytes, size of tnode: %zd bytes.\n", |
3715 | LEAF_SIZE, TNODE_SIZE(0)); |
3716 | |
3717 | + rcu_read_lock(); |
3718 | for (h = 0; h < FIB_TABLE_HASHSZ; h++) { |
3719 | struct hlist_head *head = &net->ipv4.fib_table_hash[h]; |
3720 | struct fib_table *tb; |
3721 | @@ -2474,7 +2475,9 @@ static int fib_triestat_seq_show(struct seq_file *seq, void *v) |
3722 | trie_show_usage(seq, t->stats); |
3723 | #endif |
3724 | } |
3725 | + cond_resched_rcu(); |
3726 | } |
3727 | + rcu_read_unlock(); |
3728 | |
3729 | return 0; |
3730 | } |
3731 | diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c |
3732 | index 18d05403d3b5..5af97b4f5df3 100644 |
3733 | --- a/net/ipv6/ipv6_sockglue.c |
3734 | +++ b/net/ipv6/ipv6_sockglue.c |
3735 | @@ -183,14 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, |
3736 | retv = -EBUSY; |
3737 | break; |
3738 | } |
3739 | - } |
3740 | - if (sk->sk_protocol == IPPROTO_TCP && |
3741 | - sk->sk_prot != &tcpv6_prot) { |
3742 | - retv = -EBUSY; |
3743 | + } else if (sk->sk_protocol == IPPROTO_TCP) { |
3744 | + if (sk->sk_prot != &tcpv6_prot) { |
3745 | + retv = -EBUSY; |
3746 | + break; |
3747 | + } |
3748 | + } else { |
3749 | break; |
3750 | } |
3751 | - if (sk->sk_protocol != IPPROTO_TCP) |
3752 | - break; |
3753 | + |
3754 | if (sk->sk_state != TCP_ESTABLISHED) { |
3755 | retv = -ENOTCONN; |
3756 | break; |
3757 | diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c |
3758 | index bc734cfaa29e..c87af430107a 100644 |
3759 | --- a/net/sctp/ipv6.c |
3760 | +++ b/net/sctp/ipv6.c |
3761 | @@ -228,7 +228,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3762 | { |
3763 | struct sctp_association *asoc = t->asoc; |
3764 | struct dst_entry *dst = NULL; |
3765 | - struct flowi6 *fl6 = &fl->u.ip6; |
3766 | + struct flowi _fl; |
3767 | + struct flowi6 *fl6 = &_fl.u.ip6; |
3768 | struct sctp_bind_addr *bp; |
3769 | struct ipv6_pinfo *np = inet6_sk(sk); |
3770 | struct sctp_sockaddr_entry *laddr; |
3771 | @@ -238,7 +239,7 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3772 | enum sctp_scope scope; |
3773 | __u8 matchlen = 0; |
3774 | |
3775 | - memset(fl6, 0, sizeof(struct flowi6)); |
3776 | + memset(&_fl, 0, sizeof(_fl)); |
3777 | fl6->daddr = daddr->v6.sin6_addr; |
3778 | fl6->fl6_dport = daddr->v6.sin6_port; |
3779 | fl6->flowi6_proto = IPPROTO_SCTP; |
3780 | @@ -276,8 +277,11 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3781 | rcu_read_unlock(); |
3782 | |
3783 | dst = ip6_dst_lookup_flow(sock_net(sk), sk, fl6, final_p); |
3784 | - if (!asoc || saddr) |
3785 | + if (!asoc || saddr) { |
3786 | + t->dst = dst; |
3787 | + memcpy(fl, &_fl, sizeof(_fl)); |
3788 | goto out; |
3789 | + } |
3790 | |
3791 | bp = &asoc->base.bind_addr; |
3792 | scope = sctp_scope(daddr); |
3793 | @@ -300,6 +304,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3794 | if ((laddr->a.sa.sa_family == AF_INET6) && |
3795 | (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { |
3796 | rcu_read_unlock(); |
3797 | + t->dst = dst; |
3798 | + memcpy(fl, &_fl, sizeof(_fl)); |
3799 | goto out; |
3800 | } |
3801 | } |
3802 | @@ -338,6 +344,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3803 | if (!IS_ERR_OR_NULL(dst)) |
3804 | dst_release(dst); |
3805 | dst = bdst; |
3806 | + t->dst = dst; |
3807 | + memcpy(fl, &_fl, sizeof(_fl)); |
3808 | break; |
3809 | } |
3810 | |
3811 | @@ -351,6 +359,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3812 | dst_release(dst); |
3813 | dst = bdst; |
3814 | matchlen = bmatchlen; |
3815 | + t->dst = dst; |
3816 | + memcpy(fl, &_fl, sizeof(_fl)); |
3817 | } |
3818 | rcu_read_unlock(); |
3819 | |
3820 | @@ -359,14 +369,12 @@ out: |
3821 | struct rt6_info *rt; |
3822 | |
3823 | rt = (struct rt6_info *)dst; |
3824 | - t->dst = dst; |
3825 | t->dst_cookie = rt6_get_cookie(rt); |
3826 | pr_debug("rt6_dst:%pI6/%d rt6_src:%pI6\n", |
3827 | &rt->rt6i_dst.addr, rt->rt6i_dst.plen, |
3828 | - &fl6->saddr); |
3829 | + &fl->u.ip6.saddr); |
3830 | } else { |
3831 | t->dst = NULL; |
3832 | - |
3833 | pr_debug("no route\n"); |
3834 | } |
3835 | } |
3836 | diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c |
3837 | index 681ffb3545db..237c88eeb538 100644 |
3838 | --- a/net/sctp/protocol.c |
3839 | +++ b/net/sctp/protocol.c |
3840 | @@ -409,7 +409,8 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3841 | { |
3842 | struct sctp_association *asoc = t->asoc; |
3843 | struct rtable *rt; |
3844 | - struct flowi4 *fl4 = &fl->u.ip4; |
3845 | + struct flowi _fl; |
3846 | + struct flowi4 *fl4 = &_fl.u.ip4; |
3847 | struct sctp_bind_addr *bp; |
3848 | struct sctp_sockaddr_entry *laddr; |
3849 | struct dst_entry *dst = NULL; |
3850 | @@ -419,7 +420,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3851 | |
3852 | if (t->dscp & SCTP_DSCP_SET_MASK) |
3853 | tos = t->dscp & SCTP_DSCP_VAL_MASK; |
3854 | - memset(fl4, 0x0, sizeof(struct flowi4)); |
3855 | + memset(&_fl, 0x0, sizeof(_fl)); |
3856 | fl4->daddr = daddr->v4.sin_addr.s_addr; |
3857 | fl4->fl4_dport = daddr->v4.sin_port; |
3858 | fl4->flowi4_proto = IPPROTO_SCTP; |
3859 | @@ -438,8 +439,11 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3860 | &fl4->saddr); |
3861 | |
3862 | rt = ip_route_output_key(sock_net(sk), fl4); |
3863 | - if (!IS_ERR(rt)) |
3864 | + if (!IS_ERR(rt)) { |
3865 | dst = &rt->dst; |
3866 | + t->dst = dst; |
3867 | + memcpy(fl, &_fl, sizeof(_fl)); |
3868 | + } |
3869 | |
3870 | /* If there is no association or if a source address is passed, no |
3871 | * more validation is required. |
3872 | @@ -502,27 +506,33 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, |
3873 | odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr, |
3874 | false); |
3875 | if (!odev || odev->ifindex != fl4->flowi4_oif) { |
3876 | - if (!dst) |
3877 | + if (!dst) { |
3878 | dst = &rt->dst; |
3879 | - else |
3880 | + t->dst = dst; |
3881 | + memcpy(fl, &_fl, sizeof(_fl)); |
3882 | + } else { |
3883 | dst_release(&rt->dst); |
3884 | + } |
3885 | continue; |
3886 | } |
3887 | |
3888 | dst_release(dst); |
3889 | dst = &rt->dst; |
3890 | + t->dst = dst; |
3891 | + memcpy(fl, &_fl, sizeof(_fl)); |
3892 | break; |
3893 | } |
3894 | |
3895 | out_unlock: |
3896 | rcu_read_unlock(); |
3897 | out: |
3898 | - t->dst = dst; |
3899 | - if (dst) |
3900 | + if (dst) { |
3901 | pr_debug("rt_dst:%pI4, rt_src:%pI4\n", |
3902 | - &fl4->daddr, &fl4->saddr); |
3903 | - else |
3904 | + &fl->u.ip4.daddr, &fl->u.ip4.saddr); |
3905 | + } else { |
3906 | + t->dst = NULL; |
3907 | pr_debug("no route\n"); |
3908 | + } |
3909 | } |
3910 | |
3911 | /* For v4, the source address is cached in the route entry(dst). So no need |
3912 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
3913 | index ffd3262b7a41..58fe6556cdf5 100644 |
3914 | --- a/net/sctp/socket.c |
3915 | +++ b/net/sctp/socket.c |
3916 | @@ -147,29 +147,44 @@ static void sctp_clear_owner_w(struct sctp_chunk *chunk) |
3917 | skb_orphan(chunk->skb); |
3918 | } |
3919 | |
3920 | +#define traverse_and_process() \ |
3921 | +do { \ |
3922 | + msg = chunk->msg; \ |
3923 | + if (msg == prev_msg) \ |
3924 | + continue; \ |
3925 | + list_for_each_entry(c, &msg->chunks, frag_list) { \ |
3926 | + if ((clear && asoc->base.sk == c->skb->sk) || \ |
3927 | + (!clear && asoc->base.sk != c->skb->sk)) \ |
3928 | + cb(c); \ |
3929 | + } \ |
3930 | + prev_msg = msg; \ |
3931 | +} while (0) |
3932 | + |
3933 | static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, |
3934 | + bool clear, |
3935 | void (*cb)(struct sctp_chunk *)) |
3936 | |
3937 | { |
3938 | + struct sctp_datamsg *msg, *prev_msg = NULL; |
3939 | struct sctp_outq *q = &asoc->outqueue; |
3940 | + struct sctp_chunk *chunk, *c; |
3941 | struct sctp_transport *t; |
3942 | - struct sctp_chunk *chunk; |
3943 | |
3944 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) |
3945 | list_for_each_entry(chunk, &t->transmitted, transmitted_list) |
3946 | - cb(chunk); |
3947 | + traverse_and_process(); |
3948 | |
3949 | list_for_each_entry(chunk, &q->retransmit, transmitted_list) |
3950 | - cb(chunk); |
3951 | + traverse_and_process(); |
3952 | |
3953 | list_for_each_entry(chunk, &q->sacked, transmitted_list) |
3954 | - cb(chunk); |
3955 | + traverse_and_process(); |
3956 | |
3957 | list_for_each_entry(chunk, &q->abandoned, transmitted_list) |
3958 | - cb(chunk); |
3959 | + traverse_and_process(); |
3960 | |
3961 | list_for_each_entry(chunk, &q->out_chunk_list, list) |
3962 | - cb(chunk); |
3963 | + traverse_and_process(); |
3964 | } |
3965 | |
3966 | static void sctp_for_each_rx_skb(struct sctp_association *asoc, struct sock *sk, |
3967 | @@ -9461,9 +9476,9 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, |
3968 | * paths won't try to lock it and then oldsk. |
3969 | */ |
3970 | lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); |
3971 | - sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); |
3972 | + sctp_for_each_tx_datachunk(assoc, true, sctp_clear_owner_w); |
3973 | sctp_assoc_migrate(assoc, newsk); |
3974 | - sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); |
3975 | + sctp_for_each_tx_datachunk(assoc, false, sctp_set_owner_w); |
3976 | |
3977 | /* If the association on the newsk is already closed before accept() |
3978 | * is called, set RCV_SHUTDOWN flag. |
3979 | diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh |
3980 | index aa1386079f0c..8b6325c2dfc5 100755 |
3981 | --- a/scripts/link-vmlinux.sh |
3982 | +++ b/scripts/link-vmlinux.sh |
3983 | @@ -113,9 +113,6 @@ vmlinux_link() |
3984 | gen_btf() |
3985 | { |
3986 | local pahole_ver |
3987 | - local bin_arch |
3988 | - local bin_format |
3989 | - local bin_file |
3990 | |
3991 | if ! [ -x "$(command -v ${PAHOLE})" ]; then |
3992 | echo >&2 "BTF: ${1}: pahole (${PAHOLE}) is not available" |
3993 | @@ -133,17 +130,16 @@ gen_btf() |
3994 | info "BTF" ${2} |
3995 | LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} |
3996 | |
3997 | - # dump .BTF section into raw binary file to link with final vmlinux |
3998 | - bin_arch=$(LANG=C ${OBJDUMP} -f ${1} | grep architecture | \ |
3999 | - cut -d, -f1 | cut -d' ' -f2) |
4000 | - bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ |
4001 | - awk '{print $4}') |
4002 | - bin_file=.btf.vmlinux.bin |
4003 | - ${OBJCOPY} --change-section-address .BTF=0 \ |
4004 | - --set-section-flags .BTF=alloc -O binary \ |
4005 | - --only-section=.BTF ${1} $bin_file |
4006 | - ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ |
4007 | - --rename-section .data=.BTF $bin_file ${2} |
4008 | + # Create ${2} which contains just .BTF section but no symbols. Add |
4009 | + # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all |
4010 | + # deletes all symbols including __start_BTF and __stop_BTF, which will |
4011 | + # be redefined in the linker script. Add 2>/dev/null to suppress GNU |
4012 | + # objcopy warnings: "empty loadable segment detected at ..." |
4013 | + ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \ |
4014 | + --strip-all ${1} ${2} 2>/dev/null |
4015 | + # Change e_type to ET_REL so that it can be used to link final vmlinux. |
4016 | + # Unlike GNU ld, lld does not allow an ET_EXEC input. |
4017 | + printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none |
4018 | } |
4019 | |
4020 | # Create ${2} .o file with all symbols from the ${1} object file |
4021 | diff --git a/security/keys/internal.h b/security/keys/internal.h |
4022 | index 7e9914943616..1ca8bfaed0e8 100644 |
4023 | --- a/security/keys/internal.h |
4024 | +++ b/security/keys/internal.h |
4025 | @@ -350,15 +350,4 @@ static inline void key_check(const struct key *key) |
4026 | #define key_check(key) do {} while(0) |
4027 | |
4028 | #endif |
4029 | - |
4030 | -/* |
4031 | - * Helper function to clear and free a kvmalloc'ed memory object. |
4032 | - */ |
4033 | -static inline void __kvzfree(const void *addr, size_t len) |
4034 | -{ |
4035 | - if (addr) { |
4036 | - memset((void *)addr, 0, len); |
4037 | - kvfree(addr); |
4038 | - } |
4039 | -} |
4040 | #endif /* _INTERNAL_H */ |
4041 | diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c |
4042 | index 5e01192e222a..edde63a63007 100644 |
4043 | --- a/security/keys/keyctl.c |
4044 | +++ b/security/keys/keyctl.c |
4045 | @@ -142,10 +142,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, |
4046 | |
4047 | key_ref_put(keyring_ref); |
4048 | error3: |
4049 | - if (payload) { |
4050 | - memzero_explicit(payload, plen); |
4051 | - kvfree(payload); |
4052 | - } |
4053 | + kvfree_sensitive(payload, plen); |
4054 | error2: |
4055 | kfree(description); |
4056 | error: |
4057 | @@ -360,7 +357,7 @@ long keyctl_update_key(key_serial_t id, |
4058 | |
4059 | key_ref_put(key_ref); |
4060 | error2: |
4061 | - __kvzfree(payload, plen); |
4062 | + kvfree_sensitive(payload, plen); |
4063 | error: |
4064 | return ret; |
4065 | } |
4066 | @@ -914,7 +911,7 @@ can_read_key: |
4067 | */ |
4068 | if (ret > key_data_len) { |
4069 | if (unlikely(key_data)) |
4070 | - __kvzfree(key_data, key_data_len); |
4071 | + kvfree_sensitive(key_data, key_data_len); |
4072 | key_data_len = ret; |
4073 | continue; /* Allocate buffer */ |
4074 | } |
4075 | @@ -923,7 +920,7 @@ can_read_key: |
4076 | ret = -EFAULT; |
4077 | break; |
4078 | } |
4079 | - __kvzfree(key_data, key_data_len); |
4080 | + kvfree_sensitive(key_data, key_data_len); |
4081 | |
4082 | key_put_out: |
4083 | key_put(key); |
4084 | @@ -1225,10 +1222,7 @@ long keyctl_instantiate_key_common(key_serial_t id, |
4085 | keyctl_change_reqkey_auth(NULL); |
4086 | |
4087 | error2: |
4088 | - if (payload) { |
4089 | - memzero_explicit(payload, plen); |
4090 | - kvfree(payload); |
4091 | - } |
4092 | + kvfree_sensitive(payload, plen); |
4093 | error: |
4094 | return ret; |
4095 | } |
4096 | diff --git a/security/smack/smack.h b/security/smack/smack.h |
4097 | index 62529f382942..335d2411abe4 100644 |
4098 | --- a/security/smack/smack.h |
4099 | +++ b/security/smack/smack.h |
4100 | @@ -148,7 +148,6 @@ struct smk_net4addr { |
4101 | struct smack_known *smk_label; /* label */ |
4102 | }; |
4103 | |
4104 | -#if IS_ENABLED(CONFIG_IPV6) |
4105 | /* |
4106 | * An entry in the table identifying IPv6 hosts. |
4107 | */ |
4108 | @@ -159,9 +158,7 @@ struct smk_net6addr { |
4109 | int smk_masks; /* mask size */ |
4110 | struct smack_known *smk_label; /* label */ |
4111 | }; |
4112 | -#endif /* CONFIG_IPV6 */ |
4113 | |
4114 | -#ifdef SMACK_IPV6_PORT_LABELING |
4115 | /* |
4116 | * An entry in the table identifying ports. |
4117 | */ |
4118 | @@ -174,7 +171,6 @@ struct smk_port_label { |
4119 | short smk_sock_type; /* Socket type */ |
4120 | short smk_can_reuse; |
4121 | }; |
4122 | -#endif /* SMACK_IPV6_PORT_LABELING */ |
4123 | |
4124 | struct smack_known_list_elem { |
4125 | struct list_head list; |
4126 | @@ -335,9 +331,7 @@ extern struct smack_known smack_known_web; |
4127 | extern struct mutex smack_known_lock; |
4128 | extern struct list_head smack_known_list; |
4129 | extern struct list_head smk_net4addr_list; |
4130 | -#if IS_ENABLED(CONFIG_IPV6) |
4131 | extern struct list_head smk_net6addr_list; |
4132 | -#endif /* CONFIG_IPV6 */ |
4133 | |
4134 | extern struct mutex smack_onlycap_lock; |
4135 | extern struct list_head smack_onlycap_list; |
4136 | diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c |
4137 | index ad22066eba04..12c0fa85d9f8 100644 |
4138 | --- a/security/smack/smack_lsm.c |
4139 | +++ b/security/smack/smack_lsm.c |
4140 | @@ -51,10 +51,8 @@ |
4141 | #define SMK_RECEIVING 1 |
4142 | #define SMK_SENDING 2 |
4143 | |
4144 | -#ifdef SMACK_IPV6_PORT_LABELING |
4145 | -DEFINE_MUTEX(smack_ipv6_lock); |
4146 | +static DEFINE_MUTEX(smack_ipv6_lock); |
4147 | static LIST_HEAD(smk_ipv6_port_list); |
4148 | -#endif |
4149 | static struct kmem_cache *smack_inode_cache; |
4150 | struct kmem_cache *smack_rule_cache; |
4151 | int smack_enabled; |
4152 | @@ -2326,7 +2324,6 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) |
4153 | return NULL; |
4154 | } |
4155 | |
4156 | -#if IS_ENABLED(CONFIG_IPV6) |
4157 | /* |
4158 | * smk_ipv6_localhost - Check for local ipv6 host address |
4159 | * @sip: the address |
4160 | @@ -2394,7 +2391,6 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) |
4161 | |
4162 | return NULL; |
4163 | } |
4164 | -#endif /* CONFIG_IPV6 */ |
4165 | |
4166 | /** |
4167 | * smack_netlabel - Set the secattr on a socket |
4168 | @@ -2483,7 +2479,6 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) |
4169 | return smack_netlabel(sk, sk_lbl); |
4170 | } |
4171 | |
4172 | -#if IS_ENABLED(CONFIG_IPV6) |
4173 | /** |
4174 | * smk_ipv6_check - check Smack access |
4175 | * @subject: subject Smack label |
4176 | @@ -2516,7 +2511,6 @@ static int smk_ipv6_check(struct smack_known *subject, |
4177 | rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); |
4178 | return rc; |
4179 | } |
4180 | -#endif /* CONFIG_IPV6 */ |
4181 | |
4182 | #ifdef SMACK_IPV6_PORT_LABELING |
4183 | /** |
4184 | @@ -2605,6 +2599,7 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) |
4185 | mutex_unlock(&smack_ipv6_lock); |
4186 | return; |
4187 | } |
4188 | +#endif |
4189 | |
4190 | /** |
4191 | * smk_ipv6_port_check - check Smack port access |
4192 | @@ -2667,7 +2662,6 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, |
4193 | |
4194 | return smk_ipv6_check(skp, object, address, act); |
4195 | } |
4196 | -#endif /* SMACK_IPV6_PORT_LABELING */ |
4197 | |
4198 | /** |
4199 | * smack_inode_setsecurity - set smack xattrs |
4200 | @@ -2842,24 +2836,21 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, |
4201 | return 0; |
4202 | if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { |
4203 | struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; |
4204 | -#ifdef SMACK_IPV6_SECMARK_LABELING |
4205 | - struct smack_known *rsp; |
4206 | -#endif |
4207 | + struct smack_known *rsp = NULL; |
4208 | |
4209 | if (addrlen < SIN6_LEN_RFC2133) |
4210 | return 0; |
4211 | -#ifdef SMACK_IPV6_SECMARK_LABELING |
4212 | - rsp = smack_ipv6host_label(sip); |
4213 | + if (__is_defined(SMACK_IPV6_SECMARK_LABELING)) |
4214 | + rsp = smack_ipv6host_label(sip); |
4215 | if (rsp != NULL) { |
4216 | struct socket_smack *ssp = sock->sk->sk_security; |
4217 | |
4218 | rc = smk_ipv6_check(ssp->smk_out, rsp, sip, |
4219 | SMK_CONNECTING); |
4220 | } |
4221 | -#endif |
4222 | -#ifdef SMACK_IPV6_PORT_LABELING |
4223 | - rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); |
4224 | -#endif |
4225 | + if (__is_defined(SMACK_IPV6_PORT_LABELING)) |
4226 | + rc = smk_ipv6_port_check(sock->sk, sip, SMK_CONNECTING); |
4227 | + |
4228 | return rc; |
4229 | } |
4230 | if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) |
4231 | diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c |
4232 | index e3e05c04dbd1..c21b656b3263 100644 |
4233 | --- a/security/smack/smackfs.c |
4234 | +++ b/security/smack/smackfs.c |
4235 | @@ -878,11 +878,21 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, |
4236 | else |
4237 | rule += strlen(skp->smk_known) + 1; |
4238 | |
4239 | + if (rule > data + count) { |
4240 | + rc = -EOVERFLOW; |
4241 | + goto out; |
4242 | + } |
4243 | + |
4244 | ret = sscanf(rule, "%d", &maplevel); |
4245 | if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) |
4246 | goto out; |
4247 | |
4248 | rule += SMK_DIGITLEN; |
4249 | + if (rule > data + count) { |
4250 | + rc = -EOVERFLOW; |
4251 | + goto out; |
4252 | + } |
4253 | + |
4254 | ret = sscanf(rule, "%d", &catlen); |
4255 | if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) |
4256 | goto out; |
4257 | diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c |
4258 | index 5c74ea2bb44b..ec501fbaabe4 100644 |
4259 | --- a/sound/core/pcm_native.c |
4260 | +++ b/sound/core/pcm_native.c |
4261 | @@ -136,6 +136,16 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) |
4262 | } |
4263 | EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); |
4264 | |
4265 | +static void snd_pcm_stream_lock_nested(struct snd_pcm_substream *substream) |
4266 | +{ |
4267 | + struct snd_pcm_group *group = &substream->self_group; |
4268 | + |
4269 | + if (substream->pcm->nonatomic) |
4270 | + mutex_lock_nested(&group->mutex, SINGLE_DEPTH_NESTING); |
4271 | + else |
4272 | + spin_lock_nested(&group->lock, SINGLE_DEPTH_NESTING); |
4273 | +} |
4274 | + |
4275 | /** |
4276 | * snd_pcm_stream_unlock_irq - Unlock the PCM stream |
4277 | * @substream: PCM substream |
4278 | @@ -1994,6 +2004,12 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) |
4279 | } |
4280 | pcm_file = f.file->private_data; |
4281 | substream1 = pcm_file->substream; |
4282 | + |
4283 | + if (substream == substream1) { |
4284 | + res = -EINVAL; |
4285 | + goto _badf; |
4286 | + } |
4287 | + |
4288 | group = kzalloc(sizeof(*group), GFP_KERNEL); |
4289 | if (!group) { |
4290 | res = -ENOMEM; |
4291 | @@ -2022,7 +2038,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) |
4292 | snd_pcm_stream_unlock_irq(substream); |
4293 | |
4294 | snd_pcm_group_lock_irq(target_group, nonatomic); |
4295 | - snd_pcm_stream_lock(substream1); |
4296 | + snd_pcm_stream_lock_nested(substream1); |
4297 | snd_pcm_group_assign(substream1, target_group); |
4298 | refcount_inc(&target_group->refs); |
4299 | snd_pcm_stream_unlock(substream1); |
4300 | @@ -2038,7 +2054,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd) |
4301 | |
4302 | static void relink_to_local(struct snd_pcm_substream *substream) |
4303 | { |
4304 | - snd_pcm_stream_lock(substream); |
4305 | + snd_pcm_stream_lock_nested(substream); |
4306 | snd_pcm_group_assign(substream, &substream->self_group); |
4307 | snd_pcm_stream_unlock(substream); |
4308 | } |
4309 | diff --git a/sound/firewire/fireface/ff-protocol-latter.c b/sound/firewire/fireface/ff-protocol-latter.c |
4310 | index 0e4c3a9ed5e4..76ae568489ef 100644 |
4311 | --- a/sound/firewire/fireface/ff-protocol-latter.c |
4312 | +++ b/sound/firewire/fireface/ff-protocol-latter.c |
4313 | @@ -107,18 +107,18 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) |
4314 | int err; |
4315 | |
4316 | // Set the number of data blocks transferred in a second. |
4317 | - if (rate % 32000 == 0) |
4318 | - code = 0x00; |
4319 | + if (rate % 48000 == 0) |
4320 | + code = 0x04; |
4321 | else if (rate % 44100 == 0) |
4322 | code = 0x02; |
4323 | - else if (rate % 48000 == 0) |
4324 | - code = 0x04; |
4325 | + else if (rate % 32000 == 0) |
4326 | + code = 0x00; |
4327 | else |
4328 | return -EINVAL; |
4329 | |
4330 | if (rate >= 64000 && rate < 128000) |
4331 | code |= 0x08; |
4332 | - else if (rate >= 128000 && rate < 192000) |
4333 | + else if (rate >= 128000) |
4334 | code |= 0x10; |
4335 | |
4336 | reg = cpu_to_le32(code); |
4337 | @@ -140,7 +140,7 @@ static int latter_allocate_resources(struct snd_ff *ff, unsigned int rate) |
4338 | if (curr_rate == rate) |
4339 | break; |
4340 | } |
4341 | - if (count == 10) |
4342 | + if (count > 10) |
4343 | return -ETIMEDOUT; |
4344 | |
4345 | for (i = 0; i < ARRAY_SIZE(amdtp_rate_table); ++i) { |
4346 | diff --git a/sound/isa/es1688/es1688.c b/sound/isa/es1688/es1688.c |
4347 | index 9be89377171b..b4e9b0de3b42 100644 |
4348 | --- a/sound/isa/es1688/es1688.c |
4349 | +++ b/sound/isa/es1688/es1688.c |
4350 | @@ -267,8 +267,10 @@ static int snd_es968_pnp_detect(struct pnp_card_link *pcard, |
4351 | return error; |
4352 | } |
4353 | error = snd_es1688_probe(card, dev); |
4354 | - if (error < 0) |
4355 | + if (error < 0) { |
4356 | + snd_card_free(card); |
4357 | return error; |
4358 | + } |
4359 | pnp_set_card_drvdata(pcard, card); |
4360 | snd_es968_pnp_is_probed = 1; |
4361 | return 0; |
4362 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
4363 | index da4d21445e80..df5afac0b600 100644 |
4364 | --- a/sound/pci/hda/patch_realtek.c |
4365 | +++ b/sound/pci/hda/patch_realtek.c |
4366 | @@ -8156,6 +8156,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
4367 | ALC225_STANDARD_PINS, |
4368 | {0x12, 0xb7a60130}, |
4369 | {0x17, 0x90170110}), |
4370 | + SND_HDA_PIN_QUIRK(0x10ec0623, 0x17aa, "Lenovo", ALC283_FIXUP_HEADSET_MIC, |
4371 | + {0x14, 0x01014010}, |
4372 | + {0x17, 0x90170120}, |
4373 | + {0x18, 0x02a11030}, |
4374 | + {0x19, 0x02a1103f}, |
4375 | + {0x21, 0x0221101f}), |
4376 | {} |
4377 | }; |
4378 | |
4379 | diff --git a/sound/soc/codecs/max9867.c b/sound/soc/codecs/max9867.c |
4380 | index 8600c5439e1e..2e4aa23b5a60 100644 |
4381 | --- a/sound/soc/codecs/max9867.c |
4382 | +++ b/sound/soc/codecs/max9867.c |
4383 | @@ -46,13 +46,13 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(max9867_micboost_tlv, |
4384 | |
4385 | static const struct snd_kcontrol_new max9867_snd_controls[] = { |
4386 | SOC_DOUBLE_R_TLV("Master Playback Volume", MAX9867_LEFTVOL, |
4387 | - MAX9867_RIGHTVOL, 0, 41, 1, max9867_master_tlv), |
4388 | + MAX9867_RIGHTVOL, 0, 40, 1, max9867_master_tlv), |
4389 | SOC_DOUBLE_R_TLV("Line Capture Volume", MAX9867_LEFTLINELVL, |
4390 | MAX9867_RIGHTLINELVL, 0, 15, 1, max9867_line_tlv), |
4391 | SOC_DOUBLE_R_TLV("Mic Capture Volume", MAX9867_LEFTMICGAIN, |
4392 | MAX9867_RIGHTMICGAIN, 0, 20, 1, max9867_mic_tlv), |
4393 | SOC_DOUBLE_R_TLV("Mic Boost Capture Volume", MAX9867_LEFTMICGAIN, |
4394 | - MAX9867_RIGHTMICGAIN, 5, 4, 0, max9867_micboost_tlv), |
4395 | + MAX9867_RIGHTMICGAIN, 5, 3, 0, max9867_micboost_tlv), |
4396 | SOC_SINGLE("Digital Sidetone Volume", MAX9867_SIDETONE, 0, 31, 1), |
4397 | SOC_SINGLE_TLV("Digital Playback Volume", MAX9867_DACLEVEL, 0, 15, 1, |
4398 | max9867_dac_tlv), |
4399 | diff --git a/sound/usb/card.c b/sound/usb/card.c |
4400 | index 54f9ce38471e..f9a64e9526f5 100644 |
4401 | --- a/sound/usb/card.c |
4402 | +++ b/sound/usb/card.c |
4403 | @@ -810,9 +810,6 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) |
4404 | if (chip == (void *)-1L) |
4405 | return 0; |
4406 | |
4407 | - chip->autosuspended = !!PMSG_IS_AUTO(message); |
4408 | - if (!chip->autosuspended) |
4409 | - snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); |
4410 | if (!chip->num_suspended_intf++) { |
4411 | list_for_each_entry(as, &chip->pcm_list, list) { |
4412 | snd_usb_pcm_suspend(as); |
4413 | @@ -825,6 +822,11 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message) |
4414 | snd_usb_mixer_suspend(mixer); |
4415 | } |
4416 | |
4417 | + if (!PMSG_IS_AUTO(message) && !chip->system_suspend) { |
4418 | + snd_power_change_state(chip->card, SNDRV_CTL_POWER_D3hot); |
4419 | + chip->system_suspend = chip->num_suspended_intf; |
4420 | + } |
4421 | + |
4422 | return 0; |
4423 | } |
4424 | |
4425 | @@ -838,10 +840,10 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) |
4426 | |
4427 | if (chip == (void *)-1L) |
4428 | return 0; |
4429 | - if (--chip->num_suspended_intf) |
4430 | - return 0; |
4431 | |
4432 | atomic_inc(&chip->active); /* avoid autopm */ |
4433 | + if (chip->num_suspended_intf > 1) |
4434 | + goto out; |
4435 | |
4436 | list_for_each_entry(as, &chip->pcm_list, list) { |
4437 | err = snd_usb_pcm_resume(as); |
4438 | @@ -863,9 +865,12 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume) |
4439 | snd_usbmidi_resume(p); |
4440 | } |
4441 | |
4442 | - if (!chip->autosuspended) |
4443 | + out: |
4444 | + if (chip->num_suspended_intf == chip->system_suspend) { |
4445 | snd_power_change_state(chip->card, SNDRV_CTL_POWER_D0); |
4446 | - chip->autosuspended = 0; |
4447 | + chip->system_suspend = 0; |
4448 | + } |
4449 | + chip->num_suspended_intf--; |
4450 | |
4451 | err_out: |
4452 | atomic_dec(&chip->active); /* allow autopm after this point */ |
4453 | diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
4454 | index bbae11605a4c..042a5e8eb79d 100644 |
4455 | --- a/sound/usb/quirks-table.h |
4456 | +++ b/sound/usb/quirks-table.h |
4457 | @@ -25,6 +25,26 @@ |
4458 | .idProduct = prod, \ |
4459 | .bInterfaceClass = USB_CLASS_VENDOR_SPEC |
4460 | |
4461 | +/* HP Thunderbolt Dock Audio Headset */ |
4462 | +{ |
4463 | + USB_DEVICE(0x03f0, 0x0269), |
4464 | + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { |
4465 | + .vendor_name = "HP", |
4466 | + .product_name = "Thunderbolt Dock Audio Headset", |
4467 | + .profile_name = "HP-Thunderbolt-Dock-Audio-Headset", |
4468 | + .ifnum = QUIRK_NO_INTERFACE |
4469 | + } |
4470 | +}, |
4471 | +/* HP Thunderbolt Dock Audio Module */ |
4472 | +{ |
4473 | + USB_DEVICE(0x03f0, 0x0567), |
4474 | + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { |
4475 | + .vendor_name = "HP", |
4476 | + .product_name = "Thunderbolt Dock Audio Module", |
4477 | + .profile_name = "HP-Thunderbolt-Dock-Audio-Module", |
4478 | + .ifnum = QUIRK_NO_INTERFACE |
4479 | + } |
4480 | +}, |
4481 | /* FTDI devices */ |
4482 | { |
4483 | USB_DEVICE(0x0403, 0xb8d8), |
4484 | diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h |
4485 | index e360680f45f3..55a2119c2411 100644 |
4486 | --- a/sound/usb/usbaudio.h |
4487 | +++ b/sound/usb/usbaudio.h |
4488 | @@ -26,7 +26,7 @@ struct snd_usb_audio { |
4489 | struct usb_interface *pm_intf; |
4490 | u32 usb_id; |
4491 | struct mutex mutex; |
4492 | - unsigned int autosuspended:1; |
4493 | + unsigned int system_suspend; |
4494 | atomic_t active; |
4495 | atomic_t shutdown; |
4496 | atomic_t usage_count; |
4497 | diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c |
4498 | index 91cab5f669d2..92b07be0b48b 100644 |
4499 | --- a/tools/perf/util/probe-event.c |
4500 | +++ b/tools/perf/util/probe-event.c |
4501 | @@ -1757,8 +1757,7 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev) |
4502 | fmt1_str = strtok_r(argv0_str, ":", &fmt); |
4503 | fmt2_str = strtok_r(NULL, "/", &fmt); |
4504 | fmt3_str = strtok_r(NULL, " \t", &fmt); |
4505 | - if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL |
4506 | - || fmt3_str == NULL) { |
4507 | + if (fmt1_str == NULL || fmt2_str == NULL || fmt3_str == NULL) { |
4508 | semantic_error("Failed to parse event name: %s\n", argv[0]); |
4509 | ret = -EINVAL; |
4510 | goto out; |
4511 | diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc |
4512 | index 021c03fd885d..23465823532b 100644 |
4513 | --- a/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc |
4514 | +++ b/tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc |
4515 | @@ -14,6 +14,8 @@ if [ ! -f set_event ]; then |
4516 | exit_unsupported |
4517 | fi |
4518 | |
4519 | +[ -f error_log ] || exit_unsupported |
4520 | + |
4521 | ftrace_errlog_check 'event filter parse error' '((sig >= 10 && sig < 15) || dsig ^== 17) && comm != bash' 'events/signal/signal_generate/filter' |
4522 | |
4523 | exit 0 |
4524 | diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c |
4525 | index 6dee9e636a95..422e7761254d 100644 |
4526 | --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c |
4527 | +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c |
4528 | @@ -115,6 +115,7 @@ static struct option long_options[] = { |
4529 | { "tcp", no_argument, 0, 't' }, |
4530 | { "udp", no_argument, 0, 'u' }, |
4531 | { "ip", no_argument, 0, 'i' }, |
4532 | + { NULL, 0, NULL, 0 }, |
4533 | }; |
4534 | |
4535 | static int next_port = 19999; |
4536 | diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json |
4537 | index 0f89cd50a94b..152ffa45e857 100644 |
4538 | --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json |
4539 | +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json |
4540 | @@ -54,7 +54,7 @@ |
4541 | "setup": [ |
4542 | "$TC qdisc add dev $DEV2 ingress" |
4543 | ], |
4544 | - "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 parent ffff: handle 0xffffffff flower action ok", |
4545 | + "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip pref 1 ingress handle 0xffffffff flower action ok", |
4546 | "expExitCode": "0", |
4547 | "verifyCmd": "$TC filter show dev $DEV2 ingress", |
4548 | "matchPattern": "filter protocol ip pref 1 flower.*handle 0xffffffff", |
4549 | @@ -99,9 +99,9 @@ |
4550 | }, |
4551 | "setup": [ |
4552 | "$TC qdisc add dev $DEV2 ingress", |
4553 | - "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" |
4554 | + "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop" |
4555 | ], |
4556 | - "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 parent ffff: flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", |
4557 | + "cmdUnderTest": "$TC filter add dev $DEV2 protocol ip prio 1 ingress flower dst_mac e4:11:22:11:4a:51 src_mac e4:11:22:11:4a:50 ip_proto tcp src_ip 1.1.1.1 dst_ip 2.2.2.2 action drop", |
4558 | "expExitCode": "2", |
4559 | "verifyCmd": "$TC -s filter show dev $DEV2 ingress", |
4560 | "matchPattern": "filter protocol ip pref 1 flower chain 0 handle", |
4561 | diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py |
4562 | index 6a2bd2cf528e..995f66ce43eb 100755 |
4563 | --- a/tools/testing/selftests/tc-testing/tdc_batch.py |
4564 | +++ b/tools/testing/selftests/tc-testing/tdc_batch.py |
4565 | @@ -72,21 +72,21 @@ mac_prefix = args.mac_prefix |
4566 | |
4567 | def format_add_filter(device, prio, handle, skip, src_mac, dst_mac, |
4568 | share_action): |
4569 | - return ("filter add dev {} {} protocol ip parent ffff: handle {} " |
4570 | + return ("filter add dev {} {} protocol ip ingress handle {} " |
4571 | " flower {} src_mac {} dst_mac {} action drop {}".format( |
4572 | device, prio, handle, skip, src_mac, dst_mac, share_action)) |
4573 | |
4574 | |
4575 | def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac, |
4576 | share_action): |
4577 | - return ("filter replace dev {} {} protocol ip parent ffff: handle {} " |
4578 | + return ("filter replace dev {} {} protocol ip ingress handle {} " |
4579 | " flower {} src_mac {} dst_mac {} action drop {}".format( |
4580 | device, prio, handle, skip, src_mac, dst_mac, share_action)) |
4581 | |
4582 | |
4583 | def format_del_filter(device, prio, handle, skip, src_mac, dst_mac, |
4584 | share_action): |
4585 | - return ("filter del dev {} {} protocol ip parent ffff: handle {} " |
4586 | + return ("filter del dev {} {} protocol ip ingress handle {} " |
4587 | "flower".format(device, prio, handle)) |
4588 | |
4589 | |
4590 | diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c |
4591 | index 0a356aa91aa1..f2047fc69006 100644 |
4592 | --- a/virt/kvm/arm/aarch32.c |
4593 | +++ b/virt/kvm/arm/aarch32.c |
4594 | @@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = { |
4595 | [7] = { 4, 4 }, /* FIQ, unused */ |
4596 | }; |
4597 | |
4598 | +static bool pre_fault_synchronize(struct kvm_vcpu *vcpu) |
4599 | +{ |
4600 | + preempt_disable(); |
4601 | + if (kvm_arm_vcpu_loaded(vcpu)) { |
4602 | + kvm_arch_vcpu_put(vcpu); |
4603 | + return true; |
4604 | + } |
4605 | + |
4606 | + preempt_enable(); |
4607 | + return false; |
4608 | +} |
4609 | + |
4610 | +static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded) |
4611 | +{ |
4612 | + if (loaded) { |
4613 | + kvm_arch_vcpu_load(vcpu, smp_processor_id()); |
4614 | + preempt_enable(); |
4615 | + } |
4616 | +} |
4617 | + |
4618 | /* |
4619 | * When an exception is taken, most CPSR fields are left unchanged in the |
4620 | * handler. However, some are explicitly overridden (e.g. M[4:0]). |
4621 | @@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) |
4622 | |
4623 | void kvm_inject_undef32(struct kvm_vcpu *vcpu) |
4624 | { |
4625 | + bool loaded = pre_fault_synchronize(vcpu); |
4626 | + |
4627 | prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4); |
4628 | + post_fault_synchronize(vcpu, loaded); |
4629 | } |
4630 | |
4631 | /* |
4632 | @@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, |
4633 | u32 vect_offset; |
4634 | u32 *far, *fsr; |
4635 | bool is_lpae; |
4636 | + bool loaded; |
4637 | + |
4638 | + loaded = pre_fault_synchronize(vcpu); |
4639 | |
4640 | if (is_pabt) { |
4641 | vect_offset = 12; |
4642 | @@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, |
4643 | /* no need to shuffle FS[4] into DFSR[10] as its 0 */ |
4644 | *fsr = DFSR_FSC_EXTABT_nLPAE; |
4645 | } |
4646 | + |
4647 | + post_fault_synchronize(vcpu, loaded); |
4648 | } |
4649 | |
4650 | void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr) |
4651 | diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c |
4652 | index 86c6aa1cb58e..986fbc3cf667 100644 |
4653 | --- a/virt/kvm/arm/arm.c |
4654 | +++ b/virt/kvm/arm/arm.c |
4655 | @@ -354,6 +354,16 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) |
4656 | return kvm_vgic_vcpu_init(vcpu); |
4657 | } |
4658 | |
4659 | +#ifdef CONFIG_ARM64 |
4660 | +#define __ptrauth_save_key(regs, key) \ |
4661 | +({ \ |
4662 | + regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \ |
4663 | + regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \ |
4664 | +}) |
4665 | +#else |
4666 | +#define __ptrauth_save_key(regs, key) do { } while (0) |
4667 | +#endif |
4668 | + |
4669 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
4670 | { |
4671 | int *last_ran; |
4672 | @@ -386,7 +396,17 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
4673 | else |
4674 | vcpu_set_wfe_traps(vcpu); |
4675 | |
4676 | - vcpu_ptrauth_setup_lazy(vcpu); |
4677 | + if (vcpu_has_ptrauth(vcpu)) { |
4678 | + struct kvm_cpu_context __maybe_unused *ctxt = vcpu->arch.host_cpu_context; |
4679 | + |
4680 | + __ptrauth_save_key(ctxt->sys_regs, APIA); |
4681 | + __ptrauth_save_key(ctxt->sys_regs, APIB); |
4682 | + __ptrauth_save_key(ctxt->sys_regs, APDA); |
4683 | + __ptrauth_save_key(ctxt->sys_regs, APDB); |
4684 | + __ptrauth_save_key(ctxt->sys_regs, APGA); |
4685 | + |
4686 | + vcpu_ptrauth_disable(vcpu); |
4687 | + } |
4688 | } |
4689 | |
4690 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
4691 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
4692 | index 03c681568ab1..d5d4cd581af3 100644 |
4693 | --- a/virt/kvm/kvm_main.c |
4694 | +++ b/virt/kvm/kvm_main.c |
4695 | @@ -157,10 +157,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm); |
4696 | static unsigned long long kvm_createvm_count; |
4697 | static unsigned long long kvm_active_vms; |
4698 | |
4699 | -__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
4700 | - unsigned long start, unsigned long end, bool blockable) |
4701 | +__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, |
4702 | + unsigned long start, unsigned long end) |
4703 | { |
4704 | - return 0; |
4705 | } |
4706 | |
4707 | bool kvm_is_zone_device_pfn(kvm_pfn_t pfn) |
4708 | @@ -381,6 +380,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) |
4709 | return container_of(mn, struct kvm, mmu_notifier); |
4710 | } |
4711 | |
4712 | +static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn, |
4713 | + struct mm_struct *mm, |
4714 | + unsigned long start, unsigned long end) |
4715 | +{ |
4716 | + struct kvm *kvm = mmu_notifier_to_kvm(mn); |
4717 | + int idx; |
4718 | + |
4719 | + idx = srcu_read_lock(&kvm->srcu); |
4720 | + kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); |
4721 | + srcu_read_unlock(&kvm->srcu, idx); |
4722 | +} |
4723 | + |
4724 | static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, |
4725 | struct mm_struct *mm, |
4726 | unsigned long address, |
4727 | @@ -405,7 +416,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
4728 | { |
4729 | struct kvm *kvm = mmu_notifier_to_kvm(mn); |
4730 | int need_tlb_flush = 0, idx; |
4731 | - int ret; |
4732 | |
4733 | idx = srcu_read_lock(&kvm->srcu); |
4734 | spin_lock(&kvm->mmu_lock); |
4735 | @@ -422,14 +432,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
4736 | kvm_flush_remote_tlbs(kvm); |
4737 | |
4738 | spin_unlock(&kvm->mmu_lock); |
4739 | - |
4740 | - ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start, |
4741 | - range->end, |
4742 | - mmu_notifier_range_blockable(range)); |
4743 | - |
4744 | srcu_read_unlock(&kvm->srcu, idx); |
4745 | |
4746 | - return ret; |
4747 | + return 0; |
4748 | } |
4749 | |
4750 | static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, |
4751 | @@ -535,6 +540,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn, |
4752 | } |
4753 | |
4754 | static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { |
4755 | + .invalidate_range = kvm_mmu_notifier_invalidate_range, |
4756 | .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start, |
4757 | .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end, |
4758 | .clear_flush_young = kvm_mmu_notifier_clear_flush_young, |