Annotation of /trunk/kernel-alx/patches-4.19/0118-4.19.19-all-fixes.patch
Parent Directory | Revision Log
Revision 3397 -
(hide annotations)
(download)
Fri Aug 2 11:47:31 2019 UTC (5 years, 1 month ago) by niro
File size: 146380 byte(s)
-linux-4.19.19
1 | niro | 3397 | diff --git a/Makefile b/Makefile |
2 | index 9f37a8a9feb9..39c4e7c3c13c 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,7 +1,7 @@ | ||
6 | # SPDX-License-Identifier: GPL-2.0 | ||
7 | VERSION = 4 | ||
8 | PATCHLEVEL = 19 | ||
9 | -SUBLEVEL = 18 | ||
10 | +SUBLEVEL = 19 | ||
11 | EXTRAVERSION = | ||
12 | NAME = "People's Front" | ||
13 | |||
14 | diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h | ||
15 | index 9185541035cc..6958545390f0 100644 | ||
16 | --- a/arch/arc/include/asm/perf_event.h | ||
17 | +++ b/arch/arc/include/asm/perf_event.h | ||
18 | @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = { | ||
19 | |||
20 | /* counts condition */ | ||
21 | [PERF_COUNT_HW_INSTRUCTIONS] = "iall", | ||
22 | - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */ | ||
23 | + /* All jump instructions that are taken */ | ||
24 | + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak", | ||
25 | [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */ | ||
26 | #ifdef CONFIG_ISA_ARCV2 | ||
27 | [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp", | ||
28 | diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S | ||
29 | index 62ad4bcb841a..f230bb7092fd 100644 | ||
30 | --- a/arch/arc/lib/memset-archs.S | ||
31 | +++ b/arch/arc/lib/memset-archs.S | ||
32 | @@ -7,11 +7,39 @@ | ||
33 | */ | ||
34 | |||
35 | #include <linux/linkage.h> | ||
36 | +#include <asm/cache.h> | ||
37 | |||
38 | -#undef PREALLOC_NOT_AVAIL | ||
39 | +/* | ||
40 | + * The memset implementation below is optimized to use prefetchw and prealloc | ||
41 | + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6) | ||
42 | + * If you want to implement optimized memset for other possible L1 data cache | ||
43 | + * line lengths (32B and 128B) you should rewrite code carefully checking | ||
44 | + * we don't call any prefetchw/prealloc instruction for L1 cache lines which | ||
45 | + * don't belongs to memset area. | ||
46 | + */ | ||
47 | + | ||
48 | +#if L1_CACHE_SHIFT == 6 | ||
49 | + | ||
50 | +.macro PREALLOC_INSTR reg, off | ||
51 | + prealloc [\reg, \off] | ||
52 | +.endm | ||
53 | + | ||
54 | +.macro PREFETCHW_INSTR reg, off | ||
55 | + prefetchw [\reg, \off] | ||
56 | +.endm | ||
57 | + | ||
58 | +#else | ||
59 | + | ||
60 | +.macro PREALLOC_INSTR | ||
61 | +.endm | ||
62 | + | ||
63 | +.macro PREFETCHW_INSTR | ||
64 | +.endm | ||
65 | + | ||
66 | +#endif | ||
67 | |||
68 | ENTRY_CFI(memset) | ||
69 | - prefetchw [r0] ; Prefetch the write location | ||
70 | + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location | ||
71 | mov.f 0, r2 | ||
72 | ;;; if size is zero | ||
73 | jz.d [blink] | ||
74 | @@ -48,11 +76,8 @@ ENTRY_CFI(memset) | ||
75 | |||
76 | lpnz @.Lset64bytes | ||
77 | ;; LOOP START | ||
78 | -#ifdef PREALLOC_NOT_AVAIL | ||
79 | - prefetchw [r3, 64] ;Prefetch the next write location | ||
80 | -#else | ||
81 | - prealloc [r3, 64] | ||
82 | -#endif | ||
83 | + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching | ||
84 | + | ||
85 | #ifdef CONFIG_ARC_HAS_LL64 | ||
86 | std.ab r4, [r3, 8] | ||
87 | std.ab r4, [r3, 8] | ||
88 | @@ -85,7 +110,6 @@ ENTRY_CFI(memset) | ||
89 | lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes | ||
90 | lpnz .Lset32bytes | ||
91 | ;; LOOP START | ||
92 | - prefetchw [r3, 32] ;Prefetch the next write location | ||
93 | #ifdef CONFIG_ARC_HAS_LL64 | ||
94 | std.ab r4, [r3, 8] | ||
95 | std.ab r4, [r3, 8] | ||
96 | diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c | ||
97 | index ba145065c579..f890b2f9f82f 100644 | ||
98 | --- a/arch/arc/mm/init.c | ||
99 | +++ b/arch/arc/mm/init.c | ||
100 | @@ -138,7 +138,8 @@ void __init setup_arch_memory(void) | ||
101 | */ | ||
102 | |||
103 | memblock_add_node(low_mem_start, low_mem_sz, 0); | ||
104 | - memblock_reserve(low_mem_start, __pa(_end) - low_mem_start); | ||
105 | + memblock_reserve(CONFIG_LINUX_LINK_BASE, | ||
106 | + __pa(_end) - CONFIG_LINUX_LINK_BASE); | ||
107 | |||
108 | #ifdef CONFIG_BLK_DEV_INITRD | ||
109 | if (initrd_start) | ||
110 | diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h | ||
111 | index f1ab9420ccfb..09b61d0e491f 100644 | ||
112 | --- a/arch/s390/include/asm/mmu_context.h | ||
113 | +++ b/arch/s390/include/asm/mmu_context.h | ||
114 | @@ -89,8 +89,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
115 | { | ||
116 | int cpu = smp_processor_id(); | ||
117 | |||
118 | - if (prev == next) | ||
119 | - return; | ||
120 | S390_lowcore.user_asce = next->context.asce; | ||
121 | cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); | ||
122 | /* Clear previous user-ASCE from CR1 and CR7 */ | ||
123 | @@ -102,7 +100,8 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | ||
124 | __ctl_load(S390_lowcore.vdso_asce, 7, 7); | ||
125 | clear_cpu_flag(CIF_ASCE_SECONDARY); | ||
126 | } | ||
127 | - cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | ||
128 | + if (prev != next) | ||
129 | + cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); | ||
130 | } | ||
131 | |||
132 | #define finish_arch_post_lock_switch finish_arch_post_lock_switch | ||
133 | diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c | ||
134 | index 5b28b434f8a1..e7e6608b996c 100644 | ||
135 | --- a/arch/s390/kernel/early.c | ||
136 | +++ b/arch/s390/kernel/early.c | ||
137 | @@ -64,10 +64,10 @@ static noinline __init void detect_machine_type(void) | ||
138 | if (stsi(vmms, 3, 2, 2) || !vmms->count) | ||
139 | return; | ||
140 | |||
141 | - /* Running under KVM? If not we assume z/VM */ | ||
142 | + /* Detect known hypervisors */ | ||
143 | if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3)) | ||
144 | S390_lowcore.machine_flags |= MACHINE_FLAG_KVM; | ||
145 | - else | ||
146 | + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4)) | ||
147 | S390_lowcore.machine_flags |= MACHINE_FLAG_VM; | ||
148 | } | ||
149 | |||
150 | diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c | ||
151 | index c637c12f9e37..a0097f8bada8 100644 | ||
152 | --- a/arch/s390/kernel/setup.c | ||
153 | +++ b/arch/s390/kernel/setup.c | ||
154 | @@ -882,6 +882,8 @@ void __init setup_arch(char **cmdline_p) | ||
155 | pr_info("Linux is running under KVM in 64-bit mode\n"); | ||
156 | else if (MACHINE_IS_LPAR) | ||
157 | pr_info("Linux is running natively in 64-bit mode\n"); | ||
158 | + else | ||
159 | + pr_info("Linux is running as a guest in 64-bit mode\n"); | ||
160 | |||
161 | /* Have one command line that is parsed and saved in /proc/cmdline */ | ||
162 | /* boot_command_line has been already set up in early.c */ | ||
163 | diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c | ||
164 | index 2f8f7d7dd9a8..da02f4087d61 100644 | ||
165 | --- a/arch/s390/kernel/smp.c | ||
166 | +++ b/arch/s390/kernel/smp.c | ||
167 | @@ -371,9 +371,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data) | ||
168 | */ | ||
169 | void smp_call_ipl_cpu(void (*func)(void *), void *data) | ||
170 | { | ||
171 | + struct lowcore *lc = pcpu_devices->lowcore; | ||
172 | + | ||
173 | + if (pcpu_devices[0].address == stap()) | ||
174 | + lc = &S390_lowcore; | ||
175 | + | ||
176 | pcpu_delegate(&pcpu_devices[0], func, data, | ||
177 | - pcpu_devices->lowcore->panic_stack - | ||
178 | - PANIC_FRAME_OFFSET + PAGE_SIZE); | ||
179 | + lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE); | ||
180 | } | ||
181 | |||
182 | int smp_find_processor_id(u16 address) | ||
183 | @@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev, | ||
184 | { | ||
185 | int rc; | ||
186 | |||
187 | + rc = lock_device_hotplug_sysfs(); | ||
188 | + if (rc) | ||
189 | + return rc; | ||
190 | rc = smp_rescan_cpus(); | ||
191 | + unlock_device_hotplug(); | ||
192 | return rc ? rc : count; | ||
193 | } | ||
194 | static DEVICE_ATTR_WO(rescan); | ||
195 | diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S | ||
196 | index 7d0df78db727..40d2834a8101 100644 | ||
197 | --- a/arch/x86/entry/entry_64_compat.S | ||
198 | +++ b/arch/x86/entry/entry_64_compat.S | ||
199 | @@ -356,7 +356,8 @@ ENTRY(entry_INT80_compat) | ||
200 | |||
201 | /* Need to switch before accessing the thread stack. */ | ||
202 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi | ||
203 | - movq %rsp, %rdi | ||
204 | + /* In the Xen PV case we already run on the thread stack. */ | ||
205 | + ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV | ||
206 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp | ||
207 | |||
208 | pushq 6*8(%rdi) /* regs->ss */ | ||
209 | @@ -365,8 +366,9 @@ ENTRY(entry_INT80_compat) | ||
210 | pushq 3*8(%rdi) /* regs->cs */ | ||
211 | pushq 2*8(%rdi) /* regs->ip */ | ||
212 | pushq 1*8(%rdi) /* regs->orig_ax */ | ||
213 | - | ||
214 | pushq (%rdi) /* pt_regs->di */ | ||
215 | +.Lint80_keep_stack: | ||
216 | + | ||
217 | pushq %rsi /* pt_regs->si */ | ||
218 | xorl %esi, %esi /* nospec si */ | ||
219 | pushq %rdx /* pt_regs->dx */ | ||
220 | diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h | ||
221 | index eeeb9289c764..2252b63d38b5 100644 | ||
222 | --- a/arch/x86/include/asm/mmu_context.h | ||
223 | +++ b/arch/x86/include/asm/mmu_context.h | ||
224 | @@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) | ||
225 | |||
226 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); | ||
227 | |||
228 | +/* | ||
229 | + * Init a new mm. Used on mm copies, like at fork() | ||
230 | + * and on mm's that are brand-new, like at execve(). | ||
231 | + */ | ||
232 | static inline int init_new_context(struct task_struct *tsk, | ||
233 | struct mm_struct *mm) | ||
234 | { | ||
235 | @@ -228,8 +232,22 @@ do { \ | ||
236 | } while (0) | ||
237 | #endif | ||
238 | |||
239 | +static inline void arch_dup_pkeys(struct mm_struct *oldmm, | ||
240 | + struct mm_struct *mm) | ||
241 | +{ | ||
242 | +#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS | ||
243 | + if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) | ||
244 | + return; | ||
245 | + | ||
246 | + /* Duplicate the oldmm pkey state in mm: */ | ||
247 | + mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map; | ||
248 | + mm->context.execute_only_pkey = oldmm->context.execute_only_pkey; | ||
249 | +#endif | ||
250 | +} | ||
251 | + | ||
252 | static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) | ||
253 | { | ||
254 | + arch_dup_pkeys(oldmm, mm); | ||
255 | paravirt_arch_dup_mmap(oldmm, mm); | ||
256 | return ldt_dup_context(oldmm, mm); | ||
257 | } | ||
258 | diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c | ||
259 | index d9b71924c23c..7f89d609095a 100644 | ||
260 | --- a/arch/x86/kernel/kvm.c | ||
261 | +++ b/arch/x86/kernel/kvm.c | ||
262 | @@ -457,6 +457,7 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | ||
263 | #else | ||
264 | u64 ipi_bitmap = 0; | ||
265 | #endif | ||
266 | + long ret; | ||
267 | |||
268 | if (cpumask_empty(mask)) | ||
269 | return; | ||
270 | @@ -482,8 +483,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | ||
271 | } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) { | ||
272 | max = apic_id < max ? max : apic_id; | ||
273 | } else { | ||
274 | - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | ||
275 | + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | ||
276 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); | ||
277 | + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); | ||
278 | min = max = apic_id; | ||
279 | ipi_bitmap = 0; | ||
280 | } | ||
281 | @@ -491,8 +493,9 @@ static void __send_ipi_mask(const struct cpumask *mask, int vector) | ||
282 | } | ||
283 | |||
284 | if (ipi_bitmap) { | ||
285 | - kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | ||
286 | + ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap, | ||
287 | (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr); | ||
288 | + WARN_ONCE(ret < 0, "KVM: failed to send PV IPI: %ld", ret); | ||
289 | } | ||
290 | |||
291 | local_irq_restore(flags); | ||
292 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c | ||
293 | index 841740045554..39a0e34ff676 100644 | ||
294 | --- a/arch/x86/kvm/vmx.c | ||
295 | +++ b/arch/x86/kvm/vmx.c | ||
296 | @@ -8290,11 +8290,11 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu) | ||
297 | if (r < 0) | ||
298 | goto out_vmcs02; | ||
299 | |||
300 | - vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); | ||
301 | + vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); | ||
302 | if (!vmx->nested.cached_vmcs12) | ||
303 | goto out_cached_vmcs12; | ||
304 | |||
305 | - vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); | ||
306 | + vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL); | ||
307 | if (!vmx->nested.cached_shadow_vmcs12) | ||
308 | goto out_cached_shadow_vmcs12; | ||
309 | |||
310 | @@ -11733,7 +11733,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu, | ||
311 | !nested_exit_intr_ack_set(vcpu) || | ||
312 | (vmcs12->posted_intr_nv & 0xff00) || | ||
313 | (vmcs12->posted_intr_desc_addr & 0x3f) || | ||
314 | - (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr)))) | ||
315 | + (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))) | ||
316 | return -EINVAL; | ||
317 | |||
318 | /* tpr shadow is needed by all apicv features. */ | ||
319 | @@ -13984,13 +13984,17 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu, | ||
320 | else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) | ||
321 | copy_shadow_to_vmcs12(vmx); | ||
322 | |||
323 | - if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12))) | ||
324 | + /* | ||
325 | + * Copy over the full allocated size of vmcs12 rather than just the size | ||
326 | + * of the struct. | ||
327 | + */ | ||
328 | + if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE)) | ||
329 | return -EFAULT; | ||
330 | |||
331 | if (nested_cpu_has_shadow_vmcs(vmcs12) && | ||
332 | vmcs12->vmcs_link_pointer != -1ull) { | ||
333 | if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE, | ||
334 | - get_shadow_vmcs12(vcpu), sizeof(*vmcs12))) | ||
335 | + get_shadow_vmcs12(vcpu), VMCS12_SIZE)) | ||
336 | return -EFAULT; | ||
337 | } | ||
338 | |||
339 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c | ||
340 | index 956eecd227f8..5a9a3ebe8fba 100644 | ||
341 | --- a/arch/x86/kvm/x86.c | ||
342 | +++ b/arch/x86/kvm/x86.c | ||
343 | @@ -6277,8 +6277,7 @@ restart: | ||
344 | toggle_interruptibility(vcpu, ctxt->interruptibility); | ||
345 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; | ||
346 | kvm_rip_write(vcpu, ctxt->eip); | ||
347 | - if (r == EMULATE_DONE && | ||
348 | - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) | ||
349 | + if (r == EMULATE_DONE && ctxt->tf) | ||
350 | kvm_vcpu_do_singlestep(vcpu, &r); | ||
351 | if (!ctxt->have_exception || | ||
352 | exception_type(ctxt->exception.vector) == EXCPT_TRAP) | ||
353 | @@ -6868,10 +6867,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | ||
354 | case KVM_HC_CLOCK_PAIRING: | ||
355 | ret = kvm_pv_clock_pairing(vcpu, a0, a1); | ||
356 | break; | ||
357 | +#endif | ||
358 | case KVM_HC_SEND_IPI: | ||
359 | ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); | ||
360 | break; | ||
361 | -#endif | ||
362 | default: | ||
363 | ret = -KVM_ENOSYS; | ||
364 | break; | ||
365 | diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c | ||
366 | index 79778ab200e4..a53665116458 100644 | ||
367 | --- a/arch/x86/lib/kaslr.c | ||
368 | +++ b/arch/x86/lib/kaslr.c | ||
369 | @@ -36,8 +36,8 @@ static inline u16 i8254(void) | ||
370 | u16 status, timer; | ||
371 | |||
372 | do { | ||
373 | - outb(I8254_PORT_CONTROL, | ||
374 | - I8254_CMD_READBACK | I8254_SELECT_COUNTER0); | ||
375 | + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0, | ||
376 | + I8254_PORT_CONTROL); | ||
377 | status = inb(I8254_PORT_COUNTER0); | ||
378 | timer = inb(I8254_PORT_COUNTER0); | ||
379 | timer |= inb(I8254_PORT_COUNTER0) << 8; | ||
380 | diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c | ||
381 | index 75b331f8a16a..ea59c01ce8db 100644 | ||
382 | --- a/drivers/acpi/nfit/core.c | ||
383 | +++ b/drivers/acpi/nfit/core.c | ||
384 | @@ -391,6 +391,32 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) | ||
385 | return id; | ||
386 | } | ||
387 | |||
388 | +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, | ||
389 | + struct nd_cmd_pkg *call_pkg) | ||
390 | +{ | ||
391 | + if (call_pkg) { | ||
392 | + int i; | ||
393 | + | ||
394 | + if (nfit_mem->family != call_pkg->nd_family) | ||
395 | + return -ENOTTY; | ||
396 | + | ||
397 | + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) | ||
398 | + if (call_pkg->nd_reserved2[i]) | ||
399 | + return -EINVAL; | ||
400 | + return call_pkg->nd_command; | ||
401 | + } | ||
402 | + | ||
403 | + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */ | ||
404 | + if (nfit_mem->family == NVDIMM_FAMILY_INTEL) | ||
405 | + return cmd; | ||
406 | + | ||
407 | + /* | ||
408 | + * Force function number validation to fail since 0 is never | ||
409 | + * published as a valid function in dsm_mask. | ||
410 | + */ | ||
411 | + return 0; | ||
412 | +} | ||
413 | + | ||
414 | int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | ||
415 | unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) | ||
416 | { | ||
417 | @@ -404,30 +430,23 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | ||
418 | unsigned long cmd_mask, dsm_mask; | ||
419 | u32 offset, fw_status = 0; | ||
420 | acpi_handle handle; | ||
421 | - unsigned int func; | ||
422 | const guid_t *guid; | ||
423 | - int rc, i; | ||
424 | + int func, rc, i; | ||
425 | |||
426 | if (cmd_rc) | ||
427 | *cmd_rc = -EINVAL; | ||
428 | - func = cmd; | ||
429 | - if (cmd == ND_CMD_CALL) { | ||
430 | - call_pkg = buf; | ||
431 | - func = call_pkg->nd_command; | ||
432 | - | ||
433 | - for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) | ||
434 | - if (call_pkg->nd_reserved2[i]) | ||
435 | - return -EINVAL; | ||
436 | - } | ||
437 | |||
438 | if (nvdimm) { | ||
439 | struct acpi_device *adev = nfit_mem->adev; | ||
440 | |||
441 | if (!adev) | ||
442 | return -ENOTTY; | ||
443 | - if (call_pkg && nfit_mem->family != call_pkg->nd_family) | ||
444 | - return -ENOTTY; | ||
445 | |||
446 | + if (cmd == ND_CMD_CALL) | ||
447 | + call_pkg = buf; | ||
448 | + func = cmd_to_func(nfit_mem, cmd, call_pkg); | ||
449 | + if (func < 0) | ||
450 | + return func; | ||
451 | dimm_name = nvdimm_name(nvdimm); | ||
452 | cmd_name = nvdimm_cmd_name(cmd); | ||
453 | cmd_mask = nvdimm_cmd_mask(nvdimm); | ||
454 | @@ -438,6 +457,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | ||
455 | } else { | ||
456 | struct acpi_device *adev = to_acpi_dev(acpi_desc); | ||
457 | |||
458 | + func = cmd; | ||
459 | cmd_name = nvdimm_bus_cmd_name(cmd); | ||
460 | cmd_mask = nd_desc->cmd_mask; | ||
461 | dsm_mask = cmd_mask; | ||
462 | @@ -452,7 +472,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, | ||
463 | if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) | ||
464 | return -ENOTTY; | ||
465 | |||
466 | - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) | ||
467 | + /* | ||
468 | + * Check for a valid command. For ND_CMD_CALL, we also have to | ||
469 | + * make sure that the DSM function is supported. | ||
470 | + */ | ||
471 | + if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) | ||
472 | + return -ENOTTY; | ||
473 | + else if (!test_bit(cmd, &cmd_mask)) | ||
474 | return -ENOTTY; | ||
475 | |||
476 | in_obj.type = ACPI_TYPE_PACKAGE; | ||
477 | @@ -1764,6 +1790,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, | ||
478 | return 0; | ||
479 | } | ||
480 | |||
481 | + /* | ||
482 | + * Function 0 is the command interrogation function, don't | ||
483 | + * export it to potential userspace use, and enable it to be | ||
484 | + * used as an error value in acpi_nfit_ctl(). | ||
485 | + */ | ||
486 | + dsm_mask &= ~1UL; | ||
487 | + | ||
488 | guid = to_nfit_uuid(nfit_mem->family); | ||
489 | for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) | ||
490 | if (acpi_check_dsm(adev_dimm->handle, guid, | ||
491 | diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c | ||
492 | index b5e3103c1175..e43c876a9223 100644 | ||
493 | --- a/drivers/char/mwave/mwavedd.c | ||
494 | +++ b/drivers/char/mwave/mwavedd.c | ||
495 | @@ -59,6 +59,7 @@ | ||
496 | #include <linux/mutex.h> | ||
497 | #include <linux/delay.h> | ||
498 | #include <linux/serial_8250.h> | ||
499 | +#include <linux/nospec.h> | ||
500 | #include "smapi.h" | ||
501 | #include "mwavedd.h" | ||
502 | #include "3780i.h" | ||
503 | @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | ||
504 | ipcnum); | ||
505 | return -EINVAL; | ||
506 | } | ||
507 | + ipcnum = array_index_nospec(ipcnum, | ||
508 | + ARRAY_SIZE(pDrvData->IPCs)); | ||
509 | PRINTK_3(TRACE_MWAVE, | ||
510 | "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" | ||
511 | " ipcnum %x entry usIntCount %x\n", | ||
512 | @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | ||
513 | " Invalid ipcnum %x\n", ipcnum); | ||
514 | return -EINVAL; | ||
515 | } | ||
516 | + ipcnum = array_index_nospec(ipcnum, | ||
517 | + ARRAY_SIZE(pDrvData->IPCs)); | ||
518 | PRINTK_3(TRACE_MWAVE, | ||
519 | "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC" | ||
520 | " ipcnum %x, usIntCount %x\n", | ||
521 | @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, | ||
522 | ipcnum); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | + ipcnum = array_index_nospec(ipcnum, | ||
526 | + ARRAY_SIZE(pDrvData->IPCs)); | ||
527 | mutex_lock(&mwave_mutex); | ||
528 | if (pDrvData->IPCs[ipcnum].bIsEnabled == true) { | ||
529 | pDrvData->IPCs[ipcnum].bIsEnabled = false; | ||
530 | diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c | ||
531 | index 2d5d8b43727e..c4d0b6f6abf2 100644 | ||
532 | --- a/drivers/clk/socfpga/clk-pll-s10.c | ||
533 | +++ b/drivers/clk/socfpga/clk-pll-s10.c | ||
534 | @@ -43,7 +43,7 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, | ||
535 | /* Read mdiv and fdiv from the fdbck register */ | ||
536 | reg = readl(socfpgaclk->hw.reg + 0x4); | ||
537 | mdiv = (reg & SOCFPGA_PLL_MDIV_MASK) >> SOCFPGA_PLL_MDIV_SHIFT; | ||
538 | - vco_freq = (unsigned long long)parent_rate * (mdiv + 6); | ||
539 | + vco_freq = (unsigned long long)vco_freq * (mdiv + 6); | ||
540 | |||
541 | return (unsigned long)vco_freq; | ||
542 | } | ||
543 | diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c | ||
544 | index 5b238fc314ac..8281dfbf38c2 100644 | ||
545 | --- a/drivers/clk/socfpga/clk-s10.c | ||
546 | +++ b/drivers/clk/socfpga/clk-s10.c | ||
547 | @@ -12,17 +12,17 @@ | ||
548 | |||
549 | #include "stratix10-clk.h" | ||
550 | |||
551 | -static const char * const pll_mux[] = { "osc1", "cb_intosc_hs_div2_clk", | ||
552 | - "f2s_free_clk",}; | ||
553 | +static const char * const pll_mux[] = { "osc1", "cb-intosc-hs-div2-clk", | ||
554 | + "f2s-free-clk",}; | ||
555 | static const char * const cntr_mux[] = { "main_pll", "periph_pll", | ||
556 | - "osc1", "cb_intosc_hs_div2_clk", | ||
557 | - "f2s_free_clk"}; | ||
558 | -static const char * const boot_mux[] = { "osc1", "cb_intosc_hs_div2_clk",}; | ||
559 | + "osc1", "cb-intosc-hs-div2-clk", | ||
560 | + "f2s-free-clk"}; | ||
561 | +static const char * const boot_mux[] = { "osc1", "cb-intosc-hs-div2-clk",}; | ||
562 | |||
563 | static const char * const noc_free_mux[] = {"main_noc_base_clk", | ||
564 | "peri_noc_base_clk", | ||
565 | - "osc1", "cb_intosc_hs_div2_clk", | ||
566 | - "f2s_free_clk"}; | ||
567 | + "osc1", "cb-intosc-hs-div2-clk", | ||
568 | + "f2s-free-clk"}; | ||
569 | |||
570 | static const char * const emaca_free_mux[] = {"peri_emaca_clk", "boot_clk"}; | ||
571 | static const char * const emacb_free_mux[] = {"peri_emacb_clk", "boot_clk"}; | ||
572 | @@ -33,14 +33,14 @@ static const char * const s2f_usr1_free_mux[] = {"peri_s2f_usr1_clk", "boot_clk" | ||
573 | static const char * const psi_ref_free_mux[] = {"peri_psi_ref_clk", "boot_clk"}; | ||
574 | static const char * const mpu_mux[] = { "mpu_free_clk", "boot_clk",}; | ||
575 | |||
576 | -static const char * const s2f_usr0_mux[] = {"f2s_free_clk", "boot_clk"}; | ||
577 | +static const char * const s2f_usr0_mux[] = {"f2s-free-clk", "boot_clk"}; | ||
578 | static const char * const emac_mux[] = {"emaca_free_clk", "emacb_free_clk"}; | ||
579 | static const char * const noc_mux[] = {"noc_free_clk", "boot_clk"}; | ||
580 | |||
581 | static const char * const mpu_free_mux[] = {"main_mpu_base_clk", | ||
582 | "peri_mpu_base_clk", | ||
583 | - "osc1", "cb_intosc_hs_div2_clk", | ||
584 | - "f2s_free_clk"}; | ||
585 | + "osc1", "cb-intosc-hs-div2-clk", | ||
586 | + "f2s-free-clk"}; | ||
587 | |||
588 | /* clocks in AO (always on) controller */ | ||
589 | static const struct stratix10_pll_clock s10_pll_clks[] = { | ||
590 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | ||
591 | index a028661d9e20..92b11de19581 100644 | ||
592 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | ||
593 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | ||
594 | @@ -576,6 +576,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { | ||
595 | { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
596 | { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
597 | { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
598 | + { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX }, | ||
599 | { 0, 0, 0, 0, 0 }, | ||
600 | }; | ||
601 | |||
602 | diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c | ||
603 | index 191b314f9e9e..709475d5cc30 100644 | ||
604 | --- a/drivers/gpu/drm/meson/meson_crtc.c | ||
605 | +++ b/drivers/gpu/drm/meson/meson_crtc.c | ||
606 | @@ -45,7 +45,6 @@ struct meson_crtc { | ||
607 | struct drm_crtc base; | ||
608 | struct drm_pending_vblank_event *event; | ||
609 | struct meson_drm *priv; | ||
610 | - bool enabled; | ||
611 | }; | ||
612 | #define to_meson_crtc(x) container_of(x, struct meson_crtc, base) | ||
613 | |||
614 | @@ -81,7 +80,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = { | ||
615 | |||
616 | }; | ||
617 | |||
618 | -static void meson_crtc_enable(struct drm_crtc *crtc) | ||
619 | +static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
620 | + struct drm_crtc_state *old_state) | ||
621 | { | ||
622 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
623 | struct drm_crtc_state *crtc_state = crtc->state; | ||
624 | @@ -103,20 +103,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc) | ||
625 | |||
626 | drm_crtc_vblank_on(crtc); | ||
627 | |||
628 | - meson_crtc->enabled = true; | ||
629 | -} | ||
630 | - | ||
631 | -static void meson_crtc_atomic_enable(struct drm_crtc *crtc, | ||
632 | - struct drm_crtc_state *old_state) | ||
633 | -{ | ||
634 | - struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
635 | - struct meson_drm *priv = meson_crtc->priv; | ||
636 | - | ||
637 | - DRM_DEBUG_DRIVER("\n"); | ||
638 | - | ||
639 | - if (!meson_crtc->enabled) | ||
640 | - meson_crtc_enable(crtc); | ||
641 | - | ||
642 | priv->viu.osd1_enabled = true; | ||
643 | } | ||
644 | |||
645 | @@ -142,8 +128,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc, | ||
646 | |||
647 | crtc->state->event = NULL; | ||
648 | } | ||
649 | - | ||
650 | - meson_crtc->enabled = false; | ||
651 | } | ||
652 | |||
653 | static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | ||
654 | @@ -152,9 +136,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc, | ||
655 | struct meson_crtc *meson_crtc = to_meson_crtc(crtc); | ||
656 | unsigned long flags; | ||
657 | |||
658 | - if (crtc->state->enable && !meson_crtc->enabled) | ||
659 | - meson_crtc_enable(crtc); | ||
660 | - | ||
661 | if (crtc->state->event) { | ||
662 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
663 | |||
664 | diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c | ||
665 | index d3443125e661..bf5f294f172f 100644 | ||
666 | --- a/drivers/gpu/drm/meson/meson_drv.c | ||
667 | +++ b/drivers/gpu/drm/meson/meson_drv.c | ||
668 | @@ -82,6 +82,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = { | ||
669 | .fb_create = drm_gem_fb_create, | ||
670 | }; | ||
671 | |||
672 | +static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = { | ||
673 | + .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm, | ||
674 | +}; | ||
675 | + | ||
676 | static irqreturn_t meson_irq(int irq, void *arg) | ||
677 | { | ||
678 | struct drm_device *dev = arg; | ||
679 | @@ -246,6 +250,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) | ||
680 | drm->mode_config.max_width = 3840; | ||
681 | drm->mode_config.max_height = 2160; | ||
682 | drm->mode_config.funcs = &meson_mode_config_funcs; | ||
683 | + drm->mode_config.helper_private = &meson_mode_config_helpers; | ||
684 | |||
685 | /* Hardware Initialization */ | ||
686 | |||
687 | diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c | ||
688 | index b1b788082793..d2a735ac9ba1 100644 | ||
689 | --- a/drivers/hv/hv_balloon.c | ||
690 | +++ b/drivers/hv/hv_balloon.c | ||
691 | @@ -888,12 +888,14 @@ static unsigned long handle_pg_range(unsigned long pg_start, | ||
692 | pfn_cnt -= pgs_ol; | ||
693 | /* | ||
694 | * Check if the corresponding memory block is already | ||
695 | - * online by checking its last previously backed page. | ||
696 | - * In case it is we need to bring rest (which was not | ||
697 | - * backed previously) online too. | ||
698 | + * online. It is possible to observe struct pages still | ||
699 | + * being uninitialized here so check section instead. | ||
700 | + * In case the section is online we need to bring the | ||
701 | + * rest of pfns (which were not backed previously) | ||
702 | + * online too. | ||
703 | */ | ||
704 | if (start_pfn > has->start_pfn && | ||
705 | - !PageReserved(pfn_to_page(start_pfn - 1))) | ||
706 | + online_section_nr(pfn_to_section_nr(start_pfn))) | ||
707 | hv_bring_pgs_online(has, start_pfn, pgs_ol); | ||
708 | |||
709 | } | ||
710 | diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c | ||
711 | index 3e90eb91db45..6cb45f256107 100644 | ||
712 | --- a/drivers/hv/ring_buffer.c | ||
713 | +++ b/drivers/hv/ring_buffer.c | ||
714 | @@ -164,26 +164,25 @@ hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, | ||
715 | } | ||
716 | |||
717 | /* Get various debug metrics for the specified ring buffer. */ | ||
718 | -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | ||
719 | - struct hv_ring_buffer_debug_info *debug_info) | ||
720 | +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | ||
721 | + struct hv_ring_buffer_debug_info *debug_info) | ||
722 | { | ||
723 | u32 bytes_avail_towrite; | ||
724 | u32 bytes_avail_toread; | ||
725 | |||
726 | - if (ring_info->ring_buffer) { | ||
727 | - hv_get_ringbuffer_availbytes(ring_info, | ||
728 | - &bytes_avail_toread, | ||
729 | - &bytes_avail_towrite); | ||
730 | - | ||
731 | - debug_info->bytes_avail_toread = bytes_avail_toread; | ||
732 | - debug_info->bytes_avail_towrite = bytes_avail_towrite; | ||
733 | - debug_info->current_read_index = | ||
734 | - ring_info->ring_buffer->read_index; | ||
735 | - debug_info->current_write_index = | ||
736 | - ring_info->ring_buffer->write_index; | ||
737 | - debug_info->current_interrupt_mask = | ||
738 | - ring_info->ring_buffer->interrupt_mask; | ||
739 | - } | ||
740 | + if (!ring_info->ring_buffer) | ||
741 | + return -EINVAL; | ||
742 | + | ||
743 | + hv_get_ringbuffer_availbytes(ring_info, | ||
744 | + &bytes_avail_toread, | ||
745 | + &bytes_avail_towrite); | ||
746 | + debug_info->bytes_avail_toread = bytes_avail_toread; | ||
747 | + debug_info->bytes_avail_towrite = bytes_avail_towrite; | ||
748 | + debug_info->current_read_index = ring_info->ring_buffer->read_index; | ||
749 | + debug_info->current_write_index = ring_info->ring_buffer->write_index; | ||
750 | + debug_info->current_interrupt_mask | ||
751 | + = ring_info->ring_buffer->interrupt_mask; | ||
752 | + return 0; | ||
753 | } | ||
754 | EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo); | ||
755 | |||
756 | diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c | ||
757 | index 2c6d5c7a4445..9aa18f387a34 100644 | ||
758 | --- a/drivers/hv/vmbus_drv.c | ||
759 | +++ b/drivers/hv/vmbus_drv.c | ||
760 | @@ -313,12 +313,16 @@ static ssize_t out_intr_mask_show(struct device *dev, | ||
761 | { | ||
762 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
763 | struct hv_ring_buffer_debug_info outbound; | ||
764 | + int ret; | ||
765 | |||
766 | if (!hv_dev->channel) | ||
767 | return -ENODEV; | ||
768 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
769 | - return -EINVAL; | ||
770 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | ||
771 | + | ||
772 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, | ||
773 | + &outbound); | ||
774 | + if (ret < 0) | ||
775 | + return ret; | ||
776 | + | ||
777 | return sprintf(buf, "%d\n", outbound.current_interrupt_mask); | ||
778 | } | ||
779 | static DEVICE_ATTR_RO(out_intr_mask); | ||
780 | @@ -328,12 +332,15 @@ static ssize_t out_read_index_show(struct device *dev, | ||
781 | { | ||
782 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
783 | struct hv_ring_buffer_debug_info outbound; | ||
784 | + int ret; | ||
785 | |||
786 | if (!hv_dev->channel) | ||
787 | return -ENODEV; | ||
788 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
789 | - return -EINVAL; | ||
790 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | ||
791 | + | ||
792 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, | ||
793 | + &outbound); | ||
794 | + if (ret < 0) | ||
795 | + return ret; | ||
796 | return sprintf(buf, "%d\n", outbound.current_read_index); | ||
797 | } | ||
798 | static DEVICE_ATTR_RO(out_read_index); | ||
799 | @@ -344,12 +351,15 @@ static ssize_t out_write_index_show(struct device *dev, | ||
800 | { | ||
801 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
802 | struct hv_ring_buffer_debug_info outbound; | ||
803 | + int ret; | ||
804 | |||
805 | if (!hv_dev->channel) | ||
806 | return -ENODEV; | ||
807 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
808 | - return -EINVAL; | ||
809 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | ||
810 | + | ||
811 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, | ||
812 | + &outbound); | ||
813 | + if (ret < 0) | ||
814 | + return ret; | ||
815 | return sprintf(buf, "%d\n", outbound.current_write_index); | ||
816 | } | ||
817 | static DEVICE_ATTR_RO(out_write_index); | ||
818 | @@ -360,12 +370,15 @@ static ssize_t out_read_bytes_avail_show(struct device *dev, | ||
819 | { | ||
820 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
821 | struct hv_ring_buffer_debug_info outbound; | ||
822 | + int ret; | ||
823 | |||
824 | if (!hv_dev->channel) | ||
825 | return -ENODEV; | ||
826 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
827 | - return -EINVAL; | ||
828 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | ||
829 | + | ||
830 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, | ||
831 | + &outbound); | ||
832 | + if (ret < 0) | ||
833 | + return ret; | ||
834 | return sprintf(buf, "%d\n", outbound.bytes_avail_toread); | ||
835 | } | ||
836 | static DEVICE_ATTR_RO(out_read_bytes_avail); | ||
837 | @@ -376,12 +389,15 @@ static ssize_t out_write_bytes_avail_show(struct device *dev, | ||
838 | { | ||
839 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
840 | struct hv_ring_buffer_debug_info outbound; | ||
841 | + int ret; | ||
842 | |||
843 | if (!hv_dev->channel) | ||
844 | return -ENODEV; | ||
845 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
846 | - return -EINVAL; | ||
847 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound); | ||
848 | + | ||
849 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, | ||
850 | + &outbound); | ||
851 | + if (ret < 0) | ||
852 | + return ret; | ||
853 | return sprintf(buf, "%d\n", outbound.bytes_avail_towrite); | ||
854 | } | ||
855 | static DEVICE_ATTR_RO(out_write_bytes_avail); | ||
856 | @@ -391,12 +407,15 @@ static ssize_t in_intr_mask_show(struct device *dev, | ||
857 | { | ||
858 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
859 | struct hv_ring_buffer_debug_info inbound; | ||
860 | + int ret; | ||
861 | |||
862 | if (!hv_dev->channel) | ||
863 | return -ENODEV; | ||
864 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
865 | - return -EINVAL; | ||
866 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
867 | + | ||
868 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
869 | + if (ret < 0) | ||
870 | + return ret; | ||
871 | + | ||
872 | return sprintf(buf, "%d\n", inbound.current_interrupt_mask); | ||
873 | } | ||
874 | static DEVICE_ATTR_RO(in_intr_mask); | ||
875 | @@ -406,12 +425,15 @@ static ssize_t in_read_index_show(struct device *dev, | ||
876 | { | ||
877 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
878 | struct hv_ring_buffer_debug_info inbound; | ||
879 | + int ret; | ||
880 | |||
881 | if (!hv_dev->channel) | ||
882 | return -ENODEV; | ||
883 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
884 | - return -EINVAL; | ||
885 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
886 | + | ||
887 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
888 | + if (ret < 0) | ||
889 | + return ret; | ||
890 | + | ||
891 | return sprintf(buf, "%d\n", inbound.current_read_index); | ||
892 | } | ||
893 | static DEVICE_ATTR_RO(in_read_index); | ||
894 | @@ -421,12 +443,15 @@ static ssize_t in_write_index_show(struct device *dev, | ||
895 | { | ||
896 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
897 | struct hv_ring_buffer_debug_info inbound; | ||
898 | + int ret; | ||
899 | |||
900 | if (!hv_dev->channel) | ||
901 | return -ENODEV; | ||
902 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
903 | - return -EINVAL; | ||
904 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
905 | + | ||
906 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
907 | + if (ret < 0) | ||
908 | + return ret; | ||
909 | + | ||
910 | return sprintf(buf, "%d\n", inbound.current_write_index); | ||
911 | } | ||
912 | static DEVICE_ATTR_RO(in_write_index); | ||
913 | @@ -437,12 +462,15 @@ static ssize_t in_read_bytes_avail_show(struct device *dev, | ||
914 | { | ||
915 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
916 | struct hv_ring_buffer_debug_info inbound; | ||
917 | + int ret; | ||
918 | |||
919 | if (!hv_dev->channel) | ||
920 | return -ENODEV; | ||
921 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
922 | - return -EINVAL; | ||
923 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
924 | + | ||
925 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
926 | + if (ret < 0) | ||
927 | + return ret; | ||
928 | + | ||
929 | return sprintf(buf, "%d\n", inbound.bytes_avail_toread); | ||
930 | } | ||
931 | static DEVICE_ATTR_RO(in_read_bytes_avail); | ||
932 | @@ -453,12 +481,15 @@ static ssize_t in_write_bytes_avail_show(struct device *dev, | ||
933 | { | ||
934 | struct hv_device *hv_dev = device_to_hv_device(dev); | ||
935 | struct hv_ring_buffer_debug_info inbound; | ||
936 | + int ret; | ||
937 | |||
938 | if (!hv_dev->channel) | ||
939 | return -ENODEV; | ||
940 | - if (hv_dev->channel->state != CHANNEL_OPENED_STATE) | ||
941 | - return -EINVAL; | ||
942 | - hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
943 | + | ||
944 | + ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound); | ||
945 | + if (ret < 0) | ||
946 | + return ret; | ||
947 | + | ||
948 | return sprintf(buf, "%d\n", inbound.bytes_avail_towrite); | ||
949 | } | ||
950 | static DEVICE_ATTR_RO(in_write_bytes_avail); | ||
951 | diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c | ||
952 | index 45c997430332..0e51803de0e7 100644 | ||
953 | --- a/drivers/ide/ide-proc.c | ||
954 | +++ b/drivers/ide/ide-proc.c | ||
955 | @@ -544,7 +544,7 @@ void ide_proc_port_register_devices(ide_hwif_t *hwif) | ||
956 | drive->proc = proc_mkdir(drive->name, parent); | ||
957 | if (drive->proc) { | ||
958 | ide_add_proc_entries(drive->proc, generic_drive_entries, drive); | ||
959 | - proc_create_data("setting", S_IFREG|S_IRUSR|S_IWUSR, | ||
960 | + proc_create_data("settings", S_IFREG|S_IRUSR|S_IWUSR, | ||
961 | drive->proc, &ide_settings_proc_fops, | ||
962 | drive); | ||
963 | } | ||
964 | diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c | ||
965 | index cfc8b94527b9..aa4e431cbcd3 100644 | ||
966 | --- a/drivers/input/joystick/xpad.c | ||
967 | +++ b/drivers/input/joystick/xpad.c | ||
968 | @@ -252,6 +252,8 @@ static const struct xpad_device { | ||
969 | { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, | ||
970 | { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, | ||
971 | { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, | ||
972 | + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, | ||
973 | + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 }, | ||
974 | { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, | ||
975 | { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, | ||
976 | { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, | ||
977 | @@ -428,6 +430,7 @@ static const struct usb_device_id xpad_table[] = { | ||
978 | XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ | ||
979 | XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ | ||
980 | XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ | ||
981 | + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */ | ||
982 | XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ | ||
983 | XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ | ||
984 | XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ | ||
985 | diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c | ||
986 | index 8ec483e8688b..26ec603fe220 100644 | ||
987 | --- a/drivers/input/misc/uinput.c | ||
988 | +++ b/drivers/input/misc/uinput.c | ||
989 | @@ -39,6 +39,7 @@ | ||
990 | #include <linux/init.h> | ||
991 | #include <linux/fs.h> | ||
992 | #include <linux/miscdevice.h> | ||
993 | +#include <linux/overflow.h> | ||
994 | #include <linux/input/mt.h> | ||
995 | #include "../input-compat.h" | ||
996 | |||
997 | @@ -405,7 +406,7 @@ static int uinput_open(struct inode *inode, struct file *file) | ||
998 | static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | ||
999 | const struct input_absinfo *abs) | ||
1000 | { | ||
1001 | - int min, max; | ||
1002 | + int min, max, range; | ||
1003 | |||
1004 | min = abs->minimum; | ||
1005 | max = abs->maximum; | ||
1006 | @@ -417,7 +418,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code, | ||
1007 | return -EINVAL; | ||
1008 | } | ||
1009 | |||
1010 | - if (abs->flat > max - min) { | ||
1011 | + if (!check_sub_overflow(max, min, &range) && abs->flat > range) { | ||
1012 | printk(KERN_DEBUG | ||
1013 | "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n", | ||
1014 | UINPUT_NAME, code, abs->flat, min, max); | ||
1015 | diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c | ||
1016 | index c2df341ff6fa..cf3abb8d284f 100644 | ||
1017 | --- a/drivers/irqchip/irq-gic-v3-its.c | ||
1018 | +++ b/drivers/irqchip/irq-gic-v3-its.c | ||
1019 | @@ -2267,13 +2267,14 @@ static void its_free_device(struct its_device *its_dev) | ||
1020 | kfree(its_dev); | ||
1021 | } | ||
1022 | |||
1023 | -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) | ||
1024 | +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) | ||
1025 | { | ||
1026 | int idx; | ||
1027 | |||
1028 | - idx = find_first_zero_bit(dev->event_map.lpi_map, | ||
1029 | - dev->event_map.nr_lpis); | ||
1030 | - if (idx == dev->event_map.nr_lpis) | ||
1031 | + idx = bitmap_find_free_region(dev->event_map.lpi_map, | ||
1032 | + dev->event_map.nr_lpis, | ||
1033 | + get_count_order(nvecs)); | ||
1034 | + if (idx < 0) | ||
1035 | return -ENOSPC; | ||
1036 | |||
1037 | *hwirq = dev->event_map.lpi_base + idx; | ||
1038 | @@ -2369,21 +2370,21 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
1039 | int err; | ||
1040 | int i; | ||
1041 | |||
1042 | - for (i = 0; i < nr_irqs; i++) { | ||
1043 | - err = its_alloc_device_irq(its_dev, &hwirq); | ||
1044 | - if (err) | ||
1045 | - return err; | ||
1046 | + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); | ||
1047 | + if (err) | ||
1048 | + return err; | ||
1049 | |||
1050 | - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); | ||
1051 | + for (i = 0; i < nr_irqs; i++) { | ||
1052 | + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); | ||
1053 | if (err) | ||
1054 | return err; | ||
1055 | |||
1056 | irq_domain_set_hwirq_and_chip(domain, virq + i, | ||
1057 | - hwirq, &its_irq_chip, its_dev); | ||
1058 | + hwirq + i, &its_irq_chip, its_dev); | ||
1059 | irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i))); | ||
1060 | pr_debug("ID:%d pID:%d vID:%d\n", | ||
1061 | - (int)(hwirq - its_dev->event_map.lpi_base), | ||
1062 | - (int) hwirq, virq + i); | ||
1063 | + (int)(hwirq + i - its_dev->event_map.lpi_base), | ||
1064 | + (int)(hwirq + i), virq + i); | ||
1065 | } | ||
1066 | |||
1067 | return 0; | ||
1068 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c | ||
1069 | index f2ec882f96be..5921ecc670c1 100644 | ||
1070 | --- a/drivers/md/dm-crypt.c | ||
1071 | +++ b/drivers/md/dm-crypt.c | ||
1072 | @@ -2405,9 +2405,21 @@ static int crypt_ctr_cipher_new(struct dm_target *ti, char *cipher_in, char *key | ||
1073 | * capi:cipher_api_spec-iv:ivopts | ||
1074 | */ | ||
1075 | tmp = &cipher_in[strlen("capi:")]; | ||
1076 | - cipher_api = strsep(&tmp, "-"); | ||
1077 | - *ivmode = strsep(&tmp, ":"); | ||
1078 | - *ivopts = tmp; | ||
1079 | + | ||
1080 | + /* Separate IV options if present, it can contain another '-' in hash name */ | ||
1081 | + *ivopts = strrchr(tmp, ':'); | ||
1082 | + if (*ivopts) { | ||
1083 | + **ivopts = '\0'; | ||
1084 | + (*ivopts)++; | ||
1085 | + } | ||
1086 | + /* Parse IV mode */ | ||
1087 | + *ivmode = strrchr(tmp, '-'); | ||
1088 | + if (*ivmode) { | ||
1089 | + **ivmode = '\0'; | ||
1090 | + (*ivmode)++; | ||
1091 | + } | ||
1092 | + /* The rest is crypto API spec */ | ||
1093 | + cipher_api = tmp; | ||
1094 | |||
1095 | if (*ivmode && !strcmp(*ivmode, "lmk")) | ||
1096 | cc->tfms_count = 64; | ||
1097 | @@ -2477,11 +2489,8 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key | ||
1098 | goto bad_mem; | ||
1099 | |||
1100 | chainmode = strsep(&tmp, "-"); | ||
1101 | - *ivopts = strsep(&tmp, "-"); | ||
1102 | - *ivmode = strsep(&*ivopts, ":"); | ||
1103 | - | ||
1104 | - if (tmp) | ||
1105 | - DMWARN("Ignoring unexpected additional cipher options"); | ||
1106 | + *ivmode = strsep(&tmp, ":"); | ||
1107 | + *ivopts = tmp; | ||
1108 | |||
1109 | /* | ||
1110 | * For compatibility with the original dm-crypt mapping format, if | ||
1111 | diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c | ||
1112 | index 20b0776e39ef..ed3caceaed07 100644 | ||
1113 | --- a/drivers/md/dm-thin-metadata.c | ||
1114 | +++ b/drivers/md/dm-thin-metadata.c | ||
1115 | @@ -1678,7 +1678,7 @@ int dm_thin_remove_range(struct dm_thin_device *td, | ||
1116 | return r; | ||
1117 | } | ||
1118 | |||
1119 | -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) | ||
1120 | +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result) | ||
1121 | { | ||
1122 | int r; | ||
1123 | uint32_t ref_count; | ||
1124 | @@ -1686,7 +1686,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu | ||
1125 | down_read(&pmd->root_lock); | ||
1126 | r = dm_sm_get_count(pmd->data_sm, b, &ref_count); | ||
1127 | if (!r) | ||
1128 | - *result = (ref_count != 0); | ||
1129 | + *result = (ref_count > 1); | ||
1130 | up_read(&pmd->root_lock); | ||
1131 | |||
1132 | return r; | ||
1133 | diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h | ||
1134 | index 35e954ea20a9..f6be0d733c20 100644 | ||
1135 | --- a/drivers/md/dm-thin-metadata.h | ||
1136 | +++ b/drivers/md/dm-thin-metadata.h | ||
1137 | @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd, | ||
1138 | |||
1139 | int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); | ||
1140 | |||
1141 | -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); | ||
1142 | +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result); | ||
1143 | |||
1144 | int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); | ||
1145 | int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e); | ||
1146 | diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c | ||
1147 | index 1f225a1e08dd..c30a7850b2da 100644 | ||
1148 | --- a/drivers/md/dm-thin.c | ||
1149 | +++ b/drivers/md/dm-thin.c | ||
1150 | @@ -1048,7 +1048,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | ||
1151 | * passdown we have to check that these blocks are now unused. | ||
1152 | */ | ||
1153 | int r = 0; | ||
1154 | - bool used = true; | ||
1155 | + bool shared = true; | ||
1156 | struct thin_c *tc = m->tc; | ||
1157 | struct pool *pool = tc->pool; | ||
1158 | dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin; | ||
1159 | @@ -1058,11 +1058,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | ||
1160 | while (b != end) { | ||
1161 | /* find start of unmapped run */ | ||
1162 | for (; b < end; b++) { | ||
1163 | - r = dm_pool_block_is_used(pool->pmd, b, &used); | ||
1164 | + r = dm_pool_block_is_shared(pool->pmd, b, &shared); | ||
1165 | if (r) | ||
1166 | goto out; | ||
1167 | |||
1168 | - if (!used) | ||
1169 | + if (!shared) | ||
1170 | break; | ||
1171 | } | ||
1172 | |||
1173 | @@ -1071,11 +1071,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m | ||
1174 | |||
1175 | /* find end of run */ | ||
1176 | for (e = b + 1; e != end; e++) { | ||
1177 | - r = dm_pool_block_is_used(pool->pmd, e, &used); | ||
1178 | + r = dm_pool_block_is_shared(pool->pmd, e, &shared); | ||
1179 | if (r) | ||
1180 | goto out; | ||
1181 | |||
1182 | - if (used) | ||
1183 | + if (shared) | ||
1184 | break; | ||
1185 | } | ||
1186 | |||
1187 | diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c | ||
1188 | index b8aaa684c397..2ed23c99f59f 100644 | ||
1189 | --- a/drivers/misc/ibmvmc.c | ||
1190 | +++ b/drivers/misc/ibmvmc.c | ||
1191 | @@ -820,21 +820,24 @@ static int ibmvmc_send_msg(struct crq_server_adapter *adapter, | ||
1192 | * | ||
1193 | * Return: | ||
1194 | * 0 - Success | ||
1195 | + * Non-zero - Failure | ||
1196 | */ | ||
1197 | static int ibmvmc_open(struct inode *inode, struct file *file) | ||
1198 | { | ||
1199 | struct ibmvmc_file_session *session; | ||
1200 | - int rc = 0; | ||
1201 | |||
1202 | pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__, | ||
1203 | (unsigned long)inode, (unsigned long)file, | ||
1204 | ibmvmc.state); | ||
1205 | |||
1206 | session = kzalloc(sizeof(*session), GFP_KERNEL); | ||
1207 | + if (!session) | ||
1208 | + return -ENOMEM; | ||
1209 | + | ||
1210 | session->file = file; | ||
1211 | file->private_data = session; | ||
1212 | |||
1213 | - return rc; | ||
1214 | + return 0; | ||
1215 | } | ||
1216 | |||
1217 | /** | ||
1218 | diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h | ||
1219 | index e4b10b2d1a08..23739a60517f 100644 | ||
1220 | --- a/drivers/misc/mei/hw-me-regs.h | ||
1221 | +++ b/drivers/misc/mei/hw-me-regs.h | ||
1222 | @@ -127,6 +127,8 @@ | ||
1223 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ | ||
1224 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ | ||
1225 | |||
1226 | +#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */ | ||
1227 | + | ||
1228 | #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ | ||
1229 | |||
1230 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | ||
1231 | diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c | ||
1232 | index ea4e152270a3..c8e21c894a5f 100644 | ||
1233 | --- a/drivers/misc/mei/pci-me.c | ||
1234 | +++ b/drivers/misc/mei/pci-me.c | ||
1235 | @@ -88,11 +88,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | ||
1236 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)}, | ||
1237 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)}, | ||
1238 | {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)}, | ||
1239 | - {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH8_CFG)}, | ||
1240 | + {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_CFG)}, | ||
1241 | |||
1242 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, | ||
1243 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, | ||
1244 | |||
1245 | + {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, MEI_ME_PCH8_CFG)}, | ||
1246 | + | ||
1247 | {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, | ||
1248 | |||
1249 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | ||
1250 | diff --git a/drivers/mmc/host/dw_mmc-bluefield.c b/drivers/mmc/host/dw_mmc-bluefield.c | ||
1251 | index 54c3fbb4a391..db56d4f58aaa 100644 | ||
1252 | --- a/drivers/mmc/host/dw_mmc-bluefield.c | ||
1253 | +++ b/drivers/mmc/host/dw_mmc-bluefield.c | ||
1254 | @@ -1,11 +1,6 @@ | ||
1255 | // SPDX-License-Identifier: GPL-2.0 | ||
1256 | /* | ||
1257 | * Copyright (C) 2018 Mellanox Technologies. | ||
1258 | - * | ||
1259 | - * This program is free software; you can redistribute it and/or modify | ||
1260 | - * it under the terms of the GNU General Public License as published by | ||
1261 | - * the Free Software Foundation; either version 2 of the License, or | ||
1262 | - * (at your option) any later version. | ||
1263 | */ | ||
1264 | |||
1265 | #include <linux/bitfield.h> | ||
1266 | diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c | ||
1267 | index c201c378537e..ef9deaa361c7 100644 | ||
1268 | --- a/drivers/mmc/host/meson-gx-mmc.c | ||
1269 | +++ b/drivers/mmc/host/meson-gx-mmc.c | ||
1270 | @@ -174,6 +174,8 @@ struct meson_host { | ||
1271 | struct sd_emmc_desc *descs; | ||
1272 | dma_addr_t descs_dma_addr; | ||
1273 | |||
1274 | + int irq; | ||
1275 | + | ||
1276 | bool vqmmc_enabled; | ||
1277 | }; | ||
1278 | |||
1279 | @@ -1181,7 +1183,7 @@ static int meson_mmc_probe(struct platform_device *pdev) | ||
1280 | struct resource *res; | ||
1281 | struct meson_host *host; | ||
1282 | struct mmc_host *mmc; | ||
1283 | - int ret, irq; | ||
1284 | + int ret; | ||
1285 | |||
1286 | mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev); | ||
1287 | if (!mmc) | ||
1288 | @@ -1228,8 +1230,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | ||
1289 | goto free_host; | ||
1290 | } | ||
1291 | |||
1292 | - irq = platform_get_irq(pdev, 0); | ||
1293 | - if (irq <= 0) { | ||
1294 | + host->irq = platform_get_irq(pdev, 0); | ||
1295 | + if (host->irq <= 0) { | ||
1296 | dev_err(&pdev->dev, "failed to get interrupt resource.\n"); | ||
1297 | ret = -EINVAL; | ||
1298 | goto free_host; | ||
1299 | @@ -1283,9 +1285,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | ||
1300 | writel(IRQ_CRC_ERR | IRQ_TIMEOUTS | IRQ_END_OF_CHAIN, | ||
1301 | host->regs + SD_EMMC_IRQ_EN); | ||
1302 | |||
1303 | - ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq, | ||
1304 | - meson_mmc_irq_thread, IRQF_SHARED, | ||
1305 | - NULL, host); | ||
1306 | + ret = request_threaded_irq(host->irq, meson_mmc_irq, | ||
1307 | + meson_mmc_irq_thread, IRQF_SHARED, NULL, host); | ||
1308 | if (ret) | ||
1309 | goto err_init_clk; | ||
1310 | |||
1311 | @@ -1303,7 +1304,7 @@ static int meson_mmc_probe(struct platform_device *pdev) | ||
1312 | if (host->bounce_buf == NULL) { | ||
1313 | dev_err(host->dev, "Unable to map allocate DMA bounce buffer.\n"); | ||
1314 | ret = -ENOMEM; | ||
1315 | - goto err_init_clk; | ||
1316 | + goto err_free_irq; | ||
1317 | } | ||
1318 | |||
1319 | host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, | ||
1320 | @@ -1322,6 +1323,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | ||
1321 | err_bounce_buf: | ||
1322 | dma_free_coherent(host->dev, host->bounce_buf_size, | ||
1323 | host->bounce_buf, host->bounce_dma_addr); | ||
1324 | +err_free_irq: | ||
1325 | + free_irq(host->irq, host); | ||
1326 | err_init_clk: | ||
1327 | clk_disable_unprepare(host->mmc_clk); | ||
1328 | err_core_clk: | ||
1329 | @@ -1339,6 +1342,7 @@ static int meson_mmc_remove(struct platform_device *pdev) | ||
1330 | |||
1331 | /* disable interrupts */ | ||
1332 | writel(0, host->regs + SD_EMMC_IRQ_EN); | ||
1333 | + free_irq(host->irq, host); | ||
1334 | |||
1335 | dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN, | ||
1336 | host->descs, host->descs_dma_addr); | ||
1337 | diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c | ||
1338 | index 3b3f88ffab53..c05e4d50d43d 100644 | ||
1339 | --- a/drivers/net/can/dev.c | ||
1340 | +++ b/drivers/net/can/dev.c | ||
1341 | @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); | ||
1342 | struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) | ||
1343 | { | ||
1344 | struct can_priv *priv = netdev_priv(dev); | ||
1345 | - struct sk_buff *skb = priv->echo_skb[idx]; | ||
1346 | - struct canfd_frame *cf; | ||
1347 | |||
1348 | if (idx >= priv->echo_skb_max) { | ||
1349 | netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", | ||
1350 | @@ -489,20 +487,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 | ||
1351 | return NULL; | ||
1352 | } | ||
1353 | |||
1354 | - if (!skb) { | ||
1355 | - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", | ||
1356 | - __func__, idx); | ||
1357 | - return NULL; | ||
1358 | - } | ||
1359 | + if (priv->echo_skb[idx]) { | ||
1360 | + /* Using "struct canfd_frame::len" for the frame | ||
1361 | + * length is supported on both CAN and CANFD frames. | ||
1362 | + */ | ||
1363 | + struct sk_buff *skb = priv->echo_skb[idx]; | ||
1364 | + struct canfd_frame *cf = (struct canfd_frame *)skb->data; | ||
1365 | + u8 len = cf->len; | ||
1366 | |||
1367 | - /* Using "struct canfd_frame::len" for the frame | ||
1368 | - * length is supported on both CAN and CANFD frames. | ||
1369 | - */ | ||
1370 | - cf = (struct canfd_frame *)skb->data; | ||
1371 | - *len_ptr = cf->len; | ||
1372 | - priv->echo_skb[idx] = NULL; | ||
1373 | + *len_ptr = len; | ||
1374 | + priv->echo_skb[idx] = NULL; | ||
1375 | |||
1376 | - return skb; | ||
1377 | + return skb; | ||
1378 | + } | ||
1379 | + | ||
1380 | + return NULL; | ||
1381 | } | ||
1382 | |||
1383 | /* | ||
1384 | diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c | ||
1385 | index 75ce11395ee8..ae219b8a7754 100644 | ||
1386 | --- a/drivers/net/can/flexcan.c | ||
1387 | +++ b/drivers/net/can/flexcan.c | ||
1388 | @@ -1004,7 +1004,7 @@ static int flexcan_chip_start(struct net_device *dev) | ||
1389 | } | ||
1390 | } else { | ||
1391 | /* clear and invalidate unused mailboxes first */ | ||
1392 | - for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) { | ||
1393 | + for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) { | ||
1394 | priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, | ||
1395 | ®s->mb[i].can_ctrl); | ||
1396 | } | ||
1397 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | ||
1398 | index d272dc6984ac..b40d4377cc71 100644 | ||
1399 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h | ||
1400 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | ||
1401 | @@ -431,8 +431,6 @@ | ||
1402 | #define MAC_MDIOSCAR_PA_WIDTH 5 | ||
1403 | #define MAC_MDIOSCAR_RA_INDEX 0 | ||
1404 | #define MAC_MDIOSCAR_RA_WIDTH 16 | ||
1405 | -#define MAC_MDIOSCAR_REG_INDEX 0 | ||
1406 | -#define MAC_MDIOSCAR_REG_WIDTH 21 | ||
1407 | #define MAC_MDIOSCCDR_BUSY_INDEX 22 | ||
1408 | #define MAC_MDIOSCCDR_BUSY_WIDTH 1 | ||
1409 | #define MAC_MDIOSCCDR_CMD_INDEX 16 | ||
1410 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | ||
1411 | index 1e929a1e4ca7..4666084eda16 100644 | ||
1412 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | ||
1413 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | ||
1414 | @@ -1284,6 +1284,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, | ||
1415 | } | ||
1416 | } | ||
1417 | |||
1418 | +static unsigned int xgbe_create_mdio_sca(int port, int reg) | ||
1419 | +{ | ||
1420 | + unsigned int mdio_sca, da; | ||
1421 | + | ||
1422 | + da = (reg & MII_ADDR_C45) ? reg >> 16 : 0; | ||
1423 | + | ||
1424 | + mdio_sca = 0; | ||
1425 | + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); | ||
1426 | + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); | ||
1427 | + XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); | ||
1428 | + | ||
1429 | + return mdio_sca; | ||
1430 | +} | ||
1431 | + | ||
1432 | static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | ||
1433 | int reg, u16 val) | ||
1434 | { | ||
1435 | @@ -1291,9 +1305,7 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | ||
1436 | |||
1437 | reinit_completion(&pdata->mdio_complete); | ||
1438 | |||
1439 | - mdio_sca = 0; | ||
1440 | - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
1441 | - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
1442 | + mdio_sca = xgbe_create_mdio_sca(addr, reg); | ||
1443 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | ||
1444 | |||
1445 | mdio_sccd = 0; | ||
1446 | @@ -1317,9 +1329,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, | ||
1447 | |||
1448 | reinit_completion(&pdata->mdio_complete); | ||
1449 | |||
1450 | - mdio_sca = 0; | ||
1451 | - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg); | ||
1452 | - XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr); | ||
1453 | + mdio_sca = xgbe_create_mdio_sca(addr, reg); | ||
1454 | XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); | ||
1455 | |||
1456 | mdio_sccd = 0; | ||
1457 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c | ||
1458 | index 5890fdfd62c3..c7901a3f2a79 100644 | ||
1459 | --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c | ||
1460 | +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c | ||
1461 | @@ -604,29 +604,31 @@ static void mlxsw_pci_cq_tasklet(unsigned long data) | ||
1462 | u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); | ||
1463 | u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); | ||
1464 | u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); | ||
1465 | + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; | ||
1466 | + | ||
1467 | + memcpy(ncqe, cqe, q->elem_size); | ||
1468 | + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
1469 | |||
1470 | if (sendq) { | ||
1471 | struct mlxsw_pci_queue *sdq; | ||
1472 | |||
1473 | sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); | ||
1474 | mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, | ||
1475 | - wqe_counter, cqe); | ||
1476 | + wqe_counter, ncqe); | ||
1477 | q->u.cq.comp_sdq_count++; | ||
1478 | } else { | ||
1479 | struct mlxsw_pci_queue *rdq; | ||
1480 | |||
1481 | rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); | ||
1482 | mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, | ||
1483 | - wqe_counter, q->u.cq.v, cqe); | ||
1484 | + wqe_counter, q->u.cq.v, ncqe); | ||
1485 | q->u.cq.comp_rdq_count++; | ||
1486 | } | ||
1487 | if (++items == credits) | ||
1488 | break; | ||
1489 | } | ||
1490 | - if (items) { | ||
1491 | - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); | ||
1492 | + if (items) | ||
1493 | mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); | ||
1494 | - } | ||
1495 | } | ||
1496 | |||
1497 | static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) | ||
1498 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
1499 | index 83f452b7ccbb..72cdaa01d56d 100644 | ||
1500 | --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
1501 | +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
1502 | @@ -27,7 +27,7 @@ | ||
1503 | |||
1504 | #define MLXSW_PCI_SW_RESET 0xF0010 | ||
1505 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) | ||
1506 | -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 | ||
1507 | +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 | ||
1508 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 | ||
1509 | #define MLXSW_PCI_FW_READY 0xA1844 | ||
1510 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF | ||
1511 | @@ -53,6 +53,7 @@ | ||
1512 | #define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */ | ||
1513 | #define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */ | ||
1514 | #define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */ | ||
1515 | +#define MLXSW_PCI_CQE_SIZE_MAX MLXSW_PCI_CQE2_SIZE | ||
1516 | #define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */ | ||
1517 | #define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE) | ||
1518 | #define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE) | ||
1519 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | ||
1520 | index 715d24ff937e..562c4429eec7 100644 | ||
1521 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | ||
1522 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c | ||
1523 | @@ -696,8 +696,8 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_dummy_ops = { | ||
1524 | static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { | ||
1525 | .type = MLXSW_SP_FID_TYPE_DUMMY, | ||
1526 | .fid_size = sizeof(struct mlxsw_sp_fid), | ||
1527 | - .start_index = MLXSW_SP_RFID_BASE - 1, | ||
1528 | - .end_index = MLXSW_SP_RFID_BASE - 1, | ||
1529 | + .start_index = VLAN_N_VID - 1, | ||
1530 | + .end_index = VLAN_N_VID - 1, | ||
1531 | .ops = &mlxsw_sp_fid_dummy_ops, | ||
1532 | }; | ||
1533 | |||
1534 | diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c | ||
1535 | index 9020b084b953..7ec4eb74fe21 100644 | ||
1536 | --- a/drivers/net/ethernet/sun/cassini.c | ||
1537 | +++ b/drivers/net/ethernet/sun/cassini.c | ||
1538 | @@ -1,22 +1,9 @@ | ||
1539 | -// SPDX-License-Identifier: GPL-2.0 | ||
1540 | +// SPDX-License-Identifier: GPL-2.0+ | ||
1541 | /* cassini.c: Sun Microsystems Cassini(+) ethernet driver. | ||
1542 | * | ||
1543 | * Copyright (C) 2004 Sun Microsystems Inc. | ||
1544 | * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) | ||
1545 | * | ||
1546 | - * This program is free software; you can redistribute it and/or | ||
1547 | - * modify it under the terms of the GNU General Public License as | ||
1548 | - * published by the Free Software Foundation; either version 2 of the | ||
1549 | - * License, or (at your option) any later version. | ||
1550 | - * | ||
1551 | - * This program is distributed in the hope that it will be useful, | ||
1552 | - * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1553 | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1554 | - * GNU General Public License for more details. | ||
1555 | - * | ||
1556 | - * You should have received a copy of the GNU General Public License | ||
1557 | - * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
1558 | - * | ||
1559 | * This driver uses the sungem driver (c) David Miller | ||
1560 | * (davem@redhat.com) as its basis. | ||
1561 | * | ||
1562 | diff --git a/drivers/net/ethernet/sun/cassini.h b/drivers/net/ethernet/sun/cassini.h | ||
1563 | index 13f3860496a8..ae5f05f03f88 100644 | ||
1564 | --- a/drivers/net/ethernet/sun/cassini.h | ||
1565 | +++ b/drivers/net/ethernet/sun/cassini.h | ||
1566 | @@ -1,23 +1,10 @@ | ||
1567 | -/* SPDX-License-Identifier: GPL-2.0 */ | ||
1568 | +/* SPDX-License-Identifier: GPL-2.0+ */ | ||
1569 | /* $Id: cassini.h,v 1.16 2004/08/17 21:15:16 zaumen Exp $ | ||
1570 | * cassini.h: Definitions for Sun Microsystems Cassini(+) ethernet driver. | ||
1571 | * | ||
1572 | * Copyright (C) 2004 Sun Microsystems Inc. | ||
1573 | * Copyright (c) 2003 Adrian Sun (asun@darksunrising.com) | ||
1574 | * | ||
1575 | - * This program is free software; you can redistribute it and/or | ||
1576 | - * modify it under the terms of the GNU General Public License as | ||
1577 | - * published by the Free Software Foundation; either version 2 of the | ||
1578 | - * License, or (at your option) any later version. | ||
1579 | - * | ||
1580 | - * This program is distributed in the hope that it will be useful, | ||
1581 | - * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
1582 | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
1583 | - * GNU General Public License for more details. | ||
1584 | - * | ||
1585 | - * You should have received a copy of the GNU General Public License | ||
1586 | - * along with this program; if not, see <http://www.gnu.org/licenses/>. | ||
1587 | - * | ||
1588 | * vendor id: 0x108E (Sun Microsystems, Inc.) | ||
1589 | * device id: 0xabba (Cassini) | ||
1590 | * revision ids: 0x01 = Cassini | ||
1591 | diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c | ||
1592 | index f7c69ca34056..d71be15c8c69 100644 | ||
1593 | --- a/drivers/net/phy/marvell.c | ||
1594 | +++ b/drivers/net/phy/marvell.c | ||
1595 | @@ -1063,6 +1063,39 @@ static int m88e1145_config_init(struct phy_device *phydev) | ||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | +/* The VOD can be out of specification on link up. Poke an | ||
1600 | + * undocumented register, in an undocumented page, with a magic value | ||
1601 | + * to fix this. | ||
1602 | + */ | ||
1603 | +static int m88e6390_errata(struct phy_device *phydev) | ||
1604 | +{ | ||
1605 | + int err; | ||
1606 | + | ||
1607 | + err = phy_write(phydev, MII_BMCR, | ||
1608 | + BMCR_ANENABLE | BMCR_SPEED1000 | BMCR_FULLDPLX); | ||
1609 | + if (err) | ||
1610 | + return err; | ||
1611 | + | ||
1612 | + usleep_range(300, 400); | ||
1613 | + | ||
1614 | + err = phy_write_paged(phydev, 0xf8, 0x08, 0x36); | ||
1615 | + if (err) | ||
1616 | + return err; | ||
1617 | + | ||
1618 | + return genphy_soft_reset(phydev); | ||
1619 | +} | ||
1620 | + | ||
1621 | +static int m88e6390_config_aneg(struct phy_device *phydev) | ||
1622 | +{ | ||
1623 | + int err; | ||
1624 | + | ||
1625 | + err = m88e6390_errata(phydev); | ||
1626 | + if (err) | ||
1627 | + return err; | ||
1628 | + | ||
1629 | + return m88e1510_config_aneg(phydev); | ||
1630 | +} | ||
1631 | + | ||
1632 | /** | ||
1633 | * fiber_lpa_to_ethtool_lpa_t | ||
1634 | * @lpa: value of the MII_LPA register for fiber link | ||
1635 | @@ -1418,7 +1451,7 @@ static int m88e1318_set_wol(struct phy_device *phydev, | ||
1636 | * before enabling it if !phy_interrupt_is_valid() | ||
1637 | */ | ||
1638 | if (!phy_interrupt_is_valid(phydev)) | ||
1639 | - phy_read(phydev, MII_M1011_IEVENT); | ||
1640 | + __phy_read(phydev, MII_M1011_IEVENT); | ||
1641 | |||
1642 | /* Enable the WOL interrupt */ | ||
1643 | err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, | ||
1644 | @@ -2313,7 +2346,7 @@ static struct phy_driver marvell_drivers[] = { | ||
1645 | .flags = PHY_HAS_INTERRUPT, | ||
1646 | .probe = m88e6390_probe, | ||
1647 | .config_init = &marvell_config_init, | ||
1648 | - .config_aneg = &m88e1510_config_aneg, | ||
1649 | + .config_aneg = &m88e6390_config_aneg, | ||
1650 | .read_status = &marvell_read_status, | ||
1651 | .ack_interrupt = &marvell_ack_interrupt, | ||
1652 | .config_intr = &marvell_config_intr, | ||
1653 | diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c | ||
1654 | index 98f4b1f706df..15c5586d74ff 100644 | ||
1655 | --- a/drivers/net/phy/mdio_bus.c | ||
1656 | +++ b/drivers/net/phy/mdio_bus.c | ||
1657 | @@ -391,6 +391,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | ||
1658 | if (IS_ERR(gpiod)) { | ||
1659 | dev_err(&bus->dev, "mii_bus %s couldn't get reset GPIO\n", | ||
1660 | bus->id); | ||
1661 | + device_del(&bus->dev); | ||
1662 | return PTR_ERR(gpiod); | ||
1663 | } else if (gpiod) { | ||
1664 | bus->reset_gpiod = gpiod; | ||
1665 | diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c | ||
1666 | index 62dc564b251d..f22639f0116a 100644 | ||
1667 | --- a/drivers/net/ppp/pppoe.c | ||
1668 | +++ b/drivers/net/ppp/pppoe.c | ||
1669 | @@ -445,6 +445,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, | ||
1670 | if (pskb_trim_rcsum(skb, len)) | ||
1671 | goto drop; | ||
1672 | |||
1673 | + ph = pppoe_hdr(skb); | ||
1674 | pn = pppoe_pernet(dev_net(dev)); | ||
1675 | |||
1676 | /* Note that get_item does a sock_hold(), so sk_pppox(po) | ||
1677 | diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c | ||
1678 | index e57f3902beb3..08f997a390d5 100644 | ||
1679 | --- a/drivers/nvme/target/rdma.c | ||
1680 | +++ b/drivers/nvme/target/rdma.c | ||
1681 | @@ -139,6 +139,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc); | ||
1682 | static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc); | ||
1683 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv); | ||
1684 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); | ||
1685 | +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, | ||
1686 | + struct nvmet_rdma_rsp *r); | ||
1687 | +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, | ||
1688 | + struct nvmet_rdma_rsp *r); | ||
1689 | |||
1690 | static const struct nvmet_fabrics_ops nvmet_rdma_ops; | ||
1691 | |||
1692 | @@ -182,9 +186,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) | ||
1693 | spin_unlock_irqrestore(&queue->rsps_lock, flags); | ||
1694 | |||
1695 | if (unlikely(!rsp)) { | ||
1696 | - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL); | ||
1697 | + int ret; | ||
1698 | + | ||
1699 | + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); | ||
1700 | if (unlikely(!rsp)) | ||
1701 | return NULL; | ||
1702 | + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); | ||
1703 | + if (unlikely(ret)) { | ||
1704 | + kfree(rsp); | ||
1705 | + return NULL; | ||
1706 | + } | ||
1707 | + | ||
1708 | rsp->allocated = true; | ||
1709 | } | ||
1710 | |||
1711 | @@ -196,7 +208,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) | ||
1712 | { | ||
1713 | unsigned long flags; | ||
1714 | |||
1715 | - if (rsp->allocated) { | ||
1716 | + if (unlikely(rsp->allocated)) { | ||
1717 | + nvmet_rdma_free_rsp(rsp->queue->dev, rsp); | ||
1718 | kfree(rsp); | ||
1719 | return; | ||
1720 | } | ||
1721 | diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c | ||
1722 | index 194ffd5c8580..039b2074db7e 100644 | ||
1723 | --- a/drivers/s390/char/sclp_config.c | ||
1724 | +++ b/drivers/s390/char/sclp_config.c | ||
1725 | @@ -60,7 +60,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | ||
1726 | |||
1727 | static void __ref sclp_cpu_change_notify(struct work_struct *work) | ||
1728 | { | ||
1729 | + lock_device_hotplug(); | ||
1730 | smp_rescan_cpus(); | ||
1731 | + unlock_device_hotplug(); | ||
1732 | } | ||
1733 | |||
1734 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | ||
1735 | diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c | ||
1736 | index 0b81d9d03357..12ddb5928a73 100644 | ||
1737 | --- a/drivers/scsi/ufs/ufshcd.c | ||
1738 | +++ b/drivers/scsi/ufs/ufshcd.c | ||
1739 | @@ -109,13 +109,19 @@ | ||
1740 | int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, | ||
1741 | const char *prefix) | ||
1742 | { | ||
1743 | - u8 *regs; | ||
1744 | + u32 *regs; | ||
1745 | + size_t pos; | ||
1746 | + | ||
1747 | + if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ | ||
1748 | + return -EINVAL; | ||
1749 | |||
1750 | regs = kzalloc(len, GFP_KERNEL); | ||
1751 | if (!regs) | ||
1752 | return -ENOMEM; | ||
1753 | |||
1754 | - memcpy_fromio(regs, hba->mmio_base + offset, len); | ||
1755 | + for (pos = 0; pos < len; pos += 4) | ||
1756 | + regs[pos / 4] = ufshcd_readl(hba, offset + pos); | ||
1757 | + | ||
1758 | ufshcd_hex_dump(prefix, regs, len); | ||
1759 | kfree(regs); | ||
1760 | |||
1761 | diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | ||
1762 | index 28cbd6b3d26c..dfee6985efa6 100644 | ||
1763 | --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c | ||
1764 | +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c | ||
1765 | @@ -35,6 +35,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = { | ||
1766 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ | ||
1767 | {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ | ||
1768 | {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ | ||
1769 | + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */ | ||
1770 | {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ | ||
1771 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ | ||
1772 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */ | ||
1773 | diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c | ||
1774 | index dabb391909aa..bb63519db7ae 100644 | ||
1775 | --- a/drivers/tty/n_hdlc.c | ||
1776 | +++ b/drivers/tty/n_hdlc.c | ||
1777 | @@ -597,6 +597,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, | ||
1778 | /* too large for caller's buffer */ | ||
1779 | ret = -EOVERFLOW; | ||
1780 | } else { | ||
1781 | + __set_current_state(TASK_RUNNING); | ||
1782 | if (copy_to_user(buf, rbuf->buf, rbuf->count)) | ||
1783 | ret = -EFAULT; | ||
1784 | else | ||
1785 | diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c | ||
1786 | index ad126f51d549..7fe679413188 100644 | ||
1787 | --- a/drivers/tty/serial/serial_core.c | ||
1788 | +++ b/drivers/tty/serial/serial_core.c | ||
1789 | @@ -550,10 +550,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c) | ||
1790 | int ret = 0; | ||
1791 | |||
1792 | circ = &state->xmit; | ||
1793 | - if (!circ->buf) | ||
1794 | + port = uart_port_lock(state, flags); | ||
1795 | + if (!circ->buf) { | ||
1796 | + uart_port_unlock(port, flags); | ||
1797 | return 0; | ||
1798 | + } | ||
1799 | |||
1800 | - port = uart_port_lock(state, flags); | ||
1801 | if (port && uart_circ_chars_free(circ) != 0) { | ||
1802 | circ->buf[circ->head] = c; | ||
1803 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); | ||
1804 | @@ -586,11 +588,13 @@ static int uart_write(struct tty_struct *tty, | ||
1805 | return -EL3HLT; | ||
1806 | } | ||
1807 | |||
1808 | + port = uart_port_lock(state, flags); | ||
1809 | circ = &state->xmit; | ||
1810 | - if (!circ->buf) | ||
1811 | + if (!circ->buf) { | ||
1812 | + uart_port_unlock(port, flags); | ||
1813 | return 0; | ||
1814 | + } | ||
1815 | |||
1816 | - port = uart_port_lock(state, flags); | ||
1817 | while (port) { | ||
1818 | c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); | ||
1819 | if (count < c) | ||
1820 | diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c | ||
1821 | index 052ec16a4e84..e7d192ebecd7 100644 | ||
1822 | --- a/drivers/tty/tty_io.c | ||
1823 | +++ b/drivers/tty/tty_io.c | ||
1824 | @@ -2188,7 +2188,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p) | ||
1825 | ld = tty_ldisc_ref_wait(tty); | ||
1826 | if (!ld) | ||
1827 | return -EIO; | ||
1828 | - ld->ops->receive_buf(tty, &ch, &mbz, 1); | ||
1829 | + if (ld->ops->receive_buf) | ||
1830 | + ld->ops->receive_buf(tty, &ch, &mbz, 1); | ||
1831 | tty_ldisc_deref(ld); | ||
1832 | return 0; | ||
1833 | } | ||
1834 | diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c | ||
1835 | index 476ec4b1b86c..da335899527b 100644 | ||
1836 | --- a/drivers/tty/vt/vt.c | ||
1837 | +++ b/drivers/tty/vt/vt.c | ||
1838 | @@ -1275,6 +1275,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, | ||
1839 | if (con_is_visible(vc)) | ||
1840 | update_screen(vc); | ||
1841 | vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num); | ||
1842 | + notify_update(vc); | ||
1843 | return err; | ||
1844 | } | ||
1845 | |||
1846 | @@ -2767,8 +2768,8 @@ rescan_last_byte: | ||
1847 | con_flush(vc, draw_from, draw_to, &draw_x); | ||
1848 | vc_uniscr_debug_check(vc); | ||
1849 | console_conditional_schedule(); | ||
1850 | - console_unlock(); | ||
1851 | notify_update(vc); | ||
1852 | + console_unlock(); | ||
1853 | return n; | ||
1854 | } | ||
1855 | |||
1856 | @@ -2887,8 +2888,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | ||
1857 | unsigned char c; | ||
1858 | static DEFINE_SPINLOCK(printing_lock); | ||
1859 | const ushort *start; | ||
1860 | - ushort cnt = 0; | ||
1861 | - ushort myx; | ||
1862 | + ushort start_x, cnt; | ||
1863 | int kmsg_console; | ||
1864 | |||
1865 | /* console busy or not yet initialized */ | ||
1866 | @@ -2901,10 +2901,6 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | ||
1867 | if (kmsg_console && vc_cons_allocated(kmsg_console - 1)) | ||
1868 | vc = vc_cons[kmsg_console - 1].d; | ||
1869 | |||
1870 | - /* read `x' only after setting currcons properly (otherwise | ||
1871 | - the `x' macro will read the x of the foreground console). */ | ||
1872 | - myx = vc->vc_x; | ||
1873 | - | ||
1874 | if (!vc_cons_allocated(fg_console)) { | ||
1875 | /* impossible */ | ||
1876 | /* printk("vt_console_print: tty %d not allocated ??\n", currcons+1); */ | ||
1877 | @@ -2919,53 +2915,41 @@ static void vt_console_print(struct console *co, const char *b, unsigned count) | ||
1878 | hide_cursor(vc); | ||
1879 | |||
1880 | start = (ushort *)vc->vc_pos; | ||
1881 | - | ||
1882 | - /* Contrived structure to try to emulate original need_wrap behaviour | ||
1883 | - * Problems caused when we have need_wrap set on '\n' character */ | ||
1884 | + start_x = vc->vc_x; | ||
1885 | + cnt = 0; | ||
1886 | while (count--) { | ||
1887 | c = *b++; | ||
1888 | if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) { | ||
1889 | - if (cnt > 0) { | ||
1890 | - if (con_is_visible(vc)) | ||
1891 | - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); | ||
1892 | - vc->vc_x += cnt; | ||
1893 | - if (vc->vc_need_wrap) | ||
1894 | - vc->vc_x--; | ||
1895 | - cnt = 0; | ||
1896 | - } | ||
1897 | + if (cnt && con_is_visible(vc)) | ||
1898 | + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); | ||
1899 | + cnt = 0; | ||
1900 | if (c == 8) { /* backspace */ | ||
1901 | bs(vc); | ||
1902 | start = (ushort *)vc->vc_pos; | ||
1903 | - myx = vc->vc_x; | ||
1904 | + start_x = vc->vc_x; | ||
1905 | continue; | ||
1906 | } | ||
1907 | if (c != 13) | ||
1908 | lf(vc); | ||
1909 | cr(vc); | ||
1910 | start = (ushort *)vc->vc_pos; | ||
1911 | - myx = vc->vc_x; | ||
1912 | + start_x = vc->vc_x; | ||
1913 | if (c == 10 || c == 13) | ||
1914 | continue; | ||
1915 | } | ||
1916 | + vc_uniscr_putc(vc, c); | ||
1917 | scr_writew((vc->vc_attr << 8) + c, (unsigned short *)vc->vc_pos); | ||
1918 | notify_write(vc, c); | ||
1919 | cnt++; | ||
1920 | - if (myx == vc->vc_cols - 1) { | ||
1921 | - vc->vc_need_wrap = 1; | ||
1922 | - continue; | ||
1923 | - } | ||
1924 | - vc->vc_pos += 2; | ||
1925 | - myx++; | ||
1926 | - } | ||
1927 | - if (cnt > 0) { | ||
1928 | - if (con_is_visible(vc)) | ||
1929 | - vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x); | ||
1930 | - vc->vc_x += cnt; | ||
1931 | - if (vc->vc_x == vc->vc_cols) { | ||
1932 | - vc->vc_x--; | ||
1933 | + if (vc->vc_x == vc->vc_cols - 1) { | ||
1934 | vc->vc_need_wrap = 1; | ||
1935 | + } else { | ||
1936 | + vc->vc_pos += 2; | ||
1937 | + vc->vc_x++; | ||
1938 | } | ||
1939 | } | ||
1940 | + if (cnt && con_is_visible(vc)) | ||
1941 | + vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, start_x); | ||
1942 | set_cursor(vc); | ||
1943 | notify_update(vc); | ||
1944 | |||
1945 | diff --git a/drivers/usb/core/ledtrig-usbport.c b/drivers/usb/core/ledtrig-usbport.c | ||
1946 | index dc7f7fd71684..c12ac56606c3 100644 | ||
1947 | --- a/drivers/usb/core/ledtrig-usbport.c | ||
1948 | +++ b/drivers/usb/core/ledtrig-usbport.c | ||
1949 | @@ -119,11 +119,6 @@ static const struct attribute_group ports_group = { | ||
1950 | .attrs = ports_attrs, | ||
1951 | }; | ||
1952 | |||
1953 | -static const struct attribute_group *ports_groups[] = { | ||
1954 | - &ports_group, | ||
1955 | - NULL | ||
1956 | -}; | ||
1957 | - | ||
1958 | /*************************************** | ||
1959 | * Adding & removing ports | ||
1960 | ***************************************/ | ||
1961 | @@ -307,6 +302,7 @@ static int usbport_trig_notify(struct notifier_block *nb, unsigned long action, | ||
1962 | static int usbport_trig_activate(struct led_classdev *led_cdev) | ||
1963 | { | ||
1964 | struct usbport_trig_data *usbport_data; | ||
1965 | + int err; | ||
1966 | |||
1967 | usbport_data = kzalloc(sizeof(*usbport_data), GFP_KERNEL); | ||
1968 | if (!usbport_data) | ||
1969 | @@ -315,6 +311,9 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) | ||
1970 | |||
1971 | /* List of ports */ | ||
1972 | INIT_LIST_HEAD(&usbport_data->ports); | ||
1973 | + err = sysfs_create_group(&led_cdev->dev->kobj, &ports_group); | ||
1974 | + if (err) | ||
1975 | + goto err_free; | ||
1976 | usb_for_each_dev(usbport_data, usbport_trig_add_usb_dev_ports); | ||
1977 | usbport_trig_update_count(usbport_data); | ||
1978 | |||
1979 | @@ -322,8 +321,11 @@ static int usbport_trig_activate(struct led_classdev *led_cdev) | ||
1980 | usbport_data->nb.notifier_call = usbport_trig_notify; | ||
1981 | led_set_trigger_data(led_cdev, usbport_data); | ||
1982 | usb_register_notify(&usbport_data->nb); | ||
1983 | - | ||
1984 | return 0; | ||
1985 | + | ||
1986 | +err_free: | ||
1987 | + kfree(usbport_data); | ||
1988 | + return err; | ||
1989 | } | ||
1990 | |||
1991 | static void usbport_trig_deactivate(struct led_classdev *led_cdev) | ||
1992 | @@ -335,6 +337,8 @@ static void usbport_trig_deactivate(struct led_classdev *led_cdev) | ||
1993 | usbport_trig_remove_port(usbport_data, port); | ||
1994 | } | ||
1995 | |||
1996 | + sysfs_remove_group(&led_cdev->dev->kobj, &ports_group); | ||
1997 | + | ||
1998 | usb_unregister_notify(&usbport_data->nb); | ||
1999 | |||
2000 | kfree(usbport_data); | ||
2001 | @@ -344,7 +348,6 @@ static struct led_trigger usbport_led_trigger = { | ||
2002 | .name = "usbport", | ||
2003 | .activate = usbport_trig_activate, | ||
2004 | .deactivate = usbport_trig_deactivate, | ||
2005 | - .groups = ports_groups, | ||
2006 | }; | ||
2007 | |||
2008 | static int __init usbport_trig_init(void) | ||
2009 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c | ||
2010 | index 558949b826d0..d8bf9307901e 100644 | ||
2011 | --- a/drivers/usb/dwc3/gadget.c | ||
2012 | +++ b/drivers/usb/dwc3/gadget.c | ||
2013 | @@ -177,6 +177,8 @@ static void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep, | ||
2014 | req->started = false; | ||
2015 | list_del(&req->list); | ||
2016 | req->remaining = 0; | ||
2017 | + req->unaligned = false; | ||
2018 | + req->zero = false; | ||
2019 | |||
2020 | if (req->request.status == -EINPROGRESS) | ||
2021 | req->request.status = status; | ||
2022 | diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c | ||
2023 | index d5b38f096698..5a6df6e9ad57 100644 | ||
2024 | --- a/drivers/usb/serial/pl2303.c | ||
2025 | +++ b/drivers/usb/serial/pl2303.c | ||
2026 | @@ -46,6 +46,7 @@ static const struct usb_device_id id_table[] = { | ||
2027 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) }, | ||
2028 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) }, | ||
2029 | { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) }, | ||
2030 | + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) }, | ||
2031 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) }, | ||
2032 | { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) }, | ||
2033 | { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID), | ||
2034 | diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h | ||
2035 | index 4e2554d55362..559941ca884d 100644 | ||
2036 | --- a/drivers/usb/serial/pl2303.h | ||
2037 | +++ b/drivers/usb/serial/pl2303.h | ||
2038 | @@ -8,6 +8,7 @@ | ||
2039 | |||
2040 | #define PL2303_VENDOR_ID 0x067b | ||
2041 | #define PL2303_PRODUCT_ID 0x2303 | ||
2042 | +#define PL2303_PRODUCT_ID_TB 0x2304 | ||
2043 | #define PL2303_PRODUCT_ID_RSAQ2 0x04bb | ||
2044 | #define PL2303_PRODUCT_ID_DCU11 0x1234 | ||
2045 | #define PL2303_PRODUCT_ID_PHAROS 0xaaa0 | ||
2046 | @@ -20,6 +21,7 @@ | ||
2047 | #define PL2303_PRODUCT_ID_MOTOROLA 0x0307 | ||
2048 | #define PL2303_PRODUCT_ID_ZTEK 0xe1f1 | ||
2049 | |||
2050 | + | ||
2051 | #define ATEN_VENDOR_ID 0x0557 | ||
2052 | #define ATEN_VENDOR_ID2 0x0547 | ||
2053 | #define ATEN_PRODUCT_ID 0x2008 | ||
2054 | diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c | ||
2055 | index 4d0273508043..edbbb13d6de6 100644 | ||
2056 | --- a/drivers/usb/serial/usb-serial-simple.c | ||
2057 | +++ b/drivers/usb/serial/usb-serial-simple.c | ||
2058 | @@ -85,7 +85,8 @@ DEVICE(moto_modem, MOTO_IDS); | ||
2059 | /* Motorola Tetra driver */ | ||
2060 | #define MOTOROLA_TETRA_IDS() \ | ||
2061 | { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \ | ||
2062 | - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */ | ||
2063 | + { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \ | ||
2064 | + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */ | ||
2065 | DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS); | ||
2066 | |||
2067 | /* Novatel Wireless GPS driver */ | ||
2068 | diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c | ||
2069 | index 4e656f89cb22..a86aa65ad66d 100644 | ||
2070 | --- a/drivers/vhost/net.c | ||
2071 | +++ b/drivers/vhost/net.c | ||
2072 | @@ -1024,7 +1024,8 @@ static void handle_rx(struct vhost_net *net) | ||
2073 | if (nvq->done_idx > VHOST_NET_BATCH) | ||
2074 | vhost_net_signal_used(nvq); | ||
2075 | if (unlikely(vq_log)) | ||
2076 | - vhost_log_write(vq, vq_log, log, vhost_len); | ||
2077 | + vhost_log_write(vq, vq_log, log, vhost_len, | ||
2078 | + vq->iov, in); | ||
2079 | total_len += vhost_len; | ||
2080 | if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) { | ||
2081 | vhost_poll_queue(&vq->poll); | ||
2082 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c | ||
2083 | index 55e5aa662ad5..c66fc8308b5e 100644 | ||
2084 | --- a/drivers/vhost/vhost.c | ||
2085 | +++ b/drivers/vhost/vhost.c | ||
2086 | @@ -1733,13 +1733,87 @@ static int log_write(void __user *log_base, | ||
2087 | return r; | ||
2088 | } | ||
2089 | |||
2090 | +static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) | ||
2091 | +{ | ||
2092 | + struct vhost_umem *umem = vq->umem; | ||
2093 | + struct vhost_umem_node *u; | ||
2094 | + u64 start, end, l, min; | ||
2095 | + int r; | ||
2096 | + bool hit = false; | ||
2097 | + | ||
2098 | + while (len) { | ||
2099 | + min = len; | ||
2100 | + /* More than one GPAs can be mapped into a single HVA. So | ||
2101 | + * iterate all possible umems here to be safe. | ||
2102 | + */ | ||
2103 | + list_for_each_entry(u, &umem->umem_list, link) { | ||
2104 | + if (u->userspace_addr > hva - 1 + len || | ||
2105 | + u->userspace_addr - 1 + u->size < hva) | ||
2106 | + continue; | ||
2107 | + start = max(u->userspace_addr, hva); | ||
2108 | + end = min(u->userspace_addr - 1 + u->size, | ||
2109 | + hva - 1 + len); | ||
2110 | + l = end - start + 1; | ||
2111 | + r = log_write(vq->log_base, | ||
2112 | + u->start + start - u->userspace_addr, | ||
2113 | + l); | ||
2114 | + if (r < 0) | ||
2115 | + return r; | ||
2116 | + hit = true; | ||
2117 | + min = min(l, min); | ||
2118 | + } | ||
2119 | + | ||
2120 | + if (!hit) | ||
2121 | + return -EFAULT; | ||
2122 | + | ||
2123 | + len -= min; | ||
2124 | + hva += min; | ||
2125 | + } | ||
2126 | + | ||
2127 | + return 0; | ||
2128 | +} | ||
2129 | + | ||
2130 | +static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | ||
2131 | +{ | ||
2132 | + struct iovec iov[64]; | ||
2133 | + int i, ret; | ||
2134 | + | ||
2135 | + if (!vq->iotlb) | ||
2136 | + return log_write(vq->log_base, vq->log_addr + used_offset, len); | ||
2137 | + | ||
2138 | + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | ||
2139 | + len, iov, 64, VHOST_ACCESS_WO); | ||
2140 | + if (ret) | ||
2141 | + return ret; | ||
2142 | + | ||
2143 | + for (i = 0; i < ret; i++) { | ||
2144 | + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
2145 | + iov[i].iov_len); | ||
2146 | + if (ret) | ||
2147 | + return ret; | ||
2148 | + } | ||
2149 | + | ||
2150 | + return 0; | ||
2151 | +} | ||
2152 | + | ||
2153 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | ||
2154 | - unsigned int log_num, u64 len) | ||
2155 | + unsigned int log_num, u64 len, struct iovec *iov, int count) | ||
2156 | { | ||
2157 | int i, r; | ||
2158 | |||
2159 | /* Make sure data written is seen before log. */ | ||
2160 | smp_wmb(); | ||
2161 | + | ||
2162 | + if (vq->iotlb) { | ||
2163 | + for (i = 0; i < count; i++) { | ||
2164 | + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base, | ||
2165 | + iov[i].iov_len); | ||
2166 | + if (r < 0) | ||
2167 | + return r; | ||
2168 | + } | ||
2169 | + return 0; | ||
2170 | + } | ||
2171 | + | ||
2172 | for (i = 0; i < log_num; ++i) { | ||
2173 | u64 l = min(log[i].len, len); | ||
2174 | r = log_write(vq->log_base, log[i].addr, l); | ||
2175 | @@ -1769,9 +1843,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq) | ||
2176 | smp_wmb(); | ||
2177 | /* Log used flag write. */ | ||
2178 | used = &vq->used->flags; | ||
2179 | - log_write(vq->log_base, vq->log_addr + | ||
2180 | - (used - (void __user *)vq->used), | ||
2181 | - sizeof vq->used->flags); | ||
2182 | + log_used(vq, (used - (void __user *)vq->used), | ||
2183 | + sizeof vq->used->flags); | ||
2184 | if (vq->log_ctx) | ||
2185 | eventfd_signal(vq->log_ctx, 1); | ||
2186 | } | ||
2187 | @@ -1789,9 +1862,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event) | ||
2188 | smp_wmb(); | ||
2189 | /* Log avail event write */ | ||
2190 | used = vhost_avail_event(vq); | ||
2191 | - log_write(vq->log_base, vq->log_addr + | ||
2192 | - (used - (void __user *)vq->used), | ||
2193 | - sizeof *vhost_avail_event(vq)); | ||
2194 | + log_used(vq, (used - (void __user *)vq->used), | ||
2195 | + sizeof *vhost_avail_event(vq)); | ||
2196 | if (vq->log_ctx) | ||
2197 | eventfd_signal(vq->log_ctx, 1); | ||
2198 | } | ||
2199 | @@ -2191,10 +2263,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq, | ||
2200 | /* Make sure data is seen before log. */ | ||
2201 | smp_wmb(); | ||
2202 | /* Log used ring entry write. */ | ||
2203 | - log_write(vq->log_base, | ||
2204 | - vq->log_addr + | ||
2205 | - ((void __user *)used - (void __user *)vq->used), | ||
2206 | - count * sizeof *used); | ||
2207 | + log_used(vq, ((void __user *)used - (void __user *)vq->used), | ||
2208 | + count * sizeof *used); | ||
2209 | } | ||
2210 | old = vq->last_used_idx; | ||
2211 | new = (vq->last_used_idx += count); | ||
2212 | @@ -2236,9 +2306,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads, | ||
2213 | /* Make sure used idx is seen before log. */ | ||
2214 | smp_wmb(); | ||
2215 | /* Log used index update. */ | ||
2216 | - log_write(vq->log_base, | ||
2217 | - vq->log_addr + offsetof(struct vring_used, idx), | ||
2218 | - sizeof vq->used->idx); | ||
2219 | + log_used(vq, offsetof(struct vring_used, idx), | ||
2220 | + sizeof vq->used->idx); | ||
2221 | if (vq->log_ctx) | ||
2222 | eventfd_signal(vq->log_ctx, 1); | ||
2223 | } | ||
2224 | diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h | ||
2225 | index 466ef7542291..1b675dad5e05 100644 | ||
2226 | --- a/drivers/vhost/vhost.h | ||
2227 | +++ b/drivers/vhost/vhost.h | ||
2228 | @@ -205,7 +205,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *); | ||
2229 | bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); | ||
2230 | |||
2231 | int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, | ||
2232 | - unsigned int log_num, u64 len); | ||
2233 | + unsigned int log_num, u64 len, | ||
2234 | + struct iovec *iov, int count); | ||
2235 | int vq_iotlb_prefetch(struct vhost_virtqueue *vq); | ||
2236 | |||
2237 | struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type); | ||
2238 | diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c | ||
2239 | index 09731b2f6815..c6b3bdbbdbc9 100644 | ||
2240 | --- a/drivers/video/console/vgacon.c | ||
2241 | +++ b/drivers/video/console/vgacon.c | ||
2242 | @@ -271,6 +271,7 @@ static void vgacon_scrollback_update(struct vc_data *c, int t, int count) | ||
2243 | |||
2244 | static void vgacon_restore_screen(struct vc_data *c) | ||
2245 | { | ||
2246 | + c->vc_origin = c->vc_visible_origin; | ||
2247 | vgacon_scrollback_cur->save = 0; | ||
2248 | |||
2249 | if (!vga_is_gfx && !vgacon_scrollback_cur->restore) { | ||
2250 | @@ -287,8 +288,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | ||
2251 | int start, end, count, soff; | ||
2252 | |||
2253 | if (!lines) { | ||
2254 | - c->vc_visible_origin = c->vc_origin; | ||
2255 | - vga_set_mem_top(c); | ||
2256 | + vgacon_restore_screen(c); | ||
2257 | return; | ||
2258 | } | ||
2259 | |||
2260 | @@ -298,6 +298,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | ||
2261 | if (!vgacon_scrollback_cur->save) { | ||
2262 | vgacon_cursor(c, CM_ERASE); | ||
2263 | vgacon_save_screen(c); | ||
2264 | + c->vc_origin = (unsigned long)c->vc_screenbuf; | ||
2265 | vgacon_scrollback_cur->save = 1; | ||
2266 | } | ||
2267 | |||
2268 | @@ -335,7 +336,7 @@ static void vgacon_scrolldelta(struct vc_data *c, int lines) | ||
2269 | int copysize; | ||
2270 | |||
2271 | int diff = c->vc_rows - count; | ||
2272 | - void *d = (void *) c->vc_origin; | ||
2273 | + void *d = (void *) c->vc_visible_origin; | ||
2274 | void *s = (void *) c->vc_screenbuf; | ||
2275 | |||
2276 | count *= c->vc_size_row; | ||
2277 | diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c | ||
2278 | index eadffaa39f4e..c7542e8dd096 100644 | ||
2279 | --- a/fs/ceph/caps.c | ||
2280 | +++ b/fs/ceph/caps.c | ||
2281 | @@ -1030,6 +1030,8 @@ static void drop_inode_snap_realm(struct ceph_inode_info *ci) | ||
2282 | list_del_init(&ci->i_snap_realm_item); | ||
2283 | ci->i_snap_realm_counter++; | ||
2284 | ci->i_snap_realm = NULL; | ||
2285 | + if (realm->ino == ci->i_vino.ino) | ||
2286 | + realm->inode = NULL; | ||
2287 | spin_unlock(&realm->inodes_with_caps_lock); | ||
2288 | ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc, | ||
2289 | realm); | ||
2290 | diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c | ||
2291 | index 5657b79dbc99..269471c8f42b 100644 | ||
2292 | --- a/fs/cifs/cifssmb.c | ||
2293 | +++ b/fs/cifs/cifssmb.c | ||
2294 | @@ -1458,18 +1458,26 @@ cifs_discard_remaining_data(struct TCP_Server_Info *server) | ||
2295 | } | ||
2296 | |||
2297 | static int | ||
2298 | -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
2299 | +__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, | ||
2300 | + bool malformed) | ||
2301 | { | ||
2302 | int length; | ||
2303 | - struct cifs_readdata *rdata = mid->callback_data; | ||
2304 | |||
2305 | length = cifs_discard_remaining_data(server); | ||
2306 | - dequeue_mid(mid, rdata->result); | ||
2307 | + dequeue_mid(mid, malformed); | ||
2308 | mid->resp_buf = server->smallbuf; | ||
2309 | server->smallbuf = NULL; | ||
2310 | return length; | ||
2311 | } | ||
2312 | |||
2313 | +static int | ||
2314 | +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
2315 | +{ | ||
2316 | + struct cifs_readdata *rdata = mid->callback_data; | ||
2317 | + | ||
2318 | + return __cifs_readv_discard(server, mid, rdata->result); | ||
2319 | +} | ||
2320 | + | ||
2321 | int | ||
2322 | cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
2323 | { | ||
2324 | @@ -1511,12 +1519,23 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
2325 | return -1; | ||
2326 | } | ||
2327 | |||
2328 | + /* set up first two iov for signature check and to get credits */ | ||
2329 | + rdata->iov[0].iov_base = buf; | ||
2330 | + rdata->iov[0].iov_len = 4; | ||
2331 | + rdata->iov[1].iov_base = buf + 4; | ||
2332 | + rdata->iov[1].iov_len = server->total_read - 4; | ||
2333 | + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
2334 | + rdata->iov[0].iov_base, rdata->iov[0].iov_len); | ||
2335 | + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", | ||
2336 | + rdata->iov[1].iov_base, rdata->iov[1].iov_len); | ||
2337 | + | ||
2338 | /* Was the SMB read successful? */ | ||
2339 | rdata->result = server->ops->map_error(buf, false); | ||
2340 | if (rdata->result != 0) { | ||
2341 | cifs_dbg(FYI, "%s: server returned error %d\n", | ||
2342 | __func__, rdata->result); | ||
2343 | - return cifs_readv_discard(server, mid); | ||
2344 | + /* normal error on read response */ | ||
2345 | + return __cifs_readv_discard(server, mid, false); | ||
2346 | } | ||
2347 | |||
2348 | /* Is there enough to get to the rest of the READ_RSP header? */ | ||
2349 | @@ -1560,14 +1579,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) | ||
2350 | server->total_read += length; | ||
2351 | } | ||
2352 | |||
2353 | - /* set up first iov for signature check */ | ||
2354 | - rdata->iov[0].iov_base = buf; | ||
2355 | - rdata->iov[0].iov_len = 4; | ||
2356 | - rdata->iov[1].iov_base = buf + 4; | ||
2357 | - rdata->iov[1].iov_len = server->total_read - 4; | ||
2358 | - cifs_dbg(FYI, "0: iov_base=%p iov_len=%u\n", | ||
2359 | - rdata->iov[0].iov_base, server->total_read); | ||
2360 | - | ||
2361 | /* how much data is in the response? */ | ||
2362 | #ifdef CONFIG_CIFS_SMB_DIRECT | ||
2363 | use_rdma_mr = rdata->mr; | ||
2364 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c | ||
2365 | index 52d71b64c0c6..d0bba175117c 100644 | ||
2366 | --- a/fs/cifs/connect.c | ||
2367 | +++ b/fs/cifs/connect.c | ||
2368 | @@ -533,6 +533,21 @@ server_unresponsive(struct TCP_Server_Info *server) | ||
2369 | return false; | ||
2370 | } | ||
2371 | |||
2372 | +static inline bool | ||
2373 | +zero_credits(struct TCP_Server_Info *server) | ||
2374 | +{ | ||
2375 | + int val; | ||
2376 | + | ||
2377 | + spin_lock(&server->req_lock); | ||
2378 | + val = server->credits + server->echo_credits + server->oplock_credits; | ||
2379 | + if (server->in_flight == 0 && val == 0) { | ||
2380 | + spin_unlock(&server->req_lock); | ||
2381 | + return true; | ||
2382 | + } | ||
2383 | + spin_unlock(&server->req_lock); | ||
2384 | + return false; | ||
2385 | +} | ||
2386 | + | ||
2387 | static int | ||
2388 | cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) | ||
2389 | { | ||
2390 | @@ -545,6 +560,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) | ||
2391 | for (total_read = 0; msg_data_left(smb_msg); total_read += length) { | ||
2392 | try_to_freeze(); | ||
2393 | |||
2394 | + /* reconnect if no credits and no requests in flight */ | ||
2395 | + if (zero_credits(server)) { | ||
2396 | + cifs_reconnect(server); | ||
2397 | + return -ECONNABORTED; | ||
2398 | + } | ||
2399 | + | ||
2400 | if (server_unresponsive(server)) | ||
2401 | return -ECONNABORTED; | ||
2402 | if (cifs_rdma_enabled(server) && server->smbd_conn) | ||
2403 | diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c | ||
2404 | index 6a9c47541c53..7b8b58fb4d3f 100644 | ||
2405 | --- a/fs/cifs/smb2misc.c | ||
2406 | +++ b/fs/cifs/smb2misc.c | ||
2407 | @@ -648,6 +648,13 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server) | ||
2408 | if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK) | ||
2409 | return false; | ||
2410 | |||
2411 | + if (rsp->sync_hdr.CreditRequest) { | ||
2412 | + spin_lock(&server->req_lock); | ||
2413 | + server->credits += le16_to_cpu(rsp->sync_hdr.CreditRequest); | ||
2414 | + spin_unlock(&server->req_lock); | ||
2415 | + wake_up(&server->request_q); | ||
2416 | + } | ||
2417 | + | ||
2418 | if (rsp->StructureSize != | ||
2419 | smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) { | ||
2420 | if (le16_to_cpu(rsp->StructureSize) == 44) | ||
2421 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c | ||
2422 | index f44bb4a304e9..237d7281ada3 100644 | ||
2423 | --- a/fs/cifs/smb2ops.c | ||
2424 | +++ b/fs/cifs/smb2ops.c | ||
2425 | @@ -34,6 +34,7 @@ | ||
2426 | #include "cifs_ioctl.h" | ||
2427 | #include "smbdirect.h" | ||
2428 | |||
2429 | +/* Change credits for different ops and return the total number of credits */ | ||
2430 | static int | ||
2431 | change_conf(struct TCP_Server_Info *server) | ||
2432 | { | ||
2433 | @@ -41,17 +42,15 @@ change_conf(struct TCP_Server_Info *server) | ||
2434 | server->oplock_credits = server->echo_credits = 0; | ||
2435 | switch (server->credits) { | ||
2436 | case 0: | ||
2437 | - return -1; | ||
2438 | + return 0; | ||
2439 | case 1: | ||
2440 | server->echoes = false; | ||
2441 | server->oplocks = false; | ||
2442 | - cifs_dbg(VFS, "disabling echoes and oplocks\n"); | ||
2443 | break; | ||
2444 | case 2: | ||
2445 | server->echoes = true; | ||
2446 | server->oplocks = false; | ||
2447 | server->echo_credits = 1; | ||
2448 | - cifs_dbg(FYI, "disabling oplocks\n"); | ||
2449 | break; | ||
2450 | default: | ||
2451 | server->echoes = true; | ||
2452 | @@ -64,14 +63,15 @@ change_conf(struct TCP_Server_Info *server) | ||
2453 | server->echo_credits = 1; | ||
2454 | } | ||
2455 | server->credits -= server->echo_credits + server->oplock_credits; | ||
2456 | - return 0; | ||
2457 | + return server->credits + server->echo_credits + server->oplock_credits; | ||
2458 | } | ||
2459 | |||
2460 | static void | ||
2461 | smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, | ||
2462 | const int optype) | ||
2463 | { | ||
2464 | - int *val, rc = 0; | ||
2465 | + int *val, rc = -1; | ||
2466 | + | ||
2467 | spin_lock(&server->req_lock); | ||
2468 | val = server->ops->get_credits_field(server, optype); | ||
2469 | *val += add; | ||
2470 | @@ -95,8 +95,26 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add, | ||
2471 | } | ||
2472 | spin_unlock(&server->req_lock); | ||
2473 | wake_up(&server->request_q); | ||
2474 | - if (rc) | ||
2475 | - cifs_reconnect(server); | ||
2476 | + | ||
2477 | + if (server->tcpStatus == CifsNeedReconnect) | ||
2478 | + return; | ||
2479 | + | ||
2480 | + switch (rc) { | ||
2481 | + case -1: | ||
2482 | + /* change_conf hasn't been executed */ | ||
2483 | + break; | ||
2484 | + case 0: | ||
2485 | + cifs_dbg(VFS, "Possible client or server bug - zero credits\n"); | ||
2486 | + break; | ||
2487 | + case 1: | ||
2488 | + cifs_dbg(VFS, "disabling echoes and oplocks\n"); | ||
2489 | + break; | ||
2490 | + case 2: | ||
2491 | + cifs_dbg(FYI, "disabling oplocks\n"); | ||
2492 | + break; | ||
2493 | + default: | ||
2494 | + cifs_dbg(FYI, "add %u credits total=%d\n", add, rc); | ||
2495 | + } | ||
2496 | } | ||
2497 | |||
2498 | static void | ||
2499 | @@ -154,14 +172,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, | ||
2500 | |||
2501 | scredits = server->credits; | ||
2502 | /* can deadlock with reopen */ | ||
2503 | - if (scredits == 1) { | ||
2504 | + if (scredits <= 8) { | ||
2505 | *num = SMB2_MAX_BUFFER_SIZE; | ||
2506 | *credits = 0; | ||
2507 | break; | ||
2508 | } | ||
2509 | |||
2510 | - /* leave one credit for a possible reopen */ | ||
2511 | - scredits--; | ||
2512 | + /* leave some credits for reopen and other ops */ | ||
2513 | + scredits -= 8; | ||
2514 | *num = min_t(unsigned int, size, | ||
2515 | scredits * SMB2_MAX_BUFFER_SIZE); | ||
2516 | |||
2517 | @@ -2901,11 +2919,23 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | ||
2518 | server->ops->is_status_pending(buf, server, 0)) | ||
2519 | return -1; | ||
2520 | |||
2521 | - rdata->result = server->ops->map_error(buf, false); | ||
2522 | + /* set up first two iov to get credits */ | ||
2523 | + rdata->iov[0].iov_base = buf; | ||
2524 | + rdata->iov[0].iov_len = 4; | ||
2525 | + rdata->iov[1].iov_base = buf + 4; | ||
2526 | + rdata->iov[1].iov_len = | ||
2527 | + min_t(unsigned int, buf_len, server->vals->read_rsp_size) - 4; | ||
2528 | + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
2529 | + rdata->iov[0].iov_base, rdata->iov[0].iov_len); | ||
2530 | + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", | ||
2531 | + rdata->iov[1].iov_base, rdata->iov[1].iov_len); | ||
2532 | + | ||
2533 | + rdata->result = server->ops->map_error(buf, true); | ||
2534 | if (rdata->result != 0) { | ||
2535 | cifs_dbg(FYI, "%s: server returned error %d\n", | ||
2536 | __func__, rdata->result); | ||
2537 | - dequeue_mid(mid, rdata->result); | ||
2538 | + /* normal error on read response */ | ||
2539 | + dequeue_mid(mid, false); | ||
2540 | return 0; | ||
2541 | } | ||
2542 | |||
2543 | @@ -2978,14 +3008,6 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid, | ||
2544 | return 0; | ||
2545 | } | ||
2546 | |||
2547 | - /* set up first iov for signature check */ | ||
2548 | - rdata->iov[0].iov_base = buf; | ||
2549 | - rdata->iov[0].iov_len = 4; | ||
2550 | - rdata->iov[1].iov_base = buf + 4; | ||
2551 | - rdata->iov[1].iov_len = server->vals->read_rsp_size - 4; | ||
2552 | - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", | ||
2553 | - rdata->iov[0].iov_base, server->vals->read_rsp_size); | ||
2554 | - | ||
2555 | length = rdata->copy_into_pages(server, rdata, &iter); | ||
2556 | |||
2557 | kfree(bvec); | ||
2558 | diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c | ||
2559 | index ac6978d3208c..780bba695453 100644 | ||
2560 | --- a/fs/notify/inotify/inotify_user.c | ||
2561 | +++ b/fs/notify/inotify/inotify_user.c | ||
2562 | @@ -724,8 +724,10 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname, | ||
2563 | return -EBADF; | ||
2564 | |||
2565 | /* IN_MASK_ADD and IN_MASK_CREATE don't make sense together */ | ||
2566 | - if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) | ||
2567 | - return -EINVAL; | ||
2568 | + if (unlikely((mask & IN_MASK_ADD) && (mask & IN_MASK_CREATE))) { | ||
2569 | + ret = -EINVAL; | ||
2570 | + goto fput_and_out; | ||
2571 | + } | ||
2572 | |||
2573 | /* verify that this is indeed an inotify instance */ | ||
2574 | if (unlikely(f.file->f_op != &inotify_fops)) { | ||
2575 | diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h | ||
2576 | index 1fd6fa822d2c..91393724e933 100644 | ||
2577 | --- a/include/linux/bpf_verifier.h | ||
2578 | +++ b/include/linux/bpf_verifier.h | ||
2579 | @@ -134,6 +134,7 @@ struct bpf_verifier_state { | ||
2580 | struct bpf_func_state *frame[MAX_CALL_FRAMES]; | ||
2581 | struct bpf_verifier_state *parent; | ||
2582 | u32 curframe; | ||
2583 | + bool speculative; | ||
2584 | }; | ||
2585 | |||
2586 | /* linked list of verifier states used to prune search */ | ||
2587 | @@ -142,15 +143,25 @@ struct bpf_verifier_state_list { | ||
2588 | struct bpf_verifier_state_list *next; | ||
2589 | }; | ||
2590 | |||
2591 | +/* Possible states for alu_state member. */ | ||
2592 | +#define BPF_ALU_SANITIZE_SRC 1U | ||
2593 | +#define BPF_ALU_SANITIZE_DST 2U | ||
2594 | +#define BPF_ALU_NEG_VALUE (1U << 2) | ||
2595 | +#define BPF_ALU_NON_POINTER (1U << 3) | ||
2596 | +#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ | ||
2597 | + BPF_ALU_SANITIZE_DST) | ||
2598 | + | ||
2599 | struct bpf_insn_aux_data { | ||
2600 | union { | ||
2601 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | ||
2602 | unsigned long map_state; /* pointer/poison value for maps */ | ||
2603 | s32 call_imm; /* saved imm field of call insn */ | ||
2604 | + u32 alu_limit; /* limit for add/sub register with pointer */ | ||
2605 | }; | ||
2606 | int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ | ||
2607 | int sanitize_stack_off; /* stack slot to be cleared */ | ||
2608 | bool seen; /* this insn was processed by the verifier */ | ||
2609 | + u8 alu_state; /* used in combination with alu_limit */ | ||
2610 | }; | ||
2611 | |||
2612 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | ||
2613 | @@ -186,6 +197,8 @@ struct bpf_subprog_info { | ||
2614 | * one verifier_env per bpf_check() call | ||
2615 | */ | ||
2616 | struct bpf_verifier_env { | ||
2617 | + u32 insn_idx; | ||
2618 | + u32 prev_insn_idx; | ||
2619 | struct bpf_prog *prog; /* eBPF program being verified */ | ||
2620 | const struct bpf_verifier_ops *ops; | ||
2621 | struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ | ||
2622 | diff --git a/include/linux/filter.h b/include/linux/filter.h | ||
2623 | index ec90d5255cf7..1a39d57eb88f 100644 | ||
2624 | --- a/include/linux/filter.h | ||
2625 | +++ b/include/linux/filter.h | ||
2626 | @@ -53,14 +53,10 @@ struct sock_reuseport; | ||
2627 | #define BPF_REG_D BPF_REG_8 /* data, callee-saved */ | ||
2628 | #define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ | ||
2629 | |||
2630 | -/* Kernel hidden auxiliary/helper register for hardening step. | ||
2631 | - * Only used by eBPF JITs. It's nothing more than a temporary | ||
2632 | - * register that JITs use internally, only that here it's part | ||
2633 | - * of eBPF instructions that have been rewritten for blinding | ||
2634 | - * constants. See JIT pre-step in bpf_jit_blind_constants(). | ||
2635 | - */ | ||
2636 | +/* Kernel hidden auxiliary/helper register. */ | ||
2637 | #define BPF_REG_AX MAX_BPF_REG | ||
2638 | -#define MAX_BPF_JIT_REG (MAX_BPF_REG + 1) | ||
2639 | +#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) | ||
2640 | +#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG | ||
2641 | |||
2642 | /* unused opcode to mark special call to bpf_tail_call() helper */ | ||
2643 | #define BPF_TAIL_CALL 0xf0 | ||
2644 | diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h | ||
2645 | index 5185a16b19ba..bbde887ed393 100644 | ||
2646 | --- a/include/linux/hyperv.h | ||
2647 | +++ b/include/linux/hyperv.h | ||
2648 | @@ -1166,8 +1166,9 @@ struct hv_ring_buffer_debug_info { | ||
2649 | u32 bytes_avail_towrite; | ||
2650 | }; | ||
2651 | |||
2652 | -void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | ||
2653 | - struct hv_ring_buffer_debug_info *debug_info); | ||
2654 | + | ||
2655 | +int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | ||
2656 | + struct hv_ring_buffer_debug_info *debug_info); | ||
2657 | |||
2658 | /* Vmbus interface */ | ||
2659 | #define vmbus_driver_register(driver) \ | ||
2660 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h | ||
2661 | index 60a2e7646985..5d69e208e8d9 100644 | ||
2662 | --- a/include/linux/skbuff.h | ||
2663 | +++ b/include/linux/skbuff.h | ||
2664 | @@ -3178,6 +3178,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); | ||
2665 | * | ||
2666 | * This is exactly the same as pskb_trim except that it ensures the | ||
2667 | * checksum of received packets are still valid after the operation. | ||
2668 | + * It can change skb pointers. | ||
2669 | */ | ||
2670 | |||
2671 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | ||
2672 | diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h | ||
2673 | index c9b7b136939d..95eed32d8c6b 100644 | ||
2674 | --- a/include/net/ip_fib.h | ||
2675 | +++ b/include/net/ip_fib.h | ||
2676 | @@ -230,7 +230,7 @@ int fib_table_delete(struct net *, struct fib_table *, struct fib_config *, | ||
2677 | struct netlink_ext_ack *extack); | ||
2678 | int fib_table_dump(struct fib_table *table, struct sk_buff *skb, | ||
2679 | struct netlink_callback *cb); | ||
2680 | -int fib_table_flush(struct net *net, struct fib_table *table); | ||
2681 | +int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all); | ||
2682 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); | ||
2683 | void fib_table_flush_external(struct fib_table *table); | ||
2684 | void fib_free_table(struct fib_table *tb); | ||
2685 | diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h | ||
2686 | index fb78f6f500f3..f056b2a00d5c 100644 | ||
2687 | --- a/include/uapi/linux/input.h | ||
2688 | +++ b/include/uapi/linux/input.h | ||
2689 | @@ -26,13 +26,17 @@ | ||
2690 | */ | ||
2691 | |||
2692 | struct input_event { | ||
2693 | -#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL) | ||
2694 | +#if (__BITS_PER_LONG != 32 || !defined(__USE_TIME_BITS64)) && !defined(__KERNEL__) | ||
2695 | struct timeval time; | ||
2696 | #define input_event_sec time.tv_sec | ||
2697 | #define input_event_usec time.tv_usec | ||
2698 | #else | ||
2699 | __kernel_ulong_t __sec; | ||
2700 | +#if defined(__sparc__) && defined(__arch64__) | ||
2701 | + unsigned int __usec; | ||
2702 | +#else | ||
2703 | __kernel_ulong_t __usec; | ||
2704 | +#endif | ||
2705 | #define input_event_sec __sec | ||
2706 | #define input_event_usec __usec | ||
2707 | #endif | ||
2708 | diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c | ||
2709 | index 3f5bf1af0826..474525e3a9db 100644 | ||
2710 | --- a/kernel/bpf/core.c | ||
2711 | +++ b/kernel/bpf/core.c | ||
2712 | @@ -52,6 +52,7 @@ | ||
2713 | #define DST regs[insn->dst_reg] | ||
2714 | #define SRC regs[insn->src_reg] | ||
2715 | #define FP regs[BPF_REG_FP] | ||
2716 | +#define AX regs[BPF_REG_AX] | ||
2717 | #define ARG1 regs[BPF_REG_ARG1] | ||
2718 | #define CTX regs[BPF_REG_CTX] | ||
2719 | #define IMM insn->imm | ||
2720 | @@ -642,6 +643,26 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from, | ||
2721 | BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); | ||
2722 | BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); | ||
2723 | |||
2724 | + /* Constraints on AX register: | ||
2725 | + * | ||
2726 | + * AX register is inaccessible from user space. It is mapped in | ||
2727 | + * all JITs, and used here for constant blinding rewrites. It is | ||
2728 | + * typically "stateless" meaning its contents are only valid within | ||
2729 | + * the executed instruction, but not across several instructions. | ||
2730 | + * There are a few exceptions however which are further detailed | ||
2731 | + * below. | ||
2732 | + * | ||
2733 | + * Constant blinding is only used by JITs, not in the interpreter. | ||
2734 | + * The interpreter uses AX in some occasions as a local temporary | ||
2735 | + * register e.g. in DIV or MOD instructions. | ||
2736 | + * | ||
2737 | + * In restricted circumstances, the verifier can also use the AX | ||
2738 | + * register for rewrites as long as they do not interfere with | ||
2739 | + * the above cases! | ||
2740 | + */ | ||
2741 | + if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) | ||
2742 | + goto out; | ||
2743 | + | ||
2744 | if (from->imm == 0 && | ||
2745 | (from->code == (BPF_ALU | BPF_MOV | BPF_K) || | ||
2746 | from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { | ||
2747 | @@ -971,7 +992,6 @@ bool bpf_opcode_in_insntable(u8 code) | ||
2748 | */ | ||
2749 | static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) | ||
2750 | { | ||
2751 | - u64 tmp; | ||
2752 | #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y | ||
2753 | #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z | ||
2754 | static const void *jumptable[256] = { | ||
2755 | @@ -1045,36 +1065,36 @@ select_insn: | ||
2756 | (*(s64 *) &DST) >>= IMM; | ||
2757 | CONT; | ||
2758 | ALU64_MOD_X: | ||
2759 | - div64_u64_rem(DST, SRC, &tmp); | ||
2760 | - DST = tmp; | ||
2761 | + div64_u64_rem(DST, SRC, &AX); | ||
2762 | + DST = AX; | ||
2763 | CONT; | ||
2764 | ALU_MOD_X: | ||
2765 | - tmp = (u32) DST; | ||
2766 | - DST = do_div(tmp, (u32) SRC); | ||
2767 | + AX = (u32) DST; | ||
2768 | + DST = do_div(AX, (u32) SRC); | ||
2769 | CONT; | ||
2770 | ALU64_MOD_K: | ||
2771 | - div64_u64_rem(DST, IMM, &tmp); | ||
2772 | - DST = tmp; | ||
2773 | + div64_u64_rem(DST, IMM, &AX); | ||
2774 | + DST = AX; | ||
2775 | CONT; | ||
2776 | ALU_MOD_K: | ||
2777 | - tmp = (u32) DST; | ||
2778 | - DST = do_div(tmp, (u32) IMM); | ||
2779 | + AX = (u32) DST; | ||
2780 | + DST = do_div(AX, (u32) IMM); | ||
2781 | CONT; | ||
2782 | ALU64_DIV_X: | ||
2783 | DST = div64_u64(DST, SRC); | ||
2784 | CONT; | ||
2785 | ALU_DIV_X: | ||
2786 | - tmp = (u32) DST; | ||
2787 | - do_div(tmp, (u32) SRC); | ||
2788 | - DST = (u32) tmp; | ||
2789 | + AX = (u32) DST; | ||
2790 | + do_div(AX, (u32) SRC); | ||
2791 | + DST = (u32) AX; | ||
2792 | CONT; | ||
2793 | ALU64_DIV_K: | ||
2794 | DST = div64_u64(DST, IMM); | ||
2795 | CONT; | ||
2796 | ALU_DIV_K: | ||
2797 | - tmp = (u32) DST; | ||
2798 | - do_div(tmp, (u32) IMM); | ||
2799 | - DST = (u32) tmp; | ||
2800 | + AX = (u32) DST; | ||
2801 | + do_div(AX, (u32) IMM); | ||
2802 | + DST = (u32) AX; | ||
2803 | CONT; | ||
2804 | ALU_END_TO_BE: | ||
2805 | switch (IMM) { | ||
2806 | @@ -1330,7 +1350,7 @@ STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */ | ||
2807 | static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ | ||
2808 | { \ | ||
2809 | u64 stack[stack_size / sizeof(u64)]; \ | ||
2810 | - u64 regs[MAX_BPF_REG]; \ | ||
2811 | + u64 regs[MAX_BPF_EXT_REG]; \ | ||
2812 | \ | ||
2813 | FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ | ||
2814 | ARG1 = (u64) (unsigned long) ctx; \ | ||
2815 | @@ -1343,7 +1363,7 @@ static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ | ||
2816 | const struct bpf_insn *insn) \ | ||
2817 | { \ | ||
2818 | u64 stack[stack_size / sizeof(u64)]; \ | ||
2819 | - u64 regs[MAX_BPF_REG]; \ | ||
2820 | + u64 regs[MAX_BPF_EXT_REG]; \ | ||
2821 | \ | ||
2822 | FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ | ||
2823 | BPF_R1 = r1; \ | ||
2824 | diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c | ||
2825 | index 3bfbf4464416..9670ee5ee74e 100644 | ||
2826 | --- a/kernel/bpf/map_in_map.c | ||
2827 | +++ b/kernel/bpf/map_in_map.c | ||
2828 | @@ -12,6 +12,7 @@ | ||
2829 | struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | ||
2830 | { | ||
2831 | struct bpf_map *inner_map, *inner_map_meta; | ||
2832 | + u32 inner_map_meta_size; | ||
2833 | struct fd f; | ||
2834 | |||
2835 | f = fdget(inner_map_ufd); | ||
2836 | @@ -35,7 +36,12 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | ||
2837 | return ERR_PTR(-EINVAL); | ||
2838 | } | ||
2839 | |||
2840 | - inner_map_meta = kzalloc(sizeof(*inner_map_meta), GFP_USER); | ||
2841 | + inner_map_meta_size = sizeof(*inner_map_meta); | ||
2842 | + /* In some cases verifier needs to access beyond just base map. */ | ||
2843 | + if (inner_map->ops == &array_map_ops) | ||
2844 | + inner_map_meta_size = sizeof(struct bpf_array); | ||
2845 | + | ||
2846 | + inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); | ||
2847 | if (!inner_map_meta) { | ||
2848 | fdput(f); | ||
2849 | return ERR_PTR(-ENOMEM); | ||
2850 | @@ -45,9 +51,16 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) | ||
2851 | inner_map_meta->key_size = inner_map->key_size; | ||
2852 | inner_map_meta->value_size = inner_map->value_size; | ||
2853 | inner_map_meta->map_flags = inner_map->map_flags; | ||
2854 | - inner_map_meta->ops = inner_map->ops; | ||
2855 | inner_map_meta->max_entries = inner_map->max_entries; | ||
2856 | |||
2857 | + /* Misc members not needed in bpf_map_meta_equal() check. */ | ||
2858 | + inner_map_meta->ops = inner_map->ops; | ||
2859 | + if (inner_map->ops == &array_map_ops) { | ||
2860 | + inner_map_meta->unpriv_array = inner_map->unpriv_array; | ||
2861 | + container_of(inner_map_meta, struct bpf_array, map)->index_mask = | ||
2862 | + container_of(inner_map, struct bpf_array, map)->index_mask; | ||
2863 | + } | ||
2864 | + | ||
2865 | fdput(f); | ||
2866 | return inner_map_meta; | ||
2867 | } | ||
2868 | diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c | ||
2869 | index 341806668f03..4d81be2d0739 100644 | ||
2870 | --- a/kernel/bpf/verifier.c | ||
2871 | +++ b/kernel/bpf/verifier.c | ||
2872 | @@ -156,6 +156,7 @@ struct bpf_verifier_stack_elem { | ||
2873 | |||
2874 | #define BPF_COMPLEXITY_LIMIT_INSNS 131072 | ||
2875 | #define BPF_COMPLEXITY_LIMIT_STACK 1024 | ||
2876 | +#define BPF_COMPLEXITY_LIMIT_STATES 64 | ||
2877 | |||
2878 | #define BPF_MAP_PTR_UNPRIV 1UL | ||
2879 | #define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ | ||
2880 | @@ -465,6 +466,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, | ||
2881 | free_func_state(dst_state->frame[i]); | ||
2882 | dst_state->frame[i] = NULL; | ||
2883 | } | ||
2884 | + dst_state->speculative = src->speculative; | ||
2885 | dst_state->curframe = src->curframe; | ||
2886 | dst_state->parent = src->parent; | ||
2887 | for (i = 0; i <= src->curframe; i++) { | ||
2888 | @@ -510,7 +512,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, | ||
2889 | } | ||
2890 | |||
2891 | static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, | ||
2892 | - int insn_idx, int prev_insn_idx) | ||
2893 | + int insn_idx, int prev_insn_idx, | ||
2894 | + bool speculative) | ||
2895 | { | ||
2896 | struct bpf_verifier_state *cur = env->cur_state; | ||
2897 | struct bpf_verifier_stack_elem *elem; | ||
2898 | @@ -528,6 +531,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, | ||
2899 | err = copy_verifier_state(&elem->st, cur); | ||
2900 | if (err) | ||
2901 | goto err; | ||
2902 | + elem->st.speculative |= speculative; | ||
2903 | if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) { | ||
2904 | verbose(env, "BPF program is too complex\n"); | ||
2905 | goto err; | ||
2906 | @@ -1237,6 +1241,31 @@ static int check_stack_read(struct bpf_verifier_env *env, | ||
2907 | } | ||
2908 | } | ||
2909 | |||
2910 | +static int check_stack_access(struct bpf_verifier_env *env, | ||
2911 | + const struct bpf_reg_state *reg, | ||
2912 | + int off, int size) | ||
2913 | +{ | ||
2914 | + /* Stack accesses must be at a fixed offset, so that we | ||
2915 | + * can determine what type of data were returned. See | ||
2916 | + * check_stack_read(). | ||
2917 | + */ | ||
2918 | + if (!tnum_is_const(reg->var_off)) { | ||
2919 | + char tn_buf[48]; | ||
2920 | + | ||
2921 | + tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); | ||
2922 | + verbose(env, "variable stack access var_off=%s off=%d size=%d", | ||
2923 | + tn_buf, off, size); | ||
2924 | + return -EACCES; | ||
2925 | + } | ||
2926 | + | ||
2927 | + if (off >= 0 || off < -MAX_BPF_STACK) { | ||
2928 | + verbose(env, "invalid stack off=%d size=%d\n", off, size); | ||
2929 | + return -EACCES; | ||
2930 | + } | ||
2931 | + | ||
2932 | + return 0; | ||
2933 | +} | ||
2934 | + | ||
2935 | /* check read/write into map element returned by bpf_map_lookup_elem() */ | ||
2936 | static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, | ||
2937 | int size, bool zero_size_allowed) | ||
2938 | @@ -1268,13 +1297,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, | ||
2939 | */ | ||
2940 | if (env->log.level) | ||
2941 | print_verifier_state(env, state); | ||
2942 | + | ||
2943 | /* The minimum value is only important with signed | ||
2944 | * comparisons where we can't assume the floor of a | ||
2945 | * value is 0. If we are using signed variables for our | ||
2946 | * index'es we need to make sure that whatever we use | ||
2947 | * will have a set floor within our range. | ||
2948 | */ | ||
2949 | - if (reg->smin_value < 0) { | ||
2950 | + if (reg->smin_value < 0 && | ||
2951 | + (reg->smin_value == S64_MIN || | ||
2952 | + (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || | ||
2953 | + reg->smin_value + off < 0)) { | ||
2954 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | ||
2955 | regno); | ||
2956 | return -EACCES; | ||
2957 | @@ -1735,24 +1768,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | ||
2958 | } | ||
2959 | |||
2960 | } else if (reg->type == PTR_TO_STACK) { | ||
2961 | - /* stack accesses must be at a fixed offset, so that we can | ||
2962 | - * determine what type of data were returned. | ||
2963 | - * See check_stack_read(). | ||
2964 | - */ | ||
2965 | - if (!tnum_is_const(reg->var_off)) { | ||
2966 | - char tn_buf[48]; | ||
2967 | - | ||
2968 | - tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); | ||
2969 | - verbose(env, "variable stack access var_off=%s off=%d size=%d", | ||
2970 | - tn_buf, off, size); | ||
2971 | - return -EACCES; | ||
2972 | - } | ||
2973 | off += reg->var_off.value; | ||
2974 | - if (off >= 0 || off < -MAX_BPF_STACK) { | ||
2975 | - verbose(env, "invalid stack off=%d size=%d\n", off, | ||
2976 | - size); | ||
2977 | - return -EACCES; | ||
2978 | - } | ||
2979 | + err = check_stack_access(env, reg, off, size); | ||
2980 | + if (err) | ||
2981 | + return err; | ||
2982 | |||
2983 | state = func(env, reg); | ||
2984 | err = update_stack_depth(env, state, off); | ||
2985 | @@ -2682,6 +2701,125 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env, | ||
2986 | return true; | ||
2987 | } | ||
2988 | |||
2989 | +static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) | ||
2990 | +{ | ||
2991 | + return &env->insn_aux_data[env->insn_idx]; | ||
2992 | +} | ||
2993 | + | ||
2994 | +static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, | ||
2995 | + u32 *ptr_limit, u8 opcode, bool off_is_neg) | ||
2996 | +{ | ||
2997 | + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || | ||
2998 | + (opcode == BPF_SUB && !off_is_neg); | ||
2999 | + u32 off; | ||
3000 | + | ||
3001 | + switch (ptr_reg->type) { | ||
3002 | + case PTR_TO_STACK: | ||
3003 | + off = ptr_reg->off + ptr_reg->var_off.value; | ||
3004 | + if (mask_to_left) | ||
3005 | + *ptr_limit = MAX_BPF_STACK + off; | ||
3006 | + else | ||
3007 | + *ptr_limit = -off; | ||
3008 | + return 0; | ||
3009 | + case PTR_TO_MAP_VALUE: | ||
3010 | + if (mask_to_left) { | ||
3011 | + *ptr_limit = ptr_reg->umax_value + ptr_reg->off; | ||
3012 | + } else { | ||
3013 | + off = ptr_reg->smin_value + ptr_reg->off; | ||
3014 | + *ptr_limit = ptr_reg->map_ptr->value_size - off; | ||
3015 | + } | ||
3016 | + return 0; | ||
3017 | + default: | ||
3018 | + return -EINVAL; | ||
3019 | + } | ||
3020 | +} | ||
3021 | + | ||
3022 | +static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, | ||
3023 | + const struct bpf_insn *insn) | ||
3024 | +{ | ||
3025 | + return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; | ||
3026 | +} | ||
3027 | + | ||
3028 | +static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, | ||
3029 | + u32 alu_state, u32 alu_limit) | ||
3030 | +{ | ||
3031 | + /* If we arrived here from different branches with different | ||
3032 | + * state or limits to sanitize, then this won't work. | ||
3033 | + */ | ||
3034 | + if (aux->alu_state && | ||
3035 | + (aux->alu_state != alu_state || | ||
3036 | + aux->alu_limit != alu_limit)) | ||
3037 | + return -EACCES; | ||
3038 | + | ||
3039 | + /* Corresponding fixup done in fixup_bpf_calls(). */ | ||
3040 | + aux->alu_state = alu_state; | ||
3041 | + aux->alu_limit = alu_limit; | ||
3042 | + return 0; | ||
3043 | +} | ||
3044 | + | ||
3045 | +static int sanitize_val_alu(struct bpf_verifier_env *env, | ||
3046 | + struct bpf_insn *insn) | ||
3047 | +{ | ||
3048 | + struct bpf_insn_aux_data *aux = cur_aux(env); | ||
3049 | + | ||
3050 | + if (can_skip_alu_sanitation(env, insn)) | ||
3051 | + return 0; | ||
3052 | + | ||
3053 | + return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); | ||
3054 | +} | ||
3055 | + | ||
3056 | +static int sanitize_ptr_alu(struct bpf_verifier_env *env, | ||
3057 | + struct bpf_insn *insn, | ||
3058 | + const struct bpf_reg_state *ptr_reg, | ||
3059 | + struct bpf_reg_state *dst_reg, | ||
3060 | + bool off_is_neg) | ||
3061 | +{ | ||
3062 | + struct bpf_verifier_state *vstate = env->cur_state; | ||
3063 | + struct bpf_insn_aux_data *aux = cur_aux(env); | ||
3064 | + bool ptr_is_dst_reg = ptr_reg == dst_reg; | ||
3065 | + u8 opcode = BPF_OP(insn->code); | ||
3066 | + u32 alu_state, alu_limit; | ||
3067 | + struct bpf_reg_state tmp; | ||
3068 | + bool ret; | ||
3069 | + | ||
3070 | + if (can_skip_alu_sanitation(env, insn)) | ||
3071 | + return 0; | ||
3072 | + | ||
3073 | + /* We already marked aux for masking from non-speculative | ||
3074 | + * paths, thus we got here in the first place. We only care | ||
3075 | + * to explore bad access from here. | ||
3076 | + */ | ||
3077 | + if (vstate->speculative) | ||
3078 | + goto do_sim; | ||
3079 | + | ||
3080 | + alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; | ||
3081 | + alu_state |= ptr_is_dst_reg ? | ||
3082 | + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; | ||
3083 | + | ||
3084 | + if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) | ||
3085 | + return 0; | ||
3086 | + if (update_alu_sanitation_state(aux, alu_state, alu_limit)) | ||
3087 | + return -EACCES; | ||
3088 | +do_sim: | ||
3089 | + /* Simulate and find potential out-of-bounds access under | ||
3090 | + * speculative execution from truncation as a result of | ||
3091 | + * masking when off was not within expected range. If off | ||
3092 | + * sits in dst, then we temporarily need to move ptr there | ||
3093 | + * to simulate dst (== 0) +/-= ptr. Needed, for example, | ||
3094 | + * for cases where we use K-based arithmetic in one direction | ||
3095 | + * and truncated reg-based in the other in order to explore | ||
3096 | + * bad access. | ||
3097 | + */ | ||
3098 | + if (!ptr_is_dst_reg) { | ||
3099 | + tmp = *dst_reg; | ||
3100 | + *dst_reg = *ptr_reg; | ||
3101 | + } | ||
3102 | + ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true); | ||
3103 | + if (!ptr_is_dst_reg) | ||
3104 | + *dst_reg = tmp; | ||
3105 | + return !ret ? -EFAULT : 0; | ||
3106 | +} | ||
3107 | + | ||
3108 | /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. | ||
3109 | * Caller should also handle BPF_MOV case separately. | ||
3110 | * If we return -EACCES, caller may want to try again treating pointer as a | ||
3111 | @@ -2700,8 +2838,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | ||
3112 | smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; | ||
3113 | u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, | ||
3114 | umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; | ||
3115 | + u32 dst = insn->dst_reg, src = insn->src_reg; | ||
3116 | u8 opcode = BPF_OP(insn->code); | ||
3117 | - u32 dst = insn->dst_reg; | ||
3118 | + int ret; | ||
3119 | |||
3120 | dst_reg = ®s[dst]; | ||
3121 | |||
3122 | @@ -2737,6 +2876,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | ||
3123 | dst); | ||
3124 | return -EACCES; | ||
3125 | } | ||
3126 | + if (ptr_reg->type == PTR_TO_MAP_VALUE && | ||
3127 | + !env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) { | ||
3128 | + verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n", | ||
3129 | + off_reg == dst_reg ? dst : src); | ||
3130 | + return -EACCES; | ||
3131 | + } | ||
3132 | |||
3133 | /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. | ||
3134 | * The id may be overwritten later if we create a new variable offset. | ||
3135 | @@ -2750,6 +2895,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | ||
3136 | |||
3137 | switch (opcode) { | ||
3138 | case BPF_ADD: | ||
3139 | + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); | ||
3140 | + if (ret < 0) { | ||
3141 | + verbose(env, "R%d tried to add from different maps or paths\n", dst); | ||
3142 | + return ret; | ||
3143 | + } | ||
3144 | /* We can take a fixed offset as long as it doesn't overflow | ||
3145 | * the s32 'off' field | ||
3146 | */ | ||
3147 | @@ -2800,6 +2950,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | ||
3148 | } | ||
3149 | break; | ||
3150 | case BPF_SUB: | ||
3151 | + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); | ||
3152 | + if (ret < 0) { | ||
3153 | + verbose(env, "R%d tried to sub from different maps or paths\n", dst); | ||
3154 | + return ret; | ||
3155 | + } | ||
3156 | if (dst_reg == off_reg) { | ||
3157 | /* scalar -= pointer. Creates an unknown scalar */ | ||
3158 | verbose(env, "R%d tried to subtract pointer from scalar\n", | ||
3159 | @@ -2879,6 +3034,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | ||
3160 | __update_reg_bounds(dst_reg); | ||
3161 | __reg_deduce_bounds(dst_reg); | ||
3162 | __reg_bound_offset(dst_reg); | ||
3163 | + | ||
3164 | + /* For unprivileged we require that resulting offset must be in bounds | ||
3165 | + * in order to be able to sanitize access later on. | ||
3166 | + */ | ||
3167 | + if (!env->allow_ptr_leaks) { | ||
3168 | + if (dst_reg->type == PTR_TO_MAP_VALUE && | ||
3169 | + check_map_access(env, dst, dst_reg->off, 1, false)) { | ||
3170 | + verbose(env, "R%d pointer arithmetic of map value goes out of range, " | ||
3171 | + "prohibited for !root\n", dst); | ||
3172 | + return -EACCES; | ||
3173 | + } else if (dst_reg->type == PTR_TO_STACK && | ||
3174 | + check_stack_access(env, dst_reg, dst_reg->off + | ||
3175 | + dst_reg->var_off.value, 1)) { | ||
3176 | + verbose(env, "R%d stack pointer arithmetic goes out of range, " | ||
3177 | + "prohibited for !root\n", dst); | ||
3178 | + return -EACCES; | ||
3179 | + } | ||
3180 | + } | ||
3181 | + | ||
3182 | return 0; | ||
3183 | } | ||
3184 | |||
3185 | @@ -2897,6 +3071,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | ||
3186 | s64 smin_val, smax_val; | ||
3187 | u64 umin_val, umax_val; | ||
3188 | u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; | ||
3189 | + u32 dst = insn->dst_reg; | ||
3190 | + int ret; | ||
3191 | |||
3192 | if (insn_bitness == 32) { | ||
3193 | /* Relevant for 32-bit RSH: Information can propagate towards | ||
3194 | @@ -2931,6 +3107,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | ||
3195 | |||
3196 | switch (opcode) { | ||
3197 | case BPF_ADD: | ||
3198 | + ret = sanitize_val_alu(env, insn); | ||
3199 | + if (ret < 0) { | ||
3200 | + verbose(env, "R%d tried to add from different pointers or scalars\n", dst); | ||
3201 | + return ret; | ||
3202 | + } | ||
3203 | if (signed_add_overflows(dst_reg->smin_value, smin_val) || | ||
3204 | signed_add_overflows(dst_reg->smax_value, smax_val)) { | ||
3205 | dst_reg->smin_value = S64_MIN; | ||
3206 | @@ -2950,6 +3131,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, | ||
3207 | dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); | ||
3208 | break; | ||
3209 | case BPF_SUB: | ||
3210 | + ret = sanitize_val_alu(env, insn); | ||
3211 | + if (ret < 0) { | ||
3212 | + verbose(env, "R%d tried to sub from different pointers or scalars\n", dst); | ||
3213 | + return ret; | ||
3214 | + } | ||
3215 | if (signed_sub_overflows(dst_reg->smin_value, smax_val) || | ||
3216 | signed_sub_overflows(dst_reg->smax_value, smin_val)) { | ||
3217 | /* Overflow possible, we know nothing */ | ||
3218 | @@ -3475,6 +3661,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, | ||
3219 | } | ||
3220 | } | ||
3221 | |||
3222 | +/* compute branch direction of the expression "if (reg opcode val) goto target;" | ||
3223 | + * and return: | ||
3224 | + * 1 - branch will be taken and "goto target" will be executed | ||
3225 | + * 0 - branch will not be taken and fall-through to next insn | ||
3226 | + * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10] | ||
3227 | + */ | ||
3228 | +static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode) | ||
3229 | +{ | ||
3230 | + if (__is_pointer_value(false, reg)) | ||
3231 | + return -1; | ||
3232 | + | ||
3233 | + switch (opcode) { | ||
3234 | + case BPF_JEQ: | ||
3235 | + if (tnum_is_const(reg->var_off)) | ||
3236 | + return !!tnum_equals_const(reg->var_off, val); | ||
3237 | + break; | ||
3238 | + case BPF_JNE: | ||
3239 | + if (tnum_is_const(reg->var_off)) | ||
3240 | + return !tnum_equals_const(reg->var_off, val); | ||
3241 | + break; | ||
3242 | + case BPF_JGT: | ||
3243 | + if (reg->umin_value > val) | ||
3244 | + return 1; | ||
3245 | + else if (reg->umax_value <= val) | ||
3246 | + return 0; | ||
3247 | + break; | ||
3248 | + case BPF_JSGT: | ||
3249 | + if (reg->smin_value > (s64)val) | ||
3250 | + return 1; | ||
3251 | + else if (reg->smax_value < (s64)val) | ||
3252 | + return 0; | ||
3253 | + break; | ||
3254 | + case BPF_JLT: | ||
3255 | + if (reg->umax_value < val) | ||
3256 | + return 1; | ||
3257 | + else if (reg->umin_value >= val) | ||
3258 | + return 0; | ||
3259 | + break; | ||
3260 | + case BPF_JSLT: | ||
3261 | + if (reg->smax_value < (s64)val) | ||
3262 | + return 1; | ||
3263 | + else if (reg->smin_value >= (s64)val) | ||
3264 | + return 0; | ||
3265 | + break; | ||
3266 | + case BPF_JGE: | ||
3267 | + if (reg->umin_value >= val) | ||
3268 | + return 1; | ||
3269 | + else if (reg->umax_value < val) | ||
3270 | + return 0; | ||
3271 | + break; | ||
3272 | + case BPF_JSGE: | ||
3273 | + if (reg->smin_value >= (s64)val) | ||
3274 | + return 1; | ||
3275 | + else if (reg->smax_value < (s64)val) | ||
3276 | + return 0; | ||
3277 | + break; | ||
3278 | + case BPF_JLE: | ||
3279 | + if (reg->umax_value <= val) | ||
3280 | + return 1; | ||
3281 | + else if (reg->umin_value > val) | ||
3282 | + return 0; | ||
3283 | + break; | ||
3284 | + case BPF_JSLE: | ||
3285 | + if (reg->smax_value <= (s64)val) | ||
3286 | + return 1; | ||
3287 | + else if (reg->smin_value > (s64)val) | ||
3288 | + return 0; | ||
3289 | + break; | ||
3290 | + } | ||
3291 | + | ||
3292 | + return -1; | ||
3293 | +} | ||
3294 | + | ||
3295 | /* Adjusts the register min/max values in the case that the dst_reg is the | ||
3296 | * variable register that we are working on, and src_reg is a constant or we're | ||
3297 | * simply doing a BPF_K check. | ||
3298 | @@ -3868,28 +4127,23 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | ||
3299 | |||
3300 | dst_reg = ®s[insn->dst_reg]; | ||
3301 | |||
3302 | - /* detect if R == 0 where R was initialized to zero earlier */ | ||
3303 | - if (BPF_SRC(insn->code) == BPF_K && | ||
3304 | - (opcode == BPF_JEQ || opcode == BPF_JNE) && | ||
3305 | - dst_reg->type == SCALAR_VALUE && | ||
3306 | - tnum_is_const(dst_reg->var_off)) { | ||
3307 | - if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) || | ||
3308 | - (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) { | ||
3309 | - /* if (imm == imm) goto pc+off; | ||
3310 | - * only follow the goto, ignore fall-through | ||
3311 | - */ | ||
3312 | + if (BPF_SRC(insn->code) == BPF_K) { | ||
3313 | + int pred = is_branch_taken(dst_reg, insn->imm, opcode); | ||
3314 | + | ||
3315 | + if (pred == 1) { | ||
3316 | + /* only follow the goto, ignore fall-through */ | ||
3317 | *insn_idx += insn->off; | ||
3318 | return 0; | ||
3319 | - } else { | ||
3320 | - /* if (imm != imm) goto pc+off; | ||
3321 | - * only follow fall-through branch, since | ||
3322 | + } else if (pred == 0) { | ||
3323 | + /* only follow fall-through branch, since | ||
3324 | * that's where the program will go | ||
3325 | */ | ||
3326 | return 0; | ||
3327 | } | ||
3328 | } | ||
3329 | |||
3330 | - other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); | ||
3331 | + other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, | ||
3332 | + false); | ||
3333 | if (!other_branch) | ||
3334 | return -EFAULT; | ||
3335 | other_branch_regs = other_branch->frame[other_branch->curframe]->regs; | ||
3336 | @@ -4604,6 +4858,12 @@ static bool states_equal(struct bpf_verifier_env *env, | ||
3337 | if (old->curframe != cur->curframe) | ||
3338 | return false; | ||
3339 | |||
3340 | + /* Verification state from speculative execution simulation | ||
3341 | + * must never prune a non-speculative execution one. | ||
3342 | + */ | ||
3343 | + if (old->speculative && !cur->speculative) | ||
3344 | + return false; | ||
3345 | + | ||
3346 | /* for states to be equal callsites have to be the same | ||
3347 | * and all frame states need to be equivalent | ||
3348 | */ | ||
3349 | @@ -4668,7 +4928,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | ||
3350 | struct bpf_verifier_state_list *new_sl; | ||
3351 | struct bpf_verifier_state_list *sl; | ||
3352 | struct bpf_verifier_state *cur = env->cur_state; | ||
3353 | - int i, j, err; | ||
3354 | + int i, j, err, states_cnt = 0; | ||
3355 | |||
3356 | sl = env->explored_states[insn_idx]; | ||
3357 | if (!sl) | ||
3358 | @@ -4695,8 +4955,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) | ||
3359 | return 1; | ||
3360 | } | ||
3361 | sl = sl->next; | ||
3362 | + states_cnt++; | ||
3363 | } | ||
3364 | |||
3365 | + if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) | ||
3366 | + return 0; | ||
3367 | + | ||
3368 | /* there were no equivalent states, remember current one. | ||
3369 | * technically the current state is not proven to be safe yet, | ||
3370 | * but it will either reach outer most bpf_exit (which means it's safe) | ||
3371 | @@ -4744,7 +5008,6 @@ static int do_check(struct bpf_verifier_env *env) | ||
3372 | struct bpf_insn *insns = env->prog->insnsi; | ||
3373 | struct bpf_reg_state *regs; | ||
3374 | int insn_cnt = env->prog->len, i; | ||
3375 | - int insn_idx, prev_insn_idx = 0; | ||
3376 | int insn_processed = 0; | ||
3377 | bool do_print_state = false; | ||
3378 | |||
3379 | @@ -4752,7 +5015,7 @@ static int do_check(struct bpf_verifier_env *env) | ||
3380 | if (!state) | ||
3381 | return -ENOMEM; | ||
3382 | state->curframe = 0; | ||
3383 | - state->parent = NULL; | ||
3384 | + state->speculative = false; | ||
3385 | state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); | ||
3386 | if (!state->frame[0]) { | ||
3387 | kfree(state); | ||
3388 | @@ -4763,19 +5026,19 @@ static int do_check(struct bpf_verifier_env *env) | ||
3389 | BPF_MAIN_FUNC /* callsite */, | ||
3390 | 0 /* frameno */, | ||
3391 | 0 /* subprogno, zero == main subprog */); | ||
3392 | - insn_idx = 0; | ||
3393 | + | ||
3394 | for (;;) { | ||
3395 | struct bpf_insn *insn; | ||
3396 | u8 class; | ||
3397 | int err; | ||
3398 | |||
3399 | - if (insn_idx >= insn_cnt) { | ||
3400 | + if (env->insn_idx >= insn_cnt) { | ||
3401 | verbose(env, "invalid insn idx %d insn_cnt %d\n", | ||
3402 | - insn_idx, insn_cnt); | ||
3403 | + env->insn_idx, insn_cnt); | ||
3404 | return -EFAULT; | ||
3405 | } | ||
3406 | |||
3407 | - insn = &insns[insn_idx]; | ||
3408 | + insn = &insns[env->insn_idx]; | ||
3409 | class = BPF_CLASS(insn->code); | ||
3410 | |||
3411 | if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { | ||
3412 | @@ -4785,17 +5048,19 @@ static int do_check(struct bpf_verifier_env *env) | ||
3413 | return -E2BIG; | ||
3414 | } | ||
3415 | |||
3416 | - err = is_state_visited(env, insn_idx); | ||
3417 | + err = is_state_visited(env, env->insn_idx); | ||
3418 | if (err < 0) | ||
3419 | return err; | ||
3420 | if (err == 1) { | ||
3421 | /* found equivalent state, can prune the search */ | ||
3422 | if (env->log.level) { | ||
3423 | if (do_print_state) | ||
3424 | - verbose(env, "\nfrom %d to %d: safe\n", | ||
3425 | - prev_insn_idx, insn_idx); | ||
3426 | + verbose(env, "\nfrom %d to %d%s: safe\n", | ||
3427 | + env->prev_insn_idx, env->insn_idx, | ||
3428 | + env->cur_state->speculative ? | ||
3429 | + " (speculative execution)" : ""); | ||
3430 | else | ||
3431 | - verbose(env, "%d: safe\n", insn_idx); | ||
3432 | + verbose(env, "%d: safe\n", env->insn_idx); | ||
3433 | } | ||
3434 | goto process_bpf_exit; | ||
3435 | } | ||
3436 | @@ -4808,10 +5073,12 @@ static int do_check(struct bpf_verifier_env *env) | ||
3437 | |||
3438 | if (env->log.level > 1 || (env->log.level && do_print_state)) { | ||
3439 | if (env->log.level > 1) | ||
3440 | - verbose(env, "%d:", insn_idx); | ||
3441 | + verbose(env, "%d:", env->insn_idx); | ||
3442 | else | ||
3443 | - verbose(env, "\nfrom %d to %d:", | ||
3444 | - prev_insn_idx, insn_idx); | ||
3445 | + verbose(env, "\nfrom %d to %d%s:", | ||
3446 | + env->prev_insn_idx, env->insn_idx, | ||
3447 | + env->cur_state->speculative ? | ||
3448 | + " (speculative execution)" : ""); | ||
3449 | print_verifier_state(env, state->frame[state->curframe]); | ||
3450 | do_print_state = false; | ||
3451 | } | ||
3452 | @@ -4822,19 +5089,20 @@ static int do_check(struct bpf_verifier_env *env) | ||
3453 | .private_data = env, | ||
3454 | }; | ||
3455 | |||
3456 | - verbose(env, "%d: ", insn_idx); | ||
3457 | + verbose(env, "%d: ", env->insn_idx); | ||
3458 | print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); | ||
3459 | } | ||
3460 | |||
3461 | if (bpf_prog_is_dev_bound(env->prog->aux)) { | ||
3462 | - err = bpf_prog_offload_verify_insn(env, insn_idx, | ||
3463 | - prev_insn_idx); | ||
3464 | + err = bpf_prog_offload_verify_insn(env, env->insn_idx, | ||
3465 | + env->prev_insn_idx); | ||
3466 | if (err) | ||
3467 | return err; | ||
3468 | } | ||
3469 | |||
3470 | regs = cur_regs(env); | ||
3471 | - env->insn_aux_data[insn_idx].seen = true; | ||
3472 | + env->insn_aux_data[env->insn_idx].seen = true; | ||
3473 | + | ||
3474 | if (class == BPF_ALU || class == BPF_ALU64) { | ||
3475 | err = check_alu_op(env, insn); | ||
3476 | if (err) | ||
3477 | @@ -4859,13 +5127,13 @@ static int do_check(struct bpf_verifier_env *env) | ||
3478 | /* check that memory (src_reg + off) is readable, | ||
3479 | * the state of dst_reg will be updated by this func | ||
3480 | */ | ||
3481 | - err = check_mem_access(env, insn_idx, insn->src_reg, insn->off, | ||
3482 | - BPF_SIZE(insn->code), BPF_READ, | ||
3483 | - insn->dst_reg, false); | ||
3484 | + err = check_mem_access(env, env->insn_idx, insn->src_reg, | ||
3485 | + insn->off, BPF_SIZE(insn->code), | ||
3486 | + BPF_READ, insn->dst_reg, false); | ||
3487 | if (err) | ||
3488 | return err; | ||
3489 | |||
3490 | - prev_src_type = &env->insn_aux_data[insn_idx].ptr_type; | ||
3491 | + prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; | ||
3492 | |||
3493 | if (*prev_src_type == NOT_INIT) { | ||
3494 | /* saw a valid insn | ||
3495 | @@ -4892,10 +5160,10 @@ static int do_check(struct bpf_verifier_env *env) | ||
3496 | enum bpf_reg_type *prev_dst_type, dst_reg_type; | ||
3497 | |||
3498 | if (BPF_MODE(insn->code) == BPF_XADD) { | ||
3499 | - err = check_xadd(env, insn_idx, insn); | ||
3500 | + err = check_xadd(env, env->insn_idx, insn); | ||
3501 | if (err) | ||
3502 | return err; | ||
3503 | - insn_idx++; | ||
3504 | + env->insn_idx++; | ||
3505 | continue; | ||
3506 | } | ||
3507 | |||
3508 | @@ -4911,13 +5179,13 @@ static int do_check(struct bpf_verifier_env *env) | ||
3509 | dst_reg_type = regs[insn->dst_reg].type; | ||
3510 | |||
3511 | /* check that memory (dst_reg + off) is writeable */ | ||
3512 | - err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | ||
3513 | - BPF_SIZE(insn->code), BPF_WRITE, | ||
3514 | - insn->src_reg, false); | ||
3515 | + err = check_mem_access(env, env->insn_idx, insn->dst_reg, | ||
3516 | + insn->off, BPF_SIZE(insn->code), | ||
3517 | + BPF_WRITE, insn->src_reg, false); | ||
3518 | if (err) | ||
3519 | return err; | ||
3520 | |||
3521 | - prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type; | ||
3522 | + prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; | ||
3523 | |||
3524 | if (*prev_dst_type == NOT_INIT) { | ||
3525 | *prev_dst_type = dst_reg_type; | ||
3526 | @@ -4946,9 +5214,9 @@ static int do_check(struct bpf_verifier_env *env) | ||
3527 | } | ||
3528 | |||
3529 | /* check that memory (dst_reg + off) is writeable */ | ||
3530 | - err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, | ||
3531 | - BPF_SIZE(insn->code), BPF_WRITE, | ||
3532 | - -1, false); | ||
3533 | + err = check_mem_access(env, env->insn_idx, insn->dst_reg, | ||
3534 | + insn->off, BPF_SIZE(insn->code), | ||
3535 | + BPF_WRITE, -1, false); | ||
3536 | if (err) | ||
3537 | return err; | ||
3538 | |||
3539 | @@ -4966,9 +5234,9 @@ static int do_check(struct bpf_verifier_env *env) | ||
3540 | } | ||
3541 | |||
3542 | if (insn->src_reg == BPF_PSEUDO_CALL) | ||
3543 | - err = check_func_call(env, insn, &insn_idx); | ||
3544 | + err = check_func_call(env, insn, &env->insn_idx); | ||
3545 | else | ||
3546 | - err = check_helper_call(env, insn->imm, insn_idx); | ||
3547 | + err = check_helper_call(env, insn->imm, env->insn_idx); | ||
3548 | if (err) | ||
3549 | return err; | ||
3550 | |||
3551 | @@ -4981,7 +5249,7 @@ static int do_check(struct bpf_verifier_env *env) | ||
3552 | return -EINVAL; | ||
3553 | } | ||
3554 | |||
3555 | - insn_idx += insn->off + 1; | ||
3556 | + env->insn_idx += insn->off + 1; | ||
3557 | continue; | ||
3558 | |||
3559 | } else if (opcode == BPF_EXIT) { | ||
3560 | @@ -4995,8 +5263,8 @@ static int do_check(struct bpf_verifier_env *env) | ||
3561 | |||
3562 | if (state->curframe) { | ||
3563 | /* exit from nested function */ | ||
3564 | - prev_insn_idx = insn_idx; | ||
3565 | - err = prepare_func_exit(env, &insn_idx); | ||
3566 | + env->prev_insn_idx = env->insn_idx; | ||
3567 | + err = prepare_func_exit(env, &env->insn_idx); | ||
3568 | if (err) | ||
3569 | return err; | ||
3570 | do_print_state = true; | ||
3571 | @@ -5022,7 +5290,8 @@ static int do_check(struct bpf_verifier_env *env) | ||
3572 | if (err) | ||
3573 | return err; | ||
3574 | process_bpf_exit: | ||
3575 | - err = pop_stack(env, &prev_insn_idx, &insn_idx); | ||
3576 | + err = pop_stack(env, &env->prev_insn_idx, | ||
3577 | + &env->insn_idx); | ||
3578 | if (err < 0) { | ||
3579 | if (err != -ENOENT) | ||
3580 | return err; | ||
3581 | @@ -5032,7 +5301,7 @@ process_bpf_exit: | ||
3582 | continue; | ||
3583 | } | ||
3584 | } else { | ||
3585 | - err = check_cond_jmp_op(env, insn, &insn_idx); | ||
3586 | + err = check_cond_jmp_op(env, insn, &env->insn_idx); | ||
3587 | if (err) | ||
3588 | return err; | ||
3589 | } | ||
3590 | @@ -5049,8 +5318,8 @@ process_bpf_exit: | ||
3591 | if (err) | ||
3592 | return err; | ||
3593 | |||
3594 | - insn_idx++; | ||
3595 | - env->insn_aux_data[insn_idx].seen = true; | ||
3596 | + env->insn_idx++; | ||
3597 | + env->insn_aux_data[env->insn_idx].seen = true; | ||
3598 | } else { | ||
3599 | verbose(env, "invalid BPF_LD mode\n"); | ||
3600 | return -EINVAL; | ||
3601 | @@ -5060,7 +5329,7 @@ process_bpf_exit: | ||
3602 | return -EINVAL; | ||
3603 | } | ||
3604 | |||
3605 | - insn_idx++; | ||
3606 | + env->insn_idx++; | ||
3607 | } | ||
3608 | |||
3609 | verbose(env, "processed %d insns (limit %d), stack depth ", | ||
3610 | @@ -5756,6 +6025,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) | ||
3611 | continue; | ||
3612 | } | ||
3613 | |||
3614 | + if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || | ||
3615 | + insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { | ||
3616 | + const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; | ||
3617 | + const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; | ||
3618 | + struct bpf_insn insn_buf[16]; | ||
3619 | + struct bpf_insn *patch = &insn_buf[0]; | ||
3620 | + bool issrc, isneg; | ||
3621 | + u32 off_reg; | ||
3622 | + | ||
3623 | + aux = &env->insn_aux_data[i + delta]; | ||
3624 | + if (!aux->alu_state) | ||
3625 | + continue; | ||
3626 | + | ||
3627 | + isneg = aux->alu_state & BPF_ALU_NEG_VALUE; | ||
3628 | + issrc = (aux->alu_state & BPF_ALU_SANITIZE) == | ||
3629 | + BPF_ALU_SANITIZE_SRC; | ||
3630 | + | ||
3631 | + off_reg = issrc ? insn->src_reg : insn->dst_reg; | ||
3632 | + if (isneg) | ||
3633 | + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); | ||
3634 | + *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); | ||
3635 | + *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); | ||
3636 | + *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); | ||
3637 | + *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); | ||
3638 | + *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); | ||
3639 | + if (issrc) { | ||
3640 | + *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, | ||
3641 | + off_reg); | ||
3642 | + insn->src_reg = BPF_REG_AX; | ||
3643 | + } else { | ||
3644 | + *patch++ = BPF_ALU64_REG(BPF_AND, off_reg, | ||
3645 | + BPF_REG_AX); | ||
3646 | + } | ||
3647 | + if (isneg) | ||
3648 | + insn->code = insn->code == code_add ? | ||
3649 | + code_sub : code_add; | ||
3650 | + *patch++ = *insn; | ||
3651 | + if (issrc && isneg) | ||
3652 | + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); | ||
3653 | + cnt = patch - insn_buf; | ||
3654 | + | ||
3655 | + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); | ||
3656 | + if (!new_prog) | ||
3657 | + return -ENOMEM; | ||
3658 | + | ||
3659 | + delta += cnt - 1; | ||
3660 | + env->prog = prog = new_prog; | ||
3661 | + insn = new_prog->insnsi + i + delta; | ||
3662 | + continue; | ||
3663 | + } | ||
3664 | + | ||
3665 | if (insn->code != (BPF_JMP | BPF_CALL)) | ||
3666 | continue; | ||
3667 | if (insn->src_reg == BPF_PSEUDO_CALL) | ||
3668 | diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c | ||
3669 | index ce32cf741b25..76801b9b481e 100644 | ||
3670 | --- a/kernel/time/posix-cpu-timers.c | ||
3671 | +++ b/kernel/time/posix-cpu-timers.c | ||
3672 | @@ -685,6 +685,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, | ||
3673 | * set up the signal and overrun bookkeeping. | ||
3674 | */ | ||
3675 | timer->it.cpu.incr = timespec64_to_ns(&new->it_interval); | ||
3676 | + timer->it_interval = ns_to_ktime(timer->it.cpu.incr); | ||
3677 | |||
3678 | /* | ||
3679 | * This acts as a modification timestamp for the timer, | ||
3680 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c | ||
3681 | index 93e73ccb4dec..9e45553cabd6 100644 | ||
3682 | --- a/mm/page_alloc.c | ||
3683 | +++ b/mm/page_alloc.c | ||
3684 | @@ -5538,18 +5538,6 @@ not_early: | ||
3685 | cond_resched(); | ||
3686 | } | ||
3687 | } | ||
3688 | -#ifdef CONFIG_SPARSEMEM | ||
3689 | - /* | ||
3690 | - * If the zone does not span the rest of the section then | ||
3691 | - * we should at least initialize those pages. Otherwise we | ||
3692 | - * could blow up on a poisoned page in some paths which depend | ||
3693 | - * on full sections being initialized (e.g. memory hotplug). | ||
3694 | - */ | ||
3695 | - while (end_pfn % PAGES_PER_SECTION) { | ||
3696 | - __init_single_page(pfn_to_page(end_pfn), end_pfn, zone, nid); | ||
3697 | - end_pfn++; | ||
3698 | - } | ||
3699 | -#endif | ||
3700 | } | ||
3701 | |||
3702 | static void __meminit zone_init_free_lists(struct zone *zone) | ||
3703 | diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c | ||
3704 | index 2cb8da465b98..48ddc60b4fbd 100644 | ||
3705 | --- a/net/bridge/br_forward.c | ||
3706 | +++ b/net/bridge/br_forward.c | ||
3707 | @@ -36,10 +36,10 @@ static inline int should_deliver(const struct net_bridge_port *p, | ||
3708 | |||
3709 | int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) | ||
3710 | { | ||
3711 | + skb_push(skb, ETH_HLEN); | ||
3712 | if (!is_skb_forwardable(skb->dev, skb)) | ||
3713 | goto drop; | ||
3714 | |||
3715 | - skb_push(skb, ETH_HLEN); | ||
3716 | br_drop_fake_rtable(skb); | ||
3717 | |||
3718 | if (skb->ip_summed == CHECKSUM_PARTIAL && | ||
3719 | @@ -98,12 +98,11 @@ static void __br_forward(const struct net_bridge_port *to, | ||
3720 | net = dev_net(indev); | ||
3721 | } else { | ||
3722 | if (unlikely(netpoll_tx_running(to->br->dev))) { | ||
3723 | - if (!is_skb_forwardable(skb->dev, skb)) { | ||
3724 | + skb_push(skb, ETH_HLEN); | ||
3725 | + if (!is_skb_forwardable(skb->dev, skb)) | ||
3726 | kfree_skb(skb); | ||
3727 | - } else { | ||
3728 | - skb_push(skb, ETH_HLEN); | ||
3729 | + else | ||
3730 | br_netpoll_send_skb(to, skb); | ||
3731 | - } | ||
3732 | return; | ||
3733 | } | ||
3734 | br_hook = NF_BR_LOCAL_OUT; | ||
3735 | diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c | ||
3736 | index 96c072e71ea2..5811208863b7 100644 | ||
3737 | --- a/net/bridge/br_netfilter_ipv6.c | ||
3738 | +++ b/net/bridge/br_netfilter_ipv6.c | ||
3739 | @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb) | ||
3740 | IPSTATS_MIB_INDISCARDS); | ||
3741 | goto drop; | ||
3742 | } | ||
3743 | + hdr = ipv6_hdr(skb); | ||
3744 | } | ||
3745 | if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb)) | ||
3746 | goto drop; | ||
3747 | diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c | ||
3748 | index 08cbed7d940e..419e8edf23ba 100644 | ||
3749 | --- a/net/bridge/netfilter/nft_reject_bridge.c | ||
3750 | +++ b/net/bridge/netfilter/nft_reject_bridge.c | ||
3751 | @@ -229,6 +229,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook) | ||
3752 | pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h))) | ||
3753 | return false; | ||
3754 | |||
3755 | + ip6h = ipv6_hdr(skb); | ||
3756 | thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo); | ||
3757 | if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0) | ||
3758 | return false; | ||
3759 | diff --git a/net/can/bcm.c b/net/can/bcm.c | ||
3760 | index 0af8f0db892a..79bb8afa9c0c 100644 | ||
3761 | --- a/net/can/bcm.c | ||
3762 | +++ b/net/can/bcm.c | ||
3763 | @@ -67,6 +67,9 @@ | ||
3764 | */ | ||
3765 | #define MAX_NFRAMES 256 | ||
3766 | |||
3767 | +/* limit timers to 400 days for sending/timeouts */ | ||
3768 | +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60) | ||
3769 | + | ||
3770 | /* use of last_frames[index].flags */ | ||
3771 | #define RX_RECV 0x40 /* received data for this element */ | ||
3772 | #define RX_THR 0x80 /* element not been sent due to throttle feature */ | ||
3773 | @@ -140,6 +143,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv) | ||
3774 | return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC); | ||
3775 | } | ||
3776 | |||
3777 | +/* check limitations for timeval provided by user */ | ||
3778 | +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head) | ||
3779 | +{ | ||
3780 | + if ((msg_head->ival1.tv_sec < 0) || | ||
3781 | + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) || | ||
3782 | + (msg_head->ival1.tv_usec < 0) || | ||
3783 | + (msg_head->ival1.tv_usec >= USEC_PER_SEC) || | ||
3784 | + (msg_head->ival2.tv_sec < 0) || | ||
3785 | + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) || | ||
3786 | + (msg_head->ival2.tv_usec < 0) || | ||
3787 | + (msg_head->ival2.tv_usec >= USEC_PER_SEC)) | ||
3788 | + return true; | ||
3789 | + | ||
3790 | + return false; | ||
3791 | +} | ||
3792 | + | ||
3793 | #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU) | ||
3794 | #define OPSIZ sizeof(struct bcm_op) | ||
3795 | #define MHSIZ sizeof(struct bcm_msg_head) | ||
3796 | @@ -873,6 +892,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | ||
3797 | if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) | ||
3798 | return -EINVAL; | ||
3799 | |||
3800 | + /* check timeval limitations */ | ||
3801 | + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) | ||
3802 | + return -EINVAL; | ||
3803 | + | ||
3804 | /* check the given can_id */ | ||
3805 | op = bcm_find_op(&bo->tx_ops, msg_head, ifindex); | ||
3806 | if (op) { | ||
3807 | @@ -1053,6 +1076,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | ||
3808 | (!(msg_head->can_id & CAN_RTR_FLAG)))) | ||
3809 | return -EINVAL; | ||
3810 | |||
3811 | + /* check timeval limitations */ | ||
3812 | + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head)) | ||
3813 | + return -EINVAL; | ||
3814 | + | ||
3815 | /* check the given can_id */ | ||
3816 | op = bcm_find_op(&bo->rx_ops, msg_head, ifindex); | ||
3817 | if (op) { | ||
3818 | diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c | ||
3819 | index 0113993e9b2c..958e185a8e8d 100644 | ||
3820 | --- a/net/ipv4/fib_frontend.c | ||
3821 | +++ b/net/ipv4/fib_frontend.c | ||
3822 | @@ -203,7 +203,7 @@ static void fib_flush(struct net *net) | ||
3823 | struct fib_table *tb; | ||
3824 | |||
3825 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) | ||
3826 | - flushed += fib_table_flush(net, tb); | ||
3827 | + flushed += fib_table_flush(net, tb, false); | ||
3828 | } | ||
3829 | |||
3830 | if (flushed) | ||
3831 | @@ -1357,7 +1357,7 @@ static void ip_fib_net_exit(struct net *net) | ||
3832 | |||
3833 | hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) { | ||
3834 | hlist_del(&tb->tb_hlist); | ||
3835 | - fib_table_flush(net, tb); | ||
3836 | + fib_table_flush(net, tb, true); | ||
3837 | fib_free_table(tb); | ||
3838 | } | ||
3839 | } | ||
3840 | diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c | ||
3841 | index 5bc0c89e81e4..3955a6d7ea66 100644 | ||
3842 | --- a/net/ipv4/fib_trie.c | ||
3843 | +++ b/net/ipv4/fib_trie.c | ||
3844 | @@ -1856,7 +1856,7 @@ void fib_table_flush_external(struct fib_table *tb) | ||
3845 | } | ||
3846 | |||
3847 | /* Caller must hold RTNL. */ | ||
3848 | -int fib_table_flush(struct net *net, struct fib_table *tb) | ||
3849 | +int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all) | ||
3850 | { | ||
3851 | struct trie *t = (struct trie *)tb->tb_data; | ||
3852 | struct key_vector *pn = t->kv; | ||
3853 | @@ -1904,8 +1904,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb) | ||
3854 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { | ||
3855 | struct fib_info *fi = fa->fa_info; | ||
3856 | |||
3857 | - if (!fi || !(fi->fib_flags & RTNH_F_DEAD) || | ||
3858 | - tb->tb_id != fa->tb_id) { | ||
3859 | + if (!fi || tb->tb_id != fa->tb_id || | ||
3860 | + (!(fi->fib_flags & RTNH_F_DEAD) && | ||
3861 | + !fib_props[fa->fa_type].error)) { | ||
3862 | + slen = fa->fa_slen; | ||
3863 | + continue; | ||
3864 | + } | ||
3865 | + | ||
3866 | + /* Do not flush error routes if network namespace is | ||
3867 | + * not being dismantled | ||
3868 | + */ | ||
3869 | + if (!flush_all && fib_props[fa->fa_type].error) { | ||
3870 | slen = fa->fa_slen; | ||
3871 | continue; | ||
3872 | } | ||
3873 | diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c | ||
3874 | index 5ef5df3a06f1..0bfad3e72509 100644 | ||
3875 | --- a/net/ipv4/ip_gre.c | ||
3876 | +++ b/net/ipv4/ip_gre.c | ||
3877 | @@ -570,8 +570,7 @@ err_free_skb: | ||
3878 | dev->stats.tx_dropped++; | ||
3879 | } | ||
3880 | |||
3881 | -static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | ||
3882 | - __be16 proto) | ||
3883 | +static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev) | ||
3884 | { | ||
3885 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
3886 | struct ip_tunnel_info *tun_info; | ||
3887 | @@ -579,10 +578,10 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | ||
3888 | struct erspan_metadata *md; | ||
3889 | struct rtable *rt = NULL; | ||
3890 | bool truncate = false; | ||
3891 | + __be16 df, proto; | ||
3892 | struct flowi4 fl; | ||
3893 | int tunnel_hlen; | ||
3894 | int version; | ||
3895 | - __be16 df; | ||
3896 | int nhoff; | ||
3897 | int thoff; | ||
3898 | |||
3899 | @@ -627,18 +626,20 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev, | ||
3900 | if (version == 1) { | ||
3901 | erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)), | ||
3902 | ntohl(md->u.index), truncate, true); | ||
3903 | + proto = htons(ETH_P_ERSPAN); | ||
3904 | } else if (version == 2) { | ||
3905 | erspan_build_header_v2(skb, | ||
3906 | ntohl(tunnel_id_to_key32(key->tun_id)), | ||
3907 | md->u.md2.dir, | ||
3908 | get_hwid(&md->u.md2), | ||
3909 | truncate, true); | ||
3910 | + proto = htons(ETH_P_ERSPAN2); | ||
3911 | } else { | ||
3912 | goto err_free_rt; | ||
3913 | } | ||
3914 | |||
3915 | gre_build_header(skb, 8, TUNNEL_SEQ, | ||
3916 | - htons(ETH_P_ERSPAN), 0, htonl(tunnel->o_seqno++)); | ||
3917 | + proto, 0, htonl(tunnel->o_seqno++)); | ||
3918 | |||
3919 | df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; | ||
3920 | |||
3921 | @@ -722,12 +723,13 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | ||
3922 | { | ||
3923 | struct ip_tunnel *tunnel = netdev_priv(dev); | ||
3924 | bool truncate = false; | ||
3925 | + __be16 proto; | ||
3926 | |||
3927 | if (!pskb_inet_may_pull(skb)) | ||
3928 | goto free_skb; | ||
3929 | |||
3930 | if (tunnel->collect_md) { | ||
3931 | - erspan_fb_xmit(skb, dev, skb->protocol); | ||
3932 | + erspan_fb_xmit(skb, dev); | ||
3933 | return NETDEV_TX_OK; | ||
3934 | } | ||
3935 | |||
3936 | @@ -743,19 +745,22 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | ||
3937 | } | ||
3938 | |||
3939 | /* Push ERSPAN header */ | ||
3940 | - if (tunnel->erspan_ver == 1) | ||
3941 | + if (tunnel->erspan_ver == 1) { | ||
3942 | erspan_build_header(skb, ntohl(tunnel->parms.o_key), | ||
3943 | tunnel->index, | ||
3944 | truncate, true); | ||
3945 | - else if (tunnel->erspan_ver == 2) | ||
3946 | + proto = htons(ETH_P_ERSPAN); | ||
3947 | + } else if (tunnel->erspan_ver == 2) { | ||
3948 | erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key), | ||
3949 | tunnel->dir, tunnel->hwid, | ||
3950 | truncate, true); | ||
3951 | - else | ||
3952 | + proto = htons(ETH_P_ERSPAN2); | ||
3953 | + } else { | ||
3954 | goto free_skb; | ||
3955 | + } | ||
3956 | |||
3957 | tunnel->parms.o_flags &= ~TUNNEL_KEY; | ||
3958 | - __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_ERSPAN)); | ||
3959 | + __gre_xmit(skb, dev, &tunnel->parms.iph, proto); | ||
3960 | return NETDEV_TX_OK; | ||
3961 | |||
3962 | free_skb: | ||
3963 | diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c | ||
3964 | index 27c863f6dd83..6f977b0fef54 100644 | ||
3965 | --- a/net/ipv4/ip_input.c | ||
3966 | +++ b/net/ipv4/ip_input.c | ||
3967 | @@ -489,6 +489,7 @@ static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) | ||
3968 | goto drop; | ||
3969 | } | ||
3970 | |||
3971 | + iph = ip_hdr(skb); | ||
3972 | skb->transport_header = skb->network_header + iph->ihl*4; | ||
3973 | |||
3974 | /* Remove any debris in the socket control block */ | ||
3975 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c | ||
3976 | index a32a0f4cc138..87fe44197aa1 100644 | ||
3977 | --- a/net/ipv4/tcp.c | ||
3978 | +++ b/net/ipv4/tcp.c | ||
3979 | @@ -1186,7 +1186,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size) | ||
3980 | flags = msg->msg_flags; | ||
3981 | |||
3982 | if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) { | ||
3983 | - if (sk->sk_state != TCP_ESTABLISHED) { | ||
3984 | + if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { | ||
3985 | err = -EINVAL; | ||
3986 | goto out_err; | ||
3987 | } | ||
3988 | diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c | ||
3989 | index f8183fdce5b2..e45a5e19e509 100644 | ||
3990 | --- a/net/ipv4/udp.c | ||
3991 | +++ b/net/ipv4/udp.c | ||
3992 | @@ -785,15 +785,23 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, | ||
3993 | const int hlen = skb_network_header_len(skb) + | ||
3994 | sizeof(struct udphdr); | ||
3995 | |||
3996 | - if (hlen + cork->gso_size > cork->fragsize) | ||
3997 | + if (hlen + cork->gso_size > cork->fragsize) { | ||
3998 | + kfree_skb(skb); | ||
3999 | return -EINVAL; | ||
4000 | - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | ||
4001 | + } | ||
4002 | + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
4003 | + kfree_skb(skb); | ||
4004 | return -EINVAL; | ||
4005 | - if (sk->sk_no_check_tx) | ||
4006 | + } | ||
4007 | + if (sk->sk_no_check_tx) { | ||
4008 | + kfree_skb(skb); | ||
4009 | return -EINVAL; | ||
4010 | + } | ||
4011 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | ||
4012 | - dst_xfrm(skb_dst(skb))) | ||
4013 | + dst_xfrm(skb_dst(skb))) { | ||
4014 | + kfree_skb(skb); | ||
4015 | return -EIO; | ||
4016 | + } | ||
4017 | |||
4018 | skb_shinfo(skb)->gso_size = cork->gso_size; | ||
4019 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | ||
4020 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c | ||
4021 | index c270726b01b0..345e6839f031 100644 | ||
4022 | --- a/net/ipv6/ip6_gre.c | ||
4023 | +++ b/net/ipv6/ip6_gre.c | ||
4024 | @@ -938,6 +938,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | ||
4025 | __u8 dsfield = false; | ||
4026 | struct flowi6 fl6; | ||
4027 | int err = -EINVAL; | ||
4028 | + __be16 proto; | ||
4029 | __u32 mtu; | ||
4030 | int nhoff; | ||
4031 | int thoff; | ||
4032 | @@ -1051,8 +1052,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, | ||
4033 | } | ||
4034 | |||
4035 | /* Push GRE header. */ | ||
4036 | - gre_build_header(skb, 8, TUNNEL_SEQ, | ||
4037 | - htons(ETH_P_ERSPAN), 0, htonl(t->o_seqno++)); | ||
4038 | + proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN) | ||
4039 | + : htons(ETH_P_ERSPAN2); | ||
4040 | + gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(t->o_seqno++)); | ||
4041 | |||
4042 | /* TooBig packet may have updated dst->dev's mtu */ | ||
4043 | if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) | ||
4044 | @@ -1185,6 +1187,10 @@ static void ip6gre_tnl_copy_tnl_parm(struct ip6_tnl *t, | ||
4045 | t->parms.i_flags = p->i_flags; | ||
4046 | t->parms.o_flags = p->o_flags; | ||
4047 | t->parms.fwmark = p->fwmark; | ||
4048 | + t->parms.erspan_ver = p->erspan_ver; | ||
4049 | + t->parms.index = p->index; | ||
4050 | + t->parms.dir = p->dir; | ||
4051 | + t->parms.hwid = p->hwid; | ||
4052 | dst_cache_reset(&t->dst_cache); | ||
4053 | } | ||
4054 | |||
4055 | @@ -2047,9 +2053,9 @@ static int ip6gre_changelink(struct net_device *dev, struct nlattr *tb[], | ||
4056 | struct nlattr *data[], | ||
4057 | struct netlink_ext_ack *extack) | ||
4058 | { | ||
4059 | - struct ip6gre_net *ign = net_generic(dev_net(dev), ip6gre_net_id); | ||
4060 | + struct ip6_tnl *t = netdev_priv(dev); | ||
4061 | + struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id); | ||
4062 | struct __ip6_tnl_parm p; | ||
4063 | - struct ip6_tnl *t; | ||
4064 | |||
4065 | t = ip6gre_changelink_common(dev, tb, data, &p, extack); | ||
4066 | if (IS_ERR(t)) | ||
4067 | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c | ||
4068 | index b36694b6716e..76ba2f34ef6b 100644 | ||
4069 | --- a/net/ipv6/udp.c | ||
4070 | +++ b/net/ipv6/udp.c | ||
4071 | @@ -1056,15 +1056,23 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, | ||
4072 | const int hlen = skb_network_header_len(skb) + | ||
4073 | sizeof(struct udphdr); | ||
4074 | |||
4075 | - if (hlen + cork->gso_size > cork->fragsize) | ||
4076 | + if (hlen + cork->gso_size > cork->fragsize) { | ||
4077 | + kfree_skb(skb); | ||
4078 | return -EINVAL; | ||
4079 | - if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) | ||
4080 | + } | ||
4081 | + if (skb->len > cork->gso_size * UDP_MAX_SEGMENTS) { | ||
4082 | + kfree_skb(skb); | ||
4083 | return -EINVAL; | ||
4084 | - if (udp_sk(sk)->no_check6_tx) | ||
4085 | + } | ||
4086 | + if (udp_sk(sk)->no_check6_tx) { | ||
4087 | + kfree_skb(skb); | ||
4088 | return -EINVAL; | ||
4089 | + } | ||
4090 | if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || | ||
4091 | - dst_xfrm(skb_dst(skb))) | ||
4092 | + dst_xfrm(skb_dst(skb))) { | ||
4093 | + kfree_skb(skb); | ||
4094 | return -EIO; | ||
4095 | + } | ||
4096 | |||
4097 | skb_shinfo(skb)->gso_size = cork->gso_size; | ||
4098 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4; | ||
4099 | diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c | ||
4100 | index 865ecef68196..c7b6010b2c09 100644 | ||
4101 | --- a/net/openvswitch/flow_netlink.c | ||
4102 | +++ b/net/openvswitch/flow_netlink.c | ||
4103 | @@ -500,7 +500,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, | ||
4104 | return -EINVAL; | ||
4105 | } | ||
4106 | |||
4107 | - if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | ||
4108 | + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) { | ||
4109 | attrs |= 1 << type; | ||
4110 | a[type] = nla; | ||
4111 | } | ||
4112 | diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c | ||
4113 | index 681f6f04e7da..0f6601fdf889 100644 | ||
4114 | --- a/net/sched/act_tunnel_key.c | ||
4115 | +++ b/net/sched/act_tunnel_key.c | ||
4116 | @@ -197,6 +197,15 @@ static const struct nla_policy tunnel_key_policy[TCA_TUNNEL_KEY_MAX + 1] = { | ||
4117 | [TCA_TUNNEL_KEY_ENC_TTL] = { .type = NLA_U8 }, | ||
4118 | }; | ||
4119 | |||
4120 | +static void tunnel_key_release_params(struct tcf_tunnel_key_params *p) | ||
4121 | +{ | ||
4122 | + if (!p) | ||
4123 | + return; | ||
4124 | + if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
4125 | + dst_release(&p->tcft_enc_metadata->dst); | ||
4126 | + kfree_rcu(p, rcu); | ||
4127 | +} | ||
4128 | + | ||
4129 | static int tunnel_key_init(struct net *net, struct nlattr *nla, | ||
4130 | struct nlattr *est, struct tc_action **a, | ||
4131 | int ovr, int bind, bool rtnl_held, | ||
4132 | @@ -360,8 +369,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla, | ||
4133 | rcu_swap_protected(t->params, params_new, | ||
4134 | lockdep_is_held(&t->tcf_lock)); | ||
4135 | spin_unlock_bh(&t->tcf_lock); | ||
4136 | - if (params_new) | ||
4137 | - kfree_rcu(params_new, rcu); | ||
4138 | + tunnel_key_release_params(params_new); | ||
4139 | |||
4140 | if (ret == ACT_P_CREATED) | ||
4141 | tcf_idr_insert(tn, *a); | ||
4142 | @@ -385,12 +393,7 @@ static void tunnel_key_release(struct tc_action *a) | ||
4143 | struct tcf_tunnel_key_params *params; | ||
4144 | |||
4145 | params = rcu_dereference_protected(t->params, 1); | ||
4146 | - if (params) { | ||
4147 | - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) | ||
4148 | - dst_release(¶ms->tcft_enc_metadata->dst); | ||
4149 | - | ||
4150 | - kfree_rcu(params, rcu); | ||
4151 | - } | ||
4152 | + tunnel_key_release_params(params); | ||
4153 | } | ||
4154 | |||
4155 | static int tunnel_key_geneve_opts_dump(struct sk_buff *skb, | ||
4156 | diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c | ||
4157 | index 70f144ac5e1d..2167c6ca55e3 100644 | ||
4158 | --- a/net/sched/cls_api.c | ||
4159 | +++ b/net/sched/cls_api.c | ||
4160 | @@ -960,7 +960,6 @@ static int tcf_block_cb_call(struct tcf_block *block, enum tc_setup_type type, | ||
4161 | int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | ||
4162 | struct tcf_result *res, bool compat_mode) | ||
4163 | { | ||
4164 | - __be16 protocol = tc_skb_protocol(skb); | ||
4165 | #ifdef CONFIG_NET_CLS_ACT | ||
4166 | const int max_reclassify_loop = 4; | ||
4167 | const struct tcf_proto *orig_tp = tp; | ||
4168 | @@ -970,6 +969,7 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp, | ||
4169 | reclassify: | ||
4170 | #endif | ||
4171 | for (; tp; tp = rcu_dereference_bh(tp->next)) { | ||
4172 | + __be16 protocol = tc_skb_protocol(skb); | ||
4173 | int err; | ||
4174 | |||
4175 | if (tp->protocol != protocol && | ||
4176 | @@ -1002,7 +1002,6 @@ reset: | ||
4177 | } | ||
4178 | |||
4179 | tp = first_tp; | ||
4180 | - protocol = tc_skb_protocol(skb); | ||
4181 | goto reclassify; | ||
4182 | #endif | ||
4183 | } | ||
4184 | diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c | ||
4185 | index 7fade7107f95..84893bc67531 100644 | ||
4186 | --- a/net/sched/cls_flower.c | ||
4187 | +++ b/net/sched/cls_flower.c | ||
4188 | @@ -1176,17 +1176,23 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | ||
4189 | struct cls_fl_head *head = rtnl_dereference(tp->root); | ||
4190 | struct cls_fl_filter *fold = *arg; | ||
4191 | struct cls_fl_filter *fnew; | ||
4192 | + struct fl_flow_mask *mask; | ||
4193 | struct nlattr **tb; | ||
4194 | - struct fl_flow_mask mask = {}; | ||
4195 | int err; | ||
4196 | |||
4197 | if (!tca[TCA_OPTIONS]) | ||
4198 | return -EINVAL; | ||
4199 | |||
4200 | - tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | ||
4201 | - if (!tb) | ||
4202 | + mask = kzalloc(sizeof(struct fl_flow_mask), GFP_KERNEL); | ||
4203 | + if (!mask) | ||
4204 | return -ENOBUFS; | ||
4205 | |||
4206 | + tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); | ||
4207 | + if (!tb) { | ||
4208 | + err = -ENOBUFS; | ||
4209 | + goto errout_mask_alloc; | ||
4210 | + } | ||
4211 | + | ||
4212 | err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], | ||
4213 | fl_policy, NULL); | ||
4214 | if (err < 0) | ||
4215 | @@ -1229,12 +1235,12 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | ||
4216 | } | ||
4217 | } | ||
4218 | |||
4219 | - err = fl_set_parms(net, tp, fnew, &mask, base, tb, tca[TCA_RATE], ovr, | ||
4220 | + err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr, | ||
4221 | tp->chain->tmplt_priv, extack); | ||
4222 | if (err) | ||
4223 | goto errout_idr; | ||
4224 | |||
4225 | - err = fl_check_assign_mask(head, fnew, fold, &mask); | ||
4226 | + err = fl_check_assign_mask(head, fnew, fold, mask); | ||
4227 | if (err) | ||
4228 | goto errout_idr; | ||
4229 | |||
4230 | @@ -1281,6 +1287,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | ||
4231 | } | ||
4232 | |||
4233 | kfree(tb); | ||
4234 | + kfree(mask); | ||
4235 | return 0; | ||
4236 | |||
4237 | errout_mask: | ||
4238 | @@ -1294,6 +1301,8 @@ errout: | ||
4239 | kfree(fnew); | ||
4240 | errout_tb: | ||
4241 | kfree(tb); | ||
4242 | +errout_mask_alloc: | ||
4243 | + kfree(mask); | ||
4244 | return err; | ||
4245 | } | ||
4246 | |||
4247 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c | ||
4248 | index f9176e3b4d37..31a84a5a1338 100644 | ||
4249 | --- a/sound/pci/hda/patch_conexant.c | ||
4250 | +++ b/sound/pci/hda/patch_conexant.c | ||
4251 | @@ -931,6 +931,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | ||
4252 | SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), | ||
4253 | SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), | ||
4254 | SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), | ||
4255 | + SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO), | ||
4256 | SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), | ||
4257 | SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), | ||
4258 | SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE), | ||
4259 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c | ||
4260 | index 8b9f2487969b..f39f34e12fb6 100644 | ||
4261 | --- a/sound/pci/hda/patch_realtek.c | ||
4262 | +++ b/sound/pci/hda/patch_realtek.c | ||
4263 | @@ -6842,7 +6842,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | ||
4264 | {.id = ALC293_FIXUP_LENOVO_SPK_NOISE, .name = "lenovo-spk-noise"}, | ||
4265 | {.id = ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, .name = "lenovo-hotkey"}, | ||
4266 | {.id = ALC255_FIXUP_DELL_SPK_NOISE, .name = "dell-spk-noise"}, | ||
4267 | - {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc255-dell1"}, | ||
4268 | + {.id = ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "alc225-dell1"}, | ||
4269 | {.id = ALC295_FIXUP_DISABLE_DAC3, .name = "alc295-disable-dac3"}, | ||
4270 | {.id = ALC280_FIXUP_HP_HEADSET_MIC, .name = "alc280-hp-headset"}, | ||
4271 | {.id = ALC221_FIXUP_HP_FRONT_MIC, .name = "alc221-hp-mic"}, | ||
4272 | diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c | ||
4273 | index 6478d10c4f4a..cdb1f40009ab 100644 | ||
4274 | --- a/sound/soc/codecs/rt5514-spi.c | ||
4275 | +++ b/sound/soc/codecs/rt5514-spi.c | ||
4276 | @@ -278,6 +278,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_component *component) | ||
4277 | |||
4278 | rt5514_dsp = devm_kzalloc(component->dev, sizeof(*rt5514_dsp), | ||
4279 | GFP_KERNEL); | ||
4280 | + if (!rt5514_dsp) | ||
4281 | + return -ENOMEM; | ||
4282 | |||
4283 | rt5514_dsp->dev = &rt5514_spi->dev; | ||
4284 | mutex_init(&rt5514_dsp->dma_lock); | ||
4285 | diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c | ||
4286 | index e2b5a11b16d1..f03195d2ab2e 100644 | ||
4287 | --- a/sound/soc/codecs/tlv320aic32x4.c | ||
4288 | +++ b/sound/soc/codecs/tlv320aic32x4.c | ||
4289 | @@ -822,6 +822,10 @@ static int aic32x4_set_bias_level(struct snd_soc_component *component, | ||
4290 | case SND_SOC_BIAS_PREPARE: | ||
4291 | break; | ||
4292 | case SND_SOC_BIAS_STANDBY: | ||
4293 | + /* Initial cold start */ | ||
4294 | + if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) | ||
4295 | + break; | ||
4296 | + | ||
4297 | /* Switch off BCLK_N Divider */ | ||
4298 | snd_soc_component_update_bits(component, AIC32X4_BCLKN, | ||
4299 | AIC32X4_BCLKEN, 0); | ||
4300 | diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c | ||
4301 | index 6c36da560877..e662400873ec 100644 | ||
4302 | --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c | ||
4303 | +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c | ||
4304 | @@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream, | ||
4305 | struct snd_pcm_hw_params *params, | ||
4306 | struct snd_soc_dai *dai) | ||
4307 | { | ||
4308 | - snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); | ||
4309 | + int ret; | ||
4310 | + | ||
4311 | + ret = | ||
4312 | + snd_pcm_lib_malloc_pages(substream, | ||
4313 | + params_buffer_bytes(params)); | ||
4314 | + if (ret) | ||
4315 | + return ret; | ||
4316 | memset(substream->runtime->dma_area, 0, params_buffer_bytes(params)); | ||
4317 | return 0; | ||
4318 | } | ||
4319 | diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c | ||
4320 | index 460b4bdf4c1e..5d546dcdbc80 100644 | ||
4321 | --- a/tools/testing/selftests/x86/protection_keys.c | ||
4322 | +++ b/tools/testing/selftests/x86/protection_keys.c | ||
4323 | @@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) | ||
4324 | pkey_assert(err); | ||
4325 | } | ||
4326 | |||
4327 | +void become_child(void) | ||
4328 | +{ | ||
4329 | + pid_t forkret; | ||
4330 | + | ||
4331 | + forkret = fork(); | ||
4332 | + pkey_assert(forkret >= 0); | ||
4333 | + dprintf3("[%d] fork() ret: %d\n", getpid(), forkret); | ||
4334 | + | ||
4335 | + if (!forkret) { | ||
4336 | + /* in the child */ | ||
4337 | + return; | ||
4338 | + } | ||
4339 | + exit(0); | ||
4340 | +} | ||
4341 | + | ||
4342 | /* Assumes that all pkeys other than 'pkey' are unallocated */ | ||
4343 | void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | ||
4344 | { | ||
4345 | @@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | ||
4346 | int nr_allocated_pkeys = 0; | ||
4347 | int i; | ||
4348 | |||
4349 | - for (i = 0; i < NR_PKEYS*2; i++) { | ||
4350 | + for (i = 0; i < NR_PKEYS*3; i++) { | ||
4351 | int new_pkey; | ||
4352 | dprintf1("%s() alloc loop: %d\n", __func__, i); | ||
4353 | new_pkey = alloc_pkey(); | ||
4354 | @@ -1152,20 +1167,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) | ||
4355 | if ((new_pkey == -1) && (errno == ENOSPC)) { | ||
4356 | dprintf2("%s() failed to allocate pkey after %d tries\n", | ||
4357 | __func__, nr_allocated_pkeys); | ||
4358 | - break; | ||
4359 | + } else { | ||
4360 | + /* | ||
4361 | + * Ensure the number of successes never | ||
4362 | + * exceeds the number of keys supported | ||
4363 | + * in the hardware. | ||
4364 | + */ | ||
4365 | + pkey_assert(nr_allocated_pkeys < NR_PKEYS); | ||
4366 | + allocated_pkeys[nr_allocated_pkeys++] = new_pkey; | ||
4367 | } | ||
4368 | - pkey_assert(nr_allocated_pkeys < NR_PKEYS); | ||
4369 | - allocated_pkeys[nr_allocated_pkeys++] = new_pkey; | ||
4370 | + | ||
4371 | + /* | ||
4372 | + * Make sure that allocation state is properly | ||
4373 | + * preserved across fork(). | ||
4374 | + */ | ||
4375 | + if (i == NR_PKEYS*2) | ||
4376 | + become_child(); | ||
4377 | } | ||
4378 | |||
4379 | dprintf3("%s()::%d\n", __func__, __LINE__); | ||
4380 | |||
4381 | - /* | ||
4382 | - * ensure it did not reach the end of the loop without | ||
4383 | - * failure: | ||
4384 | - */ | ||
4385 | - pkey_assert(i < NR_PKEYS*2); | ||
4386 | - | ||
4387 | /* | ||
4388 | * There are 16 pkeys supported in hardware. Three are | ||
4389 | * allocated by the time we get here: |