Contents of /trunk/kernel-alx/patches-5.4/0189-5.4.90-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (18 months, 1 week ago) by niro
File size: 57969 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index 95848875110ef..5c9d680b7ce51 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 89 |
10 | +SUBLEVEL = 90 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c |
15 | index 3acb4192918df..f85a0fd6aca5c 100644 |
16 | --- a/arch/arm/mach-omap2/omap_device.c |
17 | +++ b/arch/arm/mach-omap2/omap_device.c |
18 | @@ -234,10 +234,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb, |
19 | break; |
20 | case BUS_NOTIFY_BIND_DRIVER: |
21 | od = to_omap_device(pdev); |
22 | - if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) && |
23 | - pm_runtime_status_suspended(dev)) { |
24 | + if (od) { |
25 | od->_driver_status = BUS_NOTIFY_BIND_DRIVER; |
26 | - pm_runtime_set_active(dev); |
27 | + if (od->_state == OMAP_DEVICE_STATE_ENABLED && |
28 | + pm_runtime_status_suspended(dev)) { |
29 | + pm_runtime_set_active(dev); |
30 | + } |
31 | } |
32 | break; |
33 | case BUS_NOTIFY_ADD_DEVICE: |
34 | diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c |
35 | index 6478635ff2142..98a177dd1f89f 100644 |
36 | --- a/arch/arm64/kvm/sys_regs.c |
37 | +++ b/arch/arm64/kvm/sys_regs.c |
38 | @@ -625,6 +625,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) |
39 | { |
40 | u64 pmcr, val; |
41 | |
42 | + /* No PMU available, PMCR_EL0 may UNDEF... */ |
43 | + if (!kvm_arm_support_pmu_v3()) |
44 | + return; |
45 | + |
46 | pmcr = read_sysreg(pmcr_el0); |
47 | /* |
48 | * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN |
49 | diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S |
50 | index 390edb7638265..bde3e0f85425f 100644 |
51 | --- a/arch/x86/entry/entry_32.S |
52 | +++ b/arch/x86/entry/entry_32.S |
53 | @@ -869,9 +869,10 @@ GLOBAL(__begin_SYSENTER_singlestep_region) |
54 | * Xen doesn't set %esp to be precisely what the normal SYSENTER |
55 | * entry point expects, so fix it up before using the normal path. |
56 | */ |
57 | -ENTRY(xen_sysenter_target) |
58 | +SYM_CODE_START(xen_sysenter_target) |
59 | addl $5*4, %esp /* remove xen-provided frame */ |
60 | jmp .Lsysenter_past_esp |
61 | +SYM_CODE_END(xen_sysenter_target) |
62 | #endif |
63 | |
64 | /* |
65 | diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S |
66 | index e95e95960156b..5b076cb79f5fb 100644 |
67 | --- a/arch/x86/kernel/acpi/wakeup_32.S |
68 | +++ b/arch/x86/kernel/acpi/wakeup_32.S |
69 | @@ -9,8 +9,7 @@ |
70 | .code32 |
71 | ALIGN |
72 | |
73 | -ENTRY(wakeup_pmode_return) |
74 | -wakeup_pmode_return: |
75 | +SYM_CODE_START(wakeup_pmode_return) |
76 | movw $__KERNEL_DS, %ax |
77 | movw %ax, %ss |
78 | movw %ax, %fs |
79 | @@ -39,6 +38,7 @@ wakeup_pmode_return: |
80 | # jump to place where we left off |
81 | movl saved_eip, %eax |
82 | jmp *%eax |
83 | +SYM_CODE_END(wakeup_pmode_return) |
84 | |
85 | bogus_magic: |
86 | jmp bogus_magic |
87 | @@ -72,7 +72,7 @@ restore_registers: |
88 | popfl |
89 | ret |
90 | |
91 | -ENTRY(do_suspend_lowlevel) |
92 | +SYM_CODE_START(do_suspend_lowlevel) |
93 | call save_processor_state |
94 | call save_registers |
95 | pushl $3 |
96 | @@ -87,6 +87,7 @@ ret_point: |
97 | call restore_registers |
98 | call restore_processor_state |
99 | ret |
100 | +SYM_CODE_END(do_suspend_lowlevel) |
101 | |
102 | .data |
103 | ALIGN |
104 | diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c |
105 | index 830ccc396e26d..28f786289fce4 100644 |
106 | --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c |
107 | +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c |
108 | @@ -525,85 +525,70 @@ static void rdtgroup_remove(struct rdtgroup *rdtgrp) |
109 | kfree(rdtgrp); |
110 | } |
111 | |
112 | -struct task_move_callback { |
113 | - struct callback_head work; |
114 | - struct rdtgroup *rdtgrp; |
115 | -}; |
116 | - |
117 | -static void move_myself(struct callback_head *head) |
118 | +static void _update_task_closid_rmid(void *task) |
119 | { |
120 | - struct task_move_callback *callback; |
121 | - struct rdtgroup *rdtgrp; |
122 | - |
123 | - callback = container_of(head, struct task_move_callback, work); |
124 | - rdtgrp = callback->rdtgrp; |
125 | - |
126 | /* |
127 | - * If resource group was deleted before this task work callback |
128 | - * was invoked, then assign the task to root group and free the |
129 | - * resource group. |
130 | + * If the task is still current on this CPU, update PQR_ASSOC MSR. |
131 | + * Otherwise, the MSR is updated when the task is scheduled in. |
132 | */ |
133 | - if (atomic_dec_and_test(&rdtgrp->waitcount) && |
134 | - (rdtgrp->flags & RDT_DELETED)) { |
135 | - current->closid = 0; |
136 | - current->rmid = 0; |
137 | - rdtgroup_remove(rdtgrp); |
138 | - } |
139 | - |
140 | - preempt_disable(); |
141 | - /* update PQR_ASSOC MSR to make resource group go into effect */ |
142 | - resctrl_sched_in(); |
143 | - preempt_enable(); |
144 | + if (task == current) |
145 | + resctrl_sched_in(); |
146 | +} |
147 | |
148 | - kfree(callback); |
149 | +static void update_task_closid_rmid(struct task_struct *t) |
150 | +{ |
151 | + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) |
152 | + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); |
153 | + else |
154 | + _update_task_closid_rmid(t); |
155 | } |
156 | |
157 | static int __rdtgroup_move_task(struct task_struct *tsk, |
158 | struct rdtgroup *rdtgrp) |
159 | { |
160 | - struct task_move_callback *callback; |
161 | - int ret; |
162 | - |
163 | - callback = kzalloc(sizeof(*callback), GFP_KERNEL); |
164 | - if (!callback) |
165 | - return -ENOMEM; |
166 | - callback->work.func = move_myself; |
167 | - callback->rdtgrp = rdtgrp; |
168 | + /* If the task is already in rdtgrp, no need to move the task. */ |
169 | + if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && |
170 | + tsk->rmid == rdtgrp->mon.rmid) || |
171 | + (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && |
172 | + tsk->closid == rdtgrp->mon.parent->closid)) |
173 | + return 0; |
174 | |
175 | /* |
176 | - * Take a refcount, so rdtgrp cannot be freed before the |
177 | - * callback has been invoked. |
178 | + * Set the task's closid/rmid before the PQR_ASSOC MSR can be |
179 | + * updated by them. |
180 | + * |
181 | + * For ctrl_mon groups, move both closid and rmid. |
182 | + * For monitor groups, can move the tasks only from |
183 | + * their parent CTRL group. |
184 | */ |
185 | - atomic_inc(&rdtgrp->waitcount); |
186 | - ret = task_work_add(tsk, &callback->work, true); |
187 | - if (ret) { |
188 | - /* |
189 | - * Task is exiting. Drop the refcount and free the callback. |
190 | - * No need to check the refcount as the group cannot be |
191 | - * deleted before the write function unlocks rdtgroup_mutex. |
192 | - */ |
193 | - atomic_dec(&rdtgrp->waitcount); |
194 | - kfree(callback); |
195 | - rdt_last_cmd_puts("Task exited\n"); |
196 | - } else { |
197 | - /* |
198 | - * For ctrl_mon groups move both closid and rmid. |
199 | - * For monitor groups, can move the tasks only from |
200 | - * their parent CTRL group. |
201 | - */ |
202 | - if (rdtgrp->type == RDTCTRL_GROUP) { |
203 | - tsk->closid = rdtgrp->closid; |
204 | + |
205 | + if (rdtgrp->type == RDTCTRL_GROUP) { |
206 | + tsk->closid = rdtgrp->closid; |
207 | + tsk->rmid = rdtgrp->mon.rmid; |
208 | + } else if (rdtgrp->type == RDTMON_GROUP) { |
209 | + if (rdtgrp->mon.parent->closid == tsk->closid) { |
210 | tsk->rmid = rdtgrp->mon.rmid; |
211 | - } else if (rdtgrp->type == RDTMON_GROUP) { |
212 | - if (rdtgrp->mon.parent->closid == tsk->closid) { |
213 | - tsk->rmid = rdtgrp->mon.rmid; |
214 | - } else { |
215 | - rdt_last_cmd_puts("Can't move task to different control group\n"); |
216 | - ret = -EINVAL; |
217 | - } |
218 | + } else { |
219 | + rdt_last_cmd_puts("Can't move task to different control group\n"); |
220 | + return -EINVAL; |
221 | } |
222 | } |
223 | - return ret; |
224 | + |
225 | + /* |
226 | + * Ensure the task's closid and rmid are written before determining if |
227 | + * the task is current that will decide if it will be interrupted. |
228 | + */ |
229 | + barrier(); |
230 | + |
231 | + /* |
232 | + * By now, the task's closid and rmid are set. If the task is current |
233 | + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource |
234 | + * group go into effect. If the task is not current, the MSR will be |
235 | + * updated when the task is scheduled in. |
236 | + */ |
237 | + update_task_closid_rmid(tsk); |
238 | + |
239 | + return 0; |
240 | } |
241 | |
242 | /** |
243 | diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S |
244 | index 073aab525d800..2cc0303522c99 100644 |
245 | --- a/arch/x86/kernel/ftrace_32.S |
246 | +++ b/arch/x86/kernel/ftrace_32.S |
247 | @@ -89,7 +89,7 @@ WEAK(ftrace_stub) |
248 | ret |
249 | END(ftrace_caller) |
250 | |
251 | -ENTRY(ftrace_regs_caller) |
252 | +SYM_CODE_START(ftrace_regs_caller) |
253 | /* |
254 | * We're here from an mcount/fentry CALL, and the stack frame looks like: |
255 | * |
256 | @@ -163,6 +163,7 @@ GLOBAL(ftrace_regs_call) |
257 | popl %eax |
258 | |
259 | jmp .Lftrace_ret |
260 | +SYM_CODE_END(ftrace_regs_caller) |
261 | |
262 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
263 | ENTRY(ftrace_graph_caller) |
264 | diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S |
265 | index 2e6a0676c1f43..11a5d5ade52ce 100644 |
266 | --- a/arch/x86/kernel/head_32.S |
267 | +++ b/arch/x86/kernel/head_32.S |
268 | @@ -64,7 +64,7 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) |
269 | * can. |
270 | */ |
271 | __HEAD |
272 | -ENTRY(startup_32) |
273 | +SYM_CODE_START(startup_32) |
274 | movl pa(initial_stack),%ecx |
275 | |
276 | /* test KEEP_SEGMENTS flag to see if the bootloader is asking |
277 | @@ -172,6 +172,7 @@ num_subarch_entries = (. - subarch_entries) / 4 |
278 | #else |
279 | jmp .Ldefault_entry |
280 | #endif /* CONFIG_PARAVIRT */ |
281 | +SYM_CODE_END(startup_32) |
282 | |
283 | #ifdef CONFIG_HOTPLUG_CPU |
284 | /* |
285 | diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S |
286 | index 6fe383002125f..a19ed3d231853 100644 |
287 | --- a/arch/x86/power/hibernate_asm_32.S |
288 | +++ b/arch/x86/power/hibernate_asm_32.S |
289 | @@ -35,7 +35,7 @@ ENTRY(swsusp_arch_suspend) |
290 | ret |
291 | ENDPROC(swsusp_arch_suspend) |
292 | |
293 | -ENTRY(restore_image) |
294 | +SYM_CODE_START(restore_image) |
295 | /* prepare to jump to the image kernel */ |
296 | movl restore_jump_address, %ebx |
297 | movl restore_cr3, %ebp |
298 | @@ -45,9 +45,10 @@ ENTRY(restore_image) |
299 | /* jump to relocated restore code */ |
300 | movl relocated_restore_code, %eax |
301 | jmpl *%eax |
302 | +SYM_CODE_END(restore_image) |
303 | |
304 | /* code below has been relocated to a safe page */ |
305 | -ENTRY(core_restore_code) |
306 | +SYM_CODE_START(core_restore_code) |
307 | movl temp_pgt, %eax |
308 | movl %eax, %cr3 |
309 | |
310 | @@ -77,6 +78,7 @@ copy_loop: |
311 | |
312 | done: |
313 | jmpl *%ebx |
314 | +SYM_CODE_END(core_restore_code) |
315 | |
316 | /* code below belongs to the image kernel */ |
317 | .align PAGE_SIZE |
318 | diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S |
319 | index 1868b158480d4..3a0ef0d577344 100644 |
320 | --- a/arch/x86/realmode/rm/trampoline_32.S |
321 | +++ b/arch/x86/realmode/rm/trampoline_32.S |
322 | @@ -29,7 +29,7 @@ |
323 | .code16 |
324 | |
325 | .balign PAGE_SIZE |
326 | -ENTRY(trampoline_start) |
327 | +SYM_CODE_START(trampoline_start) |
328 | wbinvd # Needed for NUMA-Q should be harmless for others |
329 | |
330 | LJMPW_RM(1f) |
331 | @@ -54,11 +54,13 @@ ENTRY(trampoline_start) |
332 | lmsw %dx # into protected mode |
333 | |
334 | ljmpl $__BOOT_CS, $pa_startup_32 |
335 | +SYM_CODE_END(trampoline_start) |
336 | |
337 | .section ".text32","ax" |
338 | .code32 |
339 | -ENTRY(startup_32) # note: also used from wakeup_asm.S |
340 | +SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S |
341 | jmp *%eax |
342 | +SYM_CODE_END(startup_32) |
343 | |
344 | .bss |
345 | .balign 8 |
346 | diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S |
347 | index cd177772fe4d5..2712e91553063 100644 |
348 | --- a/arch/x86/xen/xen-asm_32.S |
349 | +++ b/arch/x86/xen/xen-asm_32.S |
350 | @@ -56,7 +56,7 @@ |
351 | _ASM_EXTABLE(1b,2b) |
352 | .endm |
353 | |
354 | -ENTRY(xen_iret) |
355 | +SYM_CODE_START(xen_iret) |
356 | /* test eflags for special cases */ |
357 | testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp) |
358 | jnz hyper_iret |
359 | @@ -122,6 +122,7 @@ xen_iret_end_crit: |
360 | hyper_iret: |
361 | /* put this out of line since its very rarely used */ |
362 | jmp hypercall_page + __HYPERVISOR_iret * 32 |
363 | +SYM_CODE_END(xen_iret) |
364 | |
365 | .globl xen_iret_start_crit, xen_iret_end_crit |
366 | |
367 | @@ -152,7 +153,7 @@ hyper_iret: |
368 | * The only caveat is that if the outer eax hasn't been restored yet (i.e. |
369 | * it's still on stack), we need to restore its value here. |
370 | */ |
371 | -ENTRY(xen_iret_crit_fixup) |
372 | +SYM_CODE_START(xen_iret_crit_fixup) |
373 | /* |
374 | * Paranoia: Make sure we're really coming from kernel space. |
375 | * One could imagine a case where userspace jumps into the |
376 | @@ -179,4 +180,4 @@ ENTRY(xen_iret_crit_fixup) |
377 | |
378 | 2: |
379 | ret |
380 | -END(xen_iret_crit_fixup) |
381 | +SYM_CODE_END(xen_iret_crit_fixup) |
382 | diff --git a/block/genhd.c b/block/genhd.c |
383 | index 26b31fcae217f..604f0a2cbc9a0 100644 |
384 | --- a/block/genhd.c |
385 | +++ b/block/genhd.c |
386 | @@ -222,14 +222,17 @@ struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) |
387 | part = rcu_dereference(ptbl->part[piter->idx]); |
388 | if (!part) |
389 | continue; |
390 | + get_device(part_to_dev(part)); |
391 | + piter->part = part; |
392 | if (!part_nr_sects_read(part) && |
393 | !(piter->flags & DISK_PITER_INCL_EMPTY) && |
394 | !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && |
395 | - piter->idx == 0)) |
396 | + piter->idx == 0)) { |
397 | + put_device(part_to_dev(part)); |
398 | + piter->part = NULL; |
399 | continue; |
400 | + } |
401 | |
402 | - get_device(part_to_dev(part)); |
403 | - piter->part = part; |
404 | piter->idx += inc; |
405 | break; |
406 | } |
407 | diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c |
408 | index f58baff2be0af..398991381e9af 100644 |
409 | --- a/drivers/base/regmap/regmap-debugfs.c |
410 | +++ b/drivers/base/regmap/regmap-debugfs.c |
411 | @@ -583,8 +583,12 @@ void regmap_debugfs_init(struct regmap *map, const char *name) |
412 | devname = dev_name(map->dev); |
413 | |
414 | if (name) { |
415 | - map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", |
416 | + if (!map->debugfs_name) { |
417 | + map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", |
418 | devname, name); |
419 | + if (!map->debugfs_name) |
420 | + return; |
421 | + } |
422 | name = map->debugfs_name; |
423 | } else { |
424 | name = devname; |
425 | @@ -592,9 +596,10 @@ void regmap_debugfs_init(struct regmap *map, const char *name) |
426 | |
427 | if (!strcmp(name, "dummy")) { |
428 | kfree(map->debugfs_name); |
429 | - |
430 | map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", |
431 | dummy_index); |
432 | + if (!map->debugfs_name) |
433 | + return; |
434 | name = map->debugfs_name; |
435 | dummy_index++; |
436 | } |
437 | diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig |
438 | index 1bb8ec5753527..0fc27ac14f29c 100644 |
439 | --- a/drivers/block/Kconfig |
440 | +++ b/drivers/block/Kconfig |
441 | @@ -461,6 +461,7 @@ config BLK_DEV_RBD |
442 | config BLK_DEV_RSXX |
443 | tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver" |
444 | depends on PCI |
445 | + select CRC32 |
446 | help |
447 | Device driver for IBM's high speed PCIe SSD |
448 | storage device: Flash Adapter 900GB Full Height. |
449 | diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c |
450 | index 2db2f1739e092..1b2ec3be59eb7 100644 |
451 | --- a/drivers/cpufreq/powernow-k8.c |
452 | +++ b/drivers/cpufreq/powernow-k8.c |
453 | @@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data) |
454 | |
455 | /* Take a frequency, and issue the fid/vid transition command */ |
456 | static int transition_frequency_fidvid(struct powernow_k8_data *data, |
457 | - unsigned int index) |
458 | + unsigned int index, |
459 | + struct cpufreq_policy *policy) |
460 | { |
461 | - struct cpufreq_policy *policy; |
462 | u32 fid = 0; |
463 | u32 vid = 0; |
464 | int res; |
465 | @@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data, |
466 | freqs.old = find_khz_freq_from_fid(data->currfid); |
467 | freqs.new = find_khz_freq_from_fid(fid); |
468 | |
469 | - policy = cpufreq_cpu_get(smp_processor_id()); |
470 | - cpufreq_cpu_put(policy); |
471 | - |
472 | cpufreq_freq_transition_begin(policy, &freqs); |
473 | res = transition_fid_vid(data, fid, vid); |
474 | cpufreq_freq_transition_end(policy, &freqs, res); |
475 | @@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg) |
476 | |
477 | powernow_k8_acpi_pst_values(data, newstate); |
478 | |
479 | - ret = transition_frequency_fidvid(data, newstate); |
480 | + ret = transition_frequency_fidvid(data, newstate, pol); |
481 | |
482 | if (ret) { |
483 | pr_err("transition frequency failed\n"); |
484 | diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c |
485 | index f81a5e35d8fd1..eddc6d1bdb2d1 100644 |
486 | --- a/drivers/crypto/chelsio/chtls/chtls_cm.c |
487 | +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c |
488 | @@ -577,7 +577,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx) |
489 | |
490 | while (!skb_queue_empty(&listen_ctx->synq)) { |
491 | struct chtls_sock *csk = |
492 | - container_of((struct synq *)__skb_dequeue |
493 | + container_of((struct synq *)skb_peek |
494 | (&listen_ctx->synq), struct chtls_sock, synq); |
495 | struct sock *child = csk->sk; |
496 | |
497 | @@ -1021,6 +1021,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, |
498 | const struct cpl_pass_accept_req *req, |
499 | struct chtls_dev *cdev) |
500 | { |
501 | + struct adapter *adap = pci_get_drvdata(cdev->pdev); |
502 | struct inet_sock *newinet; |
503 | const struct iphdr *iph; |
504 | struct tls_context *ctx; |
505 | @@ -1030,9 +1031,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk, |
506 | struct neighbour *n; |
507 | struct tcp_sock *tp; |
508 | struct sock *newsk; |
509 | + bool found = false; |
510 | u16 port_id; |
511 | int rxq_idx; |
512 | - int step; |
513 | + int step, i; |
514 | |
515 | iph = (const struct iphdr *)network_hdr; |
516 | newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb); |
517 | @@ -1044,7 +1046,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, |
518 | goto free_sk; |
519 | |
520 | n = dst_neigh_lookup(dst, &iph->saddr); |
521 | - if (!n) |
522 | + if (!n || !n->dev) |
523 | goto free_sk; |
524 | |
525 | ndev = n->dev; |
526 | @@ -1053,6 +1055,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk, |
527 | if (is_vlan_dev(ndev)) |
528 | ndev = vlan_dev_real_dev(ndev); |
529 | |
530 | + for_each_port(adap, i) |
531 | + if (cdev->ports[i] == ndev) |
532 | + found = true; |
533 | + |
534 | + if (!found) |
535 | + goto free_dst; |
536 | + |
537 | port_id = cxgb4_port_idx(ndev); |
538 | |
539 | csk = chtls_sock_create(cdev); |
540 | @@ -1108,6 +1117,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk, |
541 | free_csk: |
542 | chtls_sock_release(&csk->kref); |
543 | free_dst: |
544 | + neigh_release(n); |
545 | dst_release(dst); |
546 | free_sk: |
547 | inet_csk_prepare_forced_close(newsk); |
548 | @@ -1443,6 +1453,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb) |
549 | sk_wake_async(sk, 0, POLL_OUT); |
550 | |
551 | data = lookup_stid(cdev->tids, stid); |
552 | + if (!data) { |
553 | + /* listening server close */ |
554 | + kfree_skb(skb); |
555 | + goto unlock; |
556 | + } |
557 | lsk = ((struct listen_ctx *)data)->lsk; |
558 | |
559 | bh_lock_sock(lsk); |
560 | @@ -1828,39 +1843,6 @@ static void send_defer_abort_rpl(struct chtls_dev *cdev, struct sk_buff *skb) |
561 | kfree_skb(skb); |
562 | } |
563 | |
564 | -static void send_abort_rpl(struct sock *sk, struct sk_buff *skb, |
565 | - struct chtls_dev *cdev, int status, int queue) |
566 | -{ |
567 | - struct cpl_abort_req_rss *req = cplhdr(skb); |
568 | - struct sk_buff *reply_skb; |
569 | - struct chtls_sock *csk; |
570 | - |
571 | - csk = rcu_dereference_sk_user_data(sk); |
572 | - |
573 | - reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl), |
574 | - GFP_KERNEL); |
575 | - |
576 | - if (!reply_skb) { |
577 | - req->status = (queue << 1); |
578 | - send_defer_abort_rpl(cdev, skb); |
579 | - return; |
580 | - } |
581 | - |
582 | - set_abort_rpl_wr(reply_skb, GET_TID(req), status); |
583 | - kfree_skb(skb); |
584 | - |
585 | - set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue); |
586 | - if (csk_conn_inline(csk)) { |
587 | - struct l2t_entry *e = csk->l2t_entry; |
588 | - |
589 | - if (e && sk->sk_state != TCP_SYN_RECV) { |
590 | - cxgb4_l2t_send(csk->egress_dev, reply_skb, e); |
591 | - return; |
592 | - } |
593 | - } |
594 | - cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb); |
595 | -} |
596 | - |
597 | /* |
598 | * Add an skb to the deferred skb queue for processing from process context. |
599 | */ |
600 | @@ -1923,9 +1905,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb) |
601 | queue = csk->txq_idx; |
602 | |
603 | skb->sk = NULL; |
604 | + chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, |
605 | + CPL_ABORT_NO_RST, queue); |
606 | do_abort_syn_rcv(child, lsk); |
607 | - send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev, |
608 | - CPL_ABORT_NO_RST, queue); |
609 | } |
610 | |
611 | static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) |
612 | @@ -1955,8 +1937,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb) |
613 | if (!sock_owned_by_user(psk)) { |
614 | int queue = csk->txq_idx; |
615 | |
616 | + chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); |
617 | do_abort_syn_rcv(sk, psk); |
618 | - send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue); |
619 | } else { |
620 | skb->sk = sk; |
621 | BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv; |
622 | @@ -1974,9 +1956,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) |
623 | int queue = csk->txq_idx; |
624 | |
625 | if (is_neg_adv(req->status)) { |
626 | - if (sk->sk_state == TCP_SYN_RECV) |
627 | - chtls_set_tcb_tflag(sk, 0, 0); |
628 | - |
629 | kfree_skb(skb); |
630 | return; |
631 | } |
632 | @@ -2002,12 +1981,11 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb) |
633 | |
634 | if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb)) |
635 | return; |
636 | - |
637 | - chtls_release_resources(sk); |
638 | - chtls_conn_done(sk); |
639 | } |
640 | |
641 | chtls_send_abort_rpl(sk, skb, csk->cdev, rst_status, queue); |
642 | + chtls_release_resources(sk); |
643 | + chtls_conn_done(sk); |
644 | } |
645 | |
646 | static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb) |
647 | diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c |
648 | index 7f9a86c3c58ff..31577316f80bc 100644 |
649 | --- a/drivers/dma/dw-edma/dw-edma-core.c |
650 | +++ b/drivers/dma/dw-edma/dw-edma-core.c |
651 | @@ -85,12 +85,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc) |
652 | |
653 | if (desc->chunk) { |
654 | /* Create and add new element into the linked list */ |
655 | - desc->chunks_alloc++; |
656 | - list_add_tail(&chunk->list, &desc->chunk->list); |
657 | if (!dw_edma_alloc_burst(chunk)) { |
658 | kfree(chunk); |
659 | return NULL; |
660 | } |
661 | + desc->chunks_alloc++; |
662 | + list_add_tail(&chunk->list, &desc->chunk->list); |
663 | } else { |
664 | /* List head */ |
665 | chunk->burst = NULL; |
666 | diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c |
667 | index 4c58da7421432..04d89eec11e74 100644 |
668 | --- a/drivers/dma/mediatek/mtk-hsdma.c |
669 | +++ b/drivers/dma/mediatek/mtk-hsdma.c |
670 | @@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev) |
671 | return 0; |
672 | |
673 | err_free: |
674 | + mtk_hsdma_hw_deinit(hsdma); |
675 | of_dma_controller_free(pdev->dev.of_node); |
676 | err_unregister: |
677 | dma_async_device_unregister(dd); |
678 | diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c |
679 | index a6abfe702c5a3..1b5f3e9f43d70 100644 |
680 | --- a/drivers/dma/xilinx/xilinx_dma.c |
681 | +++ b/drivers/dma/xilinx/xilinx_dma.c |
682 | @@ -2431,7 +2431,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
683 | has_dre = false; |
684 | |
685 | if (!has_dre) |
686 | - xdev->common.copy_align = fls(width - 1); |
687 | + xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1); |
688 | |
689 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || |
690 | of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || |
691 | @@ -2543,7 +2543,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
692 | static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, |
693 | struct device_node *node) |
694 | { |
695 | - int ret, i, nr_channels = 1; |
696 | + int ret, i; |
697 | + u32 nr_channels = 1; |
698 | |
699 | ret = of_property_read_u32(node, "dma-channels", &nr_channels); |
700 | if ((ret < 0) && xdev->mcdma) |
701 | @@ -2742,7 +2743,11 @@ static int xilinx_dma_probe(struct platform_device *pdev) |
702 | } |
703 | |
704 | /* Register the DMA engine with the core */ |
705 | - dma_async_device_register(&xdev->common); |
706 | + err = dma_async_device_register(&xdev->common); |
707 | + if (err) { |
708 | + dev_err(xdev->dev, "failed to register the dma device\n"); |
709 | + goto error; |
710 | + } |
711 | |
712 | err = of_dma_controller_register(node, of_dma_xilinx_xlate, |
713 | xdev); |
714 | diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
715 | index cd71e71339446..9e852b4bbf92b 100644 |
716 | --- a/drivers/hid/wacom_sys.c |
717 | +++ b/drivers/hid/wacom_sys.c |
718 | @@ -1270,6 +1270,37 @@ static int wacom_devm_sysfs_create_group(struct wacom *wacom, |
719 | group); |
720 | } |
721 | |
722 | +static void wacom_devm_kfifo_release(struct device *dev, void *res) |
723 | +{ |
724 | + struct kfifo_rec_ptr_2 *devres = res; |
725 | + |
726 | + kfifo_free(devres); |
727 | +} |
728 | + |
729 | +static int wacom_devm_kfifo_alloc(struct wacom *wacom) |
730 | +{ |
731 | + struct wacom_wac *wacom_wac = &wacom->wacom_wac; |
732 | + struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo; |
733 | + int error; |
734 | + |
735 | + pen_fifo = devres_alloc(wacom_devm_kfifo_release, |
736 | + sizeof(struct kfifo_rec_ptr_2), |
737 | + GFP_KERNEL); |
738 | + |
739 | + if (!pen_fifo) |
740 | + return -ENOMEM; |
741 | + |
742 | + error = kfifo_alloc(pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); |
743 | + if (error) { |
744 | + devres_free(pen_fifo); |
745 | + return error; |
746 | + } |
747 | + |
748 | + devres_add(&wacom->hdev->dev, pen_fifo); |
749 | + |
750 | + return 0; |
751 | +} |
752 | + |
753 | enum led_brightness wacom_leds_brightness_get(struct wacom_led *led) |
754 | { |
755 | struct wacom *wacom = led->wacom; |
756 | @@ -2724,7 +2755,7 @@ static int wacom_probe(struct hid_device *hdev, |
757 | if (features->check_for_hid_type && features->hid_type != hdev->type) |
758 | return -ENODEV; |
759 | |
760 | - error = kfifo_alloc(&wacom_wac->pen_fifo, WACOM_PKGLEN_MAX, GFP_KERNEL); |
761 | + error = wacom_devm_kfifo_alloc(wacom); |
762 | if (error) |
763 | return error; |
764 | |
765 | @@ -2786,8 +2817,6 @@ static void wacom_remove(struct hid_device *hdev) |
766 | |
767 | if (wacom->wacom_wac.features.type != REMOTE) |
768 | wacom_release_resources(wacom); |
769 | - |
770 | - kfifo_free(&wacom_wac->pen_fifo); |
771 | } |
772 | |
773 | #ifdef CONFIG_PM |
774 | diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c |
775 | index c40eef4e7a985..2b6a4c1f188f4 100644 |
776 | --- a/drivers/i2c/busses/i2c-i801.c |
777 | +++ b/drivers/i2c/busses/i2c-i801.c |
778 | @@ -1424,7 +1424,7 @@ static int i801_add_mux(struct i801_priv *priv) |
779 | |
780 | /* Register GPIO descriptor lookup table */ |
781 | lookup = devm_kzalloc(dev, |
782 | - struct_size(lookup, table, mux_config->n_gpios), |
783 | + struct_size(lookup, table, mux_config->n_gpios + 1), |
784 | GFP_KERNEL); |
785 | if (!lookup) |
786 | return -ENOMEM; |
787 | diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c |
788 | index b432e7580458d..b2dc802864641 100644 |
789 | --- a/drivers/i2c/busses/i2c-sprd.c |
790 | +++ b/drivers/i2c/busses/i2c-sprd.c |
791 | @@ -72,6 +72,8 @@ |
792 | |
793 | /* timeout (ms) for pm runtime autosuspend */ |
794 | #define SPRD_I2C_PM_TIMEOUT 1000 |
795 | +/* timeout (ms) for transfer message */ |
796 | +#define I2C_XFER_TIMEOUT 1000 |
797 | |
798 | /* SPRD i2c data structure */ |
799 | struct sprd_i2c { |
800 | @@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, |
801 | struct i2c_msg *msg, bool is_last_msg) |
802 | { |
803 | struct sprd_i2c *i2c_dev = i2c_adap->algo_data; |
804 | + unsigned long time_left; |
805 | |
806 | i2c_dev->msg = msg; |
807 | i2c_dev->buf = msg->buf; |
808 | @@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap, |
809 | |
810 | sprd_i2c_opt_start(i2c_dev); |
811 | |
812 | - wait_for_completion(&i2c_dev->complete); |
813 | + time_left = wait_for_completion_timeout(&i2c_dev->complete, |
814 | + msecs_to_jiffies(I2C_XFER_TIMEOUT)); |
815 | + if (!time_left) |
816 | + return -ETIMEDOUT; |
817 | |
818 | return i2c_dev->err; |
819 | } |
820 | diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c |
821 | index b0f3da1976e4f..d1f2109012ed5 100644 |
822 | --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c |
823 | +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c |
824 | @@ -664,13 +664,29 @@ static irqreturn_t st_lsm6dsx_handler_irq(int irq, void *private) |
825 | static irqreturn_t st_lsm6dsx_handler_thread(int irq, void *private) |
826 | { |
827 | struct st_lsm6dsx_hw *hw = private; |
828 | - int count; |
829 | + int fifo_len = 0, len; |
830 | |
831 | - mutex_lock(&hw->fifo_lock); |
832 | - count = hw->settings->fifo_ops.read_fifo(hw); |
833 | - mutex_unlock(&hw->fifo_lock); |
834 | + /* |
835 | + * If we are using edge IRQs, new samples can arrive while |
836 | + * processing current interrupt since there are no hw |
837 | + * guarantees the irq line stays "low" long enough to properly |
838 | + * detect the new interrupt. In this case the new sample will |
839 | + * be missed. |
840 | + * Polling FIFO status register allow us to read new |
841 | + * samples even if the interrupt arrives while processing |
842 | + * previous data and the timeslot where the line is "low" is |
843 | + * too short to be properly detected. |
844 | + */ |
845 | + do { |
846 | + mutex_lock(&hw->fifo_lock); |
847 | + len = hw->settings->fifo_ops.read_fifo(hw); |
848 | + mutex_unlock(&hw->fifo_lock); |
849 | + |
850 | + if (len > 0) |
851 | + fifo_len += len; |
852 | + } while (len > 0); |
853 | |
854 | - return count ? IRQ_HANDLED : IRQ_NONE; |
855 | + return fifo_len ? IRQ_HANDLED : IRQ_NONE; |
856 | } |
857 | |
858 | static int st_lsm6dsx_buffer_preenable(struct iio_dev *iio_dev) |
859 | diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c |
860 | index f697f3a1d46bc..5dcc81b1df623 100644 |
861 | --- a/drivers/iommu/intel_irq_remapping.c |
862 | +++ b/drivers/iommu/intel_irq_remapping.c |
863 | @@ -1400,6 +1400,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain, |
864 | irq_data = irq_domain_get_irq_data(domain, virq + i); |
865 | irq_cfg = irqd_cfg(irq_data); |
866 | if (!irq_data || !irq_cfg) { |
867 | + if (!i) |
868 | + kfree(data); |
869 | ret = -EINVAL; |
870 | goto out_free_data; |
871 | } |
872 | diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig |
873 | index 8f39f9ba5c80e..4c2ce210c1237 100644 |
874 | --- a/drivers/lightnvm/Kconfig |
875 | +++ b/drivers/lightnvm/Kconfig |
876 | @@ -19,6 +19,7 @@ if NVM |
877 | |
878 | config NVM_PBLK |
879 | tristate "Physical Block Device Open-Channel SSD target" |
880 | + select CRC32 |
881 | help |
882 | Allows an open-channel SSD to be exposed as a block device to the |
883 | host. The target assumes the device exposes raw flash and must be |
884 | diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig |
885 | index 17c166cc8482d..e4d944770ccaf 100644 |
886 | --- a/drivers/net/can/Kconfig |
887 | +++ b/drivers/net/can/Kconfig |
888 | @@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3 |
889 | config CAN_KVASER_PCIEFD |
890 | depends on PCI |
891 | tristate "Kvaser PCIe FD cards" |
892 | + select CRC32 |
893 | help |
894 | This is a driver for the Kvaser PCI Express CAN FD family. |
895 | |
896 | diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c |
897 | index d2bb9a87eff9a..8a842545e3f69 100644 |
898 | --- a/drivers/net/can/m_can/m_can.c |
899 | +++ b/drivers/net/can/m_can/m_can.c |
900 | @@ -1868,8 +1868,6 @@ void m_can_class_unregister(struct m_can_classdev *m_can_dev) |
901 | { |
902 | unregister_candev(m_can_dev->net); |
903 | |
904 | - m_can_clk_stop(m_can_dev); |
905 | - |
906 | free_candev(m_can_dev->net); |
907 | } |
908 | EXPORT_SYMBOL_GPL(m_can_class_unregister); |
909 | diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c |
910 | index 681bb861de05e..1f8710b35c6d7 100644 |
911 | --- a/drivers/net/can/m_can/tcan4x5x.c |
912 | +++ b/drivers/net/can/m_can/tcan4x5x.c |
913 | @@ -126,30 +126,6 @@ struct tcan4x5x_priv { |
914 | int reg_offset; |
915 | }; |
916 | |
917 | -static struct can_bittiming_const tcan4x5x_bittiming_const = { |
918 | - .name = DEVICE_NAME, |
919 | - .tseg1_min = 2, |
920 | - .tseg1_max = 31, |
921 | - .tseg2_min = 2, |
922 | - .tseg2_max = 16, |
923 | - .sjw_max = 16, |
924 | - .brp_min = 1, |
925 | - .brp_max = 32, |
926 | - .brp_inc = 1, |
927 | -}; |
928 | - |
929 | -static struct can_bittiming_const tcan4x5x_data_bittiming_const = { |
930 | - .name = DEVICE_NAME, |
931 | - .tseg1_min = 1, |
932 | - .tseg1_max = 32, |
933 | - .tseg2_min = 1, |
934 | - .tseg2_max = 16, |
935 | - .sjw_max = 16, |
936 | - .brp_min = 1, |
937 | - .brp_max = 32, |
938 | - .brp_inc = 1, |
939 | -}; |
940 | - |
941 | static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv) |
942 | { |
943 | int wake_state = 0; |
944 | @@ -449,8 +425,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi) |
945 | mcan_class->dev = &spi->dev; |
946 | mcan_class->ops = &tcan4x5x_ops; |
947 | mcan_class->is_peripheral = true; |
948 | - mcan_class->bit_timing = &tcan4x5x_bittiming_const; |
949 | - mcan_class->data_timing = &tcan4x5x_data_bittiming_const; |
950 | mcan_class->net->irq = spi->irq; |
951 | |
952 | spi_set_drvdata(spi, priv); |
953 | diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c |
954 | index 0d9b3fa7bd94e..ee1e67df1e7b4 100644 |
955 | --- a/drivers/net/dsa/lantiq_gswip.c |
956 | +++ b/drivers/net/dsa/lantiq_gswip.c |
957 | @@ -1419,11 +1419,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port, |
958 | phylink_set(mask, Pause); |
959 | phylink_set(mask, Asym_Pause); |
960 | |
961 | - /* With the exclusion of MII and Reverse MII, we support Gigabit, |
962 | - * including Half duplex |
963 | + /* With the exclusion of MII, Reverse MII and Reduced MII, we |
964 | + * support Gigabit, including Half duplex |
965 | */ |
966 | if (state->interface != PHY_INTERFACE_MODE_MII && |
967 | - state->interface != PHY_INTERFACE_MODE_REVMII) { |
968 | + state->interface != PHY_INTERFACE_MODE_REVMII && |
969 | + state->interface != PHY_INTERFACE_MODE_RMII) { |
970 | phylink_set(mask, 1000baseT_Full); |
971 | phylink_set(mask, 1000baseT_Half); |
972 | } |
973 | diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h |
974 | index f8a87f8ca9833..148e53812d89c 100644 |
975 | --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h |
976 | +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h |
977 | @@ -123,7 +123,7 @@ struct hclgevf_mbx_arq_ring { |
978 | #define hclge_mbx_ring_ptr_move_crq(crq) \ |
979 | (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num) |
980 | #define hclge_mbx_tail_ptr_move_arq(arq) \ |
981 | - (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) |
982 | + (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) |
983 | #define hclge_mbx_head_ptr_move_arq(arq) \ |
984 | - (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE) |
985 | + (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM) |
986 | #endif |
987 | diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c |
988 | index 6c3d13110993f..6887b7fda6e07 100644 |
989 | --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c |
990 | +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c |
991 | @@ -746,7 +746,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) |
992 | handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK; |
993 | handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK; |
994 | |
995 | - if (hdev->hw.mac.phydev) { |
996 | + if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv && |
997 | + hdev->hw.mac.phydev->drv->set_loopback) { |
998 | count += 1; |
999 | handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK; |
1000 | } |
1001 | diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c |
1002 | index 8827ab4b4932e..6988bbf2576f5 100644 |
1003 | --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c |
1004 | +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c |
1005 | @@ -4545,7 +4545,7 @@ static int mvpp2_port_init(struct mvpp2_port *port) |
1006 | struct mvpp2 *priv = port->priv; |
1007 | struct mvpp2_txq_pcpu *txq_pcpu; |
1008 | unsigned int thread; |
1009 | - int queue, err; |
1010 | + int queue, err, val; |
1011 | |
1012 | /* Checks for hardware constraints */ |
1013 | if (port->first_rxq + port->nrxqs > |
1014 | @@ -4559,6 +4559,18 @@ static int mvpp2_port_init(struct mvpp2_port *port) |
1015 | mvpp2_egress_disable(port); |
1016 | mvpp2_port_disable(port); |
1017 | |
1018 | + if (mvpp2_is_xlg(port->phy_interface)) { |
1019 | + val = readl(port->base + MVPP22_XLG_CTRL0_REG); |
1020 | + val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS; |
1021 | + val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN; |
1022 | + writel(val, port->base + MVPP22_XLG_CTRL0_REG); |
1023 | + } else { |
1024 | + val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
1025 | + val &= ~MVPP2_GMAC_FORCE_LINK_PASS; |
1026 | + val |= MVPP2_GMAC_FORCE_LINK_DOWN; |
1027 | + writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG); |
1028 | + } |
1029 | + |
1030 | port->tx_time_coal = MVPP2_TXDONE_COAL_USEC; |
1031 | |
1032 | port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs), |
1033 | diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c |
1034 | index 6d55e3d0b7ea2..54e9f6dc24ea0 100644 |
1035 | --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c |
1036 | +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c |
1037 | @@ -725,8 +725,10 @@ static int cgx_lmac_init(struct cgx *cgx) |
1038 | if (!lmac) |
1039 | return -ENOMEM; |
1040 | lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL); |
1041 | - if (!lmac->name) |
1042 | - return -ENOMEM; |
1043 | + if (!lmac->name) { |
1044 | + err = -ENOMEM; |
1045 | + goto err_lmac_free; |
1046 | + } |
1047 | sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i); |
1048 | lmac->lmac_id = i; |
1049 | lmac->cgx = cgx; |
1050 | @@ -737,7 +739,7 @@ static int cgx_lmac_init(struct cgx *cgx) |
1051 | CGX_LMAC_FWI + i * 9), |
1052 | cgx_fwi_event_handler, 0, lmac->name, lmac); |
1053 | if (err) |
1054 | - return err; |
1055 | + goto err_irq; |
1056 | |
1057 | /* Enable interrupt */ |
1058 | cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S, |
1059 | @@ -748,6 +750,12 @@ static int cgx_lmac_init(struct cgx *cgx) |
1060 | } |
1061 | |
1062 | return cgx_lmac_verify_fwi_version(cgx); |
1063 | + |
1064 | +err_irq: |
1065 | + kfree(lmac->name); |
1066 | +err_lmac_free: |
1067 | + kfree(lmac); |
1068 | + return err; |
1069 | } |
1070 | |
1071 | static int cgx_lmac_exit(struct cgx *cgx) |
1072 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
1073 | index 8cd529556b214..01089c2283d7f 100644 |
1074 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
1075 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c |
1076 | @@ -976,6 +976,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, |
1077 | return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); |
1078 | } |
1079 | |
1080 | +static int mlx5e_speed_validate(struct net_device *netdev, bool ext, |
1081 | + const unsigned long link_modes, u8 autoneg) |
1082 | +{ |
1083 | + /* Extended link-mode has no speed limitations. */ |
1084 | + if (ext) |
1085 | + return 0; |
1086 | + |
1087 | + if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && |
1088 | + autoneg != AUTONEG_ENABLE) { |
1089 | + netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n", |
1090 | + __func__); |
1091 | + return -EINVAL; |
1092 | + } |
1093 | + return 0; |
1094 | +} |
1095 | + |
1096 | static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) |
1097 | { |
1098 | u32 i, ptys_modes = 0; |
1099 | @@ -1068,13 +1084,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, |
1100 | link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) : |
1101 | mlx5e_port_speed2linkmodes(mdev, speed, !ext); |
1102 | |
1103 | - if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) && |
1104 | - autoneg != AUTONEG_ENABLE) { |
1105 | - netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n", |
1106 | - __func__); |
1107 | - err = -EINVAL; |
1108 | + err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg); |
1109 | + if (err) |
1110 | goto out; |
1111 | - } |
1112 | |
1113 | link_modes = link_modes & eproto.cap; |
1114 | if (!link_modes) { |
1115 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
1116 | index 713dc210f710c..c4ac7a9968d16 100644 |
1117 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
1118 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c |
1119 | @@ -927,6 +927,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, |
1120 | in = kvzalloc(inlen, GFP_KERNEL); |
1121 | if (!in) { |
1122 | kfree(ft->g); |
1123 | + ft->g = NULL; |
1124 | return -ENOMEM; |
1125 | } |
1126 | |
1127 | @@ -1067,6 +1068,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) |
1128 | in = kvzalloc(inlen, GFP_KERNEL); |
1129 | if (!in) { |
1130 | kfree(ft->g); |
1131 | + ft->g = NULL; |
1132 | return -ENOMEM; |
1133 | } |
1134 | |
1135 | @@ -1346,6 +1348,7 @@ err_destroy_groups: |
1136 | ft->g[ft->num_groups] = NULL; |
1137 | mlx5e_destroy_groups(ft); |
1138 | kvfree(in); |
1139 | + kfree(ft->g); |
1140 | |
1141 | return err; |
1142 | } |
1143 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c |
1144 | index 0fc7de4aa572f..8e0dddc6383f0 100644 |
1145 | --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c |
1146 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c |
1147 | @@ -116,7 +116,7 @@ free: |
1148 | static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev) |
1149 | { |
1150 | mlx5_core_roce_gid_set(dev, 0, 0, 0, |
1151 | - NULL, NULL, false, 0, 0); |
1152 | + NULL, NULL, false, 0, 1); |
1153 | } |
1154 | |
1155 | static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid) |
1156 | diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c |
1157 | index 0937fc2a928ed..23c9394cd5d22 100644 |
1158 | --- a/drivers/net/ethernet/natsemi/macsonic.c |
1159 | +++ b/drivers/net/ethernet/natsemi/macsonic.c |
1160 | @@ -540,10 +540,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev) |
1161 | |
1162 | err = register_netdev(dev); |
1163 | if (err) |
1164 | - goto out; |
1165 | + goto undo_probe; |
1166 | |
1167 | return 0; |
1168 | |
1169 | +undo_probe: |
1170 | + dma_free_coherent(lp->device, |
1171 | + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), |
1172 | + lp->descriptors, lp->descriptors_laddr); |
1173 | out: |
1174 | free_netdev(dev); |
1175 | |
1176 | @@ -618,12 +622,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board) |
1177 | |
1178 | err = register_netdev(ndev); |
1179 | if (err) |
1180 | - goto out; |
1181 | + goto undo_probe; |
1182 | |
1183 | nubus_set_drvdata(board, ndev); |
1184 | |
1185 | return 0; |
1186 | |
1187 | +undo_probe: |
1188 | + dma_free_coherent(lp->device, |
1189 | + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), |
1190 | + lp->descriptors, lp->descriptors_laddr); |
1191 | out: |
1192 | free_netdev(ndev); |
1193 | return err; |
1194 | diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c |
1195 | index e1b886e87a762..44171d7bb434c 100644 |
1196 | --- a/drivers/net/ethernet/natsemi/xtsonic.c |
1197 | +++ b/drivers/net/ethernet/natsemi/xtsonic.c |
1198 | @@ -265,11 +265,14 @@ int xtsonic_probe(struct platform_device *pdev) |
1199 | sonic_msg_init(dev); |
1200 | |
1201 | if ((err = register_netdev(dev))) |
1202 | - goto out1; |
1203 | + goto undo_probe1; |
1204 | |
1205 | return 0; |
1206 | |
1207 | -out1: |
1208 | +undo_probe1: |
1209 | + dma_free_coherent(lp->device, |
1210 | + SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), |
1211 | + lp->descriptors, lp->descriptors_laddr); |
1212 | release_region(dev->base_addr, SONIC_MEM_SIZE); |
1213 | out: |
1214 | free_netdev(dev); |
1215 | diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig |
1216 | index 55a29ec766807..58eac2471d53a 100644 |
1217 | --- a/drivers/net/ethernet/qlogic/Kconfig |
1218 | +++ b/drivers/net/ethernet/qlogic/Kconfig |
1219 | @@ -78,6 +78,7 @@ config QED |
1220 | depends on PCI |
1221 | select ZLIB_INFLATE |
1222 | select CRC8 |
1223 | + select CRC32 |
1224 | select NET_DEVLINK |
1225 | ---help--- |
1226 | This enables the support for ... |
1227 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c |
1228 | index e9e0867ec139d..c4c9cbdeb601e 100644 |
1229 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c |
1230 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c |
1231 | @@ -64,6 +64,7 @@ struct emac_variant { |
1232 | * @variant: reference to the current board variant |
1233 | * @regmap: regmap for using the syscon |
1234 | * @internal_phy_powered: Does the internal PHY is enabled |
1235 | + * @use_internal_phy: Is the internal PHY selected for use |
1236 | * @mux_handle: Internal pointer used by mdio-mux lib |
1237 | */ |
1238 | struct sunxi_priv_data { |
1239 | @@ -74,6 +75,7 @@ struct sunxi_priv_data { |
1240 | const struct emac_variant *variant; |
1241 | struct regmap_field *regmap_field; |
1242 | bool internal_phy_powered; |
1243 | + bool use_internal_phy; |
1244 | void *mux_handle; |
1245 | }; |
1246 | |
1247 | @@ -523,8 +525,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = { |
1248 | .dma_interrupt = sun8i_dwmac_dma_interrupt, |
1249 | }; |
1250 | |
1251 | +static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv); |
1252 | + |
1253 | static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) |
1254 | { |
1255 | + struct net_device *ndev = platform_get_drvdata(pdev); |
1256 | struct sunxi_priv_data *gmac = priv; |
1257 | int ret; |
1258 | |
1259 | @@ -538,13 +543,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv) |
1260 | |
1261 | ret = clk_prepare_enable(gmac->tx_clk); |
1262 | if (ret) { |
1263 | - if (gmac->regulator) |
1264 | - regulator_disable(gmac->regulator); |
1265 | dev_err(&pdev->dev, "Could not enable AHB clock\n"); |
1266 | - return ret; |
1267 | + goto err_disable_regulator; |
1268 | + } |
1269 | + |
1270 | + if (gmac->use_internal_phy) { |
1271 | + ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev)); |
1272 | + if (ret) |
1273 | + goto err_disable_clk; |
1274 | } |
1275 | |
1276 | return 0; |
1277 | + |
1278 | +err_disable_clk: |
1279 | + clk_disable_unprepare(gmac->tx_clk); |
1280 | +err_disable_regulator: |
1281 | + if (gmac->regulator) |
1282 | + regulator_disable(gmac->regulator); |
1283 | + |
1284 | + return ret; |
1285 | } |
1286 | |
1287 | static void sun8i_dwmac_core_init(struct mac_device_info *hw, |
1288 | @@ -815,7 +832,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, |
1289 | struct sunxi_priv_data *gmac = priv->plat->bsp_priv; |
1290 | u32 reg, val; |
1291 | int ret = 0; |
1292 | - bool need_power_ephy = false; |
1293 | |
1294 | if (current_child ^ desired_child) { |
1295 | regmap_field_read(gmac->regmap_field, ®); |
1296 | @@ -823,13 +839,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, |
1297 | case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID: |
1298 | dev_info(priv->device, "Switch mux to internal PHY"); |
1299 | val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT; |
1300 | - |
1301 | - need_power_ephy = true; |
1302 | + gmac->use_internal_phy = true; |
1303 | break; |
1304 | case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID: |
1305 | dev_info(priv->device, "Switch mux to external PHY"); |
1306 | val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN; |
1307 | - need_power_ephy = false; |
1308 | + gmac->use_internal_phy = false; |
1309 | break; |
1310 | default: |
1311 | dev_err(priv->device, "Invalid child ID %x\n", |
1312 | @@ -837,7 +852,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child, |
1313 | return -EINVAL; |
1314 | } |
1315 | regmap_field_write(gmac->regmap_field, val); |
1316 | - if (need_power_ephy) { |
1317 | + if (gmac->use_internal_phy) { |
1318 | ret = sun8i_dwmac_power_internal_phy(priv); |
1319 | if (ret) |
1320 | return ret; |
1321 | @@ -988,17 +1003,12 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv) |
1322 | struct sunxi_priv_data *gmac = priv; |
1323 | |
1324 | if (gmac->variant->soc_has_internal_phy) { |
1325 | - /* sun8i_dwmac_exit could be called with mdiomux uninit */ |
1326 | - if (gmac->mux_handle) |
1327 | - mdio_mux_uninit(gmac->mux_handle); |
1328 | if (gmac->internal_phy_powered) |
1329 | sun8i_dwmac_unpower_internal_phy(gmac); |
1330 | } |
1331 | |
1332 | sun8i_dwmac_unset_syscon(gmac); |
1333 | |
1334 | - reset_control_put(gmac->rst_ephy); |
1335 | - |
1336 | clk_disable_unprepare(gmac->tx_clk); |
1337 | |
1338 | if (gmac->regulator) |
1339 | @@ -1227,12 +1237,32 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) |
1340 | |
1341 | return ret; |
1342 | dwmac_mux: |
1343 | + reset_control_put(gmac->rst_ephy); |
1344 | + clk_put(gmac->ephy_clk); |
1345 | sun8i_dwmac_unset_syscon(gmac); |
1346 | dwmac_exit: |
1347 | stmmac_pltfr_remove(pdev); |
1348 | return ret; |
1349 | } |
1350 | |
1351 | +static int sun8i_dwmac_remove(struct platform_device *pdev) |
1352 | +{ |
1353 | + struct net_device *ndev = platform_get_drvdata(pdev); |
1354 | + struct stmmac_priv *priv = netdev_priv(ndev); |
1355 | + struct sunxi_priv_data *gmac = priv->plat->bsp_priv; |
1356 | + |
1357 | + if (gmac->variant->soc_has_internal_phy) { |
1358 | + mdio_mux_uninit(gmac->mux_handle); |
1359 | + sun8i_dwmac_unpower_internal_phy(gmac); |
1360 | + reset_control_put(gmac->rst_ephy); |
1361 | + clk_put(gmac->ephy_clk); |
1362 | + } |
1363 | + |
1364 | + stmmac_pltfr_remove(pdev); |
1365 | + |
1366 | + return 0; |
1367 | +} |
1368 | + |
1369 | static const struct of_device_id sun8i_dwmac_match[] = { |
1370 | { .compatible = "allwinner,sun8i-h3-emac", |
1371 | .data = &emac_variant_h3 }, |
1372 | @@ -1252,7 +1282,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match); |
1373 | |
1374 | static struct platform_driver sun8i_dwmac_driver = { |
1375 | .probe = sun8i_dwmac_probe, |
1376 | - .remove = stmmac_pltfr_remove, |
1377 | + .remove = sun8i_dwmac_remove, |
1378 | .driver = { |
1379 | .name = "dwmac-sun8i", |
1380 | .pm = &stmmac_pltfr_pm_ops, |
1381 | diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
1382 | index d407489cec904..cbe7f35eac982 100644 |
1383 | --- a/drivers/net/usb/cdc_ncm.c |
1384 | +++ b/drivers/net/usb/cdc_ncm.c |
1385 | @@ -1126,7 +1126,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
1386 | * accordingly. Otherwise, we should check here. |
1387 | */ |
1388 | if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) |
1389 | - delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus); |
1390 | + delayed_ndp_size = ctx->max_ndp_size + |
1391 | + max_t(u32, |
1392 | + ctx->tx_ndp_modulus, |
1393 | + ctx->tx_modulus + ctx->tx_remainder) - 1; |
1394 | else |
1395 | delayed_ndp_size = 0; |
1396 | |
1397 | @@ -1307,7 +1310,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
1398 | if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && |
1399 | skb_out->len > ctx->min_tx_pkt) { |
1400 | padding_count = ctx->tx_curr_size - skb_out->len; |
1401 | - skb_put_zero(skb_out, padding_count); |
1402 | + if (!WARN_ON(padding_count > ctx->tx_curr_size)) |
1403 | + skb_put_zero(skb_out, padding_count); |
1404 | } else if (skb_out->len < ctx->tx_curr_size && |
1405 | (skb_out->len % dev->maxpacket) == 0) { |
1406 | skb_put_u8(skb_out, 0); /* force short packet */ |
1407 | diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig |
1408 | index 058d77d2e693d..0d6e1829e0ac9 100644 |
1409 | --- a/drivers/net/wan/Kconfig |
1410 | +++ b/drivers/net/wan/Kconfig |
1411 | @@ -282,6 +282,7 @@ config SLIC_DS26522 |
1412 | tristate "Slic Maxim ds26522 card support" |
1413 | depends on SPI |
1414 | depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST |
1415 | + select BITREVERSE |
1416 | help |
1417 | This module initializes and configures the slic maxim card |
1418 | in T1 or E1 mode. |
1419 | diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig |
1420 | index 0d1a8dab30ed4..32e1c036f3ac9 100644 |
1421 | --- a/drivers/net/wireless/ath/wil6210/Kconfig |
1422 | +++ b/drivers/net/wireless/ath/wil6210/Kconfig |
1423 | @@ -2,6 +2,7 @@ |
1424 | config WIL6210 |
1425 | tristate "Wilocity 60g WiFi card wil6210 support" |
1426 | select WANT_DEV_COREDUMP |
1427 | + select CRC32 |
1428 | depends on CFG80211 |
1429 | depends on PCI |
1430 | default n |
1431 | diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c |
1432 | index f11e4bfbc91be..a47f87b8373df 100644 |
1433 | --- a/drivers/regulator/qcom-rpmh-regulator.c |
1434 | +++ b/drivers/regulator/qcom-rpmh-regulator.c |
1435 | @@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = { |
1436 | static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = { |
1437 | .regulator_type = VRM, |
1438 | .ops = &rpmh_regulator_vrm_ops, |
1439 | - .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600), |
1440 | + .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000), |
1441 | .n_voltages = 5, |
1442 | .pmic_mode_map = pmic_mode_map_pmic5_smps, |
1443 | .of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode, |
1444 | diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c |
1445 | index a1c23e998f977..8dee16aca421f 100644 |
1446 | --- a/drivers/s390/net/qeth_l3_main.c |
1447 | +++ b/drivers/s390/net/qeth_l3_main.c |
1448 | @@ -2114,7 +2114,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, |
1449 | struct net_device *dev, |
1450 | netdev_features_t features) |
1451 | { |
1452 | - if (qeth_get_ip_version(skb) != 4) |
1453 | + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) |
1454 | features &= ~NETIF_F_HW_VLAN_CTAG_TX; |
1455 | return qeth_features_check(skb, dev, features); |
1456 | } |
1457 | diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c |
1458 | index ed20ad2950885..77ddf23b65d65 100644 |
1459 | --- a/drivers/spi/spi-stm32.c |
1460 | +++ b/drivers/spi/spi-stm32.c |
1461 | @@ -494,9 +494,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len) |
1462 | |
1463 | /* align packet size with data registers access */ |
1464 | if (spi->cur_bpw > 8) |
1465 | - fthlv -= (fthlv % 2); /* multiple of 2 */ |
1466 | + fthlv += (fthlv % 2) ? 1 : 0; |
1467 | else |
1468 | - fthlv -= (fthlv % 4); /* multiple of 4 */ |
1469 | + fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0; |
1470 | |
1471 | if (!fthlv) |
1472 | fthlv = 1; |
1473 | diff --git a/drivers/staging/exfat/exfat_super.c b/drivers/staging/exfat/exfat_super.c |
1474 | index 58c7d66060f7e..dd12777b9a788 100644 |
1475 | --- a/drivers/staging/exfat/exfat_super.c |
1476 | +++ b/drivers/staging/exfat/exfat_super.c |
1477 | @@ -59,7 +59,7 @@ static void exfat_write_super(struct super_block *sb); |
1478 | /* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */ |
1479 | static void exfat_time_fat2unix(struct timespec64 *ts, struct date_time_t *tp) |
1480 | { |
1481 | - ts->tv_sec = mktime64(tp->Year + 1980, tp->Month + 1, tp->Day, |
1482 | + ts->tv_sec = mktime64(tp->Year + 1980, tp->Month, tp->Day, |
1483 | tp->Hour, tp->Minute, tp->Second); |
1484 | |
1485 | ts->tv_nsec = tp->MilliSecond * NSEC_PER_MSEC; |
1486 | diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c |
1487 | index 3b31e83a92155..bc6ba41686fa3 100644 |
1488 | --- a/drivers/vfio/vfio_iommu_type1.c |
1489 | +++ b/drivers/vfio/vfio_iommu_type1.c |
1490 | @@ -2303,6 +2303,24 @@ out_unlock: |
1491 | return ret; |
1492 | } |
1493 | |
1494 | +static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu, |
1495 | + struct vfio_info_cap *caps) |
1496 | +{ |
1497 | + struct vfio_iommu_type1_info_dma_avail cap_dma_avail; |
1498 | + int ret; |
1499 | + |
1500 | + mutex_lock(&iommu->lock); |
1501 | + cap_dma_avail.header.id = VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL; |
1502 | + cap_dma_avail.header.version = 1; |
1503 | + |
1504 | + cap_dma_avail.avail = iommu->dma_avail; |
1505 | + |
1506 | + ret = vfio_info_add_capability(caps, &cap_dma_avail.header, |
1507 | + sizeof(cap_dma_avail)); |
1508 | + mutex_unlock(&iommu->lock); |
1509 | + return ret; |
1510 | +} |
1511 | + |
1512 | static long vfio_iommu_type1_ioctl(void *iommu_data, |
1513 | unsigned int cmd, unsigned long arg) |
1514 | { |
1515 | @@ -2349,6 +2367,10 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, |
1516 | info.iova_pgsizes = vfio_pgsize_bitmap(iommu); |
1517 | |
1518 | ret = vfio_iommu_iova_build_caps(iommu, &caps); |
1519 | + |
1520 | + if (!ret) |
1521 | + ret = vfio_iommu_dma_avail_build_caps(iommu, &caps); |
1522 | + |
1523 | if (ret) |
1524 | return ret; |
1525 | |
1526 | diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h |
1527 | index e5e2425875953..130f16cc0b86d 100644 |
1528 | --- a/include/asm-generic/vmlinux.lds.h |
1529 | +++ b/include/asm-generic/vmlinux.lds.h |
1530 | @@ -520,7 +520,10 @@ |
1531 | */ |
1532 | #define TEXT_TEXT \ |
1533 | ALIGN_FUNCTION(); \ |
1534 | - *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \ |
1535 | + *(.text.hot .text.hot.*) \ |
1536 | + *(TEXT_MAIN .text.fixup) \ |
1537 | + *(.text.unlikely .text.unlikely.*) \ |
1538 | + *(.text.unknown .text.unknown.*) \ |
1539 | *(.text..refcount) \ |
1540 | *(.ref.text) \ |
1541 | MEM_KEEP(init.text*) \ |
1542 | diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h |
1543 | index 9e843a147ead0..cabc93118f9c8 100644 |
1544 | --- a/include/uapi/linux/vfio.h |
1545 | +++ b/include/uapi/linux/vfio.h |
1546 | @@ -748,6 +748,21 @@ struct vfio_iommu_type1_info_cap_iova_range { |
1547 | struct vfio_iova_range iova_ranges[]; |
1548 | }; |
1549 | |
1550 | +/* |
1551 | + * The DMA available capability allows to report the current number of |
1552 | + * simultaneously outstanding DMA mappings that are allowed. |
1553 | + * |
1554 | + * The structure below defines version 1 of this capability. |
1555 | + * |
1556 | + * avail: specifies the current number of outstanding DMA mappings allowed. |
1557 | + */ |
1558 | +#define VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL 3 |
1559 | + |
1560 | +struct vfio_iommu_type1_info_dma_avail { |
1561 | + struct vfio_info_cap_header header; |
1562 | + __u32 avail; |
1563 | +}; |
1564 | + |
1565 | #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) |
1566 | |
1567 | /** |
1568 | diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c |
1569 | index d4bcfd8f95bf6..3f47abf9ef4a6 100644 |
1570 | --- a/net/8021q/vlan.c |
1571 | +++ b/net/8021q/vlan.c |
1572 | @@ -280,7 +280,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id) |
1573 | return 0; |
1574 | |
1575 | out_free_newdev: |
1576 | - if (new_dev->reg_state == NETREG_UNINITIALIZED) |
1577 | + if (new_dev->reg_state == NETREG_UNINITIALIZED || |
1578 | + new_dev->reg_state == NETREG_UNREGISTERED) |
1579 | free_netdev(new_dev); |
1580 | return err; |
1581 | } |
1582 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
1583 | index a0486dcf5425b..49d923c227a21 100644 |
1584 | --- a/net/core/skbuff.c |
1585 | +++ b/net/core/skbuff.c |
1586 | @@ -2017,6 +2017,12 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) |
1587 | skb->csum = csum_block_sub(skb->csum, |
1588 | skb_checksum(skb, len, delta, 0), |
1589 | len); |
1590 | + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1591 | + int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; |
1592 | + int offset = skb_checksum_start_offset(skb) + skb->csum_offset; |
1593 | + |
1594 | + if (offset + sizeof(__sum16) > hdlen) |
1595 | + return -EINVAL; |
1596 | } |
1597 | return __pskb_trim(skb, len); |
1598 | } |
1599 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
1600 | index 079dcf9f0c56d..7a394479dd56c 100644 |
1601 | --- a/net/ipv4/ip_output.c |
1602 | +++ b/net/ipv4/ip_output.c |
1603 | @@ -303,7 +303,7 @@ static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff * |
1604 | if (skb_is_gso(skb)) |
1605 | return ip_finish_output_gso(net, sk, skb, mtu); |
1606 | |
1607 | - if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) |
1608 | + if (skb->len > mtu || IPCB(skb)->frag_max_size) |
1609 | return ip_fragment(net, sk, skb, mtu, ip_finish_output2); |
1610 | |
1611 | return ip_finish_output2(net, sk, skb); |
1612 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
1613 | index f61c5a0b502a8..ca525cf681a4e 100644 |
1614 | --- a/net/ipv4/ip_tunnel.c |
1615 | +++ b/net/ipv4/ip_tunnel.c |
1616 | @@ -765,8 +765,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
1617 | goto tx_error; |
1618 | } |
1619 | |
1620 | - if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph, |
1621 | - 0, 0, false)) { |
1622 | + df = tnl_params->frag_off; |
1623 | + if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) |
1624 | + df |= (inner_iph->frag_off & htons(IP_DF)); |
1625 | + |
1626 | + if (tnl_update_pmtu(dev, skb, rt, df, inner_iph, 0, 0, false)) { |
1627 | ip_rt_put(rt); |
1628 | goto tx_error; |
1629 | } |
1630 | @@ -794,10 +797,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, |
1631 | ttl = ip4_dst_hoplimit(&rt->dst); |
1632 | } |
1633 | |
1634 | - df = tnl_params->frag_off; |
1635 | - if (skb->protocol == htons(ETH_P_IP) && !tunnel->ignore_df) |
1636 | - df |= (inner_iph->frag_off&htons(IP_DF)); |
1637 | - |
1638 | max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) |
1639 | + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); |
1640 | if (max_headroom > dev->needed_headroom) |
1641 | diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c |
1642 | index ea32b113089d3..c2b7d43d92b0e 100644 |
1643 | --- a/net/ipv4/nexthop.c |
1644 | +++ b/net/ipv4/nexthop.c |
1645 | @@ -1157,8 +1157,10 @@ static struct nexthop *nexthop_create_group(struct net *net, |
1646 | return nh; |
1647 | |
1648 | out_no_nh: |
1649 | - for (; i >= 0; --i) |
1650 | + for (i--; i >= 0; --i) { |
1651 | + list_del(&nhg->nh_entries[i].nh_list); |
1652 | nexthop_put(nhg->nh_entries[i].nh); |
1653 | + } |
1654 | |
1655 | kfree(nhg->spare); |
1656 | kfree(nhg); |
1657 | diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
1658 | index 0646fce31b67a..906ac5e6d96cd 100644 |
1659 | --- a/net/ipv6/ip6_fib.c |
1660 | +++ b/net/ipv6/ip6_fib.c |
1661 | @@ -973,6 +973,8 @@ static void fib6_purge_rt(struct fib6_info *rt, struct fib6_node *fn, |
1662 | { |
1663 | struct fib6_table *table = rt->fib6_table; |
1664 | |
1665 | + /* Flush all cached dst in exception table */ |
1666 | + rt6_flush_exceptions(rt); |
1667 | fib6_drop_pcpu_from(rt, table); |
1668 | |
1669 | if (rt->nh && !list_empty(&rt->nh_list)) |
1670 | @@ -1839,9 +1841,6 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, |
1671 | net->ipv6.rt6_stats->fib_rt_entries--; |
1672 | net->ipv6.rt6_stats->fib_discarded_routes++; |
1673 | |
1674 | - /* Flush all cached dst in exception table */ |
1675 | - rt6_flush_exceptions(rt); |
1676 | - |
1677 | /* Reset round-robin state, if necessary */ |
1678 | if (rcu_access_pointer(fn->rr_ptr) == rt) |
1679 | fn->rr_ptr = NULL; |
1680 | diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c |
1681 | index bb311ccc6c487..c6787a1daa481 100644 |
1682 | --- a/tools/bpf/bpftool/net.c |
1683 | +++ b/tools/bpf/bpftool/net.c |
1684 | @@ -9,7 +9,6 @@ |
1685 | #include <unistd.h> |
1686 | #include <libbpf.h> |
1687 | #include <net/if.h> |
1688 | -#include <linux/if.h> |
1689 | #include <linux/rtnetlink.h> |
1690 | #include <linux/tc_act/tc_bpf.h> |
1691 | #include <sys/socket.h> |
1692 | diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh |
1693 | index 71a62e7e35b1c..3429767cadcdd 100755 |
1694 | --- a/tools/testing/selftests/net/pmtu.sh |
1695 | +++ b/tools/testing/selftests/net/pmtu.sh |
1696 | @@ -119,7 +119,15 @@ |
1697 | # - list_flush_ipv6_exception |
1698 | # Using the same topology as in pmtu_ipv6, create exceptions, and check |
1699 | # they are shown when listing exception caches, gone after flushing them |
1700 | - |
1701 | +# |
1702 | +# - pmtu_ipv4_route_change |
1703 | +# Use the same topology as in pmtu_ipv4, but issue a route replacement |
1704 | +# command and delete the corresponding device afterward. This tests for |
1705 | +# proper cleanup of the PMTU exceptions by the route replacement path. |
1706 | +# Device unregistration should complete successfully |
1707 | +# |
1708 | +# - pmtu_ipv6_route_change |
1709 | +# Same as above but with IPv6 |
1710 | |
1711 | # Kselftest framework requirement - SKIP code is 4. |
1712 | ksft_skip=4 |
1713 | @@ -161,7 +169,9 @@ tests=" |
1714 | cleanup_ipv4_exception ipv4: cleanup of cached exceptions 1 |
1715 | cleanup_ipv6_exception ipv6: cleanup of cached exceptions 1 |
1716 | list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 |
1717 | - list_flush_ipv6_exception ipv6: list and flush cached exceptions 1" |
1718 | + list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 |
1719 | + pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 |
1720 | + pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" |
1721 | |
1722 | NS_A="ns-A" |
1723 | NS_B="ns-B" |
1724 | @@ -1316,6 +1326,63 @@ test_list_flush_ipv6_exception() { |
1725 | return ${fail} |
1726 | } |
1727 | |
1728 | +test_pmtu_ipvX_route_change() { |
1729 | + family=${1} |
1730 | + |
1731 | + setup namespaces routing || return 2 |
1732 | + trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ |
1733 | + "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ |
1734 | + "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ |
1735 | + "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 |
1736 | + |
1737 | + if [ ${family} -eq 4 ]; then |
1738 | + ping=ping |
1739 | + dst1="${prefix4}.${b_r1}.1" |
1740 | + dst2="${prefix4}.${b_r2}.1" |
1741 | + gw="${prefix4}.${a_r1}.2" |
1742 | + else |
1743 | + ping=${ping6} |
1744 | + dst1="${prefix6}:${b_r1}::1" |
1745 | + dst2="${prefix6}:${b_r2}::1" |
1746 | + gw="${prefix6}:${a_r1}::2" |
1747 | + fi |
1748 | + |
1749 | + # Set up initial MTU values |
1750 | + mtu "${ns_a}" veth_A-R1 2000 |
1751 | + mtu "${ns_r1}" veth_R1-A 2000 |
1752 | + mtu "${ns_r1}" veth_R1-B 1400 |
1753 | + mtu "${ns_b}" veth_B-R1 1400 |
1754 | + |
1755 | + mtu "${ns_a}" veth_A-R2 2000 |
1756 | + mtu "${ns_r2}" veth_R2-A 2000 |
1757 | + mtu "${ns_r2}" veth_R2-B 1500 |
1758 | + mtu "${ns_b}" veth_B-R2 1500 |
1759 | + |
1760 | + # Create route exceptions |
1761 | + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst1} |
1762 | + run_cmd ${ns_a} ${ping} -q -M want -i 0.1 -w 1 -s 1800 ${dst2} |
1763 | + |
1764 | + # Check that exceptions have been created with the correct PMTU |
1765 | + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst1})" |
1766 | + check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 |
1767 | + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" ${dst2})" |
1768 | + check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 |
1769 | + |
1770 | + # Replace the route from A to R1 |
1771 | + run_cmd ${ns_a} ip route change default via ${gw} |
1772 | + |
1773 | + # Delete the device in A |
1774 | + run_cmd ${ns_a} ip link del "veth_A-R1" |
1775 | +} |
1776 | + |
1777 | +test_pmtu_ipv4_route_change() { |
1778 | + test_pmtu_ipvX_route_change 4 |
1779 | +} |
1780 | + |
1781 | +test_pmtu_ipv6_route_change() { |
1782 | + test_pmtu_ipvX_route_change 6 |
1783 | +} |
1784 | + |
1785 | usage() { |
1786 | echo |
1787 | echo "$0 [OPTIONS] [TEST]..." |