Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0155-5.4.56-all-fixes.patch



Revision 3556
Wed Aug 5 14:11:34 2020 UTC by niro
File size: 94772 bytes
-linux-5.4.56
1 diff --git a/Makefile b/Makefile
2 index 072fe0eaa740..c33fb4eebd4d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 55
10 +SUBLEVEL = 56
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
15 index 3f4bb44d85f0..669da3a33d82 100644
16 --- a/arch/arm/boot/dts/armada-38x.dtsi
17 +++ b/arch/arm/boot/dts/armada-38x.dtsi
18 @@ -339,7 +339,8 @@
19
20 comphy: phy@18300 {
21 compatible = "marvell,armada-380-comphy";
22 - reg = <0x18300 0x100>;
23 + reg-names = "comphy", "conf";
24 + reg = <0x18300 0x100>, <0x18460 4>;
25 #address-cells = <1>;
26 #size-cells = <0>;
27
28 diff --git a/arch/arm/boot/dts/imx6qdl-icore.dtsi b/arch/arm/boot/dts/imx6qdl-icore.dtsi
29 index 7814f1ef0804..fde56f98398d 100644
30 --- a/arch/arm/boot/dts/imx6qdl-icore.dtsi
31 +++ b/arch/arm/boot/dts/imx6qdl-icore.dtsi
32 @@ -384,7 +384,7 @@
33
34 pinctrl_usbotg: usbotggrp {
35 fsl,pins = <
36 - MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
37 + MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
38 >;
39 };
40
41 @@ -396,6 +396,7 @@
42 MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
43 MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
44 MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
45 + MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
46 >;
47 };
48
49 diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
50 index 315044ccd65f..e4719566133c 100644
51 --- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
52 +++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
53 @@ -99,7 +99,7 @@
54 &fec2 {
55 pinctrl-names = "default";
56 pinctrl-0 = <&pinctrl_enet2>;
57 - phy-mode = "rgmii";
58 + phy-mode = "rgmii-id";
59 phy-handle = <&ethphy0>;
60 fsl,magic-packet;
61 status = "okay";
62 diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
63 index f6972deb5e39..865528b134d8 100644
64 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
65 +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
66 @@ -213,7 +213,7 @@
67 &fec2 {
68 pinctrl-names = "default";
69 pinctrl-0 = <&pinctrl_enet2>;
70 - phy-mode = "rgmii";
71 + phy-mode = "rgmii-id";
72 phy-handle = <&ethphy2>;
73 status = "okay";
74 };
75 diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
76 index 4c268b70b735..e0a9b371c248 100644
77 --- a/arch/arm/boot/dts/sun4i-a10.dtsi
78 +++ b/arch/arm/boot/dts/sun4i-a10.dtsi
79 @@ -198,7 +198,7 @@
80 default-pool {
81 compatible = "shared-dma-pool";
82 size = <0x6000000>;
83 - alloc-ranges = <0x4a000000 0x6000000>;
84 + alloc-ranges = <0x40000000 0x10000000>;
85 reusable;
86 linux,cma-default;
87 };
88 diff --git a/arch/arm/boot/dts/sun5i.dtsi b/arch/arm/boot/dts/sun5i.dtsi
89 index 6befa236ba99..fd31da8fd311 100644
90 --- a/arch/arm/boot/dts/sun5i.dtsi
91 +++ b/arch/arm/boot/dts/sun5i.dtsi
92 @@ -117,7 +117,7 @@
93 default-pool {
94 compatible = "shared-dma-pool";
95 size = <0x6000000>;
96 - alloc-ranges = <0x4a000000 0x6000000>;
97 + alloc-ranges = <0x40000000 0x10000000>;
98 reusable;
99 linux,cma-default;
100 };
101 diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
102 index 8aebefd6accf..1f8b45f07e58 100644
103 --- a/arch/arm/boot/dts/sun7i-a20.dtsi
104 +++ b/arch/arm/boot/dts/sun7i-a20.dtsi
105 @@ -180,7 +180,7 @@
106 default-pool {
107 compatible = "shared-dma-pool";
108 size = <0x6000000>;
109 - alloc-ranges = <0x4a000000 0x6000000>;
110 + alloc-ranges = <0x40000000 0x10000000>;
111 reusable;
112 linux,cma-default;
113 };
114 diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
115 index b0c195e3a06d..5f95e4b911a0 100644
116 --- a/arch/arm/kernel/hw_breakpoint.c
117 +++ b/arch/arm/kernel/hw_breakpoint.c
118 @@ -680,6 +680,12 @@ static void disable_single_step(struct perf_event *bp)
119 arch_install_hw_breakpoint(bp);
120 }
121
122 +static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
123 + struct arch_hw_breakpoint *info)
124 +{
125 + return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
126 +}
127 +
128 static void watchpoint_handler(unsigned long addr, unsigned int fsr,
129 struct pt_regs *regs)
130 {
131 @@ -739,16 +745,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
132 }
133
134 pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
135 +
136 + /*
137 + * If we triggered a user watchpoint from a uaccess routine,
138 + * then handle the stepping ourselves since userspace really
139 + * can't help us with this.
140 + */
141 + if (watchpoint_fault_on_uaccess(regs, info))
142 + goto step;
143 +
144 perf_bp_event(wp, regs);
145
146 /*
147 - * If no overflow handler is present, insert a temporary
148 - * mismatch breakpoint so we can single-step over the
149 - * watchpoint trigger.
150 + * Defer stepping to the overflow handler if one is installed.
151 + * Otherwise, insert a temporary mismatch breakpoint so that
152 + * we can single-step over the watchpoint trigger.
153 */
154 - if (is_default_overflow_handler(wp))
155 - enable_single_step(wp, instruction_pointer(regs));
156 + if (!is_default_overflow_handler(wp))
157 + goto unlock;
158
159 +step:
160 + enable_single_step(wp, instruction_pointer(regs));
161 unlock:
162 rcu_read_unlock();
163 }
164 diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
165 index 12f0eb56a1cc..619db9b4c9d5 100644
166 --- a/arch/arm64/include/asm/alternative.h
167 +++ b/arch/arm64/include/asm/alternative.h
168 @@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
169 "663:\n\t" \
170 newinstr "\n" \
171 "664:\n\t" \
172 - ".previous\n\t" \
173 ".org . - (664b-663b) + (662b-661b)\n\t" \
174 - ".org . - (662b-661b) + (664b-663b)\n" \
175 + ".org . - (662b-661b) + (664b-663b)\n\t" \
176 + ".previous\n" \
177 ".endif\n"
178
179 #define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
180 diff --git a/arch/arm64/include/asm/checksum.h b/arch/arm64/include/asm/checksum.h
181 index d064a50deb5f..5665a3fc14be 100644
182 --- a/arch/arm64/include/asm/checksum.h
183 +++ b/arch/arm64/include/asm/checksum.h
184 @@ -19,16 +19,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
185 {
186 __uint128_t tmp;
187 u64 sum;
188 + int n = ihl; /* we want it signed */
189
190 tmp = *(const __uint128_t *)iph;
191 iph += 16;
192 - ihl -= 4;
193 + n -= 4;
194 tmp += ((tmp >> 64) | (tmp << 64));
195 sum = tmp >> 64;
196 do {
197 sum += *(const u32 *)iph;
198 iph += 4;
199 - } while (--ihl);
200 + } while (--n > 0);
201
202 sum += ((sum >> 32) | (sum << 32));
203 return csum_fold((__force u32)(sum >> 32));
204 diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
205 index ab5c215cf46c..068958575871 100644
206 --- a/arch/parisc/include/asm/cmpxchg.h
207 +++ b/arch/parisc/include/asm/cmpxchg.h
208 @@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
209 extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
210 unsigned int new_);
211 extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
212 +extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
213
214 /* don't worry...optimizer will get rid of most of this */
215 static inline unsigned long
216 @@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
217 #endif
218 case 4: return __cmpxchg_u32((unsigned int *)ptr,
219 (unsigned int)old, (unsigned int)new_);
220 + case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
221 }
222 __cmpxchg_called_with_bad_pointer();
223 return old;
224 diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c
225 index 70ffbcf889b8..2e4d1f05a926 100644
226 --- a/arch/parisc/lib/bitops.c
227 +++ b/arch/parisc/lib/bitops.c
228 @@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
229 _atomic_spin_unlock_irqrestore(ptr, flags);
230 return (unsigned long)prev;
231 }
232 +
233 +u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
234 +{
235 + unsigned long flags;
236 + u8 prev;
237 +
238 + _atomic_spin_lock_irqsave(ptr, flags);
239 + if ((prev = *ptr) == old)
240 + *ptr = new;
241 + _atomic_spin_unlock_irqrestore(ptr, flags);
242 + return prev;
243 +}
244 diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
245 index 319812923012..b1eb6a041118 100644
246 --- a/arch/riscv/mm/init.c
247 +++ b/arch/riscv/mm/init.c
248 @@ -115,9 +115,9 @@ void __init setup_bootmem(void)
249 /* Reserve from the start of the kernel to the end of the kernel */
250 memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
251
252 - set_max_mapnr(PFN_DOWN(mem_size));
253 max_pfn = PFN_DOWN(memblock_end_of_DRAM());
254 max_low_pfn = max_pfn;
255 + set_max_mapnr(max_low_pfn);
256
257 #ifdef CONFIG_BLK_DEV_INITRD
258 setup_initrd();
259 diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
260 index 22d968bfe9bb..d770da3f8b6f 100644
261 --- a/arch/sh/include/asm/pgalloc.h
262 +++ b/arch/sh/include/asm/pgalloc.h
263 @@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
264 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
265 extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
266 extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
267 +#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
268 #endif
269
270 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
271 @@ -33,13 +34,4 @@ do { \
272 tlb_remove_page((tlb), (pte)); \
273 } while (0)
274
275 -#if CONFIG_PGTABLE_LEVELS > 2
276 -#define __pmd_free_tlb(tlb, pmdp, addr) \
277 -do { \
278 - struct page *page = virt_to_page(pmdp); \
279 - pgtable_pmd_page_dtor(page); \
280 - tlb_remove_page((tlb), page); \
281 -} while (0);
282 -#endif
283 -
284 #endif /* __ASM_SH_PGALLOC_H */
285 diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S
286 index d31f66e82ce5..4a8ec9e40cc2 100644
287 --- a/arch/sh/kernel/entry-common.S
288 +++ b/arch/sh/kernel/entry-common.S
289 @@ -199,7 +199,7 @@ syscall_trace_entry:
290 mov.l @(OFF_R7,r15), r7 ! arg3
291 mov.l @(OFF_R3,r15), r3 ! syscall_nr
292 !
293 - mov.l 2f, r10 ! Number of syscalls
294 + mov.l 6f, r10 ! Number of syscalls
295 cmp/hs r10, r3
296 bf syscall_call
297 mov #-ENOSYS, r0
298 @@ -353,7 +353,7 @@ ENTRY(system_call)
299 tst r9, r8
300 bf syscall_trace_entry
301 !
302 - mov.l 2f, r8 ! Number of syscalls
303 + mov.l 6f, r8 ! Number of syscalls
304 cmp/hs r8, r3
305 bt syscall_badsys
306 !
307 @@ -392,7 +392,7 @@ syscall_exit:
308 #if !defined(CONFIG_CPU_SH2)
309 1: .long TRA
310 #endif
311 -2: .long NR_syscalls
312 +6: .long NR_syscalls
313 3: .long sys_call_table
314 7: .long do_syscall_trace_enter
315 8: .long do_syscall_trace_leave
316 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
317 index 519649ddf100..fe522691ac71 100644
318 --- a/arch/x86/kernel/i8259.c
319 +++ b/arch/x86/kernel/i8259.c
320 @@ -207,7 +207,7 @@ spurious_8259A_irq:
321 * lets ACK and report it. [once per IRQ]
322 */
323 if (!(spurious_irq_mask & irqmask)) {
324 - printk(KERN_DEBUG
325 + printk_deferred(KERN_DEBUG
326 "spurious 8259A interrupt: IRQ%d.\n", irq);
327 spurious_irq_mask |= irqmask;
328 }
329 diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
330 index 2d6898c2cb64..6d83b4b857e6 100644
331 --- a/arch/x86/kernel/stacktrace.c
332 +++ b/arch/x86/kernel/stacktrace.c
333 @@ -58,7 +58,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
334 * or a page fault), which can make frame pointers
335 * unreliable.
336 */
337 -
338 if (IS_ENABLED(CONFIG_FRAME_POINTER))
339 return -EINVAL;
340 }
341 @@ -81,10 +80,6 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
342 if (unwind_error(&state))
343 return -EINVAL;
344
345 - /* Success path for non-user tasks, i.e. kthreads and idle tasks */
346 - if (!(task->flags & (PF_KTHREAD | PF_IDLE)))
347 - return -EINVAL;
348 -
349 return 0;
350 }
351
352 diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
353 index aa0f39dc8129..187a86e0e753 100644
354 --- a/arch/x86/kernel/unwind_orc.c
355 +++ b/arch/x86/kernel/unwind_orc.c
356 @@ -431,8 +431,11 @@ bool unwind_next_frame(struct unwind_state *state)
357 /*
358 * Find the orc_entry associated with the text address.
359 *
360 - * Decrement call return addresses by one so they work for sibling
361 - * calls and calls to noreturn functions.
362 + * For a call frame (as opposed to a signal frame), state->ip points to
363 + * the instruction after the call. That instruction's stack layout
364 + * could be different from the call instruction's layout, for example
365 + * if the call was to a noreturn function. So get the ORC data for the
366 + * call instruction itself.
367 */
368 orc = orc_find(state->signal ? state->ip : state->ip - 1);
369 if (!orc) {
370 @@ -653,6 +656,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
371 state->sp = task->thread.sp;
372 state->bp = READ_ONCE_NOCHECK(frame->bp);
373 state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
374 + state->signal = (void *)state->ip == ret_from_fork;
375 }
376
377 if (get_stack_info((unsigned long *)state->sp, state->task,
378 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
379 index 5d2587005d0e..6920f1d3b66f 100644
380 --- a/arch/x86/kvm/lapic.c
381 +++ b/arch/x86/kvm/lapic.c
382 @@ -2085,7 +2085,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
383 {
384 struct kvm_lapic *apic = vcpu->arch.apic;
385
386 - if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
387 + if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
388 apic_lvtt_period(apic))
389 return;
390
391 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
392 index c8da8eb160da..422193690fd4 100644
393 --- a/drivers/crypto/ccp/ccp-ops.c
394 +++ b/drivers/crypto/ccp/ccp-ops.c
395 @@ -1777,8 +1777,9 @@ ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
396 LSB_ITEM_SIZE);
397 break;
398 default:
399 + kfree(hmac_buf);
400 ret = -EINVAL;
401 - goto e_ctx;
402 + goto e_data;
403 }
404
405 memset(&hmac_cmd, 0, sizeof(hmac_cmd));
406 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
407 index a73206784cba..2a7da26008a2 100644
408 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
409 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
410 @@ -667,9 +667,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
411 return n ? -EFAULT : 0;
412 }
413 case AMDGPU_INFO_DEV_INFO: {
414 - struct drm_amdgpu_info_device dev_info = {};
415 + struct drm_amdgpu_info_device dev_info;
416 uint64_t vm_size;
417
418 + memset(&dev_info, 0, sizeof(dev_info));
419 dev_info.device_id = dev->pdev->device;
420 dev_info.chip_rev = adev->rev_id;
421 dev_info.external_rev = adev->external_rev_id;
422 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
423 index b66554b40db4..3f744e72912f 100644
424 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
425 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
426 @@ -691,7 +691,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
427 tmp_str++;
428 while (isspace(*++tmp_str));
429
430 - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
431 + while (tmp_str[0]) {
432 + sub_str = strsep(&tmp_str, delimiter);
433 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
434 if (ret)
435 return -EINVAL;
436 @@ -882,7 +883,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
437 memcpy(buf_cpy, buf, bytes);
438 buf_cpy[bytes] = '\0';
439 tmp = buf_cpy;
440 - while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
441 + while (tmp[0]) {
442 + sub_str = strsep(&tmp, delimiter);
443 if (strlen(sub_str)) {
444 ret = kstrtol(sub_str, 0, &level);
445 if (ret)
446 @@ -1298,7 +1300,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
447 i++;
448 memcpy(buf_cpy, buf, count-i);
449 tmp_str = buf_cpy;
450 - while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
451 + while (tmp_str[0]) {
452 + sub_str = strsep(&tmp_str, delimiter);
453 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
454 if (ret) {
455 count = -EINVAL;
456 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
457 index c7d8edf450d3..6091194a3955 100644
458 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
459 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
460 @@ -7464,20 +7464,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
461 * the same resource. If we have a new DC context as part of
462 * the DM atomic state from validation we need to free it and
463 * retain the existing one instead.
464 + *
465 + * Furthermore, since the DM atomic state only contains the DC
466 + * context and can safely be annulled, we can free the state
467 + * and clear the associated private object now to free
468 + * some memory and avoid a possible use-after-free later.
469 */
470 - struct dm_atomic_state *new_dm_state, *old_dm_state;
471
472 - new_dm_state = dm_atomic_get_new_state(state);
473 - old_dm_state = dm_atomic_get_old_state(state);
474 + for (i = 0; i < state->num_private_objs; i++) {
475 + struct drm_private_obj *obj = state->private_objs[i].ptr;
476
477 - if (new_dm_state && old_dm_state) {
478 - if (new_dm_state->context)
479 - dc_release_state(new_dm_state->context);
480 + if (obj->funcs == adev->dm.atomic_obj.funcs) {
481 + int j = state->num_private_objs-1;
482
483 - new_dm_state->context = old_dm_state->context;
484 + dm_atomic_destroy_state(obj,
485 + state->private_objs[i].state);
486 +
487 + /* If i is not at the end of the array then the
488 + * last element needs to be moved to where i was
489 + * before the array can safely be truncated.
490 + */
491 + if (i != j)
492 + state->private_objs[i] =
493 + state->private_objs[j];
494
495 - if (old_dm_state->context)
496 - dc_retain_state(old_dm_state->context);
497 + state->private_objs[j].ptr = NULL;
498 + state->private_objs[j].state = NULL;
499 + state->private_objs[j].old_state = NULL;
500 + state->private_objs[j].new_state = NULL;
501 +
502 + state->num_private_objs = j;
503 + break;
504 + }
505 }
506 }
507
508 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
509 index 6854f5867d51..46ad14470d06 100644
510 --- a/drivers/gpu/drm/drm_gem.c
511 +++ b/drivers/gpu/drm/drm_gem.c
512 @@ -872,9 +872,6 @@ err:
513 * @file_priv: drm file-private structure
514 *
515 * Open an object using the global name, returning a handle and the size.
516 - *
517 - * This handle (of course) holds a reference to the object, so the object
518 - * will not go away until the handle is deleted.
519 */
520 int
521 drm_gem_open_ioctl(struct drm_device *dev, void *data,
522 @@ -899,14 +896,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
523
524 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
525 ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
526 - drm_gem_object_put_unlocked(obj);
527 if (ret)
528 - return ret;
529 + goto err;
530
531 args->handle = handle;
532 args->size = obj->size;
533
534 - return 0;
535 +err:
536 + drm_gem_object_put_unlocked(obj);
537 + return ret;
538 }
539
540 /**
541 diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
542 index a05e64e3d80b..4042f5b39765 100644
543 --- a/drivers/gpu/drm/drm_mipi_dbi.c
544 +++ b/drivers/gpu/drm/drm_mipi_dbi.c
545 @@ -937,7 +937,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
546 }
547 }
548
549 - tr.len = chunk;
550 + tr.len = chunk * 2;
551 len -= chunk;
552
553 ret = spi_sync(spi, &m);
554 diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
555 index 9d71ce15db05..a51d3b795770 100644
556 --- a/drivers/i2c/busses/i2c-cadence.c
557 +++ b/drivers/i2c/busses/i2c-cadence.c
558 @@ -377,10 +377,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
559 * Check for the message size against FIFO depth and set the
560 * 'hold bus' bit if it is greater than FIFO depth.
561 */
562 - if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
563 + if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
564 ctrl_reg |= CDNS_I2C_CR_HOLD;
565 - else
566 - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
567
568 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
569
570 @@ -437,11 +435,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
571 * Check for the message size against FIFO depth and set the
572 * 'hold bus' bit if it is greater than FIFO depth.
573 */
574 - if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
575 + if (id->send_count > CDNS_I2C_FIFO_DEPTH)
576 ctrl_reg |= CDNS_I2C_CR_HOLD;
577 - else
578 - ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
579 -
580 cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
581
582 /* Clear the interrupts in interrupt status register. */
583 diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
584 index 19556c62c7ea..d14ad523f96c 100644
585 --- a/drivers/infiniband/sw/rdmavt/qp.c
586 +++ b/drivers/infiniband/sw/rdmavt/qp.c
587 @@ -898,8 +898,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
588 qp->s_tail_ack_queue = 0;
589 qp->s_acked_ack_queue = 0;
590 qp->s_num_rd_atomic = 0;
591 - if (qp->r_rq.kwq)
592 - qp->r_rq.kwq->count = qp->r_rq.size;
593 qp->r_sge.num_sge = 0;
594 atomic_set(&qp->s_reserved_used, 0);
595 }
596 @@ -2352,31 +2350,6 @@ bad_lkey:
597 return 0;
598 }
599
600 -/**
601 - * get_count - count numbers of request work queue entries
602 - * in circular buffer
603 - * @rq: data structure for request queue entry
604 - * @tail: tail indices of the circular buffer
605 - * @head: head indices of the circular buffer
606 - *
607 - * Return - total number of entries in the circular buffer
608 - */
609 -static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
610 -{
611 - u32 count;
612 -
613 - count = head;
614 -
615 - if (count >= rq->size)
616 - count = 0;
617 - if (count < tail)
618 - count += rq->size - tail;
619 - else
620 - count -= tail;
621 -
622 - return count;
623 -}
624 -
625 /**
626 * get_rvt_head - get head indices of the circular buffer
627 * @rq: data structure for request queue entry
628 @@ -2451,7 +2424,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
629
630 if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
631 head = get_rvt_head(rq, ip);
632 - kwq->count = get_count(rq, tail, head);
633 + kwq->count = rvt_get_rq_count(rq, head, tail);
634 }
635 if (unlikely(kwq->count == 0)) {
636 ret = 0;
637 @@ -2486,7 +2459,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
638 * the number of remaining WQEs.
639 */
640 if (kwq->count < srq->limit) {
641 - kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
642 + kwq->count =
643 + rvt_get_rq_count(rq,
644 + get_rvt_head(rq, ip), tail);
645 if (kwq->count < srq->limit) {
646 struct ib_event ev;
647
648 diff --git a/drivers/infiniband/sw/rdmavt/rc.c b/drivers/infiniband/sw/rdmavt/rc.c
649 index 890d7b760d2e..27415185d862 100644
650 --- a/drivers/infiniband/sw/rdmavt/rc.c
651 +++ b/drivers/infiniband/sw/rdmavt/rc.c
652 @@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
653 * not atomic, which is OK, since the fuzziness is
654 * resolved as further ACKs go out.
655 */
656 - credits = head - tail;
657 - if ((int)credits < 0)
658 - credits += qp->r_rq.size;
659 + credits = rvt_get_rq_count(&qp->r_rq, head, tail);
660 }
661 /*
662 * Binary search the credit table to find the code to
663 diff --git a/drivers/media/pci/cx23885/cx23888-ir.c b/drivers/media/pci/cx23885/cx23888-ir.c
664 index e880afe37f15..d59ca3601785 100644
665 --- a/drivers/media/pci/cx23885/cx23888-ir.c
666 +++ b/drivers/media/pci/cx23885/cx23888-ir.c
667 @@ -1167,8 +1167,11 @@ int cx23888_ir_probe(struct cx23885_dev *dev)
668 return -ENOMEM;
669
670 spin_lock_init(&state->rx_kfifo_lock);
671 - if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL))
672 + if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE,
673 + GFP_KERNEL)) {
674 + kfree(state);
675 return -ENOMEM;
676 + }
677
678 state->dev = dev;
679 sd = &state->sd;
680 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
681 index 506170fe3a8b..049f1bbe27ab 100644
682 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
683 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
684 @@ -2441,6 +2441,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
685 txq_info = adap->sge.uld_txq_info[tx_uld_type];
686 if (unlikely(!txq_info)) {
687 WARN_ON(true);
688 + kfree_skb(skb);
689 return NET_XMIT_DROP;
690 }
691
692 diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
693 index 2814b96751b4..01ae113f122a 100644
694 --- a/drivers/net/ethernet/cortina/gemini.c
695 +++ b/drivers/net/ethernet/cortina/gemini.c
696 @@ -2445,6 +2445,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
697 port->reset = devm_reset_control_get_exclusive(dev, NULL);
698 if (IS_ERR(port->reset)) {
699 dev_err(dev, "no reset\n");
700 + clk_disable_unprepare(port->pclk);
701 return PTR_ERR(port->reset);
702 }
703 reset_control_reset(port->reset);
704 @@ -2500,8 +2501,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
705 IRQF_SHARED,
706 port_names[port->id],
707 port);
708 - if (ret)
709 + if (ret) {
710 + clk_disable_unprepare(port->pclk);
711 return ret;
712 + }
713
714 ret = register_netdev(netdev);
715 if (!ret) {
716 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
717 index 506381224559..a8ce6ca0f508 100644
718 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
719 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
720 @@ -4014,8 +4014,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
721 return;
722
723 if (linkup) {
724 - netif_carrier_on(netdev);
725 netif_tx_wake_all_queues(netdev);
726 + netif_carrier_on(netdev);
727 if (netif_msg_link(handle))
728 netdev_info(netdev, "link up\n");
729 } else {
730 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
731 index d4652dea4569..6c3d13110993 100644
732 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
733 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
734 @@ -5627,9 +5627,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
735 /* to avoid rule conflict, when user configure rule by ethtool,
736 * we need to clear all arfs rules
737 */
738 + spin_lock_bh(&hdev->fd_rule_lock);
739 hclge_clear_arfs_rules(handle);
740
741 - spin_lock_bh(&hdev->fd_rule_lock);
742 ret = hclge_fd_config_rule(hdev, rule);
743
744 spin_unlock_bh(&hdev->fd_rule_lock);
745 @@ -5672,6 +5672,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
746 return ret;
747 }
748
749 +/* make sure being called after lock up with fd_rule_lock */
750 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
751 bool clear_list)
752 {
753 @@ -5684,7 +5685,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
754 if (!hnae3_dev_fd_supported(hdev))
755 return;
756
757 - spin_lock_bh(&hdev->fd_rule_lock);
758 for_each_set_bit(location, hdev->fd_bmap,
759 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
760 hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
761 @@ -5701,8 +5701,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
762 bitmap_zero(hdev->fd_bmap,
763 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
764 }
765 -
766 - spin_unlock_bh(&hdev->fd_rule_lock);
767 }
768
769 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
770 @@ -6069,7 +6067,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
771 u16 flow_id, struct flow_keys *fkeys)
772 {
773 struct hclge_vport *vport = hclge_get_vport(handle);
774 - struct hclge_fd_rule_tuples new_tuples;
775 + struct hclge_fd_rule_tuples new_tuples = {};
776 struct hclge_dev *hdev = vport->back;
777 struct hclge_fd_rule *rule;
778 u16 tmp_queue_id;
779 @@ -6079,20 +6077,18 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
780 if (!hnae3_dev_fd_supported(hdev))
781 return -EOPNOTSUPP;
782
783 - memset(&new_tuples, 0, sizeof(new_tuples));
784 - hclge_fd_get_flow_tuples(fkeys, &new_tuples);
785 -
786 - spin_lock_bh(&hdev->fd_rule_lock);
787 -
788 /* when there is already fd rule existed add by user,
789 * arfs should not work
790 */
791 + spin_lock_bh(&hdev->fd_rule_lock);
792 if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
793 spin_unlock_bh(&hdev->fd_rule_lock);
794
795 return -EOPNOTSUPP;
796 }
797
798 + hclge_fd_get_flow_tuples(fkeys, &new_tuples);
799 +
800 /* check is there flow director filter existed for this flow,
801 * if not, create a new filter for it;
802 * if filter exist with different queue id, modify the filter;
803 @@ -6177,6 +6173,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
804 #endif
805 }
806
807 +/* make sure being called after lock up with fd_rule_lock */
808 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
809 {
810 #ifdef CONFIG_RFS_ACCEL
811 @@ -6221,10 +6218,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
812
813 hdev->fd_en = enable;
814 clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
815 - if (!enable)
816 +
817 + if (!enable) {
818 + spin_lock_bh(&hdev->fd_rule_lock);
819 hclge_del_all_fd_entries(handle, clear);
820 - else
821 + spin_unlock_bh(&hdev->fd_rule_lock);
822 + } else {
823 hclge_restore_fd_entries(handle);
824 + }
825 }
826
827 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
828 @@ -6678,8 +6679,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
829 int i;
830
831 set_bit(HCLGE_STATE_DOWN, &hdev->state);
832 -
833 + spin_lock_bh(&hdev->fd_rule_lock);
834 hclge_clear_arfs_rules(handle);
835 + spin_unlock_bh(&hdev->fd_rule_lock);
836
837 /* If it is not PF reset, the firmware will disable the MAC,
838 * so it only need to stop phy here.
839 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
840 index d58597360699..2d20a48f0ba0 100644
841 --- a/drivers/net/ethernet/ibm/ibmvnic.c
842 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
843 @@ -3086,7 +3086,7 @@ req_rx_irq_failed:
844 req_tx_irq_failed:
845 for (j = 0; j < i; j++) {
846 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
847 - irq_dispose_mapping(adapter->rx_scrq[j]->irq);
848 + irq_dispose_mapping(adapter->tx_scrq[j]->irq);
849 }
850 release_sub_crqs(adapter, 1);
851 return rc;
852 diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
853 index 87c2e8de6102..942646fb2256 100644
854 --- a/drivers/net/ethernet/mellanox/mlx4/main.c
855 +++ b/drivers/net/ethernet/mellanox/mlx4/main.c
856 @@ -4354,12 +4354,14 @@ end:
857 static void mlx4_shutdown(struct pci_dev *pdev)
858 {
859 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
860 + struct mlx4_dev *dev = persist->dev;
861
862 mlx4_info(persist->dev, "mlx4_shutdown was called\n");
863 mutex_lock(&persist->interface_state_mutex);
864 if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
865 mlx4_unload_one(pdev);
866 mutex_unlock(&persist->interface_state_mutex);
867 + mlx4_pci_disable_device(dev);
868 }
869
870 static const struct pci_error_handlers mlx4_err_handler = {
871 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
872 index c133beb6a7a5..ee0d78f801af 100644
873 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
874 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
875 @@ -432,7 +432,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
876 err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
877 &rq->wq_ctrl);
878 if (err)
879 - return err;
880 + goto err_rq_wq_destroy;
881
882 rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
883
884 @@ -485,7 +485,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
885 err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
886 &rq->wq_ctrl);
887 if (err)
888 - return err;
889 + goto err_rq_wq_destroy;
890
891 rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
892
893 @@ -3038,6 +3038,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
894 priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
895 }
896
897 +static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
898 + enum mlx5_port_status state)
899 +{
900 + struct mlx5_eswitch *esw = mdev->priv.eswitch;
901 + int vport_admin_state;
902 +
903 + mlx5_set_port_admin_status(mdev, state);
904 +
905 + if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
906 + return;
907 +
908 + if (state == MLX5_PORT_UP)
909 + vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
910 + else
911 + vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
912 +
913 + mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
914 +}
915 +
916 int mlx5e_open_locked(struct net_device *netdev)
917 {
918 struct mlx5e_priv *priv = netdev_priv(netdev);
919 @@ -3070,7 +3089,7 @@ int mlx5e_open(struct net_device *netdev)
920 mutex_lock(&priv->state_lock);
921 err = mlx5e_open_locked(netdev);
922 if (!err)
923 - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
924 + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
925 mutex_unlock(&priv->state_lock);
926
927 if (mlx5_vxlan_allowed(priv->mdev->vxlan))
928 @@ -3107,7 +3126,7 @@ int mlx5e_close(struct net_device *netdev)
929 return -ENODEV;
930
931 mutex_lock(&priv->state_lock);
932 - mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
933 + mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
934 err = mlx5e_close_locked(netdev);
935 mutex_unlock(&priv->state_lock);
936
937 @@ -5172,7 +5191,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
938
939 /* Marking the link as currently not needed by the Driver */
940 if (!netif_running(netdev))
941 - mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
942 + mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
943
944 mlx5e_set_netdev_mtu_boundaries(priv);
945 mlx5e_set_dev_port_mtu(priv);
946 @@ -5356,6 +5375,8 @@ err_cleanup_tx:
947 profile->cleanup_tx(priv);
948
949 out:
950 + set_bit(MLX5E_STATE_DESTROYING, &priv->state);
951 + cancel_work_sync(&priv->update_stats_work);
952 return err;
953 }
954
955 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
956 index 9b232ef36d53..88b51f64a64e 100644
957 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
958 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
959 @@ -1736,6 +1736,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
960 INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
961 mlx5e_tc_reoffload_flows_work);
962
963 + mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
964 + 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
965 mlx5_lag_add(mdev, netdev);
966 priv->events_nb.notifier_call = uplink_rep_async_event;
967 mlx5_notifier_register(mdev, &priv->events_nb);
968 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
969 index c6ed4b7f4f97..8e6ab8201939 100644
970 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
971 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
972 @@ -1919,7 +1919,7 @@ abort:
973 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
974 mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
975 }
976 -
977 + esw_destroy_tsar(esw);
978 return err;
979 }
980
981 @@ -2094,6 +2094,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
982 u16 vport, int link_state)
983 {
984 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
985 + int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
986 + int other_vport = 1;
987 int err = 0;
988
989 if (!ESW_ALLOWED(esw))
990 @@ -2101,15 +2103,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
991 if (IS_ERR(evport))
992 return PTR_ERR(evport);
993
994 + if (vport == MLX5_VPORT_UPLINK) {
995 + opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
996 + other_vport = 0;
997 + vport = 0;
998 + }
999 mutex_lock(&esw->state_lock);
1000
1001 - err = mlx5_modify_vport_admin_state(esw->dev,
1002 - MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1003 - vport, 1, link_state);
1004 + err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
1005 if (err) {
1006 - mlx5_core_warn(esw->dev,
1007 - "Failed to set vport %d link state, err = %d",
1008 - vport, err);
1009 + mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
1010 + vport, opmod, err);
1011 goto unlock;
1012 }
1013
1014 @@ -2151,8 +2155,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1015 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
1016 int err = 0;
1017
1018 - if (!ESW_ALLOWED(esw))
1019 - return -EPERM;
1020 if (IS_ERR(evport))
1021 return PTR_ERR(evport);
1022 if (vlan > 4095 || qos > 7)
1023 @@ -2180,6 +2182,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
1024 u8 set_flags = 0;
1025 int err;
1026
1027 + if (!ESW_ALLOWED(esw))
1028 + return -EPERM;
1029 +
1030 if (vlan || qos)
1031 set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
1032
1033 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1034 index 6bd6f5895244..0ddbae1e64fa 100644
1035 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1036 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
1037 @@ -606,6 +606,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode) { ret
1038 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
1039 static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
1040 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
1041 +static inline
1042 +int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
1043 static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
1044 {
1045 return ERR_PTR(-EOPNOTSUPP);
1046 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1047 index 43f97601b500..75fc283cacc3 100644
1048 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1049 +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1050 @@ -388,10 +388,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
1051 return 0;
1052 }
1053
1054 +enum {
1055 + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
1056 + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
1057 +};
1058 +
1059 static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
1060 enum ptp_pin_function func, unsigned int chan)
1061 {
1062 - return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
1063 + struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
1064 + ptp_info);
1065 +
1066 + switch (func) {
1067 + case PTP_PF_NONE:
1068 + return 0;
1069 + case PTP_PF_EXTTS:
1070 + return !(clock->pps_info.pin_caps[pin] &
1071 + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
1072 + case PTP_PF_PEROUT:
1073 + return !(clock->pps_info.pin_caps[pin] &
1074 + MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
1075 + default:
1076 + return -EOPNOTSUPP;
1077 + }
1078 +
1079 + return -EOPNOTSUPP;
1080 }
1081
1082 static const struct ptp_clock_info mlx5_ptp_clock_info = {
1083 diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
1084 index 1b204ce30ee4..c7c3fc7d1126 100644
1085 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c
1086 +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
1087 @@ -1577,7 +1577,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1088 err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1089 bulk_list, cb, cb_priv, tid);
1090 if (err) {
1091 - kfree(trans);
1092 + kfree_rcu(trans, rcu);
1093 return err;
1094 }
1095 return 0;
1096 @@ -1802,11 +1802,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1097 break;
1098 }
1099 }
1100 - rcu_read_unlock();
1101 - if (!found)
1102 + if (!found) {
1103 + rcu_read_unlock();
1104 goto drop;
1105 + }
1106
1107 rxl->func(skb, local_port, rxl_item->priv);
1108 + rcu_read_unlock();
1109 return;
1110
1111 drop:
1112 diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
1113 index 2761f3a3ae50..56f285985b43 100644
1114 --- a/drivers/net/ethernet/ni/nixge.c
1115 +++ b/drivers/net/ethernet/ni/nixge.c
1116 @@ -1318,19 +1318,21 @@ static int nixge_probe(struct platform_device *pdev)
1117 netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
1118 err = nixge_of_get_resources(pdev);
1119 if (err)
1120 - return err;
1121 + goto free_netdev;
1122 __nixge_hw_set_mac_address(ndev);
1123
1124 priv->tx_irq = platform_get_irq_byname(pdev, "tx");
1125 if (priv->tx_irq < 0) {
1126 netdev_err(ndev, "could not find 'tx' irq");
1127 - return priv->tx_irq;
1128 + err = priv->tx_irq;
1129 + goto free_netdev;
1130 }
1131
1132 priv->rx_irq = platform_get_irq_byname(pdev, "rx");
1133 if (priv->rx_irq < 0) {
1134 netdev_err(ndev, "could not find 'rx' irq");
1135 - return priv->rx_irq;
1136 + err = priv->rx_irq;
1137 + goto free_netdev;
1138 }
1139
1140 priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
1141 diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
1142 index 8d106063e927..666e43748a5f 100644
1143 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c
1144 +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
1145 @@ -1180,7 +1180,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
1146 index, attn_bits, attn_acks, asserted_bits,
1147 deasserted_bits, p_sb_attn_sw->known_attn);
1148 } else if (asserted_bits == 0x100) {
1149 - DP_INFO(p_hwfn, "MFW indication via attention\n");
1150 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1151 + "MFW indication via attention\n");
1152 } else {
1153 DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
1154 "MFW indication [deassertion]\n");
1155 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
1156 index 3f165c137236..30cdabf64ccc 100644
1157 --- a/drivers/net/ethernet/renesas/ravb_main.c
1158 +++ b/drivers/net/ethernet/renesas/ravb_main.c
1159 @@ -1444,6 +1444,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1160 struct ravb_private *priv = container_of(work, struct ravb_private,
1161 work);
1162 struct net_device *ndev = priv->ndev;
1163 + int error;
1164
1165 netif_tx_stop_all_queues(ndev);
1166
1167 @@ -1452,15 +1453,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1168 ravb_ptp_stop(ndev);
1169
1170 /* Wait for DMA stopping */
1171 - ravb_stop_dma(ndev);
1172 + if (ravb_stop_dma(ndev)) {
1173 + /* If ravb_stop_dma() fails, the hardware is still operating
1174 + * for TX and/or RX. So, this should not call the following
1175 + * functions because ravb_dmac_init() is possible to fail too.
1176 + * Also, this should not retry ravb_stop_dma() again and again
1177 + * here because it's possible to wait forever. So, this just
1178 + * re-enables the TX and RX and skip the following
1179 + * re-initialization procedure.
1180 + */
1181 + ravb_rcv_snd_enable(ndev);
1182 + goto out;
1183 + }
1184
1185 ravb_ring_free(ndev, RAVB_BE);
1186 ravb_ring_free(ndev, RAVB_NC);
1187
1188 /* Device init */
1189 - ravb_dmac_init(ndev);
1190 + error = ravb_dmac_init(ndev);
1191 + if (error) {
1192 + /* If ravb_dmac_init() fails, descriptors are freed. So, this
1193 + * should return here to avoid re-enabling the TX and RX in
1194 + * ravb_emac_init().
1195 + */
1196 + netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
1197 + __func__, error);
1198 + return;
1199 + }
1200 ravb_emac_init(ndev);
1201
1202 +out:
1203 /* Initialise PTP Clock driver */
1204 if (priv->chip_id == RCAR_GEN2)
1205 ravb_ptp_init(ndev, priv->pdev);
1206 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
1207 index 74849da031fa..66a8b835aa94 100644
1208 --- a/drivers/net/usb/hso.c
1209 +++ b/drivers/net/usb/hso.c
1210 @@ -1389,8 +1389,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
1211 unsigned long flags;
1212
1213 if (old)
1214 - hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
1215 - tty->termios.c_cflag, old->c_cflag);
1216 + hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
1217 + (unsigned int)tty->termios.c_cflag,
1218 + (unsigned int)old->c_cflag);
1219
1220 /* the actual setup */
1221 spin_lock_irqsave(&serial->serial_lock, flags);
1222 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1223 index 0170a441208a..1da99abc6ed1 100644
1224 --- a/drivers/net/usb/lan78xx.c
1225 +++ b/drivers/net/usb/lan78xx.c
1226 @@ -3767,6 +3767,11 @@ static int lan78xx_probe(struct usb_interface *intf,
1227 netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
1228 netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
1229
1230 + if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
1231 + ret = -ENODEV;
1232 + goto out3;
1233 + }
1234 +
1235 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
1236 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
1237 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
1238 @@ -3791,6 +3796,7 @@ static int lan78xx_probe(struct usb_interface *intf,
1239 usb_fill_int_urb(dev->urb_intr, dev->udev,
1240 dev->pipe_intr, buf, maxp,
1241 intr_complete, dev, period);
1242 + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
1243 }
1244 }
1245
1246 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1247 index 03434db36b5c..b49b6f0cee50 100644
1248 --- a/drivers/net/vxlan.c
1249 +++ b/drivers/net/vxlan.c
1250 @@ -2863,8 +2863,10 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
1251 if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
1252 continue;
1253 /* the all_zeros_mac entry is deleted at vxlan_uninit */
1254 - if (!is_zero_ether_addr(f->eth_addr))
1255 - vxlan_fdb_destroy(vxlan, f, true, true);
1256 + if (is_zero_ether_addr(f->eth_addr) &&
1257 + f->vni == vxlan->cfg.vni)
1258 + continue;
1259 + vxlan_fdb_destroy(vxlan, f, true, true);
1260 }
1261 spin_unlock_bh(&vxlan->hash_lock[h]);
1262 }
1263 diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
1264 index bf78073ee7fd..e2a83f4cd3bb 100644
1265 --- a/drivers/net/wan/hdlc_x25.c
1266 +++ b/drivers/net/wan/hdlc_x25.c
1267 @@ -62,8 +62,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
1268 {
1269 unsigned char *ptr;
1270
1271 - if (skb_cow(skb, 1))
1272 + if (skb_cow(skb, 1)) {
1273 + kfree_skb(skb);
1274 return NET_RX_DROP;
1275 + }
1276
1277 skb_push(skb, 1);
1278 skb_reset_network_header(skb);
1279 diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
1280 index 5a6f27298b90..134e4dd916c1 100644
1281 --- a/drivers/net/wan/lapbether.c
1282 +++ b/drivers/net/wan/lapbether.c
1283 @@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
1284 {
1285 unsigned char *ptr;
1286
1287 - skb_push(skb, 1);
1288 -
1289 - if (skb_cow(skb, 1))
1290 + if (skb_cow(skb, 1)) {
1291 + kfree_skb(skb);
1292 return NET_RX_DROP;
1293 + }
1294 +
1295 + skb_push(skb, 1);
1296
1297 ptr = skb->data;
1298 *ptr = X25_IFACE_DATA;
1299 diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
1300 index c415e971735b..004af89a02b8 100644
1301 --- a/drivers/net/wireless/ath/ath10k/hw.c
1302 +++ b/drivers/net/wireless/ath/ath10k/hw.c
1303 @@ -1145,6 +1145,7 @@ static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd)
1304 const struct ath10k_hw_ops qca99x0_ops = {
1305 .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
1306 .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error,
1307 + .is_rssi_enable = ath10k_htt_tx_rssi_enable,
1308 };
1309
1310 const struct ath10k_hw_ops qca6174_ops = {
1311 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1312 index 482c6c8b0fb7..88280057e032 100644
1313 --- a/drivers/net/xen-netfront.c
1314 +++ b/drivers/net/xen-netfront.c
1315 @@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
1316 MODULE_PARM_DESC(max_queues,
1317 "Maximum number of queues per virtual interface");
1318
1319 +#define XENNET_TIMEOUT (5 * HZ)
1320 +
1321 static const struct ethtool_ops xennet_ethtool_ops;
1322
1323 struct netfront_cb {
1324 @@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1325
1326 netif_carrier_off(netdev);
1327
1328 - xenbus_switch_state(dev, XenbusStateInitialising);
1329 - wait_event(module_wq,
1330 - xenbus_read_driver_state(dev->otherend) !=
1331 - XenbusStateClosed &&
1332 - xenbus_read_driver_state(dev->otherend) !=
1333 - XenbusStateUnknown);
1334 + do {
1335 + xenbus_switch_state(dev, XenbusStateInitialising);
1336 + err = wait_event_timeout(module_wq,
1337 + xenbus_read_driver_state(dev->otherend) !=
1338 + XenbusStateClosed &&
1339 + xenbus_read_driver_state(dev->otherend) !=
1340 + XenbusStateUnknown, XENNET_TIMEOUT);
1341 + } while (!err);
1342 +
1343 return netdev;
1344
1345 exit:
1346 @@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
1347 };
1348 #endif /* CONFIG_SYSFS */
1349
1350 -static int xennet_remove(struct xenbus_device *dev)
1351 +static void xennet_bus_close(struct xenbus_device *dev)
1352 {
1353 - struct netfront_info *info = dev_get_drvdata(&dev->dev);
1354 -
1355 - dev_dbg(&dev->dev, "%s\n", dev->nodename);
1356 + int ret;
1357
1358 - if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
1359 + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
1360 + return;
1361 + do {
1362 xenbus_switch_state(dev, XenbusStateClosing);
1363 - wait_event(module_wq,
1364 - xenbus_read_driver_state(dev->otherend) ==
1365 - XenbusStateClosing ||
1366 - xenbus_read_driver_state(dev->otherend) ==
1367 - XenbusStateUnknown);
1368 + ret = wait_event_timeout(module_wq,
1369 + xenbus_read_driver_state(dev->otherend) ==
1370 + XenbusStateClosing ||
1371 + xenbus_read_driver_state(dev->otherend) ==
1372 + XenbusStateClosed ||
1373 + xenbus_read_driver_state(dev->otherend) ==
1374 + XenbusStateUnknown,
1375 + XENNET_TIMEOUT);
1376 + } while (!ret);
1377 +
1378 + if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
1379 + return;
1380
1381 + do {
1382 xenbus_switch_state(dev, XenbusStateClosed);
1383 - wait_event(module_wq,
1384 - xenbus_read_driver_state(dev->otherend) ==
1385 - XenbusStateClosed ||
1386 - xenbus_read_driver_state(dev->otherend) ==
1387 - XenbusStateUnknown);
1388 - }
1389 + ret = wait_event_timeout(module_wq,
1390 + xenbus_read_driver_state(dev->otherend) ==
1391 + XenbusStateClosed ||
1392 + xenbus_read_driver_state(dev->otherend) ==
1393 + XenbusStateUnknown,
1394 + XENNET_TIMEOUT);
1395 + } while (!ret);
1396 +}
1397 +
1398 +static int xennet_remove(struct xenbus_device *dev)
1399 +{
1400 + struct netfront_info *info = dev_get_drvdata(&dev->dev);
1401
1402 + xennet_bus_close(dev);
1403 xennet_disconnect_backend(info);
1404
1405 if (info->netdev->reg_state == NETREG_REGISTERED)
1406 diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
1407 index 91d4d5b28a7d..ba6c486d6465 100644
1408 --- a/drivers/nfc/s3fwrn5/core.c
1409 +++ b/drivers/nfc/s3fwrn5/core.c
1410 @@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
1411 case S3FWRN5_MODE_FW:
1412 return s3fwrn5_fw_recv_frame(ndev, skb);
1413 default:
1414 + kfree_skb(skb);
1415 return -ENODEV;
1416 }
1417 }
1418 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
1419 index 7900814355c2..53e113a18a54 100644
1420 --- a/drivers/nvme/host/tcp.c
1421 +++ b/drivers/nvme/host/tcp.c
1422 @@ -1319,6 +1319,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
1423 }
1424 }
1425
1426 + /* Set 10 seconds timeout for icresp recvmsg */
1427 + queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1428 +
1429 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1430 if (!qid)
1431 n = 0;
1432 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1433 index a1ec8a1977d3..4ac4b28e0ebb 100644
1434 --- a/drivers/pci/quirks.c
1435 +++ b/drivers/pci/quirks.c
1436 @@ -2330,6 +2330,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
1437 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
1438 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
1439
1440 +static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev)
1441 +{
1442 + pci_info(dev, "Disabling ASPM L0s/L1\n");
1443 + pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
1444 +}
1445 +
1446 +/*
1447 + * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the
1448 + * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected;
1449 + * disable both L0s and L1 for now to be safe.
1450 + */
1451 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1);
1452 +
1453 /*
1454 * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain
1455 * Link bit cleared after starting the link retrain process to allow this
1456 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1457 index 206c9f53e9e7..e6944e1cba2b 100644
1458 --- a/drivers/scsi/scsi_lib.c
1459 +++ b/drivers/scsi/scsi_lib.c
1460 @@ -568,6 +568,15 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
1461 scsi_del_cmd_from_list(cmd);
1462 }
1463
1464 +static void scsi_run_queue_async(struct scsi_device *sdev)
1465 +{
1466 + if (scsi_target(sdev)->single_lun ||
1467 + !list_empty(&sdev->host->starved_list))
1468 + kblockd_schedule_work(&sdev->requeue_work);
1469 + else
1470 + blk_mq_run_hw_queues(sdev->request_queue, true);
1471 +}
1472 +
1473 /* Returns false when no more bytes to process, true if there are more */
1474 static bool scsi_end_request(struct request *req, blk_status_t error,
1475 unsigned int bytes)
1476 @@ -612,11 +621,7 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
1477
1478 __blk_mq_end_request(req, error);
1479
1480 - if (scsi_target(sdev)->single_lun ||
1481 - !list_empty(&sdev->host->starved_list))
1482 - kblockd_schedule_work(&sdev->requeue_work);
1483 - else
1484 - blk_mq_run_hw_queues(q, true);
1485 + scsi_run_queue_async(sdev);
1486
1487 percpu_ref_put(&q->q_usage_counter);
1488 return false;
1489 @@ -1729,6 +1734,7 @@ out_put_budget:
1490 */
1491 if (req->rq_flags & RQF_DONTPREP)
1492 scsi_mq_uninit_cmd(cmd);
1493 + scsi_run_queue_async(sdev);
1494 break;
1495 }
1496 return ret;
1497 diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
1498 index 88ce114790d7..f63f84a25725 100644
1499 --- a/drivers/vhost/scsi.c
1500 +++ b/drivers/vhost/scsi.c
1501 @@ -1215,7 +1215,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
1502 continue;
1503 }
1504
1505 - switch (v_req.type) {
1506 + switch (vhost32_to_cpu(vq, v_req.type)) {
1507 case VIRTIO_SCSI_T_TMF:
1508 vc.req = &v_req.tmf;
1509 vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
1510 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
1511 index 7aaf150f89ba..1e444826a66e 100644
1512 --- a/drivers/virtio/virtio_balloon.c
1513 +++ b/drivers/virtio/virtio_balloon.c
1514 @@ -529,10 +529,14 @@ static int init_vqs(struct virtio_balloon *vb)
1515 static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
1516 {
1517 if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
1518 - &vb->config_read_bitmap))
1519 + &vb->config_read_bitmap)) {
1520 virtio_cread(vb->vdev, struct virtio_balloon_config,
1521 free_page_report_cmd_id,
1522 &vb->cmd_id_received_cache);
1523 + /* Legacy balloon config space is LE, unlike all other devices. */
1524 + if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
1525 + vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache);
1526 + }
1527
1528 return vb->cmd_id_received_cache;
1529 }
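
A note on the balloon hunk above: legacy (pre-VIRTIO 1.0) devices keep their config space little-endian regardless of guest byte order, so when VIRTIO_F_VERSION_1 was not negotiated the raw field needs an explicit LE-to-host conversion. Below is a minimal userspace sketch of that conversion; it uses glibc's le32toh() in place of the kernel's le32_to_cpu() and a made-up field value, and is illustration only, not part of the patch.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 32-bit field as it sits in a legacy (little-endian) config space. */
        uint32_t raw = 0x12345678u;

        /* le32toh() is a no-op on little-endian hosts and byte-swaps on
         * big-endian hosts, mirroring what le32_to_cpu() does in the kernel. */
        uint32_t cmd_id = le32toh(raw);

        printf("cmd_id = 0x%08x\n", cmd_id);
        return 0;
}
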
1530 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1531 index acd859ea09d4..aba56077cfda 100644
1532 --- a/include/linux/mlx5/mlx5_ifc.h
1533 +++ b/include/linux/mlx5/mlx5_ifc.h
1534 @@ -4177,6 +4177,7 @@ struct mlx5_ifc_query_vport_state_out_bits {
1535 enum {
1536 MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
1537 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
1538 + MLX5_VPORT_STATE_OP_MOD_UPLINK = 0x2,
1539 };
1540
1541 struct mlx5_ifc_arm_monitor_counter_in_bits {
1542 diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
1543 index beb9a9da1699..c5bf21261bb1 100644
1544 --- a/include/linux/rhashtable.h
1545 +++ b/include/linux/rhashtable.h
1546 @@ -349,11 +349,11 @@ static inline void rht_unlock(struct bucket_table *tbl,
1547 local_bh_enable();
1548 }
1549
1550 -static inline struct rhash_head __rcu *__rht_ptr(
1551 - struct rhash_lock_head *const *bkt)
1552 +static inline struct rhash_head *__rht_ptr(
1553 + struct rhash_lock_head *p, struct rhash_lock_head __rcu *const *bkt)
1554 {
1555 - return (struct rhash_head __rcu *)
1556 - ((unsigned long)*bkt & ~BIT(0) ?:
1557 + return (struct rhash_head *)
1558 + ((unsigned long)p & ~BIT(0) ?:
1559 (unsigned long)RHT_NULLS_MARKER(bkt));
1560 }
1561
1562 @@ -365,25 +365,26 @@ static inline struct rhash_head __rcu *__rht_ptr(
1563 * access is guaranteed, such as when destroying the table.
1564 */
1565 static inline struct rhash_head *rht_ptr_rcu(
1566 - struct rhash_lock_head *const *bkt)
1567 + struct rhash_lock_head *const *p)
1568 {
1569 - struct rhash_head __rcu *p = __rht_ptr(bkt);
1570 -
1571 - return rcu_dereference(p);
1572 + struct rhash_lock_head __rcu *const *bkt = (void *)p;
1573 + return __rht_ptr(rcu_dereference(*bkt), bkt);
1574 }
1575
1576 static inline struct rhash_head *rht_ptr(
1577 - struct rhash_lock_head *const *bkt,
1578 + struct rhash_lock_head *const *p,
1579 struct bucket_table *tbl,
1580 unsigned int hash)
1581 {
1582 - return rht_dereference_bucket(__rht_ptr(bkt), tbl, hash);
1583 + struct rhash_lock_head __rcu *const *bkt = (void *)p;
1584 + return __rht_ptr(rht_dereference_bucket(*bkt, tbl, hash), bkt);
1585 }
1586
1587 static inline struct rhash_head *rht_ptr_exclusive(
1588 - struct rhash_lock_head *const *bkt)
1589 + struct rhash_lock_head *const *p)
1590 {
1591 - return rcu_dereference_protected(__rht_ptr(bkt), 1);
1592 + struct rhash_lock_head __rcu *const *bkt = (void *)p;
1593 + return __rht_ptr(rcu_dereference_protected(*bkt, 1), bkt);
1594 }
1595
1596 static inline void rht_assign_locked(struct rhash_lock_head **bkt,
1597 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
1598 index fb391c00c19a..12aa6e15e43f 100644
1599 --- a/include/net/xfrm.h
1600 +++ b/include/net/xfrm.h
1601 @@ -945,7 +945,7 @@ struct xfrm_dst {
1602 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1603 {
1604 #ifdef CONFIG_XFRM
1605 - if (dst->xfrm) {
1606 + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1607 const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1608
1609 return xdst->path;
1610 @@ -957,7 +957,7 @@ static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1611 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1612 {
1613 #ifdef CONFIG_XFRM
1614 - if (dst->xfrm) {
1615 + if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1616 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1617 return xdst->child;
1618 }
1619 @@ -1636,13 +1636,16 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1620 void *);
1621 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1622 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1623 -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1624 - u8 type, int dir,
1625 +struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1626 + const struct xfrm_mark *mark,
1627 + u32 if_id, u8 type, int dir,
1628 struct xfrm_selector *sel,
1629 struct xfrm_sec_ctx *ctx, int delete,
1630 int *err);
1631 -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
1632 - int dir, u32 id, int delete, int *err);
1633 +struct xfrm_policy *xfrm_policy_byid(struct net *net,
1634 + const struct xfrm_mark *mark, u32 if_id,
1635 + u8 type, int dir, u32 id, int delete,
1636 + int *err);
1637 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1638 void xfrm_policy_hash_rebuild(struct net *net);
1639 u32 xfrm_get_acqseq(void);
1640 diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
1641 index b550ae89bf85..6dd3b5284fd1 100644
1642 --- a/include/rdma/rdmavt_qp.h
1643 +++ b/include/rdma/rdmavt_qp.h
1644 @@ -278,6 +278,25 @@ struct rvt_rq {
1645 spinlock_t lock ____cacheline_aligned_in_smp;
1646 };
1647
1648 +/**
1649 + * rvt_get_rq_count - count the number of request work queue entries
1650 + * in the circular buffer
1651 + * @rq: data structure for the request queue entry
1652 + * @head: head index of the circular buffer
1653 + * @tail: tail index of the circular buffer
1654 + *
1655 + * Return - total number of entries in the Receive Queue
1656 + */
1657 +
1658 +static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
1659 +{
1660 + u32 count = head - tail;
1661 +
1662 + if ((s32)count < 0)
1663 + count += rq->size;
1664 + return count;
1665 +}
1666 +
1667 /*
1668 * This structure holds the information that the send tasklet needs
1669 * to send a RDMA read response or atomic operation.
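
Background for rvt_get_rq_count() above: when the head index has wrapped back to the start of the ring it is numerically smaller than the tail, so the unsigned subtraction underflows and the ring size has to be added back. A standalone sketch of the same arithmetic, using a hypothetical ring_count() helper rather than the rdmavt types:

#include <assert.h>
#include <stdint.h>

/* Count the entries in a circular buffer of 'size' slots, given its
 * head and tail indices; mirrors the wraparound handling above. */
static uint32_t ring_count(uint32_t size, uint32_t head, uint32_t tail)
{
        uint32_t count = head - tail;      /* may wrap below zero */

        if ((int32_t)count < 0)            /* head has wrapped around */
                count += size;
        return count;
}

int main(void)
{
        assert(ring_count(8, 5, 2) == 3);  /* no wrap: 5 - 2 */
        assert(ring_count(8, 1, 6) == 3);  /* wrapped: (1 - 6) + 8 */
        assert(ring_count(8, 4, 4) == 0);  /* empty ring */
        return 0;
}
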
1670 diff --git a/include/uapi/linux/wireless.h b/include/uapi/linux/wireless.h
1671 index 86eca3208b6b..a2c006a364e0 100644
1672 --- a/include/uapi/linux/wireless.h
1673 +++ b/include/uapi/linux/wireless.h
1674 @@ -74,6 +74,8 @@
1675 #include <linux/socket.h> /* for "struct sockaddr" et al */
1676 #include <linux/if.h> /* for IFNAMSIZ and co... */
1677
1678 +#include <stddef.h> /* for offsetof */
1679 +
1680 /***************************** VERSION *****************************/
1681 /*
1682 * This constant is used to know the availability of the wireless
1683 @@ -1090,8 +1092,7 @@ struct iw_event {
1684 /* iw_point events are special. First, the payload (extra data) come at
1685 * the end of the event, so they are bigger than IW_EV_POINT_LEN. Second,
1686 * we omit the pointer, so start at an offset. */
1687 -#define IW_EV_POINT_OFF (((char *) &(((struct iw_point *) NULL)->length)) - \
1688 - (char *) NULL)
1689 +#define IW_EV_POINT_OFF offsetof(struct iw_point, length)
1690 #define IW_EV_POINT_LEN (IW_EV_LCP_LEN + sizeof(struct iw_point) - \
1691 IW_EV_POINT_OFF)
1692
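
The IW_EV_POINT_OFF rewrite above swaps the old null-pointer-cast offset trick, which is undefined behaviour and now draws compiler warnings, for the standard offsetof() macro; both produce the byte offset of the length member. A small illustration using a hypothetical stand-in struct (the real struct iw_point lives in this header):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for struct iw_point. */
struct iw_point_like {
        void *pointer;
        unsigned short length;
        unsigned short flags;
};

int main(void)
{
        /* Old idiom, shown only as a comment because it is undefined behaviour:
         *   ((char *)&(((struct iw_point_like *)NULL)->length)) - (char *)NULL
         */
        size_t off = offsetof(struct iw_point_like, length);

        printf("offset of length = %zu bytes\n", off);
        return 0;
}
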
1693 diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
1694 index 22066a62c8c9..039d64b1bfb7 100644
1695 --- a/kernel/bpf/hashtab.c
1696 +++ b/kernel/bpf/hashtab.c
1697 @@ -675,15 +675,20 @@ static void htab_elem_free_rcu(struct rcu_head *head)
1698 preempt_enable();
1699 }
1700
1701 -static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
1702 +static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
1703 {
1704 struct bpf_map *map = &htab->map;
1705 + void *ptr;
1706
1707 if (map->ops->map_fd_put_ptr) {
1708 - void *ptr = fd_htab_map_get_ptr(map, l);
1709 -
1710 + ptr = fd_htab_map_get_ptr(map, l);
1711 map->ops->map_fd_put_ptr(ptr);
1712 }
1713 +}
1714 +
1715 +static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
1716 +{
1717 + htab_put_fd_value(htab, l);
1718
1719 if (htab_is_prealloc(htab)) {
1720 __pcpu_freelist_push(&htab->freelist, &l->fnode);
1721 @@ -735,6 +740,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
1722 */
1723 pl_new = this_cpu_ptr(htab->extra_elems);
1724 l_new = *pl_new;
1725 + htab_put_fd_value(htab, old_elem);
1726 *pl_new = old_elem;
1727 } else {
1728 struct pcpu_freelist_node *l;
1729 diff --git a/mm/filemap.c b/mm/filemap.c
1730 index 1f5731768222..18c1f5830074 100644
1731 --- a/mm/filemap.c
1732 +++ b/mm/filemap.c
1733 @@ -2438,7 +2438,7 @@ static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
1734 pgoff_t offset = vmf->pgoff;
1735
1736 /* If we don't want any read-ahead, don't bother */
1737 - if (vmf->vma->vm_flags & VM_RAND_READ)
1738 + if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
1739 return fpin;
1740 if (ra->mmap_miss > 0)
1741 ra->mmap_miss--;
1742 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1743 index 13cd683a658a..3f67803123be 100644
1744 --- a/net/9p/trans_fd.c
1745 +++ b/net/9p/trans_fd.c
1746 @@ -362,6 +362,10 @@ static void p9_read_work(struct work_struct *work)
1747 if (m->rreq->status == REQ_STATUS_SENT) {
1748 list_del(&m->rreq->req_list);
1749 p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
1750 + } else if (m->rreq->status == REQ_STATUS_FLSHD) {
1751 + /* Ignore replies associated with a cancelled request. */
1752 + p9_debug(P9_DEBUG_TRANS,
1753 + "Ignore replies associated with a cancelled request\n");
1754 } else {
1755 spin_unlock(&m->client->lock);
1756 p9_debug(P9_DEBUG_ERROR,
1757 @@ -703,11 +707,20 @@ static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
1758 {
1759 p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
1760
1761 + spin_lock(&client->lock);
1762 + /* Ignore the cancelled request if its reply has already been
1763 + * received before we took the lock.
1764 + */
1765 + if (req->status == REQ_STATUS_RCVD) {
1766 + spin_unlock(&client->lock);
1767 + return 0;
1768 + }
1769 +
1770 /* we haven't received a response for oldreq,
1771 * remove it from the list.
1772 */
1773 - spin_lock(&client->lock);
1774 list_del(&req->req_list);
1775 + req->status = REQ_STATUS_FLSHD;
1776 spin_unlock(&client->lock);
1777 p9_req_put(req);
1778
1779 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1780 index 88cd410e5728..44385252d7b6 100644
1781 --- a/net/bluetooth/hci_event.c
1782 +++ b/net/bluetooth/hci_event.c
1783 @@ -1274,6 +1274,9 @@ static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1784 {
1785 struct discovery_state *d = &hdev->discovery;
1786
1787 + if (len > HCI_MAX_AD_LENGTH)
1788 + return;
1789 +
1790 bacpy(&d->last_adv_addr, bdaddr);
1791 d->last_adv_addr_type = bdaddr_type;
1792 d->last_adv_rssi = rssi;
1793 @@ -5231,7 +5234,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
1794
1795 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
1796 u8 bdaddr_type, bdaddr_t *direct_addr,
1797 - u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
1798 + u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
1799 + bool ext_adv)
1800 {
1801 struct discovery_state *d = &hdev->discovery;
1802 struct smp_irk *irk;
1803 @@ -5253,6 +5257,11 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
1804 return;
1805 }
1806
1807 + if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
1808 + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
1809 + return;
1810 + }
1811 +
1812 /* Find the end of the data in case the report contains padded zero
1813 * bytes at the end causing an invalid length value.
1814 *
1815 @@ -5312,7 +5321,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
1816 */
1817 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
1818 direct_addr);
1819 - if (conn && type == LE_ADV_IND) {
1820 + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
1821 /* Store report for later inclusion by
1822 * mgmt_device_connected
1823 */
1824 @@ -5366,7 +5375,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
1825 * event or send an immediate device found event if the data
1826 * should not be stored for later.
1827 */
1828 - if (!has_pending_adv_report(hdev)) {
1829 + if (!ext_adv && !has_pending_adv_report(hdev)) {
1830 /* If the report will trigger a SCAN_REQ store it for
1831 * later merging.
1832 */
1833 @@ -5401,7 +5410,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
1834 /* If the new report will trigger a SCAN_REQ store it for
1835 * later merging.
1836 */
1837 - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
1838 + if (!ext_adv && (type == LE_ADV_IND ||
1839 + type == LE_ADV_SCAN_IND)) {
1840 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
1841 rssi, flags, data, len);
1842 return;
1843 @@ -5441,7 +5451,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1844 rssi = ev->data[ev->length];
1845 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
1846 ev->bdaddr_type, NULL, 0, rssi,
1847 - ev->data, ev->length);
1848 + ev->data, ev->length, false);
1849 } else {
1850 bt_dev_err(hdev, "Dropping invalid advertising data");
1851 }
1852 @@ -5515,7 +5525,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1853 if (legacy_evt_type != LE_ADV_INVALID) {
1854 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
1855 ev->bdaddr_type, NULL, 0, ev->rssi,
1856 - ev->data, ev->length);
1857 + ev->data, ev->length,
1858 + !(evt_type & LE_EXT_ADV_LEGACY_PDU));
1859 }
1860
1861 ptr += sizeof(*ev) + ev->length;
1862 @@ -5713,7 +5724,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
1863
1864 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
1865 ev->bdaddr_type, &ev->direct_addr,
1866 - ev->direct_addr_type, ev->rssi, NULL, 0);
1867 + ev->direct_addr_type, ev->rssi, NULL, 0,
1868 + false);
1869
1870 ptr += sizeof(*ev);
1871 }
1872 diff --git a/net/key/af_key.c b/net/key/af_key.c
1873 index b67ed3a8486c..979c579afc63 100644
1874 --- a/net/key/af_key.c
1875 +++ b/net/key/af_key.c
1876 @@ -2400,7 +2400,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
1877 return err;
1878 }
1879
1880 - xp = xfrm_policy_bysel_ctx(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
1881 + xp = xfrm_policy_bysel_ctx(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
1882 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
1883 1, &err);
1884 security_xfrm_policy_free(pol_ctx);
1885 @@ -2651,7 +2651,7 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, const struct sadb_
1886 return -EINVAL;
1887
1888 delete = (hdr->sadb_msg_type == SADB_X_SPDDELETE2);
1889 - xp = xfrm_policy_byid(net, DUMMY_MARK, 0, XFRM_POLICY_TYPE_MAIN,
1890 + xp = xfrm_policy_byid(net, &dummy_mark, 0, XFRM_POLICY_TYPE_MAIN,
1891 dir, pol->sadb_x_policy_id, delete, &err);
1892 if (xp == NULL)
1893 return -ENOENT;
1894 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
1895 index 0daaf7e37a21..a9dda5c228f6 100644
1896 --- a/net/mac80211/cfg.c
1897 +++ b/net/mac80211/cfg.c
1898 @@ -2140,6 +2140,7 @@ static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev)
1899 ieee80211_stop_mesh(sdata);
1900 mutex_lock(&sdata->local->mtx);
1901 ieee80211_vif_release_channel(sdata);
1902 + kfree(sdata->u.mesh.ie);
1903 mutex_unlock(&sdata->local->mtx);
1904
1905 return 0;
1906 diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
1907 index 117519bf33d6..aca608ae313f 100644
1908 --- a/net/mac80211/mesh_pathtbl.c
1909 +++ b/net/mac80211/mesh_pathtbl.c
1910 @@ -521,6 +521,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
1911 del_timer_sync(&mpath->timer);
1912 atomic_dec(&sdata->u.mesh.mpaths);
1913 atomic_dec(&tbl->entries);
1914 + mesh_path_flush_pending(mpath);
1915 kfree_rcu(mpath, rcu);
1916 }
1917
1918 diff --git a/net/rds/recv.c b/net/rds/recv.c
1919 index c8404971d5ab..aba4afe4dfed 100644
1920 --- a/net/rds/recv.c
1921 +++ b/net/rds/recv.c
1922 @@ -450,12 +450,13 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
1923 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
1924 {
1925 struct rds_notifier *notifier;
1926 - struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
1927 + struct rds_rdma_notify cmsg;
1928 unsigned int count = 0, max_messages = ~0U;
1929 unsigned long flags;
1930 LIST_HEAD(copy);
1931 int err = 0;
1932
1933 + memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */
1934
1935 /* put_cmsg copies to user space and thus may sleep. We can't do this
1936 * with rs_lock held, so first grab as many notifications as we can stuff
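
Why the rds change above uses memset() instead of '= { 0 }': a brace initializer is only guaranteed to zero the named members, and compiler-inserted padding bytes can stay uninitialized; since the struct is later copied to user space via put_cmsg(), stale padding would be an infoleak. A reduced sketch of the pattern with a hypothetical struct layout (not the real rds_rdma_notify):

#include <stdint.h>
#include <string.h>

/* Hypothetical layout: on LP64 there are 4 padding bytes after 'status'. */
struct notify_like {
        int32_t  status;
        uint64_t user_token;
};

static void fill_notification(struct notify_like *n,
                               uint64_t token, int32_t status)
{
        memset(n, 0, sizeof(*n));  /* clears padding as well as members */
        n->user_token = token;
        n->status = status;
}

int main(void)
{
        struct notify_like n;

        fill_notification(&n, 42, 0);
        return (int)n.status;
}
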
1937 diff --git a/net/sunrpc/sunrpc.h b/net/sunrpc/sunrpc.h
1938 index c9bacb3c930f..82035fa65b8f 100644
1939 --- a/net/sunrpc/sunrpc.h
1940 +++ b/net/sunrpc/sunrpc.h
1941 @@ -56,4 +56,5 @@ int svc_send_common(struct socket *sock, struct xdr_buf *xdr,
1942
1943 int rpc_clients_notifier_register(void);
1944 void rpc_clients_notifier_unregister(void);
1945 +void auth_domain_cleanup(void);
1946 #endif /* _NET_SUNRPC_SUNRPC_H */
1947 diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
1948 index f9edaa9174a4..236fadc4a439 100644
1949 --- a/net/sunrpc/sunrpc_syms.c
1950 +++ b/net/sunrpc/sunrpc_syms.c
1951 @@ -23,6 +23,7 @@
1952 #include <linux/sunrpc/rpc_pipe_fs.h>
1953 #include <linux/sunrpc/xprtsock.h>
1954
1955 +#include "sunrpc.h"
1956 #include "netns.h"
1957
1958 unsigned int sunrpc_net_id;
1959 @@ -131,6 +132,7 @@ cleanup_sunrpc(void)
1960 unregister_rpc_pipefs();
1961 rpc_destroy_mempool();
1962 unregister_pernet_subsys(&sunrpc_net_ops);
1963 + auth_domain_cleanup();
1964 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
1965 rpc_unregister_sysctl();
1966 #endif
1967 diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
1968 index 550b214cb001..998b196b6176 100644
1969 --- a/net/sunrpc/svcauth.c
1970 +++ b/net/sunrpc/svcauth.c
1971 @@ -19,6 +19,10 @@
1972 #include <linux/err.h>
1973 #include <linux/hash.h>
1974
1975 +#include <trace/events/sunrpc.h>
1976 +
1977 +#include "sunrpc.h"
1978 +
1979 #define RPCDBG_FACILITY RPCDBG_AUTH
1980
1981
1982 @@ -203,3 +207,26 @@ struct auth_domain *auth_domain_find(char *name)
1983 return NULL;
1984 }
1985 EXPORT_SYMBOL_GPL(auth_domain_find);
1986 +
1987 +/**
1988 + * auth_domain_cleanup - check that the auth_domain table is empty
1989 + *
1990 + * On module unload the auth_domain_table must be empty. To make it
1991 + * easier to catch bugs which don't clean up domains properly, we
1992 + * warn if anything remains in the table at cleanup time.
1993 + *
1994 + * Note that we cannot proactively remove the domains at this stage.
1995 + * The ->release() function might be in a module that has already been
1996 + * unloaded.
1997 + */
1998 +
1999 +void auth_domain_cleanup(void)
2000 +{
2001 + int h;
2002 + struct auth_domain *hp;
2003 +
2004 + for (h = 0; h < DN_HASHMAX; h++)
2005 + hlist_for_each_entry(hp, &auth_domain_table[h], hash)
2006 + pr_warn("svc: domain %s still present at module unload.\n",
2007 + hp->name);
2008 +}
2009 diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
2010 index 8aa415a38814..0285aaa1e93c 100644
2011 --- a/net/x25/x25_subr.c
2012 +++ b/net/x25/x25_subr.c
2013 @@ -357,6 +357,12 @@ void x25_disconnect(struct sock *sk, int reason, unsigned char cause,
2014 sk->sk_state_change(sk);
2015 sock_set_flag(sk, SOCK_DEAD);
2016 }
2017 + if (x25->neighbour) {
2018 + read_lock_bh(&x25_list_lock);
2019 + x25_neigh_put(x25->neighbour);
2020 + x25->neighbour = NULL;
2021 + read_unlock_bh(&x25_list_lock);
2022 + }
2023 }
2024
2025 /*
2026 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2027 index 6a1a21ae47bb..2917711ff8ab 100644
2028 --- a/net/xfrm/xfrm_policy.c
2029 +++ b/net/xfrm/xfrm_policy.c
2030 @@ -1430,14 +1430,10 @@ static void xfrm_policy_requeue(struct xfrm_policy *old,
2031 spin_unlock_bh(&pq->hold_queue.lock);
2032 }
2033
2034 -static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
2035 - struct xfrm_policy *pol)
2036 +static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
2037 + struct xfrm_policy *pol)
2038 {
2039 - if (policy->mark.v == pol->mark.v &&
2040 - policy->priority == pol->priority)
2041 - return true;
2042 -
2043 - return false;
2044 + return mark->v == pol->mark.v && mark->m == pol->mark.m;
2045 }
2046
2047 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
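
The reworked xfrm_policy_mark_match() above compares both the mark value and the mask, so a policy lookup only matches when the caller's (v, m) pair is identical to the policy's, rather than whenever the masked value happens to coincide. A standalone comparison of the old and new predicates, using a hypothetical mark_like type in place of struct xfrm_mark:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct xfrm_mark. */
struct mark_like {
        uint32_t v;   /* value */
        uint32_t m;   /* mask  */
};

/* Old predicate: any mark whose masked value equals the policy value. */
static bool masked_match(uint32_t mark, const struct mark_like *pol)
{
        return (mark & pol->m) == pol->v;
}

/* New predicate: the value and the mask must both be identical. */
static bool exact_match(const struct mark_like *mark, const struct mark_like *pol)
{
        return mark->v == pol->v && mark->m == pol->m;
}

int main(void)
{
        struct mark_like pol  = { .v = 0x1,  .m = 0xf };
        struct mark_like mark = { .v = 0x21, .m = 0xffffffff };

        /* 0x21 & 0xf == 0x1, so the old masked test matches this policy;
         * the exact test rejects it because neither v nor m is identical. */
        printf("masked: %d  exact: %d\n",
               masked_match(mark.v, &pol), exact_match(&mark, &pol));
        return 0;
}
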
2048 @@ -1500,7 +1496,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
2049 if (pol->type == policy->type &&
2050 pol->if_id == policy->if_id &&
2051 !selector_cmp(&pol->selector, &policy->selector) &&
2052 - xfrm_policy_mark_match(policy, pol) &&
2053 + xfrm_policy_mark_match(&policy->mark, pol) &&
2054 xfrm_sec_ctx_match(pol->security, policy->security) &&
2055 !WARN_ON(delpol)) {
2056 delpol = pol;
2057 @@ -1535,7 +1531,7 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
2058 if (pol->type == policy->type &&
2059 pol->if_id == policy->if_id &&
2060 !selector_cmp(&pol->selector, &policy->selector) &&
2061 - xfrm_policy_mark_match(policy, pol) &&
2062 + xfrm_policy_mark_match(&policy->mark, pol) &&
2063 xfrm_sec_ctx_match(pol->security, policy->security) &&
2064 !WARN_ON(delpol)) {
2065 if (excl)
2066 @@ -1607,9 +1603,8 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
2067 EXPORT_SYMBOL(xfrm_policy_insert);
2068
2069 static struct xfrm_policy *
2070 -__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
2071 - u8 type, int dir,
2072 - struct xfrm_selector *sel,
2073 +__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
2074 + u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
2075 struct xfrm_sec_ctx *ctx)
2076 {
2077 struct xfrm_policy *pol;
2078 @@ -1620,7 +1615,7 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
2079 hlist_for_each_entry(pol, chain, bydst) {
2080 if (pol->type == type &&
2081 pol->if_id == if_id &&
2082 - (mark & pol->mark.m) == pol->mark.v &&
2083 + xfrm_policy_mark_match(mark, pol) &&
2084 !selector_cmp(sel, &pol->selector) &&
2085 xfrm_sec_ctx_match(ctx, pol->security))
2086 return pol;
2087 @@ -1629,11 +1624,10 @@ __xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
2088 return NULL;
2089 }
2090
2091 -struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
2092 - u8 type, int dir,
2093 - struct xfrm_selector *sel,
2094 - struct xfrm_sec_ctx *ctx, int delete,
2095 - int *err)
2096 +struct xfrm_policy *
2097 +xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
2098 + u8 type, int dir, struct xfrm_selector *sel,
2099 + struct xfrm_sec_ctx *ctx, int delete, int *err)
2100 {
2101 struct xfrm_pol_inexact_bin *bin = NULL;
2102 struct xfrm_policy *pol, *ret = NULL;
2103 @@ -1700,9 +1694,9 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
2104 }
2105 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
2106
2107 -struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
2108 - u8 type, int dir, u32 id, int delete,
2109 - int *err)
2110 +struct xfrm_policy *
2111 +xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
2112 + u8 type, int dir, u32 id, int delete, int *err)
2113 {
2114 struct xfrm_policy *pol, *ret;
2115 struct hlist_head *chain;
2116 @@ -1717,8 +1711,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
2117 ret = NULL;
2118 hlist_for_each_entry(pol, chain, byidx) {
2119 if (pol->type == type && pol->index == id &&
2120 - pol->if_id == if_id &&
2121 - (mark & pol->mark.m) == pol->mark.v) {
2122 + pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
2123 xfrm_pol_hold(pol);
2124 if (delete) {
2125 *err = security_xfrm_policy_delete(
2126 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
2127 index e6cfaa680ef3..fbb7d9d06478 100644
2128 --- a/net/xfrm/xfrm_user.c
2129 +++ b/net/xfrm/xfrm_user.c
2130 @@ -1863,7 +1863,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2131 struct km_event c;
2132 int delete;
2133 struct xfrm_mark m;
2134 - u32 mark = xfrm_mark_get(attrs, &m);
2135 u32 if_id = 0;
2136
2137 p = nlmsg_data(nlh);
2138 @@ -1880,8 +1879,11 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2139 if (attrs[XFRMA_IF_ID])
2140 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2141
2142 + xfrm_mark_get(attrs, &m);
2143 +
2144 if (p->index)
2145 - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, delete, &err);
2146 + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
2147 + p->index, delete, &err);
2148 else {
2149 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2150 struct xfrm_sec_ctx *ctx;
2151 @@ -1898,8 +1900,8 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
2152 if (err)
2153 return err;
2154 }
2155 - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir, &p->sel,
2156 - ctx, delete, &err);
2157 + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
2158 + &p->sel, ctx, delete, &err);
2159 security_xfrm_policy_free(ctx);
2160 }
2161 if (xp == NULL)
2162 @@ -2166,7 +2168,6 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2163 u8 type = XFRM_POLICY_TYPE_MAIN;
2164 int err = -ENOENT;
2165 struct xfrm_mark m;
2166 - u32 mark = xfrm_mark_get(attrs, &m);
2167 u32 if_id = 0;
2168
2169 err = copy_from_user_policy_type(&type, attrs);
2170 @@ -2180,8 +2181,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2171 if (attrs[XFRMA_IF_ID])
2172 if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2173
2174 + xfrm_mark_get(attrs, &m);
2175 +
2176 if (p->index)
2177 - xp = xfrm_policy_byid(net, mark, if_id, type, p->dir, p->index, 0, &err);
2178 + xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
2179 + 0, &err);
2180 else {
2181 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2182 struct xfrm_sec_ctx *ctx;
2183 @@ -2198,7 +2202,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
2184 if (err)
2185 return err;
2186 }
2187 - xp = xfrm_policy_bysel_ctx(net, mark, if_id, type, p->dir,
2188 + xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
2189 &p->sel, ctx, 0, &err);
2190 security_xfrm_policy_free(ctx);
2191 }
2192 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2193 index 820f534a67b1..908b68fda24c 100644
2194 --- a/sound/pci/hda/patch_hdmi.c
2195 +++ b/sound/pci/hda/patch_hdmi.c
2196 @@ -2483,6 +2483,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
2197 mutex_lock(&spec->bind_lock);
2198 spec->use_acomp_notifier = use_acomp;
2199 spec->codec->relaxed_resume = use_acomp;
2200 + spec->codec->bus->keep_power = 0;
2201 /* reprogram each jack detection logic depending on the notifier */
2202 if (spec->use_jack_detect) {
2203 for (i = 0; i < spec->num_pins; i++)
2204 @@ -2578,7 +2579,6 @@ static void generic_acomp_init(struct hda_codec *codec,
2205 if (!snd_hdac_acomp_init(&codec->bus->core, &spec->drm_audio_ops,
2206 match_bound_vga, 0)) {
2207 spec->acomp_registered = true;
2208 - codec->bus->keep_power = 0;
2209 }
2210 }
2211
2212 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2213 index bf205621d7ac..f50d71da1226 100644
2214 --- a/sound/pci/hda/patch_realtek.c
2215 +++ b/sound/pci/hda/patch_realtek.c
2216 @@ -5940,6 +5940,16 @@ static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
2217 snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
2218 }
2219
2220 +static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec,
2221 + const struct hda_fixup *fix, int action)
2222 +{
2223 + if (action != HDA_FIXUP_ACT_INIT)
2224 + return;
2225 +
2226 + msleep(100);
2227 + alc_write_coef_idx(codec, 0x65, 0x0);
2228 +}
2229 +
2230 /* for hda_fixup_thinkpad_acpi() */
2231 #include "thinkpad_helper.c"
2232
2233 @@ -6117,8 +6127,10 @@ enum {
2234 ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS,
2235 ALC269VC_FIXUP_ACER_HEADSET_MIC,
2236 ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE,
2237 - ALC289_FIXUP_ASUS_G401,
2238 + ALC289_FIXUP_ASUS_GA401,
2239 + ALC289_FIXUP_ASUS_GA502,
2240 ALC256_FIXUP_ACER_MIC_NO_PRESENCE,
2241 + ALC285_FIXUP_HP_GPIO_AMP_INIT,
2242 };
2243
2244 static const struct hda_fixup alc269_fixups[] = {
2245 @@ -7328,7 +7340,14 @@ static const struct hda_fixup alc269_fixups[] = {
2246 .chained = true,
2247 .chain_id = ALC269_FIXUP_HEADSET_MIC
2248 },
2249 - [ALC289_FIXUP_ASUS_G401] = {
2250 + [ALC289_FIXUP_ASUS_GA401] = {
2251 + .type = HDA_FIXUP_PINS,
2252 + .v.pins = (const struct hda_pintbl[]) {
2253 + { 0x19, 0x03a11020 }, /* headset mic with jack detect */
2254 + { }
2255 + },
2256 + },
2257 + [ALC289_FIXUP_ASUS_GA502] = {
2258 .type = HDA_FIXUP_PINS,
2259 .v.pins = (const struct hda_pintbl[]) {
2260 { 0x19, 0x03a11020 }, /* headset mic with jack detect */
2261 @@ -7344,6 +7363,12 @@ static const struct hda_fixup alc269_fixups[] = {
2262 .chained = true,
2263 .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
2264 },
2265 + [ALC285_FIXUP_HP_GPIO_AMP_INIT] = {
2266 + .type = HDA_FIXUP_FUNC,
2267 + .v.func = alc285_fixup_hp_gpio_amp_init,
2268 + .chained = true,
2269 + .chain_id = ALC285_FIXUP_HP_GPIO_LED
2270 + },
2271 };
2272
2273 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2274 @@ -7494,7 +7519,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2275 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2276 SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
2277 SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
2278 - SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_LED),
2279 + SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
2280 SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED),
2281 SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED),
2282 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
2283 @@ -7526,7 +7551,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2284 SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
2285 SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
2286 SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
2287 - SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_G401),
2288 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
2289 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
2290 SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
2291 SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
2292 SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
2293 @@ -7546,7 +7572,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2294 SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
2295 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2296 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
2297 - SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
2298 + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
2299 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
2300 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
2301 SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
2302 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2303 index 086244c70743..d11d00efc574 100644
2304 --- a/sound/usb/pcm.c
2305 +++ b/sound/usb/pcm.c
2306 @@ -354,6 +354,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
2307 ifnum = 0;
2308 goto add_sync_ep_from_ifnum;
2309 case USB_ID(0x07fd, 0x0008): /* MOTU M Series */
2310 + case USB_ID(0x31e9, 0x0001): /* Solid State Logic SSL2 */
2311 case USB_ID(0x31e9, 0x0002): /* Solid State Logic SSL2+ */
2312 case USB_ID(0x0d9a, 0x00df): /* RTX6001 */
2313 ep = 0x81;
2314 diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
2315 index f440989fa55e..23c3535bcbd6 100644
2316 --- a/tools/lib/traceevent/plugins/Makefile
2317 +++ b/tools/lib/traceevent/plugins/Makefile
2318 @@ -196,7 +196,7 @@ define do_generate_dynamic_list_file
2319 xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
2320 if [ "$$symbol_type" = "U W" ];then \
2321 (echo '{'; \
2322 - $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
2323 + $(NM) -u -D $1 | awk 'NF>1 {sub("@.*", "", $$2); print "\t"$$2";"}' | sort -u;\
2324 echo '};'; \
2325 ) > $2; \
2326 else \
2327 diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
2328 index 0a6e75b8777a..28a5d0c18b1d 100644
2329 --- a/tools/perf/arch/arm/util/auxtrace.c
2330 +++ b/tools/perf/arch/arm/util/auxtrace.c
2331 @@ -56,7 +56,7 @@ struct auxtrace_record
2332 struct perf_pmu *cs_etm_pmu;
2333 struct evsel *evsel;
2334 bool found_etm = false;
2335 - bool found_spe = false;
2336 + struct perf_pmu *found_spe = NULL;
2337 static struct perf_pmu **arm_spe_pmus = NULL;
2338 static int nr_spes = 0;
2339 int i = 0;
2340 @@ -74,12 +74,12 @@ struct auxtrace_record
2341 evsel->core.attr.type == cs_etm_pmu->type)
2342 found_etm = true;
2343
2344 - if (!nr_spes)
2345 + if (!nr_spes || found_spe)
2346 continue;
2347
2348 for (i = 0; i < nr_spes; i++) {
2349 if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
2350 - found_spe = true;
2351 + found_spe = arm_spe_pmus[i];
2352 break;
2353 }
2354 }
2355 @@ -96,7 +96,7 @@ struct auxtrace_record
2356
2357 #if defined(__aarch64__)
2358 if (found_spe)
2359 - return arm_spe_recording_init(err, arm_spe_pmus[i]);
2360 + return arm_spe_recording_init(err, found_spe);
2361 #endif
2362
2363 /*
2364 diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
2365 index fddb3ced9db6..4aa6de1aa67d 100644
2366 --- a/tools/perf/bench/bench.h
2367 +++ b/tools/perf/bench/bench.h
2368 @@ -2,6 +2,10 @@
2369 #ifndef BENCH_H
2370 #define BENCH_H
2371
2372 +#include <sys/time.h>
2373 +
2374 +extern struct timeval bench__start, bench__end, bench__runtime;
2375 +
2376 /*
2377 * The madvise transparent hugepage constants were added in glibc
2378 * 2.13. For compatibility with older versions of glibc, define these
2379 diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
2380 index bb617e568841..a7526c05df38 100644
2381 --- a/tools/perf/bench/epoll-ctl.c
2382 +++ b/tools/perf/bench/epoll-ctl.c
2383 @@ -35,7 +35,6 @@
2384
2385 static unsigned int nthreads = 0;
2386 static unsigned int nsecs = 8;
2387 -struct timeval start, end, runtime;
2388 static bool done, __verbose, randomize;
2389
2390 /*
2391 @@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
2392 {
2393 /* inform all threads that we're done for the day */
2394 done = true;
2395 - gettimeofday(&end, NULL);
2396 - timersub(&end, &start, &runtime);
2397 + gettimeofday(&bench__end, NULL);
2398 + timersub(&bench__end, &bench__start, &bench__runtime);
2399 }
2400
2401 static void nest_epollfd(void)
2402 @@ -361,7 +360,7 @@ int bench_epoll_ctl(int argc, const char **argv)
2403
2404 threads_starting = nthreads;
2405
2406 - gettimeofday(&start, NULL);
2407 + gettimeofday(&bench__start, NULL);
2408
2409 do_threads(worker, cpu);
2410
2411 diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
2412 index 7af694437f4e..d1c5cb526b9f 100644
2413 --- a/tools/perf/bench/epoll-wait.c
2414 +++ b/tools/perf/bench/epoll-wait.c
2415 @@ -90,7 +90,6 @@
2416
2417 static unsigned int nthreads = 0;
2418 static unsigned int nsecs = 8;
2419 -struct timeval start, end, runtime;
2420 static bool wdone, done, __verbose, randomize, nonblocking;
2421
2422 /*
2423 @@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused,
2424 {
2425 /* inform all threads that we're done for the day */
2426 done = true;
2427 - gettimeofday(&end, NULL);
2428 - timersub(&end, &start, &runtime);
2429 + gettimeofday(&bench__end, NULL);
2430 + timersub(&bench__end, &bench__start, &bench__runtime);
2431 }
2432
2433 static void print_summary(void)
2434 @@ -287,7 +286,7 @@ static void print_summary(void)
2435
2436 printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
2437 avg, rel_stddev_stats(stddev, avg),
2438 - (int) runtime.tv_sec);
2439 + (int)bench__runtime.tv_sec);
2440 }
2441
2442 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
2443 @@ -479,7 +478,7 @@ int bench_epoll_wait(int argc, const char **argv)
2444
2445 threads_starting = nthreads;
2446
2447 - gettimeofday(&start, NULL);
2448 + gettimeofday(&bench__start, NULL);
2449
2450 do_threads(worker, cpu);
2451
2452 @@ -519,7 +518,7 @@ int bench_epoll_wait(int argc, const char **argv)
2453 qsort(worker, nthreads, sizeof(struct worker), cmpworker);
2454
2455 for (i = 0; i < nthreads; i++) {
2456 - unsigned long t = worker[i].ops/runtime.tv_sec;
2457 + unsigned long t = worker[i].ops / bench__runtime.tv_sec;
2458
2459 update_stats(&throughput_stats, t);
2460
2461 diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
2462 index 8ba0c3330a9a..21776862e940 100644
2463 --- a/tools/perf/bench/futex-hash.c
2464 +++ b/tools/perf/bench/futex-hash.c
2465 @@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024;
2466 static bool fshared = false, done = false, silent = false;
2467 static int futex_flag = 0;
2468
2469 -struct timeval start, end, runtime;
2470 +struct timeval bench__start, bench__end, bench__runtime;
2471 static pthread_mutex_t thread_lock;
2472 static unsigned int threads_starting;
2473 static struct stats throughput_stats;
2474 @@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused,
2475 {
2476 /* inform all threads that we're done for the day */
2477 done = true;
2478 - gettimeofday(&end, NULL);
2479 - timersub(&end, &start, &runtime);
2480 + gettimeofday(&bench__end, NULL);
2481 + timersub(&bench__end, &bench__start, &bench__runtime);
2482 }
2483
2484 static void print_summary(void)
2485 @@ -114,7 +114,7 @@ static void print_summary(void)
2486
2487 printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
2488 !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
2489 - (int) runtime.tv_sec);
2490 + (int)bench__runtime.tv_sec);
2491 }
2492
2493 int bench_futex_hash(int argc, const char **argv)
2494 @@ -161,7 +161,7 @@ int bench_futex_hash(int argc, const char **argv)
2495
2496 threads_starting = nthreads;
2497 pthread_attr_init(&thread_attr);
2498 - gettimeofday(&start, NULL);
2499 + gettimeofday(&bench__start, NULL);
2500 for (i = 0; i < nthreads; i++) {
2501 worker[i].tid = i;
2502 worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
2503 @@ -204,7 +204,7 @@ int bench_futex_hash(int argc, const char **argv)
2504 pthread_mutex_destroy(&thread_lock);
2505
2506 for (i = 0; i < nthreads; i++) {
2507 - unsigned long t = worker[i].ops/runtime.tv_sec;
2508 + unsigned long t = worker[i].ops / bench__runtime.tv_sec;
2509 update_stats(&throughput_stats, t);
2510 if (!silent) {
2511 if (nfutexes == 1)
2512 diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
2513 index d0cae8125423..30d97121dc4f 100644
2514 --- a/tools/perf/bench/futex-lock-pi.c
2515 +++ b/tools/perf/bench/futex-lock-pi.c
2516 @@ -37,7 +37,6 @@ static bool silent = false, multi = false;
2517 static bool done = false, fshared = false;
2518 static unsigned int nthreads = 0;
2519 static int futex_flag = 0;
2520 -struct timeval start, end, runtime;
2521 static pthread_mutex_t thread_lock;
2522 static unsigned int threads_starting;
2523 static struct stats throughput_stats;
2524 @@ -64,7 +63,7 @@ static void print_summary(void)
2525
2526 printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
2527 !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
2528 - (int) runtime.tv_sec);
2529 + (int)bench__runtime.tv_sec);
2530 }
2531
2532 static void toggle_done(int sig __maybe_unused,
2533 @@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused,
2534 {
2535 /* inform all threads that we're done for the day */
2536 done = true;
2537 - gettimeofday(&end, NULL);
2538 - timersub(&end, &start, &runtime);
2539 + gettimeofday(&bench__end, NULL);
2540 + timersub(&bench__end, &bench__start, &bench__runtime);
2541 }
2542
2543 static void *workerfn(void *arg)
2544 @@ -185,7 +184,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
2545
2546 threads_starting = nthreads;
2547 pthread_attr_init(&thread_attr);
2548 - gettimeofday(&start, NULL);
2549 + gettimeofday(&bench__start, NULL);
2550
2551 create_threads(worker, thread_attr, cpu);
2552 pthread_attr_destroy(&thread_attr);
2553 @@ -211,7 +210,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
2554 pthread_mutex_destroy(&thread_lock);
2555
2556 for (i = 0; i < nthreads; i++) {
2557 - unsigned long t = worker[i].ops/runtime.tv_sec;
2558 + unsigned long t = worker[i].ops / bench__runtime.tv_sec;
2559
2560 update_stats(&throughput_stats, t);
2561 if (!silent)
2562 diff --git a/tools/perf/tests/bp_account.c b/tools/perf/tests/bp_account.c
2563 index 016bba2c142d..55a9de311d7b 100644
2564 --- a/tools/perf/tests/bp_account.c
2565 +++ b/tools/perf/tests/bp_account.c
2566 @@ -23,7 +23,7 @@
2567 #include "../perf-sys.h"
2568 #include "cloexec.h"
2569
2570 -volatile long the_var;
2571 +static volatile long the_var;
2572
2573 static noinline int test_function(void)
2574 {
2575 diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
2576 index 3baca06786fb..018ecf7b6da9 100644
2577 --- a/tools/perf/util/env.c
2578 +++ b/tools/perf/util/env.c
2579 @@ -326,11 +326,11 @@ static const char *normalize_arch(char *arch)
2580
2581 const char *perf_env__arch(struct perf_env *env)
2582 {
2583 - struct utsname uts;
2584 char *arch_name;
2585
2586 if (!env || !env->arch) { /* Assume local operation */
2587 - if (uname(&uts) < 0)
2588 + static struct utsname uts = { .machine[0] = '\0', };
2589 + if (uts.machine[0] == '\0' && uname(&uts) < 0)
2590 return NULL;
2591 arch_name = uts.machine;
2592 } else
2593 diff --git a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
2594 index 9dc35a16e415..51df5e305855 100755
2595 --- a/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
2596 +++ b/tools/testing/selftests/net/fib_nexthop_multiprefix.sh
2597 @@ -144,7 +144,7 @@ setup()
2598
2599 cleanup()
2600 {
2601 - for n in h1 r1 h2 h3 h4
2602 + for n in h0 r1 h1 h2 h3
2603 do
2604 ip netns del ${n} 2>/dev/null
2605 done
2606 diff --git a/tools/testing/selftests/net/ip_defrag.sh b/tools/testing/selftests/net/ip_defrag.sh
2607 index 15d3489ecd9c..ceb7ad4dbd94 100755
2608 --- a/tools/testing/selftests/net/ip_defrag.sh
2609 +++ b/tools/testing/selftests/net/ip_defrag.sh
2610 @@ -6,6 +6,8 @@
2611 set +x
2612 set -e
2613
2614 +modprobe -q nf_defrag_ipv6
2615 +
2616 readonly NETNS="ns-$(mktemp -u XXXXXX)"
2617
2618 setup() {
2619 diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
2620 index 8c8c7d79c38d..2c522f7a0aec 100644
2621 --- a/tools/testing/selftests/net/psock_fanout.c
2622 +++ b/tools/testing/selftests/net/psock_fanout.c
2623 @@ -350,7 +350,8 @@ static int test_datapath(uint16_t typeflags, int port_off,
2624 int fds[2], fds_udp[2][2], ret;
2625
2626 fprintf(stderr, "\ntest: datapath 0x%hx ports %hu,%hu\n",
2627 - typeflags, PORT_BASE, PORT_BASE + port_off);
2628 + typeflags, (uint16_t)PORT_BASE,
2629 + (uint16_t)(PORT_BASE + port_off));
2630
2631 fds[0] = sock_fanout_open(typeflags, 0);
2632 fds[1] = sock_fanout_open(typeflags, 0);
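
About the casts added above: PORT_BASE and the sum with port_off have type int after integer promotion, so printing them with %hu trips -Wformat; casting back to uint16_t matches the format specifier and makes the 16-bit truncation explicit. A tiny sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t base = 8000;   /* hypothetical stand-in for PORT_BASE */
        int off = 2;

        /* base + off has type int; the cast matches %hu and makes the
         * 16-bit truncation explicit. */
        printf("port %hu\n", (uint16_t)(base + off));
        return 0;
}
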
2633 diff --git a/tools/testing/selftests/net/so_txtime.c b/tools/testing/selftests/net/so_txtime.c
2634 index ceaad78e9667..3155fbbf644b 100644
2635 --- a/tools/testing/selftests/net/so_txtime.c
2636 +++ b/tools/testing/selftests/net/so_txtime.c
2637 @@ -121,7 +121,7 @@ static bool do_recv_one(int fdr, struct timed_send *ts)
2638 if (rbuf[0] != ts->data)
2639 error(1, 0, "payload mismatch. expected %c", ts->data);
2640
2641 - if (labs(tstop - texpect) > cfg_variance_us)
2642 + if (llabs(tstop - texpect) > cfg_variance_us)
2643 error(1, 0, "exceeds variance (%d us)", cfg_variance_us);
2644
2645 return false;
2646 diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
2647 index 422e7761254d..bcb79ba1f214 100644
2648 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c
2649 +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c
2650 @@ -329,8 +329,7 @@ int main(int argc, char **argv)
2651 bool all_tests = true;
2652 int arg_index = 0;
2653 int failures = 0;
2654 - int s, t;
2655 - char opt;
2656 + int s, t, opt;
2657
2658 while ((opt = getopt_long(argc, argv, "", long_options,
2659 &arg_index)) != -1) {
2660 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
2661 index ce7fa37987e1..767ac4eab4fe 100644
2662 --- a/virt/kvm/arm/mmu.c
2663 +++ b/virt/kvm/arm/mmu.c
2664 @@ -1199,7 +1199,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
2665 return true;
2666 }
2667
2668 -static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
2669 +static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
2670 {
2671 pud_t *pudp;
2672 pmd_t *pmdp;
2673 @@ -1211,11 +1211,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
2674 return false;
2675
2676 if (pudp)
2677 - return kvm_s2pud_exec(pudp);
2678 + return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
2679 else if (pmdp)
2680 - return kvm_s2pmd_exec(pmdp);
2681 + return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
2682 else
2683 - return kvm_s2pte_exec(ptep);
2684 + return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
2685 }
2686
2687 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
2688 @@ -1805,7 +1805,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
2689 * execute permissions, and we preserve whatever we have.
2690 */
2691 needs_exec = exec_fault ||
2692 - (fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
2693 + (fault_status == FSC_PERM &&
2694 + stage2_is_exec(kvm, fault_ipa, vma_pagesize));
2695
2696 if (vma_pagesize == PUD_SIZE) {
2697 pud_t new_pud = kvm_pfn_pud(pfn, mem_type);