Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.17/0109-4.17.10-all-fixes.patch



Revision 3160
Tue Jul 31 06:32:25 2018 UTC by niro
File size: 87619 bytes
-linux-4.17.10
1 diff --git a/Makefile b/Makefile
2 index 693fde3aa317..0ab689c38e82 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 17
9 -SUBLEVEL = 9
10 +SUBLEVEL = 10
11 EXTRAVERSION =
12 NAME = Merciless Moray
13
14 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
15 index 89faa6f4de47..6a92843c0699 100644
16 --- a/arch/alpha/kernel/osf_sys.c
17 +++ b/arch/alpha/kernel/osf_sys.c
18 @@ -1183,13 +1183,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
19 SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
20 struct rusage32 __user *, ur)
21 {
22 - unsigned int status = 0;
23 struct rusage r;
24 - long err = kernel_wait4(pid, &status, options, &r);
25 + long err = kernel_wait4(pid, ustatus, options, &r);
26 if (err <= 0)
27 return err;
28 - if (put_user(status, ustatus))
29 - return -EFAULT;
30 if (!ur)
31 return err;
32 if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
33 diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
34 index d76bf4a83740..bc0bcf01ec98 100644
35 --- a/arch/arc/Kconfig
36 +++ b/arch/arc/Kconfig
37 @@ -408,7 +408,7 @@ config ARC_HAS_DIV_REM
38
39 config ARC_HAS_ACCL_REGS
40 bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
41 - default n
42 + default y
43 help
44 Depending on the configuration, CPU can contain accumulator reg-pair
45 (also referred to as r58:r59). These can also be used by gcc as GPR so
46 diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
47 index 09f85154c5a4..a635ea972304 100644
48 --- a/arch/arc/configs/axs101_defconfig
49 +++ b/arch/arc/configs/axs101_defconfig
50 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
51 # CONFIG_UTS_NS is not set
52 # CONFIG_PID_NS is not set
53 CONFIG_BLK_DEV_INITRD=y
54 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
55 CONFIG_EMBEDDED=y
56 CONFIG_PERF_EVENTS=y
57 # CONFIG_VM_EVENT_COUNTERS is not set
58 diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
59 index 09fed3ef22b6..aa507e423075 100644
60 --- a/arch/arc/configs/axs103_defconfig
61 +++ b/arch/arc/configs/axs103_defconfig
62 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
63 # CONFIG_UTS_NS is not set
64 # CONFIG_PID_NS is not set
65 CONFIG_BLK_DEV_INITRD=y
66 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
67 CONFIG_EMBEDDED=y
68 CONFIG_PERF_EVENTS=y
69 # CONFIG_VM_EVENT_COUNTERS is not set
70 diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
71 index ea2f6d817d1a..eba07f468654 100644
72 --- a/arch/arc/configs/axs103_smp_defconfig
73 +++ b/arch/arc/configs/axs103_smp_defconfig
74 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
75 # CONFIG_UTS_NS is not set
76 # CONFIG_PID_NS is not set
77 CONFIG_BLK_DEV_INITRD=y
78 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
79 CONFIG_EMBEDDED=y
80 CONFIG_PERF_EVENTS=y
81 # CONFIG_VM_EVENT_COUNTERS is not set
82 diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
83 index ab231c040efe..098b19fbaa51 100644
84 --- a/arch/arc/configs/haps_hs_defconfig
85 +++ b/arch/arc/configs/haps_hs_defconfig
86 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
87 # CONFIG_UTS_NS is not set
88 # CONFIG_PID_NS is not set
89 CONFIG_BLK_DEV_INITRD=y
90 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
91 CONFIG_EXPERT=y
92 CONFIG_PERF_EVENTS=y
93 # CONFIG_COMPAT_BRK is not set
94 diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
95 index cf449cbf440d..0104c404d897 100644
96 --- a/arch/arc/configs/haps_hs_smp_defconfig
97 +++ b/arch/arc/configs/haps_hs_smp_defconfig
98 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
99 # CONFIG_UTS_NS is not set
100 # CONFIG_PID_NS is not set
101 CONFIG_BLK_DEV_INITRD=y
102 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
103 CONFIG_EMBEDDED=y
104 CONFIG_PERF_EVENTS=y
105 # CONFIG_VM_EVENT_COUNTERS is not set
106 diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
107 index 1b54c72f4296..6491be0ddbc9 100644
108 --- a/arch/arc/configs/hsdk_defconfig
109 +++ b/arch/arc/configs/hsdk_defconfig
110 @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
111 # CONFIG_UTS_NS is not set
112 # CONFIG_PID_NS is not set
113 CONFIG_BLK_DEV_INITRD=y
114 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
115 CONFIG_EMBEDDED=y
116 CONFIG_PERF_EVENTS=y
117 # CONFIG_VM_EVENT_COUNTERS is not set
118 diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
119 index 31c2c70b34a1..99e05cf63fca 100644
120 --- a/arch/arc/configs/nsim_700_defconfig
121 +++ b/arch/arc/configs/nsim_700_defconfig
122 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
123 # CONFIG_UTS_NS is not set
124 # CONFIG_PID_NS is not set
125 CONFIG_BLK_DEV_INITRD=y
126 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
127 CONFIG_KALLSYMS_ALL=y
128 CONFIG_EMBEDDED=y
129 CONFIG_PERF_EVENTS=y
130 diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
131 index a578c721d50f..0dc4f9b737e7 100644
132 --- a/arch/arc/configs/nsim_hs_defconfig
133 +++ b/arch/arc/configs/nsim_hs_defconfig
134 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
135 # CONFIG_UTS_NS is not set
136 # CONFIG_PID_NS is not set
137 CONFIG_BLK_DEV_INITRD=y
138 -CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
139 CONFIG_KALLSYMS_ALL=y
140 CONFIG_EMBEDDED=y
141 CONFIG_PERF_EVENTS=y
142 diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
143 index 37d7395f3272..be3c30a15e54 100644
144 --- a/arch/arc/configs/nsim_hs_smp_defconfig
145 +++ b/arch/arc/configs/nsim_hs_smp_defconfig
146 @@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
147 # CONFIG_UTS_NS is not set
148 # CONFIG_PID_NS is not set
149 CONFIG_BLK_DEV_INITRD=y
150 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
151 CONFIG_KALLSYMS_ALL=y
152 CONFIG_EMBEDDED=y
153 CONFIG_PERF_EVENTS=y
154 diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
155 index 1e1470e2a7f0..3a74b9b21772 100644
156 --- a/arch/arc/configs/nsimosci_defconfig
157 +++ b/arch/arc/configs/nsimosci_defconfig
158 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
159 # CONFIG_UTS_NS is not set
160 # CONFIG_PID_NS is not set
161 CONFIG_BLK_DEV_INITRD=y
162 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
163 CONFIG_KALLSYMS_ALL=y
164 CONFIG_EMBEDDED=y
165 CONFIG_PERF_EVENTS=y
166 diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
167 index 084a6e42685b..ea2834b4dc1d 100644
168 --- a/arch/arc/configs/nsimosci_hs_defconfig
169 +++ b/arch/arc/configs/nsimosci_hs_defconfig
170 @@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
171 # CONFIG_UTS_NS is not set
172 # CONFIG_PID_NS is not set
173 CONFIG_BLK_DEV_INITRD=y
174 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
175 CONFIG_KALLSYMS_ALL=y
176 CONFIG_EMBEDDED=y
177 CONFIG_PERF_EVENTS=y
178 diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
179 index f36d47990415..80a5a1b4924b 100644
180 --- a/arch/arc/configs/nsimosci_hs_smp_defconfig
181 +++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
182 @@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
183 # CONFIG_UTS_NS is not set
184 # CONFIG_PID_NS is not set
185 CONFIG_BLK_DEV_INITRD=y
186 -CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
187 CONFIG_PERF_EVENTS=y
188 # CONFIG_COMPAT_BRK is not set
189 CONFIG_KPROBES=y
190 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
191 index 109baa06831c..09ddddf71cc5 100644
192 --- a/arch/arc/include/asm/page.h
193 +++ b/arch/arc/include/asm/page.h
194 @@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
195 #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
196
197 /* Default Permissions for stack/heaps pages (Non Executable) */
198 -#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
199 +#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
200
201 #define WANT_PAGE_VIRTUAL 1
202
203 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
204 index 08fe33830d4b..77676e18da69 100644
205 --- a/arch/arc/include/asm/pgtable.h
206 +++ b/arch/arc/include/asm/pgtable.h
207 @@ -379,7 +379,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
208
209 /* Decode a PTE containing swap "identifier "into constituents */
210 #define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
211 -#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
212 +#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
213
214 /* NOPs, to keep generic kernel happy */
215 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
216 diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
217 index 19ab3cf98f0f..fcc9a9e27e9c 100644
218 --- a/arch/arc/plat-hsdk/Kconfig
219 +++ b/arch/arc/plat-hsdk/Kconfig
220 @@ -7,5 +7,7 @@
221
222 menuconfig ARC_SOC_HSDK
223 bool "ARC HS Development Kit SOC"
224 + depends on ISA_ARCV2
225 + select ARC_HAS_ACCL_REGS
226 select CLK_HSDK
227 select RESET_HSDK
228 diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
229 index e734f6e45abc..689306118b48 100644
230 --- a/arch/powerpc/kernel/idle_book3s.S
231 +++ b/arch/powerpc/kernel/idle_book3s.S
232 @@ -144,7 +144,9 @@ power9_restore_additional_sprs:
233 mtspr SPRN_MMCR1, r4
234
235 ld r3, STOP_MMCR2(r13)
236 + ld r4, PACA_SPRG_VDSO(r13)
237 mtspr SPRN_MMCR2, r3
238 + mtspr SPRN_SPRG3, r4
239 blr
240
241 /*
242 diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
243 index 8a10a045b57b..8cf03f101938 100644
244 --- a/arch/x86/events/intel/ds.c
245 +++ b/arch/x86/events/intel/ds.c
246 @@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
247 ds->bts_buffer_base = (unsigned long) cea;
248 ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
249 ds->bts_index = ds->bts_buffer_base;
250 - max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
251 - ds->bts_absolute_maximum = ds->bts_buffer_base + max;
252 - ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
253 + max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
254 + ds->bts_absolute_maximum = ds->bts_buffer_base +
255 + max * BTS_RECORD_SIZE;
256 + ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
257 + (max / 16) * BTS_RECORD_SIZE;
258 return 0;
259 }
260
261 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
262 index c356098b6fb9..4d4015ddcf26 100644
263 --- a/arch/x86/include/asm/apm.h
264 +++ b/arch/x86/include/asm/apm.h
265 @@ -7,8 +7,6 @@
266 #ifndef _ASM_X86_MACH_DEFAULT_APM_H
267 #define _ASM_X86_MACH_DEFAULT_APM_H
268
269 -#include <asm/nospec-branch.h>
270 -
271 #ifdef APM_ZERO_SEGS
272 # define APM_DO_ZERO_SEGS \
273 "pushl %%ds\n\t" \
274 @@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
275 * N.B. We do NOT need a cld after the BIOS call
276 * because we always save and restore the flags.
277 */
278 - firmware_restrict_branch_speculation_start();
279 __asm__ __volatile__(APM_DO_ZERO_SEGS
280 "pushl %%edi\n\t"
281 "pushl %%ebp\n\t"
282 @@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
283 "=S" (*esi)
284 : "a" (func), "b" (ebx_in), "c" (ecx_in)
285 : "memory", "cc");
286 - firmware_restrict_branch_speculation_end();
287 }
288
289 static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
290 @@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
291 * N.B. We do NOT need a cld after the BIOS call
292 * because we always save and restore the flags.
293 */
294 - firmware_restrict_branch_speculation_start();
295 __asm__ __volatile__(APM_DO_ZERO_SEGS
296 "pushl %%edi\n\t"
297 "pushl %%ebp\n\t"
298 @@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
299 "=S" (si)
300 : "a" (func), "b" (ebx_in), "c" (ecx_in)
301 : "memory", "cc");
302 - firmware_restrict_branch_speculation_end();
303 return error;
304 }
305
306 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
307 index dfcbe6924eaf..3dd661dcc3f7 100644
308 --- a/arch/x86/kernel/apm_32.c
309 +++ b/arch/x86/kernel/apm_32.c
310 @@ -240,6 +240,7 @@
311 #include <asm/olpc.h>
312 #include <asm/paravirt.h>
313 #include <asm/reboot.h>
314 +#include <asm/nospec-branch.h>
315
316 #if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
317 extern int (*console_blank_hook)(int);
318 @@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
319 gdt[0x40 / 8] = bad_bios_desc;
320
321 apm_irq_save(flags);
322 + firmware_restrict_branch_speculation_start();
323 APM_DO_SAVE_SEGS;
324 apm_bios_call_asm(call->func, call->ebx, call->ecx,
325 &call->eax, &call->ebx, &call->ecx, &call->edx,
326 &call->esi);
327 APM_DO_RESTORE_SEGS;
328 + firmware_restrict_branch_speculation_end();
329 apm_irq_restore(flags);
330 gdt[0x40 / 8] = save_desc_40;
331 put_cpu();
332 @@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
333 gdt[0x40 / 8] = bad_bios_desc;
334
335 apm_irq_save(flags);
336 + firmware_restrict_branch_speculation_start();
337 APM_DO_SAVE_SEGS;
338 error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
339 &call->eax);
340 APM_DO_RESTORE_SEGS;
341 + firmware_restrict_branch_speculation_end();
342 apm_irq_restore(flags);
343 gdt[0x40 / 8] = save_desc_40;
344 put_cpu();
345 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
346 index 6f7eda9d5297..79ae1423b619 100644
347 --- a/arch/x86/kernel/cpu/mcheck/mce.c
348 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
349 @@ -2147,9 +2147,6 @@ static ssize_t store_int_with_restart(struct device *s,
350 if (check_interval == old_check_interval)
351 return ret;
352
353 - if (check_interval < 1)
354 - check_interval = 1;
355 -
356 mutex_lock(&mce_sysfs_mutex);
357 mce_restart();
358 mutex_unlock(&mce_sysfs_mutex);
359 diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
360 index 8b26c9e01cc4..d79a18b4cf9d 100644
361 --- a/arch/x86/kernel/kvmclock.c
362 +++ b/arch/x86/kernel/kvmclock.c
363 @@ -319,6 +319,8 @@ void __init kvmclock_init(void)
364 printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
365 msr_kvm_system_time, msr_kvm_wall_clock);
366
367 + pvclock_set_pvti_cpu0_va(hv_clock);
368 +
369 if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
370 pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
371
372 @@ -366,14 +368,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
373 vcpu_time = &hv_clock[cpu].pvti;
374 flags = pvclock_read_flags(vcpu_time);
375
376 - if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
377 - put_cpu();
378 - return 1;
379 - }
380 -
381 - pvclock_set_pvti_cpu0_va(hv_clock);
382 put_cpu();
383
384 + if (!(flags & PVCLOCK_TSC_STABLE_BIT))
385 + return 1;
386 +
387 kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
388 #endif
389 return 0;
390 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
391 index dd4366edc200..a3bbac8ef4d0 100644
392 --- a/arch/x86/kvm/vmx.c
393 +++ b/arch/x86/kvm/vmx.c
394 @@ -2376,6 +2376,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
395 struct vcpu_vmx *vmx = to_vmx(vcpu);
396 #ifdef CONFIG_X86_64
397 int cpu = raw_smp_processor_id();
398 + unsigned long fs_base, kernel_gs_base;
399 #endif
400 int i;
401
402 @@ -2391,12 +2392,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
403 vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
404
405 #ifdef CONFIG_X86_64
406 - save_fsgs_for_kvm();
407 - vmx->host_state.fs_sel = current->thread.fsindex;
408 - vmx->host_state.gs_sel = current->thread.gsindex;
409 -#else
410 - savesegment(fs, vmx->host_state.fs_sel);
411 - savesegment(gs, vmx->host_state.gs_sel);
412 + if (likely(is_64bit_mm(current->mm))) {
413 + save_fsgs_for_kvm();
414 + vmx->host_state.fs_sel = current->thread.fsindex;
415 + vmx->host_state.gs_sel = current->thread.gsindex;
416 + fs_base = current->thread.fsbase;
417 + kernel_gs_base = current->thread.gsbase;
418 + } else {
419 +#endif
420 + savesegment(fs, vmx->host_state.fs_sel);
421 + savesegment(gs, vmx->host_state.gs_sel);
422 +#ifdef CONFIG_X86_64
423 + fs_base = read_msr(MSR_FS_BASE);
424 + kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
425 + }
426 #endif
427 if (!(vmx->host_state.fs_sel & 7)) {
428 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
429 @@ -2416,10 +2425,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
430 savesegment(ds, vmx->host_state.ds_sel);
431 savesegment(es, vmx->host_state.es_sel);
432
433 - vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
434 + vmcs_writel(HOST_FS_BASE, fs_base);
435 vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
436
437 - vmx->msr_host_kernel_gs_base = current->thread.gsbase;
438 + vmx->msr_host_kernel_gs_base = kernel_gs_base;
439 if (is_long_mode(&vmx->vcpu))
440 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
441 #else
442 @@ -4110,11 +4119,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
443 vmcs_conf->order = get_order(vmcs_conf->size);
444 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
445
446 - /* KVM supports Enlightened VMCS v1 only */
447 - if (static_branch_unlikely(&enable_evmcs))
448 - vmcs_conf->revision_id = KVM_EVMCS_VERSION;
449 - else
450 - vmcs_conf->revision_id = vmx_msr_low;
451 + vmcs_conf->revision_id = vmx_msr_low;
452
453 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
454 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
455 @@ -4184,7 +4189,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
456 return NULL;
457 vmcs = page_address(pages);
458 memset(vmcs, 0, vmcs_config.size);
459 - vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
460 +
461 + /* KVM supports Enlightened VMCS v1 only */
462 + if (static_branch_unlikely(&enable_evmcs))
463 + vmcs->revision_id = KVM_EVMCS_VERSION;
464 + else
465 + vmcs->revision_id = vmcs_config.revision_id;
466 +
467 return vmcs;
468 }
469
470 @@ -4343,6 +4354,19 @@ static __init int alloc_kvm_area(void)
471 return -ENOMEM;
472 }
473
474 + /*
475 + * When eVMCS is enabled, alloc_vmcs_cpu() sets
476 + * vmcs->revision_id to KVM_EVMCS_VERSION instead of
477 + * revision_id reported by MSR_IA32_VMX_BASIC.
478 + *
479 + * However, even though not explictly documented by
480 + * TLFS, VMXArea passed as VMXON argument should
481 + * still be marked with revision_id reported by
482 + * physical CPU.
483 + */
484 + if (static_branch_unlikely(&enable_evmcs))
485 + vmcs->revision_id = vmcs_config.revision_id;
486 +
487 per_cpu(vmxarea, cpu) = vmcs;
488 }
489 return 0;
490 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
491 index bd3f0a9d5e60..b357f81bfba6 100644
492 --- a/drivers/cpufreq/intel_pstate.c
493 +++ b/drivers/cpufreq/intel_pstate.c
494 @@ -2179,6 +2179,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
495 return true;
496 }
497
498 +static bool __init intel_pstate_no_acpi_pcch(void)
499 +{
500 + acpi_status status;
501 + acpi_handle handle;
502 +
503 + status = acpi_get_handle(NULL, "\\_SB", &handle);
504 + if (ACPI_FAILURE(status))
505 + return true;
506 +
507 + return !acpi_has_method(handle, "PCCH");
508 +}
509 +
510 static bool __init intel_pstate_has_acpi_ppc(void)
511 {
512 int i;
513 @@ -2238,7 +2250,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
514
515 switch (plat_info[idx].data) {
516 case PSS:
517 - return intel_pstate_no_acpi_pss();
518 + if (!intel_pstate_no_acpi_pss())
519 + return false;
520 +
521 + return intel_pstate_no_acpi_pcch();
522 case PPC:
523 return intel_pstate_has_acpi_ppc() && !force_load;
524 }
525 diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
526 index 3f0ce2ae35ee..0c56c9759672 100644
527 --- a/drivers/cpufreq/pcc-cpufreq.c
528 +++ b/drivers/cpufreq/pcc-cpufreq.c
529 @@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
530 {
531 int ret;
532
533 + /* Skip initialization if another cpufreq driver is there. */
534 + if (cpufreq_get_current_driver())
535 + return 0;
536 +
537 if (acpi_disabled)
538 return 0;
539
540 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
541 index dc34b50e6b29..b11e9659e312 100644
542 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
543 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
544 @@ -925,6 +925,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
545 r = amdgpu_bo_vm_update_pte(p);
546 if (r)
547 return r;
548 +
549 + r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
550 + if (r)
551 + return r;
552 }
553
554 return amdgpu_cs_sync_rings(p);
555 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
556 index 4304d9e408b8..ace9ad578ca0 100644
557 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
558 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
559 @@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
560 enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
561 I2C_MOT_TRUE : I2C_MOT_FALSE;
562 enum ddc_result res;
563 - uint32_t read_bytes = msg->size;
564 + ssize_t read_bytes;
565
566 if (WARN_ON(msg->size > 16))
567 return -E2BIG;
568
569 switch (msg->request & ~DP_AUX_I2C_MOT) {
570 case DP_AUX_NATIVE_READ:
571 - res = dal_ddc_service_read_dpcd_data(
572 + read_bytes = dal_ddc_service_read_dpcd_data(
573 TO_DM_AUX(aux)->ddc_service,
574 false,
575 I2C_MOT_UNDEF,
576 msg->address,
577 msg->buffer,
578 - msg->size,
579 - &read_bytes);
580 - break;
581 + msg->size);
582 + return read_bytes;
583 case DP_AUX_NATIVE_WRITE:
584 res = dal_ddc_service_write_dpcd_data(
585 TO_DM_AUX(aux)->ddc_service,
586 @@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
587 msg->size);
588 break;
589 case DP_AUX_I2C_READ:
590 - res = dal_ddc_service_read_dpcd_data(
591 + read_bytes = dal_ddc_service_read_dpcd_data(
592 TO_DM_AUX(aux)->ddc_service,
593 true,
594 mot,
595 msg->address,
596 msg->buffer,
597 - msg->size,
598 - &read_bytes);
599 - break;
600 + msg->size);
601 + return read_bytes;
602 case DP_AUX_I2C_WRITE:
603 res = dal_ddc_service_write_dpcd_data(
604 TO_DM_AUX(aux)->ddc_service,
605 @@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
606 r == DDC_RESULT_SUCESSFULL);
607 #endif
608
609 - if (res != DDC_RESULT_SUCESSFULL)
610 - return -EIO;
611 - return read_bytes;
612 + return msg->size;
613 }
614
615 static enum drm_connector_status
616 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
617 index ae48d603ebd6..49c2face1e7a 100644
618 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
619 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
620 @@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
621 return ret;
622 }
623
624 -enum ddc_result dal_ddc_service_read_dpcd_data(
625 +ssize_t dal_ddc_service_read_dpcd_data(
626 struct ddc_service *ddc,
627 bool i2c,
628 enum i2c_mot_mode mot,
629 uint32_t address,
630 uint8_t *data,
631 - uint32_t len,
632 - uint32_t *read)
633 + uint32_t len)
634 {
635 struct aux_payload read_payload = {
636 .i2c_over_aux = i2c,
637 @@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
638 .mot = mot
639 };
640
641 - *read = 0;
642 -
643 if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
644 BREAK_TO_DEBUGGER();
645 return DDC_RESULT_FAILED_INVALID_OPERATION;
646 @@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
647 ddc->ctx->i2caux,
648 ddc->ddc_pin,
649 &command)) {
650 - *read = command.payloads->length;
651 - return DDC_RESULT_SUCESSFULL;
652 + return (ssize_t)command.payloads->length;
653 }
654
655 return DDC_RESULT_FAILED_OPERATION;
656 diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
657 index 30b3a08b91be..090b7a8dd67b 100644
658 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
659 +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
660 @@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
661 uint8_t *read_buf,
662 uint32_t read_size);
663
664 -enum ddc_result dal_ddc_service_read_dpcd_data(
665 +ssize_t dal_ddc_service_read_dpcd_data(
666 struct ddc_service *ddc,
667 bool i2c,
668 enum i2c_mot_mode mot,
669 uint32_t address,
670 uint8_t *data,
671 - uint32_t len,
672 - uint32_t *read);
673 + uint32_t len);
674
675 enum ddc_result dal_ddc_service_write_dpcd_data(
676 struct ddc_service *ddc,
677 diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
678 index d345563fdff3..ce281d651ae8 100644
679 --- a/drivers/gpu/drm/drm_lease.c
680 +++ b/drivers/gpu/drm/drm_lease.c
681 @@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
682
683 /* Clone the lessor file to create a new file for us */
684 DRM_DEBUG_LEASE("Allocating lease file\n");
685 - path_get(&lessor_file->f_path);
686 - lessee_file = alloc_file(&lessor_file->f_path,
687 - lessor_file->f_mode,
688 - fops_get(lessor_file->f_inode->i_fop));
689 -
690 + lessee_file = filp_clone_open(lessor_file);
691 if (IS_ERR(lessee_file)) {
692 ret = PTR_ERR(lessee_file);
693 goto out_lessee;
694 }
695
696 - /* Initialize the new file for DRM */
697 - DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
698 - ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
699 - if (ret)
700 - goto out_lessee_file;
701 -
702 lessee_priv = lessee_file->private_data;
703 -
704 /* Change the file to a master one */
705 drm_master_put(&lessee_priv->master);
706 lessee_priv->master = lessee;
707 @@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
708 DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
709 return 0;
710
711 -out_lessee_file:
712 - fput(lessee_file);
713 -
714 out_lessee:
715 drm_master_put(&lessee);
716
717 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
718 index b25cc5aa8fbe..d793b2bbd6c2 100644
719 --- a/drivers/gpu/drm/i915/i915_irq.c
720 +++ b/drivers/gpu/drm/i915/i915_irq.c
721 @@ -1967,10 +1967,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
722
723 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
724 {
725 - u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
726 + u32 hotplug_status = 0, hotplug_status_mask;
727 + int i;
728 +
729 + if (IS_G4X(dev_priv) ||
730 + IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
731 + hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
732 + DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
733 + else
734 + hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
735
736 - if (hotplug_status)
737 + /*
738 + * We absolutely have to clear all the pending interrupt
739 + * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
740 + * interrupt bit won't have an edge, and the i965/g4x
741 + * edge triggered IIR will not notice that an interrupt
742 + * is still pending. We can't use PORT_HOTPLUG_EN to
743 + * guarantee the edge as the act of toggling the enable
744 + * bits can itself generate a new hotplug interrupt :(
745 + */
746 + for (i = 0; i < 10; i++) {
747 + u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
748 +
749 + if (tmp == 0)
750 + return hotplug_status;
751 +
752 + hotplug_status |= tmp;
753 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
754 + }
755 +
756 + WARN_ONCE(1,
757 + "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
758 + I915_READ(PORT_HOTPLUG_STAT));
759
760 return hotplug_status;
761 }
762 diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
763 index debbbf0fd4bd..408b955e5c39 100644
764 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
765 +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
766 @@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
767 struct nouveau_drm *drm = nouveau_drm(dev);
768 struct nvif_device *device = &drm->client.device;
769 struct drm_connector *connector;
770 + struct drm_connector_list_iter conn_iter;
771
772 INIT_LIST_HEAD(&drm->bl_connectors);
773
774 @@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
775 return 0;
776 }
777
778 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
779 + drm_connector_list_iter_begin(dev, &conn_iter);
780 + drm_for_each_connector_iter(connector, &conn_iter) {
781 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
782 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
783 continue;
784 @@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
785 break;
786 }
787 }
788 -
789 + drm_connector_list_iter_end(&conn_iter);
790
791 return 0;
792 }
793 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
794 index 6ed9cb053dfa..359fecce8cc0 100644
795 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
796 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
797 @@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
798 struct nouveau_display *disp = nouveau_display(dev);
799 struct nouveau_connector *nv_connector = NULL;
800 struct drm_connector *connector;
801 + struct drm_connector_list_iter conn_iter;
802 int type, ret = 0;
803 bool dummy;
804
805 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
806 + drm_connector_list_iter_begin(dev, &conn_iter);
807 + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
808 nv_connector = nouveau_connector(connector);
809 - if (nv_connector->index == index)
810 + if (nv_connector->index == index) {
811 + drm_connector_list_iter_end(&conn_iter);
812 return connector;
813 + }
814 }
815 + drm_connector_list_iter_end(&conn_iter);
816
817 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
818 if (!nv_connector)
819 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
820 index a4d1a059bd3d..dc7454e7f19a 100644
821 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h
822 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
823 @@ -33,6 +33,7 @@
824 #include <drm/drm_encoder.h>
825 #include <drm/drm_dp_helper.h>
826 #include "nouveau_crtc.h"
827 +#include "nouveau_encoder.h"
828
829 struct nvkm_i2c_port;
830
831 @@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
832 return container_of(con, struct nouveau_connector, base);
833 }
834
835 +static inline bool
836 +nouveau_connector_is_mst(struct drm_connector *connector)
837 +{
838 + const struct nouveau_encoder *nv_encoder;
839 + const struct drm_encoder *encoder;
840 +
841 + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
842 + return false;
843 +
844 + nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
845 + if (!nv_encoder)
846 + return false;
847 +
848 + encoder = &nv_encoder->base.base;
849 + return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
850 +}
851 +
852 +#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
853 + drm_for_each_connector_iter(connector, iter) \
854 + for_each_if(!nouveau_connector_is_mst(connector))
855 +
856 static inline struct nouveau_connector *
857 nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
858 {
859 struct drm_device *dev = nv_crtc->base.dev;
860 struct drm_connector *connector;
861 + struct drm_connector_list_iter conn_iter;
862 + struct nouveau_connector *nv_connector = NULL;
863 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
864
865 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
866 - if (connector->encoder && connector->encoder->crtc == crtc)
867 - return nouveau_connector(connector);
868 + drm_connector_list_iter_begin(dev, &conn_iter);
869 + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
870 + if (connector->encoder && connector->encoder->crtc == crtc) {
871 + nv_connector = nouveau_connector(connector);
872 + break;
873 + }
874 }
875 + drm_connector_list_iter_end(&conn_iter);
876
877 - return NULL;
878 + return nv_connector;
879 }
880
881 struct drm_connector *
882 diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
883 index 009713404cc4..4cba117e81fc 100644
884 --- a/drivers/gpu/drm/nouveau/nouveau_display.c
885 +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
886 @@ -406,6 +406,7 @@ nouveau_display_init(struct drm_device *dev)
887 struct nouveau_display *disp = nouveau_display(dev);
888 struct nouveau_drm *drm = nouveau_drm(dev);
889 struct drm_connector *connector;
890 + struct drm_connector_list_iter conn_iter;
891 int ret;
892
893 ret = disp->init(dev);
894 @@ -413,10 +414,12 @@ nouveau_display_init(struct drm_device *dev)
895 return ret;
896
897 /* enable hotplug interrupts */
898 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
899 + drm_connector_list_iter_begin(dev, &conn_iter);
900 + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
901 struct nouveau_connector *conn = nouveau_connector(connector);
902 nvif_notify_get(&conn->hpd);
903 }
904 + drm_connector_list_iter_end(&conn_iter);
905
906 /* enable flip completion events */
907 nvif_notify_get(&drm->flip);
908 @@ -429,6 +432,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
909 struct nouveau_display *disp = nouveau_display(dev);
910 struct nouveau_drm *drm = nouveau_drm(dev);
911 struct drm_connector *connector;
912 + struct drm_connector_list_iter conn_iter;
913
914 if (!suspend) {
915 if (drm_drv_uses_atomic_modeset(dev))
916 @@ -441,10 +445,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
917 nvif_notify_put(&drm->flip);
918
919 /* disable hotplug interrupts */
920 - list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
921 + drm_connector_list_iter_begin(dev, &conn_iter);
922 + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
923 struct nouveau_connector *conn = nouveau_connector(connector);
924 nvif_notify_put(&conn->hpd);
925 }
926 + drm_connector_list_iter_end(&conn_iter);
927
928 drm_kms_helper_poll_disable(dev);
929 disp->fini(dev);
930 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
931 index bbbf353682e1..0bffeb95b072 100644
932 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
933 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
934 @@ -866,22 +866,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
935 static int
936 nouveau_pmops_runtime_idle(struct device *dev)
937 {
938 - struct pci_dev *pdev = to_pci_dev(dev);
939 - struct drm_device *drm_dev = pci_get_drvdata(pdev);
940 - struct nouveau_drm *drm = nouveau_drm(drm_dev);
941 - struct drm_crtc *crtc;
942 -
943 if (!nouveau_pmops_runtime()) {
944 pm_runtime_forbid(dev);
945 return -EBUSY;
946 }
947
948 - list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
949 - if (crtc->enabled) {
950 - DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
951 - return -EBUSY;
952 - }
953 - }
954 pm_runtime_mark_last_busy(dev);
955 pm_runtime_autosuspend(dev);
956 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
957 diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
958 index 753b1a698fc4..6b16946f9b05 100644
959 --- a/drivers/misc/cxl/api.c
960 +++ b/drivers/misc/cxl/api.c
961 @@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
962 d_instantiate(path.dentry, inode);
963
964 file = alloc_file(&path, OPEN_FMODE(flags), fops);
965 - if (IS_ERR(file))
966 - goto err_dput;
967 + if (IS_ERR(file)) {
968 + path_put(&path);
969 + goto err_fs;
970 + }
971 file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
972 file->private_data = priv;
973
974 return file;
975
976 -err_dput:
977 - path_put(&path);
978 err_inode:
979 iput(inode);
980 err_fs:
981 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
982 index fc7383106946..91eb8910b1c9 100644
983 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
984 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
985 @@ -63,8 +63,6 @@
986
987 #define AQ_CFG_NAPI_WEIGHT 64U
988
989 -#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
990 -
991 /*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
992
993 #define AQ_NIC_FC_OFF 0U
994 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
995 index a2d416b24ffc..2c6ebd91a9f2 100644
996 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
997 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
998 @@ -98,6 +98,8 @@ struct aq_stats_s {
999 #define AQ_HW_MEDIA_TYPE_TP 1U
1000 #define AQ_HW_MEDIA_TYPE_FIBRE 2U
1001
1002 +#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
1003 +
1004 struct aq_hw_s {
1005 atomic_t flags;
1006 u8 rbl_enabled:1;
1007 @@ -177,7 +179,7 @@ struct aq_hw_ops {
1008 unsigned int packet_filter);
1009
1010 int (*hw_multicast_list_set)(struct aq_hw_s *self,
1011 - u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
1012 + u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
1013 [ETH_ALEN],
1014 u32 count);
1015
1016 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
1017 index ba5fe8c4125d..e3ae29e523f0 100644
1018 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
1019 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
1020 @@ -135,17 +135,10 @@ static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
1021 static void aq_ndev_set_multicast_settings(struct net_device *ndev)
1022 {
1023 struct aq_nic_s *aq_nic = netdev_priv(ndev);
1024 - int err = 0;
1025
1026 - err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
1027 - if (err < 0)
1028 - return;
1029 + aq_nic_set_packet_filter(aq_nic, ndev->flags);
1030
1031 - if (netdev_mc_count(ndev)) {
1032 - err = aq_nic_set_multicast_list(aq_nic, ndev);
1033 - if (err < 0)
1034 - return;
1035 - }
1036 + aq_nic_set_multicast_list(aq_nic, ndev);
1037 }
1038
1039 static const struct net_device_ops aq_ndev_ops = {
1040 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1041 index 1a1a6380c128..7a22d0257e04 100644
1042 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1043 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
1044 @@ -563,34 +563,41 @@ int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
1045
1046 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
1047 {
1048 + unsigned int packet_filter = self->packet_filter;
1049 struct netdev_hw_addr *ha = NULL;
1050 unsigned int i = 0U;
1051
1052 - self->mc_list.count = 0U;
1053 -
1054 - netdev_for_each_mc_addr(ha, ndev) {
1055 - ether_addr_copy(self->mc_list.ar[i++], ha->addr);
1056 - ++self->mc_list.count;
1057 + self->mc_list.count = 0;
1058 + if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
1059 + packet_filter |= IFF_PROMISC;
1060 + } else {
1061 + netdev_for_each_uc_addr(ha, ndev) {
1062 + ether_addr_copy(self->mc_list.ar[i++], ha->addr);
1063
1064 - if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
1065 - break;
1066 + if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
1067 + break;
1068 + }
1069 }
1070
1071 - if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
1072 - /* Number of filters is too big: atlantic does not support this.
1073 - * Force all multi filter to support this.
1074 - * With this we disable all UC filters and setup "all pass"
1075 - * multicast mask
1076 - */
1077 - self->packet_filter |= IFF_ALLMULTI;
1078 - self->aq_nic_cfg.mc_list_count = 0;
1079 - return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
1080 - self->packet_filter);
1081 + if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
1082 + packet_filter |= IFF_ALLMULTI;
1083 } else {
1084 - return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
1085 - self->mc_list.ar,
1086 - self->mc_list.count);
1087 + netdev_for_each_mc_addr(ha, ndev) {
1088 + ether_addr_copy(self->mc_list.ar[i++], ha->addr);
1089 +
1090 + if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
1091 + break;
1092 + }
1093 + }
1094 +
1095 + if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
1096 + packet_filter |= IFF_MULTICAST;
1097 + self->mc_list.count = i;
1098 + self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
1099 + self->mc_list.ar,
1100 + self->mc_list.count);
1101 }
1102 + return aq_nic_set_packet_filter(self, packet_filter);
1103 }
1104
1105 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
1106 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
1107 index faa533a0ec47..fecfc401f95d 100644
1108 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
1109 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
1110 @@ -75,7 +75,7 @@ struct aq_nic_s {
1111 struct aq_hw_link_status_s link_status;
1112 struct {
1113 u32 count;
1114 - u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
1115 + u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
1116 } mc_list;
1117
1118 struct pci_dev *pdev;
1119 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
1120 index 67e2f9fb9402..8cc6abadc03b 100644
1121 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
1122 +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
1123 @@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
1124
1125 static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
1126 u8 ar_mac
1127 - [AQ_CFG_MULTICAST_ADDRESS_MAX]
1128 + [AQ_HW_MULTICAST_ADDRESS_MAX]
1129 [ETH_ALEN],
1130 u32 count)
1131 {
1132 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1133 index 819f6bcf9b4e..956860a69797 100644
1134 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1135 +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
1136 @@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
1137
1138 static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
1139 u8 ar_mac
1140 - [AQ_CFG_MULTICAST_ADDRESS_MAX]
1141 + [AQ_HW_MULTICAST_ADDRESS_MAX]
1142 [ETH_ALEN],
1143 u32 count)
1144 {
1145 @@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
1146
1147 hw_atl_rpfl2_uc_flr_en_set(self,
1148 (self->aq_nic_cfg->is_mc_list_enabled),
1149 - HW_ATL_B0_MAC_MIN + i);
1150 + HW_ATL_B0_MAC_MIN + i);
1151 }
1152
1153 err = aq_hw_err_from_flags(self);
1154 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
1155 index f33b25fbca63..7db072fe5f22 100644
1156 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
1157 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
1158 @@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
1159 if (!priv->is_lite)
1160 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1161 else
1162 - priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
1163 - GIB_FCS_STRIP);
1164 + priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
1165 + GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
1166
1167 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1168 0, priv->phy_interface);
1169 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
1170 index d6e5d0cbf3a3..cf440b91fd04 100644
1171 --- a/drivers/net/ethernet/broadcom/bcmsysport.h
1172 +++ b/drivers/net/ethernet/broadcom/bcmsysport.h
1173 @@ -278,7 +278,8 @@ struct bcm_rsb {
1174 #define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
1175 #define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
1176 #define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
1177 -#define GIB_FCS_STRIP (1 << 6)
1178 +#define GIB_FCS_STRIP_SHIFT 6
1179 +#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
1180 #define GIB_LCL_LOOP_EN (1 << 7)
1181 #define GIB_LCL_LOOP_TXEN (1 << 8)
1182 #define GIB_RMT_LOOP_EN (1 << 9)
1183 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1184 index 9f59b1270a7c..3e0e7f18ecf9 100644
1185 --- a/drivers/net/ethernet/broadcom/tg3.c
1186 +++ b/drivers/net/ethernet/broadcom/tg3.c
1187 @@ -9289,6 +9289,15 @@ static int tg3_chip_reset(struct tg3 *tp)
1188
1189 tg3_restore_clk(tp);
1190
1191 + /* Increase the core clock speed to fix tx timeout issue for 5762
1192 + * with 100Mbps link speed.
1193 + */
1194 + if (tg3_asic_rev(tp) == ASIC_REV_5762) {
1195 + val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
1196 + tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
1197 + TG3_CPMU_MAC_ORIDE_ENABLE);
1198 + }
1199 +
1200 /* Reprobe ASF enable state. */
1201 tg3_flag_clear(tp, ENABLE_ASF);
1202 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
1203 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1204 index 5c613c6663da..2ca0f1dad54c 100644
1205 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1206 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
1207 @@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
1208 {
1209 const struct mlx4_en_frag_info *frag_info = priv->frag_info;
1210 unsigned int truesize = 0;
1211 + bool release = true;
1212 int nr, frag_size;
1213 struct page *page;
1214 dma_addr_t dma;
1215 - bool release;
1216
1217 /* Collect used fragments while replacing them in the HW descriptors */
1218 for (nr = 0;; frags++) {
1219 @@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
1220 release = page_count(page) != 1 ||
1221 page_is_pfmemalloc(page) ||
1222 page_to_nid(page) != numa_mem_id();
1223 - } else {
1224 + } else if (!priv->rx_headroom) {
1225 + /* rx_headroom for non XDP setup is always 0.
1226 + * When XDP is set, the above condition will
1227 + * guarantee page is always released.
1228 + */
1229 u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
1230
1231 frags->page_offset += sz_align;
1232 diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
1233 index c418113c6b20..c10ca3c20b36 100644
1234 --- a/drivers/net/hyperv/netvsc.c
1235 +++ b/drivers/net/hyperv/netvsc.c
1236 @@ -1291,6 +1291,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1237 struct hv_device *device = netvsc_channel_to_device(channel);
1238 struct net_device *ndev = hv_get_drvdata(device);
1239 int work_done = 0;
1240 + int ret;
1241
1242 /* If starting a new interval */
1243 if (!nvchan->desc)
1244 @@ -1302,16 +1303,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1245 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1246 }
1247
1248 - /* If send of pending receive completions suceeded
1249 - * and did not exhaust NAPI budget this time
1250 - * and not doing busy poll
1251 + /* Send any pending receive completions */
1252 + ret = send_recv_completions(ndev, net_device, nvchan);
1253 +
1254 + /* If it did not exhaust NAPI budget this time
1255 + * and not doing busy poll
1256 * then re-enable host interrupts
1257 - * and reschedule if ring is not empty.
1258 + * and reschedule if ring is not empty
1259 + * or sending receive completion failed.
1260 */
1261 - if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
1262 - work_done < budget &&
1263 + if (work_done < budget &&
1264 napi_complete_done(napi, work_done) &&
1265 - hv_end_read(&channel->inbound) &&
1266 + (ret || hv_end_read(&channel->inbound)) &&
1267 napi_schedule_prep(napi)) {
1268 hv_begin_read(&channel->inbound);
1269 __napi_schedule(napi);
1270 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1271 index 9e4ba8e80a18..5aa081fda447 100644
1272 --- a/drivers/net/phy/phy_device.c
1273 +++ b/drivers/net/phy/phy_device.c
1274 @@ -1720,11 +1720,8 @@ EXPORT_SYMBOL(genphy_loopback);
1275
1276 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
1277 {
1278 - /* The default values for phydev->supported are provided by the PHY
1279 - * driver "features" member, we want to reset to sane defaults first
1280 - * before supporting higher speeds.
1281 - */
1282 - phydev->supported &= PHY_DEFAULT_FEATURES;
1283 + phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
1284 + PHY_10BT_FEATURES);
1285
1286 switch (max_speed) {
1287 default:
1288 diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
1289 index 3d4f7959dabb..b1b3d8f7e67d 100644
1290 --- a/drivers/net/usb/asix_devices.c
1291 +++ b/drivers/net/usb/asix_devices.c
1292 @@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
1293 priv->presvd_phy_advertise);
1294
1295 /* Restore BMCR */
1296 + if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
1297 + priv->presvd_phy_bmcr |= BMCR_ANRESTART;
1298 +
1299 asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
1300 priv->presvd_phy_bmcr);
1301
1302 - mii_nway_restart(&dev->mii);
1303 priv->presvd_phy_advertise = 0;
1304 priv->presvd_phy_bmcr = 0;
1305 }
1306 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1307 index 04c22f508ed9..f8f90d77cf0f 100644
1308 --- a/drivers/net/usb/qmi_wwan.c
1309 +++ b/drivers/net/usb/qmi_wwan.c
1310 @@ -1253,6 +1253,7 @@ static const struct usb_device_id products[] = {
1311 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
1312 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
1313 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
1314 + {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
1315 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
1316 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
1317
1318 diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
1319 index 767c485af59b..522719b494f3 100644
1320 --- a/drivers/ptp/ptp_chardev.c
1321 +++ b/drivers/ptp/ptp_chardev.c
1322 @@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
1323 case PTP_PF_PHYSYNC:
1324 if (chan != 0)
1325 return -EINVAL;
1326 + break;
1327 default:
1328 return -EINVAL;
1329 }
1330 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
1331 index eb2ec1fb07cb..209de7cd9358 100644
1332 --- a/drivers/scsi/qla2xxx/qla_def.h
1333 +++ b/drivers/scsi/qla2xxx/qla_def.h
1334 @@ -361,6 +361,8 @@ struct ct_arg {
1335 dma_addr_t rsp_dma;
1336 u32 req_size;
1337 u32 rsp_size;
1338 + u32 req_allocated_size;
1339 + u32 rsp_allocated_size;
1340 void *req;
1341 void *rsp;
1342 port_id_t id;
1343 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
1344 index 05abe5aaab7f..cbfbab5d9a59 100644
1345 --- a/drivers/scsi/qla2xxx/qla_gs.c
1346 +++ b/drivers/scsi/qla2xxx/qla_gs.c
1347 @@ -556,7 +556,7 @@ static void qla2x00_async_sns_sp_done(void *s, int rc)
1348 /* please ignore kernel warning. otherwise, we have mem leak. */
1349 if (sp->u.iocb_cmd.u.ctarg.req) {
1350 dma_free_coherent(&vha->hw->pdev->dev,
1351 - sizeof(struct ct_sns_pkt),
1352 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1353 sp->u.iocb_cmd.u.ctarg.req,
1354 sp->u.iocb_cmd.u.ctarg.req_dma);
1355 sp->u.iocb_cmd.u.ctarg.req = NULL;
1356 @@ -564,7 +564,7 @@ static void qla2x00_async_sns_sp_done(void *s, int rc)
1357
1358 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1359 dma_free_coherent(&vha->hw->pdev->dev,
1360 - sizeof(struct ct_sns_pkt),
1361 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1362 sp->u.iocb_cmd.u.ctarg.rsp,
1363 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1364 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1365 @@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
1366 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
1367 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
1368 GFP_KERNEL);
1369 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1370 if (!sp->u.iocb_cmd.u.ctarg.req) {
1371 ql_log(ql_log_warn, vha, 0xd041,
1372 "%s: Failed to allocate ct_sns request.\n",
1373 @@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
1374 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
1375 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
1376 GFP_KERNEL);
1377 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1378 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1379 ql_log(ql_log_warn, vha, 0xd042,
1380 "%s: Failed to allocate ct_sns request.\n",
1381 @@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
1382 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
1383 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
1384 GFP_KERNEL);
1385 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1386 if (!sp->u.iocb_cmd.u.ctarg.req) {
1387 ql_log(ql_log_warn, vha, 0xd041,
1388 "%s: Failed to allocate ct_sns request.\n",
1389 @@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
1390 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
1391 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
1392 GFP_KERNEL);
1393 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1394 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1395 ql_log(ql_log_warn, vha, 0xd042,
1396 "%s: Failed to allocate ct_sns request.\n",
1397 @@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
1398 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
1399 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
1400 GFP_KERNEL);
1401 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1402 if (!sp->u.iocb_cmd.u.ctarg.req) {
1403 ql_log(ql_log_warn, vha, 0xd041,
1404 "%s: Failed to allocate ct_sns request.\n",
1405 @@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
1406 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
1407 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
1408 GFP_KERNEL);
1409 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1410 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1411 ql_log(ql_log_warn, vha, 0xd042,
1412 "%s: Failed to allocate ct_sns request.\n",
1413 @@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
1414 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
1415 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
1416 GFP_KERNEL);
1417 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1418 if (!sp->u.iocb_cmd.u.ctarg.req) {
1419 ql_log(ql_log_warn, vha, 0xd041,
1420 "%s: Failed to allocate ct_sns request.\n",
1421 @@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
1422 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
1423 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
1424 GFP_KERNEL);
1425 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1426 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1427 ql_log(ql_log_warn, vha, 0xd042,
1428 "%s: Failed to allocate ct_sns request.\n",
1429 @@ -3392,14 +3400,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
1430 {
1431 if (sp->u.iocb_cmd.u.ctarg.req) {
1432 dma_free_coherent(&vha->hw->pdev->dev,
1433 - sizeof(struct ct_sns_pkt),
1434 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1435 sp->u.iocb_cmd.u.ctarg.req,
1436 sp->u.iocb_cmd.u.ctarg.req_dma);
1437 sp->u.iocb_cmd.u.ctarg.req = NULL;
1438 }
1439 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1440 dma_free_coherent(&vha->hw->pdev->dev,
1441 - sizeof(struct ct_sns_pkt),
1442 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1443 sp->u.iocb_cmd.u.ctarg.rsp,
1444 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1445 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1446 @@ -3600,14 +3608,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
1447 /* please ignore kernel warning. otherwise, we have mem leak. */
1448 if (sp->u.iocb_cmd.u.ctarg.req) {
1449 dma_free_coherent(&vha->hw->pdev->dev,
1450 - sizeof(struct ct_sns_pkt),
1451 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1452 sp->u.iocb_cmd.u.ctarg.req,
1453 sp->u.iocb_cmd.u.ctarg.req_dma);
1454 sp->u.iocb_cmd.u.ctarg.req = NULL;
1455 }
1456 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1457 dma_free_coherent(&vha->hw->pdev->dev,
1458 - sizeof(struct ct_sns_pkt),
1459 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1460 sp->u.iocb_cmd.u.ctarg.rsp,
1461 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1462 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1463 @@ -3658,6 +3666,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
1464 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
1465 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
1466 GFP_KERNEL);
1467 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1468 if (!sp->u.iocb_cmd.u.ctarg.req) {
1469 ql_log(ql_log_warn, vha, 0xd041,
1470 "Failed to allocate ct_sns request.\n");
1471 @@ -3667,6 +3676,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
1472 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
1473 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
1474 GFP_KERNEL);
1475 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1476 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1477 ql_log(ql_log_warn, vha, 0xd042,
1478 "Failed to allocate ct_sns request.\n");
1479 @@ -4125,14 +4135,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
1480 */
1481 if (sp->u.iocb_cmd.u.ctarg.req) {
1482 dma_free_coherent(&vha->hw->pdev->dev,
1483 - sizeof(struct ct_sns_pkt),
1484 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1485 sp->u.iocb_cmd.u.ctarg.req,
1486 sp->u.iocb_cmd.u.ctarg.req_dma);
1487 sp->u.iocb_cmd.u.ctarg.req = NULL;
1488 }
1489 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1490 dma_free_coherent(&vha->hw->pdev->dev,
1491 - sizeof(struct ct_sns_pkt),
1492 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1493 sp->u.iocb_cmd.u.ctarg.rsp,
1494 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1495 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1496 @@ -4162,14 +4172,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
1497 /* please ignore kernel warning. Otherwise, we have mem leak. */
1498 if (sp->u.iocb_cmd.u.ctarg.req) {
1499 dma_free_coherent(&vha->hw->pdev->dev,
1500 - sizeof(struct ct_sns_pkt),
1501 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1502 sp->u.iocb_cmd.u.ctarg.req,
1503 sp->u.iocb_cmd.u.ctarg.req_dma);
1504 sp->u.iocb_cmd.u.ctarg.req = NULL;
1505 }
1506 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1507 dma_free_coherent(&vha->hw->pdev->dev,
1508 - sizeof(struct ct_sns_pkt),
1509 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1510 sp->u.iocb_cmd.u.ctarg.rsp,
1511 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1512 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1513 @@ -4264,14 +4274,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
1514 done_free_sp:
1515 if (sp->u.iocb_cmd.u.ctarg.req) {
1516 dma_free_coherent(&vha->hw->pdev->dev,
1517 - sizeof(struct ct_sns_pkt),
1518 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1519 sp->u.iocb_cmd.u.ctarg.req,
1520 sp->u.iocb_cmd.u.ctarg.req_dma);
1521 sp->u.iocb_cmd.u.ctarg.req = NULL;
1522 }
1523 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1524 dma_free_coherent(&vha->hw->pdev->dev,
1525 - sizeof(struct ct_sns_pkt),
1526 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1527 sp->u.iocb_cmd.u.ctarg.rsp,
1528 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1529 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1530 @@ -4332,6 +4342,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
1531 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
1532 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
1533 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
1534 + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
1535 if (!sp->u.iocb_cmd.u.ctarg.req) {
1536 ql_log(ql_log_warn, vha, 0xffff,
1537 "Failed to allocate ct_sns request.\n");
1538 @@ -4349,6 +4360,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
1539 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
1540 &vha->hw->pdev->dev, rspsz,
1541 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
1542 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
1543 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
1544 ql_log(ql_log_warn, vha, 0xffff,
1545 "Failed to allocate ct_sns request.\n");
1546 @@ -4408,14 +4420,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
1547 done_free_sp:
1548 if (sp->u.iocb_cmd.u.ctarg.req) {
1549 dma_free_coherent(&vha->hw->pdev->dev,
1550 - sizeof(struct ct_sns_pkt),
1551 + sp->u.iocb_cmd.u.ctarg.req_allocated_size,
1552 sp->u.iocb_cmd.u.ctarg.req,
1553 sp->u.iocb_cmd.u.ctarg.req_dma);
1554 sp->u.iocb_cmd.u.ctarg.req = NULL;
1555 }
1556 if (sp->u.iocb_cmd.u.ctarg.rsp) {
1557 dma_free_coherent(&vha->hw->pdev->dev,
1558 - sizeof(struct ct_sns_pkt),
1559 + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
1560 sp->u.iocb_cmd.u.ctarg.rsp,
1561 sp->u.iocb_cmd.u.ctarg.rsp_dma);
1562 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
1563 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
1564 index 636960ad029a..0cb552268be3 100644
1565 --- a/drivers/scsi/qla2xxx/qla_init.c
1566 +++ b/drivers/scsi/qla2xxx/qla_init.c
1567 @@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
1568 conflict_fcport =
1569 qla2x00_find_fcport_by_wwpn(vha,
1570 e->port_name, 0);
1571 - ql_dbg(ql_dbg_disc, vha, 0x20e6,
1572 - "%s %d %8phC post del sess\n",
1573 - __func__, __LINE__,
1574 - conflict_fcport->port_name);
1575 - qlt_schedule_sess_for_deletion
1576 - (conflict_fcport);
1577 + if (conflict_fcport) {
1578 + qlt_schedule_sess_for_deletion
1579 + (conflict_fcport);
1580 + ql_dbg(ql_dbg_disc, vha, 0x20e6,
1581 + "%s %d %8phC post del sess\n",
1582 + __func__, __LINE__,
1583 + conflict_fcport->port_name);
1584 + }
1585 }
1586
1587 /* FW already picked this loop id for another fcport */
1588 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1589 index 15eaa6dded04..2b0816dfe9bd 100644
1590 --- a/drivers/scsi/qla2xxx/qla_os.c
1591 +++ b/drivers/scsi/qla2xxx/qla_os.c
1592 @@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1593 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
1594 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
1595
1596 + ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
1597 +
1598 if (ha->isp_ops->initialize_adapter(base_vha)) {
1599 ql_log(ql_log_fatal, base_vha, 0x00d6,
1600 "Failed to initialize adapter - Adapter flags %x.\n",
1601 @@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1602 host->can_queue, base_vha->req,
1603 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
1604
1605 - ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
1606 -
1607 if (ha->mqenable) {
1608 bool mq = false;
1609 bool startit = false;
1610 diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
1611 index 210407cd2341..da868f6c9638 100644
1612 --- a/drivers/scsi/sd_zbc.c
1613 +++ b/drivers/scsi/sd_zbc.c
1614 @@ -401,7 +401,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
1615 * Check that all zones of the device are equal. The last zone can however
1616 * be smaller. The zone size must also be a power of two number of LBAs.
1617 *
1618 - * Returns the zone size in bytes upon success or an error code upon failure.
1619 + * Returns the zone size in number of blocks upon success or an error code
1620 + * upon failure.
1621 */
1622 static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
1623 {
1624 @@ -411,7 +412,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
1625 unsigned char *rec;
1626 unsigned int buf_len;
1627 unsigned int list_length;
1628 - int ret;
1629 + s64 ret;
1630 u8 same;
1631
1632 /* Get a buffer */
1633 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1634 index 711da3306b14..61c3dc2f3be5 100644
1635 --- a/drivers/usb/host/xhci.c
1636 +++ b/drivers/usb/host/xhci.c
1637 @@ -844,6 +844,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
1638 spin_unlock_irqrestore(&xhci->lock, flags);
1639 }
1640
1641 +static bool xhci_pending_portevent(struct xhci_hcd *xhci)
1642 +{
1643 + __le32 __iomem **port_array;
1644 + int port_index;
1645 + u32 status;
1646 + u32 portsc;
1647 +
1648 + status = readl(&xhci->op_regs->status);
1649 + if (status & STS_EINT)
1650 + return true;
1651 + /*
1652 + * Checking STS_EINT is not enough as there is a lag between a change
1653 + * bit being set and the Port Status Change Event that it generated
1654 + * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
1655 + */
1656 +
1657 + port_index = xhci->num_usb2_ports;
1658 + port_array = xhci->usb2_ports;
1659 + while (port_index--) {
1660 + portsc = readl(port_array[port_index]);
1661 + if (portsc & PORT_CHANGE_MASK ||
1662 + (portsc & PORT_PLS_MASK) == XDEV_RESUME)
1663 + return true;
1664 + }
1665 + port_index = xhci->num_usb3_ports;
1666 + port_array = xhci->usb3_ports;
1667 + while (port_index--) {
1668 + portsc = readl(port_array[port_index]);
1669 + if (portsc & PORT_CHANGE_MASK ||
1670 + (portsc & PORT_PLS_MASK) == XDEV_RESUME)
1671 + return true;
1672 + }
1673 + return false;
1674 +}
1675 +
1676 /*
1677 * Stop HC (not bus-specific)
1678 *
1679 @@ -945,7 +980,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
1680 */
1681 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1682 {
1683 - u32 command, temp = 0, status;
1684 + u32 command, temp = 0;
1685 struct usb_hcd *hcd = xhci_to_hcd(xhci);
1686 struct usb_hcd *secondary_hcd;
1687 int retval = 0;
1688 @@ -1069,8 +1104,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1689 done:
1690 if (retval == 0) {
1691 /* Resume root hubs only when have pending events. */
1692 - status = readl(&xhci->op_regs->status);
1693 - if (status & STS_EINT) {
1694 + if (xhci_pending_portevent(xhci)) {
1695 usb_hcd_resume_root_hub(xhci->shared_hcd);
1696 usb_hcd_resume_root_hub(hcd);
1697 }
1698 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
1699 index 6dfc4867dbcf..9751c1373fbb 100644
1700 --- a/drivers/usb/host/xhci.h
1701 +++ b/drivers/usb/host/xhci.h
1702 @@ -382,6 +382,10 @@ struct xhci_op_regs {
1703 #define PORT_PLC (1 << 22)
1704 /* port configure error change - port failed to configure its link partner */
1705 #define PORT_CEC (1 << 23)
1706 +#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
1707 + PORT_RC | PORT_PLC | PORT_CEC)
1708 +
1709 +
1710 /* Cold Attach Status - xHC can set this bit to report device attached during
1711 * Sx state. Warm port reset should be perfomed to clear this bit and move port
1712 * to connected state.
1713 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
1714 index b423a309a6e0..125b58eff936 100644
1715 --- a/drivers/vfio/pci/vfio_pci.c
1716 +++ b/drivers/vfio/pci/vfio_pci.c
1717 @@ -28,6 +28,7 @@
1718 #include <linux/uaccess.h>
1719 #include <linux/vfio.h>
1720 #include <linux/vgaarb.h>
1721 +#include <linux/nospec.h>
1722
1723 #include "vfio_pci_private.h"
1724
1725 @@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
1726 if (info.index >=
1727 VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1728 return -EINVAL;
1729 + info.index = array_index_nospec(info.index,
1730 + VFIO_PCI_NUM_REGIONS +
1731 + vdev->num_regions);
1732
1733 i = info.index - VFIO_PCI_NUM_REGIONS;
1734
1735 diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
1736 index 759a5bdd40e1..2da5f054257a 100644
1737 --- a/drivers/vfio/vfio_iommu_spapr_tce.c
1738 +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
1739 @@ -457,13 +457,13 @@ static void tce_iommu_unuse_page(struct tce_container *container,
1740 }
1741
1742 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
1743 - unsigned long tce, unsigned long size,
1744 + unsigned long tce, unsigned long shift,
1745 unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
1746 {
1747 long ret = 0;
1748 struct mm_iommu_table_group_mem_t *mem;
1749
1750 - mem = mm_iommu_lookup(container->mm, tce, size);
1751 + mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
1752 if (!mem)
1753 return -EINVAL;
1754
1755 @@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
1756 if (!pua)
1757 return;
1758
1759 - ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
1760 + ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
1761 &hpa, &mem);
1762 if (ret)
1763 pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
1764 @@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
1765 entry + i);
1766
1767 ret = tce_iommu_prereg_ua_to_hpa(container,
1768 - tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
1769 + tce, tbl->it_page_shift, &hpa, &mem);
1770 if (ret)
1771 break;
1772
1773 diff --git a/fs/fat/inode.c b/fs/fat/inode.c
1774 index ffbbf0520d9e..6aa49dcaa938 100644
1775 --- a/fs/fat/inode.c
1776 +++ b/fs/fat/inode.c
1777 @@ -697,13 +697,21 @@ static void fat_set_state(struct super_block *sb,
1778 brelse(bh);
1779 }
1780
1781 +static void fat_reset_iocharset(struct fat_mount_options *opts)
1782 +{
1783 + if (opts->iocharset != fat_default_iocharset) {
1784 + /* Note: opts->iocharset can be NULL here */
1785 + kfree(opts->iocharset);
1786 + opts->iocharset = fat_default_iocharset;
1787 + }
1788 +}
1789 +
1790 static void delayed_free(struct rcu_head *p)
1791 {
1792 struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
1793 unload_nls(sbi->nls_disk);
1794 unload_nls(sbi->nls_io);
1795 - if (sbi->options.iocharset != fat_default_iocharset)
1796 - kfree(sbi->options.iocharset);
1797 + fat_reset_iocharset(&sbi->options);
1798 kfree(sbi);
1799 }
1800
1801 @@ -1118,7 +1126,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
1802 opts->fs_fmask = opts->fs_dmask = current_umask();
1803 opts->allow_utime = -1;
1804 opts->codepage = fat_default_codepage;
1805 - opts->iocharset = fat_default_iocharset;
1806 + fat_reset_iocharset(opts);
1807 if (is_vfat) {
1808 opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
1809 opts->rodir = 0;
1810 @@ -1275,8 +1283,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
1811
1812 /* vfat specific */
1813 case Opt_charset:
1814 - if (opts->iocharset != fat_default_iocharset)
1815 - kfree(opts->iocharset);
1816 + fat_reset_iocharset(opts);
1817 iocharset = match_strdup(&args[0]);
1818 if (!iocharset)
1819 return -ENOMEM;
1820 @@ -1867,8 +1874,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1821 iput(fat_inode);
1822 unload_nls(sbi->nls_io);
1823 unload_nls(sbi->nls_disk);
1824 - if (sbi->options.iocharset != fat_default_iocharset)
1825 - kfree(sbi->options.iocharset);
1826 + fat_reset_iocharset(&sbi->options);
1827 sb->s_fs_info = NULL;
1828 kfree(sbi);
1829 return error;
1830 diff --git a/fs/internal.h b/fs/internal.h
1831 index 980d005b21b4..5645b4ebf494 100644
1832 --- a/fs/internal.h
1833 +++ b/fs/internal.h
1834 @@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
1835
1836 extern int open_check_o_direct(struct file *f);
1837 extern int vfs_open(const struct path *, struct file *, const struct cred *);
1838 -extern struct file *filp_clone_open(struct file *);
1839
1840 /*
1841 * inode.c
1842 diff --git a/include/linux/fs.h b/include/linux/fs.h
1843 index 760d8da1b6c7..81fe0292a7ac 100644
1844 --- a/include/linux/fs.h
1845 +++ b/include/linux/fs.h
1846 @@ -2401,6 +2401,7 @@ extern struct file *filp_open(const char *, int, umode_t);
1847 extern struct file *file_open_root(struct dentry *, struct vfsmount *,
1848 const char *, int, umode_t);
1849 extern struct file * dentry_open(const struct path *, int, const struct cred *);
1850 +extern struct file *filp_clone_open(struct file *);
1851 extern int filp_close(struct file *, fl_owner_t id);
1852
1853 extern struct filename *getname_flags(const char __user *, int, int *);
1854 diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
1855 index 5be31eb7b266..108ede99e533 100644
1856 --- a/include/linux/sched/task.h
1857 +++ b/include/linux/sched/task.h
1858 @@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
1859 extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
1860 struct task_struct *fork_idle(int);
1861 extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
1862 -extern long kernel_wait4(pid_t, int *, int, struct rusage *);
1863 +extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
1864
1865 extern void free_task(struct task_struct *tsk);
1866
1867 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1868 index 9065477ed255..15d8f9c84ca5 100644
1869 --- a/include/linux/skbuff.h
1870 +++ b/include/linux/skbuff.h
1871 @@ -628,6 +628,7 @@ typedef unsigned char *sk_buff_data_t;
1872 * @hash: the packet hash
1873 * @queue_mapping: Queue mapping for multiqueue devices
1874 * @xmit_more: More SKBs are pending for this queue
1875 + * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
1876 * @ndisc_nodetype: router type (from link layer)
1877 * @ooo_okay: allow the mapping of a socket to a queue to be changed
1878 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
1879 @@ -733,7 +734,7 @@ struct sk_buff {
1880 peeked:1,
1881 head_frag:1,
1882 xmit_more:1,
1883 - __unused:1; /* one bit hole */
1884 + pfmemalloc:1;
1885
1886 /* fields enclosed in headers_start/headers_end are copied
1887 * using a single memcpy() in __copy_skb_header()
1888 @@ -752,31 +753,30 @@ struct sk_buff {
1889
1890 __u8 __pkt_type_offset[0];
1891 __u8 pkt_type:3;
1892 - __u8 pfmemalloc:1;
1893 __u8 ignore_df:1;
1894 -
1895 __u8 nf_trace:1;
1896 __u8 ip_summed:2;
1897 __u8 ooo_okay:1;
1898 +
1899 __u8 l4_hash:1;
1900 __u8 sw_hash:1;
1901 __u8 wifi_acked_valid:1;
1902 __u8 wifi_acked:1;
1903 -
1904 __u8 no_fcs:1;
1905 /* Indicates the inner headers are valid in the skbuff. */
1906 __u8 encapsulation:1;
1907 __u8 encap_hdr_csum:1;
1908 __u8 csum_valid:1;
1909 +
1910 __u8 csum_complete_sw:1;
1911 __u8 csum_level:2;
1912 __u8 csum_not_inet:1;
1913 -
1914 __u8 dst_pending_confirm:1;
1915 #ifdef CONFIG_IPV6_NDISC_NODETYPE
1916 __u8 ndisc_nodetype:2;
1917 #endif
1918 __u8 ipvs_property:1;
1919 +
1920 __u8 inner_protocol_type:1;
1921 __u8 remcsum_offload:1;
1922 #ifdef CONFIG_NET_SWITCHDEV
1923 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1924 index a406f2e8680a..aeebbbb9e0bd 100644
1925 --- a/include/net/ipv6.h
1926 +++ b/include/net/ipv6.h
1927 @@ -829,7 +829,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
1928 * to minimize possbility that any useful information to an
1929 * attacker is leaked. Only lower 20 bits are relevant.
1930 */
1931 - rol32(hash, 16);
1932 + hash = rol32(hash, 16);
1933
1934 flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
1935
1936 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
1937 index 35498e613ff5..edfa9d0f6005 100644
1938 --- a/include/net/sctp/sctp.h
1939 +++ b/include/net/sctp/sctp.h
1940 @@ -609,10 +609,15 @@ static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *
1941 return t->dst;
1942 }
1943
1944 +static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
1945 +{
1946 + return SCTP_TRUNC4(max_t(__u32, dst_mtu(dst),
1947 + SCTP_DEFAULT_MINSEGMENT));
1948 +}
1949 +
1950 static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
1951 {
1952 - __u32 pmtu = max_t(size_t, SCTP_TRUNC4(dst_mtu(t->dst)),
1953 - SCTP_DEFAULT_MINSEGMENT);
1954 + __u32 pmtu = sctp_dst_mtu(t->dst);
1955
1956 if (t->pathmtu == pmtu)
1957 return true;
1958 diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
1959 index 64c0291b579c..2f6fa95de2d8 100644
1960 --- a/kernel/stop_machine.c
1961 +++ b/kernel/stop_machine.c
1962 @@ -270,7 +270,11 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
1963 goto retry;
1964 }
1965
1966 - wake_up_q(&wakeq);
1967 + if (!err) {
1968 + preempt_disable();
1969 + wake_up_q(&wakeq);
1970 + preempt_enable();
1971 + }
1972
1973 return err;
1974 }
1975 diff --git a/lib/rhashtable.c b/lib/rhashtable.c
1976 index 2b2b79974b61..240a8b864d5b 100644
1977 --- a/lib/rhashtable.c
1978 +++ b/lib/rhashtable.c
1979 @@ -923,8 +923,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
1980
1981 static size_t rounded_hashtable_size(const struct rhashtable_params *params)
1982 {
1983 - return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
1984 - (unsigned long)params->min_size);
1985 + size_t retsize;
1986 +
1987 + if (params->nelem_hint)
1988 + retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
1989 + (unsigned long)params->min_size);
1990 + else
1991 + retsize = max(HASH_DEFAULT_SIZE,
1992 + (unsigned long)params->min_size);
1993 +
1994 + return retsize;
1995 }
1996
1997 static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
1998 @@ -981,8 +989,6 @@ int rhashtable_init(struct rhashtable *ht,
1999 struct bucket_table *tbl;
2000 size_t size;
2001
2002 - size = HASH_DEFAULT_SIZE;
2003 -
2004 if ((!params->key_len && !params->obj_hashfn) ||
2005 (params->obj_hashfn && !params->obj_cmpfn))
2006 return -EINVAL;
2007 @@ -1009,8 +1015,7 @@ int rhashtable_init(struct rhashtable *ht,
2008
2009 ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
2010
2011 - if (params->nelem_hint)
2012 - size = rounded_hashtable_size(&ht->p);
2013 + size = rounded_hashtable_size(&ht->p);
2014
2015 if (params->locks_mul)
2016 ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
2017 @@ -1102,13 +1107,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
2018 void (*free_fn)(void *ptr, void *arg),
2019 void *arg)
2020 {
2021 - struct bucket_table *tbl;
2022 + struct bucket_table *tbl, *next_tbl;
2023 unsigned int i;
2024
2025 cancel_work_sync(&ht->run_work);
2026
2027 mutex_lock(&ht->mutex);
2028 tbl = rht_dereference(ht->tbl, ht);
2029 +restart:
2030 if (free_fn) {
2031 for (i = 0; i < tbl->size; i++) {
2032 struct rhash_head *pos, *next;
2033 @@ -1125,7 +1131,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
2034 }
2035 }
2036
2037 + next_tbl = rht_dereference(tbl->future_tbl, ht);
2038 bucket_table_free(tbl);
2039 + if (next_tbl) {
2040 + tbl = next_tbl;
2041 + goto restart;
2042 + }
2043 mutex_unlock(&ht->mutex);
2044 }
2045 EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
2046 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2047 index b9f3dbd885bd..327e12679dd5 100644
2048 --- a/mm/huge_memory.c
2049 +++ b/mm/huge_memory.c
2050 @@ -2087,6 +2087,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
2051 if (vma_is_dax(vma))
2052 return;
2053 page = pmd_page(_pmd);
2054 + if (!PageDirty(page) && pmd_dirty(_pmd))
2055 + set_page_dirty(page);
2056 if (!PageReferenced(page) && pmd_young(_pmd))
2057 SetPageReferenced(page);
2058 page_remove_rmap(page, true);
2059 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2060 index 2bd3df3d101a..95c0980a6f7e 100644
2061 --- a/mm/memcontrol.c
2062 +++ b/mm/memcontrol.c
2063 @@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
2064 int nid;
2065 int i;
2066
2067 - while ((memcg = parent_mem_cgroup(memcg))) {
2068 + for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2069 for_each_node(nid) {
2070 mz = mem_cgroup_nodeinfo(memcg, nid);
2071 for (i = 0; i <= DEF_PRIORITY; i++) {
2072 diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
2073 index b2b2323bdc84..188d693cb251 100644
2074 --- a/net/core/gen_stats.c
2075 +++ b/net/core/gen_stats.c
2076 @@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
2077 d->lock = lock;
2078 spin_lock_bh(lock);
2079 }
2080 - if (d->tail)
2081 - return gnet_stats_copy(d, type, NULL, 0, padattr);
2082 + if (d->tail) {
2083 + int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
2084 +
2085 + /* The initial attribute added in gnet_stats_copy() may be
2086 + * preceded by a padding attribute, in which case d->tail will
2087 + * end up pointing at the padding instead of the real attribute.
2088 + * Fix this so gnet_stats_finish_copy() adjusts the length of
2089 + * the right attribute.
2090 + */
2091 + if (ret == 0 && d->tail->nla_type == padattr)
2092 + d->tail = (struct nlattr *)((char *)d->tail +
2093 + NLA_ALIGN(d->tail->nla_len));
2094 + return ret;
2095 + }
2096
2097 return 0;
2098 }
2099 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2100 index 345b51837ca8..a84d69c047ac 100644
2101 --- a/net/core/skbuff.c
2102 +++ b/net/core/skbuff.c
2103 @@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
2104 n->cloned = 1;
2105 n->nohdr = 0;
2106 n->peeked = 0;
2107 + C(pfmemalloc);
2108 n->destructor = NULL;
2109 C(tail);
2110 C(end);
2111 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2112 index e66172aaf241..511d6748ea5f 100644
2113 --- a/net/ipv4/fib_frontend.c
2114 +++ b/net/ipv4/fib_frontend.c
2115 @@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
2116 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
2117 struct flowi4 fl4 = {
2118 .flowi4_iif = LOOPBACK_IFINDEX,
2119 + .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
2120 .daddr = ip_hdr(skb)->saddr,
2121 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
2122 .flowi4_scope = scope,
2123 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
2124 index 2f600f261690..61e42a3390ba 100644
2125 --- a/net/ipv4/sysctl_net_ipv4.c
2126 +++ b/net/ipv4/sysctl_net_ipv4.c
2127 @@ -187,8 +187,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
2128 if (write && ret == 0) {
2129 low = make_kgid(user_ns, urange[0]);
2130 high = make_kgid(user_ns, urange[1]);
2131 - if (!gid_valid(low) || !gid_valid(high) ||
2132 - (urange[1] < urange[0]) || gid_lt(high, low)) {
2133 + if (!gid_valid(low) || !gid_valid(high))
2134 + return -EINVAL;
2135 + if (urange[1] < urange[0] || gid_lt(high, low)) {
2136 low = make_kgid(&init_user_ns, 1);
2137 high = make_kgid(&init_user_ns, 0);
2138 }
2139 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2140 index c9d00ef54dec..58e316cf6607 100644
2141 --- a/net/ipv4/tcp.c
2142 +++ b/net/ipv4/tcp.c
2143 @@ -3524,8 +3524,7 @@ int tcp_abort(struct sock *sk, int err)
2144 struct request_sock *req = inet_reqsk(sk);
2145
2146 local_bh_disable();
2147 - inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
2148 - req);
2149 + inet_csk_reqsk_queue_drop(req->rsk_listener, req);
2150 local_bh_enable();
2151 return 0;
2152 }
2153 diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
2154 index 11e4e80cf7e9..0efb914695ac 100644
2155 --- a/net/ipv6/Kconfig
2156 +++ b/net/ipv6/Kconfig
2157 @@ -108,6 +108,7 @@ config IPV6_MIP6
2158 config IPV6_ILA
2159 tristate "IPv6: Identifier Locator Addressing (ILA)"
2160 depends on NETFILTER
2161 + select DST_CACHE
2162 select LWTUNNEL
2163 ---help---
2164 Support for IPv6 Identifier Locator Addressing (ILA).
2165 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
2166 index 458de353f5d9..1a4d6897d17f 100644
2167 --- a/net/ipv6/ip6_gre.c
2168 +++ b/net/ipv6/ip6_gre.c
2169 @@ -927,7 +927,6 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
2170 static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
2171 struct net_device *dev)
2172 {
2173 - struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2174 struct ip6_tnl *t = netdev_priv(dev);
2175 struct dst_entry *dst = skb_dst(skb);
2176 struct net_device_stats *stats;
2177 @@ -998,6 +997,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
2178 goto tx_err;
2179 }
2180 } else {
2181 + struct ipv6hdr *ipv6h = ipv6_hdr(skb);
2182 +
2183 switch (skb->protocol) {
2184 case htons(ETH_P_IP):
2185 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
2186 diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
2187 index 525051a886bc..3ff9316616d8 100644
2188 --- a/net/ipv6/ndisc.c
2189 +++ b/net/ipv6/ndisc.c
2190 @@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
2191 return;
2192 }
2193 }
2194 - if (ndopts.nd_opts_nonce)
2195 + if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
2196 memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
2197
2198 inc = ipv6_addr_is_multicast(daddr);
2199 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2200 index b94345e657f7..3ed4de230830 100644
2201 --- a/net/ipv6/route.c
2202 +++ b/net/ipv6/route.c
2203 @@ -4274,6 +4274,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
2204 err_nh = nh;
2205 goto add_errout;
2206 }
2207 + if (!rt6_qualify_for_ecmp(rt)) {
2208 + err = -EINVAL;
2209 + NL_SET_ERR_MSG(extack,
2210 + "Device only routes can not be added for IPv6 using the multipath API.");
2211 + dst_release_immediate(&rt->dst);
2212 + goto cleanup;
2213 + }
2214
2215 /* Because each route is added like a single route we remove
2216 * these flags after the first nexthop: if there is a collision,
2217 diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
2218 index 22fa13cf5d8b..846883907cd4 100644
2219 --- a/net/sched/sch_fq_codel.c
2220 +++ b/net/sched/sch_fq_codel.c
2221 @@ -479,23 +479,27 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
2222 q->cparams.mtu = psched_mtu(qdisc_dev(sch));
2223
2224 if (opt) {
2225 - int err = fq_codel_change(sch, opt, extack);
2226 + err = fq_codel_change(sch, opt, extack);
2227 if (err)
2228 - return err;
2229 + goto init_failure;
2230 }
2231
2232 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2233 if (err)
2234 - return err;
2235 + goto init_failure;
2236
2237 if (!q->flows) {
2238 q->flows = kvzalloc(q->flows_cnt *
2239 sizeof(struct fq_codel_flow), GFP_KERNEL);
2240 - if (!q->flows)
2241 - return -ENOMEM;
2242 + if (!q->flows) {
2243 + err = -ENOMEM;
2244 + goto init_failure;
2245 + }
2246 q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
2247 - if (!q->backlogs)
2248 - return -ENOMEM;
2249 + if (!q->backlogs) {
2250 + err = -ENOMEM;
2251 + goto alloc_failure;
2252 + }
2253 for (i = 0; i < q->flows_cnt; i++) {
2254 struct fq_codel_flow *flow = q->flows + i;
2255
2256 @@ -508,6 +512,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
2257 else
2258 sch->flags &= ~TCQ_F_CAN_BYPASS;
2259 return 0;
2260 +
2261 +alloc_failure:
2262 + kvfree(q->flows);
2263 + q->flows = NULL;
2264 +init_failure:
2265 + q->flows_cnt = 0;
2266 + return err;
2267 }
2268
2269 static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
2270 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
2271 index a47179da24e6..ef8adac1be83 100644
2272 --- a/net/sctp/associola.c
2273 +++ b/net/sctp/associola.c
2274 @@ -1446,11 +1446,9 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
2275 return;
2276
2277 /* Get the lowest pmtu of all the transports. */
2278 - list_for_each_entry(t, &asoc->peer.transport_addr_list,
2279 - transports) {
2280 + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
2281 if (t->pmtu_pending && t->dst) {
2282 - sctp_transport_update_pmtu(
2283 - t, SCTP_TRUNC4(dst_mtu(t->dst)));
2284 + sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
2285 t->pmtu_pending = 0;
2286 }
2287 if (!pmtu || (t->pathmtu < pmtu))
2288 diff --git a/net/sctp/transport.c b/net/sctp/transport.c
2289 index 03fc2c427aca..e890ceb55939 100644
2290 --- a/net/sctp/transport.c
2291 +++ b/net/sctp/transport.c
2292 @@ -242,9 +242,9 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
2293 &transport->fl, sk);
2294 }
2295
2296 - if (transport->dst) {
2297 - transport->pathmtu = SCTP_TRUNC4(dst_mtu(transport->dst));
2298 - } else
2299 + if (transport->dst)
2300 + transport->pathmtu = sctp_dst_mtu(transport->dst);
2301 + else
2302 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
2303 }
2304
2305 @@ -273,7 +273,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
2306
2307 if (dst) {
2308 /* Re-fetch, as under layers may have a higher minimum size */
2309 - pmtu = SCTP_TRUNC4(dst_mtu(dst));
2310 + pmtu = sctp_dst_mtu(dst);
2311 change = t->pathmtu != pmtu;
2312 }
2313 t->pathmtu = pmtu;
2314 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
2315 index 69616d00481c..b53026a72e73 100644
2316 --- a/sound/core/rawmidi.c
2317 +++ b/sound/core/rawmidi.c
2318 @@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
2319 int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
2320 struct snd_rawmidi_params * params)
2321 {
2322 - char *newbuf;
2323 + char *newbuf, *oldbuf;
2324 struct snd_rawmidi_runtime *runtime = substream->runtime;
2325
2326 if (substream->append && substream->use_count > 1)
2327 @@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
2328 return -EINVAL;
2329 }
2330 if (params->buffer_size != runtime->buffer_size) {
2331 - newbuf = krealloc(runtime->buffer, params->buffer_size,
2332 - GFP_KERNEL);
2333 + newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
2334 if (!newbuf)
2335 return -ENOMEM;
2336 + spin_lock_irq(&runtime->lock);
2337 + oldbuf = runtime->buffer;
2338 runtime->buffer = newbuf;
2339 runtime->buffer_size = params->buffer_size;
2340 runtime->avail = runtime->buffer_size;
2341 + runtime->appl_ptr = runtime->hw_ptr = 0;
2342 + spin_unlock_irq(&runtime->lock);
2343 + kfree(oldbuf);
2344 }
2345 runtime->avail_min = params->avail_min;
2346 substream->active_sensing = !params->no_active_sensing;
2347 @@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
2348 int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
2349 struct snd_rawmidi_params * params)
2350 {
2351 - char *newbuf;
2352 + char *newbuf, *oldbuf;
2353 struct snd_rawmidi_runtime *runtime = substream->runtime;
2354
2355 snd_rawmidi_drain_input(substream);
2356 @@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
2357 return -EINVAL;
2358 }
2359 if (params->buffer_size != runtime->buffer_size) {
2360 - newbuf = krealloc(runtime->buffer, params->buffer_size,
2361 - GFP_KERNEL);
2362 + newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
2363 if (!newbuf)
2364 return -ENOMEM;
2365 + spin_lock_irq(&runtime->lock);
2366 + oldbuf = runtime->buffer;
2367 runtime->buffer = newbuf;
2368 runtime->buffer_size = params->buffer_size;
2369 + runtime->appl_ptr = runtime->hw_ptr = 0;
2370 + spin_unlock_irq(&runtime->lock);
2371 + kfree(oldbuf);
2372 }
2373 runtime->avail_min = params->avail_min;
2374 return 0;
2375 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2376 index ba9a7e552183..88ce2f1022e1 100644
2377 --- a/sound/pci/hda/patch_conexant.c
2378 +++ b/sound/pci/hda/patch_conexant.c
2379 @@ -965,6 +965,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2380 SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
2381 SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
2382 SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
2383 + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
2384 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2385 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2386 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
2387 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2388 index 066efe783fe8..7bba415cb850 100644
2389 --- a/sound/pci/hda/patch_realtek.c
2390 +++ b/sound/pci/hda/patch_realtek.c
2391 @@ -2363,6 +2363,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2392 SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
2393 SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
2394 SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
2395 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
2396 SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
2397 SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
2398 SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
2399 @@ -6543,6 +6544,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2400 SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
2401 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2402 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
2403 + SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
2404 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
2405 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
2406 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
2407 diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
2408 index 6e865e8b5b10..fe6eb0fe07f6 100644
2409 --- a/virt/kvm/eventfd.c
2410 +++ b/virt/kvm/eventfd.c
2411 @@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
2412 {
2413 struct kvm_kernel_irqfd *irqfd =
2414 container_of(work, struct kvm_kernel_irqfd, shutdown);
2415 + struct kvm *kvm = irqfd->kvm;
2416 u64 cnt;
2417
2418 + /* Make sure irqfd has been initalized in assign path. */
2419 + synchronize_srcu(&kvm->irq_srcu);
2420 +
2421 /*
2422 * Synchronize with the wait-queue and unhook ourselves to prevent
2423 * further events.
2424 @@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
2425
2426 idx = srcu_read_lock(&kvm->irq_srcu);
2427 irqfd_update(kvm, irqfd);
2428 - srcu_read_unlock(&kvm->irq_srcu, idx);
2429
2430 list_add_tail(&irqfd->list, &kvm->irqfds.items);
2431
2432 @@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
2433 if (events & EPOLLIN)
2434 schedule_work(&irqfd->inject);
2435
2436 - /*
2437 - * do not drop the file until the irqfd is fully initialized, otherwise
2438 - * we might race against the EPOLLHUP
2439 - */
2440 - fdput(f);
2441 #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
2442 if (kvm_arch_has_irq_bypass()) {
2443 irqfd->consumer.token = (void *)irqfd->eventfd;
2444 @@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
2445 }
2446 #endif
2447
2448 + srcu_read_unlock(&kvm->irq_srcu, idx);
2449 +
2450 + /*
2451 + * do not drop the file until the irqfd is fully initialized, otherwise
2452 + * we might race against the EPOLLHUP
2453 + */
2454 + fdput(f);
2455 return 0;
2456
2457 fail: