Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0177-5.4.78-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 198323 bytes
-add missing
1 diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
2 index f5be243d250a4..4b0db514b2010 100644
3 --- a/Documentation/networking/j1939.rst
4 +++ b/Documentation/networking/j1939.rst
5 @@ -414,8 +414,8 @@ Send:
6 .can_family = AF_CAN,
7 .can_addr.j1939 = {
8 .name = J1939_NO_NAME;
9 - .pgn = 0x30,
10 - .addr = 0x12300,
11 + .addr = 0x30,
12 + .pgn = 0x12300,
13 },
14 };
15
16 diff --git a/Makefile b/Makefile
17 index 2e24b568b93fd..5725b07aaddf0 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,7 +1,7 @@
21 # SPDX-License-Identifier: GPL-2.0
22 VERSION = 5
23 PATCHLEVEL = 4
24 -SUBLEVEL = 77
25 +SUBLEVEL = 78
26 EXTRAVERSION =
27 NAME = Kleptomaniac Octopus
28
29 diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
30 index 213607a1f45c1..e26a278d301ab 100644
31 --- a/arch/arm/include/asm/kprobes.h
32 +++ b/arch/arm/include/asm/kprobes.h
33 @@ -44,20 +44,20 @@ int kprobe_exceptions_notify(struct notifier_block *self,
34 unsigned long val, void *data);
35
36 /* optinsn template addresses */
37 -extern __visible kprobe_opcode_t optprobe_template_entry;
38 -extern __visible kprobe_opcode_t optprobe_template_val;
39 -extern __visible kprobe_opcode_t optprobe_template_call;
40 -extern __visible kprobe_opcode_t optprobe_template_end;
41 -extern __visible kprobe_opcode_t optprobe_template_sub_sp;
42 -extern __visible kprobe_opcode_t optprobe_template_add_sp;
43 -extern __visible kprobe_opcode_t optprobe_template_restore_begin;
44 -extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn;
45 -extern __visible kprobe_opcode_t optprobe_template_restore_end;
46 +extern __visible kprobe_opcode_t optprobe_template_entry[];
47 +extern __visible kprobe_opcode_t optprobe_template_val[];
48 +extern __visible kprobe_opcode_t optprobe_template_call[];
49 +extern __visible kprobe_opcode_t optprobe_template_end[];
50 +extern __visible kprobe_opcode_t optprobe_template_sub_sp[];
51 +extern __visible kprobe_opcode_t optprobe_template_add_sp[];
52 +extern __visible kprobe_opcode_t optprobe_template_restore_begin[];
53 +extern __visible kprobe_opcode_t optprobe_template_restore_orig_insn[];
54 +extern __visible kprobe_opcode_t optprobe_template_restore_end[];
55
56 #define MAX_OPTIMIZED_LENGTH 4
57 #define MAX_OPTINSN_SIZE \
58 - ((unsigned long)&optprobe_template_end - \
59 - (unsigned long)&optprobe_template_entry)
60 + ((unsigned long)optprobe_template_end - \
61 + (unsigned long)optprobe_template_entry)
62 #define RELATIVEJUMP_SIZE 4
63
64 struct arch_optimized_insn {
65 diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
66 index 7a449df0b3591..c78180172120f 100644
67 --- a/arch/arm/probes/kprobes/opt-arm.c
68 +++ b/arch/arm/probes/kprobes/opt-arm.c
69 @@ -85,21 +85,21 @@ asm (
70 "optprobe_template_end:\n");
71
72 #define TMPL_VAL_IDX \
73 - ((unsigned long *)&optprobe_template_val - (unsigned long *)&optprobe_template_entry)
74 + ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
75 #define TMPL_CALL_IDX \
76 - ((unsigned long *)&optprobe_template_call - (unsigned long *)&optprobe_template_entry)
77 + ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
78 #define TMPL_END_IDX \
79 - ((unsigned long *)&optprobe_template_end - (unsigned long *)&optprobe_template_entry)
80 + ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
81 #define TMPL_ADD_SP \
82 - ((unsigned long *)&optprobe_template_add_sp - (unsigned long *)&optprobe_template_entry)
83 + ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
84 #define TMPL_SUB_SP \
85 - ((unsigned long *)&optprobe_template_sub_sp - (unsigned long *)&optprobe_template_entry)
86 + ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
87 #define TMPL_RESTORE_BEGIN \
88 - ((unsigned long *)&optprobe_template_restore_begin - (unsigned long *)&optprobe_template_entry)
89 + ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
90 #define TMPL_RESTORE_ORIGN_INSN \
91 - ((unsigned long *)&optprobe_template_restore_orig_insn - (unsigned long *)&optprobe_template_entry)
92 + ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
93 #define TMPL_RESTORE_END \
94 - ((unsigned long *)&optprobe_template_restore_end - (unsigned long *)&optprobe_template_entry)
95 + ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
96
97 /*
98 * ARM can always optimize an instruction when using ARM ISA, except
99 @@ -234,7 +234,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
100 }
101
102 /* Copy arch-dep-instance from template. */
103 - memcpy(code, (unsigned long *)&optprobe_template_entry,
104 + memcpy(code, (unsigned long *)optprobe_template_entry,
105 TMPL_END_IDX * sizeof(kprobe_opcode_t));
106
107 /* Adjust buffer according to instruction. */
108 diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
109 index 0ed7598dfa6a0..f1f4f42e8ef46 100644
110 --- a/arch/arm64/kvm/sys_regs.c
111 +++ b/arch/arm64/kvm/sys_regs.c
112 @@ -1132,16 +1132,6 @@ static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
113 return REG_HIDDEN_USER | REG_HIDDEN_GUEST;
114 }
115
116 -/* Visibility overrides for SVE-specific ID registers */
117 -static unsigned int sve_id_visibility(const struct kvm_vcpu *vcpu,
118 - const struct sys_reg_desc *rd)
119 -{
120 - if (vcpu_has_sve(vcpu))
121 - return 0;
122 -
123 - return REG_HIDDEN_USER;
124 -}
125 -
126 /* Generate the emulated ID_AA64ZFR0_EL1 value exposed to the guest */
127 static u64 guest_id_aa64zfr0_el1(const struct kvm_vcpu *vcpu)
128 {
129 @@ -1168,9 +1158,6 @@ static int get_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
130 {
131 u64 val;
132
133 - if (WARN_ON(!vcpu_has_sve(vcpu)))
134 - return -ENOENT;
135 -
136 val = guest_id_aa64zfr0_el1(vcpu);
137 return reg_to_user(uaddr, &val, reg->id);
138 }
139 @@ -1183,9 +1170,6 @@ static int set_id_aa64zfr0_el1(struct kvm_vcpu *vcpu,
140 int err;
141 u64 val;
142
143 - if (WARN_ON(!vcpu_has_sve(vcpu)))
144 - return -ENOENT;
145 -
146 err = reg_from_user(&val, uaddr, id);
147 if (err)
148 return err;
149 @@ -1448,7 +1432,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
150 ID_SANITISED(ID_AA64PFR1_EL1),
151 ID_UNALLOCATED(4,2),
152 ID_UNALLOCATED(4,3),
153 - { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, .visibility = sve_id_visibility },
154 + { SYS_DESC(SYS_ID_AA64ZFR0_EL1), access_id_aa64zfr0_el1, .get_user = get_id_aa64zfr0_el1, .set_user = set_id_aa64zfr0_el1, },
155 ID_UNALLOCATED(4,5),
156 ID_UNALLOCATED(4,6),
157 ID_UNALLOCATED(4,7),
158 diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
159 index cf11277ebd020..000ebb5a6fb3c 100644
160 --- a/arch/powerpc/kernel/eeh_cache.c
161 +++ b/arch/powerpc/kernel/eeh_cache.c
162 @@ -272,8 +272,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
163 {
164 struct pci_io_addr_range *piar;
165 struct rb_node *n;
166 + unsigned long flags;
167
168 - spin_lock(&pci_io_addr_cache_root.piar_lock);
169 + spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
170 for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
171 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
172
173 @@ -281,7 +282,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
174 (piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
175 &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
176 }
177 - spin_unlock(&pci_io_addr_cache_root.piar_lock);
178 + spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
179
180 return 0;
181 }
182 diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
183 index 5e2f2fd78b94f..126ba54384300 100644
184 --- a/arch/powerpc/kernel/head_32.S
185 +++ b/arch/powerpc/kernel/head_32.S
186 @@ -418,11 +418,7 @@ InstructionTLBMiss:
187 cmplw 0,r1,r3
188 #endif
189 mfspr r2, SPRN_SPRG_PGDIR
190 -#ifdef CONFIG_SWAP
191 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
192 -#else
193 - li r1,_PAGE_PRESENT | _PAGE_EXEC
194 -#endif
195 #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
196 bge- 112f
197 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
198 @@ -484,11 +480,7 @@ DataLoadTLBMiss:
199 lis r1,PAGE_OFFSET@h /* check if kernel address */
200 cmplw 0,r1,r3
201 mfspr r2, SPRN_SPRG_PGDIR
202 -#ifdef CONFIG_SWAP
203 li r1, _PAGE_PRESENT | _PAGE_ACCESSED
204 -#else
205 - li r1, _PAGE_PRESENT
206 -#endif
207 bge- 112f
208 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
209 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
210 @@ -564,11 +556,7 @@ DataStoreTLBMiss:
211 lis r1,PAGE_OFFSET@h /* check if kernel address */
212 cmplw 0,r1,r3
213 mfspr r2, SPRN_SPRG_PGDIR
214 -#ifdef CONFIG_SWAP
215 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
216 -#else
217 - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
218 -#endif
219 bge- 112f
220 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
221 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
222 diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
223 index 72f89b7590dd6..344793159b97d 100644
224 --- a/arch/riscv/kernel/head.S
225 +++ b/arch/riscv/kernel/head.S
226 @@ -26,12 +26,17 @@ ENTRY(_start)
227 /* reserved */
228 .word 0
229 .balign 8
230 +#ifdef CONFIG_RISCV_M_MODE
231 + /* Image load offset (0MB) from start of RAM for M-mode */
232 + .dword 0
233 +#else
234 #if __riscv_xlen == 64
235 /* Image load offset(2MB) from start of RAM */
236 .dword 0x200000
237 #else
238 /* Image load offset(4MB) from start of RAM */
239 .dword 0x400000
240 +#endif
241 #endif
242 /* Effective size of kernel image */
243 .dword _end - _start
244 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
245 index ad426cc656e56..66d7ba61803c8 100644
246 --- a/arch/s390/kernel/smp.c
247 +++ b/arch/s390/kernel/smp.c
248 @@ -845,13 +845,14 @@ void __init smp_detect_cpus(void)
249
250 static void smp_init_secondary(void)
251 {
252 - int cpu = smp_processor_id();
253 + int cpu = raw_smp_processor_id();
254
255 S390_lowcore.last_update_clock = get_tod_clock();
256 restore_access_regs(S390_lowcore.access_regs_save_area);
257 set_cpu_flag(CIF_ASCE_PRIMARY);
258 set_cpu_flag(CIF_ASCE_SECONDARY);
259 cpu_init();
260 + rcu_cpu_starting(cpu);
261 preempt_disable();
262 init_cpu_timer();
263 vtime_init();
264 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
265 index acbf3dbb8bf25..bdc1ed7ff6692 100644
266 --- a/arch/x86/kernel/cpu/bugs.c
267 +++ b/arch/x86/kernel/cpu/bugs.c
268 @@ -1252,6 +1252,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
269 return 0;
270 }
271
272 +static bool is_spec_ib_user_controlled(void)
273 +{
274 + return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
275 + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
276 + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
277 + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
278 +}
279 +
280 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
281 {
282 switch (ctrl) {
283 @@ -1259,17 +1267,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
284 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
285 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
286 return 0;
287 - /*
288 - * Indirect branch speculation is always disabled in strict
289 - * mode. It can neither be enabled if it was force-disabled
290 - * by a previous prctl call.
291
292 + /*
293 + * With strict mode for both IBPB and STIBP, the instruction
294 + * code paths avoid checking this task flag and instead,
295 + * unconditionally run the instruction. However, STIBP and IBPB
296 + * are independent and either can be set to conditionally
297 + * enabled regardless of the mode of the other.
298 + *
299 + * If either is set to conditional, allow the task flag to be
300 + * updated, unless it was force-disabled by a previous prctl
301 + * call. Currently, this is possible on an AMD CPU which has the
302 + * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
303 + * kernel is booted with 'spectre_v2_user=seccomp', then
304 + * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
305 + * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
306 */
307 - if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
308 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
309 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
310 + if (!is_spec_ib_user_controlled() ||
311 task_spec_ib_force_disable(task))
312 return -EPERM;
313 +
314 task_clear_spec_ib_disable(task);
315 task_update_spec_tif(task);
316 break;
317 @@ -1282,10 +1299,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
318 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
319 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
320 return -EPERM;
321 - if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
322 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
323 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
324 +
325 + if (!is_spec_ib_user_controlled())
326 return 0;
327 +
328 task_set_spec_ib_disable(task);
329 if (ctrl == PR_SPEC_FORCE_DISABLE)
330 task_set_spec_ib_force_disable(task);
331 @@ -1350,20 +1367,17 @@ static int ib_prctl_get(struct task_struct *task)
332 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
333 spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
334 return PR_SPEC_ENABLE;
335 - else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
336 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
337 - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
338 - return PR_SPEC_DISABLE;
339 - else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
340 - spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
341 - spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
342 - spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) {
343 + else if (is_spec_ib_user_controlled()) {
344 if (task_spec_ib_force_disable(task))
345 return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
346 if (task_spec_ib_disable(task))
347 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
348 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
349 - } else
350 + } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
351 + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
352 + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
353 + return PR_SPEC_DISABLE;
354 + else
355 return PR_SPEC_NOT_AFFECTED;
356 }
357
358 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
359 index 12e83297ea020..880a24889291c 100644
360 --- a/arch/x86/kvm/x86.c
361 +++ b/arch/x86/kvm/x86.c
362 @@ -5235,6 +5235,10 @@ static void kvm_init_msr_list(void)
363 if (!kvm_x86_ops->rdtscp_supported())
364 continue;
365 break;
366 + case MSR_IA32_UMWAIT_CONTROL:
367 + if (!boot_cpu_has(X86_FEATURE_WAITPKG))
368 + continue;
369 + break;
370 case MSR_IA32_RTIT_CTL:
371 case MSR_IA32_RTIT_STATUS:
372 if (!kvm_x86_ops->pt_supported())
373 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
374 index 742f8160b6e28..a3037fe54c3ab 100644
375 --- a/drivers/block/nbd.c
376 +++ b/drivers/block/nbd.c
377 @@ -296,7 +296,7 @@ static void nbd_size_clear(struct nbd_device *nbd)
378 }
379 }
380
381 -static void nbd_size_update(struct nbd_device *nbd)
382 +static void nbd_size_update(struct nbd_device *nbd, bool start)
383 {
384 struct nbd_config *config = nbd->config;
385 struct block_device *bdev = bdget_disk(nbd->disk, 0);
386 @@ -312,7 +312,8 @@ static void nbd_size_update(struct nbd_device *nbd)
387 if (bdev) {
388 if (bdev->bd_disk) {
389 bd_set_size(bdev, config->bytesize);
390 - set_blocksize(bdev, config->blksize);
391 + if (start)
392 + set_blocksize(bdev, config->blksize);
393 } else
394 bdev->bd_invalidated = 1;
395 bdput(bdev);
396 @@ -327,7 +328,7 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
397 config->blksize = blocksize;
398 config->bytesize = blocksize * nr_blocks;
399 if (nbd->task_recv != NULL)
400 - nbd_size_update(nbd);
401 + nbd_size_update(nbd, false);
402 }
403
404 static void nbd_complete_rq(struct request *req)
405 @@ -1293,7 +1294,7 @@ static int nbd_start_device(struct nbd_device *nbd)
406 args->index = i;
407 queue_work(nbd->recv_workq, &args->work);
408 }
409 - nbd_size_update(nbd);
410 + nbd_size_update(nbd, true);
411 return error;
412 }
413
414 @@ -1502,6 +1503,7 @@ static void nbd_release(struct gendisk *disk, fmode_t mode)
415 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
416 bdev->bd_openers == 0)
417 nbd_disconnect_and_put(nbd);
418 + bdput(bdev);
419
420 nbd_config_put(nbd);
421 nbd_put(nbd);
422 diff --git a/drivers/char/random.c b/drivers/char/random.c
423 index 75a8f7f572697..2c29f83ae3d5a 100644
424 --- a/drivers/char/random.c
425 +++ b/drivers/char/random.c
426 @@ -1330,7 +1330,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
427
428 fast_mix(fast_pool);
429 add_interrupt_bench(cycles);
430 - this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
431
432 if (unlikely(crng_init == 0)) {
433 if ((fast_pool->count >= 64) &&
434 diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
435 index 6bb023de17f1f..35229e5143cac 100644
436 --- a/drivers/char/tpm/eventlog/efi.c
437 +++ b/drivers/char/tpm/eventlog/efi.c
438 @@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
439 log_size = log_tbl->size;
440 memunmap(log_tbl);
441
442 + if (!log_size) {
443 + pr_warn("UEFI TPM log area empty\n");
444 + return -EIO;
445 + }
446 +
447 log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size,
448 MEMREMAP_WB);
449 if (!log_tbl) {
450 diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
451 index e7df342a317d6..c722e3b3121a8 100644
452 --- a/drivers/char/tpm/tpm_tis.c
453 +++ b/drivers/char/tpm/tpm_tis.c
454 @@ -27,6 +27,7 @@
455 #include <linux/of.h>
456 #include <linux/of_device.h>
457 #include <linux/kernel.h>
458 +#include <linux/dmi.h>
459 #include "tpm.h"
460 #include "tpm_tis_core.h"
461
462 @@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
463 return container_of(data, struct tpm_tis_tcg_phy, priv);
464 }
465
466 -static bool interrupts = true;
467 -module_param(interrupts, bool, 0444);
468 +static int interrupts = -1;
469 +module_param(interrupts, int, 0444);
470 MODULE_PARM_DESC(interrupts, "Enable interrupts");
471
472 static bool itpm;
473 @@ -63,6 +64,28 @@ module_param(force, bool, 0444);
474 MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
475 #endif
476
477 +static int tpm_tis_disable_irq(const struct dmi_system_id *d)
478 +{
479 + if (interrupts == -1) {
480 + pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident);
481 + interrupts = 0;
482 + }
483 +
484 + return 0;
485 +}
486 +
487 +static const struct dmi_system_id tpm_tis_dmi_table[] = {
488 + {
489 + .callback = tpm_tis_disable_irq,
490 + .ident = "ThinkPad T490s",
491 + .matches = {
492 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
493 + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
494 + },
495 + },
496 + {}
497 +};
498 +
499 #if defined(CONFIG_PNP) && defined(CONFIG_ACPI)
500 static int has_hid(struct acpi_device *dev, const char *hid)
501 {
502 @@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info)
503 int irq = -1;
504 int rc;
505
506 + dmi_check_system(tpm_tis_dmi_table);
507 +
508 rc = check_acpi_tpm2(dev);
509 if (rc)
510 return rc;
511 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
512 index 9ebce2c12c432..5eabbf73fdef7 100644
513 --- a/drivers/char/virtio_console.c
514 +++ b/drivers/char/virtio_console.c
515 @@ -435,12 +435,12 @@ static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size
516 /*
517 * Allocate DMA memory from ancestor. When a virtio
518 * device is created by remoteproc, the DMA memory is
519 - * associated with the grandparent device:
520 - * vdev => rproc => platform-dev.
521 + * associated with the parent device:
522 + * virtioY => remoteprocX#vdevYbuffer.
523 */
524 - if (!vdev->dev.parent || !vdev->dev.parent->parent)
525 + buf->dev = vdev->dev.parent;
526 + if (!buf->dev)
527 goto free_buf;
528 - buf->dev = vdev->dev.parent->parent;
529
530 /* Increase device refcnt to avoid freeing it */
531 get_device(buf->dev);
532 diff --git a/drivers/gpio/gpio-pcie-idio-24.c b/drivers/gpio/gpio-pcie-idio-24.c
533 index 52f1647a46fdf..0cfeccb4ffe2b 100644
534 --- a/drivers/gpio/gpio-pcie-idio-24.c
535 +++ b/drivers/gpio/gpio-pcie-idio-24.c
536 @@ -28,6 +28,47 @@
537 #include <linux/spinlock.h>
538 #include <linux/types.h>
539
540 +/*
541 + * PLX PEX8311 PCI LCS_INTCSR Interrupt Control/Status
542 + *
543 + * Bit: Description
544 + * 0: Enable Interrupt Sources (Bit 0)
545 + * 1: Enable Interrupt Sources (Bit 1)
546 + * 2: Generate Internal PCI Bus Internal SERR# Interrupt
547 + * 3: Mailbox Interrupt Enable
548 + * 4: Power Management Interrupt Enable
549 + * 5: Power Management Interrupt
550 + * 6: Slave Read Local Data Parity Check Error Enable
551 + * 7: Slave Read Local Data Parity Check Error Status
552 + * 8: Internal PCI Wire Interrupt Enable
553 + * 9: PCI Express Doorbell Interrupt Enable
554 + * 10: PCI Abort Interrupt Enable
555 + * 11: Local Interrupt Input Enable
556 + * 12: Retry Abort Enable
557 + * 13: PCI Express Doorbell Interrupt Active
558 + * 14: PCI Abort Interrupt Active
559 + * 15: Local Interrupt Input Active
560 + * 16: Local Interrupt Output Enable
561 + * 17: Local Doorbell Interrupt Enable
562 + * 18: DMA Channel 0 Interrupt Enable
563 + * 19: DMA Channel 1 Interrupt Enable
564 + * 20: Local Doorbell Interrupt Active
565 + * 21: DMA Channel 0 Interrupt Active
566 + * 22: DMA Channel 1 Interrupt Active
567 + * 23: Built-In Self-Test (BIST) Interrupt Active
568 + * 24: Direct Master was the Bus Master during a Master or Target Abort
569 + * 25: DMA Channel 0 was the Bus Master during a Master or Target Abort
570 + * 26: DMA Channel 1 was the Bus Master during a Master or Target Abort
571 + * 27: Target Abort after internal 256 consecutive Master Retrys
572 + * 28: PCI Bus wrote data to LCS_MBOX0
573 + * 29: PCI Bus wrote data to LCS_MBOX1
574 + * 30: PCI Bus wrote data to LCS_MBOX2
575 + * 31: PCI Bus wrote data to LCS_MBOX3
576 + */
577 +#define PLX_PEX8311_PCI_LCS_INTCSR 0x68
578 +#define INTCSR_INTERNAL_PCI_WIRE BIT(8)
579 +#define INTCSR_LOCAL_INPUT BIT(11)
580 +
581 /**
582 * struct idio_24_gpio_reg - GPIO device registers structure
583 * @out0_7: Read: FET Outputs 0-7
584 @@ -92,6 +133,7 @@ struct idio_24_gpio_reg {
585 struct idio_24_gpio {
586 struct gpio_chip chip;
587 raw_spinlock_t lock;
588 + __u8 __iomem *plx;
589 struct idio_24_gpio_reg __iomem *reg;
590 unsigned long irq_mask;
591 };
592 @@ -360,13 +402,13 @@ static void idio_24_irq_mask(struct irq_data *data)
593 unsigned long flags;
594 const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
595 unsigned char new_irq_mask;
596 - const unsigned long bank_offset = bit_offset/8 * 8;
597 + const unsigned long bank_offset = bit_offset / 8;
598 unsigned char cos_enable_state;
599
600 raw_spin_lock_irqsave(&idio24gpio->lock, flags);
601
602 - idio24gpio->irq_mask &= BIT(bit_offset);
603 - new_irq_mask = idio24gpio->irq_mask >> bank_offset;
604 + idio24gpio->irq_mask &= ~BIT(bit_offset);
605 + new_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
606
607 if (!new_irq_mask) {
608 cos_enable_state = ioread8(&idio24gpio->reg->cos_enable);
609 @@ -389,12 +431,12 @@ static void idio_24_irq_unmask(struct irq_data *data)
610 unsigned long flags;
611 unsigned char prev_irq_mask;
612 const unsigned long bit_offset = irqd_to_hwirq(data) - 24;
613 - const unsigned long bank_offset = bit_offset/8 * 8;
614 + const unsigned long bank_offset = bit_offset / 8;
615 unsigned char cos_enable_state;
616
617 raw_spin_lock_irqsave(&idio24gpio->lock, flags);
618
619 - prev_irq_mask = idio24gpio->irq_mask >> bank_offset;
620 + prev_irq_mask = idio24gpio->irq_mask >> bank_offset * 8;
621 idio24gpio->irq_mask |= BIT(bit_offset);
622
623 if (!prev_irq_mask) {
624 @@ -481,6 +523,7 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
625 struct device *const dev = &pdev->dev;
626 struct idio_24_gpio *idio24gpio;
627 int err;
628 + const size_t pci_plx_bar_index = 1;
629 const size_t pci_bar_index = 2;
630 const char *const name = pci_name(pdev);
631
632 @@ -494,12 +537,13 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
633 return err;
634 }
635
636 - err = pcim_iomap_regions(pdev, BIT(pci_bar_index), name);
637 + err = pcim_iomap_regions(pdev, BIT(pci_plx_bar_index) | BIT(pci_bar_index), name);
638 if (err) {
639 dev_err(dev, "Unable to map PCI I/O addresses (%d)\n", err);
640 return err;
641 }
642
643 + idio24gpio->plx = pcim_iomap_table(pdev)[pci_plx_bar_index];
644 idio24gpio->reg = pcim_iomap_table(pdev)[pci_bar_index];
645
646 idio24gpio->chip.label = name;
647 @@ -520,6 +564,12 @@ static int idio_24_probe(struct pci_dev *pdev, const struct pci_device_id *id)
648
649 /* Software board reset */
650 iowrite8(0, &idio24gpio->reg->soft_reset);
651 + /*
652 + * enable PLX PEX8311 internal PCI wire interrupt and local interrupt
653 + * input
654 + */
655 + iowrite8((INTCSR_INTERNAL_PCI_WIRE | INTCSR_LOCAL_INPUT) >> 8,
656 + idio24gpio->plx + PLX_PEX8311_PCI_LCS_INTCSR + 1);
657
658 err = devm_gpiochip_add_data(dev, &idio24gpio->chip, idio24gpio);
659 if (err) {
660 diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
661 index 4af9acc2dc4f9..450ad7d5e21a0 100644
662 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
663 +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
664 @@ -1071,22 +1071,19 @@ static int cik_sdma_soft_reset(void *handle)
665 {
666 u32 srbm_soft_reset = 0;
667 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
668 - u32 tmp = RREG32(mmSRBM_STATUS2);
669 + u32 tmp;
670
671 - if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
672 - /* sdma0 */
673 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
674 - tmp |= SDMA0_F32_CNTL__HALT_MASK;
675 - WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
676 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
677 - }
678 - if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
679 - /* sdma1 */
680 - tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
681 - tmp |= SDMA0_F32_CNTL__HALT_MASK;
682 - WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
683 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
684 - }
685 + /* sdma0 */
686 + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
687 + tmp |= SDMA0_F32_CNTL__HALT_MASK;
688 + WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
689 + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
690 +
691 + /* sdma1 */
692 + tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
693 + tmp |= SDMA0_F32_CNTL__HALT_MASK;
694 + WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
695 + srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
696
697 if (srbm_soft_reset) {
698 tmp = RREG32(mmSRBM_SOFT_RESET);
699 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
700 index c086262cc181d..317aa257c06bb 100644
701 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
702 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
703 @@ -1144,8 +1144,7 @@ static int soc15_common_early_init(void *handle)
704
705 adev->pg_flags = AMD_PG_SUPPORT_SDMA |
706 AMD_PG_SUPPORT_MMHUB |
707 - AMD_PG_SUPPORT_VCN |
708 - AMD_PG_SUPPORT_VCN_DPG;
709 + AMD_PG_SUPPORT_VCN;
710 } else {
711 adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
712 AMD_CG_SUPPORT_GFX_MGLS |
713 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
714 index 35e6cbe805eb4..7cde55854b65c 100644
715 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
716 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
717 @@ -1533,6 +1533,10 @@ int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
718 PP_ASSERT_WITH_CODE((tmp_result == 0),
719 "Failed to reset to default!", result = tmp_result);
720
721 + tmp_result = smum_stop_smc(hwmgr);
722 + PP_ASSERT_WITH_CODE((tmp_result == 0),
723 + "Failed to stop smc!", result = tmp_result);
724 +
725 tmp_result = smu7_force_switch_to_arbf0(hwmgr);
726 PP_ASSERT_WITH_CODE((tmp_result == 0),
727 "Failed to force to switch arbf0!", result = tmp_result);
728 diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
729 index 7bf9a14bfa0be..f6490a1284384 100644
730 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
731 +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
732 @@ -229,6 +229,7 @@ struct pp_smumgr_func {
733 bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
734 int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting);
735 int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */
736 + int (*stop_smc)(struct pp_hwmgr *hwmgr);
737 };
738
739 struct pp_hwmgr_func {
740 diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
741 index c5288831aa15c..05a55e850b5e0 100644
742 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
743 +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
744 @@ -114,4 +114,6 @@ extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_settin
745
746 extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw);
747
748 +extern int smum_stop_smc(struct pp_hwmgr *hwmgr);
749 +
750 #endif
751 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
752 index 09a3d8ae44491..42c8f8731a504 100644
753 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
754 +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
755 @@ -2725,10 +2725,7 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
756
757 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
758 {
759 - return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
760 - CGS_IND_REG__SMC, FEATURE_STATUS,
761 - VOLTAGE_CONTROLLER_ON))
762 - ? true : false;
763 + return ci_is_smc_ram_running(hwmgr);
764 }
765
766 static int ci_smu_init(struct pp_hwmgr *hwmgr)
767 @@ -2936,6 +2933,29 @@ static int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
768 return 0;
769 }
770
771 +static void ci_reset_smc(struct pp_hwmgr *hwmgr)
772 +{
773 + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
774 + SMC_SYSCON_RESET_CNTL,
775 + rst_reg, 1);
776 +}
777 +
778 +
779 +static void ci_stop_smc_clock(struct pp_hwmgr *hwmgr)
780 +{
781 + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
782 + SMC_SYSCON_CLOCK_CNTL_0,
783 + ck_disable, 1);
784 +}
785 +
786 +static int ci_stop_smc(struct pp_hwmgr *hwmgr)
787 +{
788 + ci_reset_smc(hwmgr);
789 + ci_stop_smc_clock(hwmgr);
790 +
791 + return 0;
792 +}
793 +
794 const struct pp_smumgr_func ci_smu_funcs = {
795 .name = "ci_smu",
796 .smu_init = ci_smu_init,
797 @@ -2960,4 +2980,5 @@ const struct pp_smumgr_func ci_smu_funcs = {
798 .is_dpm_running = ci_is_dpm_running,
799 .update_dpm_settings = ci_update_dpm_settings,
800 .update_smc_table = ci_update_smc_table,
801 + .stop_smc = ci_stop_smc,
802 };
803 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
804 index 4240aeec9000e..83d06f8e99ec2 100644
805 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
806 +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
807 @@ -217,3 +217,11 @@ int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t tabl
808
809 return -EINVAL;
810 }
811 +
812 +int smum_stop_smc(struct pp_hwmgr *hwmgr)
813 +{
814 + if (hwmgr->smumgr_funcs->stop_smc)
815 + return hwmgr->smumgr_funcs->stop_smc(hwmgr);
816 +
817 + return 0;
818 +}
819 diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
820 index e6265fb85626e..56bb34d043326 100644
821 --- a/drivers/gpu/drm/gma500/psb_irq.c
822 +++ b/drivers/gpu/drm/gma500/psb_irq.c
823 @@ -337,6 +337,7 @@ int psb_irq_postinstall(struct drm_device *dev)
824 {
825 struct drm_psb_private *dev_priv = dev->dev_private;
826 unsigned long irqflags;
827 + unsigned int i;
828
829 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
830
831 @@ -349,20 +350,12 @@ int psb_irq_postinstall(struct drm_device *dev)
832 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
833 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
834
835 - if (dev->vblank[0].enabled)
836 - psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
837 - else
838 - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
839 -
840 - if (dev->vblank[1].enabled)
841 - psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
842 - else
843 - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
844 -
845 - if (dev->vblank[2].enabled)
846 - psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
847 - else
848 - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
849 + for (i = 0; i < dev->num_crtcs; ++i) {
850 + if (dev->vblank[i].enabled)
851 + psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
852 + else
853 + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
854 + }
855
856 if (dev_priv->ops->hotplug_enable)
857 dev_priv->ops->hotplug_enable(dev, true);
858 @@ -375,6 +368,7 @@ void psb_irq_uninstall(struct drm_device *dev)
859 {
860 struct drm_psb_private *dev_priv = dev->dev_private;
861 unsigned long irqflags;
862 + unsigned int i;
863
864 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
865
866 @@ -383,14 +377,10 @@ void psb_irq_uninstall(struct drm_device *dev)
867
868 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
869
870 - if (dev->vblank[0].enabled)
871 - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE);
872 -
873 - if (dev->vblank[1].enabled)
874 - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE);
875 -
876 - if (dev->vblank[2].enabled)
877 - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE);
878 + for (i = 0; i < dev->num_crtcs; ++i) {
879 + if (dev->vblank[i].enabled)
880 + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
881 + }
882
883 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
884 _PSB_IRQ_MSVDX_FLAG |
885 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
886 index 9c58e8fac1d97..a4b48c9abeacd 100644
887 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
888 +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
889 @@ -605,21 +605,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
890 if (!obj)
891 return -ENOENT;
892
893 - /*
894 - * Already in the desired write domain? Nothing for us to do!
895 - *
896 - * We apply a little bit of cunning here to catch a broader set of
897 - * no-ops. If obj->write_domain is set, we must be in the same
898 - * obj->read_domains, and only that domain. Therefore, if that
899 - * obj->write_domain matches the request read_domains, we are
900 - * already in the same read/write domain and can skip the operation,
901 - * without having to further check the requested write_domain.
902 - */
903 - if (READ_ONCE(obj->write_domain) == read_domains) {
904 - err = 0;
905 - goto out;
906 - }
907 -
908 /*
909 * Try to flush the object off the GPU without holding the lock.
910 * We will repeat the flush holding the lock in the normal manner
911 @@ -657,6 +642,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
912 if (err)
913 goto out;
914
915 + /*
916 + * Already in the desired write domain? Nothing for us to do!
917 + *
918 + * We apply a little bit of cunning here to catch a broader set of
919 + * no-ops. If obj->write_domain is set, we must be in the same
920 + * obj->read_domains, and only that domain. Therefore, if that
921 + * obj->write_domain matches the request read_domains, we are
922 + * already in the same read/write domain and can skip the operation,
923 + * without having to further check the requested write_domain.
924 + */
925 + if (READ_ONCE(obj->write_domain) == read_domains)
926 + goto out_unpin;
927 +
928 err = i915_gem_object_lock_interruptible(obj);
929 if (err)
930 goto out_unpin;
931 diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
932 index 4ce8626b140ed..8073758d10368 100644
933 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
934 +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
935 @@ -354,7 +354,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
936 * instances.
937 */
938 if ((INTEL_GEN(i915) >= 11 &&
939 - RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
940 + (RUNTIME_INFO(i915)->vdbox_sfc_access &
941 + BIT(engine->instance))) ||
942 (INTEL_GEN(i915) >= 9 && engine->instance == 0))
943 engine->uabi_capabilities |=
944 I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
945 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
946 index 930674117533e..bd4e72f6dfd49 100644
947 --- a/drivers/hv/hv_balloon.c
948 +++ b/drivers/hv/hv_balloon.c
949 @@ -1277,7 +1277,7 @@ static void balloon_up(struct work_struct *dummy)
950
951 /* Refuse to balloon below the floor. */
952 if (avail_pages < num_pages || avail_pages - num_pages < floor) {
953 - pr_warn("Balloon request will be partially fulfilled. %s\n",
954 + pr_info("Balloon request will be partially fulfilled. %s\n",
955 avail_pages < num_pages ? "Not enough memory." :
956 "Balloon floor reached.");
957
958 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
959 index 2152ec5f535c1..5a9f0d17f52c8 100644
960 --- a/drivers/i2c/busses/i2c-mt65xx.c
961 +++ b/drivers/i2c/busses/i2c-mt65xx.c
962 @@ -389,6 +389,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
963 {
964 u16 control_reg;
965
966 + writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
967 + udelay(50);
968 + writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
969 +
970 mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
971
972 /* Set ioconfig */
973 @@ -419,10 +423,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
974
975 mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL);
976 mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN);
977 -
978 - writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
979 - udelay(50);
980 - writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
981 }
982
983 /*
984 diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
985 index 8777af4c695e9..d5dd58c27ce5f 100644
986 --- a/drivers/i2c/busses/i2c-sh_mobile.c
987 +++ b/drivers/i2c/busses/i2c-sh_mobile.c
988 @@ -129,6 +129,7 @@ struct sh_mobile_i2c_data {
989 int sr;
990 bool send_stop;
991 bool stop_after_dma;
992 + bool atomic_xfer;
993
994 struct resource *res;
995 struct dma_chan *dma_tx;
996 @@ -333,13 +334,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op
997 ret = iic_rd(pd, ICDR);
998 break;
999 case OP_RX_STOP: /* enable DTE interrupt, issue stop */
1000 - iic_wr(pd, ICIC,
1001 - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
1002 + if (!pd->atomic_xfer)
1003 + iic_wr(pd, ICIC,
1004 + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
1005 iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
1006 break;
1007 case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */
1008 - iic_wr(pd, ICIC,
1009 - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
1010 + if (!pd->atomic_xfer)
1011 + iic_wr(pd, ICIC,
1012 + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE);
1013 ret = iic_rd(pd, ICDR);
1014 iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK);
1015 break;
1016 @@ -435,7 +438,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
1017
1018 if (wakeup) {
1019 pd->sr |= SW_DONE;
1020 - wake_up(&pd->wait);
1021 + if (!pd->atomic_xfer)
1022 + wake_up(&pd->wait);
1023 }
1024
1025 /* defeat write posting to avoid spurious WAIT interrupts */
1026 @@ -587,6 +591,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg,
1027 pd->pos = -1;
1028 pd->sr = 0;
1029
1030 + if (pd->atomic_xfer)
1031 + return;
1032 +
1033 pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8);
1034 if (pd->dma_buf)
1035 sh_mobile_i2c_xfer_dma(pd);
1036 @@ -643,15 +650,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd)
1037 return i ? 0 : -ETIMEDOUT;
1038 }
1039
1040 -static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
1041 - struct i2c_msg *msgs,
1042 - int num)
1043 +static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
1044 + struct i2c_msg *msgs, int num)
1045 {
1046 - struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
1047 struct i2c_msg *msg;
1048 int err = 0;
1049 int i;
1050 - long timeout;
1051 + long time_left;
1052
1053 /* Wake up device and enable clock */
1054 pm_runtime_get_sync(pd->dev);
1055 @@ -668,15 +673,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
1056 if (do_start)
1057 i2c_op(pd, OP_START);
1058
1059 - /* The interrupt handler takes care of the rest... */
1060 - timeout = wait_event_timeout(pd->wait,
1061 - pd->sr & (ICSR_TACK | SW_DONE),
1062 - adapter->timeout);
1063 -
1064 - /* 'stop_after_dma' tells if DMA transfer was complete */
1065 - i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma);
1066 + if (pd->atomic_xfer) {
1067 + unsigned long j = jiffies + pd->adap.timeout;
1068 +
1069 + time_left = time_before_eq(jiffies, j);
1070 + while (time_left &&
1071 + !(pd->sr & (ICSR_TACK | SW_DONE))) {
1072 + unsigned char sr = iic_rd(pd, ICSR);
1073 +
1074 + if (sr & (ICSR_AL | ICSR_TACK |
1075 + ICSR_WAIT | ICSR_DTE)) {
1076 + sh_mobile_i2c_isr(0, pd);
1077 + udelay(150);
1078 + } else {
1079 + cpu_relax();
1080 + }
1081 + time_left = time_before_eq(jiffies, j);
1082 + }
1083 + } else {
1084 + /* The interrupt handler takes care of the rest... */
1085 + time_left = wait_event_timeout(pd->wait,
1086 + pd->sr & (ICSR_TACK | SW_DONE),
1087 + pd->adap.timeout);
1088 +
1089 + /* 'stop_after_dma' tells if DMA xfer was complete */
1090 + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg,
1091 + pd->stop_after_dma);
1092 + }
1093
1094 - if (!timeout) {
1095 + if (!time_left) {
1096 dev_err(pd->dev, "Transfer request timed out\n");
1097 if (pd->dma_direction != DMA_NONE)
1098 sh_mobile_i2c_cleanup_dma(pd);
1099 @@ -702,14 +727,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
1100 return err ?: num;
1101 }
1102
1103 +static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
1104 + struct i2c_msg *msgs,
1105 + int num)
1106 +{
1107 + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
1108 +
1109 + pd->atomic_xfer = false;
1110 + return sh_mobile_xfer(pd, msgs, num);
1111 +}
1112 +
1113 +static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter,
1114 + struct i2c_msg *msgs,
1115 + int num)
1116 +{
1117 + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter);
1118 +
1119 + pd->atomic_xfer = true;
1120 + return sh_mobile_xfer(pd, msgs, num);
1121 +}
1122 +
1123 static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter)
1124 {
1125 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
1126 }
1127
1128 static const struct i2c_algorithm sh_mobile_i2c_algorithm = {
1129 - .functionality = sh_mobile_i2c_func,
1130 - .master_xfer = sh_mobile_i2c_xfer,
1131 + .functionality = sh_mobile_i2c_func,
1132 + .master_xfer = sh_mobile_i2c_xfer,
1133 + .master_xfer_atomic = sh_mobile_i2c_xfer_atomic,
1134 };
1135
1136 static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = {
1137 diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
1138 index 0679896b9e2e1..3ec090adcdae7 100644
1139 --- a/drivers/iommu/amd_iommu_types.h
1140 +++ b/drivers/iommu/amd_iommu_types.h
1141 @@ -406,7 +406,11 @@ extern bool amd_iommu_np_cache;
1142 /* Only true if all IOMMUs support device IOTLBs */
1143 extern bool amd_iommu_iotlb_sup;
1144
1145 -#define MAX_IRQS_PER_TABLE 256
1146 +/*
1147 + * AMD IOMMU hardware only support 512 IRTEs despite
1148 + * the architectural limitation of 2048 entries.
1149 + */
1150 +#define MAX_IRQS_PER_TABLE 512
1151 #define IRQ_TABLE_ALIGNMENT 128
1152
1153 struct irq_remap_table {
1154 diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
1155 index 1d3816cd65d57..ec69a99b99bab 100644
1156 --- a/drivers/iommu/intel-svm.c
1157 +++ b/drivers/iommu/intel-svm.c
1158 @@ -646,7 +646,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
1159 resp.qw0 = QI_PGRP_PASID(req->pasid) |
1160 QI_PGRP_DID(req->rid) |
1161 QI_PGRP_PASID_P(req->pasid_present) |
1162 - QI_PGRP_PDP(req->pasid_present) |
1163 + QI_PGRP_PDP(req->priv_data_present) |
1164 QI_PGRP_RESP_CODE(result) |
1165 QI_PGRP_RESP_TYPE;
1166 resp.qw1 = QI_PGRP_IDX(req->prg_index) |
1167 diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
1168 index c0529a1cd5ea0..20529ff48f002 100644
1169 --- a/drivers/mfd/sprd-sc27xx-spi.c
1170 +++ b/drivers/mfd/sprd-sc27xx-spi.c
1171 @@ -204,7 +204,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
1172 }
1173
1174 ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq,
1175 - IRQF_ONESHOT | IRQF_NO_SUSPEND, 0,
1176 + IRQF_ONESHOT, 0,
1177 &ddata->irq_chip, &ddata->irq_data);
1178 if (ret) {
1179 dev_err(&spi->dev, "Failed to add PMIC irq chip %d\n", ret);
1180 @@ -220,9 +220,34 @@ static int sprd_pmic_probe(struct spi_device *spi)
1181 return ret;
1182 }
1183
1184 + device_init_wakeup(&spi->dev, true);
1185 return 0;
1186 }
1187
1188 +#ifdef CONFIG_PM_SLEEP
1189 +static int sprd_pmic_suspend(struct device *dev)
1190 +{
1191 + struct sprd_pmic *ddata = dev_get_drvdata(dev);
1192 +
1193 + if (device_may_wakeup(dev))
1194 + enable_irq_wake(ddata->irq);
1195 +
1196 + return 0;
1197 +}
1198 +
1199 +static int sprd_pmic_resume(struct device *dev)
1200 +{
1201 + struct sprd_pmic *ddata = dev_get_drvdata(dev);
1202 +
1203 + if (device_may_wakeup(dev))
1204 + disable_irq_wake(ddata->irq);
1205 +
1206 + return 0;
1207 +}
1208 +#endif
1209 +
1210 +static SIMPLE_DEV_PM_OPS(sprd_pmic_pm_ops, sprd_pmic_suspend, sprd_pmic_resume);
1211 +
1212 static const struct of_device_id sprd_pmic_match[] = {
1213 { .compatible = "sprd,sc2731", .data = &sc2731_data },
1214 {},
1215 @@ -234,6 +259,7 @@ static struct spi_driver sprd_pmic_driver = {
1216 .name = "sc27xx-pmic",
1217 .bus = &spi_bus_type,
1218 .of_match_table = sprd_pmic_match,
1219 + .pm = &sprd_pmic_pm_ops,
1220 },
1221 .probe = sprd_pmic_probe,
1222 };
1223 diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
1224 index c1f9e810cf813..030d0e7b148b8 100644
1225 --- a/drivers/misc/mei/client.h
1226 +++ b/drivers/misc/mei/client.h
1227 @@ -128,11 +128,11 @@ static inline u8 mei_cl_me_id(const struct mei_cl *cl)
1228 *
1229 * @cl: host client
1230 *
1231 - * Return: mtu
1232 + * Return: mtu or 0 if client is not connected
1233 */
1234 static inline size_t mei_cl_mtu(const struct mei_cl *cl)
1235 {
1236 - return cl->me_cl->props.max_msg_length;
1237 + return cl->me_cl ? cl->me_cl->props.max_msg_length : 0;
1238 }
1239
1240 /**
1241 diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
1242 index 234551a68739b..689eb119d44fc 100644
1243 --- a/drivers/mmc/host/renesas_sdhi_core.c
1244 +++ b/drivers/mmc/host/renesas_sdhi_core.c
1245 @@ -874,6 +874,7 @@ int renesas_sdhi_remove(struct platform_device *pdev)
1246
1247 tmio_mmc_host_remove(host);
1248 renesas_sdhi_clk_disable(host);
1249 + tmio_mmc_host_free(host);
1250
1251 return 0;
1252 }
1253 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
1254 index 64196c1b1c8f0..5922ae021d869 100644
1255 --- a/drivers/mmc/host/sdhci-of-esdhc.c
1256 +++ b/drivers/mmc/host/sdhci-of-esdhc.c
1257 @@ -1212,6 +1212,8 @@ static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
1258
1259 static struct soc_device_attribute soc_unreliable_pulse_detection[] = {
1260 { .family = "QorIQ LX2160A", .revision = "1.0", },
1261 + { .family = "QorIQ LX2160A", .revision = "2.0", },
1262 + { .family = "QorIQ LS1028A", .revision = "1.0", },
1263 { },
1264 };
1265
1266 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1267 index 3a33fb5034005..448d1548cca39 100644
1268 --- a/drivers/net/can/dev.c
1269 +++ b/drivers/net/can/dev.c
1270 @@ -486,9 +486,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
1271 */
1272 struct sk_buff *skb = priv->echo_skb[idx];
1273 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
1274 - u8 len = cf->len;
1275
1276 - *len_ptr = len;
1277 + /* get the real payload length for netdev statistics */
1278 + if (cf->can_id & CAN_RTR_FLAG)
1279 + *len_ptr = 0;
1280 + else
1281 + *len_ptr = cf->len;
1282 +
1283 priv->echo_skb[idx] = NULL;
1284
1285 return skb;
1286 @@ -512,7 +516,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
1287 if (!skb)
1288 return 0;
1289
1290 - netif_rx(skb);
1291 + skb_get(skb);
1292 + if (netif_rx(skb) == NET_RX_SUCCESS)
1293 + dev_consume_skb_any(skb);
1294 + else
1295 + dev_kfree_skb_any(skb);
1296
1297 return len;
1298 }
1299 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
1300 index d59c6c87164f4..130f3022d3396 100644
1301 --- a/drivers/net/can/flexcan.c
1302 +++ b/drivers/net/can/flexcan.c
1303 @@ -321,8 +321,7 @@ static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
1304
1305 static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
1306 .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
1307 - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
1308 - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
1309 + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
1310 };
1311
1312 static const struct can_bittiming_const flexcan_bittiming_const = {
1313 @@ -1677,6 +1676,8 @@ static int flexcan_remove(struct platform_device *pdev)
1314 {
1315 struct net_device *dev = platform_get_drvdata(pdev);
1316
1317 + device_set_wakeup_enable(&pdev->dev, false);
1318 + device_set_wakeup_capable(&pdev->dev, false);
1319 unregister_flexcandev(dev);
1320 pm_runtime_disable(&pdev->dev);
1321 free_candev(dev);
1322 diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
1323 index 6b0c6a99fc8d6..91b156b2123a3 100644
1324 --- a/drivers/net/can/peak_canfd/peak_canfd.c
1325 +++ b/drivers/net/can/peak_canfd/peak_canfd.c
1326 @@ -248,8 +248,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
1327 cf_len = get_can_dlc(pucan_msg_get_dlc(msg));
1328
1329 /* if this frame is an echo, */
1330 - if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
1331 - !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
1332 + if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
1333 unsigned long flags;
1334
1335 spin_lock_irqsave(&priv->echo_lock, flags);
1336 @@ -263,7 +262,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
1337 netif_wake_queue(priv->ndev);
1338
1339 spin_unlock_irqrestore(&priv->echo_lock, flags);
1340 - return 0;
1341 +
1342 + /* if this frame is only an echo, stop here. Otherwise,
1343 + * continue to push this application self-received frame into
1344 + * its own rx queue.
1345 + */
1346 + if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
1347 + return 0;
1348 }
1349
1350 /* otherwise, it should be pushed into rx fifo */
1351 diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
1352 index 84cae167e42f6..7e75a87a8a6a9 100644
1353 --- a/drivers/net/can/rx-offload.c
1354 +++ b/drivers/net/can/rx-offload.c
1355 @@ -272,7 +272,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
1356
1357 if (skb_queue_len(&offload->skb_queue) >
1358 offload->skb_queue_len_max) {
1359 - kfree_skb(skb);
1360 + dev_kfree_skb_any(skb);
1361 return -ENOBUFS;
1362 }
1363
1364 @@ -317,7 +317,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
1365 {
1366 if (skb_queue_len(&offload->skb_queue) >
1367 offload->skb_queue_len_max) {
1368 - kfree_skb(skb);
1369 + dev_kfree_skb_any(skb);
1370 return -ENOBUFS;
1371 }
1372
1373 diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
1374 index 31ad364a89bbe..d3a7631eecaf2 100644
1375 --- a/drivers/net/can/ti_hecc.c
1376 +++ b/drivers/net/can/ti_hecc.c
1377 @@ -936,7 +936,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
1378 err = clk_prepare_enable(priv->clk);
1379 if (err) {
1380 dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
1381 - goto probe_exit_clk;
1382 + goto probe_exit_release_clk;
1383 }
1384
1385 priv->offload.mailbox_read = ti_hecc_mailbox_read;
1386 @@ -945,7 +945,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
1387 err = can_rx_offload_add_timestamp(ndev, &priv->offload);
1388 if (err) {
1389 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
1390 - goto probe_exit_clk;
1391 + goto probe_exit_disable_clk;
1392 }
1393
1394 err = register_candev(ndev);
1395 @@ -963,7 +963,9 @@ static int ti_hecc_probe(struct platform_device *pdev)
1396
1397 probe_exit_offload:
1398 can_rx_offload_del(&priv->offload);
1399 -probe_exit_clk:
1400 +probe_exit_disable_clk:
1401 + clk_disable_unprepare(priv->clk);
1402 +probe_exit_release_clk:
1403 clk_put(priv->clk);
1404 probe_exit_candev:
1405 free_candev(ndev);
1406 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1407 index 0b7766b715fd2..c844c6abe5fcd 100644
1408 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1409 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1410 @@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time)
1411 /* protect from getting time before setting now */
1412 if (ktime_to_ns(time_ref->tv_host)) {
1413 u64 delta_us;
1414 + s64 delta_ts = 0;
1415 +
1416 + /* General case: dev_ts_1 < dev_ts_2 < ts, with:
1417 + *
1418 + * - dev_ts_1 = previous sync timestamp
1419 + * - dev_ts_2 = last sync timestamp
1420 + * - ts = event timestamp
1421 + * - ts_period = known sync period (theoretical)
1422 + * ~ dev_ts2 - dev_ts1
1423 + * *but*:
1424 + *
1425 + * - time counters wrap (see adapter->ts_used_bits)
1426 + * - sometimes, dev_ts_1 < ts < dev_ts2
1427 + *
1428 + * "normal" case (sync time counters increase):
1429 + * must take into account case when ts wraps (tsw)
1430 + *
1431 + * < ts_period > < >
1432 + * | | |
1433 + * ---+--------+----+-------0-+--+-->
1434 + * ts_dev_1 | ts_dev_2 |
1435 + * ts tsw
1436 + */
1437 + if (time_ref->ts_dev_1 < time_ref->ts_dev_2) {
1438 + /* case when event time (tsw) wraps */
1439 + if (ts < time_ref->ts_dev_1)
1440 + delta_ts = 1 << time_ref->adapter->ts_used_bits;
1441 +
1442 + /* Otherwise, sync time counter (ts_dev_2) has wrapped:
1443 + * handle case when event time (tsn) hasn't.
1444 + *
1445 + * < ts_period > < >
1446 + * | | |
1447 + * ---+--------+--0-+---------+--+-->
1448 + * ts_dev_1 | ts_dev_2 |
1449 + * tsn ts
1450 + */
1451 + } else if (time_ref->ts_dev_1 < ts) {
1452 + delta_ts = -(1 << time_ref->adapter->ts_used_bits);
1453 + }
1454
1455 - delta_us = ts - time_ref->ts_dev_2;
1456 - if (ts < time_ref->ts_dev_2)
1457 - delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1;
1458 + /* add delay between last sync and event timestamps */
1459 + delta_ts += (signed int)(ts - time_ref->ts_dev_2);
1460
1461 - delta_us += time_ref->ts_total;
1462 + /* add time from beginning to last sync */
1463 + delta_ts += time_ref->ts_total;
1464
1465 - delta_us *= time_ref->adapter->us_per_ts_scale;
1466 + /* convert ticks number into microseconds */
1467 + delta_us = delta_ts * time_ref->adapter->us_per_ts_scale;
1468 delta_us >>= time_ref->adapter->us_per_ts_shift;
1469
1470 *time = ktime_add_us(time_ref->tv_host_0, delta_us);
1471 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1472 index 47cc1ff5b88e8..dee3e689b54da 100644
1473 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1474 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
1475 @@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
1476 struct pucan_msg *rx_msg)
1477 {
1478 struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg;
1479 - struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)];
1480 - struct net_device *netdev = dev->netdev;
1481 + struct peak_usb_device *dev;
1482 + struct net_device *netdev;
1483 struct canfd_frame *cfd;
1484 struct sk_buff *skb;
1485 const u16 rx_msg_flags = le16_to_cpu(rm->flags);
1486
1487 + if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev))
1488 + return -ENOMEM;
1489 +
1490 + dev = usb_if->dev[pucan_msg_get_channel(rm)];
1491 + netdev = dev->netdev;
1492 +
1493 if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
1494 /* CANFD frame case */
1495 skb = alloc_canfd_skb(netdev, &cfd);
1496 @@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
1497 struct pucan_msg *rx_msg)
1498 {
1499 struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg;
1500 - struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
1501 - struct pcan_usb_fd_device *pdev =
1502 - container_of(dev, struct pcan_usb_fd_device, dev);
1503 + struct pcan_usb_fd_device *pdev;
1504 enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
1505 enum can_state rx_state, tx_state;
1506 - struct net_device *netdev = dev->netdev;
1507 + struct peak_usb_device *dev;
1508 + struct net_device *netdev;
1509 struct can_frame *cf;
1510 struct sk_buff *skb;
1511
1512 + if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev))
1513 + return -ENOMEM;
1514 +
1515 + dev = usb_if->dev[pucan_stmsg_get_channel(sm)];
1516 + pdev = container_of(dev, struct pcan_usb_fd_device, dev);
1517 + netdev = dev->netdev;
1518 +
1519 /* nothing should be sent while in BUS_OFF state */
1520 if (dev->can.state == CAN_STATE_BUS_OFF)
1521 return 0;
1522 @@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if,
1523 struct pucan_msg *rx_msg)
1524 {
1525 struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg;
1526 - struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)];
1527 - struct pcan_usb_fd_device *pdev =
1528 - container_of(dev, struct pcan_usb_fd_device, dev);
1529 + struct pcan_usb_fd_device *pdev;
1530 + struct peak_usb_device *dev;
1531 +
1532 + if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev))
1533 + return -EINVAL;
1534 +
1535 + dev = usb_if->dev[pucan_ermsg_get_channel(er)];
1536 + pdev = container_of(dev, struct pcan_usb_fd_device, dev);
1537
1538 /* keep a trace of tx and rx error counters for later use */
1539 pdev->bec.txerr = er->tx_err_cnt;
1540 @@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
1541 struct pucan_msg *rx_msg)
1542 {
1543 struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg;
1544 - struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)];
1545 - struct net_device *netdev = dev->netdev;
1546 + struct peak_usb_device *dev;
1547 + struct net_device *netdev;
1548 struct can_frame *cf;
1549 struct sk_buff *skb;
1550
1551 + if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev))
1552 + return -EINVAL;
1553 +
1554 + dev = usb_if->dev[pufd_omsg_get_channel(ov)];
1555 + netdev = dev->netdev;
1556 +
1557 /* allocate an skb to store the error frame */
1558 skb = alloc_can_err_skb(netdev, &cf);
1559 if (!skb)
1560 @@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev,
1561 u16 tx_msg_size, tx_msg_flags;
1562 u8 can_dlc;
1563
1564 + if (cfd->len > CANFD_MAX_DLEN)
1565 + return -EINVAL;
1566 +
1567 tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4);
1568 tx_msg->size = cpu_to_le16(tx_msg_size);
1569 tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
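All four decode paths above gain the same guard: the channel index arrives in a message from the device, so it must be checked against ARRAY_SIZE(usb_if->dev) before it indexes the table (the hunks return -ENOMEM or -EINVAL for an out-of-range channel). A self-contained sketch of that defensive pattern, with a hypothetical one-byte message layout:

/* Never index a fixed-size table with an identifier taken from the
 * wire without validating it first. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct channel { int up; };

static struct channel channels[2];

static int decode_msg(const uint8_t *msg, size_t len)
{
        unsigned int ch;

        if (len < 1)
                return -EINVAL;

        ch = msg[0] & 0x0f;             /* channel id from the payload */
        if (ch >= ARRAY_SIZE(channels))
                return -EINVAL;         /* reject out-of-range ids */

        channels[ch].up = 1;
        return 0;
}

int main(void)
{
        uint8_t bad[1] = { 0x07 };      /* channel 7 of 2: rejected */

        return decode_msg(bad, sizeof(bad)) == -EINVAL ? 0 : 1;
}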
1570 diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
1571 index 2be846ee627d1..0de39ebb35662 100644
1572 --- a/drivers/net/can/xilinx_can.c
1573 +++ b/drivers/net/can/xilinx_can.c
1574 @@ -1384,7 +1384,7 @@ static int xcan_open(struct net_device *ndev)
1575 if (ret < 0) {
1576 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1577 __func__, ret);
1578 - return ret;
1579 + goto err;
1580 }
1581
1582 ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
1583 @@ -1468,6 +1468,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev,
1584 if (ret < 0) {
1585 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1586 __func__, ret);
1587 + pm_runtime_put(priv->dev);
1588 return ret;
1589 }
1590
1591 @@ -1783,7 +1784,7 @@ static int xcan_probe(struct platform_device *pdev)
1592 if (ret < 0) {
1593 netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1594 __func__, ret);
1595 - goto err_pmdisable;
1596 + goto err_disableclks;
1597 }
1598
1599 if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1600 @@ -1818,7 +1819,6 @@ static int xcan_probe(struct platform_device *pdev)
1601
1602 err_disableclks:
1603 pm_runtime_put(priv->dev);
1604 -err_pmdisable:
1605 pm_runtime_disable(&pdev->dev);
1606 err_free:
1607 free_candev(ndev);
1608 diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
1609 index 24888676f69ba..6b43e1c5b1c3e 100644
1610 --- a/drivers/net/ethernet/intel/igc/igc_main.c
1611 +++ b/drivers/net/ethernet/intel/igc/igc_main.c
1612 @@ -2222,21 +2222,23 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
1613 }
1614
1615 /**
1616 - * igc_get_stats - Get System Network Statistics
1617 + * igc_get_stats64 - Get System Network Statistics
1618 * @netdev: network interface device structure
1619 + * @stats: rtnl_link_stats64 pointer
1620 *
1621 * Returns the address of the device statistics structure.
1622 * The statistics are updated here and also from the timer callback.
1623 */
1624 -static struct net_device_stats *igc_get_stats(struct net_device *netdev)
1625 +static void igc_get_stats64(struct net_device *netdev,
1626 + struct rtnl_link_stats64 *stats)
1627 {
1628 struct igc_adapter *adapter = netdev_priv(netdev);
1629
1630 + spin_lock(&adapter->stats64_lock);
1631 if (!test_bit(__IGC_RESETTING, &adapter->state))
1632 igc_update_stats(adapter);
1633 -
1634 - /* only return the current stats */
1635 - return &netdev->stats;
1636 + memcpy(stats, &adapter->stats64, sizeof(*stats));
1637 + spin_unlock(&adapter->stats64_lock);
1638 }
1639
1640 static netdev_features_t igc_fix_features(struct net_device *netdev,
1641 @@ -3984,7 +3986,7 @@ static const struct net_device_ops igc_netdev_ops = {
1642 .ndo_start_xmit = igc_xmit_frame,
1643 .ndo_set_mac_address = igc_set_mac,
1644 .ndo_change_mtu = igc_change_mtu,
1645 - .ndo_get_stats = igc_get_stats,
1646 + .ndo_get_stats64 = igc_get_stats64,
1647 .ndo_fix_features = igc_fix_features,
1648 .ndo_set_features = igc_set_features,
1649 .ndo_features_check = igc_features_check,
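The switch away from .ndo_get_stats matters because returning a pointer to live counters lets readers race the updater; .ndo_get_stats64 instead fills a caller-provided snapshot, which the driver serializes with stats64_lock. A rough userspace model of that pattern, with mocked types and a pthread mutex standing in for the spinlock:

/* Snapshot shared counters under a lock into caller-owned storage. */
#include <pthread.h>
#include <string.h>

struct stats64 { unsigned long long rx_packets, tx_packets; };

struct adapter {
        pthread_mutex_t stats_lock;
        struct stats64 stats;           /* updated from a timer/worker */
};

static void get_stats64(struct adapter *ad, struct stats64 *out)
{
        pthread_mutex_lock(&ad->stats_lock);
        /* a real driver would refresh hardware counters here */
        memcpy(out, &ad->stats, sizeof(*out));
        pthread_mutex_unlock(&ad->stats_lock);
}

int main(void)
{
        struct adapter ad = {
                .stats_lock = PTHREAD_MUTEX_INITIALIZER,
                .stats = { 1, 2 },
        };
        struct stats64 snap;

        get_stats64(&ad, &snap);
        return snap.rx_packets == 1 ? 0 : 1;
}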
1650 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1651 index 9ac2f52187ea4..16511f6485531 100644
1652 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1653 +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
1654 @@ -1923,10 +1923,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
1655 down_write_ref_node(&fte->node, false);
1656 for (i = handle->num_rules - 1; i >= 0; i--)
1657 tree_remove_node(&handle->rule[i]->node, true);
1658 - if (fte->modify_mask && fte->dests_size) {
1659 - modify_fte(fte);
1660 + if (fte->dests_size) {
1661 + if (fte->modify_mask)
1662 + modify_fte(fte);
1663 up_write_ref_node(&fte->node, false);
1664 - } else {
1665 + } else if (list_empty(&fte->node.children)) {
1666 del_hw_fte(&fte->node);
1667 /* Avoid double call to del_hw_fte */
1668 fte->node.del_hw_func = NULL;
1669 diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
1670 index a43140f7b5eb8..b8e0e08b79de2 100644
1671 --- a/drivers/net/ethernet/microchip/lan743x_main.c
1672 +++ b/drivers/net/ethernet/microchip/lan743x_main.c
1673 @@ -672,14 +672,12 @@ clean_up:
1674 static int lan743x_dp_write(struct lan743x_adapter *adapter,
1675 u32 select, u32 addr, u32 length, u32 *buf)
1676 {
1677 - int ret = -EIO;
1678 u32 dp_sel;
1679 int i;
1680
1681 - mutex_lock(&adapter->dp_lock);
1682 if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1683 1, 40, 100, 100))
1684 - goto unlock;
1685 + return -EIO;
1686 dp_sel = lan743x_csr_read(adapter, DP_SEL);
1687 dp_sel &= ~DP_SEL_MASK_;
1688 dp_sel |= select;
1689 @@ -691,13 +689,10 @@ static int lan743x_dp_write(struct lan743x_adapter *adapter,
1690 lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_);
1691 if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_,
1692 1, 40, 100, 100))
1693 - goto unlock;
1694 + return -EIO;
1695 }
1696 - ret = 0;
1697
1698 -unlock:
1699 - mutex_unlock(&adapter->dp_lock);
1700 - return ret;
1701 + return 0;
1702 }
1703
1704 static u32 lan743x_mac_mii_access(u16 id, u16 index, int read)
1705 @@ -2674,7 +2669,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter,
1706
1707 adapter->intr.irq = adapter->pdev->irq;
1708 lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF);
1709 - mutex_init(&adapter->dp_lock);
1710
1711 ret = lan743x_gpio_init(adapter);
1712 if (ret)
1713 diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
1714 index 3b02eeae5f45d..1fbcef3910989 100644
1715 --- a/drivers/net/ethernet/microchip/lan743x_main.h
1716 +++ b/drivers/net/ethernet/microchip/lan743x_main.h
1717 @@ -706,9 +706,6 @@ struct lan743x_adapter {
1718 struct lan743x_csr csr;
1719 struct lan743x_intr intr;
1720
1721 - /* lock, used to prevent concurrent access to data port */
1722 - struct mutex dp_lock;
1723 -
1724 struct lan743x_gpio gpio;
1725 struct lan743x_ptp ptp;
1726
1727 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
1728 index d8881ba773dee..fd5adb0c54d29 100644
1729 --- a/drivers/net/ethernet/realtek/r8169_main.c
1730 +++ b/drivers/net/ethernet/realtek/r8169_main.c
1731 @@ -5846,7 +5846,8 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
1732 opts[1] |= transport_offset << TCPHO_SHIFT;
1733 } else {
1734 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
1735 - return !eth_skb_pad(skb);
1736 + /* eth_skb_pad would free the skb on error */
1737 + return !__skb_put_padto(skb, ETH_ZLEN, false);
1738 }
1739
1740 return true;
1741 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1742 index 6716deeb35e33..0c7d746c03304 100644
1743 --- a/drivers/net/vrf.c
1744 +++ b/drivers/net/vrf.c
1745 @@ -332,8 +332,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
1746 return ret;
1747 }
1748
1749 -static int vrf_finish_direct(struct net *net, struct sock *sk,
1750 - struct sk_buff *skb)
1751 +static void vrf_finish_direct(struct sk_buff *skb)
1752 {
1753 struct net_device *vrf_dev = skb->dev;
1754
1755 @@ -352,7 +351,8 @@ static int vrf_finish_direct(struct net *net, struct sock *sk,
1756 skb_pull(skb, ETH_HLEN);
1757 }
1758
1759 - return 1;
1760 +	/* reset conntrack info */
1761 + nf_reset_ct(skb);
1762 }
1763
1764 #if IS_ENABLED(CONFIG_IPV6)
1765 @@ -431,15 +431,41 @@ static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
1766 return skb;
1767 }
1768
1769 +static int vrf_output6_direct_finish(struct net *net, struct sock *sk,
1770 + struct sk_buff *skb)
1771 +{
1772 + vrf_finish_direct(skb);
1773 +
1774 + return vrf_ip6_local_out(net, sk, skb);
1775 +}
1776 +
1777 static int vrf_output6_direct(struct net *net, struct sock *sk,
1778 struct sk_buff *skb)
1779 {
1780 + int err = 1;
1781 +
1782 skb->protocol = htons(ETH_P_IPV6);
1783
1784 - return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
1785 - net, sk, skb, NULL, skb->dev,
1786 - vrf_finish_direct,
1787 - !(IPCB(skb)->flags & IPSKB_REROUTED));
1788 + if (!(IPCB(skb)->flags & IPSKB_REROUTED))
1789 + err = nf_hook(NFPROTO_IPV6, NF_INET_POST_ROUTING, net, sk, skb,
1790 + NULL, skb->dev, vrf_output6_direct_finish);
1791 +
1792 + if (likely(err == 1))
1793 + vrf_finish_direct(skb);
1794 +
1795 + return err;
1796 +}
1797 +
1798 +static int vrf_ip6_out_direct_finish(struct net *net, struct sock *sk,
1799 + struct sk_buff *skb)
1800 +{
1801 + int err;
1802 +
1803 + err = vrf_output6_direct(net, sk, skb);
1804 + if (likely(err == 1))
1805 + err = vrf_ip6_local_out(net, sk, skb);
1806 +
1807 + return err;
1808 }
1809
1810 static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
1811 @@ -452,18 +478,15 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
1812 skb->dev = vrf_dev;
1813
1814 err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
1815 - skb, NULL, vrf_dev, vrf_output6_direct);
1816 + skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
1817
1818 if (likely(err == 1))
1819 err = vrf_output6_direct(net, sk, skb);
1820
1821 - /* reset skb device */
1822 if (likely(err == 1))
1823 - nf_reset_ct(skb);
1824 - else
1825 - skb = NULL;
1826 + return skb;
1827
1828 - return skb;
1829 + return NULL;
1830 }
1831
1832 static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
1833 @@ -643,15 +666,41 @@ static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
1834 return skb;
1835 }
1836
1837 +static int vrf_output_direct_finish(struct net *net, struct sock *sk,
1838 + struct sk_buff *skb)
1839 +{
1840 + vrf_finish_direct(skb);
1841 +
1842 + return vrf_ip_local_out(net, sk, skb);
1843 +}
1844 +
1845 static int vrf_output_direct(struct net *net, struct sock *sk,
1846 struct sk_buff *skb)
1847 {
1848 + int err = 1;
1849 +
1850 skb->protocol = htons(ETH_P_IP);
1851
1852 - return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
1853 - net, sk, skb, NULL, skb->dev,
1854 - vrf_finish_direct,
1855 - !(IPCB(skb)->flags & IPSKB_REROUTED));
1856 + if (!(IPCB(skb)->flags & IPSKB_REROUTED))
1857 + err = nf_hook(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb,
1858 + NULL, skb->dev, vrf_output_direct_finish);
1859 +
1860 + if (likely(err == 1))
1861 + vrf_finish_direct(skb);
1862 +
1863 + return err;
1864 +}
1865 +
1866 +static int vrf_ip_out_direct_finish(struct net *net, struct sock *sk,
1867 + struct sk_buff *skb)
1868 +{
1869 + int err;
1870 +
1871 + err = vrf_output_direct(net, sk, skb);
1872 + if (likely(err == 1))
1873 + err = vrf_ip_local_out(net, sk, skb);
1874 +
1875 + return err;
1876 }
1877
1878 static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
1879 @@ -664,18 +713,15 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
1880 skb->dev = vrf_dev;
1881
1882 err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
1883 - skb, NULL, vrf_dev, vrf_output_direct);
1884 + skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
1885
1886 if (likely(err == 1))
1887 err = vrf_output_direct(net, sk, skb);
1888
1889 - /* reset skb device */
1890 if (likely(err == 1))
1891 - nf_reset_ct(skb);
1892 - else
1893 - skb = NULL;
1894 + return skb;
1895
1896 - return skb;
1897 + return NULL;
1898 }
1899
1900 static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
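The rewrite above replaces NF_HOOK_COND() with a direct nf_hook() call so that an asynchronous netfilter verdict (for example a queued packet) resumes through a continuation that redoes both the finish step and the local-out step, while a synchronous accept (return value 1) keeps executing inline. A control-flow sketch, with hook() standing in for nf_hook() and all names illustrative:

#include <stdio.h>

struct pkt { int id; };

static void finish(struct pkt *p)
{
        printf("finish %d\n", p->id);
}

static int continuation(struct pkt *p)
{
        /* asynchronous resume path: must redo the finish step (the
         * driver's *_direct_finish helpers also redo local-out) */
        finish(p);
        return 1;
}

/* returns 1 when the packet is accepted inline, 0 when the verdict
 * is deferred and 'okfn' will be invoked later */
static int hook(struct pkt *p, int (*okfn)(struct pkt *), int queued)
{
        if (!queued)
                return 1;
        okfn(p);        /* modeled as immediate; really runs later */
        return 0;
}

static int output_direct(struct pkt *p, int queued)
{
        int err = hook(p, continuation, queued);

        if (err == 1)           /* accepted inline: finish here */
                finish(p);
        return err;
}

int main(void)
{
        struct pkt a = { 1 }, b = { 2 };

        output_direct(&a, 0);   /* synchronous verdict */
        output_direct(&b, 1);   /* deferred via continuation */
        return 0;
}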
1901 diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
1902 index af539151d6638..61428076f32e4 100644
1903 --- a/drivers/net/wan/cosa.c
1904 +++ b/drivers/net/wan/cosa.c
1905 @@ -889,6 +889,7 @@ static ssize_t cosa_write(struct file *file,
1906 chan->tx_status = 1;
1907 spin_unlock_irqrestore(&cosa->lock, flags);
1908 up(&chan->wsem);
1909 + kfree(kbuf);
1910 return -ERESTARTSYS;
1911 }
1912 }
1913 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1914 index 118e5550b10c2..628f45c8c06f2 100644
1915 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1916 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
1917 @@ -973,7 +973,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
1918 struct ath_htc_rx_status *rxstatus;
1919 struct ath_rx_status rx_stats;
1920 bool decrypt_error = false;
1921 - __be16 rs_datalen;
1922 + u16 rs_datalen;
1923 bool is_phyerr;
1924
1925 if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
1926 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1927 index ce69aaea581a5..7a964271959d8 100644
1928 --- a/drivers/nvme/host/core.c
1929 +++ b/drivers/nvme/host/core.c
1930 @@ -4226,8 +4226,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
1931 }
1932 EXPORT_SYMBOL_GPL(nvme_start_queues);
1933
1934 -
1935 -void nvme_sync_queues(struct nvme_ctrl *ctrl)
1936 +void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
1937 {
1938 struct nvme_ns *ns;
1939
1940 @@ -4235,7 +4234,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl)
1941 list_for_each_entry(ns, &ctrl->namespaces, list)
1942 blk_sync_queue(ns->queue);
1943 up_read(&ctrl->namespaces_rwsem);
1944 +}
1945 +EXPORT_SYMBOL_GPL(nvme_sync_io_queues);
1946
1947 +void nvme_sync_queues(struct nvme_ctrl *ctrl)
1948 +{
1949 + nvme_sync_io_queues(ctrl);
1950 if (ctrl->admin_q)
1951 blk_sync_queue(ctrl->admin_q);
1952 }
1953 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
1954 index d7132d8cb7c5d..e392d6cd92ced 100644
1955 --- a/drivers/nvme/host/nvme.h
1956 +++ b/drivers/nvme/host/nvme.h
1957 @@ -494,6 +494,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl);
1958 void nvme_start_queues(struct nvme_ctrl *ctrl);
1959 void nvme_kill_queues(struct nvme_ctrl *ctrl);
1960 void nvme_sync_queues(struct nvme_ctrl *ctrl);
1961 +void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
1962 void nvme_unfreeze(struct nvme_ctrl *ctrl);
1963 void nvme_wait_freeze(struct nvme_ctrl *ctrl);
1964 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
1965 diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
1966 index e957ad0a07f58..8a62c2fe5a5ec 100644
1967 --- a/drivers/nvme/host/rdma.c
1968 +++ b/drivers/nvme/host/rdma.c
1969 @@ -110,7 +110,6 @@ struct nvme_rdma_ctrl {
1970 struct sockaddr_storage src_addr;
1971
1972 struct nvme_ctrl ctrl;
1973 - struct mutex teardown_lock;
1974 bool use_inline_data;
1975 u32 io_queues[HCTX_MAX_TYPES];
1976 };
1977 @@ -933,8 +932,8 @@ out_free_io_queues:
1978 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
1979 bool remove)
1980 {
1981 - mutex_lock(&ctrl->teardown_lock);
1982 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
1983 + blk_sync_queue(ctrl->ctrl.admin_q);
1984 nvme_rdma_stop_queue(&ctrl->queues[0]);
1985 if (ctrl->ctrl.admin_tagset) {
1986 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
1987 @@ -944,16 +943,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
1988 if (remove)
1989 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
1990 nvme_rdma_destroy_admin_queue(ctrl, remove);
1991 - mutex_unlock(&ctrl->teardown_lock);
1992 }
1993
1994 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
1995 bool remove)
1996 {
1997 - mutex_lock(&ctrl->teardown_lock);
1998 if (ctrl->ctrl.queue_count > 1) {
1999 nvme_start_freeze(&ctrl->ctrl);
2000 nvme_stop_queues(&ctrl->ctrl);
2001 + nvme_sync_io_queues(&ctrl->ctrl);
2002 nvme_rdma_stop_io_queues(ctrl);
2003 if (ctrl->ctrl.tagset) {
2004 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
2005 @@ -964,7 +962,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
2006 nvme_start_queues(&ctrl->ctrl);
2007 nvme_rdma_destroy_io_queues(ctrl, remove);
2008 }
2009 - mutex_unlock(&ctrl->teardown_lock);
2010 }
2011
2012 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
2013 @@ -1728,16 +1725,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
2014 {
2015 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2016 struct nvme_rdma_queue *queue = req->queue;
2017 - struct nvme_rdma_ctrl *ctrl = queue->ctrl;
2018
2019 - /* fence other contexts that may complete the command */
2020 - mutex_lock(&ctrl->teardown_lock);
2021 nvme_rdma_stop_queue(queue);
2022 - if (!blk_mq_request_completed(rq)) {
2023 + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2024 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2025 blk_mq_complete_request(rq);
2026 }
2027 - mutex_unlock(&ctrl->teardown_lock);
2028 }
2029
2030 static enum blk_eh_timer_return
2031 @@ -2029,7 +2022,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2032 return ERR_PTR(-ENOMEM);
2033 ctrl->ctrl.opts = opts;
2034 INIT_LIST_HEAD(&ctrl->list);
2035 - mutex_init(&ctrl->teardown_lock);
2036
2037 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2038 opts->trsvcid =
2039 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
2040 index e159b78b5f3b4..a31c6e1f6063a 100644
2041 --- a/drivers/nvme/host/tcp.c
2042 +++ b/drivers/nvme/host/tcp.c
2043 @@ -110,7 +110,6 @@ struct nvme_tcp_ctrl {
2044 struct sockaddr_storage src_addr;
2045 struct nvme_ctrl ctrl;
2046
2047 - struct mutex teardown_lock;
2048 struct work_struct err_work;
2049 struct delayed_work connect_work;
2050 struct nvme_tcp_request async_req;
2051 @@ -1797,8 +1796,8 @@ out_free_queue:
2052 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2053 bool remove)
2054 {
2055 - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2056 blk_mq_quiesce_queue(ctrl->admin_q);
2057 + blk_sync_queue(ctrl->admin_q);
2058 nvme_tcp_stop_queue(ctrl, 0);
2059 if (ctrl->admin_tagset) {
2060 blk_mq_tagset_busy_iter(ctrl->admin_tagset,
2061 @@ -1808,18 +1807,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2062 if (remove)
2063 blk_mq_unquiesce_queue(ctrl->admin_q);
2064 nvme_tcp_destroy_admin_queue(ctrl, remove);
2065 - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2066 }
2067
2068 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2069 bool remove)
2070 {
2071 - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2072 if (ctrl->queue_count <= 1)
2073 - goto out;
2074 + return;
2075 blk_mq_quiesce_queue(ctrl->admin_q);
2076 nvme_start_freeze(ctrl);
2077 nvme_stop_queues(ctrl);
2078 + nvme_sync_io_queues(ctrl);
2079 nvme_tcp_stop_io_queues(ctrl);
2080 if (ctrl->tagset) {
2081 blk_mq_tagset_busy_iter(ctrl->tagset,
2082 @@ -1829,8 +1827,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2083 if (remove)
2084 nvme_start_queues(ctrl);
2085 nvme_tcp_destroy_io_queues(ctrl, remove);
2086 -out:
2087 - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2088 }
2089
2090 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2091 @@ -2074,14 +2070,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
2092 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2093 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2094
2095 - /* fence other contexts that may complete the command */
2096 - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
2097 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2098 - if (!blk_mq_request_completed(rq)) {
2099 + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
2100 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
2101 blk_mq_complete_request(rq);
2102 }
2103 - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock);
2104 }
2105
2106 static enum blk_eh_timer_return
2107 @@ -2344,7 +2337,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2108 nvme_tcp_reconnect_ctrl_work);
2109 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2110 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2111 - mutex_init(&ctrl->teardown_lock);
2112
2113 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2114 opts->trsvcid =
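Both transports drop their per-controller teardown mutex and instead fence racing completions with queue quiesce plus blk_sync_queue()/nvme_sync_io_queues(); the timeout handler then completes a request only if it has actually started and has not already been completed by another context. A small sketch of that guard, with the block-layer request states mocked:

/* Complete a timed-out request exactly once, and only if it was
 * really in flight.  The kernel uses blk_mq_request_started() and
 * blk_mq_request_completed() for these tests. */
#include <stdio.h>

enum rq_state { RQ_IDLE, RQ_STARTED, RQ_COMPLETED };

struct request { enum rq_state state; int status; };

#define HOST_ABORTED 1  /* stand-in for NVME_SC_HOST_ABORTED_CMD */

static void complete_timed_out(struct request *rq)
{
        /* the real handler first stops the queue to fence new
         * completions for this request */
        if (rq->state == RQ_STARTED) {
                rq->status = HOST_ABORTED;
                rq->state = RQ_COMPLETED;
        }
}

int main(void)
{
        struct request done = { RQ_COMPLETED, 0 };
        struct request live = { RQ_STARTED, 0 };

        complete_timed_out(&done);      /* no double completion */
        complete_timed_out(&live);      /* aborted exactly once */
        printf("%d %d\n", done.status, live.status);
        return 0;
}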
2115 diff --git a/drivers/of/address.c b/drivers/of/address.c
2116 index 8f74c4626e0ef..5abb056b2b515 100644
2117 --- a/drivers/of/address.c
2118 +++ b/drivers/of/address.c
2119 @@ -1003,11 +1003,13 @@ EXPORT_SYMBOL_GPL(of_dma_get_range);
2120 */
2121 bool of_dma_is_coherent(struct device_node *np)
2122 {
2123 - struct device_node *node = of_node_get(np);
2124 + struct device_node *node;
2125
2126 if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
2127 return true;
2128
2129 + node = of_node_get(np);
2130 +
2131 while (node) {
2132 if (of_property_read_bool(node, "dma-coherent")) {
2133 of_node_put(node);
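The of_dma_is_coherent() fix is a reference-count ordering bug: of_node_get() used to run before the CONFIG_OF_DMA_DEFAULT_COHERENT early return, leaking one node reference per call on such configs. A self-contained sketch of the corrected discipline, with a toy refcount standing in for of_node_get()/of_node_put():

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct node { int refs; bool coherent; };

static struct node *node_get(struct node *n)
{
        if (n)
                n->refs++;
        return n;
}

static void node_put(struct node *n)
{
        if (n)
                n->refs--;
}

static bool is_coherent(struct node *np, bool default_coherent)
{
        struct node *n;

        if (default_coherent)
                return true;    /* early exit: no reference taken */

        n = node_get(np);       /* every get below is matched by a put */
        while (n) {
                if (n->coherent) {
                        node_put(n);
                        return true;
                }
                node_put(n);
                n = NULL;       /* a real walk would visit the parent */
        }
        return false;
}

int main(void)
{
        struct node np = { 1, false };

        is_coherent(&np, true); /* leaked one reference before the fix */
        is_coherent(&np, false);
        assert(np.refs == 1);
        return 0;
}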
2134 diff --git a/drivers/opp/core.c b/drivers/opp/core.c
2135 index 8867bab72e171..088c93dc0085c 100644
2136 --- a/drivers/opp/core.c
2137 +++ b/drivers/opp/core.c
2138 @@ -1046,6 +1046,10 @@ static void _opp_table_kref_release(struct kref *kref)
2139 struct opp_table *opp_table = container_of(kref, struct opp_table, kref);
2140 struct opp_device *opp_dev, *temp;
2141
2142 + /* Drop the lock as soon as we can */
2143 + list_del(&opp_table->node);
2144 + mutex_unlock(&opp_table_lock);
2145 +
2146 _of_clear_opp_table(opp_table);
2147
2148 /* Release clk */
2149 @@ -1067,10 +1071,7 @@ static void _opp_table_kref_release(struct kref *kref)
2150
2151 mutex_destroy(&opp_table->genpd_virt_dev_lock);
2152 mutex_destroy(&opp_table->lock);
2153 - list_del(&opp_table->node);
2154 kfree(opp_table);
2155 -
2156 - mutex_unlock(&opp_table_lock);
2157 }
2158
2159 void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
2160 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
2161 index 374db5d59cf87..14196c0287a24 100644
2162 --- a/drivers/pci/controller/dwc/pcie-qcom.c
2163 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
2164 @@ -303,6 +303,9 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
2165 clk_disable_unprepare(res->core_clk);
2166 clk_disable_unprepare(res->aux_clk);
2167 clk_disable_unprepare(res->ref_clk);
2168 +
2169 + writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
2170 +
2171 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
2172 }
2173
2174 @@ -315,6 +318,16 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2175 u32 val;
2176 int ret;
2177
2178 +	/* reset the PCIe interface as uboot can leave it in an undefined state */
2179 + reset_control_assert(res->pci_reset);
2180 + reset_control_assert(res->axi_reset);
2181 + reset_control_assert(res->ahb_reset);
2182 + reset_control_assert(res->por_reset);
2183 + reset_control_assert(res->ext_reset);
2184 + reset_control_assert(res->phy_reset);
2185 +
2186 + writel(1, pcie->parf + PCIE20_PARF_PHY_CTRL);
2187 +
2188 ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
2189 if (ret < 0) {
2190 dev_err(dev, "cannot enable regulators\n");
2191 diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
2192 index 54933665b5f8b..93b5654ff2828 100644
2193 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
2194 +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
2195 @@ -277,13 +277,14 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
2196 static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
2197 {
2198 /*
2199 - * The signal type is GPIO if the signal name has "GPIO" as a prefix.
2200 + * The signal type is GPIO if the signal name has "GPI" as a prefix.
2201 * strncmp (rather than strcmp) is used to implement the prefix
2202 * requirement.
2203 *
2204 - * expr->signal might look like "GPIOT3" in the GPIO case.
2205 + * expr->signal might look like "GPIOB1" in the GPIO case.
2206 + * expr->signal might look like "GPIT0" in the GPI case.
2207 */
2208 - return strncmp(expr->signal, "GPIO", 4) == 0;
2209 + return strncmp(expr->signal, "GPI", 3) == 0;
2210 }
2211
2212 static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
2213 diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
2214 index 83981ad66a71e..4e89bbf6b76a0 100644
2215 --- a/drivers/pinctrl/intel/pinctrl-intel.c
2216 +++ b/drivers/pinctrl/intel/pinctrl-intel.c
2217 @@ -662,6 +662,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
2218
2219 value |= PADCFG1_TERM_UP;
2220
2221 + /* Set default strength value in case none is given */
2222 + if (arg == 1)
2223 + arg = 5000;
2224 +
2225 switch (arg) {
2226 case 20000:
2227 value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
2228 @@ -684,6 +688,10 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
2229 case PIN_CONFIG_BIAS_PULL_DOWN:
2230 value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
2231
2232 + /* Set default strength value in case none is given */
2233 + if (arg == 1)
2234 + arg = 5000;
2235 +
2236 switch (arg) {
2237 case 20000:
2238 value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
2239 diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
2240 index eab078244a4c3..12b2707296b64 100644
2241 --- a/drivers/pinctrl/pinctrl-amd.c
2242 +++ b/drivers/pinctrl/pinctrl-amd.c
2243 @@ -153,7 +153,7 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
2244 pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
2245 pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
2246 } else if (debounce < 250000) {
2247 - time = debounce / 15600;
2248 + time = debounce / 15625;
2249 pin_reg |= time & DB_TMR_OUT_MASK;
2250 pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
2251 pin_reg |= BIT(DB_TMR_LARGE_OFF);
2252 @@ -163,14 +163,14 @@ static int amd_gpio_set_debounce(struct gpio_chip *gc, unsigned offset,
2253 pin_reg |= BIT(DB_TMR_OUT_UNIT_OFF);
2254 pin_reg |= BIT(DB_TMR_LARGE_OFF);
2255 } else {
2256 - pin_reg &= ~DB_CNTRl_MASK;
2257 + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
2258 ret = -EINVAL;
2259 }
2260 } else {
2261 pin_reg &= ~BIT(DB_TMR_OUT_UNIT_OFF);
2262 pin_reg &= ~BIT(DB_TMR_LARGE_OFF);
2263 pin_reg &= ~DB_TMR_OUT_MASK;
2264 - pin_reg &= ~DB_CNTRl_MASK;
2265 + pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
2266 }
2267 writel(pin_reg, gpio_dev->base + offset * 4);
2268 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
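The divisor change reflects the mid-range debounce timer unit: one tick is 15.625 ms (15625 µs), so the 250 ms upper bound corresponds to exactly 16 ticks, and the old 15600 divisor could compute a tick count that no longer fits the timer field (assuming a four-bit field, which the 250 ms limit suggests). A two-line arithmetic check:

#include <stdio.h>

int main(void)
{
        unsigned int debounce = 249999; /* us, just under the 250 ms limit */

        printf("old: %u ticks\n", debounce / 15600);    /* 16: overflows 4 bits */
        printf("new: %u ticks\n", debounce / 15625);    /* 15: fits */
        return 0;
}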
2269 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
2270 index f32da0ca529e0..308bda2e9c000 100644
2271 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
2272 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
2273 @@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
2274 rcu_read_lock();
2275 list_for_each_entry_rcu(h,
2276 &tmp_pg->dh_list, node) {
2277 - /* h->sdev should always be valid */
2278 - BUG_ON(!h->sdev);
2279 + if (!h->sdev)
2280 + continue;
2281 h->sdev->access_state = desc[0];
2282 }
2283 rcu_read_unlock();
2284 @@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
2285 pg->expiry = 0;
2286 rcu_read_lock();
2287 list_for_each_entry_rcu(h, &pg->dh_list, node) {
2288 - BUG_ON(!h->sdev);
2289 + if (!h->sdev)
2290 + continue;
2291 h->sdev->access_state =
2292 (pg->state & SCSI_ACCESS_STATE_MASK);
2293 if (pg->pref)
2294 @@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev)
2295 spin_lock(&h->pg_lock);
2296 pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
2297 rcu_assign_pointer(h->pg, NULL);
2298 - h->sdev = NULL;
2299 spin_unlock(&h->pg_lock);
2300 if (pg) {
2301 spin_lock_irq(&pg->lock);
2302 @@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev)
2303 kref_put(&pg->kref, release_port_group);
2304 }
2305 sdev->handler_data = NULL;
2306 + synchronize_rcu();
2307 kfree(h);
2308 }
2309
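The alua hunks turn a BUG_ON() into a skip and defer the final kfree() until after synchronize_rcu(), so list readers running under rcu_read_lock() can never dereference a freed entry. A sketch of that lifetime rule; synchronize_rcu() is stubbed out only to keep the sketch self-contained (the kernel primitive waits for all in-flight readers):

#include <stdlib.h>

struct sdev { int access_state; };

struct handler {
        struct sdev *sdev;      /* may be NULL: tolerate, don't BUG */
};

static void synchronize_rcu_stub(void)
{
        /* kernel: block until every rcu_read_lock() section that
         * could still see the object has finished */
}

static void reader(struct handler *h, int state)
{
        /* runs under rcu_read_lock() in the kernel */
        if (!h->sdev)
                return;         /* skip instead of BUG_ON(!h->sdev) */
        h->sdev->access_state = state;
}

static void detach(struct handler *h)
{
        /* unpublish h from the list here, leaving h->sdev intact for
         * readers already in flight ... */
        synchronize_rcu_stub();
        /* ... and only then free the containing object */
        free(h);
}

int main(void)
{
        struct handler *h = malloc(sizeof(*h));

        if (!h)
                return 1;
        h->sdev = NULL;
        reader(h, 1);
        detach(h);
        return 0;
}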
2310 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
2311 index e67cb4561aace..bac705990a961 100644
2312 --- a/drivers/scsi/hpsa.c
2313 +++ b/drivers/scsi/hpsa.c
2314 @@ -8854,7 +8854,7 @@ reinit_after_soft_reset:
2315 /* hook into SCSI subsystem */
2316 rc = hpsa_scsi_add_host(h);
2317 if (rc)
2318 - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
2319 + goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
2320
2321 /* Monitor the controller for firmware lockups */
2322 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
2323 @@ -8869,6 +8869,8 @@ reinit_after_soft_reset:
2324 HPSA_EVENT_MONITOR_INTERVAL);
2325 return 0;
2326
2327 +clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
2328 + kfree(h->lastlogicals);
2329 clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
2330 hpsa_free_performant_mode(h);
2331 h->access.set_intr_mask(h, HPSA_INTR_OFF);
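The new clean8 label slots h->lastlogicals into the existing unwind chain: each goto target frees exactly the resources acquired before the failure point, in reverse order of acquisition. A generic sketch of that pattern, with malloc() standing in for the driver's allocations:

#include <stdlib.h>

static int probe(int fail_late)
{
        void *perf, *lastlogicals;

        perf = malloc(16);
        if (!perf)
                goto out;

        lastlogicals = malloc(16);
        if (!lastlogicals)
                goto clean_perf;

        if (fail_late)                  /* e.g. hpsa_scsi_add_host() failing */
                goto clean_lastlogicals;

        /* success; freed here only so the sketch itself does not
         * leak -- a driver keeps these until remove() */
        free(lastlogicals);
        free(perf);
        return 0;

clean_lastlogicals:
        free(lastlogicals);
clean_perf:
        free(perf);
out:
        return -1;
}

int main(void)
{
        return (probe(1) == -1 && probe(0) == 0) ? 0 : 1;
}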
2332 diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
2333 index 3d58d24de6b61..8be8c510fdf79 100644
2334 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c
2335 +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
2336 @@ -1641,6 +1641,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget)
2337 reply_q->irq_poll_scheduled = false;
2338 reply_q->irq_line_enable = true;
2339 enable_irq(reply_q->os_irq);
2340 + /*
2341 + * Go for one more round of processing the
2342 +	 * reply descriptor post queue in case the HBA
2343 +	 * firmware has posted some reply descriptors
2344 +	 * while re-enabling the IRQ.
2345 + */
2346 + _base_process_reply_queue(reply_q);
2347 }
2348
2349 return num_entries;
2350 diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
2351 index cfd9176e6413c..9ae1c96f4d3d4 100644
2352 --- a/drivers/spi/spi-bcm2835.c
2353 +++ b/drivers/spi/spi-bcm2835.c
2354 @@ -1179,7 +1179,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
2355 struct spi_controller *ctlr = spi->controller;
2356 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
2357 struct gpio_chip *chip;
2358 - enum gpio_lookup_flags lflags;
2359 u32 cs;
2360
2361 /*
2362 @@ -1247,7 +1246,7 @@ static int bcm2835_spi_setup(struct spi_device *spi)
2363
2364 spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
2365 DRV_NAME,
2366 - lflags,
2367 + GPIO_LOOKUP_FLAGS_DEFAULT,
2368 GPIOD_OUT_LOW);
2369 if (IS_ERR(spi->cs_gpiod))
2370 return PTR_ERR(spi->cs_gpiod);
2371 diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
2372 index 641b21b544604..73a698eec7430 100644
2373 --- a/drivers/thunderbolt/nhi.c
2374 +++ b/drivers/thunderbolt/nhi.c
2375 @@ -410,12 +410,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
2376
2377 ring->vector = ret;
2378
2379 - ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
2380 - if (ring->irq < 0)
2381 - return ring->irq;
2382 + ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
2383 + if (ret < 0)
2384 + goto err_ida_remove;
2385 +
2386 + ring->irq = ret;
2387
2388 irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
2389 - return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
2390 + ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
2391 + if (ret)
2392 + goto err_ida_remove;
2393 +
2394 + return 0;
2395 +
2396 +err_ida_remove:
2397 + ida_simple_remove(&nhi->msix_ida, ring->vector);
2398 +
2399 + return ret;
2400 }
2401
2402 static void ring_release_msix(struct tb_ring *ring)
2403 diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
2404 index 4e17a7c7bf0ae..9e9bf87713458 100644
2405 --- a/drivers/thunderbolt/xdomain.c
2406 +++ b/drivers/thunderbolt/xdomain.c
2407 @@ -830,6 +830,7 @@ static void enumerate_services(struct tb_xdomain *xd)
2408
2409 id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
2410 if (id < 0) {
2411 + kfree(svc->key);
2412 kfree(svc);
2413 break;
2414 }
2415 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
2416 index 8313f81968d51..d91e051d13673 100644
2417 --- a/drivers/uio/uio.c
2418 +++ b/drivers/uio/uio.c
2419 @@ -413,10 +413,10 @@ static int uio_get_minor(struct uio_device *idev)
2420 return retval;
2421 }
2422
2423 -static void uio_free_minor(struct uio_device *idev)
2424 +static void uio_free_minor(unsigned long minor)
2425 {
2426 mutex_lock(&minor_lock);
2427 - idr_remove(&uio_idr, idev->minor);
2428 + idr_remove(&uio_idr, minor);
2429 mutex_unlock(&minor_lock);
2430 }
2431
2432 @@ -990,7 +990,7 @@ err_request_irq:
2433 err_uio_dev_add_attributes:
2434 device_del(&idev->dev);
2435 err_device_create:
2436 - uio_free_minor(idev);
2437 + uio_free_minor(idev->minor);
2438 put_device(&idev->dev);
2439 return ret;
2440 }
2441 @@ -1004,11 +1004,13 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
2442 void uio_unregister_device(struct uio_info *info)
2443 {
2444 struct uio_device *idev;
2445 + unsigned long minor;
2446
2447 if (!info || !info->uio_dev)
2448 return;
2449
2450 idev = info->uio_dev;
2451 + minor = idev->minor;
2452
2453 mutex_lock(&idev->info_lock);
2454 uio_dev_del_attributes(idev);
2455 @@ -1024,7 +1026,7 @@ void uio_unregister_device(struct uio_info *info)
2456
2457 device_unregister(&idev->dev);
2458
2459 - uio_free_minor(idev);
2460 + uio_free_minor(minor);
2461
2462 return;
2463 }
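The uio change caches idev->minor before device_unregister(), because dropping the last reference there may free idev, after which reading idev->minor for uio_free_minor() would be a use-after-free. A sketch of the snapshot-before-teardown rule, with free() modeling the final reference drop:

#include <stdio.h>
#include <stdlib.h>

struct dev { unsigned long minor; };

static void unregister(struct dev *d)
{
        free(d);        /* last reference gone: d is now invalid */
}

static void free_minor(unsigned long minor)
{
        printf("minor %lu released\n", minor);
}

int main(void)
{
        struct dev *d = malloc(sizeof(*d));
        unsigned long minor;

        if (!d)
                return 1;
        d->minor = 7;

        minor = d->minor;       /* snapshot before teardown */
        unregister(d);
        free_minor(minor);      /* safe: freed memory is never read */
        return 0;
}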
2464 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2465 index ed99d98172f40..16c98e718001b 100644
2466 --- a/drivers/usb/class/cdc-acm.c
2467 +++ b/drivers/usb/class/cdc-acm.c
2468 @@ -1706,6 +1706,15 @@ static const struct usb_device_id acm_ids[] = {
2469 { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
2470 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2471 },
2472 + { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
2473 + .driver_info = DISABLE_ECHO, /* Don't echo banner */
2474 + },
2475 + { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
2476 + .driver_info = DISABLE_ECHO, /* Don't echo banner */
2477 + },
2478 + { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
2479 + .driver_info = DISABLE_ECHO, /* Don't echo banner */
2480 + },
2481 { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
2482 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
2483 },
2484 diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
2485 index ba88039449e03..58b8801ce8816 100644
2486 --- a/drivers/usb/dwc3/dwc3-pci.c
2487 +++ b/drivers/usb/dwc3/dwc3-pci.c
2488 @@ -40,6 +40,7 @@
2489 #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
2490 #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
2491 #define PCI_DEVICE_ID_INTEL_JSP 0x4dee
2492 +#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
2493
2494 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
2495 #define PCI_INTEL_BXT_FUNC_PMU_PWR 4
2496 @@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
2497 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
2498 (kernel_ulong_t) &dwc3_pci_intel_properties, },
2499
2500 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
2501 + (kernel_ulong_t) &dwc3_pci_intel_properties, },
2502 +
2503 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
2504 (kernel_ulong_t) &dwc3_pci_amd_properties, },
2505 { } /* Terminating Entry */
2506 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2507 index 1d65de84464d5..9269cda4c1831 100644
2508 --- a/drivers/usb/dwc3/gadget.c
2509 +++ b/drivers/usb/dwc3/gadget.c
2510 @@ -2627,6 +2627,11 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
2511 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
2512 status);
2513
2514 + req->request.actual = req->request.length - req->remaining;
2515 +
2516 + if (!dwc3_gadget_ep_request_completed(req))
2517 + goto out;
2518 +
2519 if (req->needs_extra_trb) {
2520 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
2521
2522 @@ -2642,13 +2647,6 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
2523 req->needs_extra_trb = false;
2524 }
2525
2526 - req->request.actual = req->request.length - req->remaining;
2527 -
2528 - if (!dwc3_gadget_ep_request_completed(req)) {
2529 - __dwc3_gadget_kick_transfer(dep);
2530 - goto out;
2531 - }
2532 -
2533 dwc3_gadget_giveback(dep, req, status);
2534
2535 out:
2536 @@ -2671,6 +2669,24 @@ static void dwc3_gadget_ep_cleanup_completed_requests(struct dwc3_ep *dep,
2537 }
2538 }
2539
2540 +static bool dwc3_gadget_ep_should_continue(struct dwc3_ep *dep)
2541 +{
2542 + struct dwc3_request *req;
2543 +
2544 + if (!list_empty(&dep->pending_list))
2545 + return true;
2546 +
2547 + /*
2548 + * We only need to check the first entry of the started list. We can
2549 + * assume the completed requests are removed from the started list.
2550 + */
2551 + req = next_request(&dep->started_list);
2552 + if (!req)
2553 + return false;
2554 +
2555 + return !dwc3_gadget_ep_request_completed(req);
2556 +}
2557 +
2558 static void dwc3_gadget_endpoint_frame_from_event(struct dwc3_ep *dep,
2559 const struct dwc3_event_depevt *event)
2560 {
2561 @@ -2700,6 +2716,8 @@ static void dwc3_gadget_endpoint_transfer_in_progress(struct dwc3_ep *dep,
2562
2563 if (stop)
2564 dwc3_stop_active_transfer(dep, true, true);
2565 + else if (dwc3_gadget_ep_should_continue(dep))
2566 + __dwc3_gadget_kick_transfer(dep);
2567
2568 /*
2569 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
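The reworked completion path above updates request.actual first, bails out early while a request is still in flight, and separately kicks the controller whenever the pending list is non-empty or the head of the started list is incomplete. A sketch of that decision, with the endpoint lists mocked as arrays:

#include <stdbool.h>
#include <stddef.h>

struct req { size_t length, remaining; };

static bool request_completed(const struct req *r)
{
        return r->remaining == 0;
}

static bool should_continue(const struct req *started, size_t n_started,
                            size_t n_pending)
{
        if (n_pending)
                return true;    /* queued but not yet on the wire */
        if (!n_started)
                return false;
        /* completed requests were already removed from the started
         * list, so checking the first entry is enough */
        return !request_completed(&started[0]);
}

int main(void)
{
        struct req half = { 512, 128 };         /* partially transferred */

        return should_continue(&half, 1, 0) ? 0 : 1;
}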
2570 diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
2571 index c3721225b61ed..b706ad3034bc1 100644
2572 --- a/drivers/usb/gadget/udc/goku_udc.c
2573 +++ b/drivers/usb/gadget/udc/goku_udc.c
2574 @@ -1757,6 +1757,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2575 goto err;
2576 }
2577
2578 + pci_set_drvdata(pdev, dev);
2579 spin_lock_init(&dev->lock);
2580 dev->pdev = pdev;
2581 dev->gadget.ops = &goku_ops;
2582 @@ -1790,7 +1791,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2583 }
2584 dev->regs = (struct goku_udc_regs __iomem *) base;
2585
2586 - pci_set_drvdata(pdev, dev);
2587 INFO(dev, "%s\n", driver_desc);
2588 INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
2589 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
2590 diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
2591 index 3c4abb5a1c3fc..73aba464b66ab 100644
2592 --- a/drivers/usb/host/xhci-histb.c
2593 +++ b/drivers/usb/host/xhci-histb.c
2594 @@ -241,7 +241,7 @@ static int xhci_histb_probe(struct platform_device *pdev)
2595 /* Initialize dma_mask and coherent_dma_mask to 32-bits */
2596 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2597 if (ret)
2598 - return ret;
2599 + goto disable_pm;
2600
2601 hcd = usb_create_hcd(driver, dev, dev_name(dev));
2602 if (!hcd) {
2603 diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2604 index a72fd5309b09f..443a35dde7f52 100644
2605 --- a/drivers/vfio/pci/vfio_pci.c
2606 +++ b/drivers/vfio/pci/vfio_pci.c
2607 @@ -334,7 +334,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev)
2608 pdev->vendor == PCI_VENDOR_ID_INTEL &&
2609 IS_ENABLED(CONFIG_VFIO_PCI_IGD)) {
2610 ret = vfio_pci_igd_init(vdev);
2611 - if (ret) {
2612 + if (ret && ret != -ENODEV) {
2613 pci_warn(pdev, "Failed to setup Intel IGD regions\n");
2614 goto disable_exit;
2615 }
2616 diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
2617 index e8f2bdbe05428..152e5188183ce 100644
2618 --- a/drivers/vfio/platform/vfio_platform_common.c
2619 +++ b/drivers/vfio/platform/vfio_platform_common.c
2620 @@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data)
2621
2622 ret = pm_runtime_get_sync(vdev->device);
2623 if (ret < 0)
2624 - goto err_pm;
2625 + goto err_rst;
2626
2627 ret = vfio_platform_call_reset(vdev, &extra_dbg);
2628 if (ret && vdev->reset_required) {
2629 @@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data)
2630
2631 err_rst:
2632 pm_runtime_put(vdev->device);
2633 -err_pm:
2634 vfio_platform_irq_cleanup(vdev);
2635 err_irq:
2636 vfio_platform_regions_cleanup(vdev);
2637 diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
2638 index d21cf61d86b9f..3b19b009452a2 100644
2639 --- a/fs/afs/yfsclient.c
2640 +++ b/fs/afs/yfsclient.c
2641 @@ -2162,6 +2162,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
2642 memcpy(bp, acl->data, acl->size);
2643 if (acl->size != size)
2644 memset((void *)bp + acl->size, 0, size - acl->size);
2645 + bp += size / sizeof(__be32);
2646 yfs_check_req(call, bp);
2647
2648 trace_afs_make_fs_call(call, &vnode->fid);
2649 diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
2650 index 96843934dcbba..1cb7f5d79765d 100644
2651 --- a/fs/btrfs/dev-replace.c
2652 +++ b/fs/btrfs/dev-replace.c
2653 @@ -55,6 +55,17 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
2654 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
2655 if (ret) {
2656 no_valid_dev_replace_entry_found:
2657 + /*
2658 + * We don't have a replace item or it's corrupted. If there is
2659 + * a replace target, fail the mount.
2660 + */
2661 + if (btrfs_find_device(fs_info->fs_devices,
2662 + BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
2663 + btrfs_err(fs_info,
2664 + "found replace target device without a valid replace item");
2665 + ret = -EUCLEAN;
2666 + goto out;
2667 + }
2668 ret = 0;
2669 dev_replace->replace_state =
2670 BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
2671 @@ -107,8 +118,19 @@ no_valid_dev_replace_entry_found:
2672 case BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED:
2673 case BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED:
2674 case BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED:
2675 - dev_replace->srcdev = NULL;
2676 - dev_replace->tgtdev = NULL;
2677 + /*
2678 + * We don't have an active replace item but if there is a
2679 + * replace target, fail the mount.
2680 + */
2681 + if (btrfs_find_device(fs_info->fs_devices,
2682 + BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
2683 + btrfs_err(fs_info,
2684 + "replace devid present without an active replace item");
2685 + ret = -EUCLEAN;
2686 + } else {
2687 + dev_replace->srcdev = NULL;
2688 + dev_replace->tgtdev = NULL;
2689 + }
2690 break;
2691 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
2692 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
2693 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2694 index 388449101705e..c6d9e8c07c236 100644
2695 --- a/fs/btrfs/extent-tree.c
2696 +++ b/fs/btrfs/extent-tree.c
2697 @@ -3800,11 +3800,12 @@ static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info,
2698 * |- Push harder to find free extents
2699 * |- If not found, re-iterate all block groups
2700 */
2701 -static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
2702 +static noinline int find_free_extent(struct btrfs_root *root,
2703 u64 ram_bytes, u64 num_bytes, u64 empty_size,
2704 u64 hint_byte, struct btrfs_key *ins,
2705 u64 flags, int delalloc)
2706 {
2707 + struct btrfs_fs_info *fs_info = root->fs_info;
2708 int ret = 0;
2709 int cache_block_group_error = 0;
2710 struct btrfs_free_cluster *last_ptr = NULL;
2711 @@ -3833,7 +3834,7 @@ static noinline int find_free_extent(struct btrfs_fs_info *fs_info,
2712 ins->objectid = 0;
2713 ins->offset = 0;
2714
2715 - trace_find_free_extent(fs_info, num_bytes, empty_size, flags);
2716 + trace_find_free_extent(root, num_bytes, empty_size, flags);
2717
2718 space_info = btrfs_find_space_info(fs_info, flags);
2719 if (!space_info) {
2720 @@ -4141,7 +4142,7 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes,
2721 flags = get_alloc_profile_by_root(root, is_data);
2722 again:
2723 WARN_ON(num_bytes < fs_info->sectorsize);
2724 - ret = find_free_extent(fs_info, ram_bytes, num_bytes, empty_size,
2725 + ret = find_free_extent(root, ram_bytes, num_bytes, empty_size,
2726 hint_byte, ins, flags, delalloc);
2727 if (!ret && !is_data) {
2728 btrfs_dec_block_group_reservations(fs_info, ins->objectid);
2729 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2730 index 63394b450afcc..f58e03d1775d8 100644
2731 --- a/fs/btrfs/ioctl.c
2732 +++ b/fs/btrfs/ioctl.c
2733 @@ -1255,6 +1255,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
2734 u64 page_start;
2735 u64 page_end;
2736 u64 page_cnt;
2737 + u64 start = (u64)start_index << PAGE_SHIFT;
2738 int ret;
2739 int i;
2740 int i_done;
2741 @@ -1271,8 +1272,7 @@ static int cluster_pages_for_defrag(struct inode *inode,
2742 page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1);
2743
2744 ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
2745 - start_index << PAGE_SHIFT,
2746 - page_cnt << PAGE_SHIFT);
2747 + start, page_cnt << PAGE_SHIFT);
2748 if (ret)
2749 return ret;
2750 i_done = 0;
2751 @@ -1361,8 +1361,7 @@ again:
2752 btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
2753 spin_unlock(&BTRFS_I(inode)->lock);
2754 btrfs_delalloc_release_space(inode, data_reserved,
2755 - start_index << PAGE_SHIFT,
2756 - (page_cnt - i_done) << PAGE_SHIFT, true);
2757 + start, (page_cnt - i_done) << PAGE_SHIFT, true);
2758 }
2759
2760
2761 @@ -1389,8 +1388,7 @@ out:
2762 put_page(pages[i]);
2763 }
2764 btrfs_delalloc_release_space(inode, data_reserved,
2765 - start_index << PAGE_SHIFT,
2766 - page_cnt << PAGE_SHIFT, true);
2767 + start, page_cnt << PAGE_SHIFT, true);
2768 btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
2769 extent_changeset_free(data_reserved);
2770 return ret;
2771 @@ -3752,6 +3750,8 @@ process_slot:
2772 ret = -EINTR;
2773 goto out;
2774 }
2775 +
2776 + cond_resched();
2777 }
2778 ret = 0;
2779
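The defrag fix hoists (u64)start_index << PAGE_SHIFT into a variable because the index is a 32-bit quantity on 32-bit kernels: shifting it first wraps the byte offset before it is ever widened. A minimal demonstration of why the cast must precede the shift:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t start_index = 0x00200000;      /* page 2M -> byte offset 8 GiB */

        uint64_t wrong = start_index << PAGE_SHIFT;             /* wraps in 32 bits */
        uint64_t right = (uint64_t)start_index << PAGE_SHIFT;   /* widen, then shift */

        printf("wrong=%llu right=%llu\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}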
2780 diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
2781 index 9a2f15f4c80e0..bbd63535965c4 100644
2782 --- a/fs/btrfs/ref-verify.c
2783 +++ b/fs/btrfs/ref-verify.c
2784 @@ -851,6 +851,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
2785 "dropping a ref for a root that doesn't have a ref on the block");
2786 dump_block_entry(fs_info, be);
2787 dump_ref_action(fs_info, ra);
2788 + kfree(ref);
2789 kfree(ra);
2790 goto out_unlock;
2791 }
2792 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
2793 index 1bc57f7b91cfa..001f13cf9ab8f 100644
2794 --- a/fs/btrfs/relocation.c
2795 +++ b/fs/btrfs/relocation.c
2796 @@ -2287,6 +2287,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2797 struct btrfs_root_item *root_item;
2798 struct btrfs_path *path;
2799 struct extent_buffer *leaf;
2800 + int reserve_level;
2801 int level;
2802 int max_level;
2803 int replaced = 0;
2804 @@ -2335,7 +2336,8 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
2805 * Thus the needed metadata size is at most root_level * nodesize,
2806 * and * 2 since we have two trees to COW.
2807 */
2808 - min_reserved = fs_info->nodesize * btrfs_root_level(root_item) * 2;
2809 + reserve_level = max_t(int, 1, btrfs_root_level(root_item));
2810 + min_reserved = fs_info->nodesize * reserve_level * 2;
2811 memset(&next_key, 0, sizeof(next_key));
2812
2813 while (1) {
2814 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2815 index 58910a0a3e4a4..808c5985904ed 100644
2816 --- a/fs/btrfs/volumes.c
2817 +++ b/fs/btrfs/volumes.c
2818 @@ -1245,22 +1245,13 @@ again:
2819 continue;
2820 }
2821
2822 - if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
2823 - /*
2824 - * In the first step, keep the device which has
2825 - * the correct fsid and the devid that is used
2826 - * for the dev_replace procedure.
2827 - * In the second step, the dev_replace state is
2828 - * read from the device tree and it is known
2829 - * whether the procedure is really active or
2830 - * not, which means whether this device is
2831 - * used or whether it should be removed.
2832 - */
2833 - if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
2834 - &device->dev_state)) {
2835 - continue;
2836 - }
2837 - }
2838 + /*
2839 +	 * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
2840 + * in btrfs_init_dev_replace() so just continue.
2841 + */
2842 + if (device->devid == BTRFS_DEV_REPLACE_DEVID)
2843 + continue;
2844 +
2845 if (device->bdev) {
2846 blkdev_put(device->bdev, device->mode);
2847 device->bdev = NULL;
2848 @@ -1269,9 +1260,6 @@ again:
2849 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2850 list_del_init(&device->dev_alloc_list);
2851 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2852 - if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
2853 - &device->dev_state))
2854 - fs_devices->rw_devices--;
2855 }
2856 list_del_init(&device->dev_list);
2857 fs_devices->num_devices--;
2858 @@ -2728,9 +2716,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
2859 btrfs_set_super_num_devices(fs_info->super_copy,
2860 orig_super_num_devices + 1);
2861
2862 - /* add sysfs device entry */
2863 - btrfs_sysfs_add_device_link(fs_devices, device);
2864 -
2865 /*
2866 * we've got more storage, clear any full flags on the space
2867 * infos
2868 @@ -2738,6 +2723,10 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
2869 btrfs_clear_space_info_full(fs_info);
2870
2871 mutex_unlock(&fs_info->chunk_mutex);
2872 +
2873 + /* Add sysfs device entry */
2874 + btrfs_sysfs_add_device_link(fs_devices, device);
2875 +
2876 mutex_unlock(&fs_devices->device_list_mutex);
2877
2878 if (seeding_dev) {
2879 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
2880 index 498777d859eb5..9bd03a2310328 100644
2881 --- a/fs/cifs/cifs_unicode.c
2882 +++ b/fs/cifs/cifs_unicode.c
2883 @@ -488,7 +488,13 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
2884 else if (map_chars == SFM_MAP_UNI_RSVD) {
2885 bool end_of_string;
2886
2887 - if (i == srclen - 1)
2888 + /**
2889 + * Remap spaces and periods found at the end of every
2890 + * component of the path. The special cases of '.' and
2891 + * '..' do not need to be dealt with explicitly because
2892 + * they are addressed in namei.c:link_path_walk().
2893 + **/
2894 + if ((i == srclen - 1) || (source[i+1] == '\\'))
2895 end_of_string = true;
2896 else
2897 end_of_string = false;
2898 diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
2899 index b36b414cd7a7e..70fd3af7b8cb5 100644
2900 --- a/fs/erofs/inode.c
2901 +++ b/fs/erofs/inode.c
2902 @@ -107,11 +107,9 @@ static struct page *erofs_read_inode(struct inode *inode,
2903 i_gid_write(inode, le32_to_cpu(die->i_gid));
2904 set_nlink(inode, le32_to_cpu(die->i_nlink));
2905
2906 - /* ns timestamp */
2907 - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
2908 - le64_to_cpu(die->i_ctime);
2909 - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
2910 - le32_to_cpu(die->i_ctime_nsec);
2911 + /* extended inode has its own timestamp */
2912 + inode->i_ctime.tv_sec = le64_to_cpu(die->i_ctime);
2913 + inode->i_ctime.tv_nsec = le32_to_cpu(die->i_ctime_nsec);
2914
2915 inode->i_size = le64_to_cpu(die->i_size);
2916
2917 @@ -149,11 +147,9 @@ static struct page *erofs_read_inode(struct inode *inode,
2918 i_gid_write(inode, le16_to_cpu(dic->i_gid));
2919 set_nlink(inode, le16_to_cpu(dic->i_nlink));
2920
2921 - /* use build time to derive all file time */
2922 - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
2923 - sbi->build_time;
2924 - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
2925 - sbi->build_time_nsec;
2926 + /* use build time for compact inodes */
2927 + inode->i_ctime.tv_sec = sbi->build_time;
2928 + inode->i_ctime.tv_nsec = sbi->build_time_nsec;
2929
2930 inode->i_size = le32_to_cpu(dic->i_size);
2931 if (erofs_inode_is_data_compressed(vi->datalayout))
2932 @@ -167,6 +163,11 @@ static struct page *erofs_read_inode(struct inode *inode,
2933 goto err_out;
2934 }
2935
2936 + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec;
2937 + inode->i_atime.tv_sec = inode->i_ctime.tv_sec;
2938 + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec;
2939 + inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec;
2940 +
2941 if (!nblks)
2942 /* measure inode.i_blocks as generic filesystems */
2943 inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
2944 diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
2945 index 2fec62d764fa9..519378a15bc6b 100644
2946 --- a/fs/ext4/inline.c
2947 +++ b/fs/ext4/inline.c
2948 @@ -1918,6 +1918,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
2949
2950 ext4_write_lock_xattr(inode, &no_expand);
2951 if (!ext4_has_inline_data(inode)) {
2952 + ext4_write_unlock_xattr(inode, &no_expand);
2953 *has_inline = 0;
2954 ext4_journal_stop(handle);
2955 return 0;
2956 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2957 index 6a260cc8bce6b..920658ca8777d 100644
2958 --- a/fs/ext4/super.c
2959 +++ b/fs/ext4/super.c
2960 @@ -1756,8 +1756,8 @@ static const struct mount_opts {
2961 {Opt_noquota, (EXT4_MOUNT_QUOTA | EXT4_MOUNT_USRQUOTA |
2962 EXT4_MOUNT_GRPQUOTA | EXT4_MOUNT_PRJQUOTA),
2963 MOPT_CLEAR | MOPT_Q},
2964 - {Opt_usrjquota, 0, MOPT_Q},
2965 - {Opt_grpjquota, 0, MOPT_Q},
2966 + {Opt_usrjquota, 0, MOPT_Q | MOPT_STRING},
2967 + {Opt_grpjquota, 0, MOPT_Q | MOPT_STRING},
2968 {Opt_offusrjquota, 0, MOPT_Q},
2969 {Opt_offgrpjquota, 0, MOPT_Q},
2970 {Opt_jqfmt_vfsold, QFMT_VFS_OLD, MOPT_QFMT},
2971 diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
2972 index 2466bb44a23c5..3d5aa0c10a4c1 100644
2973 --- a/fs/gfs2/rgrp.c
2974 +++ b/fs/gfs2/rgrp.c
2975 @@ -736,9 +736,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
2976 }
2977
2978 gfs2_free_clones(rgd);
2979 + return_all_reservations(rgd);
2980 kfree(rgd->rd_bits);
2981 rgd->rd_bits = NULL;
2982 - return_all_reservations(rgd);
2983 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
2984 }
2985 }
2986 @@ -1410,6 +1410,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
2987 if (!capable(CAP_SYS_ADMIN))
2988 return -EPERM;
2989
2990 + if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
2991 + return -EROFS;
2992 +
2993 if (!blk_queue_discard(q))
2994 return -EOPNOTSUPP;
2995
2996 diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
2997 index 5935ce5ae5636..50c925d9c6103 100644
2998 --- a/fs/gfs2/super.c
2999 +++ b/fs/gfs2/super.c
3000 @@ -689,6 +689,7 @@ restart:
3001 gfs2_jindex_free(sdp);
3002 /* Take apart glock structures and buffer lists */
3003 gfs2_gl_hash_clear(sdp);
3004 + truncate_inode_pages_final(&sdp->sd_aspace);
3005 gfs2_delete_debugfs_file(sdp);
3006 /* Unmount the locking protocol */
3007 gfs2_lm_unmount(sdp);
3008 diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
3009 index 62cf497f18eb4..5ef99b9ec8be7 100644
3010 --- a/fs/jbd2/checkpoint.c
3011 +++ b/fs/jbd2/checkpoint.c
3012 @@ -106,6 +106,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
3013 * for a checkpoint to free up some space in the log.
3014 */
3015 void __jbd2_log_wait_for_space(journal_t *journal)
3016 +__acquires(&journal->j_state_lock)
3017 +__releases(&journal->j_state_lock)
3018 {
3019 int nblocks, space_left;
3020 /* assert_spin_locked(&journal->j_state_lock); */
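
Note: the two lines added to __jbd2_log_wait_for_space() above are sparse annotations, not executable code. They declare that the function may drop and re-take j_state_lock internally, which silences sparse's context-imbalance warnings. A self-contained sketch of how such annotations are conventionally defined and used; the macro bodies mirror the kernel's compiler_types.h definitions, the rest is illustrative:

    #include <stdio.h>

    /* Under sparse (__CHECKER__) these attributes carry lock-context
     * information; for a normal compiler they expand to nothing, so
     * this file builds as plain C. */
    #ifdef __CHECKER__
    # define __acquires(x) __attribute__((context(x, 0, 1)))
    # define __releases(x) __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    struct lock { int held; };

    static void lock_take(struct lock *l) { l->held = 1; }
    static void lock_drop(struct lock *l) { l->held = 0; }

    /* The caller holds l on entry and on exit, but the function drops
     * and re-takes it in between; the annotation pair documents exactly
     * that, which is what the jbd2 hunk declares for j_state_lock. */
    static void wait_for_room(struct lock *l)
        __acquires(l)
        __releases(l)
    {
        lock_drop(l);
        /* ... sleep until the log has space ... */
        lock_take(l);
    }

    int main(void)
    {
        struct lock l = { 1 };

        wait_for_room(&l);
        printf("still held: %d\n", l.held);
        return 0;
    }
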
3021 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3022 index 90453309345d5..be05fb96757c3 100644
3023 --- a/fs/jbd2/transaction.c
3024 +++ b/fs/jbd2/transaction.c
3025 @@ -171,8 +171,10 @@ static void wait_transaction_switching(journal_t *journal)
3026 DEFINE_WAIT(wait);
3027
3028 if (WARN_ON(!journal->j_running_transaction ||
3029 - journal->j_running_transaction->t_state != T_SWITCH))
3030 + journal->j_running_transaction->t_state != T_SWITCH)) {
3031 + read_unlock(&journal->j_state_lock);
3032 return;
3033 + }
3034 prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
3035 TASK_UNINTERRUPTIBLE);
3036 read_unlock(&journal->j_state_lock);
3037 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
3038 index 70d8857b161df..60b4b6df1ed36 100644
3039 --- a/fs/ocfs2/super.c
3040 +++ b/fs/ocfs2/super.c
3041 @@ -1692,6 +1692,7 @@ static void ocfs2_inode_init_once(void *data)
3042
3043 oi->ip_blkno = 0ULL;
3044 oi->ip_clusters = 0;
3045 + oi->ip_next_orphan = NULL;
3046
3047 ocfs2_resv_init_once(&oi->ip_la_data_resv);
3048
3049 diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
3050 index 0a36f532cf86c..436f686a98918 100644
3051 --- a/fs/xfs/libxfs/xfs_alloc.c
3052 +++ b/fs/xfs/libxfs/xfs_alloc.c
3053 @@ -2209,6 +2209,7 @@ xfs_defer_agfl_block(
3054 new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
3055 new->xefi_blockcount = 1;
3056 new->xefi_oinfo = *oinfo;
3057 + new->xefi_skip_discard = false;
3058
3059 trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
3060
3061 diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
3062 index e2798c6f3a5f3..093716a074fb7 100644
3063 --- a/fs/xfs/libxfs/xfs_bmap.h
3064 +++ b/fs/xfs/libxfs/xfs_bmap.h
3065 @@ -52,9 +52,9 @@ struct xfs_extent_free_item
3066 {
3067 xfs_fsblock_t xefi_startblock;/* starting fs block number */
3068 xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
3069 + bool xefi_skip_discard;
3070 struct list_head xefi_list;
3071 struct xfs_owner_info xefi_oinfo; /* extent owner */
3072 - bool xefi_skip_discard;
3073 };
3074
3075 #define XFS_BMAP_MAX_NMAP 4
3076 diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
3077 index 38e9414878b3e..9d3c67b654ca7 100644
3078 --- a/fs/xfs/libxfs/xfs_rmap.c
3079 +++ b/fs/xfs/libxfs/xfs_rmap.c
3080 @@ -1379,7 +1379,7 @@ xfs_rmap_convert_shared(
3081 * record for our insertion point. This will also give us the record for
3082 * start block contiguity tests.
3083 */
3084 - error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
3085 + error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
3086 &PREV, &i);
3087 if (error)
3088 goto done;
3089 diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
3090 index fc78efa52c94e..3780609c7860c 100644
3091 --- a/fs/xfs/libxfs/xfs_rmap_btree.c
3092 +++ b/fs/xfs/libxfs/xfs_rmap_btree.c
3093 @@ -243,8 +243,8 @@ xfs_rmapbt_key_diff(
3094 else if (y > x)
3095 return -1;
3096
3097 - x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
3098 - y = rec->rm_offset;
3099 + x = be64_to_cpu(kp->rm_offset);
3100 + y = xfs_rmap_irec_offset_pack(rec);
3101 if (x > y)
3102 return 1;
3103 else if (y > x)
3104 @@ -275,8 +275,8 @@ xfs_rmapbt_diff_two_keys(
3105 else if (y > x)
3106 return -1;
3107
3108 - x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
3109 - y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
3110 + x = be64_to_cpu(kp1->rm_offset);
3111 + y = be64_to_cpu(kp2->rm_offset);
3112 if (x > y)
3113 return 1;
3114 else if (y > x)
3115 @@ -390,8 +390,8 @@ xfs_rmapbt_keys_inorder(
3116 return 1;
3117 else if (a > b)
3118 return 0;
3119 - a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
3120 - b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
3121 + a = be64_to_cpu(k1->rmap.rm_offset);
3122 + b = be64_to_cpu(k2->rmap.rm_offset);
3123 if (a <= b)
3124 return 1;
3125 return 0;
3126 @@ -420,8 +420,8 @@ xfs_rmapbt_recs_inorder(
3127 return 1;
3128 else if (a > b)
3129 return 0;
3130 - a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
3131 - b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
3132 + a = be64_to_cpu(r1->rmap.rm_offset);
3133 + b = be64_to_cpu(r2->rmap.rm_offset);
3134 if (a <= b)
3135 return 1;
3136 return 0;
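
Note: all four xfs_rmap_btree.c comparison helpers above stop masking the offset with XFS_RMAP_OFF() and compare the full 64-bit value (packed via xfs_rmap_irec_offset_pack() where an in-core record is involved). The rmap offset field carries record flags such as "unwritten" in its high bits, so masking them off made genuinely distinct keys compare equal and could mis-order the btree. A hedged sketch of the failure mode; the bit positions here are assumptions for the demo, not the on-disk layout:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_UNWRITTEN (1ULL << 61)
    #define OFF_MASK       ((1ULL << 54) - 1)

    static int cmp_masked(uint64_t a, uint64_t b)
    {
        a &= OFF_MASK;          /* old behaviour: flags stripped */
        b &= OFF_MASK;
        return (a > b) - (a < b);
    }

    static int cmp_full(uint64_t a, uint64_t b)
    {
        return (a > b) - (a < b);   /* new behaviour: flags included */
    }

    int main(void)
    {
        uint64_t k1 = 100;                  /* written extent at offset 100 */
        uint64_t k2 = 100 | FLAG_UNWRITTEN; /* unwritten extent, same offset */

        printf("masked: %d (distinct keys collide)\n", cmp_masked(k1, k2));
        printf("full:   %d (keys stay ordered)\n", cmp_full(k1, k2));
        return 0;
    }
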
3137 diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
3138 index 392fb4df5c127..ec580c0d70fa3 100644
3139 --- a/fs/xfs/scrub/bmap.c
3140 +++ b/fs/xfs/scrub/bmap.c
3141 @@ -113,6 +113,8 @@ xchk_bmap_get_rmap(
3142
3143 if (info->whichfork == XFS_ATTR_FORK)
3144 rflags |= XFS_RMAP_ATTR_FORK;
3145 + if (irec->br_state == XFS_EXT_UNWRITTEN)
3146 + rflags |= XFS_RMAP_UNWRITTEN;
3147
3148 /*
3149 * CoW staging extents are owned (on disk) by the refcountbt, so
3150 diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
3151 index 6d483ab29e639..1bea029b634a6 100644
3152 --- a/fs/xfs/scrub/inode.c
3153 +++ b/fs/xfs/scrub/inode.c
3154 @@ -121,8 +121,7 @@ xchk_inode_flags(
3155 goto bad;
3156
3157 /* rt flags require rt device */
3158 - if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) &&
3159 - !mp->m_rtdev_targp)
3160 + if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
3161 goto bad;
3162
3163 /* new rt bitmap flag only valid for rbmino */
3164 diff --git a/fs/xfs/scrub/refcount.c b/fs/xfs/scrub/refcount.c
3165 index 0cab11a5d3907..5c6b71b75ca10 100644
3166 --- a/fs/xfs/scrub/refcount.c
3167 +++ b/fs/xfs/scrub/refcount.c
3168 @@ -170,7 +170,6 @@ xchk_refcountbt_process_rmap_fragments(
3169 */
3170 INIT_LIST_HEAD(&worklist);
3171 rbno = NULLAGBLOCK;
3172 - nr = 1;
3173
3174 /* Make sure the fragments actually /are/ in agbno order. */
3175 bno = 0;
3176 @@ -184,15 +183,14 @@ xchk_refcountbt_process_rmap_fragments(
3177 * Find all the rmaps that start at or before the refc extent,
3178 * and put them on the worklist.
3179 */
3180 + nr = 0;
3181 list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
3182 - if (frag->rm.rm_startblock > refchk->bno)
3183 - goto done;
3184 + if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
3185 + break;
3186 bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
3187 if (bno < rbno)
3188 rbno = bno;
3189 list_move_tail(&frag->list, &worklist);
3190 - if (nr == target_nr)
3191 - break;
3192 nr++;
3193 }
3194
3195 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
3196 index fe285d123d69f..dec511823fcbc 100644
3197 --- a/fs/xfs/xfs_iops.c
3198 +++ b/fs/xfs/xfs_iops.c
3199 @@ -885,6 +885,16 @@ xfs_setattr_size(
3200 error = iomap_zero_range(inode, oldsize, newsize - oldsize,
3201 &did_zeroing, &xfs_iomap_ops);
3202 } else {
3203 + /*
3204 + * iomap won't detect a dirty page over an unwritten block (or a
3205 + * cow block over a hole) and subsequently skips zeroing the
3206 + * newly post-EOF portion of the page. Flush the new EOF to
3207 + * convert the block before the pagecache truncate.
3208 + */
3209 + error = filemap_write_and_wait_range(inode->i_mapping, newsize,
3210 + newsize);
3211 + if (error)
3212 + return error;
3213 error = iomap_truncate_page(inode, newsize, &did_zeroing,
3214 &xfs_iomap_ops);
3215 }
3216 diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
3217 index a339bd5fa2604..f63fe8d924a36 100644
3218 --- a/fs/xfs/xfs_pnfs.c
3219 +++ b/fs/xfs/xfs_pnfs.c
3220 @@ -134,7 +134,7 @@ xfs_fs_map_blocks(
3221 goto out_unlock;
3222 error = invalidate_inode_pages2(inode->i_mapping);
3223 if (WARN_ON_ONCE(error))
3224 - return error;
3225 + goto out_unlock;
3226
3227 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
3228 offset_fsb = XFS_B_TO_FSBT(mp, offset);
3229 diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
3230 index 080012a6f0254..157e4a6a83f6d 100644
3231 --- a/include/linux/arm-smccc.h
3232 +++ b/include/linux/arm-smccc.h
3233 @@ -76,6 +76,8 @@
3234 ARM_SMCCC_SMC_32, \
3235 0, 0x7fff)
3236
3237 +#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
3238 +
3239 #ifndef __ASSEMBLY__
3240
3241 #include <linux/linkage.h>
3242 diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
3243 index a954def26c0dd..0783b0c6d9e2f 100644
3244 --- a/include/linux/can/skb.h
3245 +++ b/include/linux/can/skb.h
3246 @@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
3247 */
3248 static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
3249 {
3250 - if (skb_shared(skb)) {
3251 - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
3252 + struct sk_buff *nskb;
3253
3254 - if (likely(nskb)) {
3255 - can_skb_set_owner(nskb, skb->sk);
3256 - consume_skb(skb);
3257 - return nskb;
3258 - } else {
3259 - kfree_skb(skb);
3260 - return NULL;
3261 - }
3262 + nskb = skb_clone(skb, GFP_ATOMIC);
3263 + if (unlikely(!nskb)) {
3264 + kfree_skb(skb);
3265 + return NULL;
3266 }
3267
3268 - /* we can assume to have an unshared skb with proper owner */
3269 - return skb;
3270 + can_skb_set_owner(nskb, skb->sk);
3271 + consume_skb(skb);
3272 + return nskb;
3273 }
3274
3275 #endif /* !_CAN_SKB_H */
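
Note: the can_create_echo_skb() rewrite above drops the skb_shared() special case, so the helper now always returns a private clone and always releases the caller's reference, once, on both the success and the failure path. A toy refcounted-buffer sketch of that ownership rule; buf_clone/buf_put are illustrative stand-ins for skb_clone()/consume_skb():

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int refs; };

    /* Stand-in for skb_clone(); may fail. Payload copy elided. */
    static struct buf *buf_clone(struct buf *b)
    {
        struct buf *n = malloc(sizeof(*n));

        (void)b;
        if (n)
            n->refs = 1;
        return n;
    }

    static void buf_put(struct buf *b)
    {
        if (--b->refs == 0)
            free(b);
    }

    /* Always return a private copy; the input reference is released
     * exactly once, whether or not the clone succeeds. */
    static struct buf *make_echo(struct buf *b)
    {
        struct buf *n = buf_clone(b);

        buf_put(b);
        return n;   /* NULL if the clone failed */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));
        struct buf *echo;

        b->refs = 1;
        echo = make_echo(b);
        printf("echo %s\n", echo ? "created" : "failed");
        if (echo)
            buf_put(echo);
        return 0;
    }
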
3276 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
3277 index d7ee4c6bad482..e8579412ad214 100644
3278 --- a/include/linux/compiler-gcc.h
3279 +++ b/include/linux/compiler-gcc.h
3280 @@ -170,5 +170,3 @@
3281 #else
3282 #define __diag_GCC_8(s)
3283 #endif
3284 -
3285 -#define __no_fgcse __attribute__((optimize("-fno-gcse")))
3286 diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
3287 index 72393a8c1a6c5..77433633572e4 100644
3288 --- a/include/linux/compiler_types.h
3289 +++ b/include/linux/compiler_types.h
3290 @@ -212,10 +212,6 @@ struct ftrace_likely_data {
3291 #define asm_inline asm
3292 #endif
3293
3294 -#ifndef __no_fgcse
3295 -# define __no_fgcse
3296 -#endif
3297 -
3298 /* Are two types/vars the same type (ignoring qualifiers)? */
3299 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
3300
3301 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
3302 index 89016d08f6a27..f6267e2883f26 100644
3303 --- a/include/linux/netfilter/nfnetlink.h
3304 +++ b/include/linux/netfilter/nfnetlink.h
3305 @@ -24,6 +24,12 @@ struct nfnl_callback {
3306 const u_int16_t attr_count; /* number of nlattr's */
3307 };
3308
3309 +enum nfnl_abort_action {
3310 + NFNL_ABORT_NONE = 0,
3311 + NFNL_ABORT_AUTOLOAD,
3312 + NFNL_ABORT_VALIDATE,
3313 +};
3314 +
3315 struct nfnetlink_subsystem {
3316 const char *name;
3317 __u8 subsys_id; /* nfnetlink subsystem ID */
3318 @@ -31,7 +37,8 @@ struct nfnetlink_subsystem {
3319 const struct nfnl_callback *cb; /* callback for individual types */
3320 struct module *owner;
3321 int (*commit)(struct net *net, struct sk_buff *skb);
3322 - int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
3323 + int (*abort)(struct net *net, struct sk_buff *skb,
3324 + enum nfnl_abort_action action);
3325 void (*cleanup)(struct net *net);
3326 bool (*valid_genid)(struct net *net, u32 genid);
3327 };
3328 diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
3329 index 082e2c41b7ff9..5b70ca868bb19 100644
3330 --- a/include/linux/netfilter_ipv4.h
3331 +++ b/include/linux/netfilter_ipv4.h
3332 @@ -16,7 +16,7 @@ struct ip_rt_info {
3333 u_int32_t mark;
3334 };
3335
3336 -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type);
3337 +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type);
3338
3339 struct nf_queue_entry;
3340
3341 diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
3342 index 9b67394471e1c..48314ade1506f 100644
3343 --- a/include/linux/netfilter_ipv6.h
3344 +++ b/include/linux/netfilter_ipv6.h
3345 @@ -42,7 +42,7 @@ struct nf_ipv6_ops {
3346 #if IS_MODULE(CONFIG_IPV6)
3347 int (*chk_addr)(struct net *net, const struct in6_addr *addr,
3348 const struct net_device *dev, int strict);
3349 - int (*route_me_harder)(struct net *net, struct sk_buff *skb);
3350 + int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb);
3351 int (*dev_get_saddr)(struct net *net, const struct net_device *dev,
3352 const struct in6_addr *daddr, unsigned int srcprefs,
3353 struct in6_addr *saddr);
3354 @@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
3355 #endif
3356 }
3357
3358 -int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
3359 +int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb);
3360
3361 -static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
3362 +static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb)
3363 {
3364 #if IS_MODULE(CONFIG_IPV6)
3365 const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
3366 @@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
3367 if (!v6_ops)
3368 return -EHOSTUNREACH;
3369
3370 - return v6_ops->route_me_harder(net, skb);
3371 + return v6_ops->route_me_harder(net, sk, skb);
3372 #elif IS_BUILTIN(CONFIG_IPV6)
3373 - return ip6_route_me_harder(net, skb);
3374 + return ip6_route_me_harder(net, sk, skb);
3375 #else
3376 return -EHOSTUNREACH;
3377 #endif
3378 diff --git a/include/linux/prandom.h b/include/linux/prandom.h
3379 index aa16e6468f91e..cc1e71334e53c 100644
3380 --- a/include/linux/prandom.h
3381 +++ b/include/linux/prandom.h
3382 @@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
3383 void prandom_seed(u32 seed);
3384 void prandom_reseed_late(void);
3385
3386 +#if BITS_PER_LONG == 64
3387 +/*
3388 + * The core SipHash round function. Each line can be executed in
3389 + * parallel given enough CPU resources.
3390 + */
3391 +#define PRND_SIPROUND(v0, v1, v2, v3) ( \
3392 + v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
3393 + v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
3394 + v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
3395 + v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
3396 +)
3397 +
3398 +#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
3399 +#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
3400 +
3401 +#elif BITS_PER_LONG == 32
3402 +/*
3403 + * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
3404 + * This is weaker, but 32-bit machines are not used for high-traffic
3405 + * applications, so there is less output for an attacker to analyze.
3406 + */
3407 +#define PRND_SIPROUND(v0, v1, v2, v3) ( \
3408 + v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
3409 + v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
3410 + v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
3411 + v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
3412 +)
3413 +#define PRND_K0 0x6c796765
3414 +#define PRND_K1 0x74656462
3415 +
3416 +#else
3417 +#error Unsupported BITS_PER_LONG
3418 +#endif
3419 +
3420 struct rnd_state {
3421 __u32 s1, s2, s3, s4;
3422 };
3423
3424 -DECLARE_PER_CPU(struct rnd_state, net_rand_state);
3425 -
3426 u32 prandom_u32_state(struct rnd_state *state);
3427 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
3428 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
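
Note: PRND_SIPROUND above is the standard SipHash round on 64-bit builds and the reduced-width HSipHash round on 32-bit builds, written as a comma expression so it can be used in expression context; PRND_K0/PRND_K1 are derived from the SipHash initialization constants. A freestanding check of the 64-bit round, with rol64 defined locally since this is not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    static inline uint64_t rol64(uint64_t w, unsigned s)
    {
        return (w << s) | (w >> (64 - s));
    }

    #define PRND_SIPROUND(v0, v1, v2, v3) ( \
        v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
        v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
        v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
        v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
    )

    int main(void)
    {
        uint64_t v0 = 1, v1 = 2, v2 = 3, v3 = 4;

        /* Two rounds per output word, as the lib/random32.c rewrite
         * later in this patch uses. */
        PRND_SIPROUND(v0, v1, v2, v3);
        PRND_SIPROUND(v0, v1, v2, v3);
        printf("sample output word: 0x%016llx\n",
               (unsigned long long)(v1 + v3));
        return 0;
    }
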
3429 diff --git a/include/linux/time64.h b/include/linux/time64.h
3430 index 19125489ae948..5eab3f2635186 100644
3431 --- a/include/linux/time64.h
3432 +++ b/include/linux/time64.h
3433 @@ -132,6 +132,10 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
3434 */
3435 static inline s64 timespec64_to_ns(const struct timespec64 *ts)
3436 {
3437 + /* Prevent multiplication overflow */
3438 + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
3439 + return KTIME_MAX;
3440 +
3441 return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
3442 }
3443
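
Note: with nanoseconds held in a signed 64-bit value, KTIME_MAX is 2^63 - 1 ns, so KTIME_SEC_MAX = KTIME_MAX / NSEC_PER_SEC = 9,223,372,036 seconds (roughly 292 years); any tv_sec at or beyond that would overflow the multiplication in timespec64_to_ns(). The cast to unsigned long long also sweeps negative tv_sec values into the clamp. A worked userspace version of the guarded conversion:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000LL
    #define KTIME_MAX     INT64_MAX
    #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)   /* 9223372036 */

    struct timespec64 { int64_t tv_sec; long tv_nsec; };

    static int64_t timespec64_to_ns(const struct timespec64 *ts)
    {
        /* The unsigned compare clamps both huge and negative tv_sec. */
        if ((uint64_t)ts->tv_sec >= (uint64_t)KTIME_SEC_MAX)
            return KTIME_MAX;
        return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
    }

    int main(void)
    {
        struct timespec64 ok  = { 1, 500 };
        struct timespec64 big = { KTIME_SEC_MAX, 0 };
        struct timespec64 neg = { -1, 0 };

        printf("%lld\n", (long long)timespec64_to_ns(&ok));   /* 1000000500 */
        printf("%lld\n", (long long)timespec64_to_ns(&big));  /* KTIME_MAX */
        printf("%lld\n", (long long)timespec64_to_ns(&neg));  /* KTIME_MAX */
        return 0;
    }
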
3444 diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
3445 index 75ae1899452b9..94a3adb65b8af 100644
3446 --- a/include/trace/events/btrfs.h
3447 +++ b/include/trace/events/btrfs.h
3448 @@ -1159,25 +1159,27 @@ DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
3449
3450 TRACE_EVENT(find_free_extent,
3451
3452 - TP_PROTO(const struct btrfs_fs_info *fs_info, u64 num_bytes,
3453 + TP_PROTO(const struct btrfs_root *root, u64 num_bytes,
3454 u64 empty_size, u64 data),
3455
3456 - TP_ARGS(fs_info, num_bytes, empty_size, data),
3457 + TP_ARGS(root, num_bytes, empty_size, data),
3458
3459 TP_STRUCT__entry_btrfs(
3460 + __field( u64, root_objectid )
3461 __field( u64, num_bytes )
3462 __field( u64, empty_size )
3463 __field( u64, data )
3464 ),
3465
3466 - TP_fast_assign_btrfs(fs_info,
3467 + TP_fast_assign_btrfs(root->fs_info,
3468 + __entry->root_objectid = root->root_key.objectid;
3469 __entry->num_bytes = num_bytes;
3470 __entry->empty_size = empty_size;
3471 __entry->data = data;
3472 ),
3473
3474 TP_printk_btrfs("root=%llu(%s) len=%llu empty_size=%llu flags=%llu(%s)",
3475 - show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
3476 + show_root_type(__entry->root_objectid),
3477 __entry->num_bytes, __entry->empty_size, __entry->data,
3478 __print_flags((unsigned long)__entry->data, "|",
3479 BTRFS_GROUP_FLAGS))
3480 diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
3481 index 28df77a948e56..f16e9fb97e9f4 100644
3482 --- a/include/trace/events/sunrpc.h
3483 +++ b/include/trace/events/sunrpc.h
3484 @@ -357,10 +357,10 @@ TRACE_EVENT(rpc_xdr_overflow,
3485 __field(size_t, tail_len)
3486 __field(unsigned int, page_len)
3487 __field(unsigned int, len)
3488 - __string(progname,
3489 - xdr->rqst->rq_task->tk_client->cl_program->name)
3490 - __string(procedure,
3491 - xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
3492 + __string(progname, xdr->rqst ?
3493 + xdr->rqst->rq_task->tk_client->cl_program->name : "unknown")
3494 + __string(procedure, xdr->rqst ?
3495 + xdr->rqst->rq_task->tk_msg.rpc_proc->p_name : "unknown")
3496 ),
3497
3498 TP_fast_assign(
3499 diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
3500 index e1d9adb212f93..b0d78bc0b1979 100644
3501 --- a/kernel/bpf/Makefile
3502 +++ b/kernel/bpf/Makefile
3503 @@ -1,6 +1,10 @@
3504 # SPDX-License-Identifier: GPL-2.0
3505 obj-y := core.o
3506 -CFLAGS_core.o += $(call cc-disable-warning, override-init)
3507 +ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
3508 +# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
3509 +cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
3510 +endif
3511 +CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
3512
3513 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
3514 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
3515 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
3516 index ef0e1e3e66f4a..56bc96f5ad208 100644
3517 --- a/kernel/bpf/core.c
3518 +++ b/kernel/bpf/core.c
3519 @@ -1299,7 +1299,7 @@ bool bpf_opcode_in_insntable(u8 code)
3520 *
3521 * Decode and execute eBPF instructions.
3522 */
3523 -static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
3524 +static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
3525 {
3526 #define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y
3527 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
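
Note: the BPF Makefile and core.c hunks above are one change seen from both sides. The Makefile now passes -fno-gcse for core.o only when the interpreter is compiled in and only for GCC on x86: the "$(CONFIG_X86)$(CONFIG_CC_IS_GCC)" concatenation expands to "yy" exactly when both symbols are "y", which is when "$(cflags-nogcse-yy)" is non-empty. In exchange, core.c drops the per-function __no_fgcse attribute, which relied on GCC's unreliable optimize attribute. GCSE matters here because ___bpf_prog_run() is a computed-goto dispatch loop, a construct GCC's GCSE pass can pessimize (see the commit cited in the Makefile comment). A minimal computed-goto interpreter of the same shape, in GNU C; the three-op instruction set is invented for the demo:

    #include <stdio.h>

    /* Tiny 3-op machine: 0 = inc, 1 = dec, 2 = halt. */
    static int run(const unsigned char *prog)
    {
        static const void *dispatch[] = { &&op_inc, &&op_dec, &&op_halt };
        int acc = 0;

    #define NEXT() goto *dispatch[*prog++]
        NEXT();
    op_inc:
        acc++;
        NEXT();
    op_dec:
        acc--;
        NEXT();
    op_halt:
        return acc;
    #undef NEXT
    }

    int main(void)
    {
        static const unsigned char prog[] = { 0, 0, 0, 1, 2 };  /* +3, -1 */

        printf("%d\n", run(prog));   /* 2 */
        return 0;
    }
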
3528 diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
3529 index 728ffec52cf36..03a67583f6fb9 100644
3530 --- a/kernel/bpf/hashtab.c
3531 +++ b/kernel/bpf/hashtab.c
3532 @@ -709,6 +709,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
3533 }
3534 }
3535
3536 +static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
3537 + void *value, bool onallcpus)
3538 +{
3539 + /* When using prealloc and not setting the initial value on all cpus,
3540 + * zero-fill element values for other cpus (just as what happens when
3541 + * not using prealloc). Otherwise, bpf program has no way to ensure
3542 + * known initial values for cpus other than current one
3543 + * (onallcpus=false always when coming from bpf prog).
3544 + */
3545 + if (htab_is_prealloc(htab) && !onallcpus) {
3546 + u32 size = round_up(htab->map.value_size, 8);
3547 + int current_cpu = raw_smp_processor_id();
3548 + int cpu;
3549 +
3550 + for_each_possible_cpu(cpu) {
3551 + if (cpu == current_cpu)
3552 + bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
3553 + size);
3554 + else
3555 + memset(per_cpu_ptr(pptr, cpu), 0, size);
3556 + }
3557 + } else {
3558 + pcpu_copy_value(htab, pptr, value, onallcpus);
3559 + }
3560 +}
3561 +
3562 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
3563 {
3564 return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
3565 @@ -779,7 +805,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
3566 }
3567 }
3568
3569 - pcpu_copy_value(htab, pptr, value, onallcpus);
3570 + pcpu_init_value(htab, pptr, value, onallcpus);
3571
3572 if (!prealloc)
3573 htab_elem_set_ptr(l_new, key_size, pptr);
3574 @@ -1075,7 +1101,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
3575 pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
3576 value, onallcpus);
3577 } else {
3578 - pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
3579 + pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
3580 value, onallcpus);
3581 hlist_nulls_add_head_rcu(&l_new->hash_node, head);
3582 l_new = NULL;
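
Note: as the new comment in the hashtab.c hunk says, a preallocated per-cpu element can be recycled, so an update coming from a BPF program (onallcpus == false) used to set only the current CPU's slot and leave stale data visible on every other CPU. pcpu_init_value() zero-fills the other slots in that case and defers to pcpu_copy_value() otherwise. A userspace sketch of the fill pattern; NCPUS, VALSZ, and the flat array are stand-ins for the per-cpu machinery:

    #include <stdio.h>
    #include <string.h>

    #define NCPUS 4
    #define VALSZ 8

    static unsigned char slots[NCPUS][VALSZ];

    static void pcpu_init_value(int current_cpu, const void *value)
    {
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++) {
            if (cpu == current_cpu)
                memcpy(slots[cpu], value, VALSZ);  /* caller's value */
            else
                memset(slots[cpu], 0, VALSZ);      /* known zero state */
        }
    }

    int main(void)
    {
        unsigned char v[VALSZ] = { 0xde, 0xad };
        int cpu;

        pcpu_init_value(1, v);
        for (cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d: %02x %02x\n", cpu, slots[cpu][0], slots[cpu][1]);
        return 0;
    }
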
3583 diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
3584 index 673a2cdb2656b..f99b79d7e1235 100644
3585 --- a/kernel/dma/swiotlb.c
3586 +++ b/kernel/dma/swiotlb.c
3587 @@ -230,6 +230,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
3588 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
3589 }
3590 io_tlb_index = 0;
3591 + no_iotlb_memory = false;
3592
3593 if (verbose)
3594 swiotlb_print_info();
3595 @@ -261,9 +262,11 @@ swiotlb_init(int verbose)
3596 if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
3597 return;
3598
3599 - if (io_tlb_start)
3600 + if (io_tlb_start) {
3601 memblock_free_early(io_tlb_start,
3602 PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
3603 + io_tlb_start = 0;
3604 + }
3605 pr_warn("Cannot allocate buffer");
3606 no_iotlb_memory = true;
3607 }
3608 @@ -361,6 +364,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
3609 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
3610 }
3611 io_tlb_index = 0;
3612 + no_iotlb_memory = false;
3613
3614 swiotlb_print_info();
3615
3616 diff --git a/kernel/events/core.c b/kernel/events/core.c
3617 index 1b60f9c508c9a..9f7c2da992991 100644
3618 --- a/kernel/events/core.c
3619 +++ b/kernel/events/core.c
3620 @@ -5596,11 +5596,11 @@ static void perf_pmu_output_stop(struct perf_event *event);
3621 static void perf_mmap_close(struct vm_area_struct *vma)
3622 {
3623 struct perf_event *event = vma->vm_file->private_data;
3624 -
3625 struct ring_buffer *rb = ring_buffer_get(event);
3626 struct user_struct *mmap_user = rb->mmap_user;
3627 int mmap_locked = rb->mmap_locked;
3628 unsigned long size = perf_data_size(rb);
3629 + bool detach_rest = false;
3630
3631 if (event->pmu->event_unmapped)
3632 event->pmu->event_unmapped(event, vma->vm_mm);
3633 @@ -5631,7 +5631,8 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3634 mutex_unlock(&event->mmap_mutex);
3635 }
3636
3637 - atomic_dec(&rb->mmap_count);
3638 + if (atomic_dec_and_test(&rb->mmap_count))
3639 + detach_rest = true;
3640
3641 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3642 goto out_put;
3643 @@ -5640,7 +5641,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
3644 mutex_unlock(&event->mmap_mutex);
3645
3646 /* If there's still other mmap()s of this buffer, we're done. */
3647 - if (atomic_read(&rb->mmap_count))
3648 + if (!detach_rest)
3649 goto out_put;
3650
3651 /*
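
Note: the perf_mmap_close() fix above replaces "decrement now, re-read the counter later" with atomic_dec_and_test(), recording in detach_rest whether this caller's decrement was the one that took rb->mmap_count to zero. That makes exactly one closer responsible for detaching the remaining events, regardless of concurrent mmap()s changing the count before the later check. The C11 equivalent of the primitive:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Returns true iff this caller's decrement took *v to zero, so
     * exactly one of N concurrent closers "wins", independent of what
     * the counter does afterwards. */
    static bool dec_and_test(atomic_int *v)
    {
        return atomic_fetch_sub(v, 1) == 1;
    }

    int main(void)
    {
        atomic_int mmap_count = 2;   /* two mappers of the same buffer */

        printf("first close wins? %d\n", dec_and_test(&mmap_count)); /* 0 */
        printf("last close wins?  %d\n", dec_and_test(&mmap_count)); /* 1 */
        return 0;
    }
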
3652 diff --git a/kernel/events/internal.h b/kernel/events/internal.h
3653 index 3aef4191798c3..6e87b358e0826 100644
3654 --- a/kernel/events/internal.h
3655 +++ b/kernel/events/internal.h
3656 @@ -210,7 +210,7 @@ static inline int get_recursion_context(int *recursion)
3657 rctx = 3;
3658 else if (in_irq())
3659 rctx = 2;
3660 - else if (in_softirq())
3661 + else if (in_serving_softirq())
3662 rctx = 1;
3663 else
3664 rctx = 0;
3665 diff --git a/kernel/exit.c b/kernel/exit.c
3666 index fa46977b9c079..ece64771a31f5 100644
3667 --- a/kernel/exit.c
3668 +++ b/kernel/exit.c
3669 @@ -456,7 +456,10 @@ static void exit_mm(void)
3670 up_read(&mm->mmap_sem);
3671
3672 self.task = current;
3673 - self.next = xchg(&core_state->dumper.next, &self);
3674 + if (self.task->flags & PF_SIGNALED)
3675 + self.next = xchg(&core_state->dumper.next, &self);
3676 + else
3677 + self.task = NULL;
3678 /*
3679 * Implies mb(), the result of xchg() must be visible
3680 * to core_state->dumper.
3681 diff --git a/kernel/futex.c b/kernel/futex.c
3682 index 9c4f9b868a491..b6dec5f79370c 100644
3683 --- a/kernel/futex.c
3684 +++ b/kernel/futex.c
3685 @@ -880,8 +880,9 @@ static void put_pi_state(struct futex_pi_state *pi_state)
3686 */
3687 if (pi_state->owner) {
3688 struct task_struct *owner;
3689 + unsigned long flags;
3690
3691 - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
3692 + raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
3693 owner = pi_state->owner;
3694 if (owner) {
3695 raw_spin_lock(&owner->pi_lock);
3696 @@ -889,7 +890,7 @@ static void put_pi_state(struct futex_pi_state *pi_state)
3697 raw_spin_unlock(&owner->pi_lock);
3698 }
3699 rt_mutex_proxy_unlock(&pi_state->pi_mutex, owner);
3700 - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
3701 + raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
3702 }
3703
3704 if (current->pi_state_cache) {
3705 diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
3706 index f92d9a6873720..4e11120265c74 100644
3707 --- a/kernel/irq/Kconfig
3708 +++ b/kernel/irq/Kconfig
3709 @@ -81,6 +81,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS
3710 # Generic IRQ IPI support
3711 config GENERIC_IRQ_IPI
3712 bool
3713 + select IRQ_DOMAIN_HIERARCHY
3714
3715 # Generic MSI interrupt support
3716 config GENERIC_MSI_IRQ
3717 diff --git a/kernel/reboot.c b/kernel/reboot.c
3718 index c4d472b7f1b42..ac19159d71587 100644
3719 --- a/kernel/reboot.c
3720 +++ b/kernel/reboot.c
3721 @@ -551,22 +551,22 @@ static int __init reboot_setup(char *str)
3722 break;
3723
3724 case 's':
3725 - {
3726 - int rc;
3727 -
3728 - if (isdigit(*(str+1))) {
3729 - rc = kstrtoint(str+1, 0, &reboot_cpu);
3730 - if (rc)
3731 - return rc;
3732 - } else if (str[1] == 'm' && str[2] == 'p' &&
3733 - isdigit(*(str+3))) {
3734 - rc = kstrtoint(str+3, 0, &reboot_cpu);
3735 - if (rc)
3736 - return rc;
3737 - } else
3738 + if (isdigit(*(str+1)))
3739 + reboot_cpu = simple_strtoul(str+1, NULL, 0);
3740 + else if (str[1] == 'm' && str[2] == 'p' &&
3741 + isdigit(*(str+3)))
3742 + reboot_cpu = simple_strtoul(str+3, NULL, 0);
3743 + else
3744 *mode = REBOOT_SOFT;
3745 + if (reboot_cpu >= num_possible_cpus()) {
3746 + pr_err("Ignoring the CPU number in reboot= option. "
3747 + "CPU %d exceeds possible cpu number %d\n",
3748 + reboot_cpu, num_possible_cpus());
3749 + reboot_cpu = 0;
3750 + break;
3751 + }
3752 break;
3753 - }
3754 +
3755 case 'g':
3756 *mode = REBOOT_GPIO;
3757 break;
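
Note: the reboot_setup() rework above returns to simple_strtoul(), which tolerates trailing characters in the comma-separated option string (kstrtoint() rejects them), and adds a range check that discards any parsed CPU number at or beyond num_possible_cpus(), which also neutralizes values that overflowed during parsing. An illustrative userspace re-implementation of the parsing and bounds logic; NUM_POSSIBLE_CPUS is a stand-in:

    #include <ctype.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_POSSIBLE_CPUS 8   /* stand-in for num_possible_cpus() */

    /* Parse the CPU number from an "s<N>" or "smp<N>" reboot token. */
    static int parse_reboot_cpu(const char *str)
    {
        unsigned long cpu = 0;

        if (isdigit((unsigned char)str[1]))
            cpu = strtoul(str + 1, NULL, 0);
        else if (str[1] == 'm' && str[2] == 'p' &&
                 isdigit((unsigned char)str[3]))
            cpu = strtoul(str + 3, NULL, 0);

        if (cpu >= NUM_POSSIBLE_CPUS) {
            fprintf(stderr, "ignoring cpu %lu: out of range\n", cpu);
            return 0;   /* fall back to CPU 0, as the hunk does */
        }
        return (int)cpu;
    }

    int main(void)
    {
        printf("%d\n", parse_reboot_cpu("s3"));            /* 3 */
        printf("%d\n", parse_reboot_cpu("smp5,warm"));     /* 5 */
        printf("%d\n", parse_reboot_cpu("s10000000000"));  /* 0, rejected */
        return 0;
    }
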
3758 diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
3759 index 77f1e5635cc18..62dc9757118c6 100644
3760 --- a/kernel/time/itimer.c
3761 +++ b/kernel/time/itimer.c
3762 @@ -147,10 +147,6 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
3763 u64 oval, nval, ointerval, ninterval;
3764 struct cpu_itimer *it = &tsk->signal->it[clock_id];
3765
3766 - /*
3767 - * Use the to_ktime conversion because that clamps the maximum
3768 - * value to KTIME_MAX and avoid multiplication overflows.
3769 - */
3770 nval = ktime_to_ns(timeval_to_ktime(value->it_value));
3771 ninterval = ktime_to_ns(timeval_to_ktime(value->it_interval));
3772
3773 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
3774 index 59225b484e4ee..7e5d3524e924d 100644
3775 --- a/kernel/time/tick-common.c
3776 +++ b/kernel/time/tick-common.c
3777 @@ -11,6 +11,7 @@
3778 #include <linux/err.h>
3779 #include <linux/hrtimer.h>
3780 #include <linux/interrupt.h>
3781 +#include <linux/nmi.h>
3782 #include <linux/percpu.h>
3783 #include <linux/profile.h>
3784 #include <linux/sched.h>
3785 @@ -558,6 +559,7 @@ void tick_unfreeze(void)
3786 trace_suspend_resume(TPS("timekeeping_freeze"),
3787 smp_processor_id(), false);
3788 } else {
3789 + touch_softlockup_watchdog();
3790 tick_resume_local();
3791 }
3792
3793 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3794 index a3ae244b1bcd5..87fa73cdb90f6 100644
3795 --- a/kernel/time/timer.c
3796 +++ b/kernel/time/timer.c
3797 @@ -1743,13 +1743,6 @@ void update_process_times(int user_tick)
3798 scheduler_tick();
3799 if (IS_ENABLED(CONFIG_POSIX_TIMERS))
3800 run_posix_cpu_timers();
3801 -
3802 - /* The current CPU might make use of net randoms without receiving IRQs
3803 - * to renew them often enough. Let's update the net_rand_state from a
3804 - * non-constant value that's not affine to the number of calls to make
3805 - * sure it's updated when there's some activity (we don't care in idle).
3806 - */
3807 - this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
3808 }
3809
3810 /**
3811 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3812 index 2a357bda45cf0..f7cac11a90055 100644
3813 --- a/kernel/trace/trace.c
3814 +++ b/kernel/trace/trace.c
3815 @@ -2510,7 +2510,7 @@ trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
3816 /*
3817 * If tracing is off, but we have triggers enabled
3818 * we still need to look at the event data. Use the temp_buffer
3819 - * to store the trace event for the tigger to use. It's recusive
3820 + * to store the trace event for the trigger to use. It's recursive
3821 * safe and will not be recorded anywhere.
3822 */
3823 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
3824 @@ -2832,7 +2832,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
3825 stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;
3826
3827 /* This should never happen. If it does, yell once and skip */
3828 - if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING))
3829 + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
3830 goto out;
3831
3832 /*
3833 diff --git a/lib/random32.c b/lib/random32.c
3834 index 1786f78bf4c53..9085b1172015e 100644
3835 --- a/lib/random32.c
3836 +++ b/lib/random32.c
3837 @@ -40,16 +40,6 @@
3838 #include <linux/sched.h>
3839 #include <asm/unaligned.h>
3840
3841 -#ifdef CONFIG_RANDOM32_SELFTEST
3842 -static void __init prandom_state_selftest(void);
3843 -#else
3844 -static inline void prandom_state_selftest(void)
3845 -{
3846 -}
3847 -#endif
3848 -
3849 -DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;
3850 -
3851 /**
3852 * prandom_u32_state - seeded pseudo-random number generator.
3853 * @state: pointer to state structure holding seeded state.
3854 @@ -69,25 +59,6 @@ u32 prandom_u32_state(struct rnd_state *state)
3855 }
3856 EXPORT_SYMBOL(prandom_u32_state);
3857
3858 -/**
3859 - * prandom_u32 - pseudo random number generator
3860 - *
3861 - * A 32 bit pseudo-random number is generated using a fast
3862 - * algorithm suitable for simulation. This algorithm is NOT
3863 - * considered safe for cryptographic use.
3864 - */
3865 -u32 prandom_u32(void)
3866 -{
3867 - struct rnd_state *state = &get_cpu_var(net_rand_state);
3868 - u32 res;
3869 -
3870 - res = prandom_u32_state(state);
3871 - put_cpu_var(net_rand_state);
3872 -
3873 - return res;
3874 -}
3875 -EXPORT_SYMBOL(prandom_u32);
3876 -
3877 /**
3878 * prandom_bytes_state - get the requested number of pseudo-random bytes
3879 *
3880 @@ -119,20 +90,6 @@ void prandom_bytes_state(struct rnd_state *state, void *buf, size_t bytes)
3881 }
3882 EXPORT_SYMBOL(prandom_bytes_state);
3883
3884 -/**
3885 - * prandom_bytes - get the requested number of pseudo-random bytes
3886 - * @buf: where to copy the pseudo-random bytes to
3887 - * @bytes: the requested number of bytes
3888 - */
3889 -void prandom_bytes(void *buf, size_t bytes)
3890 -{
3891 - struct rnd_state *state = &get_cpu_var(net_rand_state);
3892 -
3893 - prandom_bytes_state(state, buf, bytes);
3894 - put_cpu_var(net_rand_state);
3895 -}
3896 -EXPORT_SYMBOL(prandom_bytes);
3897 -
3898 static void prandom_warmup(struct rnd_state *state)
3899 {
3900 /* Calling RNG ten times to satisfy recurrence condition */
3901 @@ -148,96 +105,6 @@ static void prandom_warmup(struct rnd_state *state)
3902 prandom_u32_state(state);
3903 }
3904
3905 -static u32 __extract_hwseed(void)
3906 -{
3907 - unsigned int val = 0;
3908 -
3909 - (void)(arch_get_random_seed_int(&val) ||
3910 - arch_get_random_int(&val));
3911 -
3912 - return val;
3913 -}
3914 -
3915 -static void prandom_seed_early(struct rnd_state *state, u32 seed,
3916 - bool mix_with_hwseed)
3917 -{
3918 -#define LCG(x) ((x) * 69069U) /* super-duper LCG */
3919 -#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
3920 - state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
3921 - state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
3922 - state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
3923 - state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
3924 -}
3925 -
3926 -/**
3927 - * prandom_seed - add entropy to pseudo random number generator
3928 - * @entropy: entropy value
3929 - *
3930 - * Add some additional entropy to the prandom pool.
3931 - */
3932 -void prandom_seed(u32 entropy)
3933 -{
3934 - int i;
3935 - /*
3936 - * No locking on the CPUs, but then somewhat random results are, well,
3937 - * expected.
3938 - */
3939 - for_each_possible_cpu(i) {
3940 - struct rnd_state *state = &per_cpu(net_rand_state, i);
3941 -
3942 - state->s1 = __seed(state->s1 ^ entropy, 2U);
3943 - prandom_warmup(state);
3944 - }
3945 -}
3946 -EXPORT_SYMBOL(prandom_seed);
3947 -
3948 -/*
3949 - * Generate some initially weak seeding values to allow
3950 - * to start the prandom_u32() engine.
3951 - */
3952 -static int __init prandom_init(void)
3953 -{
3954 - int i;
3955 -
3956 - prandom_state_selftest();
3957 -
3958 - for_each_possible_cpu(i) {
3959 - struct rnd_state *state = &per_cpu(net_rand_state, i);
3960 - u32 weak_seed = (i + jiffies) ^ random_get_entropy();
3961 -
3962 - prandom_seed_early(state, weak_seed, true);
3963 - prandom_warmup(state);
3964 - }
3965 -
3966 - return 0;
3967 -}
3968 -core_initcall(prandom_init);
3969 -
3970 -static void __prandom_timer(struct timer_list *unused);
3971 -
3972 -static DEFINE_TIMER(seed_timer, __prandom_timer);
3973 -
3974 -static void __prandom_timer(struct timer_list *unused)
3975 -{
3976 - u32 entropy;
3977 - unsigned long expires;
3978 -
3979 - get_random_bytes(&entropy, sizeof(entropy));
3980 - prandom_seed(entropy);
3981 -
3982 - /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
3983 - expires = 40 + prandom_u32_max(40);
3984 - seed_timer.expires = jiffies + msecs_to_jiffies(expires * MSEC_PER_SEC);
3985 -
3986 - add_timer(&seed_timer);
3987 -}
3988 -
3989 -static void __init __prandom_start_seed_timer(void)
3990 -{
3991 - seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
3992 - add_timer(&seed_timer);
3993 -}
3994 -
3995 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
3996 {
3997 int i;
3998 @@ -257,51 +124,6 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
3999 }
4000 EXPORT_SYMBOL(prandom_seed_full_state);
4001
4002 -/*
4003 - * Generate better values after random number generator
4004 - * is fully initialized.
4005 - */
4006 -static void __prandom_reseed(bool late)
4007 -{
4008 - unsigned long flags;
4009 - static bool latch = false;
4010 - static DEFINE_SPINLOCK(lock);
4011 -
4012 - /* Asking for random bytes might result in bytes getting
4013 - * moved into the nonblocking pool and thus marking it
4014 - * as initialized. In this case we would double back into
4015 - * this function and attempt to do a late reseed.
4016 - * Ignore the pointless attempt to reseed again if we're
4017 - * already waiting for bytes when the nonblocking pool
4018 - * got initialized.
4019 - */
4020 -
4021 - /* only allow initial seeding (late == false) once */
4022 - if (!spin_trylock_irqsave(&lock, flags))
4023 - return;
4024 -
4025 - if (latch && !late)
4026 - goto out;
4027 -
4028 - latch = true;
4029 - prandom_seed_full_state(&net_rand_state);
4030 -out:
4031 - spin_unlock_irqrestore(&lock, flags);
4032 -}
4033 -
4034 -void prandom_reseed_late(void)
4035 -{
4036 - __prandom_reseed(true);
4037 -}
4038 -
4039 -static int __init prandom_reseed(void)
4040 -{
4041 - __prandom_reseed(false);
4042 - __prandom_start_seed_timer();
4043 - return 0;
4044 -}
4045 -late_initcall(prandom_reseed);
4046 -
4047 #ifdef CONFIG_RANDOM32_SELFTEST
4048 static struct prandom_test1 {
4049 u32 seed;
4050 @@ -421,7 +243,28 @@ static struct prandom_test2 {
4051 { 407983964U, 921U, 728767059U },
4052 };
4053
4054 -static void __init prandom_state_selftest(void)
4055 +static u32 __extract_hwseed(void)
4056 +{
4057 + unsigned int val = 0;
4058 +
4059 + (void)(arch_get_random_seed_int(&val) ||
4060 + arch_get_random_int(&val));
4061 +
4062 + return val;
4063 +}
4064 +
4065 +static void prandom_seed_early(struct rnd_state *state, u32 seed,
4066 + bool mix_with_hwseed)
4067 +{
4068 +#define LCG(x) ((x) * 69069U) /* super-duper LCG */
4069 +#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
4070 + state->s1 = __seed(HWSEED() ^ LCG(seed), 2U);
4071 + state->s2 = __seed(HWSEED() ^ LCG(state->s1), 8U);
4072 + state->s3 = __seed(HWSEED() ^ LCG(state->s2), 16U);
4073 + state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
4074 +}
4075 +
4076 +static int __init prandom_state_selftest(void)
4077 {
4078 int i, j, errors = 0, runs = 0;
4079 bool error = false;
4080 @@ -461,5 +304,266 @@ static void __init prandom_state_selftest(void)
4081 pr_warn("prandom: %d/%d self tests failed\n", errors, runs);
4082 else
4083 pr_info("prandom: %d self tests passed\n", runs);
4084 + return 0;
4085 +}
4086 +core_initcall(prandom_state_selftest);
4087 +#endif
4088 +
4089 +/*
4090 + * The prandom_u32() implementation is now completely separate from the
4091 + * prandom_state() functions, which are retained (for now) for compatibility.
4092 + *
4093 + * Because of (ab)use in the networking code for choosing random TCP/UDP port
4094 + * numbers, which open DoS possibilities if guessable, we want something
4095 + * stronger than a standard PRNG. But the performance requirements of
4096 + * the network code do not allow robust crypto for this application.
4097 + *
4098 + * So this is a homebrew Junior Spaceman implementation, based on the
4099 + * lowest-latency trustworthy crypto primitive available, SipHash.
4100 + * (The authors of SipHash have not been consulted about this abuse of
4101 + * their work.)
4102 + *
4103 + * Standard SipHash-2-4 uses 2n+4 rounds to hash n words of input to
4104 + * one word of output. This abbreviated version uses 2 rounds per word
4105 + * of output.
4106 + */
4107 +
4108 +struct siprand_state {
4109 + unsigned long v0;
4110 + unsigned long v1;
4111 + unsigned long v2;
4112 + unsigned long v3;
4113 +};
4114 +
4115 +static DEFINE_PER_CPU(struct siprand_state, net_rand_state) __latent_entropy;
4116 +
4117 +/*
4118 + * This is the core CPRNG function. As "pseudorandom", this is not used
4119 + * for truly valuable things, just intended to be a PITA to guess.
4120 + * For maximum speed, we do just two SipHash rounds per word. This is
4121 + * the same rate as 4 rounds per 64 bits that SipHash normally uses,
4122 + * so hopefully it's reasonably secure.
4123 + *
4124 + * There are two changes from the official SipHash finalization:
4125 + * - We omit some constants XORed with v2 in the SipHash spec as irrelevant;
4126 + * they are there only to make the output rounds distinct from the input
4127 + * rounds, and this application has no input rounds.
4128 + * - Rather than returning v0^v1^v2^v3, return v1+v3.
4129 + * If you look at the SipHash round, the last operation on v3 is
4130 + * "v3 ^= v0", so "v0 ^ v3" just undoes that, a waste of time.
4131 + * Likewise "v1 ^= v2". (The rotate of v2 makes a difference, but
4132 + * it still cancels out half of the bits in v2 for no benefit.)
4133 + * Second, since the last combining operation was xor, continue the
4134 + * pattern of alternating xor/add for a tiny bit of extra non-linearity.
4135 + */
4136 +static inline u32 siprand_u32(struct siprand_state *s)
4137 +{
4138 + unsigned long v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;
4139 +
4140 + PRND_SIPROUND(v0, v1, v2, v3);
4141 + PRND_SIPROUND(v0, v1, v2, v3);
4142 + s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
4143 + return v1 + v3;
4144 +}
4145 +
4146 +
4147 +/**
4148 + * prandom_u32 - pseudo random number generator
4149 + *
4150 + * A 32 bit pseudo-random number is generated using a fast
4151 + * algorithm suitable for simulation. This algorithm is NOT
4152 + * considered safe for cryptographic use.
4153 + */
4154 +u32 prandom_u32(void)
4155 +{
4156 + struct siprand_state *state = get_cpu_ptr(&net_rand_state);
4157 + u32 res = siprand_u32(state);
4158 +
4159 + put_cpu_ptr(&net_rand_state);
4160 + return res;
4161 +}
4162 +EXPORT_SYMBOL(prandom_u32);
4163 +
4164 +/**
4165 + * prandom_bytes - get the requested number of pseudo-random bytes
4166 + * @buf: where to copy the pseudo-random bytes to
4167 + * @bytes: the requested number of bytes
4168 + */
4169 +void prandom_bytes(void *buf, size_t bytes)
4170 +{
4171 + struct siprand_state *state = get_cpu_ptr(&net_rand_state);
4172 + u8 *ptr = buf;
4173 +
4174 + while (bytes >= sizeof(u32)) {
4175 + put_unaligned(siprand_u32(state), (u32 *)ptr);
4176 + ptr += sizeof(u32);
4177 + bytes -= sizeof(u32);
4178 + }
4179 +
4180 + if (bytes > 0) {
4181 + u32 rem = siprand_u32(state);
4182 +
4183 + do {
4184 + *ptr++ = (u8)rem;
4185 + rem >>= BITS_PER_BYTE;
4186 + } while (--bytes > 0);
4187 + }
4188 + put_cpu_ptr(&net_rand_state);
4189 +}
4190 +EXPORT_SYMBOL(prandom_bytes);
4191 +
4192 +/**
4193 + * prandom_seed - add entropy to pseudo random number generator
4194 + * @entropy: entropy value
4195 + *
4196 + * Add some additional seed material to the prandom pool.
4197 + * The "entropy" is actually our IP address (the only caller is
4198 + * the network code), not for unpredictability, but to ensure that
4199 + * different machines are initialized differently.
4200 + */
4201 +void prandom_seed(u32 entropy)
4202 +{
4203 + int i;
4204 +
4205 + add_device_randomness(&entropy, sizeof(entropy));
4206 +
4207 + for_each_possible_cpu(i) {
4208 + struct siprand_state *state = per_cpu_ptr(&net_rand_state, i);
4209 + unsigned long v0 = state->v0, v1 = state->v1;
4210 + unsigned long v2 = state->v2, v3 = state->v3;
4211 +
4212 + do {
4213 + v3 ^= entropy;
4214 + PRND_SIPROUND(v0, v1, v2, v3);
4215 + PRND_SIPROUND(v0, v1, v2, v3);
4216 + v0 ^= entropy;
4217 + } while (unlikely(!v0 || !v1 || !v2 || !v3));
4218 +
4219 + WRITE_ONCE(state->v0, v0);
4220 + WRITE_ONCE(state->v1, v1);
4221 + WRITE_ONCE(state->v2, v2);
4222 + WRITE_ONCE(state->v3, v3);
4223 + }
4224 +}
4225 +EXPORT_SYMBOL(prandom_seed);
4226 +
4227 +/*
4228 + * Generate some initially weak seeding values to allow
4229 + * the prandom_u32() engine to be started.
4230 + */
4231 +static int __init prandom_init_early(void)
4232 +{
4233 + int i;
4234 + unsigned long v0, v1, v2, v3;
4235 +
4236 + if (!arch_get_random_long(&v0))
4237 + v0 = jiffies;
4238 + if (!arch_get_random_long(&v1))
4239 + v1 = random_get_entropy();
4240 + v2 = v0 ^ PRND_K0;
4241 + v3 = v1 ^ PRND_K1;
4242 +
4243 + for_each_possible_cpu(i) {
4244 + struct siprand_state *state;
4245 +
4246 + v3 ^= i;
4247 + PRND_SIPROUND(v0, v1, v2, v3);
4248 + PRND_SIPROUND(v0, v1, v2, v3);
4249 + v0 ^= i;
4250 +
4251 + state = per_cpu_ptr(&net_rand_state, i);
4252 + state->v0 = v0; state->v1 = v1;
4253 + state->v2 = v2; state->v3 = v3;
4254 + }
4255 +
4256 + return 0;
4257 }
4258 +core_initcall(prandom_init_early);
4259 +
4260 +
4261 +/* Stronger reseeding when available, and periodically thereafter. */
4262 +static void prandom_reseed(struct timer_list *unused);
4263 +
4264 +static DEFINE_TIMER(seed_timer, prandom_reseed);
4265 +
4266 +static void prandom_reseed(struct timer_list *unused)
4267 +{
4268 + unsigned long expires;
4269 + int i;
4270 +
4271 + /*
4272 + * Reinitialize each CPU's PRNG with 128 bits of key.
4273 + * No locking on the CPUs, but then somewhat random results are,
4274 + * well, expected.
4275 + */
4276 + for_each_possible_cpu(i) {
4277 + struct siprand_state *state;
4278 + unsigned long v0 = get_random_long(), v2 = v0 ^ PRND_K0;
4279 + unsigned long v1 = get_random_long(), v3 = v1 ^ PRND_K1;
4280 +#if BITS_PER_LONG == 32
4281 + int j;
4282 +
4283 + /*
4284 + * On 32-bit machines, hash in two extra words to
4285 + * approximate 128-bit key length. Not that the hash
4286 + * has that much security, but this prevents a trivial
4287 + * 64-bit brute force.
4288 + */
4289 + for (j = 0; j < 2; j++) {
4290 + unsigned long m = get_random_long();
4291 +
4292 + v3 ^= m;
4293 + PRND_SIPROUND(v0, v1, v2, v3);
4294 + PRND_SIPROUND(v0, v1, v2, v3);
4295 + v0 ^= m;
4296 + }
4297 #endif
4298 + /*
4299 + * Probably impossible in practice, but there is a
4300 + * theoretical risk that a race between this reseeding
4301 + * and the target CPU writing its state back could
4302 + * create the all-zero SipHash fixed point.
4303 + *
4304 + * To ensure that never happens, ensure the state
4305 + * we write contains no zero words.
4306 + */
4307 + state = per_cpu_ptr(&net_rand_state, i);
4308 + WRITE_ONCE(state->v0, v0 ? v0 : -1ul);
4309 + WRITE_ONCE(state->v1, v1 ? v1 : -1ul);
4310 + WRITE_ONCE(state->v2, v2 ? v2 : -1ul);
4311 + WRITE_ONCE(state->v3, v3 ? v3 : -1ul);
4312 + }
4313 +
4314 + /* reseed every ~60 seconds, in [40 .. 80) interval with slack */
4315 + expires = round_jiffies(jiffies + 40 * HZ + prandom_u32_max(40 * HZ));
4316 + mod_timer(&seed_timer, expires);
4317 +}
4318 +
4319 +/*
4320 + * The random ready callback can be called from almost any interrupt.
4321 + * To avoid worrying about whether it's safe to delay that interrupt
4322 + * long enough to seed all CPUs, just schedule an immediate timer event.
4323 + */
4324 +static void prandom_timer_start(struct random_ready_callback *unused)
4325 +{
4326 + mod_timer(&seed_timer, jiffies);
4327 +}
4328 +
4329 +/*
4330 + * Start periodic full reseeding as soon as strong
4331 + * random numbers are available.
4332 + */
4333 +static int __init prandom_init_late(void)
4334 +{
4335 + static struct random_ready_callback random_ready = {
4336 + .func = prandom_timer_start
4337 + };
4338 + int ret = add_random_ready_callback(&random_ready);
4339 +
4340 + if (ret == -EALREADY) {
4341 + prandom_timer_start(&random_ready);
4342 + ret = 0;
4343 + }
4344 + return ret;
4345 +}
4346 +late_initcall(prandom_init_late);
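
Note: putting the pieces of the lib/random32.c rewrite together: per-CPU SipHash state, two rounds per 32-bit output, v1 + v3 as the result, and an all-zero state avoided because it is a fixed point of the round function (which is why the reseed path writes "v0 ? v0 : -1ul"). A self-contained userspace version of the 64-bit generator path; the seed is a toy constant here, so this is a sketch of the algorithm, not a drop-in:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static inline uint64_t rol64(uint64_t w, unsigned s)
    {
        return (w << s) | (w >> (64 - s));
    }

    #define SIPROUND(v0, v1, v2, v3) ( \
        v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
        v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
        v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
        v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
    )

    struct siprand_state { uint64_t v0, v1, v2, v3; };

    /* Two rounds per 32-bit word; v1 + v3 instead of the full SipHash
     * finalization, for the reasons given in the patch comments. */
    static uint32_t siprand_u32(struct siprand_state *s)
    {
        uint64_t v0 = s->v0, v1 = s->v1, v2 = s->v2, v3 = s->v3;

        SIPROUND(v0, v1, v2, v3);
        SIPROUND(v0, v1, v2, v3);
        s->v0 = v0; s->v1 = v1; s->v2 = v2; s->v3 = v3;
        return (uint32_t)(v1 + v3);
    }

    /* Whole words first, then the tail from one extra word, mirroring
     * the new prandom_bytes(). */
    static void siprand_bytes(struct siprand_state *s, void *buf, size_t n)
    {
        uint8_t *p = buf;

        while (n >= sizeof(uint32_t)) {
            uint32_t r = siprand_u32(s);

            memcpy(p, &r, sizeof(r));
            p += sizeof(r);
            n -= sizeof(r);
        }
        if (n) {
            uint32_t r = siprand_u32(s);

            do {
                *p++ = (uint8_t)r;
                r >>= 8;
            } while (--n);
        }
    }

    int main(void)
    {
        /* Toy seed; must never be all zero, since the all-zero state
         * is a fixed point of the round function. */
        struct siprand_state s = { 1, 2, 3, 4 };
        uint8_t out[7];

        siprand_bytes(&s, out, sizeof(out));
        for (size_t i = 0; i < sizeof(out); i++)
            printf("%02x", out[i]);
        printf("\n");
        return 0;
    }
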
4347 diff --git a/mm/slub.c b/mm/slub.c
4348 index d69934eac9e94..f41414571c9eb 100644
4349 --- a/mm/slub.c
4350 +++ b/mm/slub.c
4351 @@ -2763,7 +2763,7 @@ redo:
4352
4353 object = c->freelist;
4354 page = c->page;
4355 - if (unlikely(!object || !node_match(page, node))) {
4356 + if (unlikely(!object || !page || !node_match(page, node))) {
4357 object = __slab_alloc(s, gfpflags, node, addr, c);
4358 stat(s, ALLOC_SLOWPATH);
4359 } else {
4360 diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
4361 index bf9fd6ee88fe0..0470909605392 100644
4362 --- a/net/can/j1939/socket.c
4363 +++ b/net/can/j1939/socket.c
4364 @@ -475,6 +475,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
4365 goto out_release_sock;
4366 }
4367
4368 + if (!(ndev->flags & IFF_UP)) {
4369 + dev_put(ndev);
4370 + ret = -ENETDOWN;
4371 + goto out_release_sock;
4372 + }
4373 +
4374 priv = j1939_netdev_start(ndev);
4375 dev_put(ndev);
4376 if (IS_ERR(priv)) {
4377 diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
4378 index a058213b77a78..7c841037c5334 100644
4379 --- a/net/ipv4/netfilter.c
4380 +++ b/net/ipv4/netfilter.c
4381 @@ -17,17 +17,19 @@
4382 #include <net/netfilter/nf_queue.h>
4383
4384 /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
4385 -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_type)
4386 +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type)
4387 {
4388 const struct iphdr *iph = ip_hdr(skb);
4389 struct rtable *rt;
4390 struct flowi4 fl4 = {};
4391 __be32 saddr = iph->saddr;
4392 - const struct sock *sk = skb_to_full_sk(skb);
4393 - __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
4394 + __u8 flags;
4395 struct net_device *dev = skb_dst(skb)->dev;
4396 unsigned int hh_len;
4397
4398 + sk = sk_to_full_sk(sk);
4399 + flags = sk ? inet_sk_flowi_flags(sk) : 0;
4400 +
4401 if (addr_type == RTN_UNSPEC)
4402 addr_type = inet_addr_type_dev_table(net, dev, saddr);
4403 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
4404 diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
4405 index bb9266ea37858..ae45bcdd335ea 100644
4406 --- a/net/ipv4/netfilter/iptable_mangle.c
4407 +++ b/net/ipv4/netfilter/iptable_mangle.c
4408 @@ -62,7 +62,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
4409 iph->daddr != daddr ||
4410 skb->mark != mark ||
4411 iph->tos != tos) {
4412 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
4413 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
4414 if (err < 0)
4415 ret = NF_DROP_ERR(err);
4416 }
4417 diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
4418 index 2361fdac2c438..57817313a85c1 100644
4419 --- a/net/ipv4/netfilter/nf_reject_ipv4.c
4420 +++ b/net/ipv4/netfilter/nf_reject_ipv4.c
4421 @@ -127,7 +127,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
4422 ip4_dst_hoplimit(skb_dst(nskb)));
4423 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
4424
4425 - if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
4426 + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
4427 goto free_nskb;
4428
4429 niph = ip_hdr(nskb);
4430 diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
4431 index 535b69326f66a..2b45d14555926 100644
4432 --- a/net/ipv4/syncookies.c
4433 +++ b/net/ipv4/syncookies.c
4434 @@ -291,7 +291,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
4435 __u32 cookie = ntohl(th->ack_seq) - 1;
4436 struct sock *ret = sk;
4437 struct request_sock *req;
4438 - int mss;
4439 + int full_space, mss;
4440 struct rtable *rt;
4441 __u8 rcv_wscale;
4442 struct flowi4 fl4;
4443 @@ -386,8 +386,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
4444
4445 /* Try to redo what tcp_v4_send_synack did. */
4446 req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
4447 + /* limit the window selection if the user enforce a smaller rx buffer */
4448 + full_space = tcp_full_space(sk);
4449 + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
4450 + (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
4451 + req->rsk_window_clamp = full_space;
4452
4453 - tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
4454 + tcp_select_initial_window(sk, full_space, req->mss,
4455 &req->rsk_rcv_wnd, &req->rsk_window_clamp,
4456 ireq->wscale_ok, &rcv_wscale,
4457 dst_metric(&rt->dst, RTAX_INITRWND));
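
Note: the syncookie hunk above (the IPv6 path later in this patch gets the identical guard) applies when the application has pinned its receive buffer with SO_RCVBUF (SOCK_RCVBUF_LOCK): the window clamp taken from the route metric must not exceed tcp_full_space(), and a clamp of zero is replaced outright; otherwise tcp_select_initial_window() could advertise a window the locked buffer cannot back. The clamp in isolation, with illustrative numbers:

    #include <stdbool.h>
    #include <stdio.h>

    /* full_space stands in for tcp_full_space(), i.e. the usable part
     * of the locked receive buffer. */
    static unsigned clamp_window(unsigned clamp, unsigned full_space,
                                 bool rcvbuf_locked)
    {
        if (rcvbuf_locked && (clamp > full_space || clamp == 0))
            return full_space;
        return clamp;
    }

    int main(void)
    {
        printf("%u\n", clamp_window(65535, 16384, true));   /* 16384 */
        printf("%u\n", clamp_window(0,     16384, true));   /* 16384 */
        printf("%u\n", clamp_window(65535, 16384, false));  /* 65535 */
        return 0;
    }
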
4458 diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
4459 index a3908e55ed89a..d7c64e953e9a5 100644
4460 --- a/net/ipv4/udp_offload.c
4461 +++ b/net/ipv4/udp_offload.c
4462 @@ -349,7 +349,7 @@ out:
4463 static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
4464 struct sk_buff *skb)
4465 {
4466 - struct udphdr *uh = udp_hdr(skb);
4467 + struct udphdr *uh = udp_gro_udphdr(skb);
4468 struct sk_buff *pp = NULL;
4469 struct udphdr *uh2;
4470 struct sk_buff *p;
4471 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
4472 index 6d0e942d082d4..ab9a279dd6d47 100644
4473 --- a/net/ipv6/netfilter.c
4474 +++ b/net/ipv6/netfilter.c
4475 @@ -20,10 +20,10 @@
4476 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
4477 #include "../bridge/br_private.h"
4478
4479 -int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
4480 +int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb)
4481 {
4482 const struct ipv6hdr *iph = ipv6_hdr(skb);
4483 - struct sock *sk = sk_to_full_sk(skb->sk);
4484 + struct sock *sk = sk_to_full_sk(sk_partial);
4485 unsigned int hh_len;
4486 struct dst_entry *dst;
4487 int strict = (ipv6_addr_type(&iph->daddr) &
4488 @@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
4489 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
4490 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
4491 skb->mark != rt_info->mark)
4492 - return ip6_route_me_harder(entry->state.net, skb);
4493 + return ip6_route_me_harder(entry->state.net, entry->state.sk, skb);
4494 }
4495 return 0;
4496 }
4497 diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
4498 index 070afb97fa2ba..401e8dcb2c84b 100644
4499 --- a/net/ipv6/netfilter/ip6table_mangle.c
4500 +++ b/net/ipv6/netfilter/ip6table_mangle.c
4501 @@ -57,7 +57,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state)
4502 skb->mark != mark ||
4503 ipv6_hdr(skb)->hop_limit != hop_limit ||
4504 flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
4505 - err = ip6_route_me_harder(state->net, skb);
4506 + err = ip6_route_me_harder(state->net, state->sk, skb);
4507 if (err < 0)
4508 ret = NF_DROP_ERR(err);
4509 }
4510 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
4511 index 98954830c40ba..2872f7a00e863 100644
4512 --- a/net/ipv6/sit.c
4513 +++ b/net/ipv6/sit.c
4514 @@ -1088,7 +1088,6 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
4515 if (tdev && !netif_is_l3_master(tdev)) {
4516 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
4517
4518 - dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
4519 dev->mtu = tdev->mtu - t_hlen;
4520 if (dev->mtu < IPV6_MIN_MTU)
4521 dev->mtu = IPV6_MIN_MTU;
4522 @@ -1378,7 +1377,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
4523 dev->priv_destructor = ipip6_dev_free;
4524
4525 dev->type = ARPHRD_SIT;
4526 - dev->hard_header_len = LL_MAX_HEADER + t_hlen;
4527 dev->mtu = ETH_DATA_LEN - t_hlen;
4528 dev->min_mtu = IPV6_MIN_MTU;
4529 dev->max_mtu = IP6_MAX_MTU - t_hlen;
4530 diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
4531 index 30915f6f31e30..ec155844012b2 100644
4532 --- a/net/ipv6/syncookies.c
4533 +++ b/net/ipv6/syncookies.c
4534 @@ -136,7 +136,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
4535 __u32 cookie = ntohl(th->ack_seq) - 1;
4536 struct sock *ret = sk;
4537 struct request_sock *req;
4538 - int mss;
4539 + int full_space, mss;
4540 struct dst_entry *dst;
4541 __u8 rcv_wscale;
4542 u32 tsoff = 0;
4543 @@ -241,7 +241,13 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
4544 }
4545
4546 req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
4547 - tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
4548 + /* limit the window selection if the user enforce a smaller rx buffer */
4549 + full_space = tcp_full_space(sk);
4550 + if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
4551 + (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
4552 + req->rsk_window_clamp = full_space;
4553 +
4554 + tcp_select_initial_window(sk, full_space, req->mss,
4555 &req->rsk_rcv_wnd, &req->rsk_window_clamp,
4556 ireq->wscale_ok, &rcv_wscale,
4557 dst_metric(dst, RTAX_INITRWND));
4558 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
4559 index ebb62a4ebe30d..be8fd79202b87 100644
4560 --- a/net/iucv/af_iucv.c
4561 +++ b/net/iucv/af_iucv.c
4562 @@ -1574,7 +1574,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
4563 break;
4564 }
4565
4566 - if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
4567 + if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
4568 + sk->sk_state == IUCV_CONNECTED) {
4569 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
4570 txmsg.class = 0;
4571 txmsg.tag = 0;
4572 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4573 index f029e75ec815a..30a0c7c6224b3 100644
4574 --- a/net/mac80211/tx.c
4575 +++ b/net/mac80211/tx.c
4576 @@ -1944,19 +1944,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
4577
4578 /* device xmit handlers */
4579
4580 +enum ieee80211_encrypt {
4581 + ENCRYPT_NO,
4582 + ENCRYPT_MGMT,
4583 + ENCRYPT_DATA,
4584 +};
4585 +
4586 static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
4587 struct sk_buff *skb,
4588 - int head_need, bool may_encrypt)
4589 + int head_need,
4590 + enum ieee80211_encrypt encrypt)
4591 {
4592 struct ieee80211_local *local = sdata->local;
4593 - struct ieee80211_hdr *hdr;
4594 bool enc_tailroom;
4595 int tail_need = 0;
4596
4597 - hdr = (struct ieee80211_hdr *) skb->data;
4598 - enc_tailroom = may_encrypt &&
4599 - (sdata->crypto_tx_tailroom_needed_cnt ||
4600 - ieee80211_is_mgmt(hdr->frame_control));
4601 + enc_tailroom = encrypt == ENCRYPT_MGMT ||
4602 + (encrypt == ENCRYPT_DATA &&
4603 + sdata->crypto_tx_tailroom_needed_cnt);
4604
4605 if (enc_tailroom) {
4606 tail_need = IEEE80211_ENCRYPT_TAILROOM;
4607 @@ -1988,23 +1993,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
4608 {
4609 struct ieee80211_local *local = sdata->local;
4610 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4611 - struct ieee80211_hdr *hdr;
4612 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
4613 int headroom;
4614 - bool may_encrypt;
4615 + enum ieee80211_encrypt encrypt;
4616
4617 - may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
4618 + if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
4619 + encrypt = ENCRYPT_NO;
4620 + else if (ieee80211_is_mgmt(hdr->frame_control))
4621 + encrypt = ENCRYPT_MGMT;
4622 + else
4623 + encrypt = ENCRYPT_DATA;
4624
4625 headroom = local->tx_headroom;
4626 - if (may_encrypt)
4627 + if (encrypt != ENCRYPT_NO)
4628 headroom += sdata->encrypt_headroom;
4629 headroom -= skb_headroom(skb);
4630 headroom = max_t(int, 0, headroom);
4631
4632 - if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
4633 + if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) {
4634 ieee80211_free_txskb(&local->hw, skb);
4635 return;
4636 }
4637
4638 + /* reload after potential resize */
4639 hdr = (struct ieee80211_hdr *) skb->data;
4640 info->control.vif = &sdata->vif;
4641
4642 @@ -2808,7 +2819,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
4643 head_need += sdata->encrypt_headroom;
4644 head_need += local->tx_headroom;
4645 head_need = max_t(int, 0, head_need);
4646 - if (ieee80211_skb_resize(sdata, skb, head_need, true)) {
4647 + if (ieee80211_skb_resize(sdata, skb, head_need, ENCRYPT_DATA)) {
4648 ieee80211_free_txskb(&local->hw, skb);
4649 skb = NULL;
4650 return ERR_PTR(-ENOMEM);
4651 @@ -3482,7 +3493,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
4652 if (unlikely(ieee80211_skb_resize(sdata, skb,
4653 max_t(int, extra_head + hw_headroom -
4654 skb_headroom(skb), 0),
4655 - false))) {
4656 + ENCRYPT_NO))) {
4657 kfree_skb(skb);
4658 return true;
4659 }
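
The bool-to-enum conversion above exists because the tailroom decision needs the frame type, and the helper previously re-read it from skb->data, which is not guaranteed to hold the 802.11 header at every call site. Callers now classify the frame once and pass the result down: management frames always get crypto tailroom when encryption is on, data frames only when the driver's software-crypto tailroom count is non-zero. A compilable sketch of that classification, with the header test reduced to a plain bool:

#include <assert.h>
#include <stdbool.h>

enum encrypt { ENCRYPT_NO, ENCRYPT_MGMT, ENCRYPT_DATA };

static enum encrypt classify(bool dont_encrypt, bool is_mgmt)
{
        if (dont_encrypt)
                return ENCRYPT_NO;
        return is_mgmt ? ENCRYPT_MGMT : ENCRYPT_DATA;
}

static bool needs_enc_tailroom(enum encrypt e, unsigned int tailroom_cnt)
{
        /* Management frames always get crypto tailroom; data frames only
         * when the driver accounts for software-crypto tailroom.
         */
        return e == ENCRYPT_MGMT ||
               (e == ENCRYPT_DATA && tailroom_cnt > 0);
}

int main(void)
{
        assert(needs_enc_tailroom(classify(false, true), 0));   /* mgmt frame */
        assert(!needs_enc_tailroom(classify(false, false), 0)); /* plain data */
        assert(needs_enc_tailroom(classify(false, false), 1));  /* sw crypto  */
        assert(!needs_enc_tailroom(classify(true, true), 1));   /* no encrypt */
        return 0;
}
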
4660 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
4661 index 133a3f1b6f56c..3cc4daa856d6b 100644
4662 --- a/net/netfilter/ipset/ip_set_core.c
4663 +++ b/net/netfilter/ipset/ip_set_core.c
4664 @@ -485,13 +485,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,
4665 if (SET_WITH_COUNTER(set)) {
4666 struct ip_set_counter *counter = ext_counter(data, set);
4667
4668 + ip_set_update_counter(counter, ext, flags);
4669 +
4670 if (flags & IPSET_FLAG_MATCH_COUNTERS &&
4671 !(ip_set_match_counter(ip_set_get_packets(counter),
4672 mext->packets, mext->packets_op) &&
4673 ip_set_match_counter(ip_set_get_bytes(counter),
4674 mext->bytes, mext->bytes_op)))
4675 return false;
4676 - ip_set_update_counter(counter, ext, flags);
4677 }
4678 if (SET_WITH_SKBINFO(set))
4679 ip_set_get_skbinfo(ext_skbinfo(data, set),
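
Moving ip_set_update_counter() ahead of the threshold test, as above, makes the accounting unconditional: before the change, a failed packets/bytes comparison returned early and the counters stopped advancing for matching traffic. The shape of the fix in a standalone sketch (the struct and the single threshold are illustrative, not the ipset extension types):

#include <assert.h>
#include <stdbool.h>

struct counter { unsigned long long packets, bytes; };

static bool account_then_match(struct counter *c, unsigned int len,
                               unsigned long long min_packets)
{
        /* Update first, so accounting keeps running even when the
         * threshold comparison below fails.
         */
        c->packets++;
        c->bytes += len;
        return c->packets >= min_packets;
}

int main(void)
{
        struct counter c = { 0, 0 };

        assert(!account_then_match(&c, 100, 2)); /* no match, still counted */
        assert(account_then_match(&c, 100, 2));  /* second packet matches   */
        assert(c.packets == 2 && c.bytes == 200);
        return 0;
}
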
4680 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
4681 index 64a05906cc0e6..89aa1fc334b19 100644
4682 --- a/net/netfilter/ipvs/ip_vs_core.c
4683 +++ b/net/netfilter/ipvs/ip_vs_core.c
4684 @@ -748,12 +748,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af,
4685 struct dst_entry *dst = skb_dst(skb);
4686
4687 if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) &&
4688 - ip6_route_me_harder(ipvs->net, skb) != 0)
4689 + ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0)
4690 return 1;
4691 } else
4692 #endif
4693 if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) &&
4694 - ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0)
4695 + ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0)
4696 return 1;
4697
4698 return 0;
4699 diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
4700 index 59151dc07fdc1..e87b6bd6b3cdb 100644
4701 --- a/net/netfilter/nf_nat_proto.c
4702 +++ b/net/netfilter/nf_nat_proto.c
4703 @@ -715,7 +715,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb,
4704
4705 if (ct->tuplehash[dir].tuple.dst.u3.ip !=
4706 ct->tuplehash[!dir].tuple.src.u3.ip) {
4707 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
4708 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
4709 if (err < 0)
4710 ret = NF_DROP_ERR(err);
4711 }
4712 @@ -953,7 +953,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb,
4713
4714 if (!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3,
4715 &ct->tuplehash[!dir].tuple.src.u3)) {
4716 - err = nf_ip6_route_me_harder(state->net, skb);
4717 + err = nf_ip6_route_me_harder(state->net, state->sk, skb);
4718 if (err < 0)
4719 ret = NF_DROP_ERR(err);
4720 }
4721 diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
4722 index b9cbe1e2453e8..4bb4cfde28b47 100644
4723 --- a/net/netfilter/nf_synproxy_core.c
4724 +++ b/net/netfilter/nf_synproxy_core.c
4725 @@ -446,7 +446,7 @@ synproxy_send_tcp(struct net *net,
4726
4727 skb_dst_set_noref(nskb, skb_dst(skb));
4728 nskb->protocol = htons(ETH_P_IP);
4729 - if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
4730 + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
4731 goto free_nskb;
4732
4733 if (nfct) {
4734 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4735 index 5a77b7a177229..51391d5d22656 100644
4736 --- a/net/netfilter/nf_tables_api.c
4737 +++ b/net/netfilter/nf_tables_api.c
4738 @@ -7010,11 +7010,15 @@ static void nf_tables_abort_release(struct nft_trans *trans)
4739 kfree(trans);
4740 }
4741
4742 -static int __nf_tables_abort(struct net *net, bool autoload)
4743 +static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
4744 {
4745 struct nft_trans *trans, *next;
4746 struct nft_trans_elem *te;
4747
4748 + if (action == NFNL_ABORT_VALIDATE &&
4749 + nf_tables_validate(net) < 0)
4750 + return -EAGAIN;
4751 +
4752 list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list,
4753 list) {
4754 switch (trans->msg_type) {
4755 @@ -7132,7 +7136,7 @@ static int __nf_tables_abort(struct net *net, bool autoload)
4756 nf_tables_abort_release(trans);
4757 }
4758
4759 - if (autoload)
4760 + if (action == NFNL_ABORT_AUTOLOAD)
4761 nf_tables_module_autoload(net);
4762 else
4763 nf_tables_module_autoload_cleanup(net);
4764 @@ -7145,9 +7149,10 @@ static void nf_tables_cleanup(struct net *net)
4765 nft_validate_state_update(net, NFT_VALIDATE_SKIP);
4766 }
4767
4768 -static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload)
4769 +static int nf_tables_abort(struct net *net, struct sk_buff *skb,
4770 + enum nfnl_abort_action action)
4771 {
4772 - int ret = __nf_tables_abort(net, autoload);
4773 + int ret = __nf_tables_abort(net, action);
4774
4775 mutex_unlock(&net->nft.commit_mutex);
4776
4777 @@ -7754,7 +7759,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
4778 {
4779 mutex_lock(&net->nft.commit_mutex);
4780 if (!list_empty(&net->nft.commit_list))
4781 - __nf_tables_abort(net, false);
4782 + __nf_tables_abort(net, NFNL_ABORT_NONE);
4783 __nft_release_tables(net);
4784 mutex_unlock(&net->nft.commit_mutex);
4785 WARN_ON_ONCE(!list_empty(&net->nft.tables));
4786 diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
4787 index 6d03b09096210..81c86a156c6c0 100644
4788 --- a/net/netfilter/nfnetlink.c
4789 +++ b/net/netfilter/nfnetlink.c
4790 @@ -315,7 +315,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
4791 return netlink_ack(skb, nlh, -EINVAL, NULL);
4792 replay:
4793 status = 0;
4794 -
4795 +replay_abort:
4796 skb = netlink_skb_clone(oskb, GFP_KERNEL);
4797 if (!skb)
4798 return netlink_ack(oskb, nlh, -ENOMEM, NULL);
4799 @@ -481,7 +481,7 @@ ack:
4800 }
4801 done:
4802 if (status & NFNL_BATCH_REPLAY) {
4803 - ss->abort(net, oskb, true);
4804 + ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD);
4805 nfnl_err_reset(&err_list);
4806 kfree_skb(skb);
4807 module_put(ss->owner);
4808 @@ -492,11 +492,25 @@ done:
4809 status |= NFNL_BATCH_REPLAY;
4810 goto done;
4811 } else if (err) {
4812 - ss->abort(net, oskb, false);
4813 + ss->abort(net, oskb, NFNL_ABORT_NONE);
4814 netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL);
4815 }
4816 } else {
4817 - ss->abort(net, oskb, false);
4818 + enum nfnl_abort_action abort_action;
4819 +
4820 + if (status & NFNL_BATCH_FAILURE)
4821 + abort_action = NFNL_ABORT_NONE;
4822 + else
4823 + abort_action = NFNL_ABORT_VALIDATE;
4824 +
4825 + err = ss->abort(net, oskb, abort_action);
4826 + if (err == -EAGAIN) {
4827 + nfnl_err_reset(&err_list);
4828 + kfree_skb(skb);
4829 + module_put(ss->owner);
4830 + status |= NFNL_BATCH_FAILURE;
4831 + goto replay_abort;
4832 + }
4833 }
4834 if (ss->cleanup)
4835 ss->cleanup(net);
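
Taken together, the nf_tables and nfnetlink hunks replace the abort path's bool with a three-valued action: NFNL_ABORT_NONE undoes the transaction list, NFNL_ABORT_AUTOLOAD additionally honours queued module requests, and NFNL_ABORT_VALIDATE runs the ruleset validator first and reports -EAGAIN so the batch abort is replayed once. The replay cannot loop, because the retry sets NFNL_BATCH_FAILURE and therefore downgrades to NFNL_ABORT_NONE. A compact model of that control flow (the stub validator simply fails):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum nfnl_abort_action {
        NFNL_ABORT_NONE,        /* plain undo of the transaction list  */
        NFNL_ABORT_AUTOLOAD,    /* undo, then honour module requests   */
        NFNL_ABORT_VALIDATE,    /* validate the ruleset before undoing */
};

/* Stub: pretend the ruleset never validates, to exercise the replay. */
static bool ruleset_valid(void) { return false; }

static int abort_batch(enum nfnl_abort_action action)
{
        if (action == NFNL_ABORT_VALIDATE && !ruleset_valid())
                return -EAGAIN;         /* caller must abort again */
        /* ... walk the commit list in reverse and undo it ... */
        return 0;
}

int main(void)
{
        bool batch_failed = false;

replay_abort:
        if (abort_batch(batch_failed ? NFNL_ABORT_NONE :
                        NFNL_ABORT_VALIDATE) == -EAGAIN) {
                batch_failed = true;    /* second pass cannot recurse */
                goto replay_abort;
        }
        puts("batch aborted");
        return 0;
}
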
4836 diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c
4837 index 8826bbe71136c..edd02cda57fca 100644
4838 --- a/net/netfilter/nft_chain_route.c
4839 +++ b/net/netfilter/nft_chain_route.c
4840 @@ -42,7 +42,7 @@ static unsigned int nf_route_table_hook4(void *priv,
4841 iph->daddr != daddr ||
4842 skb->mark != mark ||
4843 iph->tos != tos) {
4844 - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC);
4845 + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC);
4846 if (err < 0)
4847 ret = NF_DROP_ERR(err);
4848 }
4849 @@ -92,7 +92,7 @@ static unsigned int nf_route_table_hook6(void *priv,
4850 skb->mark != mark ||
4851 ipv6_hdr(skb)->hop_limit != hop_limit ||
4852 flowlabel != *((u32 *)ipv6_hdr(skb)))) {
4853 - err = nf_ip6_route_me_harder(state->net, skb);
4854 + err = nf_ip6_route_me_harder(state->net, state->sk, skb);
4855 if (err < 0)
4856 ret = NF_DROP_ERR(err);
4857 }
4858 diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c
4859 index 51b454d8fa9c9..924195861faf7 100644
4860 --- a/net/netfilter/utils.c
4861 +++ b/net/netfilter/utils.c
4862 @@ -191,8 +191,8 @@ static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry
4863 skb->mark == rt_info->mark &&
4864 iph->daddr == rt_info->daddr &&
4865 iph->saddr == rt_info->saddr))
4866 - return ip_route_me_harder(entry->state.net, skb,
4867 - RTN_UNSPEC);
4868 + return ip_route_me_harder(entry->state.net, entry->state.sk,
4869 + skb, RTN_UNSPEC);
4870 }
4871 #endif
4872 return 0;
4873 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
4874 index 0e275e11f5115..6e6147a81bc3a 100644
4875 --- a/net/sched/sch_generic.c
4876 +++ b/net/sched/sch_generic.c
4877 @@ -1127,10 +1127,13 @@ static void dev_deactivate_queue(struct net_device *dev,
4878 void *_qdisc_default)
4879 {
4880 struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc);
4881 + struct Qdisc *qdisc_default = _qdisc_default;
4882
4883 if (qdisc) {
4884 if (!(qdisc->flags & TCQ_F_BUILTIN))
4885 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
4886 +
4887 + rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
4888 }
4889 }
4890
4891 diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
4892 index 931c426673c02..444e1792d02cf 100644
4893 --- a/net/tipc/topsrv.c
4894 +++ b/net/tipc/topsrv.c
4895 @@ -664,12 +664,18 @@ static int tipc_topsrv_start(struct net *net)
4896
4897 ret = tipc_topsrv_work_start(srv);
4898 if (ret < 0)
4899 - return ret;
4900 + goto err_start;
4901
4902 ret = tipc_topsrv_create_listener(srv);
4903 if (ret < 0)
4904 - tipc_topsrv_work_stop(srv);
4905 + goto err_create;
4906
4907 + return 0;
4908 +
4909 +err_create:
4910 + tipc_topsrv_work_stop(srv);
4911 +err_start:
4912 + kfree(srv);
4913 return ret;
4914 }
4915
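
The tipc hunk converts two early exits into the usual goto-unwind ladder; previously a failure in tipc_topsrv_work_start() or tipc_topsrv_create_listener() returned without freeing srv. A standalone reduction of the pattern, with stubs simulating the failing step:

#include <stdio.h>
#include <stdlib.h>

struct srv { int dummy; };

static int work_start(struct srv *s)      { (void)s; return 0;  }
static void work_stop(struct srv *s)      { (void)s; }
static int create_listener(struct srv *s) { (void)s; return -1; } /* fails */

static int start(void)
{
        struct srv *srv = malloc(sizeof(*srv));
        int ret;

        if (!srv)
                return -1;

        ret = work_start(srv);
        if (ret < 0)
                goto err_start;

        ret = create_listener(srv);
        if (ret < 0)
                goto err_create;

        return 0;

err_create:
        work_stop(srv);         /* undo completed steps in reverse order */
err_start:
        free(srv);              /* the allocation the original code leaked */
        return ret;
}

int main(void)
{
        printf("start() = %d\n", start());      /* -1, with srv freed */
        return 0;
}
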
4916 diff --git a/net/wireless/core.c b/net/wireless/core.c
4917 index ee5bb8d8af04e..5d151e8f89320 100644
4918 --- a/net/wireless/core.c
4919 +++ b/net/wireless/core.c
4920 @@ -1224,8 +1224,7 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev,
4921 }
4922 EXPORT_SYMBOL(cfg80211_stop_iface);
4923
4924 -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
4925 - struct wireless_dev *wdev)
4926 +void cfg80211_init_wdev(struct wireless_dev *wdev)
4927 {
4928 mutex_init(&wdev->mtx);
4929 INIT_LIST_HEAD(&wdev->event_list);
4930 @@ -1236,6 +1235,30 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
4931 spin_lock_init(&wdev->pmsr_lock);
4932 INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk);
4933
4934 +#ifdef CONFIG_CFG80211_WEXT
4935 + wdev->wext.default_key = -1;
4936 + wdev->wext.default_mgmt_key = -1;
4937 + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
4938 +#endif
4939 +
4940 + if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
4941 + wdev->ps = true;
4942 + else
4943 + wdev->ps = false;
4944 + /* allow mac80211 to determine the timeout */
4945 + wdev->ps_timeout = -1;
4946 +
4947 + if ((wdev->iftype == NL80211_IFTYPE_STATION ||
4948 + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
4949 + wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
4950 + wdev->netdev->priv_flags |= IFF_DONT_BRIDGE;
4951 +
4952 + INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
4953 +}
4954 +
4955 +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
4956 + struct wireless_dev *wdev)
4957 +{
4958 /*
4959 * We get here also when the interface changes network namespaces,
4960 * as it's registered into the new one, but we don't want it to
4961 @@ -1269,6 +1292,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
4962 switch (state) {
4963 case NETDEV_POST_INIT:
4964 SET_NETDEV_DEVTYPE(dev, &wiphy_type);
4965 + wdev->netdev = dev;
4966 + /* can only change netns with wiphy */
4967 + dev->features |= NETIF_F_NETNS_LOCAL;
4968 +
4969 + cfg80211_init_wdev(wdev);
4970 break;
4971 case NETDEV_REGISTER:
4972 /*
4973 @@ -1276,35 +1304,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
4974 * called within code protected by it when interfaces
4975 * are added with nl80211.
4976 */
4977 - /* can only change netns with wiphy */
4978 - dev->features |= NETIF_F_NETNS_LOCAL;
4979 -
4980 if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
4981 "phy80211")) {
4982 pr_err("failed to add phy80211 symlink to netdev!\n");
4983 }
4984 - wdev->netdev = dev;
4985 -#ifdef CONFIG_CFG80211_WEXT
4986 - wdev->wext.default_key = -1;
4987 - wdev->wext.default_mgmt_key = -1;
4988 - wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
4989 -#endif
4990 -
4991 - if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT)
4992 - wdev->ps = true;
4993 - else
4994 - wdev->ps = false;
4995 - /* allow mac80211 to determine the timeout */
4996 - wdev->ps_timeout = -1;
4997 -
4998 - if ((wdev->iftype == NL80211_IFTYPE_STATION ||
4999 - wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||
5000 - wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
5001 - dev->priv_flags |= IFF_DONT_BRIDGE;
5002 -
5003 - INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
5004
5005 - cfg80211_init_wdev(rdev, wdev);
5006 + cfg80211_register_wdev(rdev, wdev);
5007 break;
5008 case NETDEV_GOING_DOWN:
5009 cfg80211_leave(rdev, wdev);
5010 diff --git a/net/wireless/core.h b/net/wireless/core.h
5011 index ed487e3245714..d83c8e009448a 100644
5012 --- a/net/wireless/core.h
5013 +++ b/net/wireless/core.h
5014 @@ -210,8 +210,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
5015 int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
5016 struct net *net);
5017
5018 -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev,
5019 - struct wireless_dev *wdev);
5020 +void cfg80211_init_wdev(struct wireless_dev *wdev);
5021 +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
5022 + struct wireless_dev *wdev);
5023
5024 static inline void wdev_lock(struct wireless_dev *wdev)
5025 __acquires(wdev)
5026 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5027 index 672b70730e898..dbac5c0995a0f 100644
5028 --- a/net/wireless/nl80211.c
5029 +++ b/net/wireless/nl80211.c
5030 @@ -3654,7 +3654,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
5031 * P2P Device and NAN do not have a netdev, so don't go
5032 * through the netdev notifier and must be added here
5033 */
5034 - cfg80211_init_wdev(rdev, wdev);
5035 + cfg80211_init_wdev(wdev);
5036 + cfg80211_register_wdev(rdev, wdev);
5037 break;
5038 default:
5039 break;
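
The wireless changes split the old combined helper into cfg80211_init_wdev(), which sets up per-wdev state as soon as the object exists, and cfg80211_register_wdev(), which makes it visible to the rest of cfg80211. Netdev-backed interfaces now run the halves from two notifier events (NETDEV_POST_INIT, then NETDEV_REGISTER), while P2P Device and NAN, having no netdev, call both back to back as in the nl80211 hunk. A compact sketch of the two-phase shape (field names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct wdev { bool initialized; bool registered; };

/* One-time state: locks, lists, defaults; safe before visibility. */
static void init_wdev(struct wdev *w)     { w->initialized = true; }

/* Publication: after this the object is reachable by the stack. */
static void register_wdev(struct wdev *w) { w->registered = true; }

int main(void)
{
        struct wdev netdev_wdev = { 0 }, p2pdev_wdev = { 0 };

        init_wdev(&netdev_wdev);        /* at NETDEV_POST_INIT */
        register_wdev(&netdev_wdev);    /* at NETDEV_REGISTER  */

        init_wdev(&p2pdev_wdev);        /* no netdev: both steps */
        register_wdev(&p2pdev_wdev);    /* run back to back      */

        printf("%d %d\n", netdev_wdev.registered, p2pdev_wdev.registered);
        return 0;
}
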
5040 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
5041 index 20a8e6af88c45..0f3b57a73670b 100644
5042 --- a/net/wireless/reg.c
5043 +++ b/net/wireless/reg.c
5044 @@ -3405,7 +3405,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
5045 power_rule = &reg_rule->power_rule;
5046
5047 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
5048 - snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
5049 + snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO",
5050 freq_range->max_bandwidth_khz,
5051 reg_get_max_bandwidth(rd, reg_rule));
5052 else
5053 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
5054 index 256f3e97d1f34..5e8146fcb5835 100644
5055 --- a/net/x25/af_x25.c
5056 +++ b/net/x25/af_x25.c
5057 @@ -819,7 +819,7 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
5058 sock->state = SS_CONNECTED;
5059 rc = 0;
5060 out_put_neigh:
5061 - if (rc) {
5062 + if (rc && x25->neighbour) {
5063 read_lock_bh(&x25_list_lock);
5064 x25_neigh_put(x25->neighbour);
5065 x25->neighbour = NULL;
5066 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
5067 index aaea8cb7459d8..61fd0569d3934 100644
5068 --- a/net/xfrm/xfrm_state.c
5069 +++ b/net/xfrm/xfrm_state.c
5070 @@ -2001,6 +2001,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
5071 int err = -ENOENT;
5072 __be32 minspi = htonl(low);
5073 __be32 maxspi = htonl(high);
5074 + __be32 newspi = 0;
5075 u32 mark = x->mark.v & x->mark.m;
5076
5077 spin_lock_bh(&x->lock);
5078 @@ -2019,21 +2020,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high)
5079 xfrm_state_put(x0);
5080 goto unlock;
5081 }
5082 - x->id.spi = minspi;
5083 + newspi = minspi;
5084 } else {
5085 u32 spi = 0;
5086 for (h = 0; h < high-low+1; h++) {
5087 spi = low + prandom_u32()%(high-low+1);
5088 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
5089 if (x0 == NULL) {
5090 - x->id.spi = htonl(spi);
5091 + newspi = htonl(spi);
5092 break;
5093 }
5094 xfrm_state_put(x0);
5095 }
5096 }
5097 - if (x->id.spi) {
5098 + if (newspi) {
5099 spin_lock_bh(&net->xfrm.xfrm_state_lock);
5100 + x->id.spi = newspi;
5101 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
5102 hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
5103 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
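
The xfrm fix stops writing candidate SPIs straight into x->id.spi while searching; the winner is staged in newspi and committed only under xfrm_state_lock, in the same critical section as the byspi hash insertion, so no lookup can observe a half-installed state. A userspace reduction of that stage-then-publish pattern (a pthread mutex standing in for the kernel spinlock):

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int published_spi;      /* guarded by state_lock */

static int alloc_spi(unsigned int free_candidate)
{
        unsigned int newspi = 0;

        /* Search phase: stage the result locally, publish nothing. */
        if (free_candidate)
                newspi = free_candidate;

        if (!newspi)
                return -1;              /* nothing published on failure */

        /* Publish phase: value and hash insertion appear together. */
        pthread_mutex_lock(&state_lock);
        published_spi = newspi;
        /* hlist_add_head_rcu(&x->byspi, ...) at this point in the kernel */
        pthread_mutex_unlock(&state_lock);
        return 0;
}

int main(void)
{
        assert(alloc_spi(0x100) == 0 && published_spi == 0x100);
        assert(alloc_spi(0) == -1 && published_spi == 0x100);
        return 0;
}
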
5104 diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c
5105 index de92365e4324b..5887bff50560a 100644
5106 --- a/security/selinux/ibpkey.c
5107 +++ b/security/selinux/ibpkey.c
5108 @@ -151,8 +151,10 @@ static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid)
5109 * is valid, it just won't be added to the cache.
5110 */
5111 new = kzalloc(sizeof(*new), GFP_ATOMIC);
5112 - if (!new)
5113 + if (!new) {
5114 + ret = -ENOMEM;
5115 goto out;
5116 + }
5117
5118 new->psec.subnet_prefix = subnet_prefix;
5119 new->psec.pkey = pkey_num;
5120 diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
5121 index 09ff209df4a30..c87187f635733 100644
5122 --- a/sound/hda/ext/hdac_ext_controller.c
5123 +++ b/sound/hda/ext/hdac_ext_controller.c
5124 @@ -148,6 +148,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
5125 return NULL;
5126 if (bus->idx != bus_idx)
5127 return NULL;
5128 + if (addr < 0 || addr > 31)
5129 + return NULL;
5130
5131 list_for_each_entry(hlink, &bus->hlink_list, list) {
5132 for (i = 0; i < HDA_MAX_CODECS; i++) {
5133 diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
5134 index a356fb0e57738..9da7a06d024f1 100644
5135 --- a/sound/pci/hda/hda_controller.h
5136 +++ b/sound/pci/hda/hda_controller.h
5137 @@ -41,7 +41,7 @@
5138 /* 24 unused */
5139 #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
5140 #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
5141 -#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */
5142 +/* 27 unused */
5143 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
5144 #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
5145 #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
5146 @@ -143,6 +143,7 @@ struct azx {
5147 unsigned int align_buffer_size:1;
5148 unsigned int region_requested:1;
5149 unsigned int disabled:1; /* disabled by vga_switcheroo */
5150 + unsigned int pm_prepared:1;
5151
5152 /* GTS present */
5153 unsigned int gts_present:1;
5154 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5155 index 9a1968932b783..192e580561efd 100644
5156 --- a/sound/pci/hda/hda_intel.c
5157 +++ b/sound/pci/hda/hda_intel.c
5158 @@ -295,8 +295,7 @@ enum {
5159 /* PCH for HSW/BDW; with runtime PM */
5160 /* no i915 binding for this as HSW/BDW has another controller for HDMI */
5161 #define AZX_DCAPS_INTEL_PCH \
5162 - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
5163 - AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
5164 + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME)
5165
5166 /* HSW HDMI */
5167 #define AZX_DCAPS_INTEL_HASWELL \
5168 @@ -984,7 +983,7 @@ static void __azx_runtime_suspend(struct azx *chip)
5169 display_power(chip, false);
5170 }
5171
5172 -static void __azx_runtime_resume(struct azx *chip, bool from_rt)
5173 +static void __azx_runtime_resume(struct azx *chip)
5174 {
5175 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
5176 struct hdac_bus *bus = azx_bus(chip);
5177 @@ -1001,7 +1000,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
5178 azx_init_pci(chip);
5179 hda_intel_init_chip(chip, true);
5180
5181 - if (from_rt) {
5182 + /* Avoid codec resume if runtime resume is for system suspend */
5183 + if (!chip->pm_prepared) {
5184 list_for_each_codec(codec, &chip->bus) {
5185 if (codec->relaxed_resume)
5186 continue;
5187 @@ -1017,6 +1017,29 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt)
5188 }
5189
5190 #ifdef CONFIG_PM_SLEEP
5191 +static int azx_prepare(struct device *dev)
5192 +{
5193 + struct snd_card *card = dev_get_drvdata(dev);
5194 + struct azx *chip;
5195 +
5196 + chip = card->private_data;
5197 + chip->pm_prepared = 1;
5198 +
5199 + /* HDA controller always requires different WAKEEN for runtime suspend
5200 + * and system suspend, so don't use direct-complete here.
5201 + */
5202 + return 0;
5203 +}
5204 +
5205 +static void azx_complete(struct device *dev)
5206 +{
5207 + struct snd_card *card = dev_get_drvdata(dev);
5208 + struct azx *chip;
5209 +
5210 + chip = card->private_data;
5211 + chip->pm_prepared = 0;
5212 +}
5213 +
5214 static int azx_suspend(struct device *dev)
5215 {
5216 struct snd_card *card = dev_get_drvdata(dev);
5217 @@ -1028,15 +1051,7 @@ static int azx_suspend(struct device *dev)
5218
5219 chip = card->private_data;
5220 bus = azx_bus(chip);
5221 - snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
5222 - /* An ugly workaround: direct call of __azx_runtime_suspend() and
5223 - * __azx_runtime_resume() for old Intel platforms that suffer from
5224 - * spurious wakeups after S3 suspend
5225 - */
5226 - if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
5227 - __azx_runtime_suspend(chip);
5228 - else
5229 - pm_runtime_force_suspend(dev);
5230 + __azx_runtime_suspend(chip);
5231 if (bus->irq >= 0) {
5232 free_irq(bus->irq, chip);
5233 bus->irq = -1;
5234 @@ -1064,11 +1079,7 @@ static int azx_resume(struct device *dev)
5235 if (azx_acquire_irq(chip, 1) < 0)
5236 return -EIO;
5237
5238 - if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP)
5239 - __azx_runtime_resume(chip, false);
5240 - else
5241 - pm_runtime_force_resume(dev);
5242 - snd_power_change_state(card, SNDRV_CTL_POWER_D0);
5243 + __azx_runtime_resume(chip);
5244
5245 trace_azx_resume(chip);
5246 return 0;
5247 @@ -1116,10 +1127,7 @@ static int azx_runtime_suspend(struct device *dev)
5248 chip = card->private_data;
5249
5250 /* enable controller wake up event */
5251 - if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) {
5252 - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
5253 - STATESTS_INT_MASK);
5254 - }
5255 + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK);
5256
5257 __azx_runtime_suspend(chip);
5258 trace_azx_runtime_suspend(chip);
5259 @@ -1130,18 +1138,14 @@ static int azx_runtime_resume(struct device *dev)
5260 {
5261 struct snd_card *card = dev_get_drvdata(dev);
5262 struct azx *chip;
5263 - bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0;
5264
5265 if (!azx_is_pm_ready(card))
5266 return 0;
5267 chip = card->private_data;
5268 - __azx_runtime_resume(chip, from_rt);
5269 + __azx_runtime_resume(chip);
5270
5271 /* disable controller Wake Up event*/
5272 - if (from_rt) {
5273 - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
5274 - ~STATESTS_INT_MASK);
5275 - }
5276 + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK);
5277
5278 trace_azx_runtime_resume(chip);
5279 return 0;
5280 @@ -1175,6 +1179,8 @@ static int azx_runtime_idle(struct device *dev)
5281 static const struct dev_pm_ops azx_pm = {
5282 SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume)
5283 #ifdef CONFIG_PM_SLEEP
5284 + .prepare = azx_prepare,
5285 + .complete = azx_complete,
5286 .freeze_noirq = azx_freeze_noirq,
5287 .thaw_noirq = azx_thaw_noirq,
5288 #endif
5289 @@ -2322,6 +2328,7 @@ static int azx_probe_continue(struct azx *chip)
5290
5291 if (azx_has_pm_runtime(chip)) {
5292 pm_runtime_use_autosuspend(&pci->dev);
5293 + pm_runtime_allow(&pci->dev);
5294 pm_runtime_put_autosuspend(&pci->dev);
5295 }
5296
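
The HD-audio hunks retire the per-chipset spurious-wakeup workaround in favour of a pm_prepared bit: the new ->prepare callback sets it when system sleep begins, ->complete clears it, and the shared resume helper consults the bit instead of inferring the situation from the card's power state. The flag pattern in isolation, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

struct chip { bool pm_prepared; };

static void prepare_cb(struct chip *c)  { c->pm_prepared = true;  }
static void complete_cb(struct chip *c) { c->pm_prepared = false; }

static void runtime_resume(struct chip *c)
{
        /* Skip codec resume when this runtime resume is only a stepping
         * stone of system suspend/resume.
         */
        if (c->pm_prepared)
                puts("controller up, codecs left for system resume");
        else
                puts("controller up, resuming codecs");
}

int main(void)
{
        struct chip c = { 0 };

        runtime_resume(&c);     /* normal runtime PM: codecs resumed */

        prepare_cb(&c);         /* system sleep begins */
        runtime_resume(&c);     /* codecs skipped      */
        complete_cb(&c);        /* system sleep ends   */
        return 0;
}
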
5297 diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
5298 index 55408c8fcb4e3..cdd7ae90c2b59 100644
5299 --- a/sound/soc/codecs/cs42l51.c
5300 +++ b/sound/soc/codecs/cs42l51.c
5301 @@ -247,8 +247,28 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = {
5302 &cs42l51_adcr_mux_controls),
5303 };
5304
5305 +static int mclk_event(struct snd_soc_dapm_widget *w,
5306 + struct snd_kcontrol *kcontrol, int event)
5307 +{
5308 + struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm);
5309 + struct cs42l51_private *cs42l51 = snd_soc_component_get_drvdata(comp);
5310 +
5311 + switch (event) {
5312 + case SND_SOC_DAPM_PRE_PMU:
5313 + return clk_prepare_enable(cs42l51->mclk_handle);
5314 + case SND_SOC_DAPM_POST_PMD:
5315 + /* Delay mclk shutdown to fulfill power-down sequence requirements */
5316 + msleep(20);
5317 + clk_disable_unprepare(cs42l51->mclk_handle);
5318 + break;
5319 + }
5320 +
5321 + return 0;
5322 +}
5323 +
5324 static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = {
5325 - SND_SOC_DAPM_CLOCK_SUPPLY("MCLK")
5326 + SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, mclk_event,
5327 + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
5328 };
5329
5330 static const struct snd_soc_dapm_route cs42l51_routes[] = {
5331 diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
5332 index f318403133e96..81906c25e4a87 100644
5333 --- a/sound/soc/codecs/wcd9335.c
5334 +++ b/sound/soc/codecs/wcd9335.c
5335 @@ -618,7 +618,7 @@ static const char * const sb_tx8_mux_text[] = {
5336 "ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192"
5337 };
5338
5339 -static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
5340 +static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400);
5341 static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1);
5342 static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
5343 static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0);
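
The wcd9335 one-liner corrects the digital gain scale. DECLARE_TLV_DB_SCALE() takes its minimum and step in 0.01 dB units, and a non-zero third argument flags the minimum value as mute; the old scale claimed a 0 dB minimum with 0.01 dB steps, while the fixed one maps register value v to -84 dB + v * 1 dB. A quick check of that mapping:

#include <stdio.h>

/* min and step in 0.01 dB units, matching DECLARE_TLV_DB_SCALE(). */
static double tlv_db(int min, int step, unsigned int v)
{
        return (min + (int)v * step) / 100.0;
}

int main(void)
{
        printf("v=0  -> %6.2f dB (mute)\n",  tlv_db(-8400, 100, 0));
        printf("v=40 -> %6.2f dB\n",         tlv_db(-8400, 100, 40));
        printf("v=84 -> %6.2f dB (unity)\n", tlv_db(-8400, 100, 84));
        return 0;
}
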
5344 diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
5345 index 7cefda341fbf8..a540a2dad80c3 100644
5346 --- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
5347 +++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
5348 @@ -401,17 +401,40 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
5349 struct snd_interval *channels = hw_param_interval(params,
5350 SNDRV_PCM_HW_PARAM_CHANNELS);
5351 struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
5352 - struct snd_soc_dpcm *dpcm = container_of(
5353 - params, struct snd_soc_dpcm, hw_params);
5354 - struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
5355 - struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
5356 + struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
5357 +
5358 + /*
5359 + * The following loop will be called only for playback stream
5360 + * In this platform, there is only one playback device on every SSP
5361 + */
5362 + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
5363 + rtd_dpcm = dpcm;
5364 + break;
5365 + }
5366 +
5367 + /*
5368 +	 * The following loop will be called only for capture stream
5369 + * In this platform, there is only one capture device on every SSP
5370 + */
5371 + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
5372 + rtd_dpcm = dpcm;
5373 + break;
5374 + }
5375 +
5376 + if (!rtd_dpcm)
5377 + return -EINVAL;
5378 +
5379 + /*
5380 + * The above 2 loops are mutually exclusive based on the stream direction,
5381 + * thus rtd_dpcm variable will never be overwritten
5382 + */
5383
5384 /*
5385 * The ADSP will convert the FE rate to 48k, stereo, 24 bit
5386 */
5387 - if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
5388 - !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
5389 - !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
5390 + if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
5391 + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
5392 + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
5393 rate->min = rate->max = 48000;
5394 channels->min = channels->max = 2;
5395 snd_mask_none(fmt);
5396 @@ -421,7 +444,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
5397 * The speaker on the SSP0 supports S16_LE and not S24_LE.
5398 * thus changing the mask here
5399 */
5400 - if (!strcmp(be_dai_link->name, "SSP0-Codec"))
5401 + if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
5402 snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
5403
5404 return 0;
5405 diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
5406 index 7e6c41e63d8e1..23e1de61e92e4 100644
5407 --- a/sound/soc/qcom/sdm845.c
5408 +++ b/sound/soc/qcom/sdm845.c
5409 @@ -16,6 +16,7 @@
5410 #include "qdsp6/q6afe.h"
5411 #include "../codecs/rt5663.h"
5412
5413 +#define DRIVER_NAME "sdm845"
5414 #define DEFAULT_SAMPLE_RATE_48K 48000
5415 #define DEFAULT_MCLK_RATE 24576000
5416 #define TDM_BCLK_RATE 6144000
5417 @@ -407,6 +408,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev)
5418 goto data_alloc_fail;
5419 }
5420
5421 + card->driver_name = DRIVER_NAME;
5422 card->dapm_widgets = sdm845_snd_widgets;
5423 card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets);
5424 card->dev = dev;
5425 diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
5426 index bb5130d021554..a5201de1a191d 100644
5427 --- a/tools/perf/builtin-trace.c
5428 +++ b/tools/perf/builtin-trace.c
5429 @@ -3979,9 +3979,9 @@ do_concat:
5430 err = 0;
5431
5432 if (lists[0]) {
5433 - struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
5434 - "event selector. use 'perf list' to list available events",
5435 - parse_events_option);
5436 + struct option o = {
5437 + .value = &trace->evlist,
5438 + };
5439 err = parse_events_option(&o, lists[0], 0);
5440 }
5441 out:
5442 @@ -3995,9 +3995,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
5443 {
5444 struct trace *trace = opt->value;
5445
5446 - if (!list_empty(&trace->evlist->core.entries))
5447 - return parse_cgroups(opt, str, unset);
5448 -
5449 + if (!list_empty(&trace->evlist->core.entries)) {
5450 + struct option o = {
5451 + .value = &trace->evlist,
5452 + };
5453 + return parse_cgroups(&o, str, unset);
5454 + }
5455 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
5456
5457 return 0;
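
Both perf hunks stop rebuilding a full OPT_CALLBACK() record just to invoke a parser: a C99 designated initializer yields a struct option with only .value filled in and every other field zeroed, which is all these callbacks dereference here. A standalone illustration of the idiom (this struct option is a cut-down stand-in, not perf's):

#include <stdio.h>

struct option {
        void *value;
        const char *help;
};

static int parse(const struct option *opt, const char *arg)
{
        printf("parsing '%s' into option value %p\n", arg, opt->value);
        return 0;
}

int main(void)
{
        int target;
        /* Designated initializer: .value set, all other fields zeroed. */
        struct option o = { .value = &target };

        return parse(&o, "sched:*");
}
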
5458 diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
5459 index 93c03b39cd9cd..3b02c3f1b2895 100644
5460 --- a/tools/perf/util/scripting-engines/trace-event-python.c
5461 +++ b/tools/perf/util/scripting-engines/trace-event-python.c
5462 @@ -1587,7 +1587,6 @@ static void _free_command_line(wchar_t **command_line, int num)
5463 static int python_start_script(const char *script, int argc, const char **argv)
5464 {
5465 struct tables *tables = &tables_global;
5466 - PyMODINIT_FUNC (*initfunc)(void);
5467 #if PY_MAJOR_VERSION < 3
5468 const char **command_line;
5469 #else
5470 @@ -1602,20 +1601,18 @@ static int python_start_script(const char *script, int argc, const char **argv)
5471 FILE *fp;
5472
5473 #if PY_MAJOR_VERSION < 3
5474 - initfunc = initperf_trace_context;
5475 command_line = malloc((argc + 1) * sizeof(const char *));
5476 command_line[0] = script;
5477 for (i = 1; i < argc + 1; i++)
5478 command_line[i] = argv[i - 1];
5479 + PyImport_AppendInittab(name, initperf_trace_context);
5480 #else
5481 - initfunc = PyInit_perf_trace_context;
5482 command_line = malloc((argc + 1) * sizeof(wchar_t *));
5483 command_line[0] = Py_DecodeLocale(script, NULL);
5484 for (i = 1; i < argc + 1; i++)
5485 command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
5486 + PyImport_AppendInittab(name, PyInit_perf_trace_context);
5487 #endif
5488 -
5489 - PyImport_AppendInittab(name, initfunc);
5490 Py_Initialize();
5491
5492 #if PY_MAJOR_VERSION < 3
5493 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
5494 index 5c172845fa5ac..ff524a3fc5003 100644
5495 --- a/tools/perf/util/session.c
5496 +++ b/tools/perf/util/session.c
5497 @@ -588,6 +588,7 @@ static void perf_event__mmap2_swap(union perf_event *event,
5498 event->mmap2.maj = bswap_32(event->mmap2.maj);
5499 event->mmap2.min = bswap_32(event->mmap2.min);
5500 event->mmap2.ino = bswap_64(event->mmap2.ino);
5501 + event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
5502
5503 if (sample_id_all) {
5504 void *data = &event->mmap2.filename;
5505 diff --git a/tools/testing/selftests/bpf/prog_tests/map_init.c b/tools/testing/selftests/bpf/prog_tests/map_init.c
5506 new file mode 100644
5507 index 0000000000000..14a31109dd0e0
5508 --- /dev/null
5509 +++ b/tools/testing/selftests/bpf/prog_tests/map_init.c
5510 @@ -0,0 +1,214 @@
5511 +// SPDX-License-Identifier: GPL-2.0-only
5512 +/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
5513 +
5514 +#include <test_progs.h>
5515 +#include "test_map_init.skel.h"
5516 +
5517 +#define TEST_VALUE 0x1234
5518 +#define FILL_VALUE 0xdeadbeef
5519 +
5520 +static int nr_cpus;
5521 +static int duration;
5522 +
5523 +typedef unsigned long long map_key_t;
5524 +typedef unsigned long long map_value_t;
5525 +typedef struct {
5526 + map_value_t v; /* padding */
5527 +} __bpf_percpu_val_align pcpu_map_value_t;
5528 +
5529 +
5530 +static int map_populate(int map_fd, int num)
5531 +{
5532 + pcpu_map_value_t value[nr_cpus];
5533 + int i, err;
5534 + map_key_t key;
5535 +
5536 + for (i = 0; i < nr_cpus; i++)
5537 + bpf_percpu(value, i) = FILL_VALUE;
5538 +
5539 + for (key = 1; key <= num; key++) {
5540 + err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
5541 + if (!ASSERT_OK(err, "bpf_map_update_elem"))
5542 + return -1;
5543 + }
5544 +
5545 + return 0;
5546 +}
5547 +
5548 +static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
5549 + int *map_fd, int populate)
5550 +{
5551 + struct test_map_init *skel;
5552 + int err;
5553 +
5554 + skel = test_map_init__open();
5555 + if (!ASSERT_OK_PTR(skel, "skel_open"))
5556 + return NULL;
5557 +
5558 + err = bpf_map__set_type(skel->maps.hashmap1, map_type);
5559 + if (!ASSERT_OK(err, "bpf_map__set_type"))
5560 + goto error;
5561 +
5562 + err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
5563 + if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
5564 + goto error;
5565 +
5566 + err = test_map_init__load(skel);
5567 + if (!ASSERT_OK(err, "skel_load"))
5568 + goto error;
5569 +
5570 + *map_fd = bpf_map__fd(skel->maps.hashmap1);
5571 + if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
5572 + goto error;
5573 +
5574 + err = map_populate(*map_fd, populate);
5575 + if (!ASSERT_OK(err, "map_populate"))
5576 + goto error_map;
5577 +
5578 + return skel;
5579 +
5580 +error_map:
5581 + close(*map_fd);
5582 +error:
5583 + test_map_init__destroy(skel);
5584 + return NULL;
5585 +}
5586 +
5587 +/* executes bpf program that updates map with key, value */
5588 +static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
5589 + map_value_t value)
5590 +{
5591 + struct test_map_init__bss *bss;
5592 +
5593 + bss = skel->bss;
5594 +
5595 + bss->inKey = key;
5596 + bss->inValue = value;
5597 + bss->inPid = getpid();
5598 +
5599 + if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
5600 + return -1;
5601 +
5602 + /* Let tracepoint trigger */
5603 + syscall(__NR_getpgid);
5604 +
5605 + test_map_init__detach(skel);
5606 +
5607 + return 0;
5608 +}
5609 +
5610 +static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
5611 +{
5612 + int i, nzCnt = 0;
5613 + map_value_t val;
5614 +
5615 + for (i = 0; i < nr_cpus; i++) {
5616 + val = bpf_percpu(value, i);
5617 + if (val) {
5618 + if (CHECK(val != expected, "map value",
5619 + "unexpected for cpu %d: 0x%llx\n", i, val))
5620 + return -1;
5621 + nzCnt++;
5622 + }
5623 + }
5624 +
5625 + if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
5626 + nzCnt))
5627 + return -1;
5628 +
5629 + return 0;
5630 +}
5631 +
5632 +/* Add key=1 elem with values set for all CPUs
5633 + * Delete elem key=1
5634 + * Run bpf prog that inserts new key=1 elem with value=0x1234
5635 + * (bpf prog can only set value for current CPU)
5636 + * Lookup Key=1 and check value is as expected for all CPUs:
5637 + * value set by bpf prog for one CPU, 0 for all others
5638 + */
5639 +static void test_pcpu_map_init(void)
5640 +{
5641 + pcpu_map_value_t value[nr_cpus];
5642 + struct test_map_init *skel;
5643 + int map_fd, err;
5644 + map_key_t key;
5645 +
5646 + /* max 1 elem in map so insertion is forced to reuse freed entry */
5647 + skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
5648 + if (!ASSERT_OK_PTR(skel, "prog_setup"))
5649 + return;
5650 +
5651 +	/* delete element so the entry can be re-used */
5652 + key = 1;
5653 + err = bpf_map_delete_elem(map_fd, &key);
5654 + if (!ASSERT_OK(err, "bpf_map_delete_elem"))
5655 + goto cleanup;
5656 +
5657 + /* run bpf prog that inserts new elem, re-using the slot just freed */
5658 + err = prog_run_insert_elem(skel, key, TEST_VALUE);
5659 + if (!ASSERT_OK(err, "prog_run_insert_elem"))
5660 + goto cleanup;
5661 +
5662 + /* check that key=1 was re-created by bpf prog */
5663 + err = bpf_map_lookup_elem(map_fd, &key, value);
5664 + if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
5665 + goto cleanup;
5666 +
5667 + /* and has expected values */
5668 + check_values_one_cpu(value, TEST_VALUE);
5669 +
5670 +cleanup:
5671 + test_map_init__destroy(skel);
5672 +}
5673 +
5674 +/* Add key=1 and key=2 elems with values set for all CPUs
5675 + * Run bpf prog that inserts new key=3 elem
5676 + * (only for current cpu; other cpus should have initial value = 0)
5677 + * Lookup Key=1 and check value is as expected for all CPUs
5678 + */
5679 +static void test_pcpu_lru_map_init(void)
5680 +{
5681 + pcpu_map_value_t value[nr_cpus];
5682 + struct test_map_init *skel;
5683 + int map_fd, err;
5684 + map_key_t key;
5685 +
5686 + /* Set up LRU map with 2 elements, values filled for all CPUs.
5687 + * With these 2 elements, the LRU map is full
5688 + */
5689 + skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
5690 + if (!ASSERT_OK_PTR(skel, "prog_setup"))
5691 + return;
5692 +
5693 + /* run bpf prog that inserts new key=3 element, re-using LRU slot */
5694 + key = 3;
5695 + err = prog_run_insert_elem(skel, key, TEST_VALUE);
5696 + if (!ASSERT_OK(err, "prog_run_insert_elem"))
5697 + goto cleanup;
5698 +
5699 + /* check that key=3 replaced one of earlier elements */
5700 + err = bpf_map_lookup_elem(map_fd, &key, value);
5701 + if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
5702 + goto cleanup;
5703 +
5704 + /* and has expected values */
5705 + check_values_one_cpu(value, TEST_VALUE);
5706 +
5707 +cleanup:
5708 + test_map_init__destroy(skel);
5709 +}
5710 +
5711 +void test_map_init(void)
5712 +{
5713 + nr_cpus = bpf_num_possible_cpus();
5714 + if (nr_cpus <= 1) {
5715 + printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
5716 + test__skip();
5717 + return;
5718 + }
5719 +
5720 + if (test__start_subtest("pcpu_map_init"))
5721 + test_pcpu_map_init();
5722 + if (test__start_subtest("pcpu_lru_map_init"))
5723 + test_pcpu_lru_map_init();
5724 +}
5725 diff --git a/tools/testing/selftests/bpf/progs/test_map_init.c b/tools/testing/selftests/bpf/progs/test_map_init.c
5726 new file mode 100644
5727 index 0000000000000..c89d28ead6737
5728 --- /dev/null
5729 +++ b/tools/testing/selftests/bpf/progs/test_map_init.c
5730 @@ -0,0 +1,33 @@
5731 +// SPDX-License-Identifier: GPL-2.0
5732 +/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
5733 +
5734 +#include "vmlinux.h"
5735 +#include <bpf/bpf_helpers.h>
5736 +
5737 +__u64 inKey = 0;
5738 +__u64 inValue = 0;
5739 +__u32 inPid = 0;
5740 +
5741 +struct {
5742 + __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
5743 + __uint(max_entries, 2);
5744 + __type(key, __u64);
5745 + __type(value, __u64);
5746 +} hashmap1 SEC(".maps");
5747 +
5748 +
5749 +SEC("tp/syscalls/sys_enter_getpgid")
5750 +int sysenter_getpgid(const void *ctx)
5751 +{
5752 + /* Just do it for once, when called from our own test prog. This
5753 + * ensures the map value is only updated for a single CPU.
5754 + */
5755 + int cur_pid = bpf_get_current_pid_tgid() >> 32;
5756 +
5757 + if (cur_pid == inPid)
5758 + bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
5759 +
5760 + return 0;
5761 +}
5762 +
5763 +char _license[] SEC("license") = "GPL";
5764 diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
5765 index 0f60087583d8f..a753c73d869ab 100644
5766 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
5767 +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc
5768 @@ -11,12 +11,16 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported
5769 :;: "user-memory access syntax and ustring working on user memory";:
5770 echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \
5771 > kprobe_events
5772 +echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \
5773 + >> kprobe_events
5774
5775 grep myevent kprobe_events | \
5776 grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string'
5777 echo 1 > events/kprobes/myevent/enable
5778 +echo 1 > events/kprobes/myevent2/enable
5779 echo > /dev/null
5780 echo 0 > events/kprobes/myevent/enable
5781 +echo 0 > events/kprobes/myevent2/enable
5782
5783 grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"'
5784
5785 diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c
5786 index b9fe75fc3e517..8a59438ccc78b 100644
5787 --- a/tools/testing/selftests/pidfd/pidfd_open_test.c
5788 +++ b/tools/testing/selftests/pidfd/pidfd_open_test.c
5789 @@ -6,7 +6,6 @@
5790 #include <inttypes.h>
5791 #include <limits.h>
5792 #include <linux/types.h>
5793 -#include <linux/wait.h>
5794 #include <sched.h>
5795 #include <signal.h>
5796 #include <stdbool.h>
5797 diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c
5798 index 4b115444dfe90..6108112753573 100644
5799 --- a/tools/testing/selftests/pidfd/pidfd_poll_test.c
5800 +++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c
5801 @@ -3,7 +3,6 @@
5802 #define _GNU_SOURCE
5803 #include <errno.h>
5804 #include <linux/types.h>
5805 -#include <linux/wait.h>
5806 #include <poll.h>
5807 #include <signal.h>
5808 #include <stdbool.h>
5809 diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c
5810 index 471e2aa280776..fb4fe9188806e 100644
5811 --- a/tools/testing/selftests/proc/proc-loadavg-001.c
5812 +++ b/tools/testing/selftests/proc/proc-loadavg-001.c
5813 @@ -14,7 +14,6 @@
5814 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5815 */
5816 /* Test that /proc/loadavg correctly reports last pid in pid namespace. */
5817 -#define _GNU_SOURCE
5818 #include <errno.h>
5819 #include <sched.h>
5820 #include <sys/types.h>
5821 diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c
5822 index 9f6d000c02455..8511dcfe67c75 100644
5823 --- a/tools/testing/selftests/proc/proc-self-syscall.c
5824 +++ b/tools/testing/selftests/proc/proc-self-syscall.c
5825 @@ -13,7 +13,6 @@
5826 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
5827 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
5828 */
5829 -#define _GNU_SOURCE
5830 #include <unistd.h>
5831 #include <sys/syscall.h>
5832 #include <sys/types.h>
5833 diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c
5834 index 30e2b78490898..e7ceabed7f51f 100644
5835 --- a/tools/testing/selftests/proc/proc-uptime-002.c
5836 +++ b/tools/testing/selftests/proc/proc-uptime-002.c
5837 @@ -15,7 +15,6 @@
5838 */
5839 // Test that values in /proc/uptime increment monotonically
5840 // while shifting across CPUs.
5841 -#define _GNU_SOURCE
5842 #undef NDEBUG
5843 #include <assert.h>
5844 #include <unistd.h>
5845 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5846 index 8700402f3000d..03a586ab6d27b 100644
5847 --- a/virt/kvm/arm/mmu.c
5848 +++ b/virt/kvm/arm/mmu.c
5849 @@ -1756,6 +1756,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
5850 if (kvm_is_device_pfn(pfn)) {
5851 mem_type = PAGE_S2_DEVICE;
5852 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
5853 + force_pte = true;
5854 } else if (logging_active) {
5855 /*
5856 * Faults on pages in a memslot with logging enabled
5857 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
5858 index 87927f7e1ee70..48fde38d64c37 100644
5859 --- a/virt/kvm/arm/psci.c
5860 +++ b/virt/kvm/arm/psci.c
5861 @@ -408,7 +408,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
5862 val = SMCCC_RET_SUCCESS;
5863 break;
5864 case KVM_BP_HARDEN_NOT_REQUIRED:
5865 - val = SMCCC_RET_NOT_REQUIRED;
5866 + val = SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED;
5867 break;
5868 }
5869 break;