Contents of /trunk/kernel-alx/patches-5.4/0235-5.4.136-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (18 months, 4 weeks ago) by niro
File size: 128671 byte(s)
-add missing
1 | diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst |
2 | index 4a9d9c794ee5d..7d255249094d0 100644 |
3 | --- a/Documentation/arm64/tagged-address-abi.rst |
4 | +++ b/Documentation/arm64/tagged-address-abi.rst |
5 | @@ -45,14 +45,24 @@ how the user addresses are used by the kernel: |
6 | |
7 | 1. User addresses not accessed by the kernel but used for address space |
8 | management (e.g. ``mprotect()``, ``madvise()``). The use of valid |
9 | - tagged pointers in this context is allowed with the exception of |
10 | - ``brk()``, ``mmap()`` and the ``new_address`` argument to |
11 | - ``mremap()`` as these have the potential to alias with existing |
12 | - user addresses. |
13 | - |
14 | - NOTE: This behaviour changed in v5.6 and so some earlier kernels may |
15 | - incorrectly accept valid tagged pointers for the ``brk()``, |
16 | - ``mmap()`` and ``mremap()`` system calls. |
17 | + tagged pointers in this context is allowed with these exceptions: |
18 | + |
19 | + - ``brk()``, ``mmap()`` and the ``new_address`` argument to |
20 | + ``mremap()`` as these have the potential to alias with existing |
21 | + user addresses. |
22 | + |
23 | + NOTE: This behaviour changed in v5.6 and so some earlier kernels may |
24 | + incorrectly accept valid tagged pointers for the ``brk()``, |
25 | + ``mmap()`` and ``mremap()`` system calls. |
26 | + |
27 | + - The ``range.start``, ``start`` and ``dst`` arguments to the |
28 | + ``UFFDIO_*`` ``ioctl()``s used on a file descriptor obtained from |
29 | + ``userfaultfd()``, as fault addresses subsequently obtained by reading |
30 | + the file descriptor will be untagged, which may otherwise confuse |
31 | + tag-unaware programs. |
32 | + |
33 | + NOTE: This behaviour changed in v5.14 and so some earlier kernels may |
34 | + incorrectly accept valid tagged pointers for this system call. |
35 | |
36 | 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI |
37 | relaxation is disabled by default and the application thread needs to |
38 | diff --git a/Documentation/trace/histogram.rst b/Documentation/trace/histogram.rst |
39 | index 8408670d03282..3f3d1b960fe79 100644 |
40 | --- a/Documentation/trace/histogram.rst |
41 | +++ b/Documentation/trace/histogram.rst |
42 | @@ -191,7 +191,7 @@ Documentation written by Tom Zanussi |
43 | with the event, in nanoseconds. May be |
44 | modified by .usecs to have timestamps |
45 | interpreted as microseconds. |
46 | - cpu int the cpu on which the event occurred. |
47 | + common_cpu int the cpu on which the event occurred. |
48 | ====================== ==== ======================================= |
49 | |
50 | Extended error information |
51 | diff --git a/Makefile b/Makefile |
52 | index 428ebbd039967..1c565572bfb24 100644 |
53 | --- a/Makefile |
54 | +++ b/Makefile |
55 | @@ -1,7 +1,7 @@ |
56 | # SPDX-License-Identifier: GPL-2.0 |
57 | VERSION = 5 |
58 | PATCHLEVEL = 4 |
59 | -SUBLEVEL = 135 |
60 | +SUBLEVEL = 136 |
61 | EXTRAVERSION = |
62 | NAME = Kleptomaniac Octopus |
63 | |
64 | diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h |
65 | index dd10854321cac..166842337eb2c 100644 |
66 | --- a/arch/mips/include/asm/pgalloc.h |
67 | +++ b/arch/mips/include/asm/pgalloc.h |
68 | @@ -62,15 +62,11 @@ do { \ |
69 | |
70 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) |
71 | { |
72 | - pmd_t *pmd = NULL; |
73 | - struct page *pg; |
74 | + pmd_t *pmd; |
75 | |
76 | - pg = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, PMD_ORDER); |
77 | - if (pg) { |
78 | - pgtable_pmd_page_ctor(pg); |
79 | - pmd = (pmd_t *)page_address(pg); |
80 | + pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER); |
81 | + if (pmd) |
82 | pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table); |
83 | - } |
84 | return pmd; |
85 | } |
86 | |
87 | diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c |
88 | index c206b31ce07ac..1bdf5e7d1b438 100644 |
89 | --- a/arch/nds32/mm/mmap.c |
90 | +++ b/arch/nds32/mm/mmap.c |
91 | @@ -59,7 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, |
92 | |
93 | vma = find_vma(mm, addr); |
94 | if (TASK_SIZE - len >= addr && |
95 | - (!vma || addr + len <= vma->vm_start)) |
96 | + (!vma || addr + len <= vm_start_gap(vma))) |
97 | return addr; |
98 | } |
99 | |
100 | diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
101 | index 9011857c0434d..bba358f134718 100644 |
102 | --- a/arch/powerpc/kvm/book3s_hv.c |
103 | +++ b/arch/powerpc/kvm/book3s_hv.c |
104 | @@ -2306,8 +2306,10 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
105 | HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; |
106 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
107 | vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); |
108 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
109 | if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) |
110 | vcpu->arch.hfscr |= HFSCR_TM; |
111 | +#endif |
112 | } |
113 | if (cpu_has_feature(CPU_FTR_TM_COMP)) |
114 | vcpu->arch.hfscr |= HFSCR_TM; |
115 | diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c |
116 | index 66fa2c7590aa0..9906d203d9d39 100644 |
117 | --- a/arch/powerpc/kvm/book3s_hv_nested.c |
118 | +++ b/arch/powerpc/kvm/book3s_hv_nested.c |
119 | @@ -232,6 +232,9 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) |
120 | if (vcpu->kvm->arch.l1_ptcr == 0) |
121 | return H_NOT_AVAILABLE; |
122 | |
123 | + if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr)) |
124 | + return H_BAD_MODE; |
125 | + |
126 | /* copy parameters in */ |
127 | hv_ptr = kvmppc_get_gpr(vcpu, 4); |
128 | err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv, |
129 | @@ -253,6 +256,23 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu) |
130 | if (l2_hv.vcpu_token >= NR_CPUS) |
131 | return H_PARAMETER; |
132 | |
133 | + /* |
134 | + * L1 must have set up a suspended state to enter the L2 in a |
135 | + * transactional state, and only in that case. These have to be |
136 | + * filtered out here to prevent causing a TM Bad Thing in the |
137 | + * host HRFID. We could synthesize a TM Bad Thing back to the L1 |
138 | + * here but there doesn't seem like much point. |
139 | + */ |
140 | + if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) { |
141 | + if (!MSR_TM_ACTIVE(l2_regs.msr)) |
142 | + return H_BAD_MODE; |
143 | + } else { |
144 | + if (l2_regs.msr & MSR_TS_MASK) |
145 | + return H_BAD_MODE; |
146 | + if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK)) |
147 | + return H_BAD_MODE; |
148 | + } |
149 | + |
150 | /* translate lpid */ |
151 | l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true); |
152 | if (!l2) |
153 | diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c |
154 | index 26b25994c9697..41137ec727622 100644 |
155 | --- a/arch/powerpc/kvm/book3s_rtas.c |
156 | +++ b/arch/powerpc/kvm/book3s_rtas.c |
157 | @@ -240,6 +240,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) |
158 | * value so we can restore it on the way out. |
159 | */ |
160 | orig_rets = args.rets; |
161 | + if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) { |
162 | + /* |
163 | + * Don't overflow our args array: ensure there is room for |
164 | + * at least rets[0] (even if the call specifies 0 nret). |
165 | + * |
166 | + * Each handler must then check for the correct nargs and nret |
167 | + * values, but they may always return failure in rets[0]. |
168 | + */ |
169 | + rc = -EINVAL; |
170 | + goto fail; |
171 | + } |
172 | args.rets = &args.args[be32_to_cpu(args.nargs)]; |
173 | |
174 | mutex_lock(&vcpu->kvm->arch.rtas_token_lock); |
175 | @@ -267,9 +278,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) |
176 | fail: |
177 | /* |
178 | * We only get here if the guest has called RTAS with a bogus |
179 | - * args pointer. That means we can't get to the args, and so we |
180 | - * can't fail the RTAS call. So fail right out to userspace, |
181 | - * which should kill the guest. |
182 | + * args pointer or nargs/nret values that would overflow the |
183 | + * array. That means we can't get to the args, and so we can't |
184 | + * fail the RTAS call. So fail right out to userspace, which |
185 | + * should kill the guest. |
186 | + * |
187 | + * SLOF should actually pass the hcall return value from the |
188 | + * rtas handler call in r3, so enter_rtas could be modified to |
189 | + * return a failure indication in r3 and we could return such |
190 | + * errors to the guest rather than failing to host userspace. |
191 | + * However old guests that don't test for failure could then |
192 | + * continue silently after errors, so for now we won't do this. |
193 | */ |
194 | return rc; |
195 | } |
196 | diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c |
197 | index e03c064716789..8dd4d2b83677b 100644 |
198 | --- a/arch/powerpc/kvm/powerpc.c |
199 | +++ b/arch/powerpc/kvm/powerpc.c |
200 | @@ -2035,9 +2035,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, |
201 | { |
202 | struct kvm_enable_cap cap; |
203 | r = -EFAULT; |
204 | - vcpu_load(vcpu); |
205 | if (copy_from_user(&cap, argp, sizeof(cap))) |
206 | goto out; |
207 | + vcpu_load(vcpu); |
208 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); |
209 | vcpu_put(vcpu); |
210 | break; |
211 | @@ -2061,9 +2061,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, |
212 | case KVM_DIRTY_TLB: { |
213 | struct kvm_dirty_tlb dirty; |
214 | r = -EFAULT; |
215 | - vcpu_load(vcpu); |
216 | if (copy_from_user(&dirty, argp, sizeof(dirty))) |
217 | goto out; |
218 | + vcpu_load(vcpu); |
219 | r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty); |
220 | vcpu_put(vcpu); |
221 | break; |
222 | diff --git a/arch/s390/boot/text_dma.S b/arch/s390/boot/text_dma.S |
223 | index 9715715c4c28d..ea93314f44976 100644 |
224 | --- a/arch/s390/boot/text_dma.S |
225 | +++ b/arch/s390/boot/text_dma.S |
226 | @@ -9,16 +9,6 @@ |
227 | #include <asm/errno.h> |
228 | #include <asm/sigp.h> |
229 | |
230 | -#ifdef CC_USING_EXPOLINE |
231 | - .pushsection .dma.text.__s390_indirect_jump_r14,"axG" |
232 | -__dma__s390_indirect_jump_r14: |
233 | - larl %r1,0f |
234 | - ex 0,0(%r1) |
235 | - j . |
236 | -0: br %r14 |
237 | - .popsection |
238 | -#endif |
239 | - |
240 | .section .dma.text,"ax" |
241 | /* |
242 | * Simplified version of expoline thunk. The normal thunks can not be used here, |
243 | @@ -27,11 +17,10 @@ __dma__s390_indirect_jump_r14: |
244 | * affects a few functions that are not performance-relevant. |
245 | */ |
246 | .macro BR_EX_DMA_r14 |
247 | -#ifdef CC_USING_EXPOLINE |
248 | - jg __dma__s390_indirect_jump_r14 |
249 | -#else |
250 | - br %r14 |
251 | -#endif |
252 | + larl %r1,0f |
253 | + ex 0,0(%r1) |
254 | + j . |
255 | +0: br %r14 |
256 | .endm |
257 | |
258 | /* |
259 | diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h |
260 | index 68d362f8d6c17..c72c179a5aae9 100644 |
261 | --- a/arch/s390/include/asm/ftrace.h |
262 | +++ b/arch/s390/include/asm/ftrace.h |
263 | @@ -27,6 +27,7 @@ void ftrace_caller(void); |
264 | |
265 | extern char ftrace_graph_caller_end; |
266 | extern unsigned long ftrace_plt; |
267 | +extern void *ftrace_func; |
268 | |
269 | struct dyn_arch_ftrace { }; |
270 | |
271 | diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c |
272 | index 1bb85f60c0dd5..7df6b2642ddb0 100644 |
273 | --- a/arch/s390/kernel/ftrace.c |
274 | +++ b/arch/s390/kernel/ftrace.c |
275 | @@ -57,6 +57,7 @@ |
276 | * > brasl %r0,ftrace_caller # offset 0 |
277 | */ |
278 | |
279 | +void *ftrace_func __read_mostly = ftrace_stub; |
280 | unsigned long ftrace_plt; |
281 | |
282 | static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn) |
283 | @@ -166,6 +167,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) |
284 | |
285 | int ftrace_update_ftrace_func(ftrace_func_t func) |
286 | { |
287 | + ftrace_func = func; |
288 | return 0; |
289 | } |
290 | |
291 | diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S |
292 | index f942341429b1c..14bad577f5351 100644 |
293 | --- a/arch/s390/kernel/mcount.S |
294 | +++ b/arch/s390/kernel/mcount.S |
295 | @@ -61,13 +61,13 @@ ENTRY(ftrace_caller) |
296 | #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES |
297 | aghik %r2,%r0,-MCOUNT_INSN_SIZE |
298 | lgrl %r4,function_trace_op |
299 | - lgrl %r1,ftrace_trace_function |
300 | + lgrl %r1,ftrace_func |
301 | #else |
302 | lgr %r2,%r0 |
303 | aghi %r2,-MCOUNT_INSN_SIZE |
304 | larl %r4,function_trace_op |
305 | lg %r4,0(%r4) |
306 | - larl %r1,ftrace_trace_function |
307 | + larl %r1,ftrace_func |
308 | lg %r1,0(%r1) |
309 | #endif |
310 | lgr %r3,%r14 |
311 | diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c |
312 | index c8c16b5eed6be..e160f4650f8e4 100644 |
313 | --- a/arch/s390/net/bpf_jit_comp.c |
314 | +++ b/arch/s390/net/bpf_jit_comp.c |
315 | @@ -114,7 +114,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) |
316 | { |
317 | u32 r1 = reg2hex[b1]; |
318 | |
319 | - if (!jit->seen_reg[r1] && r1 >= 6 && r1 <= 15) |
320 | + if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) |
321 | jit->seen_reg[r1] = 1; |
322 | } |
323 | |
324 | diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c |
325 | index bf2f0373a3b2f..55745d6953f02 100644 |
326 | --- a/drivers/block/rbd.c |
327 | +++ b/drivers/block/rbd.c |
328 | @@ -4239,8 +4239,6 @@ again: |
329 | |
330 | static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) |
331 | { |
332 | - bool need_wait; |
333 | - |
334 | dout("%s rbd_dev %p\n", __func__, rbd_dev); |
335 | lockdep_assert_held_write(&rbd_dev->lock_rwsem); |
336 | |
337 | @@ -4252,11 +4250,11 @@ static bool rbd_quiesce_lock(struct rbd_device *rbd_dev) |
338 | */ |
339 | rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING; |
340 | rbd_assert(!completion_done(&rbd_dev->releasing_wait)); |
341 | - need_wait = !list_empty(&rbd_dev->running_list); |
342 | - downgrade_write(&rbd_dev->lock_rwsem); |
343 | - if (need_wait) |
344 | - wait_for_completion(&rbd_dev->releasing_wait); |
345 | - up_read(&rbd_dev->lock_rwsem); |
346 | + if (list_empty(&rbd_dev->running_list)) |
347 | + return true; |
348 | + |
349 | + up_write(&rbd_dev->lock_rwsem); |
350 | + wait_for_completion(&rbd_dev->releasing_wait); |
351 | |
352 | down_write(&rbd_dev->lock_rwsem); |
353 | if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING) |
354 | @@ -4342,15 +4340,11 @@ static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v, |
355 | if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { |
356 | down_write(&rbd_dev->lock_rwsem); |
357 | if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { |
358 | - /* |
359 | - * we already know that the remote client is |
360 | - * the owner |
361 | - */ |
362 | - up_write(&rbd_dev->lock_rwsem); |
363 | - return; |
364 | + dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n", |
365 | + __func__, rbd_dev, cid.gid, cid.handle); |
366 | + } else { |
367 | + rbd_set_owner_cid(rbd_dev, &cid); |
368 | } |
369 | - |
370 | - rbd_set_owner_cid(rbd_dev, &cid); |
371 | downgrade_write(&rbd_dev->lock_rwsem); |
372 | } else { |
373 | down_read(&rbd_dev->lock_rwsem); |
374 | @@ -4375,14 +4369,12 @@ static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v, |
375 | if (!rbd_cid_equal(&cid, &rbd_empty_cid)) { |
376 | down_write(&rbd_dev->lock_rwsem); |
377 | if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) { |
378 | - dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n", |
379 | + dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n", |
380 | __func__, rbd_dev, cid.gid, cid.handle, |
381 | rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle); |
382 | - up_write(&rbd_dev->lock_rwsem); |
383 | - return; |
384 | + } else { |
385 | + rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); |
386 | } |
387 | - |
388 | - rbd_set_owner_cid(rbd_dev, &rbd_empty_cid); |
389 | downgrade_write(&rbd_dev->lock_rwsem); |
390 | } else { |
391 | down_read(&rbd_dev->lock_rwsem); |
392 | diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c |
393 | index 3a2b607369151..415d7b3a59f82 100644 |
394 | --- a/drivers/firmware/efi/efi.c |
395 | +++ b/drivers/firmware/efi/efi.c |
396 | @@ -975,6 +975,7 @@ static int __init efi_memreserve_map_root(void) |
397 | static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) |
398 | { |
399 | struct resource *res, *parent; |
400 | + int ret; |
401 | |
402 | res = kzalloc(sizeof(struct resource), GFP_ATOMIC); |
403 | if (!res) |
404 | @@ -987,7 +988,17 @@ static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size) |
405 | |
406 | /* we expect a conflict with a 'System RAM' region */ |
407 | parent = request_resource_conflict(&iomem_resource, res); |
408 | - return parent ? request_resource(parent, res) : 0; |
409 | + ret = parent ? request_resource(parent, res) : 0; |
410 | + |
411 | + /* |
412 | + * Given that efi_mem_reserve_iomem() can be called at any |
413 | + * time, only call memblock_reserve() if the architecture |
414 | + * keeps the infrastructure around. |
415 | + */ |
416 | + if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret) |
417 | + memblock_reserve(addr, size); |
418 | + |
419 | + return ret; |
420 | } |
421 | |
422 | int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size) |
423 | diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c |
424 | index c1955d320fecd..8f665678e9e39 100644 |
425 | --- a/drivers/firmware/efi/tpm.c |
426 | +++ b/drivers/firmware/efi/tpm.c |
427 | @@ -62,9 +62,11 @@ int __init efi_tpm_eventlog_init(void) |
428 | tbl_size = sizeof(*log_tbl) + log_tbl->size; |
429 | memblock_reserve(efi.tpm_log, tbl_size); |
430 | |
431 | - if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR || |
432 | - log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { |
433 | - pr_warn(FW_BUG "TPM Final Events table missing or invalid\n"); |
434 | + if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR) { |
435 | + pr_info("TPM Final Events table not present\n"); |
436 | + goto out; |
437 | + } else if (log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) { |
438 | + pr_warn(FW_BUG "TPM Final Events table invalid\n"); |
439 | goto out; |
440 | } |
441 | |
442 | diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c |
443 | index 127f8a5352019..76b6676b0106f 100644 |
444 | --- a/drivers/gpu/drm/drm_ioctl.c |
445 | +++ b/drivers/gpu/drm/drm_ioctl.c |
446 | @@ -826,6 +826,9 @@ long drm_ioctl(struct file *filp, |
447 | if (drm_dev_is_unplugged(dev)) |
448 | return -ENODEV; |
449 | |
450 | + if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE) |
451 | + return -ENOTTY; |
452 | + |
453 | is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END; |
454 | |
455 | if (is_driver_ioctl) { |
456 | diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c |
457 | index 2aa89eaecf6ff..bdb4d59c81277 100644 |
458 | --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c |
459 | +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c |
460 | @@ -453,7 +453,6 @@ static int rpi_touchscreen_remove(struct i2c_client *i2c) |
461 | drm_panel_remove(&ts->base); |
462 | |
463 | mipi_dsi_device_unregister(ts->dsi); |
464 | - kfree(ts->dsi); |
465 | |
466 | return 0; |
467 | } |
468 | diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c |
469 | index aa301c6063467..acf1cc2bee07b 100644 |
470 | --- a/drivers/iio/accel/bma180.c |
471 | +++ b/drivers/iio/accel/bma180.c |
472 | @@ -47,7 +47,7 @@ struct bma180_part_info { |
473 | |
474 | u8 int_reset_reg, int_reset_mask; |
475 | u8 sleep_reg, sleep_mask; |
476 | - u8 bw_reg, bw_mask; |
477 | + u8 bw_reg, bw_mask, bw_offset; |
478 | u8 scale_reg, scale_mask; |
479 | u8 power_reg, power_mask, lowpower_val; |
480 | u8 int_enable_reg, int_enable_mask; |
481 | @@ -103,6 +103,7 @@ struct bma180_part_info { |
482 | |
483 | #define BMA250_RANGE_MASK GENMASK(3, 0) /* Range of accel values */ |
484 | #define BMA250_BW_MASK GENMASK(4, 0) /* Accel bandwidth */ |
485 | +#define BMA250_BW_OFFSET 8 |
486 | #define BMA250_SUSPEND_MASK BIT(7) /* chip will sleep */ |
487 | #define BMA250_LOWPOWER_MASK BIT(6) |
488 | #define BMA250_DATA_INTEN_MASK BIT(4) |
489 | @@ -241,7 +242,8 @@ static int bma180_set_bw(struct bma180_data *data, int val) |
490 | for (i = 0; i < data->part_info->num_bw; ++i) { |
491 | if (data->part_info->bw_table[i] == val) { |
492 | ret = bma180_set_bits(data, data->part_info->bw_reg, |
493 | - data->part_info->bw_mask, i); |
494 | + data->part_info->bw_mask, |
495 | + i + data->part_info->bw_offset); |
496 | if (ret) { |
497 | dev_err(&data->client->dev, |
498 | "failed to set bandwidth\n"); |
499 | @@ -633,32 +635,53 @@ static const struct iio_chan_spec bma250_channels[] = { |
500 | |
501 | static const struct bma180_part_info bma180_part_info[] = { |
502 | [BMA180] = { |
503 | - bma180_channels, ARRAY_SIZE(bma180_channels), |
504 | - bma180_scale_table, ARRAY_SIZE(bma180_scale_table), |
505 | - bma180_bw_table, ARRAY_SIZE(bma180_bw_table), |
506 | - BMA180_CTRL_REG0, BMA180_RESET_INT, |
507 | - BMA180_CTRL_REG0, BMA180_SLEEP, |
508 | - BMA180_BW_TCS, BMA180_BW, |
509 | - BMA180_OFFSET_LSB1, BMA180_RANGE, |
510 | - BMA180_TCO_Z, BMA180_MODE_CONFIG, BMA180_LOW_POWER, |
511 | - BMA180_CTRL_REG3, BMA180_NEW_DATA_INT, |
512 | - BMA180_RESET, |
513 | - bma180_chip_config, |
514 | - bma180_chip_disable, |
515 | + .channels = bma180_channels, |
516 | + .num_channels = ARRAY_SIZE(bma180_channels), |
517 | + .scale_table = bma180_scale_table, |
518 | + .num_scales = ARRAY_SIZE(bma180_scale_table), |
519 | + .bw_table = bma180_bw_table, |
520 | + .num_bw = ARRAY_SIZE(bma180_bw_table), |
521 | + .int_reset_reg = BMA180_CTRL_REG0, |
522 | + .int_reset_mask = BMA180_RESET_INT, |
523 | + .sleep_reg = BMA180_CTRL_REG0, |
524 | + .sleep_mask = BMA180_SLEEP, |
525 | + .bw_reg = BMA180_BW_TCS, |
526 | + .bw_mask = BMA180_BW, |
527 | + .scale_reg = BMA180_OFFSET_LSB1, |
528 | + .scale_mask = BMA180_RANGE, |
529 | + .power_reg = BMA180_TCO_Z, |
530 | + .power_mask = BMA180_MODE_CONFIG, |
531 | + .lowpower_val = BMA180_LOW_POWER, |
532 | + .int_enable_reg = BMA180_CTRL_REG3, |
533 | + .int_enable_mask = BMA180_NEW_DATA_INT, |
534 | + .softreset_reg = BMA180_RESET, |
535 | + .chip_config = bma180_chip_config, |
536 | + .chip_disable = bma180_chip_disable, |
537 | }, |
538 | [BMA250] = { |
539 | - bma250_channels, ARRAY_SIZE(bma250_channels), |
540 | - bma250_scale_table, ARRAY_SIZE(bma250_scale_table), |
541 | - bma250_bw_table, ARRAY_SIZE(bma250_bw_table), |
542 | - BMA250_INT_RESET_REG, BMA250_INT_RESET_MASK, |
543 | - BMA250_POWER_REG, BMA250_SUSPEND_MASK, |
544 | - BMA250_BW_REG, BMA250_BW_MASK, |
545 | - BMA250_RANGE_REG, BMA250_RANGE_MASK, |
546 | - BMA250_POWER_REG, BMA250_LOWPOWER_MASK, 1, |
547 | - BMA250_INT_ENABLE_REG, BMA250_DATA_INTEN_MASK, |
548 | - BMA250_RESET_REG, |
549 | - bma250_chip_config, |
550 | - bma250_chip_disable, |
551 | + .channels = bma250_channels, |
552 | + .num_channels = ARRAY_SIZE(bma250_channels), |
553 | + .scale_table = bma250_scale_table, |
554 | + .num_scales = ARRAY_SIZE(bma250_scale_table), |
555 | + .bw_table = bma250_bw_table, |
556 | + .num_bw = ARRAY_SIZE(bma250_bw_table), |
557 | + .int_reset_reg = BMA250_INT_RESET_REG, |
558 | + .int_reset_mask = BMA250_INT_RESET_MASK, |
559 | + .sleep_reg = BMA250_POWER_REG, |
560 | + .sleep_mask = BMA250_SUSPEND_MASK, |
561 | + .bw_reg = BMA250_BW_REG, |
562 | + .bw_mask = BMA250_BW_MASK, |
563 | + .bw_offset = BMA250_BW_OFFSET, |
564 | + .scale_reg = BMA250_RANGE_REG, |
565 | + .scale_mask = BMA250_RANGE_MASK, |
566 | + .power_reg = BMA250_POWER_REG, |
567 | + .power_mask = BMA250_LOWPOWER_MASK, |
568 | + .lowpower_val = 1, |
569 | + .int_enable_reg = BMA250_INT_ENABLE_REG, |
570 | + .int_enable_mask = BMA250_DATA_INTEN_MASK, |
571 | + .softreset_reg = BMA250_RESET_REG, |
572 | + .chip_config = bma250_chip_config, |
573 | + .chip_disable = bma250_chip_disable, |
574 | }, |
575 | }; |
576 | |
577 | diff --git a/drivers/media/pci/ngene/ngene-core.c b/drivers/media/pci/ngene/ngene-core.c |
578 | index af15ca1c501bf..919b297e645c5 100644 |
579 | --- a/drivers/media/pci/ngene/ngene-core.c |
580 | +++ b/drivers/media/pci/ngene/ngene-core.c |
581 | @@ -385,7 +385,7 @@ static int ngene_command_config_free_buf(struct ngene *dev, u8 *config) |
582 | |
583 | com.cmd.hdr.Opcode = CMD_CONFIGURE_FREE_BUFFER; |
584 | com.cmd.hdr.Length = 6; |
585 | - memcpy(&com.cmd.ConfigureBuffers.config, config, 6); |
586 | + memcpy(&com.cmd.ConfigureFreeBuffers.config, config, 6); |
587 | com.in_len = 6; |
588 | com.out_len = 0; |
589 | |
590 | diff --git a/drivers/media/pci/ngene/ngene.h b/drivers/media/pci/ngene/ngene.h |
591 | index 84f04e0e0cb9a..3d296f1998a1a 100644 |
592 | --- a/drivers/media/pci/ngene/ngene.h |
593 | +++ b/drivers/media/pci/ngene/ngene.h |
594 | @@ -407,12 +407,14 @@ enum _BUFFER_CONFIGS { |
595 | |
596 | struct FW_CONFIGURE_FREE_BUFFERS { |
597 | struct FW_HEADER hdr; |
598 | - u8 UVI1_BufferLength; |
599 | - u8 UVI2_BufferLength; |
600 | - u8 TVO_BufferLength; |
601 | - u8 AUD1_BufferLength; |
602 | - u8 AUD2_BufferLength; |
603 | - u8 TVA_BufferLength; |
604 | + struct { |
605 | + u8 UVI1_BufferLength; |
606 | + u8 UVI2_BufferLength; |
607 | + u8 TVO_BufferLength; |
608 | + u8 AUD1_BufferLength; |
609 | + u8 AUD2_BufferLength; |
610 | + u8 TVA_BufferLength; |
611 | + } __packed config; |
612 | } __attribute__ ((__packed__)); |
613 | |
614 | struct FW_CONFIGURE_UART { |
615 | diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c |
616 | index 8aa8825557a81..40b105daaf9e7 100644 |
617 | --- a/drivers/net/dsa/mv88e6xxx/chip.c |
618 | +++ b/drivers/net/dsa/mv88e6xxx/chip.c |
619 | @@ -3192,7 +3192,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { |
620 | .port_set_cmode = mv88e6341_port_set_cmode, |
621 | .port_setup_message_port = mv88e6xxx_setup_message_port, |
622 | .stats_snapshot = mv88e6390_g1_stats_snapshot, |
623 | - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, |
624 | + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, |
625 | .stats_get_sset_count = mv88e6320_stats_get_sset_count, |
626 | .stats_get_strings = mv88e6320_stats_get_strings, |
627 | .stats_get_stats = mv88e6390_stats_get_stats, |
628 | @@ -3907,7 +3907,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { |
629 | .port_set_cmode = mv88e6341_port_set_cmode, |
630 | .port_setup_message_port = mv88e6xxx_setup_message_port, |
631 | .stats_snapshot = mv88e6390_g1_stats_snapshot, |
632 | - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, |
633 | + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, |
634 | .stats_get_sset_count = mv88e6320_stats_get_sset_count, |
635 | .stats_get_strings = mv88e6320_stats_get_strings, |
636 | .stats_get_stats = mv88e6390_stats_get_stats, |
637 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
638 | index d1c3939b0307f..287ea792922a9 100644 |
639 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
640 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
641 | @@ -9239,6 +9239,12 @@ int bnxt_half_open_nic(struct bnxt *bp) |
642 | { |
643 | int rc = 0; |
644 | |
645 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
646 | + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); |
647 | + rc = -ENODEV; |
648 | + goto half_open_err; |
649 | + } |
650 | + |
651 | rc = bnxt_alloc_mem(bp, false); |
652 | if (rc) { |
653 | netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); |
654 | @@ -9987,12 +9993,15 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent) |
655 | if (netif_running(bp->dev)) { |
656 | int rc; |
657 | |
658 | - if (!silent) |
659 | + if (silent) { |
660 | + bnxt_close_nic(bp, false, false); |
661 | + bnxt_open_nic(bp, false, false); |
662 | + } else { |
663 | bnxt_ulp_stop(bp); |
664 | - bnxt_close_nic(bp, false, false); |
665 | - rc = bnxt_open_nic(bp, false, false); |
666 | - if (!silent && !rc) |
667 | - bnxt_ulp_start(bp); |
668 | + bnxt_close_nic(bp, true, false); |
669 | + rc = bnxt_open_nic(bp, true, false); |
670 | + bnxt_ulp_start(bp, rc); |
671 | + } |
672 | } |
673 | } |
674 | |
675 | @@ -10732,6 +10741,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) |
676 | } |
677 | bp->fw_reset_timestamp = jiffies; |
678 | rtnl_lock(); |
679 | + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { |
680 | + rtnl_unlock(); |
681 | + goto fw_reset_abort; |
682 | + } |
683 | bnxt_fw_reset_close(bp); |
684 | if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { |
685 | bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; |
686 | @@ -12144,10 +12157,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) |
687 | if (!err && netif_running(netdev)) |
688 | err = bnxt_open(netdev); |
689 | |
690 | - if (!err) { |
691 | + if (!err) |
692 | result = PCI_ERS_RESULT_RECOVERED; |
693 | - bnxt_ulp_start(bp); |
694 | - } |
695 | + bnxt_ulp_start(bp, err); |
696 | } |
697 | |
698 | if (result != PCI_ERS_RESULT_RECOVERED) { |
699 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c |
700 | index 13ef6a9afaa09..b0ae180df4e6f 100644 |
701 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c |
702 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c |
703 | @@ -186,7 +186,7 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id) |
704 | |
705 | edev->ulp_tbl[ulp_id].msix_requested = 0; |
706 | edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; |
707 | - if (netif_running(dev)) { |
708 | + if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) { |
709 | bnxt_close_nic(bp, true, false); |
710 | bnxt_open_nic(bp, true, false); |
711 | } |
712 | @@ -274,6 +274,7 @@ void bnxt_ulp_stop(struct bnxt *bp) |
713 | if (!edev) |
714 | return; |
715 | |
716 | + edev->flags |= BNXT_EN_FLAG_ULP_STOPPED; |
717 | for (i = 0; i < BNXT_MAX_ULP; i++) { |
718 | struct bnxt_ulp *ulp = &edev->ulp_tbl[i]; |
719 | |
720 | @@ -284,7 +285,7 @@ void bnxt_ulp_stop(struct bnxt *bp) |
721 | } |
722 | } |
723 | |
724 | -void bnxt_ulp_start(struct bnxt *bp) |
725 | +void bnxt_ulp_start(struct bnxt *bp, int err) |
726 | { |
727 | struct bnxt_en_dev *edev = bp->edev; |
728 | struct bnxt_ulp_ops *ops; |
729 | @@ -293,6 +294,11 @@ void bnxt_ulp_start(struct bnxt *bp) |
730 | if (!edev) |
731 | return; |
732 | |
733 | + edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED; |
734 | + |
735 | + if (err) |
736 | + return; |
737 | + |
738 | for (i = 0; i < BNXT_MAX_ULP; i++) { |
739 | struct bnxt_ulp *ulp = &edev->ulp_tbl[i]; |
740 | |
741 | @@ -467,13 +473,14 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) |
742 | if (!edev) |
743 | return ERR_PTR(-ENOMEM); |
744 | edev->en_ops = &bnxt_en_ops_tbl; |
745 | - if (bp->flags & BNXT_FLAG_ROCEV1_CAP) |
746 | - edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; |
747 | - if (bp->flags & BNXT_FLAG_ROCEV2_CAP) |
748 | - edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; |
749 | edev->net = dev; |
750 | edev->pdev = bp->pdev; |
751 | bp->edev = edev; |
752 | } |
753 | + edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP; |
754 | + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) |
755 | + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; |
756 | + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) |
757 | + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; |
758 | return bp->edev; |
759 | } |
760 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h |
761 | index cd78453d0bf0f..9895406b98308 100644 |
762 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h |
763 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h |
764 | @@ -64,6 +64,7 @@ struct bnxt_en_dev { |
765 | #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \ |
766 | BNXT_EN_FLAG_ROCEV2_CAP) |
767 | #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4 |
768 | + #define BNXT_EN_FLAG_ULP_STOPPED 0x8 |
769 | const struct bnxt_en_ops *en_ops; |
770 | struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP]; |
771 | }; |
772 | @@ -92,7 +93,7 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp); |
773 | int bnxt_get_ulp_msix_base(struct bnxt *bp); |
774 | int bnxt_get_ulp_stat_ctxs(struct bnxt *bp); |
775 | void bnxt_ulp_stop(struct bnxt *bp); |
776 | -void bnxt_ulp_start(struct bnxt *bp); |
777 | +void bnxt_ulp_start(struct bnxt *bp, int err); |
778 | void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); |
779 | void bnxt_ulp_shutdown(struct bnxt *bp); |
780 | void bnxt_ulp_irq_stop(struct bnxt *bp); |
781 | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
782 | index 7bd6d2bf84408..c2a1fa75b2147 100644 |
783 | --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
784 | +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c |
785 | @@ -1187,7 +1187,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, |
786 | |
787 | switch (mode) { |
788 | case GENET_POWER_PASSIVE: |
789 | - reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); |
790 | + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | |
791 | + EXT_ENERGY_DET_MASK); |
792 | if (GENET_IS_V5(priv)) { |
793 | reg &= ~(EXT_PWR_DOWN_PHY_EN | |
794 | EXT_PWR_DOWN_PHY_RD | |
795 | @@ -2901,12 +2902,6 @@ static int bcmgenet_open(struct net_device *dev) |
796 | |
797 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
798 | |
799 | - if (priv->internal_phy) { |
800 | - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
801 | - reg |= EXT_ENERGY_DET_MASK; |
802 | - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); |
803 | - } |
804 | - |
805 | /* Disable RX/TX DMA and flush TX queues */ |
806 | dma_ctrl = bcmgenet_dma_disable(priv); |
807 | |
808 | @@ -3623,7 +3618,6 @@ static int bcmgenet_resume(struct device *d) |
809 | struct bcmgenet_priv *priv = netdev_priv(dev); |
810 | unsigned long dma_ctrl; |
811 | int ret; |
812 | - u32 reg; |
813 | |
814 | if (!netif_running(dev)) |
815 | return 0; |
816 | @@ -3655,12 +3649,6 @@ static int bcmgenet_resume(struct device *d) |
817 | |
818 | bcmgenet_set_hw_addr(priv, dev->dev_addr); |
819 | |
820 | - if (priv->internal_phy) { |
821 | - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
822 | - reg |= EXT_ENERGY_DET_MASK; |
823 | - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); |
824 | - } |
825 | - |
826 | if (priv->wolopts) |
827 | bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC); |
828 | |
829 | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c |
830 | index a41f82379369a..164988f3b4fab 100644 |
831 | --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c |
832 | +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c |
833 | @@ -160,12 +160,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, |
834 | reg |= CMD_RX_EN; |
835 | bcmgenet_umac_writel(priv, reg, UMAC_CMD); |
836 | |
837 | - if (priv->hw_params->flags & GENET_HAS_EXT) { |
838 | - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); |
839 | - reg &= ~EXT_ENERGY_DET_MASK; |
840 | - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); |
841 | - } |
842 | - |
843 | return 0; |
844 | } |
845 | |
846 | diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c |
847 | index 4cddd628d41b2..9ed3d1ab2ca58 100644 |
848 | --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c |
849 | +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c |
850 | @@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) |
851 | * bits 32:47 indicate the PVF num. |
852 | */ |
853 | for (q_no = 0; q_no < ern; q_no++) { |
854 | - reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; |
855 | + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; |
856 | |
857 | /* for VF assigned queues. */ |
858 | if (q_no < oct->sriov_info.pf_srn) { |
859 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
860 | index deb1c1f301078..21414a34a5b5a 100644 |
861 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
862 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
863 | @@ -2245,6 +2245,9 @@ static void detach_ulds(struct adapter *adap) |
864 | { |
865 | unsigned int i; |
866 | |
867 | + if (!is_uld(adap)) |
868 | + return; |
869 | + |
870 | mutex_lock(&uld_mutex); |
871 | list_del(&adap->list_node); |
872 | |
873 | @@ -6152,10 +6155,13 @@ static void remove_one(struct pci_dev *pdev) |
874 | */ |
875 | destroy_workqueue(adapter->workq); |
876 | |
877 | - if (is_uld(adapter)) { |
878 | - detach_ulds(adapter); |
879 | - t4_uld_clean_up(adapter); |
880 | - } |
881 | + detach_ulds(adapter); |
882 | + |
883 | + for_each_port(adapter, i) |
884 | + if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
885 | + unregister_netdev(adapter->port[i]); |
886 | + |
887 | + t4_uld_clean_up(adapter); |
888 | |
889 | adap_free_hma_mem(adapter); |
890 | |
891 | @@ -6163,10 +6169,6 @@ static void remove_one(struct pci_dev *pdev) |
892 | |
893 | cxgb4_free_mps_ref_entries(adapter); |
894 | |
895 | - for_each_port(adapter, i) |
896 | - if (adapter->port[i]->reg_state == NETREG_REGISTERED) |
897 | - unregister_netdev(adapter->port[i]); |
898 | - |
899 | debugfs_remove_recursive(adapter->debugfs_root); |
900 | |
901 | if (!is_t4(adapter->params.chip)) |
902 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |
903 | index 86b528d8364c0..971bdd70b6d66 100644 |
904 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |
905 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c |
906 | @@ -638,6 +638,9 @@ void t4_uld_clean_up(struct adapter *adap) |
907 | { |
908 | unsigned int i; |
909 | |
910 | + if (!is_uld(adap)) |
911 | + return; |
912 | + |
913 | mutex_lock(&uld_mutex); |
914 | for (i = 0; i < CXGB4_ULD_MAX; i++) { |
915 | if (!adap->uld[i].handle) |
916 | diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c |
917 | index 1c4b35b1b3590..f8dfa7501f65a 100644 |
918 | --- a/drivers/net/ethernet/google/gve/gve_main.c |
919 | +++ b/drivers/net/ethernet/google/gve/gve_main.c |
920 | @@ -1170,13 +1170,16 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
921 | |
922 | err = register_netdev(dev); |
923 | if (err) |
924 | - goto abort_with_wq; |
925 | + goto abort_with_gve_init; |
926 | |
927 | dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); |
928 | gve_clear_probe_in_progress(priv); |
929 | queue_work(priv->gve_wq, &priv->service_task); |
930 | return 0; |
931 | |
932 | +abort_with_gve_init: |
933 | + gve_teardown_priv_resources(priv); |
934 | + |
935 | abort_with_wq: |
936 | destroy_workqueue(priv->gve_wq); |
937 | |
938 | diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c |
939 | index 2ffe035e96d6e..b5eae06dd8705 100644 |
940 | --- a/drivers/net/ethernet/hisilicon/hip04_eth.c |
941 | +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c |
942 | @@ -131,7 +131,7 @@ |
943 | /* buf unit size is cache_line_size, which is 64, so the shift is 6 */ |
944 | #define PPE_BUF_SIZE_SHIFT 6 |
945 | #define PPE_TX_BUF_HOLD BIT(31) |
946 | -#define CACHE_LINE_MASK 0x3F |
947 | +#define SOC_CACHE_LINE_MASK 0x3F |
948 | #else |
949 | #define PPE_CFG_QOS_VMID_GRP_SHIFT 8 |
950 | #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 |
951 | @@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
952 | #if defined(CONFIG_HI13X1_GMAC) |
953 | desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV |
954 | | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT); |
955 | - desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK); |
956 | - desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK); |
957 | + desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK); |
958 | + desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK); |
959 | #else |
960 | desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); |
961 | desc->send_addr = (__force u32)cpu_to_be32(phys); |
962 | diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c |
963 | index fc275d4f484c5..ea348ebbbf2e9 100644 |
964 | --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c |
965 | +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c |
966 | @@ -2119,6 +2119,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) |
967 | |
968 | static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) |
969 | { |
970 | + struct hnae3_handle *nic = &hdev->nic; |
971 | + int ret; |
972 | + |
973 | + ret = hclgevf_en_hw_strip_rxvtag(nic, true); |
974 | + if (ret) { |
975 | + dev_err(&hdev->pdev->dev, |
976 | + "failed to enable rx vlan offload, ret = %d\n", ret); |
977 | + return ret; |
978 | + } |
979 | + |
980 | return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, |
981 | false); |
982 | } |
983 | diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c |
984 | index a06d514215ede..cbd83bb5c1ac0 100644 |
985 | --- a/drivers/net/ethernet/intel/e1000e/netdev.c |
986 | +++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
987 | @@ -7401,6 +7401,7 @@ err_flashmap: |
988 | err_ioremap: |
989 | free_netdev(netdev); |
990 | err_alloc_etherdev: |
991 | + pci_disable_pcie_error_reporting(pdev); |
992 | pci_release_mem_regions(pdev); |
993 | err_pci_reg: |
994 | err_dma: |
995 | diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c |
996 | index bb236fa440487..36b016308c62f 100644 |
997 | --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c |
998 | +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c |
999 | @@ -2230,6 +2230,7 @@ err_sw_init: |
1000 | err_ioremap: |
1001 | free_netdev(netdev); |
1002 | err_alloc_netdev: |
1003 | + pci_disable_pcie_error_reporting(pdev); |
1004 | pci_release_mem_regions(pdev); |
1005 | err_pci_reg: |
1006 | err_dma: |
1007 | diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1008 | index a97e1f9ca1ede..cda9b9a8392a2 100644 |
1009 | --- a/drivers/net/ethernet/intel/iavf/iavf_main.c |
1010 | +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c |
1011 | @@ -3765,6 +3765,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1012 | err_ioremap: |
1013 | free_netdev(netdev); |
1014 | err_alloc_etherdev: |
1015 | + pci_disable_pcie_error_reporting(pdev); |
1016 | pci_release_regions(pdev); |
1017 | err_pci_reg: |
1018 | err_dma: |
1019 | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
1020 | index c37f0590b3a4d..158feb0ab2739 100644 |
1021 | --- a/drivers/net/ethernet/intel/igb/igb_main.c |
1022 | +++ b/drivers/net/ethernet/intel/igb/igb_main.c |
1023 | @@ -940,6 +940,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) |
1024 | **/ |
1025 | static int igb_request_msix(struct igb_adapter *adapter) |
1026 | { |
1027 | + unsigned int num_q_vectors = adapter->num_q_vectors; |
1028 | struct net_device *netdev = adapter->netdev; |
1029 | int i, err = 0, vector = 0, free_vector = 0; |
1030 | |
1031 | @@ -948,7 +949,13 @@ static int igb_request_msix(struct igb_adapter *adapter) |
1032 | if (err) |
1033 | goto err_out; |
1034 | |
1035 | - for (i = 0; i < adapter->num_q_vectors; i++) { |
1036 | + if (num_q_vectors > MAX_Q_VECTORS) { |
1037 | + num_q_vectors = MAX_Q_VECTORS; |
1038 | + dev_warn(&adapter->pdev->dev, |
1039 | + "The number of queue vectors (%d) is higher than max allowed (%d)\n", |
1040 | + adapter->num_q_vectors, MAX_Q_VECTORS); |
1041 | + } |
1042 | + for (i = 0; i < num_q_vectors; i++) { |
1043 | struct igb_q_vector *q_vector = adapter->q_vector[i]; |
1044 | |
1045 | vector++; |
1046 | @@ -1687,14 +1694,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) |
1047 | **/ |
1048 | static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) |
1049 | { |
1050 | - struct igb_ring *ring = adapter->tx_ring[queue]; |
1051 | struct net_device *netdev = adapter->netdev; |
1052 | struct e1000_hw *hw = &adapter->hw; |
1053 | + struct igb_ring *ring; |
1054 | u32 tqavcc, tqavctrl; |
1055 | u16 value; |
1056 | |
1057 | WARN_ON(hw->mac.type != e1000_i210); |
1058 | WARN_ON(queue < 0 || queue > 1); |
1059 | + ring = adapter->tx_ring[queue]; |
1060 | |
1061 | /* If any of the Qav features is enabled, configure queues as SR and |
1062 | * with HIGH PRIO. If none is, then configure them with LOW PRIO and |
1063 | @@ -3469,6 +3477,7 @@ err_sw_init: |
1064 | err_ioremap: |
1065 | free_netdev(netdev); |
1066 | err_alloc_etherdev: |
1067 | + pci_disable_pcie_error_reporting(pdev); |
1068 | pci_release_mem_regions(pdev); |
1069 | err_pci_reg: |
1070 | err_dma: |
1071 | @@ -4657,6 +4666,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) |
1072 | DMA_TO_DEVICE); |
1073 | } |
1074 | |
1075 | + tx_buffer->next_to_watch = NULL; |
1076 | + |
1077 | /* move us one more past the eop_desc for start of next pkt */ |
1078 | tx_buffer++; |
1079 | i++; |
1080 | diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h |
1081 | index 7e16345d836e1..aec998c82b694 100644 |
1082 | --- a/drivers/net/ethernet/intel/igc/igc.h |
1083 | +++ b/drivers/net/ethernet/intel/igc/igc.h |
1084 | @@ -504,7 +504,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) |
1085 | if (hw->phy.ops.read_reg) |
1086 | return hw->phy.ops.read_reg(hw, offset, data); |
1087 | |
1088 | - return 0; |
1089 | + return -EOPNOTSUPP; |
1090 | } |
1091 | |
1092 | /* forward declaration */ |
1093 | diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c |
1094 | index 6b43e1c5b1c3e..084cf4a4114ad 100644 |
1095 | --- a/drivers/net/ethernet/intel/igc/igc_main.c |
1096 | +++ b/drivers/net/ethernet/intel/igc/igc_main.c |
1097 | @@ -256,6 +256,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) |
1098 | DMA_TO_DEVICE); |
1099 | } |
1100 | |
1101 | + tx_buffer->next_to_watch = NULL; |
1102 | + |
1103 | /* move us one more past the eop_desc for start of next pkt */ |
1104 | tx_buffer++; |
1105 | i++; |
1106 | @@ -4310,8 +4312,8 @@ err_sw_init: |
1107 | err_ioremap: |
1108 | free_netdev(netdev); |
1109 | err_alloc_etherdev: |
1110 | - pci_release_selected_regions(pdev, |
1111 | - pci_select_bars(pdev, IORESOURCE_MEM)); |
1112 | + pci_disable_pcie_error_reporting(pdev); |
1113 | + pci_release_mem_regions(pdev); |
1114 | err_pci_reg: |
1115 | err_dma: |
1116 | pci_disable_device(pdev); |
1117 | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1118 | index 1b8e70585c44a..8a894e5d923f0 100644 |
1119 | --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1120 | +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1121 | @@ -1827,7 +1827,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, |
1122 | struct sk_buff *skb) |
1123 | { |
1124 | if (ring_uses_build_skb(rx_ring)) { |
1125 | - unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; |
1126 | + unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; |
1127 | + unsigned long offset = (unsigned long)(skb->data) & mask; |
1128 | |
1129 | dma_sync_single_range_for_cpu(rx_ring->dev, |
1130 | IXGBE_CB(skb)->dma, |
1131 | @@ -11207,6 +11208,7 @@ err_ioremap: |
1132 | disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); |
1133 | free_netdev(netdev); |
1134 | err_alloc_etherdev: |
1135 | + pci_disable_pcie_error_reporting(pdev); |
1136 | pci_release_mem_regions(pdev); |
1137 | err_pci_reg: |
1138 | err_dma: |
1139 | diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c |
1140 | index 5969f64169e53..fb51548c57e94 100644 |
1141 | --- a/drivers/net/ethernet/realtek/r8169_main.c |
1142 | +++ b/drivers/net/ethernet/realtek/r8169_main.c |
1143 | @@ -6850,7 +6850,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) |
1144 | new_bus->priv = tp; |
1145 | new_bus->parent = &pdev->dev; |
1146 | new_bus->irq[0] = PHY_IGNORE_INTERRUPT; |
1147 | - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); |
1148 | + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", |
1149 | + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); |
1150 | |
1151 | new_bus->read = r8169_mdio_read_reg; |
1152 | new_bus->write = r8169_mdio_write_reg; |
1153 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
1154 | index 710ab45eb679d..a5b5a2305791d 100644 |
1155 | --- a/drivers/nvme/host/core.c |
1156 | +++ b/drivers/nvme/host/core.c |
1157 | @@ -694,7 +694,10 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, |
1158 | cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req))); |
1159 | cmnd->write_zeroes.length = |
1160 | cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); |
1161 | - cmnd->write_zeroes.control = 0; |
1162 | + if (nvme_ns_has_pi(ns)) |
1163 | + cmnd->write_zeroes.control = cpu_to_le16(NVME_RW_PRINFO_PRACT); |
1164 | + else |
1165 | + cmnd->write_zeroes.control = 0; |
1166 | return BLK_STS_OK; |
1167 | } |
1168 | |
1169 | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
1170 | index 2cb2ead7615b2..af516c35afe6f 100644 |
1171 | --- a/drivers/nvme/host/pci.c |
1172 | +++ b/drivers/nvme/host/pci.c |
1173 | @@ -2590,7 +2590,9 @@ static void nvme_reset_work(struct work_struct *work) |
1174 | bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); |
1175 | int result; |
1176 | |
1177 | - if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { |
1178 | + if (dev->ctrl.state != NVME_CTRL_RESETTING) { |
1179 | + dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", |
1180 | + dev->ctrl.state); |
1181 | result = -ENODEV; |
1182 | goto out; |
1183 | } |
1184 | @@ -2954,7 +2956,6 @@ static void nvme_remove(struct pci_dev *pdev) |
1185 | if (!pci_device_is_present(pdev)) { |
1186 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); |
1187 | nvme_dev_disable(dev, true); |
1188 | - nvme_dev_remove_admin(dev); |
1189 | } |
1190 | |
1191 | flush_work(&dev->ctrl.reset_work); |
1192 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
1193 | index 3fe9a6f61f85c..6c1b936a94fac 100644 |
1194 | --- a/drivers/pci/quirks.c |
1195 | +++ b/drivers/pci/quirks.c |
1196 | @@ -5312,7 +5312,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); |
1197 | static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) |
1198 | { |
1199 | if ((pdev->device == 0x7312 && pdev->revision != 0x00) || |
1200 | - (pdev->device == 0x7340 && pdev->revision != 0xc5)) |
1201 | + (pdev->device == 0x7340 && pdev->revision != 0xc5) || |
1202 | + (pdev->device == 0x7341 && pdev->revision != 0x00)) |
1203 | return; |
1204 | |
1205 | pci_info(pdev, "disabling ATS\n"); |
1206 | @@ -5327,6 +5328,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); |
1207 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); |
1208 | /* AMD Navi14 dGPU */ |
1209 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); |
1210 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); |
1211 | #endif /* CONFIG_PCI_ATS */ |
1212 | |
1213 | /* Freescale PCIe doesn't support MSI in RC mode */ |
1214 | diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c |
1215 | index be23942274231..892d853d48a1a 100644 |
1216 | --- a/drivers/pwm/pwm-sprd.c |
1217 | +++ b/drivers/pwm/pwm-sprd.c |
1218 | @@ -180,13 +180,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, |
1219 | } |
1220 | } |
1221 | |
1222 | - if (state->period != cstate->period || |
1223 | - state->duty_cycle != cstate->duty_cycle) { |
1224 | - ret = sprd_pwm_config(spc, pwm, state->duty_cycle, |
1225 | - state->period); |
1226 | - if (ret) |
1227 | - return ret; |
1228 | - } |
1229 | + ret = sprd_pwm_config(spc, pwm, state->duty_cycle, |
1230 | + state->period); |
1231 | + if (ret) |
1232 | + return ret; |
1233 | |
1234 | sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1); |
1235 | } else if (cstate->enabled) { |
1236 | diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c |
1237 | index 5ac3d7c29725a..efc91b03a9bbc 100644 |
1238 | --- a/drivers/regulator/hi6421-regulator.c |
1239 | +++ b/drivers/regulator/hi6421-regulator.c |
1240 | @@ -366,9 +366,8 @@ static struct hi6421_regulator_info |
1241 | |
1242 | static int hi6421_regulator_enable(struct regulator_dev *rdev) |
1243 | { |
1244 | - struct hi6421_regulator_pdata *pdata; |
1245 | + struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev); |
1246 | |
1247 | - pdata = dev_get_drvdata(rdev->dev.parent); |
1248 | /* hi6421 spec requires regulator enablement must be serialized: |
1249 | * - Because when BUCK, LDO switching from off to on, it will have |
1250 | * a huge instantaneous current; so you can not turn on two or |
1251 | @@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev) |
1252 | |
1253 | static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) |
1254 | { |
1255 | - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); |
1256 | - u32 reg_val; |
1257 | + struct hi6421_regulator_info *info; |
1258 | + unsigned int reg_val; |
1259 | |
1260 | + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); |
1261 | regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); |
1262 | if (reg_val & info->mode_mask) |
1263 | return REGULATOR_MODE_IDLE; |
1264 | @@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) |
1265 | |
1266 | static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) |
1267 | { |
1268 | - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); |
1269 | - u32 reg_val; |
1270 | + struct hi6421_regulator_info *info; |
1271 | + unsigned int reg_val; |
1272 | |
1273 | + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); |
1274 | regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); |
1275 | if (reg_val & info->mode_mask) |
1276 | return REGULATOR_MODE_STANDBY; |
1277 | @@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) |
1278 | static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, |
1279 | unsigned int mode) |
1280 | { |
1281 | - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); |
1282 | - u32 new_mode; |
1283 | + struct hi6421_regulator_info *info; |
1284 | + unsigned int new_mode; |
1285 | |
1286 | + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); |
1287 | switch (mode) { |
1288 | case REGULATOR_MODE_NORMAL: |
1289 | new_mode = 0; |
1290 | @@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, |
1291 | static int hi6421_regulator_buck_set_mode(struct regulator_dev *rdev, |
1292 | unsigned int mode) |
1293 | { |
1294 | - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); |
1295 | - u32 new_mode; |
1296 | + struct hi6421_regulator_info *info; |
1297 | + unsigned int new_mode; |
1298 | |
1299 | + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); |
1300 | switch (mode) { |
1301 | case REGULATOR_MODE_NORMAL: |
1302 | new_mode = 0; |
1303 | @@ -459,7 +462,9 @@ static unsigned int |
1304 | hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, |
1305 | int input_uV, int output_uV, int load_uA) |
1306 | { |
1307 | - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); |
1308 | + struct hi6421_regulator_info *info; |
1309 | + |
1310 | + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); |
1311 | |
1312 | if (load_uA > info->eco_microamp) |
1313 | return REGULATOR_MODE_NORMAL; |
1314 | @@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev) |
1315 | if (!pdata) |
1316 | return -ENOMEM; |
1317 | mutex_init(&pdata->lock); |
1318 | - platform_set_drvdata(pdev, pdata); |
1319 | |
1320 | for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) { |
1321 | /* assign per-regulator data */ |
1322 | info = &hi6421_regulator_info[i]; |
1323 | |
1324 | config.dev = pdev->dev.parent; |
1325 | - config.driver_data = info; |
1326 | + config.driver_data = pdata; |
1327 | config.regmap = pmic->regmap; |
1328 | |
1329 | rdev = devm_regulator_register(&pdev->dev, &info->desc, |
1330 | diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c |
1331 | index 2f1553d0a10ed..77bba91b57147 100644 |
1332 | --- a/drivers/scsi/scsi_transport_iscsi.c |
1333 | +++ b/drivers/scsi/scsi_transport_iscsi.c |
1334 | @@ -432,39 +432,10 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, |
1335 | struct device *dev = container_of(kobj, struct device, kobj); |
1336 | struct iscsi_iface *iface = iscsi_dev_to_iface(dev); |
1337 | struct iscsi_transport *t = iface->transport; |
1338 | - int param; |
1339 | - int param_type; |
1340 | + int param = -1; |
1341 | |
1342 | if (attr == &dev_attr_iface_enabled.attr) |
1343 | param = ISCSI_NET_PARAM_IFACE_ENABLE; |
1344 | - else if (attr == &dev_attr_iface_vlan_id.attr) |
1345 | - param = ISCSI_NET_PARAM_VLAN_ID; |
1346 | - else if (attr == &dev_attr_iface_vlan_priority.attr) |
1347 | - param = ISCSI_NET_PARAM_VLAN_PRIORITY; |
1348 | - else if (attr == &dev_attr_iface_vlan_enabled.attr) |
1349 | - param = ISCSI_NET_PARAM_VLAN_ENABLED; |
1350 | - else if (attr == &dev_attr_iface_mtu.attr) |
1351 | - param = ISCSI_NET_PARAM_MTU; |
1352 | - else if (attr == &dev_attr_iface_port.attr) |
1353 | - param = ISCSI_NET_PARAM_PORT; |
1354 | - else if (attr == &dev_attr_iface_ipaddress_state.attr) |
1355 | - param = ISCSI_NET_PARAM_IPADDR_STATE; |
1356 | - else if (attr == &dev_attr_iface_delayed_ack_en.attr) |
1357 | - param = ISCSI_NET_PARAM_DELAYED_ACK_EN; |
1358 | - else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) |
1359 | - param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; |
1360 | - else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) |
1361 | - param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; |
1362 | - else if (attr == &dev_attr_iface_tcp_wsf.attr) |
1363 | - param = ISCSI_NET_PARAM_TCP_WSF; |
1364 | - else if (attr == &dev_attr_iface_tcp_timer_scale.attr) |
1365 | - param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; |
1366 | - else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) |
1367 | - param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; |
1368 | - else if (attr == &dev_attr_iface_cache_id.attr) |
1369 | - param = ISCSI_NET_PARAM_CACHE_ID; |
1370 | - else if (attr == &dev_attr_iface_redirect_en.attr) |
1371 | - param = ISCSI_NET_PARAM_REDIRECT_EN; |
1372 | else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr) |
1373 | param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO; |
1374 | else if (attr == &dev_attr_iface_header_digest.attr) |
1375 | @@ -501,6 +472,38 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, |
1376 | param = ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN; |
1377 | else if (attr == &dev_attr_iface_initiator_name.attr) |
1378 | param = ISCSI_IFACE_PARAM_INITIATOR_NAME; |
1379 | + |
1380 | + if (param != -1) |
1381 | + return t->attr_is_visible(ISCSI_IFACE_PARAM, param); |
1382 | + |
1383 | + if (attr == &dev_attr_iface_vlan_id.attr) |
1384 | + param = ISCSI_NET_PARAM_VLAN_ID; |
1385 | + else if (attr == &dev_attr_iface_vlan_priority.attr) |
1386 | + param = ISCSI_NET_PARAM_VLAN_PRIORITY; |
1387 | + else if (attr == &dev_attr_iface_vlan_enabled.attr) |
1388 | + param = ISCSI_NET_PARAM_VLAN_ENABLED; |
1389 | + else if (attr == &dev_attr_iface_mtu.attr) |
1390 | + param = ISCSI_NET_PARAM_MTU; |
1391 | + else if (attr == &dev_attr_iface_port.attr) |
1392 | + param = ISCSI_NET_PARAM_PORT; |
1393 | + else if (attr == &dev_attr_iface_ipaddress_state.attr) |
1394 | + param = ISCSI_NET_PARAM_IPADDR_STATE; |
1395 | + else if (attr == &dev_attr_iface_delayed_ack_en.attr) |
1396 | + param = ISCSI_NET_PARAM_DELAYED_ACK_EN; |
1397 | + else if (attr == &dev_attr_iface_tcp_nagle_disable.attr) |
1398 | + param = ISCSI_NET_PARAM_TCP_NAGLE_DISABLE; |
1399 | + else if (attr == &dev_attr_iface_tcp_wsf_disable.attr) |
1400 | + param = ISCSI_NET_PARAM_TCP_WSF_DISABLE; |
1401 | + else if (attr == &dev_attr_iface_tcp_wsf.attr) |
1402 | + param = ISCSI_NET_PARAM_TCP_WSF; |
1403 | + else if (attr == &dev_attr_iface_tcp_timer_scale.attr) |
1404 | + param = ISCSI_NET_PARAM_TCP_TIMER_SCALE; |
1405 | + else if (attr == &dev_attr_iface_tcp_timestamp_en.attr) |
1406 | + param = ISCSI_NET_PARAM_TCP_TIMESTAMP_EN; |
1407 | + else if (attr == &dev_attr_iface_cache_id.attr) |
1408 | + param = ISCSI_NET_PARAM_CACHE_ID; |
1409 | + else if (attr == &dev_attr_iface_redirect_en.attr) |
1410 | + param = ISCSI_NET_PARAM_REDIRECT_EN; |
1411 | else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { |
1412 | if (attr == &dev_attr_ipv4_iface_ipaddress.attr) |
1413 | param = ISCSI_NET_PARAM_IPV4_ADDR; |
1414 | @@ -591,32 +594,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj, |
1415 | return 0; |
1416 | } |
1417 | |
1418 | - switch (param) { |
1419 | - case ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO: |
1420 | - case ISCSI_IFACE_PARAM_HDRDGST_EN: |
1421 | - case ISCSI_IFACE_PARAM_DATADGST_EN: |
1422 | - case ISCSI_IFACE_PARAM_IMM_DATA_EN: |
1423 | - case ISCSI_IFACE_PARAM_INITIAL_R2T_EN: |
1424 | - case ISCSI_IFACE_PARAM_DATASEQ_INORDER_EN: |
1425 | - case ISCSI_IFACE_PARAM_PDU_INORDER_EN: |
1426 | - case ISCSI_IFACE_PARAM_ERL: |
1427 | - case ISCSI_IFACE_PARAM_MAX_RECV_DLENGTH: |
1428 | - case ISCSI_IFACE_PARAM_FIRST_BURST: |
1429 | - case ISCSI_IFACE_PARAM_MAX_R2T: |
1430 | - case ISCSI_IFACE_PARAM_MAX_BURST: |
1431 | - case ISCSI_IFACE_PARAM_CHAP_AUTH_EN: |
1432 | - case ISCSI_IFACE_PARAM_BIDI_CHAP_EN: |
1433 | - case ISCSI_IFACE_PARAM_DISCOVERY_AUTH_OPTIONAL: |
1434 | - case ISCSI_IFACE_PARAM_DISCOVERY_LOGOUT_EN: |
1435 | - case ISCSI_IFACE_PARAM_STRICT_LOGIN_COMP_EN: |
1436 | - case ISCSI_IFACE_PARAM_INITIATOR_NAME: |
1437 | - param_type = ISCSI_IFACE_PARAM; |
1438 | - break; |
1439 | - default: |
1440 | - param_type = ISCSI_NET_PARAM; |
1441 | - } |
1442 | - |
1443 | - return t->attr_is_visible(param_type, param); |
1444 | + return t->attr_is_visible(ISCSI_NET_PARAM, param); |
1445 | } |
1446 | |
1447 | static struct attribute *iscsi_iface_attrs[] = { |
1448 | diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c |
1449 | index 1d0c335b0bf88..5ac60d06c674d 100644 |
1450 | --- a/drivers/spi/spi-cadence.c |
1451 | +++ b/drivers/spi/spi-cadence.c |
1452 | @@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev) |
1453 | goto clk_dis_apb; |
1454 | } |
1455 | |
1456 | + pm_runtime_use_autosuspend(&pdev->dev); |
1457 | + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); |
1458 | + pm_runtime_get_noresume(&pdev->dev); |
1459 | + pm_runtime_set_active(&pdev->dev); |
1460 | + pm_runtime_enable(&pdev->dev); |
1461 | + |
1462 | ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs); |
1463 | if (ret < 0) |
1464 | master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS; |
1465 | @@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev) |
1466 | /* SPI controller initializations */ |
1467 | cdns_spi_init_hw(xspi); |
1468 | |
1469 | - pm_runtime_set_active(&pdev->dev); |
1470 | - pm_runtime_enable(&pdev->dev); |
1471 | - pm_runtime_use_autosuspend(&pdev->dev); |
1472 | - pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); |
1473 | - |
1474 | irq = platform_get_irq(pdev, 0); |
1475 | if (irq <= 0) { |
1476 | ret = -ENXIO; |
1477 | @@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev) |
1478 | |
1479 | master->bits_per_word_mask = SPI_BPW_MASK(8); |
1480 | |
1481 | + pm_runtime_mark_last_busy(&pdev->dev); |
1482 | + pm_runtime_put_autosuspend(&pdev->dev); |
1483 | + |
1484 | ret = spi_register_master(master); |
1485 | if (ret) { |
1486 | dev_err(&pdev->dev, "spi_register_master failed\n"); |
1487 | diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c |
1488 | index 09c9a1edb2c6d..e237481dbbbbf 100644 |
1489 | --- a/drivers/spi/spi-imx.c |
1490 | +++ b/drivers/spi/spi-imx.c |
1491 | @@ -64,8 +64,7 @@ struct spi_imx_data; |
1492 | struct spi_imx_devtype_data { |
1493 | void (*intctrl)(struct spi_imx_data *, int); |
1494 | int (*prepare_message)(struct spi_imx_data *, struct spi_message *); |
1495 | - int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *, |
1496 | - struct spi_transfer *); |
1497 | + int (*prepare_transfer)(struct spi_imx_data *, struct spi_device *); |
1498 | void (*trigger)(struct spi_imx_data *); |
1499 | int (*rx_available)(struct spi_imx_data *); |
1500 | void (*reset)(struct spi_imx_data *); |
1501 | @@ -564,11 +563,10 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx, |
1502 | } |
1503 | |
1504 | static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, |
1505 | - struct spi_device *spi, |
1506 | - struct spi_transfer *t) |
1507 | + struct spi_device *spi) |
1508 | { |
1509 | u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); |
1510 | - u32 clk = t->speed_hz, delay; |
1511 | + u32 clk, delay; |
1512 | |
1513 | /* Clear BL field and set the right value */ |
1514 | ctrl &= ~MX51_ECSPI_CTRL_BL_MASK; |
1515 | @@ -582,7 +580,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx, |
1516 | /* set clock speed */ |
1517 | ctrl &= ~(0xf << MX51_ECSPI_CTRL_POSTDIV_OFFSET | |
1518 | 0xf << MX51_ECSPI_CTRL_PREDIV_OFFSET); |
1519 | - ctrl |= mx51_ecspi_clkdiv(spi_imx, t->speed_hz, &clk); |
1520 | + ctrl |= mx51_ecspi_clkdiv(spi_imx, spi_imx->spi_bus_clk, &clk); |
1521 | spi_imx->spi_bus_clk = clk; |
1522 | |
1523 | if (spi_imx->usedma) |
1524 | @@ -694,13 +692,12 @@ static int mx31_prepare_message(struct spi_imx_data *spi_imx, |
1525 | } |
1526 | |
1527 | static int mx31_prepare_transfer(struct spi_imx_data *spi_imx, |
1528 | - struct spi_device *spi, |
1529 | - struct spi_transfer *t) |
1530 | + struct spi_device *spi) |
1531 | { |
1532 | unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; |
1533 | unsigned int clk; |
1534 | |
1535 | - reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) << |
1536 | + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) << |
1537 | MX31_CSPICTRL_DR_SHIFT; |
1538 | spi_imx->spi_bus_clk = clk; |
1539 | |
1540 | @@ -799,14 +796,13 @@ static int mx21_prepare_message(struct spi_imx_data *spi_imx, |
1541 | } |
1542 | |
1543 | static int mx21_prepare_transfer(struct spi_imx_data *spi_imx, |
1544 | - struct spi_device *spi, |
1545 | - struct spi_transfer *t) |
1546 | + struct spi_device *spi) |
1547 | { |
1548 | unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER; |
1549 | unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18; |
1550 | unsigned int clk; |
1551 | |
1552 | - reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, t->speed_hz, max, &clk) |
1553 | + reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, spi_imx->spi_bus_clk, max, &clk) |
1554 | << MX21_CSPICTRL_DR_SHIFT; |
1555 | spi_imx->spi_bus_clk = clk; |
1556 | |
1557 | @@ -875,13 +871,12 @@ static int mx1_prepare_message(struct spi_imx_data *spi_imx, |
1558 | } |
1559 | |
1560 | static int mx1_prepare_transfer(struct spi_imx_data *spi_imx, |
1561 | - struct spi_device *spi, |
1562 | - struct spi_transfer *t) |
1563 | + struct spi_device *spi) |
1564 | { |
1565 | unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; |
1566 | unsigned int clk; |
1567 | |
1568 | - reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, t->speed_hz, &clk) << |
1569 | + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, spi_imx->spi_bus_clk, &clk) << |
1570 | MX1_CSPICTRL_DR_SHIFT; |
1571 | spi_imx->spi_bus_clk = clk; |
1572 | |
1573 | @@ -1199,6 +1194,16 @@ static int spi_imx_setupxfer(struct spi_device *spi, |
1574 | if (!t) |
1575 | return 0; |
1576 | |
1577 | + if (!t->speed_hz) { |
1578 | + if (!spi->max_speed_hz) { |
1579 | + dev_err(&spi->dev, "no speed_hz provided!\n"); |
1580 | + return -EINVAL; |
1581 | + } |
1582 | + dev_dbg(&spi->dev, "using spi->max_speed_hz!\n"); |
1583 | + spi_imx->spi_bus_clk = spi->max_speed_hz; |
1584 | + } else |
1585 | + spi_imx->spi_bus_clk = t->speed_hz; |
1586 | + |
1587 | spi_imx->bits_per_word = t->bits_per_word; |
1588 | |
1589 | /* |
1590 | @@ -1240,7 +1245,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, |
1591 | spi_imx->slave_burst = t->len; |
1592 | } |
1593 | |
1594 | - spi_imx->devtype_data->prepare_transfer(spi_imx, spi, t); |
1595 | + spi_imx->devtype_data->prepare_transfer(spi_imx, spi); |
1596 | |
1597 | return 0; |
1598 | } |
1599 | diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c |
1600 | index 8acf24f7c5d40..81eac9fbd08cb 100644 |
1601 | --- a/drivers/spi/spi-mt65xx.c |
1602 | +++ b/drivers/spi/spi-mt65xx.c |
1603 | @@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master, |
1604 | mtk_spi_setup_packet(master); |
1605 | |
1606 | cnt = xfer->len / 4; |
1607 | - iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); |
1608 | + if (xfer->tx_buf) |
1609 | + iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt); |
1610 | + |
1611 | + if (xfer->rx_buf) |
1612 | + ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt); |
1613 | |
1614 | remainder = xfer->len % 4; |
1615 | if (remainder > 0) { |
1616 | reg_val = 0; |
1617 | - memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); |
1618 | - writel(reg_val, mdata->base + SPI_TX_DATA_REG); |
1619 | + if (xfer->tx_buf) { |
1620 | + memcpy(®_val, xfer->tx_buf + (cnt * 4), remainder); |
1621 | + writel(reg_val, mdata->base + SPI_TX_DATA_REG); |
1622 | + } |
1623 | + if (xfer->rx_buf) { |
1624 | + reg_val = readl(mdata->base + SPI_RX_DATA_REG); |
1625 | + memcpy(xfer->rx_buf + (cnt * 4), ®_val, remainder); |
1626 | + } |
1627 | } |
1628 | |
1629 | mtk_spi_enable_transfer(master); |
1630 | diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c |
1631 | index 3af6a5a3a4b29..e9d48e94f5edb 100644 |
1632 | --- a/drivers/spi/spi-stm32.c |
1633 | +++ b/drivers/spi/spi-stm32.c |
1634 | @@ -1908,35 +1908,48 @@ static int stm32_spi_probe(struct platform_device *pdev) |
1635 | master->transfer_one = stm32_spi_transfer_one; |
1636 | master->unprepare_message = stm32_spi_unprepare_msg; |
1637 | |
1638 | - spi->dma_tx = dma_request_slave_channel(spi->dev, "tx"); |
1639 | - if (!spi->dma_tx) |
1640 | + spi->dma_tx = dma_request_chan(spi->dev, "tx"); |
1641 | + if (IS_ERR(spi->dma_tx)) { |
1642 | + ret = PTR_ERR(spi->dma_tx); |
1643 | + spi->dma_tx = NULL; |
1644 | + if (ret == -EPROBE_DEFER) |
1645 | + goto err_clk_disable; |
1646 | + |
1647 | dev_warn(&pdev->dev, "failed to request tx dma channel\n"); |
1648 | - else |
1649 | + } else { |
1650 | master->dma_tx = spi->dma_tx; |
1651 | + } |
1652 | + |
1653 | + spi->dma_rx = dma_request_chan(spi->dev, "rx"); |
1654 | + if (IS_ERR(spi->dma_rx)) { |
1655 | + ret = PTR_ERR(spi->dma_rx); |
1656 | + spi->dma_rx = NULL; |
1657 | + if (ret == -EPROBE_DEFER) |
1658 | + goto err_dma_release; |
1659 | |
1660 | - spi->dma_rx = dma_request_slave_channel(spi->dev, "rx"); |
1661 | - if (!spi->dma_rx) |
1662 | dev_warn(&pdev->dev, "failed to request rx dma channel\n"); |
1663 | - else |
1664 | + } else { |
1665 | master->dma_rx = spi->dma_rx; |
1666 | + } |
1667 | |
1668 | if (spi->dma_tx || spi->dma_rx) |
1669 | master->can_dma = stm32_spi_can_dma; |
1670 | |
1671 | pm_runtime_set_active(&pdev->dev); |
1672 | + pm_runtime_get_noresume(&pdev->dev); |
1673 | pm_runtime_enable(&pdev->dev); |
1674 | |
1675 | ret = spi_register_master(master); |
1676 | if (ret) { |
1677 | dev_err(&pdev->dev, "spi master registration failed: %d\n", |
1678 | ret); |
1679 | - goto err_dma_release; |
1680 | + goto err_pm_disable; |
1681 | } |
1682 | |
1683 | if (!master->cs_gpios) { |
1684 | dev_err(&pdev->dev, "no CS gpios available\n"); |
1685 | ret = -EINVAL; |
1686 | - goto err_dma_release; |
1687 | + goto err_pm_disable; |
1688 | } |
1689 | |
1690 | for (i = 0; i < master->num_chipselect; i++) { |
1691 | @@ -1960,13 +1973,15 @@ static int stm32_spi_probe(struct platform_device *pdev) |
1692 | |
1693 | return 0; |
1694 | |
1695 | +err_pm_disable: |
1696 | + pm_runtime_disable(&pdev->dev); |
1697 | + pm_runtime_put_noidle(&pdev->dev); |
1698 | + pm_runtime_set_suspended(&pdev->dev); |
1699 | err_dma_release: |
1700 | if (spi->dma_tx) |
1701 | dma_release_channel(spi->dma_tx); |
1702 | if (spi->dma_rx) |
1703 | dma_release_channel(spi->dma_rx); |
1704 | - |
1705 | - pm_runtime_disable(&pdev->dev); |
1706 | err_clk_disable: |
1707 | clk_disable_unprepare(spi->clk); |
1708 | err_master_put: |
1709 | @@ -1980,9 +1995,14 @@ static int stm32_spi_remove(struct platform_device *pdev) |
1710 | struct spi_master *master = platform_get_drvdata(pdev); |
1711 | struct stm32_spi *spi = spi_master_get_devdata(master); |
1712 | |
1713 | + pm_runtime_get_sync(&pdev->dev); |
1714 | + |
1715 | spi_unregister_master(master); |
1716 | spi->cfg->disable(spi); |
1717 | |
1718 | + pm_runtime_disable(&pdev->dev); |
1719 | + pm_runtime_put_noidle(&pdev->dev); |
1720 | + pm_runtime_set_suspended(&pdev->dev); |
1721 | if (master->dma_tx) |
1722 | dma_release_channel(master->dma_tx); |
1723 | if (master->dma_rx) |
1724 | @@ -1990,7 +2010,6 @@ static int stm32_spi_remove(struct platform_device *pdev) |
1725 | |
1726 | clk_disable_unprepare(spi->clk); |
1727 | |
1728 | - pm_runtime_disable(&pdev->dev); |
1729 | |
1730 | pinctrl_pm_select_sleep_state(&pdev->dev); |
1731 | |
1732 | diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
1733 | index f1e81886122d0..e63c163dba788 100644 |
1734 | --- a/drivers/target/target_core_sbc.c |
1735 | +++ b/drivers/target/target_core_sbc.c |
1736 | @@ -25,7 +25,7 @@ |
1737 | #include "target_core_alua.h" |
1738 | |
1739 | static sense_reason_t |
1740 | -sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); |
1741 | +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char, u32, bool); |
1742 | static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd); |
1743 | |
1744 | static sense_reason_t |
1745 | @@ -279,14 +279,14 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) |
1746 | } |
1747 | |
1748 | static sense_reason_t |
1749 | -sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) |
1750 | +sbc_setup_write_same(struct se_cmd *cmd, unsigned char flags, struct sbc_ops *ops) |
1751 | { |
1752 | struct se_device *dev = cmd->se_dev; |
1753 | sector_t end_lba = dev->transport->get_blocks(dev) + 1; |
1754 | unsigned int sectors = sbc_get_write_same_sectors(cmd); |
1755 | sense_reason_t ret; |
1756 | |
1757 | - if ((flags[0] & 0x04) || (flags[0] & 0x02)) { |
1758 | + if ((flags & 0x04) || (flags & 0x02)) { |
1759 | pr_err("WRITE_SAME PBDATA and LBDATA" |
1760 | " bits not supported for Block Discard" |
1761 | " Emulation\n"); |
1762 | @@ -308,7 +308,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o |
1763 | } |
1764 | |
1765 | /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ |
1766 | - if (flags[0] & 0x10) { |
1767 | + if (flags & 0x10) { |
1768 | pr_warn("WRITE SAME with ANCHOR not supported\n"); |
1769 | return TCM_INVALID_CDB_FIELD; |
1770 | } |
1771 | @@ -316,7 +316,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o |
1772 | * Special case for WRITE_SAME w/ UNMAP=1 that ends up getting |
1773 | * translated into block discard requests within backend code. |
1774 | */ |
1775 | - if (flags[0] & 0x08) { |
1776 | + if (flags & 0x08) { |
1777 | if (!ops->execute_unmap) |
1778 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1779 | |
1780 | @@ -331,7 +331,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o |
1781 | if (!ops->execute_write_same) |
1782 | return TCM_UNSUPPORTED_SCSI_OPCODE; |
1783 | |
1784 | - ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); |
1785 | + ret = sbc_check_prot(dev, cmd, flags >> 5, sectors, true); |
1786 | if (ret) |
1787 | return ret; |
1788 | |
1789 | @@ -686,10 +686,9 @@ sbc_set_prot_op_checks(u8 protect, bool fabric_prot, enum target_prot_type prot_ |
1790 | } |
1791 | |
1792 | static sense_reason_t |
1793 | -sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, |
1794 | +sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char protect, |
1795 | u32 sectors, bool is_write) |
1796 | { |
1797 | - u8 protect = cdb[1] >> 5; |
1798 | int sp_ops = cmd->se_sess->sup_prot_ops; |
1799 | int pi_prot_type = dev->dev_attrib.pi_prot_type; |
1800 | bool fabric_prot = false; |
1801 | @@ -737,7 +736,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, |
1802 | /* Fallthrough */ |
1803 | default: |
1804 | pr_err("Unable to determine pi_prot_type for CDB: 0x%02x " |
1805 | - "PROTECT: 0x%02x\n", cdb[0], protect); |
1806 | + "PROTECT: 0x%02x\n", cmd->t_task_cdb[0], protect); |
1807 | return TCM_INVALID_CDB_FIELD; |
1808 | } |
1809 | |
1810 | @@ -812,7 +811,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1811 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1812 | return TCM_INVALID_CDB_FIELD; |
1813 | |
1814 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); |
1815 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); |
1816 | if (ret) |
1817 | return ret; |
1818 | |
1819 | @@ -826,7 +825,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1820 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1821 | return TCM_INVALID_CDB_FIELD; |
1822 | |
1823 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); |
1824 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); |
1825 | if (ret) |
1826 | return ret; |
1827 | |
1828 | @@ -840,7 +839,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1829 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1830 | return TCM_INVALID_CDB_FIELD; |
1831 | |
1832 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, false); |
1833 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, false); |
1834 | if (ret) |
1835 | return ret; |
1836 | |
1837 | @@ -861,7 +860,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1838 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1839 | return TCM_INVALID_CDB_FIELD; |
1840 | |
1841 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); |
1842 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); |
1843 | if (ret) |
1844 | return ret; |
1845 | |
1846 | @@ -875,7 +874,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1847 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1848 | return TCM_INVALID_CDB_FIELD; |
1849 | |
1850 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); |
1851 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); |
1852 | if (ret) |
1853 | return ret; |
1854 | |
1855 | @@ -890,7 +889,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1856 | if (sbc_check_dpofua(dev, cmd, cdb)) |
1857 | return TCM_INVALID_CDB_FIELD; |
1858 | |
1859 | - ret = sbc_check_prot(dev, cmd, cdb, sectors, true); |
1860 | + ret = sbc_check_prot(dev, cmd, cdb[1] >> 5, sectors, true); |
1861 | if (ret) |
1862 | return ret; |
1863 | |
1864 | @@ -949,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1865 | size = sbc_get_size(cmd, 1); |
1866 | cmd->t_task_lba = get_unaligned_be64(&cdb[12]); |
1867 | |
1868 | - ret = sbc_setup_write_same(cmd, &cdb[10], ops); |
1869 | + ret = sbc_setup_write_same(cmd, cdb[10], ops); |
1870 | if (ret) |
1871 | return ret; |
1872 | break; |
1873 | @@ -1048,7 +1047,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1874 | size = sbc_get_size(cmd, 1); |
1875 | cmd->t_task_lba = get_unaligned_be64(&cdb[2]); |
1876 | |
1877 | - ret = sbc_setup_write_same(cmd, &cdb[1], ops); |
1878 | + ret = sbc_setup_write_same(cmd, cdb[1], ops); |
1879 | if (ret) |
1880 | return ret; |
1881 | break; |
1882 | @@ -1066,7 +1065,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) |
1883 | * Follow sbcr26 with WRITE_SAME (10) and check for the existence |
1884 | * of byte 1 bit 3 UNMAP instead of original reserved field |
1885 | */ |
1886 | - ret = sbc_setup_write_same(cmd, &cdb[1], ops); |
1887 | + ret = sbc_setup_write_same(cmd, cdb[1], ops); |
1888 | if (ret) |
1889 | return ret; |
1890 | break; |
1891 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
1892 | index 3a2d9318604bb..303e8b3c1bdae 100644 |
1893 | --- a/drivers/usb/core/hub.c |
1894 | +++ b/drivers/usb/core/hub.c |
1895 | @@ -46,6 +46,7 @@ |
1896 | |
1897 | #define USB_TP_TRANSMISSION_DELAY 40 /* ns */ |
1898 | #define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */ |
1899 | +#define USB_PING_RESPONSE_TIME 400 /* ns */ |
1900 | |
1901 | /* Protect struct usb_device->state and ->children members |
1902 | * Note: Both are also protected by ->dev.sem, except that ->state can |
1903 | @@ -180,8 +181,9 @@ int usb_device_supports_lpm(struct usb_device *udev) |
1904 | } |
1905 | |
1906 | /* |
1907 | - * Set the Maximum Exit Latency (MEL) for the host to initiate a transition from |
1908 | - * either U1 or U2. |
1909 | + * Set the Maximum Exit Latency (MEL) for the host to wakup up the path from |
1910 | + * U1/U2, send a PING to the device and receive a PING_RESPONSE. |
1911 | + * See USB 3.1 section C.1.5.2 |
1912 | */ |
1913 | static void usb_set_lpm_mel(struct usb_device *udev, |
1914 | struct usb3_lpm_parameters *udev_lpm_params, |
1915 | @@ -191,35 +193,37 @@ static void usb_set_lpm_mel(struct usb_device *udev, |
1916 | unsigned int hub_exit_latency) |
1917 | { |
1918 | unsigned int total_mel; |
1919 | - unsigned int device_mel; |
1920 | - unsigned int hub_mel; |
1921 | |
1922 | /* |
1923 | - * Calculate the time it takes to transition all links from the roothub |
1924 | - * to the parent hub into U0. The parent hub must then decode the |
1925 | - * packet (hub header decode latency) to figure out which port it was |
1926 | - * bound for. |
1927 | - * |
1928 | - * The Hub Header decode latency is expressed in 0.1us intervals (0x1 |
1929 | - * means 0.1us). Multiply that by 100 to get nanoseconds. |
1930 | + * tMEL1. time to transition path from host to device into U0. |
1931 | + * MEL for parent already contains the delay up to parent, so only add |
1932 | + * the exit latency for the last link (pick the slower exit latency), |
1933 | + * and the hub header decode latency. See USB 3.1 section C 2.2.1 |
1934 | + * Store MEL in nanoseconds |
1935 | */ |
1936 | total_mel = hub_lpm_params->mel + |
1937 | - (hub->descriptor->u.ss.bHubHdrDecLat * 100); |
1938 | + max(udev_exit_latency, hub_exit_latency) * 1000 + |
1939 | + hub->descriptor->u.ss.bHubHdrDecLat * 100; |
1940 | |
1941 | /* |
1942 | - * How long will it take to transition the downstream hub's port into |
1943 | - * U0? The greater of either the hub exit latency or the device exit |
1944 | - * latency. |
1945 | - * |
1946 | - * The BOS U1/U2 exit latencies are expressed in 1us intervals. |
1947 | - * Multiply that by 1000 to get nanoseconds. |
1948 | + * tMEL2. Time to submit PING packet. Sum of tTPTransmissionDelay for |
1949 | + * each link + wHubDelay for each hub. Add only for last link. |
1950 | + * tMEL4, the time for PING_RESPONSE to traverse upstream is similar. |
1951 | + * Multiply by 2 to include it as well. |
1952 | */ |
1953 | - device_mel = udev_exit_latency * 1000; |
1954 | - hub_mel = hub_exit_latency * 1000; |
1955 | - if (device_mel > hub_mel) |
1956 | - total_mel += device_mel; |
1957 | - else |
1958 | - total_mel += hub_mel; |
1959 | + total_mel += (__le16_to_cpu(hub->descriptor->u.ss.wHubDelay) + |
1960 | + USB_TP_TRANSMISSION_DELAY) * 2; |
1961 | + |
1962 | + /* |
1963 | + * tMEL3, tPingResponse. Time taken by device to generate PING_RESPONSE |
1964 | + * after receiving PING. Also add 2100ns as stated in USB 3.1 C 1.5.2.4 |
1965 | + * to cover the delay if the PING_RESPONSE is queued behind a Max Packet |
1966 | + * Size DP. |
1967 | + * Note these delays should be added only once for the entire path, so |
1968 | + * add them to the MEL of the device connected to the roothub. |
1969 | + */ |
1970 | + if (!hub->hdev->parent) |
1971 | + total_mel += USB_PING_RESPONSE_TIME + 2100; |
1972 | |
1973 | udev_lpm_params->mel = total_mel; |
1974 | } |
1975 | @@ -4022,6 +4026,47 @@ static int usb_set_lpm_timeout(struct usb_device *udev, |
1976 | return 0; |
1977 | } |
1978 | |
1979 | +/* |
1980 | + * Don't allow device intiated U1/U2 if the system exit latency + one bus |
1981 | + * interval is greater than the minimum service interval of any active |
1982 | + * periodic endpoint. See USB 3.2 section 9.4.9 |
1983 | + */ |
1984 | +static bool usb_device_may_initiate_lpm(struct usb_device *udev, |
1985 | + enum usb3_link_state state) |
1986 | +{ |
1987 | + unsigned int sel; /* us */ |
1988 | + int i, j; |
1989 | + |
1990 | + if (state == USB3_LPM_U1) |
1991 | + sel = DIV_ROUND_UP(udev->u1_params.sel, 1000); |
1992 | + else if (state == USB3_LPM_U2) |
1993 | + sel = DIV_ROUND_UP(udev->u2_params.sel, 1000); |
1994 | + else |
1995 | + return false; |
1996 | + |
1997 | + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { |
1998 | + struct usb_interface *intf; |
1999 | + struct usb_endpoint_descriptor *desc; |
2000 | + unsigned int interval; |
2001 | + |
2002 | + intf = udev->actconfig->interface[i]; |
2003 | + if (!intf) |
2004 | + continue; |
2005 | + |
2006 | + for (j = 0; j < intf->cur_altsetting->desc.bNumEndpoints; j++) { |
2007 | + desc = &intf->cur_altsetting->endpoint[j].desc; |
2008 | + |
2009 | + if (usb_endpoint_xfer_int(desc) || |
2010 | + usb_endpoint_xfer_isoc(desc)) { |
2011 | + interval = (1 << (desc->bInterval - 1)) * 125; |
2012 | + if (sel + 125 > interval) |
2013 | + return false; |
2014 | + } |
2015 | + } |
2016 | + } |
2017 | + return true; |
2018 | +} |
2019 | + |
2020 | /* |
2021 | * Enable the hub-initiated U1/U2 idle timeouts, and enable device-initiated |
2022 | * U1/U2 entry. |
2023 | @@ -4094,20 +4139,23 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev, |
2024 | * U1/U2_ENABLE |
2025 | */ |
2026 | if (udev->actconfig && |
2027 | - usb_set_device_initiated_lpm(udev, state, true) == 0) { |
2028 | - if (state == USB3_LPM_U1) |
2029 | - udev->usb3_lpm_u1_enabled = 1; |
2030 | - else if (state == USB3_LPM_U2) |
2031 | - udev->usb3_lpm_u2_enabled = 1; |
2032 | - } else { |
2033 | - /* Don't request U1/U2 entry if the device |
2034 | - * cannot transition to U1/U2. |
2035 | - */ |
2036 | - usb_set_lpm_timeout(udev, state, 0); |
2037 | - hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); |
2038 | + usb_device_may_initiate_lpm(udev, state)) { |
2039 | + if (usb_set_device_initiated_lpm(udev, state, true)) { |
2040 | + /* |
2041 | + * Request to enable device initiated U1/U2 failed, |
2042 | + * better to turn off lpm in this case. |
2043 | + */ |
2044 | + usb_set_lpm_timeout(udev, state, 0); |
2045 | + hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state); |
2046 | + return; |
2047 | + } |
2048 | } |
2049 | -} |
2050 | |
2051 | + if (state == USB3_LPM_U1) |
2052 | + udev->usb3_lpm_u1_enabled = 1; |
2053 | + else if (state == USB3_LPM_U2) |
2054 | + udev->usb3_lpm_u2_enabled = 1; |
2055 | +} |
2056 | /* |
2057 | * Disable the hub-initiated U1/U2 idle timeouts, and disable device-initiated |
2058 | * U1/U2 entry. |
2059 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
2060 | index f6a6c54cba35f..d97544fd339b1 100644 |
2061 | --- a/drivers/usb/core/quirks.c |
2062 | +++ b/drivers/usb/core/quirks.c |
2063 | @@ -502,10 +502,6 @@ static const struct usb_device_id usb_quirk_list[] = { |
2064 | /* DJI CineSSD */ |
2065 | { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, |
2066 | |
2067 | - /* Fibocom L850-GL LTE Modem */ |
2068 | - { USB_DEVICE(0x2cb7, 0x0007), .driver_info = |
2069 | - USB_QUIRK_IGNORE_REMOTE_WAKEUP }, |
2070 | - |
2071 | /* INTEL VALUE SSD */ |
2072 | { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME }, |
2073 | |
2074 | diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c |
2075 | index 566bc1e604af4..66dfcdbd1e03a 100644 |
2076 | --- a/drivers/usb/dwc2/gadget.c |
2077 | +++ b/drivers/usb/dwc2/gadget.c |
2078 | @@ -2748,12 +2748,14 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg, |
2079 | return; |
2080 | } |
2081 | |
2082 | - /* Zlp for all endpoints, for ep0 only in DATA IN stage */ |
2083 | + /* Zlp for all endpoints in non DDMA, for ep0 only in DATA IN stage */ |
2084 | if (hs_ep->send_zlp) { |
2085 | - dwc2_hsotg_program_zlp(hsotg, hs_ep); |
2086 | hs_ep->send_zlp = 0; |
2087 | - /* transfer will be completed on next complete interrupt */ |
2088 | - return; |
2089 | + if (!using_desc_dma(hsotg)) { |
2090 | + dwc2_hsotg_program_zlp(hsotg, hs_ep); |
2091 | + /* transfer will be completed on next complete interrupt */ |
2092 | + return; |
2093 | + } |
2094 | } |
2095 | |
2096 | if (hs_ep->index == 0 && hsotg->ep0_state == DWC2_EP0_DATA_IN) { |
2097 | diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c |
2098 | index 903abdf30b5a0..5ef0747225f6b 100644 |
2099 | --- a/drivers/usb/host/max3421-hcd.c |
2100 | +++ b/drivers/usb/host/max3421-hcd.c |
2101 | @@ -153,8 +153,6 @@ struct max3421_hcd { |
2102 | */ |
2103 | struct urb *curr_urb; |
2104 | enum scheduling_pass sched_pass; |
2105 | - struct usb_device *loaded_dev; /* dev that's loaded into the chip */ |
2106 | - int loaded_epnum; /* epnum whose toggles are loaded */ |
2107 | int urb_done; /* > 0 -> no errors, < 0: errno */ |
2108 | size_t curr_len; |
2109 | u8 hien; |
2110 | @@ -492,39 +490,17 @@ max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) |
2111 | * Caller must NOT hold HCD spinlock. |
2112 | */ |
2113 | static void |
2114 | -max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, |
2115 | - int force_toggles) |
2116 | +max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum) |
2117 | { |
2118 | - struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); |
2119 | - int old_epnum, same_ep, rcvtog, sndtog; |
2120 | - struct usb_device *old_dev; |
2121 | + int rcvtog, sndtog; |
2122 | u8 hctl; |
2123 | |
2124 | - old_dev = max3421_hcd->loaded_dev; |
2125 | - old_epnum = max3421_hcd->loaded_epnum; |
2126 | - |
2127 | - same_ep = (dev == old_dev && epnum == old_epnum); |
2128 | - if (same_ep && !force_toggles) |
2129 | - return; |
2130 | - |
2131 | - if (old_dev && !same_ep) { |
2132 | - /* save the old end-points toggles: */ |
2133 | - u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); |
2134 | - |
2135 | - rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; |
2136 | - sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; |
2137 | - |
2138 | - /* no locking: HCD (i.e., we) own toggles, don't we? */ |
2139 | - usb_settoggle(old_dev, old_epnum, 0, rcvtog); |
2140 | - usb_settoggle(old_dev, old_epnum, 1, sndtog); |
2141 | - } |
2142 | /* setup new endpoint's toggle bits: */ |
2143 | rcvtog = usb_gettoggle(dev, epnum, 0); |
2144 | sndtog = usb_gettoggle(dev, epnum, 1); |
2145 | hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | |
2146 | BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); |
2147 | |
2148 | - max3421_hcd->loaded_epnum = epnum; |
2149 | spi_wr8(hcd, MAX3421_REG_HCTL, hctl); |
2150 | |
2151 | /* |
2152 | @@ -532,7 +508,6 @@ max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, |
2153 | * address-assignment so it's best to just always load the |
2154 | * address whenever the end-point changed/was forced. |
2155 | */ |
2156 | - max3421_hcd->loaded_dev = dev; |
2157 | spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); |
2158 | } |
2159 | |
2160 | @@ -667,7 +642,7 @@ max3421_select_and_start_urb(struct usb_hcd *hcd) |
2161 | struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); |
2162 | struct urb *urb, *curr_urb = NULL; |
2163 | struct max3421_ep *max3421_ep; |
2164 | - int epnum, force_toggles = 0; |
2165 | + int epnum; |
2166 | struct usb_host_endpoint *ep; |
2167 | struct list_head *pos; |
2168 | unsigned long flags; |
2169 | @@ -777,7 +752,6 @@ done: |
2170 | usb_settoggle(urb->dev, epnum, 0, 1); |
2171 | usb_settoggle(urb->dev, epnum, 1, 1); |
2172 | max3421_ep->pkt_state = PKT_STATE_SETUP; |
2173 | - force_toggles = 1; |
2174 | } else |
2175 | max3421_ep->pkt_state = PKT_STATE_TRANSFER; |
2176 | } |
2177 | @@ -785,7 +759,7 @@ done: |
2178 | spin_unlock_irqrestore(&max3421_hcd->lock, flags); |
2179 | |
2180 | max3421_ep->last_active = max3421_hcd->frame_number; |
2181 | - max3421_set_address(hcd, urb->dev, epnum, force_toggles); |
2182 | + max3421_set_address(hcd, urb->dev, epnum); |
2183 | max3421_set_speed(hcd, urb->dev); |
2184 | max3421_next_transfer(hcd, 0); |
2185 | return 1; |
2186 | @@ -1380,6 +1354,16 @@ max3421_urb_done(struct usb_hcd *hcd) |
2187 | status = 0; |
2188 | urb = max3421_hcd->curr_urb; |
2189 | if (urb) { |
2190 | + /* save the old end-points toggles: */ |
2191 | + u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); |
2192 | + int rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; |
2193 | + int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; |
2194 | + int epnum = usb_endpoint_num(&urb->ep->desc); |
2195 | + |
2196 | + /* no locking: HCD (i.e., we) own toggles, don't we? */ |
2197 | + usb_settoggle(urb->dev, epnum, 0, rcvtog); |
2198 | + usb_settoggle(urb->dev, epnum, 1, sndtog); |
2199 | + |
2200 | max3421_hcd->curr_urb = NULL; |
2201 | spin_lock_irqsave(&max3421_hcd->lock, flags); |
2202 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
2203 | diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c |
2204 | index 1a274f8a5bf11..6358d4e0653ed 100644 |
2205 | --- a/drivers/usb/host/xhci-hub.c |
2206 | +++ b/drivers/usb/host/xhci-hub.c |
2207 | @@ -1546,11 +1546,12 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf) |
2208 | * Inform the usbcore about resume-in-progress by returning |
2209 | * a non-zero value even if there are no status changes. |
2210 | */ |
2211 | + spin_lock_irqsave(&xhci->lock, flags); |
2212 | + |
2213 | status = bus_state->resuming_ports; |
2214 | |
2215 | mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC; |
2216 | |
2217 | - spin_lock_irqsave(&xhci->lock, flags); |
2218 | /* For each port, did anything change? If so, set that bit in buf. */ |
2219 | for (i = 0; i < max_ports; i++) { |
2220 | temp = readl(ports[i]->addr); |
2221 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
2222 | index f6b5010deb735..1228b3d92db06 100644 |
2223 | --- a/drivers/usb/host/xhci-ring.c |
2224 | +++ b/drivers/usb/host/xhci-ring.c |
2225 | @@ -440,6 +440,26 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, |
2226 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
2227 | } |
2228 | |
2229 | +static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci, |
2230 | + unsigned int slot_id, |
2231 | + unsigned int ep_index) |
2232 | +{ |
2233 | + if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) { |
2234 | + xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); |
2235 | + return NULL; |
2236 | + } |
2237 | + if (ep_index >= EP_CTX_PER_DEV) { |
2238 | + xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index); |
2239 | + return NULL; |
2240 | + } |
2241 | + if (!xhci->devs[slot_id]) { |
2242 | + xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id); |
2243 | + return NULL; |
2244 | + } |
2245 | + |
2246 | + return &xhci->devs[slot_id]->eps[ep_index]; |
2247 | +} |
2248 | + |
2249 | /* Get the right ring for the given slot_id, ep_index and stream_id. |
2250 | * If the endpoint supports streams, boundary check the URB's stream ID. |
2251 | * If the endpoint doesn't support streams, return the singular endpoint ring. |
2252 | @@ -450,7 +470,10 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, |
2253 | { |
2254 | struct xhci_virt_ep *ep; |
2255 | |
2256 | - ep = &xhci->devs[slot_id]->eps[ep_index]; |
2257 | + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2258 | + if (!ep) |
2259 | + return NULL; |
2260 | + |
2261 | /* Common case: no streams */ |
2262 | if (!(ep->ep_state & EP_HAS_STREAMS)) |
2263 | return ep->ring; |
2264 | @@ -743,11 +766,14 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, |
2265 | memset(&deq_state, 0, sizeof(deq_state)); |
2266 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
2267 | |
2268 | + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2269 | + if (!ep) |
2270 | + return; |
2271 | + |
2272 | vdev = xhci->devs[slot_id]; |
2273 | ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); |
2274 | trace_xhci_handle_cmd_stop_ep(ep_ctx); |
2275 | |
2276 | - ep = &xhci->devs[slot_id]->eps[ep_index]; |
2277 | last_unlinked_td = list_last_entry(&ep->cancelled_td_list, |
2278 | struct xhci_td, cancelled_td_list); |
2279 | |
2280 | @@ -1068,9 +1094,11 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, |
2281 | |
2282 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
2283 | stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); |
2284 | - dev = xhci->devs[slot_id]; |
2285 | - ep = &dev->eps[ep_index]; |
2286 | + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2287 | + if (!ep) |
2288 | + return; |
2289 | |
2290 | + dev = xhci->devs[slot_id]; |
2291 | ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id); |
2292 | if (!ep_ring) { |
2293 | xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n", |
2294 | @@ -1143,9 +1171,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, |
2295 | } |
2296 | |
2297 | cleanup: |
2298 | - dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING; |
2299 | - dev->eps[ep_index].queued_deq_seg = NULL; |
2300 | - dev->eps[ep_index].queued_deq_ptr = NULL; |
2301 | + ep->ep_state &= ~SET_DEQ_PENDING; |
2302 | + ep->queued_deq_seg = NULL; |
2303 | + ep->queued_deq_ptr = NULL; |
2304 | /* Restart any rings with pending URBs */ |
2305 | ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
2306 | } |
2307 | @@ -1154,10 +1182,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
2308 | union xhci_trb *trb, u32 cmd_comp_code) |
2309 | { |
2310 | struct xhci_virt_device *vdev; |
2311 | + struct xhci_virt_ep *ep; |
2312 | struct xhci_ep_ctx *ep_ctx; |
2313 | unsigned int ep_index; |
2314 | |
2315 | ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); |
2316 | + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2317 | + if (!ep) |
2318 | + return; |
2319 | + |
2320 | vdev = xhci->devs[slot_id]; |
2321 | ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); |
2322 | trace_xhci_handle_cmd_reset_ep(ep_ctx); |
2323 | @@ -1187,7 +1220,7 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
2324 | xhci_ring_cmd_db(xhci); |
2325 | } else { |
2326 | /* Clear our internal halted state */ |
2327 | - xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; |
2328 | + ep->ep_state &= ~EP_HALTED; |
2329 | } |
2330 | |
2331 | /* if this was a soft reset, then restart */ |
2332 | @@ -2356,14 +2389,13 @@ static int handle_tx_event(struct xhci_hcd *xhci, |
2333 | trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); |
2334 | ep_trb_dma = le64_to_cpu(event->buffer); |
2335 | |
2336 | - xdev = xhci->devs[slot_id]; |
2337 | - if (!xdev) { |
2338 | - xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n", |
2339 | - slot_id); |
2340 | + ep = xhci_get_virt_ep(xhci, slot_id, ep_index); |
2341 | + if (!ep) { |
2342 | + xhci_err(xhci, "ERROR Invalid Transfer event\n"); |
2343 | goto err_out; |
2344 | } |
2345 | |
2346 | - ep = &xdev->eps[ep_index]; |
2347 | + xdev = xhci->devs[slot_id]; |
2348 | ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma); |
2349 | ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); |
2350 | |
2351 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
2352 | index 8798ed0317864..834f32fe99308 100644 |
2353 | --- a/drivers/usb/host/xhci.h |
2354 | +++ b/drivers/usb/host/xhci.h |
2355 | @@ -993,6 +993,7 @@ struct xhci_interval_bw_table { |
2356 | unsigned int ss_bw_out; |
2357 | }; |
2358 | |
2359 | +#define EP_CTX_PER_DEV 31 |
2360 | |
2361 | struct xhci_virt_device { |
2362 | struct usb_device *udev; |
2363 | @@ -1007,7 +1008,7 @@ struct xhci_virt_device { |
2364 | struct xhci_container_ctx *out_ctx; |
2365 | /* Used for addressing devices and configuration changes */ |
2366 | struct xhci_container_ctx *in_ctx; |
2367 | - struct xhci_virt_ep eps[31]; |
2368 | + struct xhci_virt_ep eps[EP_CTX_PER_DEV]; |
2369 | u8 fake_port; |
2370 | u8 real_port; |
2371 | struct xhci_interval_bw_table *bw_table; |
2372 | diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c |
2373 | index cfc16943979d5..2010444376314 100644 |
2374 | --- a/drivers/usb/renesas_usbhs/fifo.c |
2375 | +++ b/drivers/usb/renesas_usbhs/fifo.c |
2376 | @@ -101,6 +101,8 @@ static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, |
2377 | #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) |
2378 | #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) |
2379 | static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); |
2380 | +static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable); |
2381 | +static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable); |
2382 | struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) |
2383 | { |
2384 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); |
2385 | @@ -123,6 +125,11 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) |
2386 | if (chan) { |
2387 | dmaengine_terminate_all(chan); |
2388 | usbhsf_dma_unmap(pkt); |
2389 | + } else { |
2390 | + if (usbhs_pipe_is_dir_in(pipe)) |
2391 | + usbhsf_rx_irq_ctrl(pipe, 0); |
2392 | + else |
2393 | + usbhsf_tx_irq_ctrl(pipe, 0); |
2394 | } |
2395 | |
2396 | usbhs_pipe_clear_without_sequence(pipe, 0, 0); |
2397 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
2398 | index db03212d620ad..8e5878ec656d0 100644 |
2399 | --- a/drivers/usb/serial/cp210x.c |
2400 | +++ b/drivers/usb/serial/cp210x.c |
2401 | @@ -156,6 +156,7 @@ static const struct usb_device_id id_table[] = { |
2402 | { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ |
2403 | { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */ |
2404 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ |
2405 | + { USB_DEVICE(0x10C4, 0x8A5B) }, /* CEL EM3588 ZigBee USB Stick */ |
2406 | { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ |
2407 | { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ |
2408 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
2409 | @@ -203,8 +204,8 @@ static const struct usb_device_id id_table[] = { |
2410 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
2411 | { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ |
2412 | { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ |
2413 | - { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 Display serial interface */ |
2414 | - { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 M.2 Key E serial interface */ |
2415 | + { USB_DEVICE(0x1901, 0x0197) }, /* GE CS1000 M.2 Key E serial interface */ |
2416 | + { USB_DEVICE(0x1901, 0x0198) }, /* GE CS1000 Display serial interface */ |
2417 | { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ |
2418 | { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ |
2419 | { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ |
2420 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2421 | index 25d8fb3a7395f..d6d10ba1e1e80 100644 |
2422 | --- a/drivers/usb/serial/option.c |
2423 | +++ b/drivers/usb/serial/option.c |
2424 | @@ -238,6 +238,7 @@ static void option_instat_callback(struct urb *urb); |
2425 | #define QUECTEL_PRODUCT_UC15 0x9090 |
2426 | /* These u-blox products use Qualcomm's vendor ID */ |
2427 | #define UBLOX_PRODUCT_R410M 0x90b2 |
2428 | +#define UBLOX_PRODUCT_R6XX 0x90fa |
2429 | /* These Yuga products use Qualcomm's vendor ID */ |
2430 | #define YUGA_PRODUCT_CLM920_NC5 0x9625 |
2431 | |
2432 | @@ -1101,6 +1102,8 @@ static const struct usb_device_id option_ids[] = { |
2433 | /* u-blox products using Qualcomm vendor ID */ |
2434 | { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), |
2435 | .driver_info = RSVD(1) | RSVD(3) }, |
2436 | + { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), |
2437 | + .driver_info = RSVD(3) }, |
2438 | /* Quectel products using Quectel vendor ID */ |
2439 | { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), |
2440 | .driver_info = NUMEP2 }, |
2441 | diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h |
2442 | index cb7b15ecb7aba..c7db6c943ba51 100644 |
2443 | --- a/drivers/usb/storage/unusual_uas.h |
2444 | +++ b/drivers/usb/storage/unusual_uas.h |
2445 | @@ -45,6 +45,13 @@ UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999, |
2446 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2447 | US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME), |
2448 | |
2449 | +/* Reported-by: Julian Sikorski <belegdol@gmail.com> */ |
2450 | +UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999, |
2451 | + "LaCie", |
2452 | + "Rugged USB3-FW", |
2453 | + USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2454 | + US_FL_IGNORE_UAS), |
2455 | + |
2456 | /* |
2457 | * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI |
2458 | * commands in UAS mode. Observed with the 1.28 firmware; are there others? |
2459 | diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c |
2460 | index fc5eb0f893049..c2e82b84c5546 100644 |
2461 | --- a/fs/afs/cmservice.c |
2462 | +++ b/fs/afs/cmservice.c |
2463 | @@ -29,16 +29,11 @@ static void SRXAFSCB_TellMeAboutYourself(struct work_struct *); |
2464 | |
2465 | static int afs_deliver_yfs_cb_callback(struct afs_call *); |
2466 | |
2467 | -#define CM_NAME(name) \ |
2468 | - char afs_SRXCB##name##_name[] __tracepoint_string = \ |
2469 | - "CB." #name |
2470 | - |
2471 | /* |
2472 | * CB.CallBack operation type |
2473 | */ |
2474 | -static CM_NAME(CallBack); |
2475 | static const struct afs_call_type afs_SRXCBCallBack = { |
2476 | - .name = afs_SRXCBCallBack_name, |
2477 | + .name = "CB.CallBack", |
2478 | .deliver = afs_deliver_cb_callback, |
2479 | .destructor = afs_cm_destructor, |
2480 | .work = SRXAFSCB_CallBack, |
2481 | @@ -47,9 +42,8 @@ static const struct afs_call_type afs_SRXCBCallBack = { |
2482 | /* |
2483 | * CB.InitCallBackState operation type |
2484 | */ |
2485 | -static CM_NAME(InitCallBackState); |
2486 | static const struct afs_call_type afs_SRXCBInitCallBackState = { |
2487 | - .name = afs_SRXCBInitCallBackState_name, |
2488 | + .name = "CB.InitCallBackState", |
2489 | .deliver = afs_deliver_cb_init_call_back_state, |
2490 | .destructor = afs_cm_destructor, |
2491 | .work = SRXAFSCB_InitCallBackState, |
2492 | @@ -58,9 +52,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState = { |
2493 | /* |
2494 | * CB.InitCallBackState3 operation type |
2495 | */ |
2496 | -static CM_NAME(InitCallBackState3); |
2497 | static const struct afs_call_type afs_SRXCBInitCallBackState3 = { |
2498 | - .name = afs_SRXCBInitCallBackState3_name, |
2499 | + .name = "CB.InitCallBackState3", |
2500 | .deliver = afs_deliver_cb_init_call_back_state3, |
2501 | .destructor = afs_cm_destructor, |
2502 | .work = SRXAFSCB_InitCallBackState, |
2503 | @@ -69,9 +62,8 @@ static const struct afs_call_type afs_SRXCBInitCallBackState3 = { |
2504 | /* |
2505 | * CB.Probe operation type |
2506 | */ |
2507 | -static CM_NAME(Probe); |
2508 | static const struct afs_call_type afs_SRXCBProbe = { |
2509 | - .name = afs_SRXCBProbe_name, |
2510 | + .name = "CB.Probe", |
2511 | .deliver = afs_deliver_cb_probe, |
2512 | .destructor = afs_cm_destructor, |
2513 | .work = SRXAFSCB_Probe, |
2514 | @@ -80,9 +72,8 @@ static const struct afs_call_type afs_SRXCBProbe = { |
2515 | /* |
2516 | * CB.ProbeUuid operation type |
2517 | */ |
2518 | -static CM_NAME(ProbeUuid); |
2519 | static const struct afs_call_type afs_SRXCBProbeUuid = { |
2520 | - .name = afs_SRXCBProbeUuid_name, |
2521 | + .name = "CB.ProbeUuid", |
2522 | .deliver = afs_deliver_cb_probe_uuid, |
2523 | .destructor = afs_cm_destructor, |
2524 | .work = SRXAFSCB_ProbeUuid, |
2525 | @@ -91,9 +82,8 @@ static const struct afs_call_type afs_SRXCBProbeUuid = { |
2526 | /* |
2527 | * CB.TellMeAboutYourself operation type |
2528 | */ |
2529 | -static CM_NAME(TellMeAboutYourself); |
2530 | static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { |
2531 | - .name = afs_SRXCBTellMeAboutYourself_name, |
2532 | + .name = "CB.TellMeAboutYourself", |
2533 | .deliver = afs_deliver_cb_tell_me_about_yourself, |
2534 | .destructor = afs_cm_destructor, |
2535 | .work = SRXAFSCB_TellMeAboutYourself, |
2536 | @@ -102,9 +92,8 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { |
2537 | /* |
2538 | * YFS CB.CallBack operation type |
2539 | */ |
2540 | -static CM_NAME(YFS_CallBack); |
2541 | static const struct afs_call_type afs_SRXYFSCB_CallBack = { |
2542 | - .name = afs_SRXCBYFS_CallBack_name, |
2543 | + .name = "YFSCB.CallBack", |
2544 | .deliver = afs_deliver_yfs_cb_callback, |
2545 | .destructor = afs_cm_destructor, |
2546 | .work = SRXAFSCB_CallBack, |
2547 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
2548 | index 832b40293907f..5273965226534 100644 |
2549 | --- a/fs/btrfs/extent-tree.c |
2550 | +++ b/fs/btrfs/extent-tree.c |
2551 | @@ -5768,6 +5768,9 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) |
2552 | mutex_lock(&fs_info->fs_devices->device_list_mutex); |
2553 | devices = &fs_info->fs_devices->devices; |
2554 | list_for_each_entry(device, devices, dev_list) { |
2555 | + if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) |
2556 | + continue; |
2557 | + |
2558 | ret = btrfs_trim_free_extents(device, &group_trimmed); |
2559 | if (ret) { |
2560 | dev_failed++; |
2561 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
2562 | index 64dd702a5448c..025b02e9799ff 100644 |
2563 | --- a/fs/btrfs/inode.c |
2564 | +++ b/fs/btrfs/inode.c |
2565 | @@ -543,7 +543,7 @@ again: |
2566 | * inode has not been flagged as nocompress. This flag can |
2567 | * change at any time if we discover bad compression ratios. |
2568 | */ |
2569 | - if (inode_need_compress(inode, start, end)) { |
2570 | + if (nr_pages > 1 && inode_need_compress(inode, start, end)) { |
2571 | WARN_ON(pages); |
2572 | pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); |
2573 | if (!pages) { |
2574 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
2575 | index 50ad3522ce365..358398b1fe0c9 100644 |
2576 | --- a/fs/hugetlbfs/inode.c |
2577 | +++ b/fs/hugetlbfs/inode.c |
2578 | @@ -76,7 +76,7 @@ enum hugetlb_param { |
2579 | static const struct fs_parameter_spec hugetlb_param_specs[] = { |
2580 | fsparam_u32 ("gid", Opt_gid), |
2581 | fsparam_string("min_size", Opt_min_size), |
2582 | - fsparam_u32 ("mode", Opt_mode), |
2583 | + fsparam_u32oct("mode", Opt_mode), |
2584 | fsparam_string("nr_inodes", Opt_nr_inodes), |
2585 | fsparam_string("pagesize", Opt_pagesize), |
2586 | fsparam_string("size", Opt_size), |
2587 | diff --git a/fs/proc/base.c b/fs/proc/base.c |
2588 | index 75e786684a4e3..90d2f62a96723 100644 |
2589 | --- a/fs/proc/base.c |
2590 | +++ b/fs/proc/base.c |
2591 | @@ -836,7 +836,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, |
2592 | flags = FOLL_FORCE | (write ? FOLL_WRITE : 0); |
2593 | |
2594 | while (count > 0) { |
2595 | - int this_len = min_t(int, count, PAGE_SIZE); |
2596 | + size_t this_len = min_t(size_t, count, PAGE_SIZE); |
2597 | |
2598 | if (write && copy_from_user(page, buf, this_len)) { |
2599 | copied = -EFAULT; |
2600 | diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c |
2601 | index d99d166fd8926..2c807283115d7 100644 |
2602 | --- a/fs/userfaultfd.c |
2603 | +++ b/fs/userfaultfd.c |
2604 | @@ -1272,23 +1272,21 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, |
2605 | } |
2606 | |
2607 | static __always_inline int validate_range(struct mm_struct *mm, |
2608 | - __u64 *start, __u64 len) |
2609 | + __u64 start, __u64 len) |
2610 | { |
2611 | __u64 task_size = mm->task_size; |
2612 | |
2613 | - *start = untagged_addr(*start); |
2614 | - |
2615 | - if (*start & ~PAGE_MASK) |
2616 | + if (start & ~PAGE_MASK) |
2617 | return -EINVAL; |
2618 | if (len & ~PAGE_MASK) |
2619 | return -EINVAL; |
2620 | if (!len) |
2621 | return -EINVAL; |
2622 | - if (*start < mmap_min_addr) |
2623 | + if (start < mmap_min_addr) |
2624 | return -EINVAL; |
2625 | - if (*start >= task_size) |
2626 | + if (start >= task_size) |
2627 | return -EINVAL; |
2628 | - if (len > task_size - *start) |
2629 | + if (len > task_size - start) |
2630 | return -EINVAL; |
2631 | return 0; |
2632 | } |
2633 | @@ -1338,7 +1336,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, |
2634 | goto out; |
2635 | } |
2636 | |
2637 | - ret = validate_range(mm, &uffdio_register.range.start, |
2638 | + ret = validate_range(mm, uffdio_register.range.start, |
2639 | uffdio_register.range.len); |
2640 | if (ret) |
2641 | goto out; |
2642 | @@ -1527,7 +1525,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, |
2643 | if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister))) |
2644 | goto out; |
2645 | |
2646 | - ret = validate_range(mm, &uffdio_unregister.start, |
2647 | + ret = validate_range(mm, uffdio_unregister.start, |
2648 | uffdio_unregister.len); |
2649 | if (ret) |
2650 | goto out; |
2651 | @@ -1678,7 +1676,7 @@ static int userfaultfd_wake(struct userfaultfd_ctx *ctx, |
2652 | if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake))) |
2653 | goto out; |
2654 | |
2655 | - ret = validate_range(ctx->mm, &uffdio_wake.start, uffdio_wake.len); |
2656 | + ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); |
2657 | if (ret) |
2658 | goto out; |
2659 | |
2660 | @@ -1718,7 +1716,7 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, |
2661 | sizeof(uffdio_copy)-sizeof(__s64))) |
2662 | goto out; |
2663 | |
2664 | - ret = validate_range(ctx->mm, &uffdio_copy.dst, uffdio_copy.len); |
2665 | + ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); |
2666 | if (ret) |
2667 | goto out; |
2668 | /* |
2669 | @@ -1774,7 +1772,7 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, |
2670 | sizeof(uffdio_zeropage)-sizeof(__s64))) |
2671 | goto out; |
2672 | |
2673 | - ret = validate_range(ctx->mm, &uffdio_zeropage.range.start, |
2674 | + ret = validate_range(ctx->mm, uffdio_zeropage.range.start, |
2675 | uffdio_zeropage.range.len); |
2676 | if (ret) |
2677 | goto out; |
2678 | diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h |
2679 | index 10100a4bbe2ad..afb27cb6a7bd8 100644 |
2680 | --- a/include/drm/drm_ioctl.h |
2681 | +++ b/include/drm/drm_ioctl.h |
2682 | @@ -68,6 +68,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, |
2683 | unsigned long arg); |
2684 | |
2685 | #define DRM_IOCTL_NR(n) _IOC_NR(n) |
2686 | +#define DRM_IOCTL_TYPE(n) _IOC_TYPE(n) |
2687 | #define DRM_MAJOR 226 |
2688 | |
2689 | /** |
2690 | diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h |
2691 | index c612cabbc378f..61af4af871190 100644 |
2692 | --- a/include/trace/events/afs.h |
2693 | +++ b/include/trace/events/afs.h |
2694 | @@ -111,6 +111,34 @@ enum afs_vl_operation { |
2695 | afs_VL_GetCapabilities = 65537, /* AFS Get VL server capabilities */ |
2696 | }; |
2697 | |
2698 | +enum afs_cm_operation { |
2699 | + afs_CB_CallBack = 204, /* AFS break callback promises */ |
2700 | + afs_CB_InitCallBackState = 205, /* AFS initialise callback state */ |
2701 | + afs_CB_Probe = 206, /* AFS probe client */ |
2702 | + afs_CB_GetLock = 207, /* AFS get contents of CM lock table */ |
2703 | + afs_CB_GetCE = 208, /* AFS get cache file description */ |
2704 | + afs_CB_GetXStatsVersion = 209, /* AFS get version of extended statistics */ |
2705 | + afs_CB_GetXStats = 210, /* AFS get contents of extended statistics data */ |
2706 | + afs_CB_InitCallBackState3 = 213, /* AFS initialise callback state, version 3 */ |
2707 | + afs_CB_ProbeUuid = 214, /* AFS check the client hasn't rebooted */ |
2708 | +}; |
2709 | + |
2710 | +enum yfs_cm_operation { |
2711 | + yfs_CB_Probe = 206, /* YFS probe client */ |
2712 | + yfs_CB_GetLock = 207, /* YFS get contents of CM lock table */ |
2713 | + yfs_CB_XStatsVersion = 209, /* YFS get version of extended statistics */ |
2714 | + yfs_CB_GetXStats = 210, /* YFS get contents of extended statistics data */ |
2715 | + yfs_CB_InitCallBackState3 = 213, /* YFS initialise callback state, version 3 */ |
2716 | + yfs_CB_ProbeUuid = 214, /* YFS check the client hasn't rebooted */ |
2717 | + yfs_CB_GetServerPrefs = 215, |
2718 | + yfs_CB_GetCellServDV = 216, |
2719 | + yfs_CB_GetLocalCell = 217, |
2720 | + yfs_CB_GetCacheConfig = 218, |
2721 | + yfs_CB_GetCellByNum = 65537, |
2722 | + yfs_CB_TellMeAboutYourself = 65538, /* get client capabilities */ |
2723 | + yfs_CB_CallBack = 64204, |
2724 | +}; |
2725 | + |
2726 | enum afs_edit_dir_op { |
2727 | afs_edit_dir_create, |
2728 | afs_edit_dir_create_error, |
2729 | @@ -312,6 +340,32 @@ enum afs_cb_break_reason { |
2730 | EM(afs_YFSVL_GetEndpoints, "YFSVL.GetEndpoints") \ |
2731 | E_(afs_VL_GetCapabilities, "VL.GetCapabilities") |
2732 | |
2733 | +#define afs_cm_operations \ |
2734 | + EM(afs_CB_CallBack, "CB.CallBack") \ |
2735 | + EM(afs_CB_InitCallBackState, "CB.InitCallBackState") \ |
2736 | + EM(afs_CB_Probe, "CB.Probe") \ |
2737 | + EM(afs_CB_GetLock, "CB.GetLock") \ |
2738 | + EM(afs_CB_GetCE, "CB.GetCE") \ |
2739 | + EM(afs_CB_GetXStatsVersion, "CB.GetXStatsVersion") \ |
2740 | + EM(afs_CB_GetXStats, "CB.GetXStats") \ |
2741 | + EM(afs_CB_InitCallBackState3, "CB.InitCallBackState3") \ |
2742 | + E_(afs_CB_ProbeUuid, "CB.ProbeUuid") |
2743 | + |
2744 | +#define yfs_cm_operations \ |
2745 | + EM(yfs_CB_Probe, "YFSCB.Probe") \ |
2746 | + EM(yfs_CB_GetLock, "YFSCB.GetLock") \ |
2747 | + EM(yfs_CB_XStatsVersion, "YFSCB.XStatsVersion") \ |
2748 | + EM(yfs_CB_GetXStats, "YFSCB.GetXStats") \ |
2749 | + EM(yfs_CB_InitCallBackState3, "YFSCB.InitCallBackState3") \ |
2750 | + EM(yfs_CB_ProbeUuid, "YFSCB.ProbeUuid") \ |
2751 | + EM(yfs_CB_GetServerPrefs, "YFSCB.GetServerPrefs") \ |
2752 | + EM(yfs_CB_GetCellServDV, "YFSCB.GetCellServDV") \ |
2753 | + EM(yfs_CB_GetLocalCell, "YFSCB.GetLocalCell") \ |
2754 | + EM(yfs_CB_GetCacheConfig, "YFSCB.GetCacheConfig") \ |
2755 | + EM(yfs_CB_GetCellByNum, "YFSCB.GetCellByNum") \ |
2756 | + EM(yfs_CB_TellMeAboutYourself, "YFSCB.TellMeAboutYourself") \ |
2757 | + E_(yfs_CB_CallBack, "YFSCB.CallBack") |
2758 | + |
2759 | #define afs_edit_dir_ops \ |
2760 | EM(afs_edit_dir_create, "create") \ |
2761 | EM(afs_edit_dir_create_error, "c_fail") \ |
2762 | @@ -442,6 +496,8 @@ afs_call_traces; |
2763 | afs_server_traces; |
2764 | afs_fs_operations; |
2765 | afs_vl_operations; |
2766 | +afs_cm_operations; |
2767 | +yfs_cm_operations; |
2768 | afs_edit_dir_ops; |
2769 | afs_edit_dir_reasons; |
2770 | afs_eproto_causes; |
2771 | @@ -522,20 +578,21 @@ TRACE_EVENT(afs_cb_call, |
2772 | |
2773 | TP_STRUCT__entry( |
2774 | __field(unsigned int, call ) |
2775 | - __field(const char *, name ) |
2776 | __field(u32, op ) |
2777 | + __field(u16, service_id ) |
2778 | ), |
2779 | |
2780 | TP_fast_assign( |
2781 | __entry->call = call->debug_id; |
2782 | - __entry->name = call->type->name; |
2783 | __entry->op = call->operation_ID; |
2784 | + __entry->service_id = call->service_id; |
2785 | ), |
2786 | |
2787 | - TP_printk("c=%08x %s o=%u", |
2788 | + TP_printk("c=%08x %s", |
2789 | __entry->call, |
2790 | - __entry->name, |
2791 | - __entry->op) |
2792 | + __entry->service_id == 2501 ? |
2793 | + __print_symbolic(__entry->op, yfs_cm_operations) : |
2794 | + __print_symbolic(__entry->op, afs_cm_operations)) |
2795 | ); |
2796 | |
2797 | TRACE_EVENT(afs_call, |
2798 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
2799 | index 7283741666538..5e1b9f6e77f31 100644 |
2800 | --- a/kernel/trace/ring_buffer.c |
2801 | +++ b/kernel/trace/ring_buffer.c |
2802 | @@ -3221,10 +3221,30 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
2803 | if (unlikely(!head)) |
2804 | return true; |
2805 | |
2806 | - return reader->read == rb_page_commit(reader) && |
2807 | - (commit == reader || |
2808 | - (commit == head && |
2809 | - head->read == rb_page_commit(commit))); |
2810 | + /* Reader should exhaust content in reader page */ |
2811 | + if (reader->read != rb_page_commit(reader)) |
2812 | + return false; |
2813 | + |
2814 | + /* |
2815 | + * If writers are committing on the reader page, knowing all |
2816 | + * committed content has been read, the ring buffer is empty. |
2817 | + */ |
2818 | + if (commit == reader) |
2819 | + return true; |
2820 | + |
2821 | + /* |
2822 | + * If writers are committing on a page other than reader page |
2823 | + * and head page, there should always be content to read. |
2824 | + */ |
2825 | + if (commit != head) |
2826 | + return false; |
2827 | + |
2828 | + /* |
2829 | + * Writers are committing on the head page, we just need |
2830 | + * to care about there're committed data, and the reader will |
2831 | + * swap reader page with head page when it is to read data. |
2832 | + */ |
2833 | + return rb_page_commit(commit) == 0; |
2834 | } |
2835 | |
2836 | /** |
2837 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
2838 | index 002412a1abf91..5240ba9a82db8 100644 |
2839 | --- a/kernel/trace/trace.c |
2840 | +++ b/kernel/trace/trace.c |
2841 | @@ -4975,6 +4975,10 @@ static const char readme_msg[] = |
2842 | "\t [:name=histname1]\n" |
2843 | "\t [:<handler>.<action>]\n" |
2844 | "\t [if <filter>]\n\n" |
2845 | + "\t Note, special fields can be used as well:\n" |
2846 | + "\t common_timestamp - to record current timestamp\n" |
2847 | + "\t common_cpu - to record the CPU the event happened on\n" |
2848 | + "\n" |
2849 | "\t When a matching event is hit, an entry is added to a hash\n" |
2850 | "\t table using the key(s) and value(s) named, and the value of a\n" |
2851 | "\t sum called 'hitcount' is incremented. Keys and values\n" |
2852 | diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c |
2853 | index f136449821bfe..553add1eb457f 100644 |
2854 | --- a/kernel/trace/trace_events_hist.c |
2855 | +++ b/kernel/trace/trace_events_hist.c |
2856 | @@ -2001,7 +2001,7 @@ static const char *hist_field_name(struct hist_field *field, |
2857 | field->flags & HIST_FIELD_FL_ALIAS) |
2858 | field_name = hist_field_name(field->operands[0], ++level); |
2859 | else if (field->flags & HIST_FIELD_FL_CPU) |
2860 | - field_name = "cpu"; |
2861 | + field_name = "common_cpu"; |
2862 | else if (field->flags & HIST_FIELD_FL_EXPR || |
2863 | field->flags & HIST_FIELD_FL_VAR_REF) { |
2864 | if (field->system) { |
2865 | @@ -2873,14 +2873,24 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file, |
2866 | hist_data->enable_timestamps = true; |
2867 | if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS) |
2868 | hist_data->attrs->ts_in_usecs = true; |
2869 | - } else if (strcmp(field_name, "cpu") == 0) |
2870 | + } else if (strcmp(field_name, "common_cpu") == 0) |
2871 | *flags |= HIST_FIELD_FL_CPU; |
2872 | else { |
2873 | field = trace_find_event_field(file->event_call, field_name); |
2874 | if (!field || !field->size) { |
2875 | - hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, errpos(field_name)); |
2876 | - field = ERR_PTR(-EINVAL); |
2877 | - goto out; |
2878 | + /* |
2879 | + * For backward compatibility, if field_name |
2880 | + * was "cpu", then we treat this the same as |
2881 | + * common_cpu. |
2882 | + */ |
2883 | + if (strcmp(field_name, "cpu") == 0) { |
2884 | + *flags |= HIST_FIELD_FL_CPU; |
2885 | + } else { |
2886 | + hist_err(tr, HIST_ERR_FIELD_NOT_FOUND, |
2887 | + errpos(field_name)); |
2888 | + field = ERR_PTR(-EINVAL); |
2889 | + goto out; |
2890 | + } |
2891 | } |
2892 | } |
2893 | out: |
2894 | @@ -5641,7 +5651,7 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field) |
2895 | seq_printf(m, "%s=", hist_field->var.name); |
2896 | |
2897 | if (hist_field->flags & HIST_FIELD_FL_CPU) |
2898 | - seq_puts(m, "cpu"); |
2899 | + seq_puts(m, "common_cpu"); |
2900 | else if (field_name) { |
2901 | if (hist_field->flags & HIST_FIELD_FL_VAR_REF || |
2902 | hist_field->flags & HIST_FIELD_FL_ALIAS) |
2903 | diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c |
2904 | index ef14da50a9819..8fa98c62c4fcf 100644 |
2905 | --- a/net/caif/caif_socket.c |
2906 | +++ b/net/caif/caif_socket.c |
2907 | @@ -539,7 +539,8 @@ static int caif_seqpkt_sendmsg(struct socket *sock, struct msghdr *msg, |
2908 | goto err; |
2909 | |
2910 | ret = -EINVAL; |
2911 | - if (unlikely(msg->msg_iter.iov->iov_base == NULL)) |
2912 | + if (unlikely(msg->msg_iter.nr_segs == 0) || |
2913 | + unlikely(msg->msg_iter.iov->iov_base == NULL)) |
2914 | goto err; |
2915 | noblock = msg->msg_flags & MSG_DONTWAIT; |
2916 | |
2917 | diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c |
2918 | index 3349ea81f9016..b9b847dc097cc 100644 |
2919 | --- a/net/decnet/af_decnet.c |
2920 | +++ b/net/decnet/af_decnet.c |
2921 | @@ -815,7 +815,7 @@ static int dn_auto_bind(struct socket *sock) |
2922 | static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) |
2923 | { |
2924 | struct dn_scp *scp = DN_SK(sk); |
2925 | - DEFINE_WAIT(wait); |
2926 | + DEFINE_WAIT_FUNC(wait, woken_wake_function); |
2927 | int err; |
2928 | |
2929 | if (scp->state != DN_CR) |
2930 | @@ -825,11 +825,11 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) |
2931 | scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk)); |
2932 | dn_send_conn_conf(sk, allocation); |
2933 | |
2934 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2935 | + add_wait_queue(sk_sleep(sk), &wait); |
2936 | for(;;) { |
2937 | release_sock(sk); |
2938 | if (scp->state == DN_CC) |
2939 | - *timeo = schedule_timeout(*timeo); |
2940 | + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); |
2941 | lock_sock(sk); |
2942 | err = 0; |
2943 | if (scp->state == DN_RUN) |
2944 | @@ -843,9 +843,8 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) |
2945 | err = -EAGAIN; |
2946 | if (!*timeo) |
2947 | break; |
2948 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2949 | } |
2950 | - finish_wait(sk_sleep(sk), &wait); |
2951 | + remove_wait_queue(sk_sleep(sk), &wait); |
2952 | if (err == 0) { |
2953 | sk->sk_socket->state = SS_CONNECTED; |
2954 | } else if (scp->state != DN_CC) { |
2955 | @@ -857,7 +856,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) |
2956 | static int dn_wait_run(struct sock *sk, long *timeo) |
2957 | { |
2958 | struct dn_scp *scp = DN_SK(sk); |
2959 | - DEFINE_WAIT(wait); |
2960 | + DEFINE_WAIT_FUNC(wait, woken_wake_function); |
2961 | int err = 0; |
2962 | |
2963 | if (scp->state == DN_RUN) |
2964 | @@ -866,11 +865,11 @@ static int dn_wait_run(struct sock *sk, long *timeo) |
2965 | if (!*timeo) |
2966 | return -EALREADY; |
2967 | |
2968 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2969 | + add_wait_queue(sk_sleep(sk), &wait); |
2970 | for(;;) { |
2971 | release_sock(sk); |
2972 | if (scp->state == DN_CI || scp->state == DN_CC) |
2973 | - *timeo = schedule_timeout(*timeo); |
2974 | + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); |
2975 | lock_sock(sk); |
2976 | err = 0; |
2977 | if (scp->state == DN_RUN) |
2978 | @@ -884,9 +883,8 @@ static int dn_wait_run(struct sock *sk, long *timeo) |
2979 | err = -ETIMEDOUT; |
2980 | if (!*timeo) |
2981 | break; |
2982 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2983 | } |
2984 | - finish_wait(sk_sleep(sk), &wait); |
2985 | + remove_wait_queue(sk_sleep(sk), &wait); |
2986 | out: |
2987 | if (err == 0) { |
2988 | sk->sk_socket->state = SS_CONNECTED; |
2989 | @@ -1031,16 +1029,16 @@ static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt) |
2990 | |
2991 | static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) |
2992 | { |
2993 | - DEFINE_WAIT(wait); |
2994 | + DEFINE_WAIT_FUNC(wait, woken_wake_function); |
2995 | struct sk_buff *skb = NULL; |
2996 | int err = 0; |
2997 | |
2998 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
2999 | + add_wait_queue(sk_sleep(sk), &wait); |
3000 | for(;;) { |
3001 | release_sock(sk); |
3002 | skb = skb_dequeue(&sk->sk_receive_queue); |
3003 | if (skb == NULL) { |
3004 | - *timeo = schedule_timeout(*timeo); |
3005 | + *timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, *timeo); |
3006 | skb = skb_dequeue(&sk->sk_receive_queue); |
3007 | } |
3008 | lock_sock(sk); |
3009 | @@ -1055,9 +1053,8 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo) |
3010 | err = -EAGAIN; |
3011 | if (!*timeo) |
3012 | break; |
3013 | - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
3014 | } |
3015 | - finish_wait(sk_sleep(sk), &wait); |
3016 | + remove_wait_queue(sk_sleep(sk), &wait); |
3017 | |
3018 | return skb == NULL ? ERR_PTR(err) : skb; |
3019 | } |
3020 | diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c |
3021 | index 819255ee4e42d..6a0c4326d9cf2 100644 |
3022 | --- a/net/ipv4/tcp_bpf.c |
3023 | +++ b/net/ipv4/tcp_bpf.c |
3024 | @@ -636,7 +636,7 @@ static int __init tcp_bpf_v4_build_proto(void) |
3025 | tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot); |
3026 | return 0; |
3027 | } |
3028 | -core_initcall(tcp_bpf_v4_build_proto); |
3029 | +late_initcall(tcp_bpf_v4_build_proto); |
3030 | |
3031 | static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock) |
3032 | { |
3033 | diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c |
3034 | index a9971e41f31bb..8af4fefe371f2 100644 |
3035 | --- a/net/ipv4/tcp_fastopen.c |
3036 | +++ b/net/ipv4/tcp_fastopen.c |
3037 | @@ -504,8 +504,15 @@ void tcp_fastopen_active_disable(struct sock *sk) |
3038 | { |
3039 | struct net *net = sock_net(sk); |
3040 | |
3041 | + /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */ |
3042 | + WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies); |
3043 | + |
3044 | + /* Paired with smp_rmb() in tcp_fastopen_active_should_disable(). |
3045 | + * We want net->ipv4.tfo_active_disable_stamp to be updated first. |
3046 | + */ |
3047 | + smp_mb__before_atomic(); |
3048 | atomic_inc(&net->ipv4.tfo_active_disable_times); |
3049 | - net->ipv4.tfo_active_disable_stamp = jiffies; |
3050 | + |
3051 | NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE); |
3052 | } |
3053 | |
3054 | @@ -523,10 +530,16 @@ bool tcp_fastopen_active_should_disable(struct sock *sk) |
3055 | if (!tfo_da_times) |
3056 | return false; |
3057 | |
3058 | + /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */ |
3059 | + smp_rmb(); |
3060 | + |
3061 | /* Limit timout to max: 2^6 * initial timeout */ |
3062 | multiplier = 1 << min(tfo_da_times - 1, 6); |
3063 | - timeout = multiplier * tfo_bh_timeout * HZ; |
3064 | - if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout)) |
3065 | + |
3066 | + /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */ |
3067 | + timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) + |
3068 | + multiplier * tfo_bh_timeout * HZ; |
3069 | + if (time_before(jiffies, timeout)) |
3070 | return true; |
3071 | |
3072 | /* Mark check bit so we can check for successful active TFO |
3073 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
3074 | index 4dcbb1ccab25f..33444d9856819 100644 |
3075 | --- a/net/ipv6/ip6_output.c |
3076 | +++ b/net/ipv6/ip6_output.c |
3077 | @@ -477,7 +477,9 @@ int ip6_forward(struct sk_buff *skb) |
3078 | if (skb_warn_if_lro(skb)) |
3079 | goto drop; |
3080 | |
3081 | - if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { |
3082 | + if (!net->ipv6.devconf_all->disable_policy && |
3083 | + !idev->cnf.disable_policy && |
3084 | + !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { |
3085 | __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); |
3086 | goto drop; |
3087 | } |
3088 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
3089 | index b903fe28ce507..d6fc22f7d7a67 100644 |
3090 | --- a/net/ipv6/route.c |
3091 | +++ b/net/ipv6/route.c |
3092 | @@ -3655,7 +3655,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, |
3093 | err = PTR_ERR(rt->fib6_metrics); |
3094 | /* Do not leave garbage there. */ |
3095 | rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics; |
3096 | - goto out; |
3097 | + goto out_free; |
3098 | } |
3099 | |
3100 | if (cfg->fc_flags & RTF_ADDRCONF) |
3101 | diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c |
3102 | index 9115f8a7dd45b..a8da88db7893f 100644 |
3103 | --- a/net/netrom/nr_timer.c |
3104 | +++ b/net/netrom/nr_timer.c |
3105 | @@ -121,11 +121,9 @@ static void nr_heartbeat_expiry(struct timer_list *t) |
3106 | is accepted() it isn't 'dead' so doesn't get removed. */ |
3107 | if (sock_flag(sk, SOCK_DESTROY) || |
3108 | (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) { |
3109 | - sock_hold(sk); |
3110 | bh_unlock_sock(sk); |
3111 | nr_destroy_socket(sk); |
3112 | - sock_put(sk); |
3113 | - return; |
3114 | + goto out; |
3115 | } |
3116 | break; |
3117 | |
3118 | @@ -146,6 +144,8 @@ static void nr_heartbeat_expiry(struct timer_list *t) |
3119 | |
3120 | nr_start_heartbeat(sk); |
3121 | bh_unlock_sock(sk); |
3122 | +out: |
3123 | + sock_put(sk); |
3124 | } |
3125 | |
3126 | static void nr_t2timer_expiry(struct timer_list *t) |
3127 | @@ -159,6 +159,7 @@ static void nr_t2timer_expiry(struct timer_list *t) |
3128 | nr_enquiry_response(sk); |
3129 | } |
3130 | bh_unlock_sock(sk); |
3131 | + sock_put(sk); |
3132 | } |
3133 | |
3134 | static void nr_t4timer_expiry(struct timer_list *t) |
3135 | @@ -169,6 +170,7 @@ static void nr_t4timer_expiry(struct timer_list *t) |
3136 | bh_lock_sock(sk); |
3137 | nr_sk(sk)->condition &= ~NR_COND_PEER_RX_BUSY; |
3138 | bh_unlock_sock(sk); |
3139 | + sock_put(sk); |
3140 | } |
3141 | |
3142 | static void nr_idletimer_expiry(struct timer_list *t) |
3143 | @@ -197,6 +199,7 @@ static void nr_idletimer_expiry(struct timer_list *t) |
3144 | sock_set_flag(sk, SOCK_DEAD); |
3145 | } |
3146 | bh_unlock_sock(sk); |
3147 | + sock_put(sk); |
3148 | } |
3149 | |
3150 | static void nr_t1timer_expiry(struct timer_list *t) |
3151 | @@ -209,8 +212,7 @@ static void nr_t1timer_expiry(struct timer_list *t) |
3152 | case NR_STATE_1: |
3153 | if (nr->n2count == nr->n2) { |
3154 | nr_disconnect(sk, ETIMEDOUT); |
3155 | - bh_unlock_sock(sk); |
3156 | - return; |
3157 | + goto out; |
3158 | } else { |
3159 | nr->n2count++; |
3160 | nr_write_internal(sk, NR_CONNREQ); |
3161 | @@ -220,8 +222,7 @@ static void nr_t1timer_expiry(struct timer_list *t) |
3162 | case NR_STATE_2: |
3163 | if (nr->n2count == nr->n2) { |
3164 | nr_disconnect(sk, ETIMEDOUT); |
3165 | - bh_unlock_sock(sk); |
3166 | - return; |
3167 | + goto out; |
3168 | } else { |
3169 | nr->n2count++; |
3170 | nr_write_internal(sk, NR_DISCREQ); |
3171 | @@ -231,8 +232,7 @@ static void nr_t1timer_expiry(struct timer_list *t) |
3172 | case NR_STATE_3: |
3173 | if (nr->n2count == nr->n2) { |
3174 | nr_disconnect(sk, ETIMEDOUT); |
3175 | - bh_unlock_sock(sk); |
3176 | - return; |
3177 | + goto out; |
3178 | } else { |
3179 | nr->n2count++; |
3180 | nr_requeue_frames(sk); |
3181 | @@ -241,5 +241,7 @@ static void nr_t1timer_expiry(struct timer_list *t) |
3182 | } |
3183 | |
3184 | nr_start_t1timer(sk); |
3185 | +out: |
3186 | bh_unlock_sock(sk); |
3187 | + sock_put(sk); |
3188 | } |
3189 | diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c |
3190 | index e858a0a9c0457..f60d349542b10 100644 |
3191 | --- a/net/sched/act_skbmod.c |
3192 | +++ b/net/sched/act_skbmod.c |
3193 | @@ -6,6 +6,7 @@ |
3194 | */ |
3195 | |
3196 | #include <linux/module.h> |
3197 | +#include <linux/if_arp.h> |
3198 | #include <linux/init.h> |
3199 | #include <linux/kernel.h> |
3200 | #include <linux/skbuff.h> |
3201 | @@ -33,6 +34,13 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, |
3202 | tcf_lastuse_update(&d->tcf_tm); |
3203 | bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb); |
3204 | |
3205 | + action = READ_ONCE(d->tcf_action); |
3206 | + if (unlikely(action == TC_ACT_SHOT)) |
3207 | + goto drop; |
3208 | + |
3209 | + if (!skb->dev || skb->dev->type != ARPHRD_ETHER) |
3210 | + return action; |
3211 | + |
3212 | /* XXX: if you are going to edit more fields beyond ethernet header |
3213 | * (example when you add IP header replacement or vlan swap) |
3214 | * then MAX_EDIT_LEN needs to change appropriately |
3215 | @@ -41,10 +49,6 @@ static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a, |
3216 | if (unlikely(err)) /* best policy is to drop on the floor */ |
3217 | goto drop; |
3218 | |
3219 | - action = READ_ONCE(d->tcf_action); |
3220 | - if (unlikely(action == TC_ACT_SHOT)) |
3221 | - goto drop; |
3222 | - |
3223 | p = rcu_dereference_bh(d->skbmod_p); |
3224 | flags = p->flags; |
3225 | if (flags & SKBMOD_F_DMAC) |
3226 | diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c |
3227 | index 83e5a8aa2fb11..7f20fd37e01e0 100644 |
3228 | --- a/net/sched/cls_api.c |
3229 | +++ b/net/sched/cls_api.c |
3230 | @@ -2866,7 +2866,7 @@ replay: |
3231 | break; |
3232 | case RTM_GETCHAIN: |
3233 | err = tc_chain_notify(chain, skb, n->nlmsg_seq, |
3234 | - n->nlmsg_seq, n->nlmsg_type, true); |
3235 | + n->nlmsg_flags, n->nlmsg_type, true); |
3236 | if (err < 0) |
3237 | NL_SET_ERR_MSG(extack, "Failed to send chain notify message"); |
3238 | break; |
3239 | diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c |
3240 | index 3e81f87d0c89f..684187a1fdb91 100644 |
3241 | --- a/net/sched/cls_tcindex.c |
3242 | +++ b/net/sched/cls_tcindex.c |
3243 | @@ -278,6 +278,8 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r, |
3244 | TCA_TCINDEX_POLICE); |
3245 | } |
3246 | |
3247 | +static void tcindex_free_perfect_hash(struct tcindex_data *cp); |
3248 | + |
3249 | static void tcindex_partial_destroy_work(struct work_struct *work) |
3250 | { |
3251 | struct tcindex_data *p = container_of(to_rcu_work(work), |
3252 | @@ -285,7 +287,8 @@ static void tcindex_partial_destroy_work(struct work_struct *work) |
3253 | rwork); |
3254 | |
3255 | rtnl_lock(); |
3256 | - kfree(p->perfect); |
3257 | + if (p->perfect) |
3258 | + tcindex_free_perfect_hash(p); |
3259 | kfree(p); |
3260 | rtnl_unlock(); |
3261 | } |
3262 | diff --git a/net/sctp/auth.c b/net/sctp/auth.c |
3263 | index 1d898ee4018c9..7eced1e523a5e 100644 |
3264 | --- a/net/sctp/auth.c |
3265 | +++ b/net/sctp/auth.c |
3266 | @@ -866,6 +866,8 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, |
3267 | if (replace) { |
3268 | list_del_init(&shkey->key_list); |
3269 | sctp_auth_shkey_release(shkey); |
3270 | + if (asoc && asoc->active_key_id == auth_key->sca_keynumber) |
3271 | + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); |
3272 | } |
3273 | list_add(&cur_key->key_list, sh_keys); |
3274 | |
3275 | diff --git a/sound/isa/sb/sb16_csp.c b/sound/isa/sb/sb16_csp.c |
3276 | index ef1289cc78a48..30021ab5e0e99 100644 |
3277 | --- a/sound/isa/sb/sb16_csp.c |
3278 | +++ b/sound/isa/sb/sb16_csp.c |
3279 | @@ -814,6 +814,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel |
3280 | mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); |
3281 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); |
3282 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); |
3283 | + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); |
3284 | |
3285 | spin_lock(&p->chip->reg_lock); |
3286 | set_mode_register(p->chip, 0xc0); /* c0 = STOP */ |
3287 | @@ -853,6 +854,7 @@ static int snd_sb_csp_start(struct snd_sb_csp * p, int sample_width, int channel |
3288 | spin_unlock(&p->chip->reg_lock); |
3289 | |
3290 | /* restore PCM volume */ |
3291 | + spin_lock_irqsave(&p->chip->mixer_lock, flags); |
3292 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); |
3293 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); |
3294 | spin_unlock_irqrestore(&p->chip->mixer_lock, flags); |
3295 | @@ -878,6 +880,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) |
3296 | mixR = snd_sbmixer_read(p->chip, SB_DSP4_PCM_DEV + 1); |
3297 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL & 0x7); |
3298 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR & 0x7); |
3299 | + spin_unlock_irqrestore(&p->chip->mixer_lock, flags); |
3300 | |
3301 | spin_lock(&p->chip->reg_lock); |
3302 | if (p->running & SNDRV_SB_CSP_ST_QSOUND) { |
3303 | @@ -892,6 +895,7 @@ static int snd_sb_csp_stop(struct snd_sb_csp * p) |
3304 | spin_unlock(&p->chip->reg_lock); |
3305 | |
3306 | /* restore PCM volume */ |
3307 | + spin_lock_irqsave(&p->chip->mixer_lock, flags); |
3308 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV, mixL); |
3309 | snd_sbmixer_write(p->chip, SB_DSP4_PCM_DEV + 1, mixR); |
3310 | spin_unlock_irqrestore(&p->chip->mixer_lock, flags); |
3311 | diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c |
3312 | index f620b402b309f..5128a5df16fd3 100644 |
3313 | --- a/sound/pci/hda/patch_hdmi.c |
3314 | +++ b/sound/pci/hda/patch_hdmi.c |
3315 | @@ -1820,6 +1820,7 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid) |
3316 | static const struct snd_pci_quirk force_connect_list[] = { |
3317 | SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1), |
3318 | SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1), |
3319 | + SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1), |
3320 | {} |
3321 | }; |
3322 | |
3323 | diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c |
3324 | index f70b9f7e68bba..281957a8fa867 100644 |
3325 | --- a/sound/soc/codecs/rt5631.c |
3326 | +++ b/sound/soc/codecs/rt5631.c |
3327 | @@ -1691,6 +1691,8 @@ static const struct regmap_config rt5631_regmap_config = { |
3328 | .reg_defaults = rt5631_reg, |
3329 | .num_reg_defaults = ARRAY_SIZE(rt5631_reg), |
3330 | .cache_type = REGCACHE_RBTREE, |
3331 | + .use_single_read = true, |
3332 | + .use_single_write = true, |
3333 | }; |
3334 | |
3335 | static int rt5631_i2c_probe(struct i2c_client *i2c, |
3336 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
3337 | index 22841ed2411e2..67eb1293fa155 100644 |
3338 | --- a/sound/usb/mixer.c |
3339 | +++ b/sound/usb/mixer.c |
3340 | @@ -3242,7 +3242,15 @@ static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, |
3341 | { |
3342 | struct usb_mixer_elem_info *cval = mixer_elem_list_to_info(list); |
3343 | static const char * const val_types[] = { |
3344 | - "BOOLEAN", "INV_BOOLEAN", "S8", "U8", "S16", "U16", "S32", "U32", |
3345 | + [USB_MIXER_BOOLEAN] = "BOOLEAN", |
3346 | + [USB_MIXER_INV_BOOLEAN] = "INV_BOOLEAN", |
3347 | + [USB_MIXER_S8] = "S8", |
3348 | + [USB_MIXER_U8] = "U8", |
3349 | + [USB_MIXER_S16] = "S16", |
3350 | + [USB_MIXER_U16] = "U16", |
3351 | + [USB_MIXER_S32] = "S32", |
3352 | + [USB_MIXER_U32] = "U32", |
3353 | + [USB_MIXER_BESPOKEN] = "BESPOKEN", |
3354 | }; |
3355 | snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, " |
3356 | "channels=%i, type=\"%s\"\n", cval->head.id, |
3357 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
3358 | index 186e90e3636c7..5b17a5c5785c0 100644 |
3359 | --- a/sound/usb/quirks.c |
3360 | +++ b/sound/usb/quirks.c |
3361 | @@ -1840,6 +1840,9 @@ static const struct registration_quirk registration_quirks[] = { |
3362 | REG_QUIRK_ENTRY(0x0951, 0x16d8, 2), /* Kingston HyperX AMP */ |
3363 | REG_QUIRK_ENTRY(0x0951, 0x16ed, 2), /* Kingston HyperX Cloud Alpha S */ |
3364 | REG_QUIRK_ENTRY(0x0951, 0x16ea, 2), /* Kingston HyperX Cloud Flight S */ |
3365 | + REG_QUIRK_ENTRY(0x0ecb, 0x1f46, 2), /* JBL Quantum 600 */ |
3366 | + REG_QUIRK_ENTRY(0x0ecb, 0x2039, 2), /* JBL Quantum 400 */ |
3367 | + REG_QUIRK_ENTRY(0x0ecb, 0x203e, 2), /* JBL Quantum 800 */ |
3368 | { 0 } /* terminator */ |
3369 | }; |
3370 | |
3371 | diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c |
3372 | index 88264abaa738a..a209f53901b8c 100644 |
3373 | --- a/tools/bpf/bpftool/common.c |
3374 | +++ b/tools/bpf/bpftool/common.c |
3375 | @@ -171,6 +171,11 @@ int mount_bpffs_for_pin(const char *name) |
3376 | int err = 0; |
3377 | |
3378 | file = malloc(strlen(name) + 1); |
3379 | + if (!file) { |
3380 | + p_err("mem alloc failed"); |
3381 | + return -1; |
3382 | + } |
3383 | + |
3384 | strcpy(file, name); |
3385 | dir = dirname(file); |
3386 | |
3387 | diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c |
3388 | index 0d524ef3606d7..6b6bb86d62d35 100644 |
3389 | --- a/tools/perf/builtin-inject.c |
3390 | +++ b/tools/perf/builtin-inject.c |
3391 | @@ -836,8 +836,10 @@ int cmd_inject(int argc, const char **argv) |
3392 | |
3393 | data.path = inject.input_name; |
3394 | inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool); |
3395 | - if (IS_ERR(inject.session)) |
3396 | - return PTR_ERR(inject.session); |
3397 | + if (IS_ERR(inject.session)) { |
3398 | + ret = PTR_ERR(inject.session); |
3399 | + goto out_close_output; |
3400 | + } |
3401 | |
3402 | if (zstd_init(&(inject.session->zstd_data), 0) < 0) |
3403 | pr_warning("Decompression initialization failed.\n"); |
3404 | @@ -874,5 +876,7 @@ int cmd_inject(int argc, const char **argv) |
3405 | out_delete: |
3406 | zstd_fini(&(inject.session->zstd_data)); |
3407 | perf_session__delete(inject.session); |
3408 | +out_close_output: |
3409 | + perf_data__close(&inject.output); |
3410 | return ret; |
3411 | } |
3412 | diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c |
3413 | index da016f398aa80..f3ff825d9dd33 100644 |
3414 | --- a/tools/perf/builtin-script.c |
3415 | +++ b/tools/perf/builtin-script.c |
3416 | @@ -2474,6 +2474,12 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script) |
3417 | } |
3418 | } |
3419 | |
3420 | +static void perf_script__exit(struct perf_script *script) |
3421 | +{ |
3422 | + perf_thread_map__put(script->threads); |
3423 | + perf_cpu_map__put(script->cpus); |
3424 | +} |
3425 | + |
3426 | static int __cmd_script(struct perf_script *script) |
3427 | { |
3428 | int ret; |
3429 | @@ -3893,6 +3899,7 @@ out_delete: |
3430 | |
3431 | perf_evlist__free_stats(session->evlist); |
3432 | perf_session__delete(session); |
3433 | + perf_script__exit(&script); |
3434 | |
3435 | if (script_started) |
3436 | cleanup_scripting(); |
3437 | diff --git a/tools/perf/tests/event_update.c b/tools/perf/tests/event_update.c |
3438 | index c727379cf20e1..195b29797acc4 100644 |
3439 | --- a/tools/perf/tests/event_update.c |
3440 | +++ b/tools/perf/tests/event_update.c |
3441 | @@ -119,6 +119,6 @@ int test__event_update(struct test *test __maybe_unused, int subtest __maybe_unu |
3442 | TEST_ASSERT_VAL("failed to synthesize attr update cpus", |
3443 | !perf_event__synthesize_event_update_cpus(&tmp.tool, evsel, process_event_cpus)); |
3444 | |
3445 | - perf_cpu_map__put(evsel->core.own_cpus); |
3446 | + evlist__delete(evlist); |
3447 | return 0; |
3448 | } |
3449 | diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c |
3450 | index 22daf2bdf5faf..f4a2c0df09549 100644 |
3451 | --- a/tools/perf/tests/topology.c |
3452 | +++ b/tools/perf/tests/topology.c |
3453 | @@ -52,6 +52,7 @@ static int session_write_header(char *path) |
3454 | TEST_ASSERT_VAL("failed to write header", |
3455 | !perf_session__write_header(session, session->evlist, data.file.fd, true)); |
3456 | |
3457 | + evlist__delete(session->evlist); |
3458 | perf_session__delete(session); |
3459 | |
3460 | return 0; |
3461 | diff --git a/tools/perf/util/data.c b/tools/perf/util/data.c |
3462 | index 7534455ffc6a6..a3f912615690f 100644 |
3463 | --- a/tools/perf/util/data.c |
3464 | +++ b/tools/perf/util/data.c |
3465 | @@ -20,7 +20,7 @@ |
3466 | |
3467 | static void close_dir(struct perf_data_file *files, int nr) |
3468 | { |
3469 | - while (--nr >= 1) { |
3470 | + while (--nr >= 0) { |
3471 | close(files[nr].fd); |
3472 | zfree(&files[nr].path); |
3473 | } |
3474 | diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c |
3475 | index ab2e130dc07a6..7f07a5dc555f8 100644 |
3476 | --- a/tools/perf/util/dso.c |
3477 | +++ b/tools/perf/util/dso.c |
3478 | @@ -1086,8 +1086,10 @@ struct map *dso__new_map(const char *name) |
3479 | struct map *map = NULL; |
3480 | struct dso *dso = dso__new(name); |
3481 | |
3482 | - if (dso) |
3483 | + if (dso) { |
3484 | map = map__new2(0, dso); |
3485 | + dso__put(dso); |
3486 | + } |
3487 | |
3488 | return map; |
3489 | } |
3490 | diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c |
3491 | index 018ecf7b6da9b..0fafcf264d235 100644 |
3492 | --- a/tools/perf/util/env.c |
3493 | +++ b/tools/perf/util/env.c |
3494 | @@ -175,6 +175,7 @@ void perf_env__exit(struct perf_env *env) |
3495 | zfree(&env->cpuid); |
3496 | zfree(&env->cmdline); |
3497 | zfree(&env->cmdline_argv); |
3498 | + zfree(&env->sibling_dies); |
3499 | zfree(&env->sibling_cores); |
3500 | zfree(&env->sibling_threads); |
3501 | zfree(&env->pmu_mappings); |
3502 | diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c |
3503 | index 39062df026291..51424cdc3b682 100644 |
3504 | --- a/tools/perf/util/lzma.c |
3505 | +++ b/tools/perf/util/lzma.c |
3506 | @@ -69,7 +69,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) |
3507 | |
3508 | if (ferror(infile)) { |
3509 | pr_err("lzma: read error: %s\n", strerror(errno)); |
3510 | - goto err_fclose; |
3511 | + goto err_lzma_end; |
3512 | } |
3513 | |
3514 | if (feof(infile)) |
3515 | @@ -83,7 +83,7 @@ int lzma_decompress_to_file(const char *input, int output_fd) |
3516 | |
3517 | if (writen(output_fd, buf_out, write_size) != write_size) { |
3518 | pr_err("lzma: write error: %s\n", strerror(errno)); |
3519 | - goto err_fclose; |
3520 | + goto err_lzma_end; |
3521 | } |
3522 | |
3523 | strm.next_out = buf_out; |
3524 | @@ -95,11 +95,13 @@ int lzma_decompress_to_file(const char *input, int output_fd) |
3525 | break; |
3526 | |
3527 | pr_err("lzma: failed %s\n", lzma_strerror(ret)); |
3528 | - goto err_fclose; |
3529 | + goto err_lzma_end; |
3530 | } |
3531 | } |
3532 | |
3533 | err = 0; |
3534 | +err_lzma_end: |
3535 | + lzma_end(&strm); |
3536 | err_fclose: |
3537 | fclose(infile); |
3538 | return err; |
3539 | diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c |
3540 | index 571e99c908a0e..1ae5c51a70359 100644 |
3541 | --- a/tools/perf/util/map.c |
3542 | +++ b/tools/perf/util/map.c |
3543 | @@ -214,6 +214,8 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, |
3544 | if (!(prot & PROT_EXEC)) |
3545 | dso__set_loaded(dso); |
3546 | } |
3547 | + |
3548 | + nsinfo__put(dso->nsinfo); |
3549 | dso->nsinfo = nsi; |
3550 | dso__put(dso); |
3551 | } |
3552 | diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c |
3553 | index a5cb1a3a10644..6357ac508ad1e 100644 |
3554 | --- a/tools/perf/util/probe-event.c |
3555 | +++ b/tools/perf/util/probe-event.c |
3556 | @@ -175,8 +175,10 @@ struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) |
3557 | struct map *map; |
3558 | |
3559 | map = dso__new_map(target); |
3560 | - if (map && map->dso) |
3561 | + if (map && map->dso) { |
3562 | + nsinfo__put(map->dso->nsinfo); |
3563 | map->dso->nsinfo = nsinfo__get(nsi); |
3564 | + } |
3565 | return map; |
3566 | } else { |
3567 | return kernel_get_module_map(target); |
3568 | diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c |
3569 | index f778f8e7e65a3..5558e2adebe4e 100644 |
3570 | --- a/tools/perf/util/probe-file.c |
3571 | +++ b/tools/perf/util/probe-file.c |
3572 | @@ -337,11 +337,11 @@ int probe_file__del_events(int fd, struct strfilter *filter) |
3573 | |
3574 | ret = probe_file__get_events(fd, filter, namelist); |
3575 | if (ret < 0) |
3576 | - return ret; |
3577 | + goto out; |
3578 | |
3579 | ret = probe_file__del_strlist(fd, namelist); |
3580 | +out: |
3581 | strlist__delete(namelist); |
3582 | - |
3583 | return ret; |
3584 | } |
3585 | |
3586 | diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh |
3587 | index bf361f30d6ef9..104a7a5f13b1e 100755 |
3588 | --- a/tools/testing/selftests/net/icmp_redirect.sh |
3589 | +++ b/tools/testing/selftests/net/icmp_redirect.sh |
3590 | @@ -309,9 +309,10 @@ check_exception() |
3591 | fi |
3592 | log_test $? 0 "IPv4: ${desc}" |
3593 | |
3594 | - if [ "$with_redirect" = "yes" ]; then |
3595 | + # No PMTU info for test "redirect" and "mtu exception plus redirect" |
3596 | + if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then |
3597 | ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ |
3598 | - grep -q "${H2_N2_IP6} from :: via ${R2_LLADDR} dev br0.*${mtu}" |
3599 | + grep -v "mtu" | grep -q "${H2_N2_IP6} .*via ${R2_LLADDR} dev br0" |
3600 | elif [ -n "${mtu}" ]; then |
3601 | ip -netns h1 -6 ro get ${H1_VRF_ARG} ${H2_N2_IP6} | \ |
3602 | grep -q "${mtu}" |
3603 | diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c |
3604 | index d3362777a4258..17ac167823a6d 100644 |
3605 | --- a/tools/testing/selftests/vm/userfaultfd.c |
3606 | +++ b/tools/testing/selftests/vm/userfaultfd.c |
3607 | @@ -139,8 +139,10 @@ static int anon_release_pages(char *rel_area) |
3608 | |
3609 | static void anon_allocate_area(void **alloc_area) |
3610 | { |
3611 | - if (posix_memalign(alloc_area, page_size, nr_pages * page_size)) { |
3612 | - fprintf(stderr, "out of memory\n"); |
3613 | + *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, |
3614 | + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); |
3615 | + if (*alloc_area == MAP_FAILED) |
3616 | + fprintf(stderr, "mmap of anonymous memory failed"); |
3617 | *alloc_area = NULL; |
3618 | } |
3619 | } |