Annotation of /trunk/kernel-alx/patches-4.19/0137-4.19.38-all-fixes.patch
Revision 3416
Fri Aug 2 11:47:46 2019 UTC (5 years, 1 month ago) by niro
File size: 127326 byte(s)
-linux-4.19.38
1 | niro | 3416 | diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt |
2 | index f5acf35c712f..8b6567f7cb9b 100644 | ||
3 | --- a/Documentation/admin-guide/kernel-parameters.txt | ||
4 | +++ b/Documentation/admin-guide/kernel-parameters.txt | ||
5 | @@ -2805,7 +2805,7 @@ | ||
6 | check bypass). With this option data leaks are possible | ||
7 | in the system. | ||
8 | |||
9 | - nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2 | ||
10 | + nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2 | ||
11 | (indirect branch prediction) vulnerability. System may | ||
12 | allow data leaks with this option, which is equivalent | ||
13 | to spectre_v2=off. | ||
14 | diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt | ||
15 | index 960de8fe3f40..2c31208528d5 100644 | ||
16 | --- a/Documentation/networking/ip-sysctl.txt | ||
17 | +++ b/Documentation/networking/ip-sysctl.txt | ||
18 | @@ -410,6 +410,7 @@ tcp_min_rtt_wlen - INTEGER | ||
19 | minimum RTT when it is moved to a longer path (e.g., due to traffic | ||
20 | engineering). A longer window makes the filter more resistant to RTT | ||
21 | inflations such as transient congestion. The unit is seconds. | ||
22 | + Possible values: 0 - 86400 (1 day) | ||
23 | Default: 300 | ||
24 | |||
25 | tcp_moderate_rcvbuf - BOOLEAN | ||
26 | diff --git a/Makefile b/Makefile | ||
27 | index 7b495cad8c2e..14d4aeb48907 100644 | ||
28 | --- a/Makefile | ||
29 | +++ b/Makefile | ||
30 | @@ -1,7 +1,7 @@ | ||
31 | # SPDX-License-Identifier: GPL-2.0 | ||
32 | VERSION = 4 | ||
33 | PATCHLEVEL = 19 | ||
34 | -SUBLEVEL = 37 | ||
35 | +SUBLEVEL = 38 | ||
36 | EXTRAVERSION = | ||
37 | NAME = "People's Front" | ||
38 | |||
39 | diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S | ||
40 | index 517e0e18f0b8..e205bbbe2794 100644 | ||
41 | --- a/arch/arm/boot/compressed/head.S | ||
42 | +++ b/arch/arm/boot/compressed/head.S | ||
43 | @@ -1395,7 +1395,21 @@ ENTRY(efi_stub_entry) | ||
44 | |||
45 | @ Preserve return value of efi_entry() in r4 | ||
46 | mov r4, r0 | ||
47 | - bl cache_clean_flush | ||
48 | + | ||
49 | + @ our cache maintenance code relies on CP15 barrier instructions | ||
50 | + @ but since we arrived here with the MMU and caches configured | ||
51 | + @ by UEFI, we must check that the CP15BEN bit is set in SCTLR. | ||
52 | + @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in | ||
53 | + @ the enable path will be executed on v7+ only. | ||
54 | + mrc p15, 0, r1, c1, c0, 0 @ read SCTLR | ||
55 | + tst r1, #(1 << 5) @ CP15BEN bit set? | ||
56 | + bne 0f | ||
57 | + orr r1, r1, #(1 << 5) @ CP15 barrier instructions | ||
58 | + mcr p15, 0, r1, c1, c0, 0 @ write SCTLR | ||
59 | + ARM( .inst 0xf57ff06f @ v7+ isb ) | ||
60 | + THUMB( isb ) | ||
61 | + | ||
62 | +0: bl cache_clean_flush | ||
63 | bl cache_off | ||
64 | |||
65 | @ Set parameters for booting zImage according to boot protocol | ||
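
The hunk above works around UEFI firmware that hands over control with SCTLR.CP15BEN clear: the decompressor's cache maintenance relies on legacy CP15 barrier instructions, which are UNDEFINED on ARMv7 when that bit is off. A minimal C rendering of the same read, check, set sequence, assuming ARMv7 and privileged execution; the inline asm mirrors the mrc/mcr pair in the assembly:

#define SCTLR_CP15BEN	(1u << 5)	/* CP15 barrier instruction enable */

static void enable_cp15_barriers(void)
{
	unsigned int sctlr;

	asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));
	if (sctlr & SCTLR_CP15BEN)
		return;				/* already enabled, nothing to do */

	sctlr |= SCTLR_CP15BEN;
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));
	asm volatile("isb");			/* complete the SCTLR write (v7+) */
}

On v6 and earlier the bit is RAO/WI, so only the read executes, exactly as the patch comment notes.
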
66 | diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S | ||
67 | index 73913f072e39..579608342ac6 100644 | ||
68 | --- a/arch/mips/kernel/scall64-o32.S | ||
69 | +++ b/arch/mips/kernel/scall64-o32.S | ||
70 | @@ -125,7 +125,7 @@ trace_a_syscall: | ||
71 | subu t1, v0, __NR_O32_Linux | ||
72 | move a1, v0 | ||
73 | bnez t1, 1f /* __NR_syscall at offset 0 */ | ||
74 | - lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ | ||
75 | + ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ | ||
76 | .set pop | ||
77 | |||
78 | 1: jal syscall_trace_enter | ||
79 | diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig | ||
80 | index 6bd5e7261335..ffeaed63675b 100644 | ||
81 | --- a/arch/powerpc/configs/skiroot_defconfig | ||
82 | +++ b/arch/powerpc/configs/skiroot_defconfig | ||
83 | @@ -195,6 +195,7 @@ CONFIG_UDF_FS=m | ||
84 | CONFIG_MSDOS_FS=m | ||
85 | CONFIG_VFAT_FS=m | ||
86 | CONFIG_PROC_KCORE=y | ||
87 | +CONFIG_HUGETLBFS=y | ||
88 | CONFIG_TMPFS=y | ||
89 | CONFIG_TMPFS_POSIX_ACL=y | ||
90 | # CONFIG_MISC_FILESYSTEMS is not set | ||
91 | diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S | ||
92 | index 769c2624e0a6..75cff3f336b3 100644 | ||
93 | --- a/arch/powerpc/kernel/vdso32/gettimeofday.S | ||
94 | +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S | ||
95 | @@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime) | ||
96 | * can be used, r7 contains NSEC_PER_SEC. | ||
97 | */ | ||
98 | |||
99 | - lwz r5,WTOM_CLOCK_SEC(r9) | ||
100 | + lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9) | ||
101 | lwz r6,WTOM_CLOCK_NSEC(r9) | ||
102 | |||
103 | /* We now have our offset in r5,r6. We create a fake dependency | ||
104 | diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype | ||
105 | index 6c6a7c72cae4..ad0216c41d2c 100644 | ||
106 | --- a/arch/powerpc/platforms/Kconfig.cputype | ||
107 | +++ b/arch/powerpc/platforms/Kconfig.cputype | ||
108 | @@ -330,7 +330,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK | ||
109 | |||
110 | config PPC_RADIX_MMU | ||
111 | bool "Radix MMU Support" | ||
112 | - depends on PPC_BOOK3S_64 | ||
113 | + depends on PPC_BOOK3S_64 && HUGETLB_PAGE | ||
114 | select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA | ||
115 | default y | ||
116 | help | ||
117 | diff --git a/arch/x86/Makefile b/arch/x86/Makefile | ||
118 | index b84f61bc5e7a..ffc823a8312f 100644 | ||
119 | --- a/arch/x86/Makefile | ||
120 | +++ b/arch/x86/Makefile | ||
121 | @@ -224,6 +224,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | ||
122 | # Avoid indirect branches in kernel to deal with Spectre | ||
123 | ifdef CONFIG_RETPOLINE | ||
124 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) | ||
125 | + # Additionally, avoid generating expensive indirect jumps which | ||
126 | + # are subject to retpolines for small number of switch cases. | ||
127 | + # clang turns off jump table generation by default when under | ||
128 | + # retpoline builds, however, gcc does not for x86. This has | ||
129 | + # only been fixed starting from gcc stable version 8.4.0 and | ||
130 | + # onwards, but not for older ones. See gcc bug #86952. | ||
131 | + ifndef CONFIG_CC_IS_CLANG | ||
132 | + KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables) | ||
133 | + endif | ||
134 | endif | ||
135 | |||
136 | archscripts: scripts_basic | ||
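
The comment in this hunk is about code generation, which is easier to see with an example. A dense switch like the sketch below is what gcc at -O2 typically lowers to a jump table, an indirect jmp through a table of labels, and under CONFIG_RETPOLINE every such indirect jump pays the retpoline thunk cost; -fno-jump-tables forces a compare/branch chain instead. The function is illustrative only:

/* gcc -O2 usually emits "jmp *table(,%rax,8)" for a switch this dense;
 * with -fno-jump-tables it falls back to direct compare/branch chains,
 * which retpolines leave untouched. */
int classify(int op)
{
	switch (op) {
	case 0:  return 10;
	case 1:  return 11;
	case 2:  return 12;
	case 3:  return 13;
	case 4:  return 14;
	case 5:  return 15;
	case 6:  return 16;
	default: return -1;
	}
}
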
137 | diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c | ||
138 | index 9f8084f18d58..6eb76106c469 100644 | ||
139 | --- a/arch/x86/events/intel/cstate.c | ||
140 | +++ b/arch/x86/events/intel/cstate.c | ||
141 | @@ -76,15 +76,15 @@ | ||
142 | * Scope: Package (physical package) | ||
143 | * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. | ||
144 | * perf code: 0x04 | ||
145 | - * Available model: HSW ULT,CNL | ||
146 | + * Available model: HSW ULT,KBL,CNL | ||
147 | * Scope: Package (physical package) | ||
148 | * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. | ||
149 | * perf code: 0x05 | ||
150 | - * Available model: HSW ULT,CNL | ||
151 | + * Available model: HSW ULT,KBL,CNL | ||
152 | * Scope: Package (physical package) | ||
153 | * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. | ||
154 | * perf code: 0x06 | ||
155 | - * Available model: HSW ULT,GLM,CNL | ||
156 | + * Available model: HSW ULT,KBL,GLM,CNL | ||
157 | * Scope: Package (physical package) | ||
158 | * | ||
159 | */ | ||
160 | @@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { | ||
161 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates), | ||
162 | X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates), | ||
163 | |||
164 | - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), | ||
165 | - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), | ||
166 | + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates), | ||
167 | + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates), | ||
168 | |||
169 | X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates), | ||
170 | |||
171 | diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h | ||
172 | index cec5fae23eb3..baa549f8e918 100644 | ||
173 | --- a/arch/x86/include/asm/efi.h | ||
174 | +++ b/arch/x86/include/asm/efi.h | ||
175 | @@ -82,8 +82,7 @@ struct efi_scratch { | ||
176 | #define arch_efi_call_virt_setup() \ | ||
177 | ({ \ | ||
178 | efi_sync_low_kernel_mappings(); \ | ||
179 | - preempt_disable(); \ | ||
180 | - __kernel_fpu_begin(); \ | ||
181 | + kernel_fpu_begin(); \ | ||
182 | firmware_restrict_branch_speculation_start(); \ | ||
183 | \ | ||
184 | if (!efi_enabled(EFI_OLD_MEMMAP)) \ | ||
185 | @@ -99,8 +98,7 @@ struct efi_scratch { | ||
186 | efi_switch_mm(efi_scratch.prev_mm); \ | ||
187 | \ | ||
188 | firmware_restrict_branch_speculation_end(); \ | ||
189 | - __kernel_fpu_end(); \ | ||
190 | - preempt_enable(); \ | ||
191 | + kernel_fpu_end(); \ | ||
192 | }) | ||
193 | |||
194 | extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size, | ||
195 | diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h | ||
196 | index a9caac9d4a72..b56d504af654 100644 | ||
197 | --- a/arch/x86/include/asm/fpu/api.h | ||
198 | +++ b/arch/x86/include/asm/fpu/api.h | ||
199 | @@ -12,17 +12,12 @@ | ||
200 | #define _ASM_X86_FPU_API_H | ||
201 | |||
202 | /* | ||
203 | - * Careful: __kernel_fpu_begin/end() must be called with preempt disabled | ||
204 | - * and they don't touch the preempt state on their own. | ||
205 | - * If you enable preemption after __kernel_fpu_begin(), preempt notifier | ||
206 | - * should call the __kernel_fpu_end() to prevent the kernel/user FPU | ||
207 | - * state from getting corrupted. KVM for example uses this model. | ||
208 | - * | ||
209 | - * All other cases use kernel_fpu_begin/end() which disable preemption | ||
210 | - * during kernel FPU usage. | ||
211 | + * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It | ||
212 | + * disables preemption so be careful if you intend to use it for long periods | ||
213 | + * of time. | ||
214 | + * If you intend to use the FPU in softirq you need to check first with | ||
215 | + * irq_fpu_usable() if it is possible. | ||
216 | */ | ||
217 | -extern void __kernel_fpu_begin(void); | ||
218 | -extern void __kernel_fpu_end(void); | ||
219 | extern void kernel_fpu_begin(void); | ||
220 | extern void kernel_fpu_end(void); | ||
221 | extern bool irq_fpu_usable(void); | ||
222 | diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c | ||
223 | index 2ea85b32421a..2e5003fef51a 100644 | ||
224 | --- a/arch/x86/kernel/fpu/core.c | ||
225 | +++ b/arch/x86/kernel/fpu/core.c | ||
226 | @@ -93,7 +93,7 @@ bool irq_fpu_usable(void) | ||
227 | } | ||
228 | EXPORT_SYMBOL(irq_fpu_usable); | ||
229 | |||
230 | -void __kernel_fpu_begin(void) | ||
231 | +static void __kernel_fpu_begin(void) | ||
232 | { | ||
232 | struct fpu *fpu = &current->thread.fpu; | ||
234 | |||
235 | @@ -111,9 +111,8 @@ void __kernel_fpu_begin(void) | ||
236 | __cpu_invalidate_fpregs_state(); | ||
237 | } | ||
238 | } | ||
239 | -EXPORT_SYMBOL(__kernel_fpu_begin); | ||
240 | |||
241 | -void __kernel_fpu_end(void) | ||
242 | +static void __kernel_fpu_end(void) | ||
243 | { | ||
244 | struct fpu *fpu = &current->thread.fpu; | ||
245 | |||
246 | @@ -122,7 +121,6 @@ void __kernel_fpu_end(void) | ||
247 | |||
248 | kernel_fpu_enable(); | ||
249 | } | ||
250 | -EXPORT_SYMBOL(__kernel_fpu_end); | ||
251 | |||
252 | void kernel_fpu_begin(void) | ||
253 | { | ||
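
The new comment in fpu/api.h now states the whole contract: kernel_fpu_begin()/kernel_fpu_end() manage preemption themselves, and softirq users must first ask irq_fpu_usable(). A sketch of a well-formed caller under that contract; xor_block_simd() is a hypothetical SIMD helper, not a kernel symbol:

static void xor_with_fpu(u8 *dst, const u8 *src, size_t len)
{
	if (in_interrupt() && !irq_fpu_usable())
		return;			/* real code would take a scalar fallback */

	kernel_fpu_begin();		/* saves task FPU state, disables preemption */
	xor_block_simd(dst, src, len);	/* hypothetical SIMD routine */
	kernel_fpu_end();		/* restores state, re-enables preemption */
}

With __kernel_fpu_begin()/__kernel_fpu_end() made static, out-of-tree callers can no longer pick the unbalanced variant by mistake.
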
254 | diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c | ||
255 | index 030c98f35cca..a654ccfd1a22 100644 | ||
256 | --- a/drivers/android/binder_alloc.c | ||
257 | +++ b/drivers/android/binder_alloc.c | ||
258 | @@ -958,14 +958,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | ||
259 | |||
260 | index = page - alloc->pages; | ||
261 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | ||
262 | + | ||
263 | + mm = alloc->vma_vm_mm; | ||
264 | + if (!mmget_not_zero(mm)) | ||
265 | + goto err_mmget; | ||
266 | + if (!down_write_trylock(&mm->mmap_sem)) | ||
267 | + goto err_down_write_mmap_sem_failed; | ||
268 | vma = binder_alloc_get_vma(alloc); | ||
269 | - if (vma) { | ||
270 | - if (!mmget_not_zero(alloc->vma_vm_mm)) | ||
271 | - goto err_mmget; | ||
272 | - mm = alloc->vma_vm_mm; | ||
273 | - if (!down_write_trylock(&mm->mmap_sem)) | ||
274 | - goto err_down_write_mmap_sem_failed; | ||
275 | - } | ||
276 | |||
277 | list_lru_isolate(lru, item); | ||
278 | spin_unlock(lock); | ||
279 | @@ -978,10 +977,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | ||
280 | PAGE_SIZE); | ||
281 | |||
282 | trace_binder_unmap_user_end(alloc, index); | ||
283 | - | ||
284 | - up_write(&mm->mmap_sem); | ||
285 | - mmput(mm); | ||
286 | } | ||
287 | + up_write(&mm->mmap_sem); | ||
288 | + mmput(mm); | ||
289 | |||
290 | trace_binder_unmap_kernel_start(alloc, index); | ||
291 | |||
292 | diff --git a/drivers/block/loop.c b/drivers/block/loop.c | ||
293 | index a63da9e07341..f1e63eb7cbca 100644 | ||
294 | --- a/drivers/block/loop.c | ||
295 | +++ b/drivers/block/loop.c | ||
296 | @@ -1112,8 +1112,9 @@ out_unlock: | ||
297 | err = __blkdev_reread_part(bdev); | ||
298 | else | ||
299 | err = blkdev_reread_part(bdev); | ||
300 | - pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", | ||
301 | - __func__, lo_number, err); | ||
302 | + if (err) | ||
303 | + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n", | ||
304 | + __func__, lo_number, err); | ||
305 | /* Device is gone, no point in returning error */ | ||
306 | err = 0; | ||
307 | } | ||
308 | diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c | ||
309 | index a65505db09e5..70cbd0ee1b07 100644 | ||
310 | --- a/drivers/block/zram/zram_drv.c | ||
311 | +++ b/drivers/block/zram/zram_drv.c | ||
312 | @@ -515,18 +515,18 @@ struct zram_work { | ||
313 | struct zram *zram; | ||
314 | unsigned long entry; | ||
315 | struct bio *bio; | ||
316 | + struct bio_vec bvec; | ||
317 | }; | ||
318 | |||
319 | #if PAGE_SIZE != 4096 | ||
320 | static void zram_sync_read(struct work_struct *work) | ||
321 | { | ||
322 | - struct bio_vec bvec; | ||
323 | struct zram_work *zw = container_of(work, struct zram_work, work); | ||
324 | struct zram *zram = zw->zram; | ||
325 | unsigned long entry = zw->entry; | ||
326 | struct bio *bio = zw->bio; | ||
327 | |||
328 | - read_from_bdev_async(zram, &bvec, entry, bio); | ||
329 | + read_from_bdev_async(zram, &zw->bvec, entry, bio); | ||
330 | } | ||
331 | |||
332 | /* | ||
333 | @@ -539,6 +539,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec, | ||
334 | { | ||
335 | struct zram_work work; | ||
336 | |||
337 | + work.bvec = *bvec; | ||
338 | work.zram = zram; | ||
339 | work.entry = entry; | ||
340 | work.bio = bio; | ||
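
The zram bug is a lifetime pattern worth spelling out: zram_sync_read() runs on a workqueue and handed read_from_bdev_async() the address of a bio_vec on its own stack, which the async read can outlive. The fix moves the bio_vec into struct zram_work, whose lifetime the waiting submitter controls. In general form, with assumed names but the same idiom:

struct my_work {
	struct work_struct work;
	struct bio_vec bvec;		/* owned by the work item */
};

static void my_work_fn(struct work_struct *work)
{
	struct my_work *w = container_of(work, struct my_work, work);

	/* &w->bvec stays valid for as long as the submitter keeps the
	 * work item alive; a local "struct bio_vec bvec;" here would
	 * die with this stack frame while the async read still runs. */
	start_async_read(&w->bvec);	/* stand-in for read_from_bdev_async() */
}
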
341 | diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c | ||
342 | index 48ee35e2bce6..0b05a1e08d21 100644 | ||
343 | --- a/drivers/dma/sh/rcar-dmac.c | ||
344 | +++ b/drivers/dma/sh/rcar-dmac.c | ||
345 | @@ -1281,6 +1281,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | ||
346 | enum dma_status status; | ||
347 | unsigned int residue = 0; | ||
348 | unsigned int dptr = 0; | ||
349 | + unsigned int chcrb; | ||
350 | + unsigned int tcrb; | ||
351 | + unsigned int i; | ||
352 | |||
353 | if (!desc) | ||
354 | return 0; | ||
355 | @@ -1328,6 +1331,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | + /* | ||
360 | + * We need to read two registers. | ||
361 | + * Make sure the control register does not skip to next chunk | ||
362 | + * while reading the counter. | ||
363 | + * Trying it 3 times should be enough: Initial read, retry, retry | ||
364 | + * for the paranoid. | ||
365 | + */ | ||
366 | + for (i = 0; i < 3; i++) { | ||
367 | + chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | ||
368 | + RCAR_DMACHCRB_DPTR_MASK; | ||
369 | + tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB); | ||
370 | + /* Still the same? */ | ||
371 | + if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | ||
372 | + RCAR_DMACHCRB_DPTR_MASK)) | ||
373 | + break; | ||
374 | + } | ||
375 | + WARN_ONCE(i >= 3, "residue might be not continuous!"); | ||
376 | + | ||
377 | /* | ||
378 | * In descriptor mode the descriptor running pointer is not maintained | ||
379 | * by the interrupt handler, find the running descriptor from the | ||
380 | @@ -1335,8 +1356,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | ||
381 | * mode just use the running descriptor pointer. | ||
382 | */ | ||
383 | if (desc->hwdescs.use) { | ||
384 | - dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | ||
385 | - RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; | ||
386 | + dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT; | ||
387 | if (dptr == 0) | ||
388 | dptr = desc->nchunks; | ||
389 | dptr--; | ||
390 | @@ -1354,7 +1374,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | ||
391 | } | ||
392 | |||
393 | /* Add the residue for the current chunk. */ | ||
394 | - residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; | ||
395 | + residue += tcrb << desc->xfer_shift; | ||
396 | |||
397 | return residue; | ||
398 | } | ||
399 | @@ -1367,6 +1387,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, | ||
400 | enum dma_status status; | ||
401 | unsigned long flags; | ||
402 | unsigned int residue; | ||
403 | + bool cyclic; | ||
404 | |||
405 | status = dma_cookie_status(chan, cookie, txstate); | ||
406 | if (status == DMA_COMPLETE || !txstate) | ||
407 | @@ -1374,10 +1395,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, | ||
408 | |||
409 | spin_lock_irqsave(&rchan->lock, flags); | ||
410 | residue = rcar_dmac_chan_get_residue(rchan, cookie); | ||
411 | + cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false; | ||
412 | spin_unlock_irqrestore(&rchan->lock, flags); | ||
413 | |||
414 | /* if there's no residue, the cookie is complete */ | ||
415 | - if (!residue) | ||
416 | + if (!residue && !cyclic) | ||
417 | return DMA_COMPLETE; | ||
418 | |||
419 | dma_set_residue(txstate, residue); | ||
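
The residue fix depends on reading two registers as one consistent pair: if the controller advances DPTR between reading CHCRB and TCRB, the counter belongs to a different chunk than the pointer. The retry loop above is the classic read-A, read-B, re-read-A idiom, sketched here with stand-in accessors:

static int read_pair_stable(u32 *a, u32 *b)
{
	unsigned int i;

	for (i = 0; i < 3; i++) {
		*a = read_reg_a();	/* stand-in for CHCRB & DPTR mask */
		*b = read_reg_b();	/* stand-in for TCRB */
		if (*a == read_reg_a())	/* A unchanged: B matches this A */
			return 0;
	}
	return -EIO;			/* still moving after three attempts */
}

The driver variant only warns and proceeds with the last pair, since a slightly stale residue is more useful to callers than no answer.
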
420 | diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c | ||
421 | index e41223c05f6e..6cf2e2ce4093 100644 | ||
422 | --- a/drivers/gpio/gpio-eic-sprd.c | ||
423 | +++ b/drivers/gpio/gpio-eic-sprd.c | ||
424 | @@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) | ||
425 | irq_set_handler_locked(data, handle_edge_irq); | ||
426 | break; | ||
427 | case IRQ_TYPE_EDGE_BOTH: | ||
428 | + sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0); | ||
429 | sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1); | ||
430 | irq_set_handler_locked(data, handle_edge_irq); | ||
431 | break; | ||
432 | diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c | ||
433 | index 6f91634880aa..2d6506c08bf7 100644 | ||
434 | --- a/drivers/gpu/drm/i915/intel_fbdev.c | ||
435 | +++ b/drivers/gpu/drm/i915/intel_fbdev.c | ||
436 | @@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | ||
437 | bool *enabled, int width, int height) | ||
438 | { | ||
439 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | ||
440 | + unsigned long conn_configured, conn_seq, mask; | ||
441 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | ||
442 | - unsigned long conn_configured, conn_seq; | ||
443 | int i, j; | ||
444 | bool *save_enabled; | ||
445 | bool fallback = true, ret = true; | ||
446 | @@ -353,9 +353,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | ||
447 | drm_modeset_backoff(&ctx); | ||
448 | |||
449 | memcpy(save_enabled, enabled, count); | ||
450 | - conn_seq = GENMASK(count - 1, 0); | ||
451 | + mask = GENMASK(count - 1, 0); | ||
452 | conn_configured = 0; | ||
453 | retry: | ||
454 | + conn_seq = conn_configured; | ||
455 | for (i = 0; i < count; i++) { | ||
456 | struct drm_fb_helper_connector *fb_conn; | ||
457 | struct drm_connector *connector; | ||
458 | @@ -368,8 +369,7 @@ retry: | ||
459 | if (conn_configured & BIT(i)) | ||
460 | continue; | ||
461 | |||
462 | - /* First pass, only consider tiled connectors */ | ||
463 | - if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) | ||
464 | + if (conn_seq == 0 && !connector->has_tile) | ||
465 | continue; | ||
466 | |||
467 | if (connector->status == connector_status_connected) | ||
468 | @@ -473,10 +473,8 @@ retry: | ||
469 | conn_configured |= BIT(i); | ||
470 | } | ||
471 | |||
472 | - if (conn_configured != conn_seq) { /* repeat until no more are found */ | ||
473 | - conn_seq = conn_configured; | ||
474 | + if ((conn_configured & mask) != mask && conn_configured != conn_seq) | ||
475 | goto retry; | ||
476 | - } | ||
477 | |||
478 | /* | ||
479 | * If the BIOS didn't enable everything it could, fall back to have the | ||
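
Reduced to its control flow, the fbdev rework replaces the old "conn_seq starts as the full mask" trick with a snapshot taken at the top of each pass: conn_seq == 0 now identifies the first pass (tiled connectors only), and the loop repeats while connectors remain and a pass still makes progress. A sketch with stand-in predicates is_tiled()/try_configure():

static void configure_all(unsigned int count)
{
	unsigned long conn_configured = 0, conn_seq;
	unsigned long mask = GENMASK(count - 1, 0);
	unsigned int i;

retry:
	conn_seq = conn_configured;	/* snapshot progress for this pass */
	for (i = 0; i < count; i++) {
		if (conn_configured & BIT(i))
			continue;
		/* first pass (nothing configured yet): tiled connectors only */
		if (conn_seq == 0 && !is_tiled(i))
			continue;
		if (try_configure(i))
			conn_configured |= BIT(i);
	}
	/* stop once all are configured or a pass made no progress */
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;
}
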
480 | diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c | ||
481 | index 5a485489a1e2..6c8b14fb1d2f 100644 | ||
482 | --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c | ||
483 | +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c | ||
484 | @@ -113,7 +113,7 @@ static int cdp_dp_mailbox_write(struct cdn_dp_device *dp, u8 val) | ||
485 | |||
486 | static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp, | ||
487 | u8 module_id, u8 opcode, | ||
488 | - u8 req_size) | ||
489 | + u16 req_size) | ||
490 | { | ||
491 | u32 mbox_size, i; | ||
492 | u8 header[4]; | ||
493 | diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c | ||
494 | index 0e6a121858d1..5615ceb15708 100644 | ||
495 | --- a/drivers/gpu/drm/vc4/vc4_crtc.c | ||
496 | +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | ||
497 | @@ -998,7 +998,7 @@ static void | ||
498 | vc4_crtc_reset(struct drm_crtc *crtc) | ||
499 | { | ||
500 | if (crtc->state) | ||
501 | - __drm_atomic_helper_crtc_destroy_state(crtc->state); | ||
502 | + vc4_crtc_destroy_state(crtc, crtc->state); | ||
503 | |||
504 | crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL); | ||
505 | if (crtc->state) | ||
506 | diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c | ||
507 | index cc287cf6eb29..edc52d75e6bd 100644 | ||
508 | --- a/drivers/hwtracing/intel_th/gth.c | ||
509 | +++ b/drivers/hwtracing/intel_th/gth.c | ||
510 | @@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev, | ||
511 | othdev->output.port = -1; | ||
512 | othdev->output.active = false; | ||
513 | gth->output[port].output = NULL; | ||
514 | - for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++) | ||
515 | + for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++) | ||
516 | if (gth->master[master] == port) | ||
517 | gth->master[master] = -1; | ||
518 | spin_unlock(&gth->gth_lock); | ||
519 | diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c | ||
520 | index 50be240df331..8cc4da62f050 100644 | ||
521 | --- a/drivers/infiniband/hw/mlx5/main.c | ||
522 | +++ b/drivers/infiniband/hw/mlx5/main.c | ||
523 | @@ -2014,6 +2014,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, | ||
524 | |||
525 | if (vma->vm_flags & VM_WRITE) | ||
526 | return -EPERM; | ||
527 | + vma->vm_flags &= ~VM_MAYWRITE; | ||
528 | |||
529 | if (!dev->mdev->clock_info_page) | ||
530 | return -EOPNOTSUPP; | ||
531 | @@ -2197,6 +2198,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm | ||
532 | |||
533 | if (vma->vm_flags & VM_WRITE) | ||
534 | return -EPERM; | ||
535 | + vma->vm_flags &= ~VM_MAYWRITE; | ||
536 | |||
537 | /* Don't expose to user-space information it shouldn't have */ | ||
538 | if (PAGE_SIZE > 4096) | ||
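
The pattern behind the two mlx5 hunks above: rejecting VM_WRITE at mmap time is not enough, because userspace can later mprotect(PROT_WRITE) the mapping unless VM_MAYWRITE is also cleared. A minimal read-only mmap handler using the same idiom; dev_pfn is an assumed device page frame, not a real symbol:

static int ro_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_WRITE)
		return -EPERM;			/* never writable at map time */
	vma->vm_flags &= ~VM_MAYWRITE;		/* ...and never upgradable later */

	return remap_pfn_range(vma, vma->vm_start, dev_pfn /* assumed */,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
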
539 | diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c | ||
540 | index 49c9541050d4..5819c9d6ffdc 100644 | ||
541 | --- a/drivers/infiniband/sw/rdmavt/mr.c | ||
542 | +++ b/drivers/infiniband/sw/rdmavt/mr.c | ||
543 | @@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) | ||
544 | if (unlikely(mapped_segs == mr->mr.max_segs)) | ||
545 | return -ENOMEM; | ||
546 | |||
547 | - if (mr->mr.length == 0) { | ||
548 | - mr->mr.user_base = addr; | ||
549 | - mr->mr.iova = addr; | ||
550 | - } | ||
551 | - | ||
552 | m = mapped_segs / RVT_SEGSZ; | ||
553 | n = mapped_segs % RVT_SEGSZ; | ||
554 | mr->mr.map[m]->segs[n].vaddr = (void *)addr; | ||
555 | @@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr) | ||
556 | * @sg_nents: number of entries in sg | ||
557 | * @sg_offset: offset in bytes into sg | ||
558 | * | ||
559 | + * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages. | ||
560 | + * | ||
561 | * Return: number of sg elements mapped to the memory region | ||
562 | */ | ||
563 | int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, | ||
564 | int sg_nents, unsigned int *sg_offset) | ||
565 | { | ||
566 | struct rvt_mr *mr = to_imr(ibmr); | ||
567 | + int ret; | ||
568 | |||
569 | mr->mr.length = 0; | ||
570 | mr->mr.page_shift = PAGE_SHIFT; | ||
571 | - return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, | ||
572 | - rvt_set_page); | ||
573 | + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page); | ||
574 | + mr->mr.user_base = ibmr->iova; | ||
575 | + mr->mr.iova = ibmr->iova; | ||
576 | + mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; | ||
577 | + mr->mr.length = (size_t)ibmr->length; | ||
578 | + return ret; | ||
579 | } | ||
580 | |||
581 | /** | ||
582 | @@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key, | ||
583 | ibmr->rkey = key; | ||
584 | mr->mr.lkey = key; | ||
585 | mr->mr.access_flags = access; | ||
586 | + mr->mr.iova = ibmr->iova; | ||
587 | atomic_set(&mr->mr.lkey_invalid, 0); | ||
588 | |||
589 | return 0; | ||
590 | diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c | ||
591 | index df64d6aed4f7..93901ebd122a 100644 | ||
592 | --- a/drivers/input/rmi4/rmi_f11.c | ||
593 | +++ b/drivers/input/rmi4/rmi_f11.c | ||
594 | @@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn) | ||
595 | } | ||
596 | |||
597 | rc = f11_write_control_regs(fn, &f11->sens_query, | ||
598 | - &f11->dev_controls, fn->fd.query_base_addr); | ||
599 | + &f11->dev_controls, fn->fd.control_base_addr); | ||
600 | if (rc) | ||
601 | dev_warn(&fn->dev, "Failed to write control registers\n"); | ||
602 | |||
603 | diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c | ||
604 | index dabe89968a78..2caa5c0c2bc4 100644 | ||
605 | --- a/drivers/net/dsa/mv88e6xxx/chip.c | ||
606 | +++ b/drivers/net/dsa/mv88e6xxx/chip.c | ||
607 | @@ -4821,6 +4821,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) | ||
608 | if (err) | ||
609 | goto out; | ||
610 | |||
611 | + mv88e6xxx_ports_cmode_init(chip); | ||
612 | mv88e6xxx_phy_init(chip); | ||
613 | |||
614 | if (chip->info->ops->get_eeprom) { | ||
615 | diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
616 | index b043370c2685..cc84133c184d 100644 | ||
617 | --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
618 | +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | ||
619 | @@ -1169,6 +1169,12 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | ||
620 | if (!h->phy_dev) | ||
621 | return 0; | ||
622 | |||
623 | + phy_dev->supported &= h->if_support; | ||
624 | + phy_dev->advertising = phy_dev->supported; | ||
625 | + | ||
626 | + if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
627 | + phy_dev->autoneg = false; | ||
628 | + | ||
629 | if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { | ||
630 | phy_dev->dev_flags = 0; | ||
631 | |||
632 | @@ -1180,15 +1186,6 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) | ||
633 | if (unlikely(ret)) | ||
634 | return -ENODEV; | ||
635 | |||
636 | - phy_dev->supported &= h->if_support; | ||
637 | - phy_dev->advertising = phy_dev->supported; | ||
638 | - | ||
639 | - if (h->phy_if == PHY_INTERFACE_MODE_XGMII) | ||
640 | - phy_dev->autoneg = false; | ||
641 | - | ||
642 | - if (h->phy_if == PHY_INTERFACE_MODE_SGMII) | ||
643 | - phy_stop(phy_dev); | ||
644 | - | ||
645 | return 0; | ||
646 | } | ||
647 | |||
648 | diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c | ||
649 | index a475f36ddf8c..426789e2c23d 100644 | ||
650 | --- a/drivers/net/ethernet/ibm/ibmvnic.c | ||
651 | +++ b/drivers/net/ethernet/ibm/ibmvnic.c | ||
652 | @@ -1859,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, | ||
653 | |||
654 | if (adapter->reset_reason != VNIC_RESET_FAILOVER && | ||
655 | adapter->reset_reason != VNIC_RESET_CHANGE_PARAM) | ||
656 | - netdev_notify_peers(netdev); | ||
657 | + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev); | ||
658 | |||
659 | netif_carrier_on(netdev); | ||
660 | |||
661 | diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | ||
662 | index 3f536541f45f..78a43d688cb1 100644 | ||
663 | --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c | ||
664 | +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c | ||
665 | @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void) | ||
666 | /* create driver workqueue */ | ||
667 | fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, | ||
668 | fm10k_driver_name); | ||
669 | + if (!fm10k_workqueue) | ||
670 | + return -ENOMEM; | ||
671 | |||
672 | fm10k_dbg_init(); | ||
673 | |||
674 | diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | ||
675 | index 9988c89ed9fd..9b10abb604cb 100644 | ||
676 | --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | ||
677 | +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | ||
678 | @@ -4272,7 +4272,7 @@ static void mvpp2_phylink_validate(struct net_device *dev, | ||
679 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
680 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
681 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
682 | - if (port->gop_id == 0) | ||
683 | + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) | ||
684 | goto empty_set; | ||
685 | break; | ||
686 | default: | ||
687 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | ||
688 | index 4a33c9a7cac7..599114ab7821 100644 | ||
689 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | ||
690 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | ||
691 | @@ -33,6 +33,26 @@ | ||
692 | #include <linux/bpf_trace.h> | ||
693 | #include "en/xdp.h" | ||
694 | |||
695 | +int mlx5e_xdp_max_mtu(struct mlx5e_params *params) | ||
696 | +{ | ||
697 | + int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM; | ||
698 | + | ||
699 | + /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)). | ||
700 | + * The condition checked in mlx5e_rx_is_linear_skb is: | ||
701 | + * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1) | ||
702 | + * (Note that hw_mtu == sw_mtu + hard_mtu.) | ||
703 | + * What is returned from this function is: | ||
704 | + * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2) | ||
705 | + * After assigning sw_mtu := max_mtu, the left side of (1) turns to | ||
706 | + * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE, | ||
707 | + * because both PAGE_SIZE and S are already aligned. Any number greater | ||
708 | + * than max_mtu would make the left side of (1) greater than PAGE_SIZE, | ||
709 | + * so max_mtu is the maximum MTU allowed. | ||
710 | + */ | ||
711 | + | ||
712 | + return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr)); | ||
713 | +} | ||
714 | + | ||
715 | static inline bool | ||
716 | mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, | ||
717 | struct xdp_buff *xdp) | ||
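
A worked instance of the formula in the comment above, under stated assumptions: PAGE_SIZE = 4096, S = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320 on x86-64, hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM = 0 + 256, and an assumed hard_mtu of 22 (Ethernet header + VLAN tag + FCS; the exact value is driver-specific):

/*   max_mtu = PAGE_SIZE - S - hr - hard_mtu
 *           = 4096 - 320 - 256 - 22
 *           = 3498
 *
 * Any sw_mtu above that makes SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S
 * exceed PAGE_SIZE, i.e. the frame no longer fits in one linear page. */
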
718 | @@ -207,9 +227,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) | ||
719 | sqcc++; | ||
720 | |||
721 | if (is_redirect) { | ||
722 | - xdp_return_frame(xdpi->xdpf); | ||
723 | dma_unmap_single(sq->pdev, xdpi->dma_addr, | ||
724 | xdpi->xdpf->len, DMA_TO_DEVICE); | ||
725 | + xdp_return_frame(xdpi->xdpf); | ||
726 | } else { | ||
727 | /* Recycle RX page */ | ||
728 | mlx5e_page_release(rq, &xdpi->di, true); | ||
729 | @@ -243,9 +263,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) | ||
730 | sq->cc++; | ||
731 | |||
732 | if (is_redirect) { | ||
733 | - xdp_return_frame(xdpi->xdpf); | ||
734 | dma_unmap_single(sq->pdev, xdpi->dma_addr, | ||
735 | xdpi->xdpf->len, DMA_TO_DEVICE); | ||
736 | + xdp_return_frame(xdpi->xdpf); | ||
737 | } else { | ||
738 | /* Recycle RX page */ | ||
739 | mlx5e_page_release(rq, &xdpi->di, false); | ||
740 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | ||
741 | index 4d096623178b..827ceef5fa93 100644 | ||
742 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | ||
743 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | ||
744 | @@ -34,12 +34,11 @@ | ||
745 | |||
746 | #include "en.h" | ||
747 | |||
748 | -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ | ||
749 | - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) | ||
750 | #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) | ||
751 | #define MLX5E_XDP_TX_DS_COUNT \ | ||
752 | ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) | ||
753 | |||
754 | +int mlx5e_xdp_max_mtu(struct mlx5e_params *params); | ||
755 | bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, | ||
756 | void *va, u16 *rx_headroom, u32 *len); | ||
757 | bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); | ||
758 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | ||
759 | index 5e5423076b03..9ca4f88d7cf6 100644 | ||
760 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | ||
761 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | ||
762 | @@ -1317,7 +1317,7 @@ static int mlx5e_get_module_info(struct net_device *netdev, | ||
763 | break; | ||
764 | case MLX5_MODULE_ID_SFP: | ||
765 | modinfo->type = ETH_MODULE_SFF_8472; | ||
766 | - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; | ||
767 | + modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH; | ||
768 | break; | ||
769 | default: | ||
770 | netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n", | ||
771 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
772 | index 637d59c01fe5..b190c447aeb0 100644 | ||
773 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
774 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | ||
775 | @@ -3761,7 +3761,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, | ||
776 | if (params->xdp_prog && | ||
777 | !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { | ||
778 | netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", | ||
779 | - new_mtu, MLX5E_XDP_MAX_MTU); | ||
780 | + new_mtu, mlx5e_xdp_max_mtu(params)); | ||
781 | err = -EINVAL; | ||
782 | goto out; | ||
783 | } | ||
784 | @@ -4227,7 +4227,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) | ||
785 | |||
786 | if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { | ||
787 | netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", | ||
788 | - new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); | ||
789 | + new_channels.params.sw_mtu, | ||
790 | + mlx5e_xdp_max_mtu(&new_channels.params)); | ||
791 | return -EINVAL; | ||
792 | } | ||
793 | |||
794 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c | ||
795 | index 31a9cbd85689..09b6b1bfbfa8 100644 | ||
796 | --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c | ||
797 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | ||
798 | @@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, | ||
799 | size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; | ||
800 | |||
801 | i2c_addr = MLX5_I2C_ADDR_LOW; | ||
802 | - if (offset >= MLX5_EEPROM_PAGE_LENGTH) { | ||
803 | - i2c_addr = MLX5_I2C_ADDR_HIGH; | ||
804 | - offset -= MLX5_EEPROM_PAGE_LENGTH; | ||
805 | - } | ||
806 | |||
807 | MLX5_SET(mcia_reg, in, l, 0); | ||
808 | MLX5_SET(mcia_reg, in, module, module_num); | ||
809 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
810 | index 72cdaa01d56d..100618531021 100644 | ||
811 | --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
812 | +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | ||
813 | @@ -27,7 +27,7 @@ | ||
814 | |||
815 | #define MLXSW_PCI_SW_RESET 0xF0010 | ||
816 | #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) | ||
817 | -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000 | ||
818 | +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000 | ||
819 | #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100 | ||
820 | #define MLXSW_PCI_FW_READY 0xA1844 | ||
821 | #define MLXSW_PCI_FW_READY_MASK 0xFFFF | ||
822 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | ||
823 | index f9bef030ee05..c5b82e283d13 100644 | ||
824 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | ||
825 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | ||
826 | @@ -2504,11 +2504,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev, | ||
827 | if (err) | ||
828 | return err; | ||
829 | |||
830 | + mlxsw_sp_port->link.autoneg = autoneg; | ||
831 | + | ||
832 | if (!netif_running(dev)) | ||
833 | return 0; | ||
834 | |||
835 | - mlxsw_sp_port->link.autoneg = autoneg; | ||
836 | - | ||
837 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false); | ||
838 | mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true); | ||
839 | |||
840 | @@ -2783,7 +2783,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | ||
841 | err = mlxsw_sp_port_ets_set(mlxsw_sp_port, | ||
842 | MLXSW_REG_QEEC_HIERARCY_TC, | ||
843 | i + 8, i, | ||
844 | - false, 0); | ||
845 | + true, 100); | ||
846 | if (err) | ||
847 | return err; | ||
848 | } | ||
849 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | ||
850 | index 886176be818e..62460a5b4ad9 100644 | ||
851 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | ||
852 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | ||
853 | @@ -2595,8 +2595,6 @@ static int stmmac_open(struct net_device *dev) | ||
854 | u32 chan; | ||
855 | int ret; | ||
856 | |||
857 | - stmmac_check_ether_addr(priv); | ||
858 | - | ||
859 | if (priv->hw->pcs != STMMAC_PCS_RGMII && | ||
860 | priv->hw->pcs != STMMAC_PCS_TBI && | ||
861 | priv->hw->pcs != STMMAC_PCS_RTBI) { | ||
862 | @@ -4296,6 +4294,8 @@ int stmmac_dvr_probe(struct device *device, | ||
863 | if (ret) | ||
864 | goto error_hw_init; | ||
865 | |||
866 | + stmmac_check_ether_addr(priv); | ||
867 | + | ||
868 | /* Configure real RX and TX queues */ | ||
869 | netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); | ||
870 | netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); | ||
871 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | ||
872 | index d819e8eaba12..cc1e887e47b5 100644 | ||
873 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | ||
874 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | ||
875 | @@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = { | ||
876 | }, | ||
877 | .driver_data = (void *)&galileo_stmmac_dmi_data, | ||
878 | }, | ||
879 | + /* | ||
880 | * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040. | ||
881 | + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which | ||
882 | + * has only one pci network device while other asset tags are | ||
883 | + * for IOT2040 which has two. | ||
884 | + */ | ||
885 | { | ||
886 | .matches = { | ||
887 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), | ||
888 | @@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = { | ||
889 | { | ||
890 | .matches = { | ||
891 | DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"), | ||
892 | - DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG, | ||
893 | - "6ES7647-0AA00-1YA2"), | ||
894 | }, | ||
895 | .driver_data = (void *)&iot2040_stmmac_dmi_data, | ||
896 | }, | ||
897 | diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c | ||
898 | index f4e93f5fc204..ea90db3c7705 100644 | ||
899 | --- a/drivers/net/slip/slhc.c | ||
900 | +++ b/drivers/net/slip/slhc.c | ||
901 | @@ -153,7 +153,7 @@ out_fail: | ||
902 | void | ||
903 | slhc_free(struct slcompress *comp) | ||
904 | { | ||
905 | - if ( comp == NULLSLCOMPR ) | ||
906 | + if ( IS_ERR_OR_NULL(comp) ) | ||
907 | return; | ||
908 | |||
909 | if ( comp->tstate != NULLSLSTATE ) | ||
910 | diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c | ||
911 | index e23eaf3f6d03..6c6230b44bcd 100644 | ||
912 | --- a/drivers/net/team/team.c | ||
913 | +++ b/drivers/net/team/team.c | ||
914 | @@ -1160,6 +1160,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | ||
915 | return -EINVAL; | ||
916 | } | ||
917 | |||
918 | + if (netdev_has_upper_dev(dev, port_dev)) { | ||
919 | + NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface"); | ||
920 | + netdev_err(dev, "Device %s is already an upper device of the team interface\n", | ||
921 | + portname); | ||
922 | + return -EBUSY; | ||
923 | + } | ||
924 | + | ||
925 | if (port_dev->features & NETIF_F_VLAN_CHALLENGED && | ||
926 | vlan_uses_dev(dev)) { | ||
927 | NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); | ||
928 | diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c | ||
929 | index a1f225f077cd..ef47c226e1d2 100644 | ||
930 | --- a/drivers/usb/core/driver.c | ||
931 | +++ b/drivers/usb/core/driver.c | ||
932 | @@ -1899,14 +1899,11 @@ int usb_runtime_idle(struct device *dev) | ||
933 | return -EBUSY; | ||
934 | } | ||
935 | |||
936 | -int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) | ||
937 | +static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) | ||
938 | { | ||
939 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | ||
940 | int ret = -EPERM; | ||
941 | |||
942 | - if (enable && !udev->usb2_hw_lpm_allowed) | ||
943 | - return 0; | ||
944 | - | ||
945 | if (hcd->driver->set_usb2_hw_lpm) { | ||
946 | ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable); | ||
947 | if (!ret) | ||
948 | @@ -1916,6 +1913,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) | ||
949 | return ret; | ||
950 | } | ||
951 | |||
952 | +int usb_enable_usb2_hardware_lpm(struct usb_device *udev) | ||
953 | +{ | ||
954 | + if (!udev->usb2_hw_lpm_capable || | ||
955 | + !udev->usb2_hw_lpm_allowed || | ||
956 | + udev->usb2_hw_lpm_enabled) | ||
957 | + return 0; | ||
958 | + | ||
959 | + return usb_set_usb2_hardware_lpm(udev, 1); | ||
960 | +} | ||
961 | + | ||
962 | +int usb_disable_usb2_hardware_lpm(struct usb_device *udev) | ||
963 | +{ | ||
964 | + if (!udev->usb2_hw_lpm_enabled) | ||
965 | + return 0; | ||
966 | + | ||
967 | + return usb_set_usb2_hardware_lpm(udev, 0); | ||
968 | +} | ||
969 | + | ||
970 | #endif /* CONFIG_PM */ | ||
971 | |||
972 | struct bus_type usb_bus_type = { | ||
973 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c | ||
974 | index 3adff4da2ee1..bbcfa63d0233 100644 | ||
975 | --- a/drivers/usb/core/hub.c | ||
976 | +++ b/drivers/usb/core/hub.c | ||
977 | @@ -3217,8 +3217,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | ||
978 | } | ||
979 | |||
980 | /* disable USB2 hardware LPM */ | ||
981 | - if (udev->usb2_hw_lpm_enabled == 1) | ||
982 | - usb_set_usb2_hardware_lpm(udev, 0); | ||
983 | + usb_disable_usb2_hardware_lpm(udev); | ||
984 | |||
985 | if (usb_disable_ltm(udev)) { | ||
986 | dev_err(&udev->dev, "Failed to disable LTM before suspend\n"); | ||
987 | @@ -3256,8 +3255,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg) | ||
988 | usb_enable_ltm(udev); | ||
989 | err_ltm: | ||
990 | /* Try to enable USB2 hardware LPM again */ | ||
991 | - if (udev->usb2_hw_lpm_capable == 1) | ||
992 | - usb_set_usb2_hardware_lpm(udev, 1); | ||
993 | + usb_enable_usb2_hardware_lpm(udev); | ||
994 | |||
995 | if (udev->do_remote_wakeup) | ||
996 | (void) usb_disable_remote_wakeup(udev); | ||
997 | @@ -3540,8 +3538,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) | ||
998 | hub_port_logical_disconnect(hub, port1); | ||
999 | } else { | ||
1000 | /* Try to enable USB2 hardware LPM */ | ||
1001 | - if (udev->usb2_hw_lpm_capable == 1) | ||
1002 | - usb_set_usb2_hardware_lpm(udev, 1); | ||
1003 | + usb_enable_usb2_hardware_lpm(udev); | ||
1004 | |||
1005 | /* Try to enable USB3 LTM */ | ||
1006 | usb_enable_ltm(udev); | ||
1007 | @@ -4432,7 +4429,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) | ||
1008 | if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) || | ||
1009 | connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) { | ||
1010 | udev->usb2_hw_lpm_allowed = 1; | ||
1011 | - usb_set_usb2_hardware_lpm(udev, 1); | ||
1012 | + usb_enable_usb2_hardware_lpm(udev); | ||
1013 | } | ||
1014 | } | ||
1015 | |||
1016 | @@ -5608,8 +5605,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) | ||
1017 | /* Disable USB2 hardware LPM. | ||
1018 | * It will be re-enabled by the enumeration process. | ||
1019 | */ | ||
1020 | - if (udev->usb2_hw_lpm_enabled == 1) | ||
1021 | - usb_set_usb2_hardware_lpm(udev, 0); | ||
1022 | + usb_disable_usb2_hardware_lpm(udev); | ||
1023 | |||
1024 | /* Disable LPM while we reset the device and reinstall the alt settings. | ||
1025 | * Device-initiated LPM, and system exit latency settings are cleared | ||
1026 | @@ -5712,7 +5708,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) | ||
1027 | |||
1028 | done: | ||
1029 | /* Now that the alt settings are re-installed, enable LTM and LPM. */ | ||
1030 | - usb_set_usb2_hardware_lpm(udev, 1); | ||
1031 | + usb_enable_usb2_hardware_lpm(udev); | ||
1032 | usb_unlocked_enable_lpm(udev); | ||
1033 | usb_enable_ltm(udev); | ||
1034 | usb_release_bos_descriptor(udev); | ||
1035 | diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c | ||
1036 | index bfa5eda0cc26..4f33eb632a88 100644 | ||
1037 | --- a/drivers/usb/core/message.c | ||
1038 | +++ b/drivers/usb/core/message.c | ||
1039 | @@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) | ||
1040 | dev->actconfig->interface[i] = NULL; | ||
1041 | } | ||
1042 | |||
1043 | - if (dev->usb2_hw_lpm_enabled == 1) | ||
1044 | - usb_set_usb2_hardware_lpm(dev, 0); | ||
1045 | + usb_disable_usb2_hardware_lpm(dev); | ||
1046 | usb_unlocked_disable_lpm(dev); | ||
1047 | usb_disable_ltm(dev); | ||
1048 | |||
1049 | diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c | ||
1050 | index ea18284dfa9a..7e88fdfe3cf5 100644 | ||
1051 | --- a/drivers/usb/core/sysfs.c | ||
1052 | +++ b/drivers/usb/core/sysfs.c | ||
1053 | @@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev, | ||
1054 | |||
1055 | if (!ret) { | ||
1056 | udev->usb2_hw_lpm_allowed = value; | ||
1057 | - ret = usb_set_usb2_hardware_lpm(udev, value); | ||
1058 | + if (value) | ||
1059 | + ret = usb_enable_usb2_hardware_lpm(udev); | ||
1060 | + else | ||
1061 | + ret = usb_disable_usb2_hardware_lpm(udev); | ||
1062 | } | ||
1063 | |||
1064 | usb_unlock_device(udev); | ||
1065 | diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h | ||
1066 | index 546a2219454b..d95a5358f73d 100644 | ||
1067 | --- a/drivers/usb/core/usb.h | ||
1068 | +++ b/drivers/usb/core/usb.h | ||
1069 | @@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev); | ||
1070 | extern int usb_runtime_suspend(struct device *dev); | ||
1071 | extern int usb_runtime_resume(struct device *dev); | ||
1072 | extern int usb_runtime_idle(struct device *dev); | ||
1073 | -extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable); | ||
1074 | +extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev); | ||
1075 | +extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev); | ||
1076 | |||
1077 | #else | ||
1078 | |||
1079 | @@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev) | ||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
1083 | -static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable) | ||
1084 | +static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev) | ||
1085 | +{ | ||
1086 | + return 0; | ||
1087 | +} | ||
1088 | + | ||
1089 | +static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev) | ||
1090 | { | ||
1091 | return 0; | ||
1092 | } | ||
1093 | diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c | ||
1094 | index 64cbc2d007c9..c36275754086 100644 | ||
1095 | --- a/drivers/vfio/vfio_iommu_type1.c | ||
1096 | +++ b/drivers/vfio/vfio_iommu_type1.c | ||
1097 | @@ -58,12 +58,18 @@ module_param_named(disable_hugepages, | ||
1098 | MODULE_PARM_DESC(disable_hugepages, | ||
1099 | "Disable VFIO IOMMU support for IOMMU hugepages."); | ||
1100 | |||
1101 | +static unsigned int dma_entry_limit __read_mostly = U16_MAX; | ||
1102 | +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644); | ||
1103 | +MODULE_PARM_DESC(dma_entry_limit, | ||
1104 | + "Maximum number of user DMA mappings per container (65535)."); | ||
1105 | + | ||
1106 | struct vfio_iommu { | ||
1107 | struct list_head domain_list; | ||
1108 | struct vfio_domain *external_domain; /* domain for external user */ | ||
1109 | struct mutex lock; | ||
1110 | struct rb_root dma_list; | ||
1111 | struct blocking_notifier_head notifier; | ||
1112 | + unsigned int dma_avail; | ||
1113 | bool v2; | ||
1114 | bool nesting; | ||
1115 | }; | ||
1116 | @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma) | ||
1117 | vfio_unlink_dma(iommu, dma); | ||
1118 | put_task_struct(dma->task); | ||
1119 | kfree(dma); | ||
1120 | + iommu->dma_avail++; | ||
1121 | } | ||
1122 | |||
1123 | static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) | ||
1124 | @@ -1110,12 +1117,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, | ||
1125 | goto out_unlock; | ||
1126 | } | ||
1127 | |||
1128 | + if (!iommu->dma_avail) { | ||
1129 | + ret = -ENOSPC; | ||
1130 | + goto out_unlock; | ||
1131 | + } | ||
1132 | + | ||
1133 | dma = kzalloc(sizeof(*dma), GFP_KERNEL); | ||
1134 | if (!dma) { | ||
1135 | ret = -ENOMEM; | ||
1136 | goto out_unlock; | ||
1137 | } | ||
1138 | |||
1139 | + iommu->dma_avail--; | ||
1140 | dma->iova = iova; | ||
1141 | dma->vaddr = vaddr; | ||
1142 | dma->prot = prot; | ||
1143 | @@ -1612,6 +1625,7 @@ static void *vfio_iommu_type1_open(unsigned long arg) | ||
1144 | |||
1145 | INIT_LIST_HEAD(&iommu->domain_list); | ||
1146 | iommu->dma_list = RB_ROOT; | ||
1147 | + iommu->dma_avail = dma_entry_limit; | ||
1148 | mutex_init(&iommu->lock); | ||
1149 | BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); | ||
1150 | |||
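
The VFIO change is a simple budget scheme: each container is seeded with dma_entry_limit mappings at open time, charged on map and refunded on unmap, so one unprivileged user cannot pin unbounded kernel memory in rbtree entries. Reduced to its core, with illustrative names:

struct container {
	unsigned int dma_avail;		/* remaining mapping budget */
};

static int account_map(struct container *c)
{
	if (!c->dma_avail)
		return -ENOSPC;		/* matches vfio_dma_do_map() above */
	c->dma_avail--;
	return 0;
}

static void account_unmap(struct container *c)
{
	c->dma_avail++;			/* refunded in vfio_remove_dma() */
}

Because the parameter is registered with mode 0644, the limit can be raised at load time (modprobe vfio_iommu_type1 dma_entry_limit=131072) or changed at runtime under /sys/module/vfio_iommu_type1/parameters/; new containers pick up the current value in vfio_iommu_type1_open().
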
1151 | diff --git a/fs/aio.c b/fs/aio.c | ||
1152 | index 45d5ef8dd0a8..911e23087dfb 100644 | ||
1153 | --- a/fs/aio.c | ||
1154 | +++ b/fs/aio.c | ||
1155 | @@ -161,9 +161,13 @@ struct kioctx { | ||
1156 | unsigned id; | ||
1157 | }; | ||
1158 | |||
1159 | +/* | ||
1160 | + * First field must be the file pointer in all the | ||
1161 | + * iocb unions! See also 'struct kiocb' in <linux/fs.h> | ||
1162 | + */ | ||
1163 | struct fsync_iocb { | ||
1164 | - struct work_struct work; | ||
1165 | struct file *file; | ||
1166 | + struct work_struct work; | ||
1167 | bool datasync; | ||
1168 | }; | ||
1169 | |||
1170 | @@ -171,14 +175,21 @@ struct poll_iocb { | ||
1171 | struct file *file; | ||
1172 | struct wait_queue_head *head; | ||
1173 | __poll_t events; | ||
1174 | - bool woken; | ||
1175 | + bool done; | ||
1176 | bool cancelled; | ||
1177 | struct wait_queue_entry wait; | ||
1178 | struct work_struct work; | ||
1179 | }; | ||
1180 | |||
1181 | +/* | ||
1182 | + * NOTE! Each of the iocb union members has the file pointer | ||
1183 | + * as the first entry in their struct definition. So you can | ||
1184 | + * access the file pointer through any of the sub-structs, | ||
1185 | + * or directly as just 'ki_filp' in this struct. | ||
1186 | + */ | ||
1187 | struct aio_kiocb { | ||
1188 | union { | ||
1189 | + struct file *ki_filp; | ||
1190 | struct kiocb rw; | ||
1191 | struct fsync_iocb fsync; | ||
1192 | struct poll_iocb poll; | ||
1193 | @@ -187,8 +198,7 @@ struct aio_kiocb { | ||
1194 | struct kioctx *ki_ctx; | ||
1195 | kiocb_cancel_fn *ki_cancel; | ||
1196 | |||
1197 | - struct iocb __user *ki_user_iocb; /* user's aiocb */ | ||
1198 | - __u64 ki_user_data; /* user's data for completion */ | ||
1199 | + struct io_event ki_res; | ||
1200 | |||
1201 | struct list_head ki_list; /* the aio core uses this | ||
1202 | * for cancellation */ | ||
1203 | @@ -902,7 +912,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr) | ||
1204 | local_irq_restore(flags); | ||
1205 | } | ||
1206 | |||
1207 | -static bool get_reqs_available(struct kioctx *ctx) | ||
1208 | +static bool __get_reqs_available(struct kioctx *ctx) | ||
1209 | { | ||
1210 | struct kioctx_cpu *kcpu; | ||
1211 | bool ret = false; | ||
1212 | @@ -994,32 +1004,35 @@ static void user_refill_reqs_available(struct kioctx *ctx) | ||
1213 | spin_unlock_irq(&ctx->completion_lock); | ||
1214 | } | ||
1215 | |||
1216 | +static bool get_reqs_available(struct kioctx *ctx) | ||
1217 | +{ | ||
1218 | + if (__get_reqs_available(ctx)) | ||
1219 | + return true; | ||
1220 | + user_refill_reqs_available(ctx); | ||
1221 | + return __get_reqs_available(ctx); | ||
1222 | +} | ||
1223 | + | ||
1224 | /* aio_get_req | ||
1225 | * Allocate a slot for an aio request. | ||
1226 | * Returns NULL if no requests are free. | ||
1227 | + * | ||
1228 | + * The refcount is initialized to 2 - one for the async op completion, | ||
1229 | + * one for the synchronous code that does this. | ||
1230 | */ | ||
1231 | static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) | ||
1232 | { | ||
1233 | struct aio_kiocb *req; | ||
1234 | |||
1235 | - if (!get_reqs_available(ctx)) { | ||
1236 | - user_refill_reqs_available(ctx); | ||
1237 | - if (!get_reqs_available(ctx)) | ||
1238 | - return NULL; | ||
1239 | - } | ||
1240 | - | ||
1241 | - req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); | ||
1242 | + req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); | ||
1243 | if (unlikely(!req)) | ||
1244 | - goto out_put; | ||
1245 | + return NULL; | ||
1246 | |||
1247 | percpu_ref_get(&ctx->reqs); | ||
1248 | - INIT_LIST_HEAD(&req->ki_list); | ||
1249 | - refcount_set(&req->ki_refcnt, 0); | ||
1250 | req->ki_ctx = ctx; | ||
1251 | + INIT_LIST_HEAD(&req->ki_list); | ||
1252 | + refcount_set(&req->ki_refcnt, 2); | ||
1253 | + req->ki_eventfd = NULL; | ||
1254 | return req; | ||
1255 | -out_put: | ||
1256 | - put_reqs_available(ctx, 1); | ||
1257 | - return NULL; | ||
1258 | } | ||
1259 | |||
1260 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) | ||
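
The refcount-of-2 comment above is the crux of the aio rework: the synchronous submitter and the asynchronous completion each own one reference, so the io_event is published exactly once, by whichever side drops last, and the file pointer lives until then. A toy model of the scheme; submit_hw() and publish_event() are stand-ins, not kernel APIs:

struct request {
	refcount_t refs;
};

static void request_put(struct request *req)
{
	if (refcount_dec_and_test(&req->refs)) {
		publish_event(req);		/* aio_complete() analogue */
		kfree(req);			/* iocb_destroy() analogue */
	}
}

static int submit(struct request *req)
{
	refcount_set(&req->refs, 2);	/* one for us, one for completion */
	submit_hw(req);			/* completion calls request_put() */
	request_put(req);		/* drop the submitter's reference */
	return 0;
}
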
1261 | @@ -1050,19 +1063,18 @@ out: | ||
1262 | return ret; | ||
1263 | } | ||
1264 | |||
1265 | -static inline void iocb_put(struct aio_kiocb *iocb) | ||
1266 | +static inline void iocb_destroy(struct aio_kiocb *iocb) | ||
1267 | { | ||
1268 | - if (refcount_read(&iocb->ki_refcnt) == 0 || | ||
1269 | - refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
1270 | - percpu_ref_put(&iocb->ki_ctx->reqs); | ||
1271 | - kmem_cache_free(kiocb_cachep, iocb); | ||
1272 | - } | ||
1273 | + if (iocb->ki_filp) | ||
1274 | + fput(iocb->ki_filp); | ||
1275 | + percpu_ref_put(&iocb->ki_ctx->reqs); | ||
1276 | + kmem_cache_free(kiocb_cachep, iocb); | ||
1277 | } | ||
1278 | |||
1279 | /* aio_complete | ||
1280 | * Called when the io request on the given iocb is complete. | ||
1281 | */ | ||
1282 | -static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | ||
1283 | +static void aio_complete(struct aio_kiocb *iocb) | ||
1284 | { | ||
1285 | struct kioctx *ctx = iocb->ki_ctx; | ||
1286 | struct aio_ring *ring; | ||
1287 | @@ -1086,17 +1098,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | ||
1288 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | ||
1289 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; | ||
1290 | |||
1291 | - event->obj = (u64)(unsigned long)iocb->ki_user_iocb; | ||
1292 | - event->data = iocb->ki_user_data; | ||
1293 | - event->res = res; | ||
1294 | - event->res2 = res2; | ||
1295 | + *event = iocb->ki_res; | ||
1296 | |||
1297 | kunmap_atomic(ev_page); | ||
1298 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); | ||
1299 | |||
1300 | - pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", | ||
1301 | - ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, | ||
1302 | - res, res2); | ||
1303 | + pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, | ||
1304 | + (void __user *)(unsigned long)iocb->ki_res.obj, | ||
1305 | + iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2); | ||
1306 | |||
1307 | /* after flagging the request as done, we | ||
1308 | * must never even look at it again | ||
1309 | @@ -1138,7 +1147,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2) | ||
1310 | |||
1311 | if (waitqueue_active(&ctx->wait)) | ||
1312 | wake_up(&ctx->wait); | ||
1313 | - iocb_put(iocb); | ||
1314 | +} | ||
1315 | + | ||
1316 | +static inline void iocb_put(struct aio_kiocb *iocb) | ||
1317 | +{ | ||
1318 | + if (refcount_dec_and_test(&iocb->ki_refcnt)) { | ||
1319 | + aio_complete(iocb); | ||
1320 | + iocb_destroy(iocb); | ||
1321 | + } | ||
1322 | } | ||
1323 | |||
1324 | /* aio_read_events_ring | ||
1325 | @@ -1412,18 +1428,17 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2) | ||
1326 | file_end_write(kiocb->ki_filp); | ||
1327 | } | ||
1328 | |||
1329 | - fput(kiocb->ki_filp); | ||
1330 | - aio_complete(iocb, res, res2); | ||
1331 | + iocb->ki_res.res = res; | ||
1332 | + iocb->ki_res.res2 = res2; | ||
1333 | + iocb_put(iocb); | ||
1334 | } | ||
1335 | |||
1336 | -static int aio_prep_rw(struct kiocb *req, struct iocb *iocb) | ||
1337 | +static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) | ||
1338 | { | ||
1339 | int ret; | ||
1340 | |||
1341 | - req->ki_filp = fget(iocb->aio_fildes); | ||
1342 | - if (unlikely(!req->ki_filp)) | ||
1343 | - return -EBADF; | ||
1344 | req->ki_complete = aio_complete_rw; | ||
1345 | + req->private = NULL; | ||
1346 | req->ki_pos = iocb->aio_offset; | ||
1347 | req->ki_flags = iocb_flags(req->ki_filp); | ||
1348 | if (iocb->aio_flags & IOCB_FLAG_RESFD) | ||
1349 | @@ -1438,7 +1453,6 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb) | ||
1350 | ret = ioprio_check_cap(iocb->aio_reqprio); | ||
1351 | if (ret) { | ||
1352 | pr_debug("aio ioprio check cap error: %d\n", ret); | ||
1353 | - fput(req->ki_filp); | ||
1354 | return ret; | ||
1355 | } | ||
1356 | |||
1357 | @@ -1448,11 +1462,13 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb) | ||
1358 | |||
1359 | ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags); | ||
1360 | if (unlikely(ret)) | ||
1361 | - fput(req->ki_filp); | ||
1362 | - return ret; | ||
1363 | + return ret; | ||
1364 | + | ||
1365 | + req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ | ||
1366 | + return 0; | ||
1367 | } | ||
1368 | |||
1369 | -static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec, | ||
1370 | +static int aio_setup_rw(int rw, const struct iocb *iocb, struct iovec **iovec, | ||
1371 | bool vectored, bool compat, struct iov_iter *iter) | ||
1372 | { | ||
1373 | void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf; | ||
1374 | @@ -1487,12 +1503,12 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) | ||
1375 | ret = -EINTR; | ||
1376 | /*FALLTHRU*/ | ||
1377 | default: | ||
1378 | - aio_complete_rw(req, ret, 0); | ||
1379 | + req->ki_complete(req, ret, 0); | ||
1380 | } | ||
1381 | } | ||
1382 | |||
1383 | -static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1384 | - bool compat) | ||
1385 | +static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, | ||
1386 | + bool vectored, bool compat) | ||
1387 | { | ||
1388 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | ||
1389 | struct iov_iter iter; | ||
1390 | @@ -1503,29 +1519,24 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1391 | if (ret) | ||
1392 | return ret; | ||
1393 | file = req->ki_filp; | ||
1394 | - | ||
1395 | - ret = -EBADF; | ||
1396 | if (unlikely(!(file->f_mode & FMODE_READ))) | ||
1397 | - goto out_fput; | ||
1398 | + return -EBADF; | ||
1399 | ret = -EINVAL; | ||
1400 | if (unlikely(!file->f_op->read_iter)) | ||
1401 | - goto out_fput; | ||
1402 | + return -EINVAL; | ||
1403 | |||
1404 | ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter); | ||
1405 | if (ret) | ||
1406 | - goto out_fput; | ||
1407 | + return ret; | ||
1408 | ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); | ||
1409 | if (!ret) | ||
1410 | aio_rw_done(req, call_read_iter(file, req, &iter)); | ||
1411 | kfree(iovec); | ||
1412 | -out_fput: | ||
1413 | - if (unlikely(ret)) | ||
1414 | - fput(file); | ||
1415 | return ret; | ||
1416 | } | ||
1417 | |||
1418 | -static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1419 | - bool compat) | ||
1420 | +static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, | ||
1421 | + bool vectored, bool compat) | ||
1422 | { | ||
1423 | struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; | ||
1424 | struct iov_iter iter; | ||
1425 | @@ -1537,16 +1548,14 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1426 | return ret; | ||
1427 | file = req->ki_filp; | ||
1428 | |||
1429 | - ret = -EBADF; | ||
1430 | if (unlikely(!(file->f_mode & FMODE_WRITE))) | ||
1431 | - goto out_fput; | ||
1432 | - ret = -EINVAL; | ||
1433 | + return -EBADF; | ||
1434 | if (unlikely(!file->f_op->write_iter)) | ||
1435 | - goto out_fput; | ||
1436 | + return -EINVAL; | ||
1437 | |||
1438 | ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter); | ||
1439 | if (ret) | ||
1440 | - goto out_fput; | ||
1441 | + return ret; | ||
1442 | ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); | ||
1443 | if (!ret) { | ||
1444 | /* | ||
1445 | @@ -1564,35 +1573,26 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, | ||
1446 | aio_rw_done(req, call_write_iter(file, req, &iter)); | ||
1447 | } | ||
1448 | kfree(iovec); | ||
1449 | -out_fput: | ||
1450 | - if (unlikely(ret)) | ||
1451 | - fput(file); | ||
1452 | return ret; | ||
1453 | } | ||
1454 | |||
1455 | static void aio_fsync_work(struct work_struct *work) | ||
1456 | { | ||
1457 | - struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); | ||
1458 | - int ret; | ||
1459 | + struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work); | ||
1460 | |||
1461 | - ret = vfs_fsync(req->file, req->datasync); | ||
1462 | - fput(req->file); | ||
1463 | - aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); | ||
1464 | + iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync); | ||
1465 | + iocb_put(iocb); | ||
1466 | } | ||
1467 | |||
1468 | -static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) | ||
1469 | +static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, | ||
1470 | + bool datasync) | ||
1471 | { | ||
1472 | if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes || | ||
1473 | iocb->aio_rw_flags)) | ||
1474 | return -EINVAL; | ||
1475 | |||
1476 | - req->file = fget(iocb->aio_fildes); | ||
1477 | - if (unlikely(!req->file)) | ||
1478 | - return -EBADF; | ||
1479 | - if (unlikely(!req->file->f_op->fsync)) { | ||
1480 | - fput(req->file); | ||
1481 | + if (unlikely(!req->file->f_op->fsync)) | ||
1482 | return -EINVAL; | ||
1483 | - } | ||
1484 | |||
1485 | req->datasync = datasync; | ||
1486 | INIT_WORK(&req->work, aio_fsync_work); | ||
1487 | @@ -1600,14 +1600,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync) | ||
1488 | return 0; | ||
1489 | } | ||
1490 | |||
1491 | -static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask) | ||
1492 | -{ | ||
1493 | - struct file *file = iocb->poll.file; | ||
1494 | - | ||
1495 | - aio_complete(iocb, mangle_poll(mask), 0); | ||
1496 | - fput(file); | ||
1497 | -} | ||
1498 | - | ||
1499 | static void aio_poll_complete_work(struct work_struct *work) | ||
1500 | { | ||
1501 | struct poll_iocb *req = container_of(work, struct poll_iocb, work); | ||
1502 | @@ -1633,9 +1625,11 @@ static void aio_poll_complete_work(struct work_struct *work) | ||
1503 | return; | ||
1504 | } | ||
1505 | list_del_init(&iocb->ki_list); | ||
1506 | + iocb->ki_res.res = mangle_poll(mask); | ||
1507 | + req->done = true; | ||
1508 | spin_unlock_irq(&ctx->ctx_lock); | ||
1509 | |||
1510 | - aio_poll_complete(iocb, mask); | ||
1511 | + iocb_put(iocb); | ||
1512 | } | ||
1513 | |||
1514 | /* assumes we are called with irqs disabled */ | ||
1515 | @@ -1663,31 +1657,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, | ||
1516 | __poll_t mask = key_to_poll(key); | ||
1517 | unsigned long flags; | ||
1518 | |||
1519 | - req->woken = true; | ||
1520 | - | ||
1521 | /* for instances that support it check for an event match first: */ | ||
1522 | - if (mask) { | ||
1523 | - if (!(mask & req->events)) | ||
1524 | - return 0; | ||
1525 | + if (mask && !(mask & req->events)) | ||
1526 | + return 0; | ||
1527 | + | ||
1528 | + list_del_init(&req->wait.entry); | ||
1529 | |||
1530 | + if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | ||
1531 | /* | ||
1532 | * Try to complete the iocb inline if we can. Use | ||
1533 | * irqsave/irqrestore because not all filesystems (e.g. fuse) | ||
1534 | * call this function with IRQs disabled and because IRQs | ||
1535 | * have to be disabled before ctx_lock is obtained. | ||
1536 | */ | ||
1537 | - if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { | ||
1538 | - list_del(&iocb->ki_list); | ||
1539 | - spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | ||
1540 | - | ||
1541 | - list_del_init(&req->wait.entry); | ||
1542 | - aio_poll_complete(iocb, mask); | ||
1543 | - return 1; | ||
1544 | - } | ||
1545 | + list_del(&iocb->ki_list); | ||
1546 | + iocb->ki_res.res = mangle_poll(mask); | ||
1547 | + req->done = true; | ||
1548 | + spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); | ||
1549 | + iocb_put(iocb); | ||
1550 | + } else { | ||
1551 | + schedule_work(&req->work); | ||
1552 | } | ||
1553 | - | ||
1554 | - list_del_init(&req->wait.entry); | ||
1555 | - schedule_work(&req->work); | ||
1556 | return 1; | ||
1557 | } | ||
1558 | |||
1559 | @@ -1714,11 +1704,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head, | ||
1560 | add_wait_queue(head, &pt->iocb->poll.wait); | ||
1561 | } | ||
1562 | |||
1563 | -static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) | ||
1564 | +static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) | ||
1565 | { | ||
1566 | struct kioctx *ctx = aiocb->ki_ctx; | ||
1567 | struct poll_iocb *req = &aiocb->poll; | ||
1568 | struct aio_poll_table apt; | ||
1569 | + bool cancel = false; | ||
1570 | __poll_t mask; | ||
1571 | |||
1572 | /* reject any unknown events outside the normal event mask. */ | ||
1573 | @@ -1730,9 +1721,10 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) | ||
1574 | |||
1575 | INIT_WORK(&req->work, aio_poll_complete_work); | ||
1576 | req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; | ||
1577 | - req->file = fget(iocb->aio_fildes); | ||
1578 | - if (unlikely(!req->file)) | ||
1579 | - return -EBADF; | ||
1580 | + | ||
1581 | + req->head = NULL; | ||
1582 | + req->done = false; | ||
1583 | + req->cancelled = false; | ||
1584 | |||
1585 | apt.pt._qproc = aio_poll_queue_proc; | ||
1586 | apt.pt._key = req->events; | ||
1587 | @@ -1743,83 +1735,79 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb) | ||
1588 | INIT_LIST_HEAD(&req->wait.entry); | ||
1589 | init_waitqueue_func_entry(&req->wait, aio_poll_wake); | ||
1590 | |||
1591 | - /* one for removal from waitqueue, one for this function */ | ||
1592 | - refcount_set(&aiocb->ki_refcnt, 2); | ||
1593 | - | ||
1594 | mask = vfs_poll(req->file, &apt.pt) & req->events; | ||
1595 | - if (unlikely(!req->head)) { | ||
1596 | - /* we did not manage to set up a waitqueue, done */ | ||
1597 | - goto out; | ||
1598 | - } | ||
1599 | - | ||
1600 | spin_lock_irq(&ctx->ctx_lock); | ||
1601 | - spin_lock(&req->head->lock); | ||
1602 | - if (req->woken) { | ||
1603 | - /* wake_up context handles the rest */ | ||
1604 | - mask = 0; | ||
1605 | + if (likely(req->head)) { | ||
1606 | + spin_lock(&req->head->lock); | ||
1607 | + if (unlikely(list_empty(&req->wait.entry))) { | ||
1608 | + if (apt.error) | ||
1609 | + cancel = true; | ||
1610 | + apt.error = 0; | ||
1611 | + mask = 0; | ||
1612 | + } | ||
1613 | + if (mask || apt.error) { | ||
1614 | + list_del_init(&req->wait.entry); | ||
1615 | + } else if (cancel) { | ||
1616 | + WRITE_ONCE(req->cancelled, true); | ||
1617 | + } else if (!req->done) { /* actually waiting for an event */ | ||
1618 | + list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
1619 | + aiocb->ki_cancel = aio_poll_cancel; | ||
1620 | + } | ||
1621 | + spin_unlock(&req->head->lock); | ||
1622 | + } | ||
1623 | + if (mask) { /* no async, we'd stolen it */ | ||
1624 | + aiocb->ki_res.res = mangle_poll(mask); | ||
1625 | apt.error = 0; | ||
1626 | - } else if (mask || apt.error) { | ||
1627 | - /* if we get an error or a mask we are done */ | ||
1628 | - WARN_ON_ONCE(list_empty(&req->wait.entry)); | ||
1629 | - list_del_init(&req->wait.entry); | ||
1630 | - } else { | ||
1631 | - /* actually waiting for an event */ | ||
1632 | - list_add_tail(&aiocb->ki_list, &ctx->active_reqs); | ||
1633 | - aiocb->ki_cancel = aio_poll_cancel; | ||
1634 | } | ||
1635 | - spin_unlock(&req->head->lock); | ||
1636 | spin_unlock_irq(&ctx->ctx_lock); | ||
1637 | - | ||
1638 | -out: | ||
1639 | - if (unlikely(apt.error)) { | ||
1640 | - fput(req->file); | ||
1641 | - return apt.error; | ||
1642 | - } | ||
1643 | - | ||
1644 | if (mask) | ||
1645 | - aio_poll_complete(aiocb, mask); | ||
1646 | - iocb_put(aiocb); | ||
1647 | - return 0; | ||
1648 | + iocb_put(aiocb); | ||
1649 | + return apt.error; | ||
1650 | } | ||
1651 | |||
1652 | -static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | ||
1653 | - bool compat) | ||
1654 | +static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, | ||
1655 | + struct iocb __user *user_iocb, bool compat) | ||
1656 | { | ||
1657 | struct aio_kiocb *req; | ||
1658 | - struct iocb iocb; | ||
1659 | ssize_t ret; | ||
1660 | |||
1661 | - if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) | ||
1662 | - return -EFAULT; | ||
1663 | - | ||
1664 | /* enforce forwards compatibility on users */ | ||
1665 | - if (unlikely(iocb.aio_reserved2)) { | ||
1666 | + if (unlikely(iocb->aio_reserved2)) { | ||
1667 | pr_debug("EINVAL: reserve field set\n"); | ||
1668 | return -EINVAL; | ||
1669 | } | ||
1670 | |||
1671 | /* prevent overflows */ | ||
1672 | if (unlikely( | ||
1673 | - (iocb.aio_buf != (unsigned long)iocb.aio_buf) || | ||
1674 | - (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) || | ||
1675 | - ((ssize_t)iocb.aio_nbytes < 0) | ||
1676 | + (iocb->aio_buf != (unsigned long)iocb->aio_buf) || | ||
1677 | + (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || | ||
1678 | + ((ssize_t)iocb->aio_nbytes < 0) | ||
1679 | )) { | ||
1680 | pr_debug("EINVAL: overflow check\n"); | ||
1681 | return -EINVAL; | ||
1682 | } | ||
1683 | |||
1684 | + if (!get_reqs_available(ctx)) | ||
1685 | + return -EAGAIN; | ||
1686 | + | ||
1687 | + ret = -EAGAIN; | ||
1688 | req = aio_get_req(ctx); | ||
1689 | if (unlikely(!req)) | ||
1690 | - return -EAGAIN; | ||
1691 | + goto out_put_reqs_available; | ||
1692 | + | ||
1693 | + req->ki_filp = fget(iocb->aio_fildes); | ||
1694 | + ret = -EBADF; | ||
1695 | + if (unlikely(!req->ki_filp)) | ||
1696 | + goto out_put_req; | ||
1697 | |||
1698 | - if (iocb.aio_flags & IOCB_FLAG_RESFD) { | ||
1699 | + if (iocb->aio_flags & IOCB_FLAG_RESFD) { | ||
1700 | /* | ||
1701 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an | ||
1702 | * instance of the file* now. The file descriptor must be | ||
1703 | * an eventfd() fd, and will be signaled for each completed | ||
1704 | * event using the eventfd_signal() function. | ||
1705 | */ | ||
1706 | - req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd); | ||
1707 | + req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); | ||
1708 | if (IS_ERR(req->ki_eventfd)) { | ||
1709 | ret = PTR_ERR(req->ki_eventfd); | ||
1710 | req->ki_eventfd = NULL; | ||
1711 | @@ -1833,54 +1821,70 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | ||
1712 | goto out_put_req; | ||
1713 | } | ||
1714 | |||
1715 | - req->ki_user_iocb = user_iocb; | ||
1716 | - req->ki_user_data = iocb.aio_data; | ||
1717 | + req->ki_res.obj = (u64)(unsigned long)user_iocb; | ||
1718 | + req->ki_res.data = iocb->aio_data; | ||
1719 | + req->ki_res.res = 0; | ||
1720 | + req->ki_res.res2 = 0; | ||
1721 | |||
1722 | - switch (iocb.aio_lio_opcode) { | ||
1723 | + switch (iocb->aio_lio_opcode) { | ||
1724 | case IOCB_CMD_PREAD: | ||
1725 | - ret = aio_read(&req->rw, &iocb, false, compat); | ||
1726 | + ret = aio_read(&req->rw, iocb, false, compat); | ||
1727 | break; | ||
1728 | case IOCB_CMD_PWRITE: | ||
1729 | - ret = aio_write(&req->rw, &iocb, false, compat); | ||
1730 | + ret = aio_write(&req->rw, iocb, false, compat); | ||
1731 | break; | ||
1732 | case IOCB_CMD_PREADV: | ||
1733 | - ret = aio_read(&req->rw, &iocb, true, compat); | ||
1734 | + ret = aio_read(&req->rw, iocb, true, compat); | ||
1735 | break; | ||
1736 | case IOCB_CMD_PWRITEV: | ||
1737 | - ret = aio_write(&req->rw, &iocb, true, compat); | ||
1738 | + ret = aio_write(&req->rw, iocb, true, compat); | ||
1739 | break; | ||
1740 | case IOCB_CMD_FSYNC: | ||
1741 | - ret = aio_fsync(&req->fsync, &iocb, false); | ||
1742 | + ret = aio_fsync(&req->fsync, iocb, false); | ||
1743 | break; | ||
1744 | case IOCB_CMD_FDSYNC: | ||
1745 | - ret = aio_fsync(&req->fsync, &iocb, true); | ||
1746 | + ret = aio_fsync(&req->fsync, iocb, true); | ||
1747 | break; | ||
1748 | case IOCB_CMD_POLL: | ||
1749 | - ret = aio_poll(req, &iocb); | ||
1750 | + ret = aio_poll(req, iocb); | ||
1751 | break; | ||
1752 | default: | ||
1753 | - pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode); | ||
1754 | + pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); | ||
1755 | ret = -EINVAL; | ||
1756 | break; | ||
1757 | } | ||
1758 | |||
1759 | + /* Done with the synchronous reference */ | ||
1760 | + iocb_put(req); | ||
1761 | + | ||
1762 | /* | ||
1763 | * If ret is 0, we'd either done aio_complete() ourselves or have | ||
1764 | * arranged for that to be done asynchronously. Anything non-zero | ||
1765 | * means that we need to destroy req ourselves. | ||
1766 | */ | ||
1767 | - if (ret) | ||
1768 | - goto out_put_req; | ||
1769 | - return 0; | ||
1770 | + if (!ret) | ||
1771 | + return 0; | ||
1772 | + | ||
1773 | out_put_req: | ||
1774 | - put_reqs_available(ctx, 1); | ||
1775 | - percpu_ref_put(&ctx->reqs); | ||
1776 | if (req->ki_eventfd) | ||
1777 | eventfd_ctx_put(req->ki_eventfd); | ||
1778 | - kmem_cache_free(kiocb_cachep, req); | ||
1779 | + iocb_destroy(req); | ||
1780 | +out_put_reqs_available: | ||
1781 | + put_reqs_available(ctx, 1); | ||
1782 | return ret; | ||
1783 | } | ||
1784 | |||
1785 | +static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | ||
1786 | + bool compat) | ||
1787 | +{ | ||
1788 | + struct iocb iocb; | ||
1789 | + | ||
1790 | + if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) | ||
1791 | + return -EFAULT; | ||
1792 | + | ||
1793 | + return __io_submit_one(ctx, &iocb, user_iocb, compat); | ||
1794 | +} | ||
1795 | + | ||
1796 | /* sys_io_submit: | ||
1797 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns | ||
1798 | * the number of iocbs queued. May return -EINVAL if the aio_context | ||
1799 | @@ -1973,24 +1977,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id, | ||
1800 | } | ||
1801 | #endif | ||
1802 | |||
1803 | -/* lookup_kiocb | ||
1804 | - * Finds a given iocb for cancellation. | ||
1805 | - */ | ||
1806 | -static struct aio_kiocb * | ||
1807 | -lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb) | ||
1808 | -{ | ||
1809 | - struct aio_kiocb *kiocb; | ||
1810 | - | ||
1811 | - assert_spin_locked(&ctx->ctx_lock); | ||
1812 | - | ||
1813 | - /* TODO: use a hash or array, this sucks. */ | ||
1814 | - list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { | ||
1815 | - if (kiocb->ki_user_iocb == iocb) | ||
1816 | - return kiocb; | ||
1817 | - } | ||
1818 | - return NULL; | ||
1819 | -} | ||
1820 | - | ||
1821 | /* sys_io_cancel: | ||
1822 | * Attempts to cancel an iocb previously passed to io_submit. If | ||
1823 | * the operation is successfully cancelled, the resulting event is | ||
1824 | @@ -2008,6 +1994,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | ||
1825 | struct aio_kiocb *kiocb; | ||
1826 | int ret = -EINVAL; | ||
1827 | u32 key; | ||
1828 | + u64 obj = (u64)(unsigned long)iocb; | ||
1829 | |||
1830 | if (unlikely(get_user(key, &iocb->aio_key))) | ||
1831 | return -EFAULT; | ||
1832 | @@ -2019,10 +2006,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | ||
1833 | return -EINVAL; | ||
1834 | |||
1835 | spin_lock_irq(&ctx->ctx_lock); | ||
1836 | - kiocb = lookup_kiocb(ctx, iocb); | ||
1837 | - if (kiocb) { | ||
1838 | - ret = kiocb->ki_cancel(&kiocb->rw); | ||
1839 | - list_del_init(&kiocb->ki_list); | ||
1840 | + /* TODO: use a hash or array, this sucks. */ | ||
1841 | + list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { | ||
1842 | + if (kiocb->ki_res.obj == obj) { | ||
1843 | + ret = kiocb->ki_cancel(&kiocb->rw); | ||
1844 | + list_del_init(&kiocb->ki_list); | ||
1845 | + break; | ||
1846 | + } | ||
1847 | } | ||
1848 | spin_unlock_irq(&ctx->ctx_lock); | ||
1849 | |||
1850 | diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c | ||
1851 | index 82928cea0209..7f3f64ba464f 100644 | ||
1852 | --- a/fs/ceph/dir.c | ||
1853 | +++ b/fs/ceph/dir.c | ||
1854 | @@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn) | ||
1855 | unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) | ||
1856 | { | ||
1857 | struct ceph_inode_info *dci = ceph_inode(dir); | ||
1858 | + unsigned hash; | ||
1859 | |||
1860 | switch (dci->i_dir_layout.dl_dir_hash) { | ||
1861 | case 0: /* for backward compat */ | ||
1862 | @@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) | ||
1863 | return dn->d_name.hash; | ||
1864 | |||
1865 | default: | ||
1866 | - return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, | ||
1867 | + spin_lock(&dn->d_lock); | ||
1868 | + hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash, | ||
1869 | dn->d_name.name, dn->d_name.len); | ||
1870 | + spin_unlock(&dn->d_lock); | ||
1871 | + return hash; | ||
1872 | } | ||
1873 | } | ||
1874 | |||
1875 | diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c | ||
1876 | index bc43c822426a..bfcf11c70bfa 100644 | ||
1877 | --- a/fs/ceph/mds_client.c | ||
1878 | +++ b/fs/ceph/mds_client.c | ||
1879 | @@ -1290,6 +1290,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, | ||
1880 | list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); | ||
1881 | ci->i_prealloc_cap_flush = NULL; | ||
1882 | } | ||
1883 | + | ||
1884 | + if (drop && | ||
1885 | + ci->i_wrbuffer_ref_head == 0 && | ||
1886 | + ci->i_wr_ref == 0 && | ||
1887 | + ci->i_dirty_caps == 0 && | ||
1888 | + ci->i_flushing_caps == 0) { | ||
1889 | + ceph_put_snap_context(ci->i_head_snapc); | ||
1890 | + ci->i_head_snapc = NULL; | ||
1891 | + } | ||
1892 | } | ||
1893 | spin_unlock(&ci->i_ceph_lock); | ||
1894 | while (!list_empty(&to_remove)) { | ||
1895 | @@ -1945,10 +1954,39 @@ retry: | ||
1896 | return path; | ||
1897 | } | ||
1898 | |||
1899 | +/* Duplicate the dentry->d_name.name safely */ | ||
1900 | +static int clone_dentry_name(struct dentry *dentry, const char **ppath, | ||
1901 | + int *ppathlen) | ||
1902 | +{ | ||
1903 | + u32 len; | ||
1904 | + char *name; | ||
1905 | + | ||
1906 | +retry: | ||
1907 | + len = READ_ONCE(dentry->d_name.len); | ||
1908 | + name = kmalloc(len + 1, GFP_NOFS); | ||
1909 | + if (!name) | ||
1910 | + return -ENOMEM; | ||
1911 | + | ||
1912 | + spin_lock(&dentry->d_lock); | ||
1913 | + if (dentry->d_name.len != len) { | ||
1914 | + spin_unlock(&dentry->d_lock); | ||
1915 | + kfree(name); | ||
1916 | + goto retry; | ||
1917 | + } | ||
1918 | + memcpy(name, dentry->d_name.name, len); | ||
1919 | + spin_unlock(&dentry->d_lock); | ||
1920 | + | ||
1921 | + name[len] = '\0'; | ||
1922 | + *ppath = name; | ||
1923 | + *ppathlen = len; | ||
1924 | + return 0; | ||
1925 | +} | ||
1926 | + | ||
1927 | static int build_dentry_path(struct dentry *dentry, struct inode *dir, | ||
1928 | const char **ppath, int *ppathlen, u64 *pino, | ||
1929 | - int *pfreepath) | ||
1930 | + bool *pfreepath, bool parent_locked) | ||
1931 | { | ||
1932 | + int ret; | ||
1933 | char *path; | ||
1934 | |||
1935 | rcu_read_lock(); | ||
1936 | @@ -1957,8 +1995,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir, | ||
1937 | if (dir && ceph_snap(dir) == CEPH_NOSNAP) { | ||
1938 | *pino = ceph_ino(dir); | ||
1939 | rcu_read_unlock(); | ||
1940 | - *ppath = dentry->d_name.name; | ||
1941 | - *ppathlen = dentry->d_name.len; | ||
1942 | + if (parent_locked) { | ||
1943 | + *ppath = dentry->d_name.name; | ||
1944 | + *ppathlen = dentry->d_name.len; | ||
1945 | + } else { | ||
1946 | + ret = clone_dentry_name(dentry, ppath, ppathlen); | ||
1947 | + if (ret) | ||
1948 | + return ret; | ||
1949 | + *pfreepath = true; | ||
1950 | + } | ||
1951 | return 0; | ||
1952 | } | ||
1953 | rcu_read_unlock(); | ||
1954 | @@ -1966,13 +2011,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir, | ||
1955 | if (IS_ERR(path)) | ||
1956 | return PTR_ERR(path); | ||
1957 | *ppath = path; | ||
1958 | - *pfreepath = 1; | ||
1959 | + *pfreepath = true; | ||
1960 | return 0; | ||
1961 | } | ||
1962 | |||
1963 | static int build_inode_path(struct inode *inode, | ||
1964 | const char **ppath, int *ppathlen, u64 *pino, | ||
1965 | - int *pfreepath) | ||
1966 | + bool *pfreepath) | ||
1967 | { | ||
1968 | struct dentry *dentry; | ||
1969 | char *path; | ||
1970 | @@ -1988,7 +2033,7 @@ static int build_inode_path(struct inode *inode, | ||
1971 | if (IS_ERR(path)) | ||
1972 | return PTR_ERR(path); | ||
1973 | *ppath = path; | ||
1974 | - *pfreepath = 1; | ||
1975 | + *pfreepath = true; | ||
1976 | return 0; | ||
1977 | } | ||
1978 | |||
1979 | @@ -1999,7 +2044,7 @@ static int build_inode_path(struct inode *inode, | ||
1980 | static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | ||
1981 | struct inode *rdiri, const char *rpath, | ||
1982 | u64 rino, const char **ppath, int *pathlen, | ||
1983 | - u64 *ino, int *freepath) | ||
1984 | + u64 *ino, bool *freepath, bool parent_locked) | ||
1985 | { | ||
1986 | int r = 0; | ||
1987 | |||
1988 | @@ -2009,7 +2054,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, | ||
1989 | ceph_snap(rinode)); | ||
1990 | } else if (rdentry) { | ||
1991 | r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino, | ||
1992 | - freepath); | ||
1993 | + freepath, parent_locked); | ||
1994 | dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, | ||
1995 | *ppath); | ||
1996 | } else if (rpath || rino) { | ||
1997 | @@ -2035,7 +2080,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | ||
1998 | const char *path2 = NULL; | ||
1999 | u64 ino1 = 0, ino2 = 0; | ||
2000 | int pathlen1 = 0, pathlen2 = 0; | ||
2001 | - int freepath1 = 0, freepath2 = 0; | ||
2002 | + bool freepath1 = false, freepath2 = false; | ||
2003 | int len; | ||
2004 | u16 releases; | ||
2005 | void *p, *end; | ||
2006 | @@ -2043,16 +2088,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, | ||
2007 | |||
2008 | ret = set_request_path_attr(req->r_inode, req->r_dentry, | ||
2009 | req->r_parent, req->r_path1, req->r_ino1.ino, | ||
2010 | - &path1, &pathlen1, &ino1, &freepath1); | ||
2011 | + &path1, &pathlen1, &ino1, &freepath1, | ||
2012 | + test_bit(CEPH_MDS_R_PARENT_LOCKED, | ||
2013 | + &req->r_req_flags)); | ||
2014 | if (ret < 0) { | ||
2015 | msg = ERR_PTR(ret); | ||
2016 | goto out; | ||
2017 | } | ||
2018 | |||
2019 | + /* If r_old_dentry is set, then assume that its parent is locked */ | ||
2020 | ret = set_request_path_attr(NULL, req->r_old_dentry, | ||
2021 | req->r_old_dentry_dir, | ||
2022 | req->r_path2, req->r_ino2.ino, | ||
2023 | - &path2, &pathlen2, &ino2, &freepath2); | ||
2024 | + &path2, &pathlen2, &ino2, &freepath2, true); | ||
2025 | if (ret < 0) { | ||
2026 | msg = ERR_PTR(ret); | ||
2027 | goto out_free1; | ||
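
clone_dentry_name() above uses an optimistic read-allocate-recheck loop: read d_name.len without the lock, allocate the copy, then take d_lock and verify the length still matches before copying, retrying if a concurrent rename changed the name. A userspace sketch of the same pattern follows, assuming a pthread mutex in place of d_lock and a plain read in place of READ_ONCE(); name_src and its fields are stand-ins, not kernel types.

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    struct name_src {
        pthread_mutex_t lock;
        size_t len;
        const char *name;
    };

    static char *clone_name(struct name_src *n)
    {
        size_t len;
        char *copy;

    retry:
        len = n->len;               /* optimistic read (READ_ONCE in the kernel) */
        copy = malloc(len + 1);
        if (!copy)
            return NULL;

        pthread_mutex_lock(&n->lock);
        if (n->len != len) {        /* renamed under us: free and retry */
            pthread_mutex_unlock(&n->lock);
            free(copy);
            goto retry;
        }
        memcpy(copy, n->name, len); /* name is stable while the lock is held */
        pthread_mutex_unlock(&n->lock);

        copy[len] = '\0';
        return copy;
    }

    int main(void)
    {
        struct name_src n = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .len = 5,
            .name = "hello",
        };
        char *copy = clone_name(&n);

        free(copy);
        return 0;
    }

Allocating outside the lock keeps the critical section to a length check and a memcpy, which is why the loop is worth the occasional retry.
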
2028 | diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c | ||
2029 | index f74193da0e09..1f46b02f7314 100644 | ||
2030 | --- a/fs/ceph/snap.c | ||
2031 | +++ b/fs/ceph/snap.c | ||
2032 | @@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) | ||
2033 | old_snapc = NULL; | ||
2034 | |||
2035 | update_snapc: | ||
2036 | - if (ci->i_head_snapc) { | ||
2037 | + if (ci->i_wrbuffer_ref_head == 0 && | ||
2038 | + ci->i_wr_ref == 0 && | ||
2039 | + ci->i_dirty_caps == 0 && | ||
2040 | + ci->i_flushing_caps == 0) { | ||
2041 | + ci->i_head_snapc = NULL; | ||
2042 | + } else { | ||
2043 | ci->i_head_snapc = ceph_get_snap_context(new_snapc); | ||
2044 | dout(" new snapc is %p\n", new_snapc); | ||
2045 | } | ||
2046 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c | ||
2047 | index b59ebed4f615..1fadd314ae7f 100644 | ||
2048 | --- a/fs/cifs/inode.c | ||
2049 | +++ b/fs/cifs/inode.c | ||
2050 | @@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, | ||
2051 | if (rc == 0 || rc != -EBUSY) | ||
2052 | goto do_rename_exit; | ||
2053 | |||
2054 | + /* Don't fall back to using SMB on SMB 2+ mount */ | ||
2055 | + if (server->vals->protocol_id != 0) | ||
2056 | + goto do_rename_exit; | ||
2057 | + | ||
2058 | /* open-file renames don't work across directories */ | ||
2059 | if (to_dentry->d_parent != from_dentry->d_parent) | ||
2060 | goto do_rename_exit; | ||
2061 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c | ||
2062 | index c6fd3acc5560..33afb637e6f8 100644 | ||
2063 | --- a/fs/cifs/smb2pdu.c | ||
2064 | +++ b/fs/cifs/smb2pdu.c | ||
2065 | @@ -3285,6 +3285,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | ||
2066 | rc); | ||
2067 | } | ||
2068 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | ||
2069 | + cifs_small_buf_release(req); | ||
2070 | return rc == -ENODATA ? 0 : rc; | ||
2071 | } else | ||
2072 | trace_smb3_read_done(xid, req->PersistentFileId, | ||
2073 | diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c | ||
2074 | index c0ba5206cd9d..006c277dc22e 100644 | ||
2075 | --- a/fs/ext4/xattr.c | ||
2076 | +++ b/fs/ext4/xattr.c | ||
2077 | @@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage) | ||
2078 | bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO); | ||
2079 | if (IS_ERR(bh)) { | ||
2080 | ret = PTR_ERR(bh); | ||
2081 | + bh = NULL; | ||
2082 | goto out; | ||
2083 | } | ||
2084 | |||
2085 | @@ -2907,6 +2908,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode, | ||
2086 | if (error == -EIO) | ||
2087 | EXT4_ERROR_INODE(inode, "block %llu read error", | ||
2088 | EXT4_I(inode)->i_file_acl); | ||
2089 | + bh = NULL; | ||
2090 | goto cleanup; | ||
2091 | } | ||
2092 | error = ext4_xattr_check_block(inode, bh); | ||
2093 | @@ -3063,6 +3065,7 @@ ext4_xattr_block_cache_find(struct inode *inode, | ||
2094 | if (IS_ERR(bh)) { | ||
2095 | if (PTR_ERR(bh) == -ENOMEM) | ||
2096 | return NULL; | ||
2097 | + bh = NULL; | ||
2098 | EXT4_ERROR_INODE(inode, "block %lu read error", | ||
2099 | (unsigned long)ce->e_value); | ||
2100 | } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) { | ||
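
The three fs/ext4/xattr.c hunks above fix the same bug pattern: after ext4_sb_bread() fails, bh holds an encoded errno (an ERR_PTR), and the shared cleanup path would hand that non-NULL garbage to brelse(). Resetting bh to NULL makes the cleanup a safe no-op. Here is a standalone sketch of the pattern, with ERR_PTR()/IS_ERR() re-modeled so it builds outside the kernel; read_block() and release() are invented for illustration.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-4095)

    static void release(void *buf)
    {
        if (buf)                     /* like brelse(): NULL is a safe no-op */
            printf("releasing %p\n", buf);
    }

    static int read_block(int fail)
    {
        void *buf = fail ? ERR_PTR(-EIO) : &(int){ 0 };
        int ret = 0;

        if (IS_ERR(buf)) {
            ret = (int)(intptr_t)buf;
            buf = NULL;              /* the fix: cleanup must see NULL, not -EIO */
        }
        release(buf);                /* shared cleanup path */
        return ret;
    }

    int main(void)
    {
        printf("err=%d\n", read_block(1));  /* error path: nothing released */
        printf("err=%d\n", read_block(0));  /* success path: buffer released */
        return 0;
    }
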
2101 | diff --git a/fs/nfs/super.c b/fs/nfs/super.c | ||
2102 | index 6b666d187907..6df9b85caf20 100644 | ||
2103 | --- a/fs/nfs/super.c | ||
2104 | +++ b/fs/nfs/super.c | ||
2105 | @@ -2052,7 +2052,8 @@ static int nfs23_validate_mount_data(void *options, | ||
2106 | memcpy(sap, &data->addr, sizeof(data->addr)); | ||
2107 | args->nfs_server.addrlen = sizeof(data->addr); | ||
2108 | args->nfs_server.port = ntohs(data->addr.sin_port); | ||
2109 | - if (!nfs_verify_server_address(sap)) | ||
2110 | + if (sap->sa_family != AF_INET || | ||
2111 | + !nfs_verify_server_address(sap)) | ||
2112 | goto out_no_address; | ||
2113 | |||
2114 | if (!(data->flags & NFS_MOUNT_TCP)) | ||
2115 | diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c | ||
2116 | index 601bf33c26a0..ebbb0285addb 100644 | ||
2117 | --- a/fs/nfsd/nfs4callback.c | ||
2118 | +++ b/fs/nfsd/nfs4callback.c | ||
2119 | @@ -926,8 +926,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) | ||
2120 | cb->cb_seq_status = 1; | ||
2121 | cb->cb_status = 0; | ||
2122 | if (minorversion) { | ||
2123 | - if (!nfsd41_cb_get_slot(clp, task)) | ||
2124 | + if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task)) | ||
2125 | return; | ||
2126 | + cb->cb_holds_slot = true; | ||
2127 | } | ||
2128 | rpc_call_start(task); | ||
2129 | } | ||
2130 | @@ -954,6 +955,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback | ||
2131 | return true; | ||
2132 | } | ||
2133 | |||
2134 | + if (!cb->cb_holds_slot) | ||
2135 | + goto need_restart; | ||
2136 | + | ||
2137 | switch (cb->cb_seq_status) { | ||
2138 | case 0: | ||
2139 | /* | ||
2140 | @@ -992,6 +996,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback | ||
2141 | cb->cb_seq_status); | ||
2142 | } | ||
2143 | |||
2144 | + cb->cb_holds_slot = false; | ||
2145 | clear_bit(0, &clp->cl_cb_slot_busy); | ||
2146 | rpc_wake_up_next(&clp->cl_cb_waitq); | ||
2147 | dprintk("%s: freed slot, new seqid=%d\n", __func__, | ||
2148 | @@ -1199,6 +1204,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp, | ||
2149 | cb->cb_seq_status = 1; | ||
2150 | cb->cb_status = 0; | ||
2151 | cb->cb_need_restart = false; | ||
2152 | + cb->cb_holds_slot = false; | ||
2153 | } | ||
2154 | |||
2155 | void nfsd4_run_cb(struct nfsd4_callback *cb) | ||
2156 | diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h | ||
2157 | index 0b15dac7e609..0f07ad6dc1ef 100644 | ||
2158 | --- a/fs/nfsd/state.h | ||
2159 | +++ b/fs/nfsd/state.h | ||
2160 | @@ -70,6 +70,7 @@ struct nfsd4_callback { | ||
2161 | int cb_seq_status; | ||
2162 | int cb_status; | ||
2163 | bool cb_need_restart; | ||
2164 | + bool cb_holds_slot; | ||
2165 | }; | ||
2166 | |||
2167 | struct nfsd4_callback_ops { | ||
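
The nfsd4 changes above add cb_holds_slot so a callback that gets prepared more than once neither re-acquires the single backchannel slot nor frees it twice; nfsd4_cb_sequence_done() also restarts early when the callback never held the slot. Below is a minimal model of that bookkeeping, using an atomic_flag as a stand-in for the cl_cb_slot_busy bit; cb_prepare()/cb_done() are invented names, not the kernel functions.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_flag slot_busy = ATOMIC_FLAG_INIT;

    struct callback {
        bool holds_slot;
    };

    static bool cb_prepare(struct callback *cb)
    {
        /* a re-entered prepare must not try to take the slot twice */
        if (!cb->holds_slot && atomic_flag_test_and_set(&slot_busy))
            return false;           /* slot busy: caller waits and retries */
        cb->holds_slot = true;
        return true;
    }

    static void cb_done(struct callback *cb)
    {
        if (!cb->holds_slot)
            return;                 /* never held it: nothing to release */
        cb->holds_slot = false;
        atomic_flag_clear(&slot_busy);
    }

    int main(void)
    {
        struct callback cb = { .holds_slot = false };

        cb_prepare(&cb);    /* takes the slot */
        cb_prepare(&cb);    /* second prepare: keeps it, no double-take */
        cb_done(&cb);       /* releases exactly once */
        return 0;
    }
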
2168 | diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c | ||
2169 | index d65390727541..7325baa8f9d4 100644 | ||
2170 | --- a/fs/proc/proc_sysctl.c | ||
2171 | +++ b/fs/proc/proc_sysctl.c | ||
2172 | @@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header) | ||
2173 | if (--header->nreg) | ||
2174 | return; | ||
2175 | |||
2176 | - if (parent) | ||
2177 | + if (parent) { | ||
2178 | put_links(header); | ||
2179 | - start_unregistering(header); | ||
2180 | + start_unregistering(header); | ||
2181 | + } | ||
2182 | + | ||
2183 | if (!--header->count) | ||
2184 | kfree_rcu(header, rcu); | ||
2185 | |||
2186 | diff --git a/fs/splice.c b/fs/splice.c | ||
2187 | index 29e92b506394..c78e0e3ff6c4 100644 | ||
2188 | --- a/fs/splice.c | ||
2189 | +++ b/fs/splice.c | ||
2190 | @@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = { | ||
2191 | .get = generic_pipe_buf_get, | ||
2192 | }; | ||
2193 | |||
2194 | -static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, | ||
2195 | - struct pipe_buffer *buf) | ||
2196 | +int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe, | ||
2197 | + struct pipe_buffer *buf) | ||
2198 | { | ||
2199 | return 1; | ||
2200 | } | ||
2201 | diff --git a/include/linux/fs.h b/include/linux/fs.h | ||
2202 | index 7b6084854bfe..111c94c4baa1 100644 | ||
2203 | --- a/include/linux/fs.h | ||
2204 | +++ b/include/linux/fs.h | ||
2205 | @@ -304,13 +304,19 @@ enum rw_hint { | ||
2206 | |||
2207 | struct kiocb { | ||
2208 | struct file *ki_filp; | ||
2209 | + | ||
2210 | + /* The 'ki_filp' pointer is shared in a union for aio */ | ||
2211 | + randomized_struct_fields_start | ||
2212 | + | ||
2213 | loff_t ki_pos; | ||
2214 | void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); | ||
2215 | void *private; | ||
2216 | int ki_flags; | ||
2217 | u16 ki_hint; | ||
2218 | u16 ki_ioprio; /* See linux/ioprio.h */ | ||
2219 | -} __randomize_layout; | ||
2220 | + | ||
2221 | + randomized_struct_fields_end | ||
2222 | +}; | ||
2223 | |||
2224 | static inline bool is_sync_kiocb(struct kiocb *kiocb) | ||
2225 | { | ||
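
The include/linux/fs.h hunk above keeps ki_filp outside the randomized region because fs/aio.c (see the aio_kiocb hunk earlier in this patch) places struct kiocb in a union whose every member begins with a struct file *; the file pointer must therefore stay at offset 0 no matter how structure layout randomization shuffles the rest. A sketch of that common-initial-member trick follows, with simplified stand-in types rather than the kernel's.

    #include <assert.h>
    #include <stddef.h>

    struct file;                        /* opaque for the sketch */

    struct rw_req    { struct file *filp; long pos; };
    struct fsync_req { struct file *filp; int datasync; };

    struct aio_req {
        union {
            struct file *ki_filp;       /* direct view of the shared pointer */
            struct rw_req rw;
            struct fsync_req fsync;
        };
    };

    int main(void)
    {
        struct aio_req req = { .ki_filp = (struct file *)0x1 };

        /* the guarantee only holds if every member keeps filp at offset 0 */
        _Static_assert(offsetof(struct rw_req, filp) == 0, "rw");
        _Static_assert(offsetof(struct fsync_req, filp) == 0, "fsync");

        assert(req.rw.filp == req.ki_filp);
        assert(req.fsync.filp == req.ki_filp);
        return 0;
    }

This is what lets iocb_destroy() fput() through ki_filp regardless of which operation the union actually carried.
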
2226 | diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h | ||
2227 | index 3ecd7ea212ae..66ee63cd5968 100644 | ||
2228 | --- a/include/linux/pipe_fs_i.h | ||
2229 | +++ b/include/linux/pipe_fs_i.h | ||
2230 | @@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_info *); | ||
2231 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | ||
2232 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | ||
2233 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | ||
2234 | +int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); | ||
2235 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | ||
2236 | void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); | ||
2237 | |||
2238 | diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h | ||
2239 | index 0f39ac487012..f2be5d041ba3 100644 | ||
2240 | --- a/include/net/netfilter/nf_tables.h | ||
2241 | +++ b/include/net/netfilter/nf_tables.h | ||
2242 | @@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type); | ||
2243 | * @dtype: data type (verdict or numeric type defined by userspace) | ||
2244 | * @objtype: object type (see NFT_OBJECT_* definitions) | ||
2245 | * @size: maximum set size | ||
2246 | + * @use: number of rules references to this set | ||
2247 | * @nelems: number of elements | ||
2248 | * @ndeact: number of deactivated elements queued for removal | ||
2249 | * @timeout: default timeout value in jiffies | ||
2250 | @@ -407,6 +408,7 @@ struct nft_set { | ||
2251 | u32 dtype; | ||
2252 | u32 objtype; | ||
2253 | u32 size; | ||
2254 | + u32 use; | ||
2255 | atomic_t nelems; | ||
2256 | u32 ndeact; | ||
2257 | u64 timeout; | ||
2258 | @@ -416,7 +418,8 @@ struct nft_set { | ||
2259 | unsigned char *udata; | ||
2260 | /* runtime data below here */ | ||
2261 | const struct nft_set_ops *ops ____cacheline_aligned; | ||
2262 | - u16 flags:14, | ||
2263 | + u16 flags:13, | ||
2264 | + bound:1, | ||
2265 | genmask:2; | ||
2266 | u8 klen; | ||
2267 | u8 dlen; | ||
2268 | @@ -466,10 +469,15 @@ struct nft_set_binding { | ||
2269 | u32 flags; | ||
2270 | }; | ||
2271 | |||
2272 | +enum nft_trans_phase; | ||
2273 | +void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2274 | + struct nft_set_binding *binding, | ||
2275 | + enum nft_trans_phase phase); | ||
2276 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2277 | struct nft_set_binding *binding); | ||
2278 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2279 | - struct nft_set_binding *binding); | ||
2280 | + struct nft_set_binding *binding, bool commit); | ||
2281 | +void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); | ||
2282 | |||
2283 | /** | ||
2284 | * enum nft_set_extensions - set extension type IDs | ||
2285 | @@ -689,10 +697,12 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, | ||
2286 | gcb->elems[gcb->head.cnt++] = elem; | ||
2287 | } | ||
2288 | |||
2289 | +struct nft_expr_ops; | ||
2290 | /** | ||
2291 | * struct nft_expr_type - nf_tables expression type | ||
2292 | * | ||
2293 | * @select_ops: function to select nft_expr_ops | ||
2294 | + * @release_ops: release nft_expr_ops | ||
2295 | * @ops: default ops, used when no select_ops functions is present | ||
2296 | * @list: used internally | ||
2297 | * @name: Identifier | ||
2298 | @@ -705,6 +715,7 @@ static inline void nft_set_gc_batch_add(struct nft_set_gc_batch *gcb, | ||
2299 | struct nft_expr_type { | ||
2300 | const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *, | ||
2301 | const struct nlattr * const tb[]); | ||
2302 | + void (*release_ops)(const struct nft_expr_ops *ops); | ||
2303 | const struct nft_expr_ops *ops; | ||
2304 | struct list_head list; | ||
2305 | const char *name; | ||
2306 | @@ -718,13 +729,22 @@ struct nft_expr_type { | ||
2307 | #define NFT_EXPR_STATEFUL 0x1 | ||
2308 | #define NFT_EXPR_GC 0x2 | ||
2309 | |||
2310 | +enum nft_trans_phase { | ||
2311 | + NFT_TRANS_PREPARE, | ||
2312 | + NFT_TRANS_ABORT, | ||
2313 | + NFT_TRANS_COMMIT, | ||
2314 | + NFT_TRANS_RELEASE | ||
2315 | +}; | ||
2316 | + | ||
2317 | /** | ||
2318 | * struct nft_expr_ops - nf_tables expression operations | ||
2319 | * | ||
2320 | * @eval: Expression evaluation function | ||
2321 | * @size: full expression size, including private data size | ||
2322 | * @init: initialization function | ||
2323 | - * @destroy: destruction function | ||
2324 | + * @activate: activate expression in the next generation | ||
2325 | + * @deactivate: deactivate expression in next generation | ||
2326 | + * @destroy: destruction function, called after synchronize_rcu | ||
2327 | * @dump: function to dump parameters | ||
2328 | * @type: expression type | ||
2329 | * @validate: validate expression, called during loop detection | ||
2330 | @@ -745,7 +765,8 @@ struct nft_expr_ops { | ||
2331 | void (*activate)(const struct nft_ctx *ctx, | ||
2332 | const struct nft_expr *expr); | ||
2333 | void (*deactivate)(const struct nft_ctx *ctx, | ||
2334 | - const struct nft_expr *expr); | ||
2335 | + const struct nft_expr *expr, | ||
2336 | + enum nft_trans_phase phase); | ||
2337 | void (*destroy)(const struct nft_ctx *ctx, | ||
2338 | const struct nft_expr *expr); | ||
2339 | void (*destroy_clone)(const struct nft_ctx *ctx, | ||
2340 | diff --git a/include/net/netrom.h b/include/net/netrom.h | ||
2341 | index 5a0714ff500f..80f15b1c1a48 100644 | ||
2342 | --- a/include/net/netrom.h | ||
2343 | +++ b/include/net/netrom.h | ||
2344 | @@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *); | ||
2345 | int nr_t1timer_running(struct sock *); | ||
2346 | |||
2347 | /* sysctl_net_netrom.c */ | ||
2348 | -void nr_register_sysctl(void); | ||
2349 | +int nr_register_sysctl(void); | ||
2350 | void nr_unregister_sysctl(void); | ||
2351 | |||
2352 | #endif | ||
2353 | diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c | ||
2354 | index 91e4202b0634..72c07059ef37 100644 | ||
2355 | --- a/kernel/sched/deadline.c | ||
2356 | +++ b/kernel/sched/deadline.c | ||
2357 | @@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p) | ||
2358 | if (dl_entity_is_special(dl_se)) | ||
2359 | return; | ||
2360 | |||
2361 | - WARN_ON(hrtimer_active(&dl_se->inactive_timer)); | ||
2362 | WARN_ON(dl_se->dl_non_contending); | ||
2363 | |||
2364 | zerolag_time = dl_se->deadline - | ||
2365 | @@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p) | ||
2366 | * If the "0-lag time" already passed, decrease the active | ||
2367 | * utilization now, instead of starting a timer | ||
2368 | */ | ||
2369 | - if (zerolag_time < 0) { | ||
2370 | + if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) { | ||
2371 | if (dl_task(p)) | ||
2372 | sub_running_bw(dl_se, dl_rq); | ||
2373 | if (!dl_task(p) || p->state == TASK_DEAD) { | ||
2374 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c | ||
2375 | index 4aa8e7d90c25..d31916366d39 100644 | ||
2376 | --- a/kernel/sched/fair.c | ||
2377 | +++ b/kernel/sched/fair.c | ||
2378 | @@ -2016,6 +2016,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period) | ||
2379 | if (p->last_task_numa_placement) { | ||
2380 | delta = runtime - p->last_sum_exec_runtime; | ||
2381 | *period = now - p->last_task_numa_placement; | ||
2382 | + | ||
2383 | + /* Avoid time going backwards, prevent potential divide error: */ | ||
2384 | + if (unlikely((s64)*period < 0)) | ||
2385 | + *period = 0; | ||
2386 | } else { | ||
2387 | delta = p->se.avg.load_sum; | ||
2388 | *period = LOAD_AVG_MAX; | ||
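
The kernel/sched/fair.c hunk guards numa_get_avg_runtime() against a period that went negative when the two timestamps raced, since the caller goes on to divide by it. A tiny model of the clamp, with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t avg_per_period(uint64_t runtime, int64_t period)
    {
        if (period < 0)              /* time went backwards: treat as empty */
            period = 0;
        return period ? runtime / (uint64_t)period : 0;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)avg_per_period(1000, -5));
        return 0;
    }
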
2389 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c | ||
2390 | index 34b4c32b0692..805aef83b5cf 100644 | ||
2391 | --- a/kernel/trace/ring_buffer.c | ||
2392 | +++ b/kernel/trace/ring_buffer.c | ||
2393 | @@ -730,7 +730,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) | ||
2394 | |||
2395 | preempt_disable_notrace(); | ||
2396 | time = rb_time_stamp(buffer); | ||
2397 | - preempt_enable_no_resched_notrace(); | ||
2398 | + preempt_enable_notrace(); | ||
2399 | |||
2400 | return time; | ||
2401 | } | ||
2402 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c | ||
2403 | index c65cea71d1ee..5455ee05bc3b 100644 | ||
2404 | --- a/kernel/trace/trace.c | ||
2405 | +++ b/kernel/trace/trace.c | ||
2406 | @@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, | ||
2407 | * not modified. | ||
2408 | */ | ||
2409 | pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL); | ||
2410 | - if (!pid_list) | ||
2411 | + if (!pid_list) { | ||
2412 | + trace_parser_put(&parser); | ||
2413 | return -ENOMEM; | ||
2414 | + } | ||
2415 | |||
2416 | pid_list->pid_max = READ_ONCE(pid_max); | ||
2417 | |||
2418 | @@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids, | ||
2419 | |||
2420 | pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3); | ||
2421 | if (!pid_list->pids) { | ||
2422 | + trace_parser_put(&parser); | ||
2423 | kfree(pid_list); | ||
2424 | return -ENOMEM; | ||
2425 | } | ||
2426 | @@ -6800,19 +6803,23 @@ struct buffer_ref { | ||
2427 | struct ring_buffer *buffer; | ||
2428 | void *page; | ||
2429 | int cpu; | ||
2430 | - int ref; | ||
2431 | + refcount_t refcount; | ||
2432 | }; | ||
2433 | |||
2434 | +static void buffer_ref_release(struct buffer_ref *ref) | ||
2435 | +{ | ||
2436 | + if (!refcount_dec_and_test(&ref->refcount)) | ||
2437 | + return; | ||
2438 | + ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); | ||
2439 | + kfree(ref); | ||
2440 | +} | ||
2441 | + | ||
2442 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | ||
2443 | struct pipe_buffer *buf) | ||
2444 | { | ||
2445 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
2446 | |||
2447 | - if (--ref->ref) | ||
2448 | - return; | ||
2449 | - | ||
2450 | - ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); | ||
2451 | - kfree(ref); | ||
2452 | + buffer_ref_release(ref); | ||
2453 | buf->private = 0; | ||
2454 | } | ||
2455 | |||
2456 | @@ -6821,7 +6828,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | ||
2457 | { | ||
2458 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
2459 | |||
2460 | - ref->ref++; | ||
2461 | + refcount_inc(&ref->refcount); | ||
2462 | } | ||
2463 | |||
2464 | /* Pipe buffer operations for a buffer. */ | ||
2465 | @@ -6829,7 +6836,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = { | ||
2466 | .can_merge = 0, | ||
2467 | .confirm = generic_pipe_buf_confirm, | ||
2468 | .release = buffer_pipe_buf_release, | ||
2469 | - .steal = generic_pipe_buf_steal, | ||
2470 | + .steal = generic_pipe_buf_nosteal, | ||
2471 | .get = buffer_pipe_buf_get, | ||
2472 | }; | ||
2473 | |||
2474 | @@ -6842,11 +6849,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | ||
2475 | struct buffer_ref *ref = | ||
2476 | (struct buffer_ref *)spd->partial[i].private; | ||
2477 | |||
2478 | - if (--ref->ref) | ||
2479 | - return; | ||
2480 | - | ||
2481 | - ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page); | ||
2482 | - kfree(ref); | ||
2483 | + buffer_ref_release(ref); | ||
2484 | spd->partial[i].private = 0; | ||
2485 | } | ||
2486 | |||
2487 | @@ -6901,7 +6904,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | ||
2488 | break; | ||
2489 | } | ||
2490 | |||
2491 | - ref->ref = 1; | ||
2492 | + refcount_set(&ref->refcount, 1); | ||
2493 | ref->buffer = iter->trace_buffer->buffer; | ||
2494 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); | ||
2495 | if (IS_ERR(ref->page)) { | ||
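
The kernel/trace/trace.c hunks above convert buffer_ref's plain int counter to refcount_t, whose defining property is saturation: once the counter would overflow it sticks at its ceiling instead of wrapping back through zero and triggering a premature free. sat_inc()/sat_dec_and_test() below are a simplified stand-in for that behaviour, not the kernel's refcount implementation.

    #include <limits.h>
    #include <stdatomic.h>

    static void sat_inc(atomic_uint *ref)
    {
        unsigned int old = atomic_load(ref);

        /* once saturated, stay saturated instead of wrapping to zero */
        while (old != UINT_MAX &&
               !atomic_compare_exchange_weak(ref, &old, old + 1))
            ;
    }

    static int sat_dec_and_test(atomic_uint *ref)
    {
        unsigned int old = atomic_load(ref);

        while (old != UINT_MAX &&
               !atomic_compare_exchange_weak(ref, &old, old - 1))
            ;
        return old != UINT_MAX && old == 1;  /* we dropped the last reference */
    }

With a plain int, enough buffer_pipe_buf_get() calls could wrap the counter so that a later release saw it reach zero early and freed the ring-buffer page while other pipe buffers still referenced it; the switch to generic_pipe_buf_nosteal closes the related path where a stolen page escaped the refcount entirely.
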
2496 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c | ||
2497 | index 0280deac392e..cd8b61bded78 100644 | ||
2498 | --- a/kernel/workqueue.c | ||
2499 | +++ b/kernel/workqueue.c | ||
2500 | @@ -2908,6 +2908,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) | ||
2501 | if (WARN_ON(!wq_online)) | ||
2502 | return false; | ||
2503 | |||
2504 | + if (WARN_ON(!work->func)) | ||
2505 | + return false; | ||
2506 | + | ||
2507 | if (!from_cancel) { | ||
2508 | lock_map_acquire(&work->lockdep_map); | ||
2509 | lock_map_release(&work->lockdep_map); | ||
2510 | diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug | ||
2511 | index 4966c4fbe7f7..3dea52f7be9c 100644 | ||
2512 | --- a/lib/Kconfig.debug | ||
2513 | +++ b/lib/Kconfig.debug | ||
2514 | @@ -1934,6 +1934,7 @@ config TEST_KMOD | ||
2515 | depends on m | ||
2516 | depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS | ||
2517 | depends on NETDEVICES && NET_CORE && INET # for TUN | ||
2518 | + depends on BLOCK | ||
2519 | select TEST_LKM | ||
2520 | select XFS_FS | ||
2521 | select TUN | ||
2522 | diff --git a/mm/memory.c b/mm/memory.c | ||
2523 | index 5b3f71bcd1ae..9c69278173b7 100644 | ||
2524 | --- a/mm/memory.c | ||
2525 | +++ b/mm/memory.c | ||
2526 | @@ -1787,10 +1787,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr, | ||
2527 | * in may not match the PFN we have mapped if the | ||
2528 | * mapped PFN is a writeable COW page. In the mkwrite | ||
2529 | * case we are creating a writable PTE for a shared | ||
2530 | - * mapping and we expect the PFNs to match. | ||
2531 | + * mapping and we expect the PFNs to match. If they | ||
2532 | + * don't match, we are likely racing with block | ||
2533 | + * allocation and mapping invalidation so just skip the | ||
2534 | + * update. | ||
2535 | */ | ||
2536 | - if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn))) | ||
2537 | + if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) { | ||
2538 | + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); | ||
2539 | goto out_unlock; | ||
2540 | + } | ||
2541 | entry = *pte; | ||
2542 | goto out_mkwrite; | ||
2543 | } else | ||
2544 | diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c | ||
2545 | index f77888ec93f1..0bb4d712b80c 100644 | ||
2546 | --- a/net/bridge/netfilter/ebtables.c | ||
2547 | +++ b/net/bridge/netfilter/ebtables.c | ||
2548 | @@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, | ||
2549 | if (match_kern) | ||
2550 | match_kern->match_size = ret; | ||
2551 | |||
2552 | - if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) | ||
2553 | + /* rule should have no remaining data after target */ | ||
2554 | + if (type == EBT_COMPAT_TARGET && size_left) | ||
2555 | return -EINVAL; | ||
2556 | |||
2557 | match32 = (struct compat_ebt_entry_mwt *) buf; | ||
2558 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c | ||
2559 | index 98c81c21b753..8bacbcd2db90 100644 | ||
2560 | --- a/net/ipv4/route.c | ||
2561 | +++ b/net/ipv4/route.c | ||
2562 | @@ -1185,25 +1185,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) | ||
2563 | return dst; | ||
2564 | } | ||
2565 | |||
2566 | -static void ipv4_link_failure(struct sk_buff *skb) | ||
2567 | +static void ipv4_send_dest_unreach(struct sk_buff *skb) | ||
2568 | { | ||
2569 | struct ip_options opt; | ||
2570 | - struct rtable *rt; | ||
2571 | int res; | ||
2572 | |||
2573 | /* Recompile ip options since IPCB may not be valid anymore. | ||
2574 | + * Also check we have a reasonable ipv4 header. | ||
2575 | */ | ||
2576 | - memset(&opt, 0, sizeof(opt)); | ||
2577 | - opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr); | ||
2578 | + if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) || | ||
2579 | + ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5) | ||
2580 | + return; | ||
2581 | |||
2582 | - rcu_read_lock(); | ||
2583 | - res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); | ||
2584 | - rcu_read_unlock(); | ||
2585 | + memset(&opt, 0, sizeof(opt)); | ||
2586 | + if (ip_hdr(skb)->ihl > 5) { | ||
2587 | + if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4)) | ||
2588 | + return; | ||
2589 | + opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr); | ||
2590 | |||
2591 | - if (res) | ||
2592 | - return; | ||
2593 | + rcu_read_lock(); | ||
2594 | + res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL); | ||
2595 | + rcu_read_unlock(); | ||
2596 | |||
2597 | + if (res) | ||
2598 | + return; | ||
2599 | + } | ||
2600 | __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt); | ||
2601 | +} | ||
2602 | + | ||
2603 | +static void ipv4_link_failure(struct sk_buff *skb) | ||
2604 | +{ | ||
2605 | + struct rtable *rt; | ||
2606 | + | ||
2607 | + ipv4_send_dest_unreach(skb); | ||
2608 | |||
2609 | rt = skb_rtable(skb); | ||
2610 | if (rt) | ||
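
ipv4_send_dest_unreach() above refuses to recompile IP options until the skb demonstrably holds a sane IPv4 header: the base 20 bytes must be present, the version must be 4, ihl must be at least 5 words, and the full options area must be pullable before it is parsed. A userspace sketch of those checks; struct iphdr_min assumes the little-endian bitfield layout the kernel uses for the version/ihl byte and is trimmed to the fields involved.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct iphdr_min {
        uint8_t ihl:4, version:4;
    };

    static bool ipv4_header_sane(const uint8_t *pkt, size_t len)
    {
        const struct iphdr_min *iph = (const void *)pkt;

        if (len < 20)                        /* no room for the base header */
            return false;
        if (iph->version != 4 || iph->ihl < 5)
            return false;
        return len >= (size_t)iph->ihl * 4;  /* options area fully present */
    }

    int main(void)
    {
        uint8_t pkt[20] = { 0x45 };          /* version 4, ihl 5, no options */

        return ipv4_header_sane(pkt, sizeof(pkt)) ? 0 : 1;
    }

Only when ihl exceeds 5 does the kernel function go on to pull and compile the options; a minimal header skips __ip_options_compile() entirely and sends the ICMP error with empty options.
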
2611 | diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c | ||
2612 | index 891ed2f91467..ce64453d337d 100644 | ||
2613 | --- a/net/ipv4/sysctl_net_ipv4.c | ||
2614 | +++ b/net/ipv4/sysctl_net_ipv4.c | ||
2615 | @@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 }; | ||
2616 | static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; | ||
2617 | static int comp_sack_nr_max = 255; | ||
2618 | static u32 u32_max_div_HZ = UINT_MAX / HZ; | ||
2619 | +static int one_day_secs = 24 * 3600; | ||
2620 | |||
2621 | /* obsolete */ | ||
2622 | static int sysctl_tcp_low_latency __read_mostly; | ||
2623 | @@ -1140,7 +1141,9 @@ static struct ctl_table ipv4_net_table[] = { | ||
2624 | .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen, | ||
2625 | .maxlen = sizeof(int), | ||
2626 | .mode = 0644, | ||
2627 | - .proc_handler = proc_dointvec | ||
2628 | + .proc_handler = proc_dointvec_minmax, | ||
2629 | + .extra1 = &zero, | ||
2630 | + .extra2 = &one_day_secs | ||
2631 | }, | ||
2632 | { | ||
2633 | .procname = "tcp_autocorking", | ||
2634 | diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c | ||
2635 | index 8fd8d06454d6..2d4e048762f6 100644 | ||
2636 | --- a/net/netfilter/ipvs/ip_vs_ctl.c | ||
2637 | +++ b/net/netfilter/ipvs/ip_vs_ctl.c | ||
2638 | @@ -896,12 +896,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | ||
2639 | { | ||
2640 | struct ip_vs_dest *dest; | ||
2641 | unsigned int atype, i; | ||
2642 | - int ret = 0; | ||
2643 | |||
2644 | EnterFunction(2); | ||
2645 | |||
2646 | #ifdef CONFIG_IP_VS_IPV6 | ||
2647 | if (udest->af == AF_INET6) { | ||
2648 | + int ret; | ||
2649 | + | ||
2650 | atype = ipv6_addr_type(&udest->addr.in6); | ||
2651 | if ((!(atype & IPV6_ADDR_UNICAST) || | ||
2652 | atype & IPV6_ADDR_LINKLOCAL) && | ||
2653 | diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c | ||
2654 | index c06393fc716d..1af54119bafc 100644 | ||
2655 | --- a/net/netfilter/nf_tables_api.c | ||
2656 | +++ b/net/netfilter/nf_tables_api.c | ||
2657 | @@ -112,6 +112,23 @@ static void nft_trans_destroy(struct nft_trans *trans) | ||
2658 | kfree(trans); | ||
2659 | } | ||
2660 | |||
2661 | +static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) | ||
2662 | +{ | ||
2663 | + struct net *net = ctx->net; | ||
2664 | + struct nft_trans *trans; | ||
2665 | + | ||
2666 | + if (!nft_set_is_anonymous(set)) | ||
2667 | + return; | ||
2668 | + | ||
2669 | + list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { | ||
2670 | + if (trans->msg_type == NFT_MSG_NEWSET && | ||
2671 | + nft_trans_set(trans) == set) { | ||
2672 | + set->bound = true; | ||
2673 | + break; | ||
2674 | + } | ||
2675 | + } | ||
2676 | +} | ||
2677 | + | ||
2678 | static int nf_tables_register_hook(struct net *net, | ||
2679 | const struct nft_table *table, | ||
2680 | struct nft_chain *chain) | ||
2681 | @@ -222,14 +239,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, | ||
2682 | } | ||
2683 | |||
2684 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, | ||
2685 | - struct nft_rule *rule) | ||
2686 | + struct nft_rule *rule, | ||
2687 | + enum nft_trans_phase phase) | ||
2688 | { | ||
2689 | struct nft_expr *expr; | ||
2690 | |||
2691 | expr = nft_expr_first(rule); | ||
2692 | while (expr != nft_expr_last(rule) && expr->ops) { | ||
2693 | if (expr->ops->deactivate) | ||
2694 | - expr->ops->deactivate(ctx, expr); | ||
2695 | + expr->ops->deactivate(ctx, expr, phase); | ||
2696 | |||
2697 | expr = nft_expr_next(expr); | ||
2698 | } | ||
2699 | @@ -280,7 +298,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) | ||
2700 | nft_trans_destroy(trans); | ||
2701 | return err; | ||
2702 | } | ||
2703 | - nft_rule_expr_deactivate(ctx, rule); | ||
2704 | + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); | ||
2705 | |||
2706 | return 0; | ||
2707 | } | ||
2708 | @@ -301,7 +319,7 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) | ||
2709 | return 0; | ||
2710 | } | ||
2711 | |||
2712 | -static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, | ||
2713 | +static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, | ||
2714 | struct nft_set *set) | ||
2715 | { | ||
2716 | struct nft_trans *trans; | ||
2717 | @@ -321,7 +339,7 @@ static int nft_trans_set_add(struct nft_ctx *ctx, int msg_type, | ||
2718 | return 0; | ||
2719 | } | ||
2720 | |||
2721 | -static int nft_delset(struct nft_ctx *ctx, struct nft_set *set) | ||
2722 | +static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) | ||
2723 | { | ||
2724 | int err; | ||
2725 | |||
2726 | @@ -2105,6 +2123,7 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, | ||
2727 | { | ||
2728 | struct nft_expr_info info; | ||
2729 | struct nft_expr *expr; | ||
2730 | + struct module *owner; | ||
2731 | int err; | ||
2732 | |||
2733 | err = nf_tables_expr_parse(ctx, nla, &info); | ||
2734 | @@ -2124,7 +2143,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx, | ||
2735 | err3: | ||
2736 | kfree(expr); | ||
2737 | err2: | ||
2738 | - module_put(info.ops->type->owner); | ||
2739 | + owner = info.ops->type->owner; | ||
2740 | + if (info.ops->type->release_ops) | ||
2741 | + info.ops->type->release_ops(info.ops); | ||
2742 | + | ||
2743 | + module_put(owner); | ||
2744 | err1: | ||
2745 | return ERR_PTR(err); | ||
2746 | } | ||
2747 | @@ -2458,7 +2481,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | ||
2748 | static void nf_tables_rule_release(const struct nft_ctx *ctx, | ||
2749 | struct nft_rule *rule) | ||
2750 | { | ||
2751 | - nft_rule_expr_deactivate(ctx, rule); | ||
2752 | + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); | ||
2753 | nf_tables_rule_destroy(ctx, rule); | ||
2754 | } | ||
2755 | |||
2756 | @@ -3562,19 +3585,15 @@ err1: | ||
2757 | |||
2758 | static void nft_set_destroy(struct nft_set *set) | ||
2759 | { | ||
2760 | + if (WARN_ON(set->use > 0)) | ||
2761 | + return; | ||
2762 | + | ||
2763 | set->ops->destroy(set); | ||
2764 | module_put(to_set_type(set->ops)->owner); | ||
2765 | kfree(set->name); | ||
2766 | kvfree(set); | ||
2767 | } | ||
2768 | |||
2769 | -static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set) | ||
2770 | -{ | ||
2771 | - list_del_rcu(&set->list); | ||
2772 | - nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); | ||
2773 | - nft_set_destroy(set); | ||
2774 | -} | ||
2775 | - | ||
2776 | static int nf_tables_delset(struct net *net, struct sock *nlsk, | ||
2777 | struct sk_buff *skb, const struct nlmsghdr *nlh, | ||
2778 | const struct nlattr * const nla[], | ||
2779 | @@ -3609,7 +3628,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk, | ||
2780 | NL_SET_BAD_ATTR(extack, attr); | ||
2781 | return PTR_ERR(set); | ||
2782 | } | ||
2783 | - if (!list_empty(&set->bindings) || | ||
2784 | + if (set->use || | ||
2785 | (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) { | ||
2786 | NL_SET_BAD_ATTR(extack, attr); | ||
2787 | return -EBUSY; | ||
2788 | @@ -3639,6 +3658,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2789 | struct nft_set_binding *i; | ||
2790 | struct nft_set_iter iter; | ||
2791 | |||
2792 | + if (set->use == UINT_MAX) | ||
2793 | + return -EOVERFLOW; | ||
2794 | + | ||
2795 | if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) | ||
2796 | return -EBUSY; | ||
2797 | |||
2798 | @@ -3665,21 +3687,53 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2799 | bind: | ||
2800 | binding->chain = ctx->chain; | ||
2801 | list_add_tail_rcu(&binding->list, &set->bindings); | ||
2802 | + nft_set_trans_bind(ctx, set); | ||
2803 | + set->use++; | ||
2804 | + | ||
2805 | return 0; | ||
2806 | } | ||
2807 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); | ||
2808 | |||
2809 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2810 | - struct nft_set_binding *binding) | ||
2811 | + struct nft_set_binding *binding, bool event) | ||
2812 | { | ||
2813 | list_del_rcu(&binding->list); | ||
2814 | |||
2815 | - if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | ||
2816 | - nft_is_active(ctx->net, set)) | ||
2817 | - nf_tables_set_destroy(ctx, set); | ||
2818 | + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { | ||
2819 | + list_del_rcu(&set->list); | ||
2820 | + if (event) | ||
2821 | + nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, | ||
2822 | + GFP_KERNEL); | ||
2823 | + } | ||
2824 | } | ||
2825 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); | ||
2826 | |||
2827 | +void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
2828 | + struct nft_set_binding *binding, | ||
2829 | + enum nft_trans_phase phase) | ||
2830 | +{ | ||
2831 | + switch (phase) { | ||
2832 | + case NFT_TRANS_PREPARE: | ||
2833 | + set->use--; | ||
2834 | + return; | ||
2835 | + case NFT_TRANS_ABORT: | ||
2836 | + case NFT_TRANS_RELEASE: | ||
2837 | + set->use--; | ||
2838 | + /* fall through */ | ||
2839 | + default: | ||
2840 | + nf_tables_unbind_set(ctx, set, binding, | ||
2841 | + phase == NFT_TRANS_COMMIT); | ||
2842 | + } | ||
2843 | +} | ||
2844 | +EXPORT_SYMBOL_GPL(nf_tables_deactivate_set); | ||
2845 | + | ||
2846 | +void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) | ||
2847 | +{ | ||
2848 | + if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) | ||
2849 | + nft_set_destroy(set); | ||
2850 | +} | ||
2851 | +EXPORT_SYMBOL_GPL(nf_tables_destroy_set); | ||
2852 | + | ||
2853 | const struct nft_set_ext_type nft_set_ext_types[] = { | ||
2854 | [NFT_SET_EXT_KEY] = { | ||
2855 | .align = __alignof__(u32), | ||
2856 | @@ -6429,6 +6483,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | ||
2857 | nf_tables_rule_notify(&trans->ctx, | ||
2858 | nft_trans_rule(trans), | ||
2859 | NFT_MSG_DELRULE); | ||
2860 | + nft_rule_expr_deactivate(&trans->ctx, | ||
2861 | + nft_trans_rule(trans), | ||
2862 | + NFT_TRANS_COMMIT); | ||
2863 | break; | ||
2864 | case NFT_MSG_NEWSET: | ||
2865 | nft_clear(net, nft_trans_set(trans)); | ||
2866 | @@ -6577,7 +6634,9 @@ static int __nf_tables_abort(struct net *net) | ||
2867 | case NFT_MSG_NEWRULE: | ||
2868 | trans->ctx.chain->use--; | ||
2869 | list_del_rcu(&nft_trans_rule(trans)->list); | ||
2870 | - nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); | ||
2871 | + nft_rule_expr_deactivate(&trans->ctx, | ||
2872 | + nft_trans_rule(trans), | ||
2873 | + NFT_TRANS_ABORT); | ||
2874 | break; | ||
2875 | case NFT_MSG_DELRULE: | ||
2876 | trans->ctx.chain->use++; | ||
2877 | @@ -6587,6 +6646,10 @@ static int __nf_tables_abort(struct net *net) | ||
2878 | break; | ||
2879 | case NFT_MSG_NEWSET: | ||
2880 | trans->ctx.table->use--; | ||
2881 | + if (nft_trans_set(trans)->bound) { | ||
2882 | + nft_trans_destroy(trans); | ||
2883 | + break; | ||
2884 | + } | ||
2885 | list_del_rcu(&nft_trans_set(trans)->list); | ||
2886 | break; | ||
2887 | case NFT_MSG_DELSET: | ||
2888 | @@ -6595,8 +6658,11 @@ static int __nf_tables_abort(struct net *net) | ||
2889 | nft_trans_destroy(trans); | ||
2890 | break; | ||
2891 | case NFT_MSG_NEWSETELEM: | ||
2892 | + if (nft_trans_elem_set(trans)->bound) { | ||
2893 | + nft_trans_destroy(trans); | ||
2894 | + break; | ||
2895 | + } | ||
2896 | te = (struct nft_trans_elem *)trans->data; | ||
2897 | - | ||
2898 | te->set->ops->remove(net, te->set, &te->elem); | ||
2899 | atomic_dec(&te->set->nelems); | ||
2900 | break; | ||
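The nf_tables_api.c changes thread an nft_trans_phase through rule and set deactivation: PREPARE only drops the use counter (the binding may be restored on abort), ABORT and RELEASE drop it and unbind, and COMMIT unbinds with a DELSET notification; anonymous sets marked bound also have their NEWSET/NEWSETELEM transactions simply dropped on abort instead of being torn down twice. A toy model of the phase dispatch in nf_tables_deactivate_set(), user-space and illustrative only:

#include <stdio.h>

enum nft_trans_phase {
	NFT_TRANS_PREPARE,
	NFT_TRANS_ABORT,
	NFT_TRANS_RELEASE,
	NFT_TRANS_COMMIT,
};

struct toy_set { unsigned int use; };

static void toy_unbind(struct toy_set *set, int notify)
{
	printf("unbind (notify=%d), use now %u\n", notify, set->use);
}

static void toy_deactivate(struct toy_set *set, enum nft_trans_phase phase)
{
	switch (phase) {
	case NFT_TRANS_PREPARE:
		set->use--;		/* binding may be restored on abort */
		return;
	case NFT_TRANS_ABORT:
	case NFT_TRANS_RELEASE:
		set->use--;
		/* fall through */
	default:
		toy_unbind(set, phase == NFT_TRANS_COMMIT);
	}
}

int main(void)
{
	struct toy_set s = { .use = 2 };

	toy_deactivate(&s, NFT_TRANS_PREPARE);	/* silent: use drops to 1 */
	toy_deactivate(&s, NFT_TRANS_RELEASE);	/* use drops to 0, unbinds */
	return 0;
}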
2901 | diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c | ||
2902 | index 38da1f5436b4..1245e02239d9 100644 | ||
2903 | --- a/net/netfilter/nft_compat.c | ||
2904 | +++ b/net/netfilter/nft_compat.c | ||
2905 | @@ -23,19 +23,6 @@ | ||
2906 | #include <linux/netfilter_arp/arp_tables.h> | ||
2907 | #include <net/netfilter/nf_tables.h> | ||
2908 | |||
2909 | -struct nft_xt { | ||
2910 | - struct list_head head; | ||
2911 | - struct nft_expr_ops ops; | ||
2912 | - unsigned int refcnt; | ||
2913 | - | ||
2914 | - /* Unlike other expressions, ops doesn't have static storage duration. | ||
2915 | - * nft core assumes they do. We use kfree_rcu so that nft core can | ||
2916 | - * can check expr->ops->size even after nft_compat->destroy() frees | ||
2917 | - * the nft_xt struct that holds the ops structure. | ||
2918 | - */ | ||
2919 | - struct rcu_head rcu_head; | ||
2920 | -}; | ||
2921 | - | ||
2922 | /* Used for matches where *info is larger than X byte */ | ||
2923 | #define NFT_MATCH_LARGE_THRESH 192 | ||
2924 | |||
2925 | @@ -43,17 +30,6 @@ struct nft_xt_match_priv { | ||
2926 | void *info; | ||
2927 | }; | ||
2928 | |||
2929 | -static bool nft_xt_put(struct nft_xt *xt) | ||
2930 | -{ | ||
2931 | - if (--xt->refcnt == 0) { | ||
2932 | - list_del(&xt->head); | ||
2933 | - kfree_rcu(xt, rcu_head); | ||
2934 | - return true; | ||
2935 | - } | ||
2936 | - | ||
2937 | - return false; | ||
2938 | -} | ||
2939 | - | ||
2940 | static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx, | ||
2941 | const char *tablename) | ||
2942 | { | ||
2943 | @@ -248,7 +224,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
2944 | struct xt_target *target = expr->ops->data; | ||
2945 | struct xt_tgchk_param par; | ||
2946 | size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO])); | ||
2947 | - struct nft_xt *nft_xt; | ||
2948 | u16 proto = 0; | ||
2949 | bool inv = false; | ||
2950 | union nft_entry e = {}; | ||
2951 | @@ -272,8 +247,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
2952 | if (!target->target) | ||
2953 | return -EINVAL; | ||
2954 | |||
2955 | - nft_xt = container_of(expr->ops, struct nft_xt, ops); | ||
2956 | - nft_xt->refcnt++; | ||
2957 | return 0; | ||
2958 | } | ||
2959 | |||
2960 | @@ -292,8 +265,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | ||
2961 | if (par.target->destroy != NULL) | ||
2962 | par.target->destroy(&par); | ||
2963 | |||
2964 | - if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) | ||
2965 | - module_put(me); | ||
2966 | + module_put(me); | ||
2967 | + kfree(expr->ops); | ||
2968 | } | ||
2969 | |||
2970 | static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr) | ||
2971 | @@ -447,7 +420,6 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
2972 | struct xt_match *match = expr->ops->data; | ||
2973 | struct xt_mtchk_param par; | ||
2974 | size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO])); | ||
2975 | - struct nft_xt *nft_xt; | ||
2976 | u16 proto = 0; | ||
2977 | bool inv = false; | ||
2978 | union nft_entry e = {}; | ||
2979 | @@ -463,13 +435,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
2980 | |||
2981 | nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv); | ||
2982 | |||
2983 | - ret = xt_check_match(&par, size, proto, inv); | ||
2984 | - if (ret < 0) | ||
2985 | - return ret; | ||
2986 | - | ||
2987 | - nft_xt = container_of(expr->ops, struct nft_xt, ops); | ||
2988 | - nft_xt->refcnt++; | ||
2989 | - return 0; | ||
2990 | + return xt_check_match(&par, size, proto, inv); | ||
2991 | } | ||
2992 | |||
2993 | static int | ||
2994 | @@ -512,8 +478,8 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr, | ||
2995 | if (par.match->destroy != NULL) | ||
2996 | par.match->destroy(&par); | ||
2997 | |||
2998 | - if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) | ||
2999 | - module_put(me); | ||
3000 | + module_put(me); | ||
3001 | + kfree(expr->ops); | ||
3002 | } | ||
3003 | |||
3004 | static void | ||
3005 | @@ -715,22 +681,13 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { | ||
3006 | .cb = nfnl_nft_compat_cb, | ||
3007 | }; | ||
3008 | |||
3009 | -static LIST_HEAD(nft_match_list); | ||
3010 | - | ||
3011 | static struct nft_expr_type nft_match_type; | ||
3012 | |||
3013 | -static bool nft_match_cmp(const struct xt_match *match, | ||
3014 | - const char *name, u32 rev, u32 family) | ||
3015 | -{ | ||
3016 | - return strcmp(match->name, name) == 0 && match->revision == rev && | ||
3017 | - (match->family == NFPROTO_UNSPEC || match->family == family); | ||
3018 | -} | ||
3019 | - | ||
3020 | static const struct nft_expr_ops * | ||
3021 | nft_match_select_ops(const struct nft_ctx *ctx, | ||
3022 | const struct nlattr * const tb[]) | ||
3023 | { | ||
3024 | - struct nft_xt *nft_match; | ||
3025 | + struct nft_expr_ops *ops; | ||
3026 | struct xt_match *match; | ||
3027 | unsigned int matchsize; | ||
3028 | char *mt_name; | ||
3029 | @@ -746,14 +703,6 @@ nft_match_select_ops(const struct nft_ctx *ctx, | ||
3030 | rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); | ||
3031 | family = ctx->family; | ||
3032 | |||
3033 | - /* Re-use the existing match if it's already loaded. */ | ||
3034 | - list_for_each_entry(nft_match, &nft_match_list, head) { | ||
3035 | - struct xt_match *match = nft_match->ops.data; | ||
3036 | - | ||
3037 | - if (nft_match_cmp(match, mt_name, rev, family)) | ||
3038 | - return &nft_match->ops; | ||
3039 | - } | ||
3040 | - | ||
3041 | match = xt_request_find_match(family, mt_name, rev); | ||
3042 | if (IS_ERR(match)) | ||
3043 | return ERR_PTR(-ENOENT); | ||
3044 | @@ -763,66 +712,62 @@ nft_match_select_ops(const struct nft_ctx *ctx, | ||
3045 | goto err; | ||
3046 | } | ||
3047 | |||
3048 | - /* This is the first time we use this match, allocate operations */ | ||
3049 | - nft_match = kzalloc(sizeof(struct nft_xt), GFP_KERNEL); | ||
3050 | - if (nft_match == NULL) { | ||
3051 | + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); | ||
3052 | + if (!ops) { | ||
3053 | err = -ENOMEM; | ||
3054 | goto err; | ||
3055 | } | ||
3056 | |||
3057 | - nft_match->refcnt = 0; | ||
3058 | - nft_match->ops.type = &nft_match_type; | ||
3059 | - nft_match->ops.eval = nft_match_eval; | ||
3060 | - nft_match->ops.init = nft_match_init; | ||
3061 | - nft_match->ops.destroy = nft_match_destroy; | ||
3062 | - nft_match->ops.dump = nft_match_dump; | ||
3063 | - nft_match->ops.validate = nft_match_validate; | ||
3064 | - nft_match->ops.data = match; | ||
3065 | + ops->type = &nft_match_type; | ||
3066 | + ops->eval = nft_match_eval; | ||
3067 | + ops->init = nft_match_init; | ||
3068 | + ops->destroy = nft_match_destroy; | ||
3069 | + ops->dump = nft_match_dump; | ||
3070 | + ops->validate = nft_match_validate; | ||
3071 | + ops->data = match; | ||
3072 | |||
3073 | matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize)); | ||
3074 | if (matchsize > NFT_MATCH_LARGE_THRESH) { | ||
3075 | matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv)); | ||
3076 | |||
3077 | - nft_match->ops.eval = nft_match_large_eval; | ||
3078 | - nft_match->ops.init = nft_match_large_init; | ||
3079 | - nft_match->ops.destroy = nft_match_large_destroy; | ||
3080 | - nft_match->ops.dump = nft_match_large_dump; | ||
3081 | + ops->eval = nft_match_large_eval; | ||
3082 | + ops->init = nft_match_large_init; | ||
3083 | + ops->destroy = nft_match_large_destroy; | ||
3084 | + ops->dump = nft_match_large_dump; | ||
3085 | } | ||
3086 | |||
3087 | - nft_match->ops.size = matchsize; | ||
3088 | - | ||
3089 | - list_add(&nft_match->head, &nft_match_list); | ||
3090 | + ops->size = matchsize; | ||
3091 | |||
3092 | - return &nft_match->ops; | ||
3093 | + return ops; | ||
3094 | err: | ||
3095 | module_put(match->me); | ||
3096 | return ERR_PTR(err); | ||
3097 | } | ||
3098 | |||
3099 | +static void nft_match_release_ops(const struct nft_expr_ops *ops) | ||
3100 | +{ | ||
3101 | + struct xt_match *match = ops->data; | ||
3102 | + | ||
3103 | + module_put(match->me); | ||
3104 | + kfree(ops); | ||
3105 | +} | ||
3106 | + | ||
3107 | static struct nft_expr_type nft_match_type __read_mostly = { | ||
3108 | .name = "match", | ||
3109 | .select_ops = nft_match_select_ops, | ||
3110 | + .release_ops = nft_match_release_ops, | ||
3111 | .policy = nft_match_policy, | ||
3112 | .maxattr = NFTA_MATCH_MAX, | ||
3113 | .owner = THIS_MODULE, | ||
3114 | }; | ||
3115 | |||
3116 | -static LIST_HEAD(nft_target_list); | ||
3117 | - | ||
3118 | static struct nft_expr_type nft_target_type; | ||
3119 | |||
3120 | -static bool nft_target_cmp(const struct xt_target *tg, | ||
3121 | - const char *name, u32 rev, u32 family) | ||
3122 | -{ | ||
3123 | - return strcmp(tg->name, name) == 0 && tg->revision == rev && | ||
3124 | - (tg->family == NFPROTO_UNSPEC || tg->family == family); | ||
3125 | -} | ||
3126 | - | ||
3127 | static const struct nft_expr_ops * | ||
3128 | nft_target_select_ops(const struct nft_ctx *ctx, | ||
3129 | const struct nlattr * const tb[]) | ||
3130 | { | ||
3131 | - struct nft_xt *nft_target; | ||
3132 | + struct nft_expr_ops *ops; | ||
3133 | struct xt_target *target; | ||
3134 | char *tg_name; | ||
3135 | u32 rev, family; | ||
3136 | @@ -842,17 +787,6 @@ nft_target_select_ops(const struct nft_ctx *ctx, | ||
3137 | strcmp(tg_name, "standard") == 0) | ||
3138 | return ERR_PTR(-EINVAL); | ||
3139 | |||
3140 | - /* Re-use the existing target if it's already loaded. */ | ||
3141 | - list_for_each_entry(nft_target, &nft_target_list, head) { | ||
3142 | - struct xt_target *target = nft_target->ops.data; | ||
3143 | - | ||
3144 | - if (!target->target) | ||
3145 | - continue; | ||
3146 | - | ||
3147 | - if (nft_target_cmp(target, tg_name, rev, family)) | ||
3148 | - return &nft_target->ops; | ||
3149 | - } | ||
3150 | - | ||
3151 | target = xt_request_find_target(family, tg_name, rev); | ||
3152 | if (IS_ERR(target)) | ||
3153 | return ERR_PTR(-ENOENT); | ||
3154 | @@ -867,38 +801,43 @@ nft_target_select_ops(const struct nft_ctx *ctx, | ||
3155 | goto err; | ||
3156 | } | ||
3157 | |||
3158 | - /* This is the first time we use this target, allocate operations */ | ||
3159 | - nft_target = kzalloc(sizeof(struct nft_xt), GFP_KERNEL); | ||
3160 | - if (nft_target == NULL) { | ||
3161 | + ops = kzalloc(sizeof(struct nft_expr_ops), GFP_KERNEL); | ||
3162 | + if (!ops) { | ||
3163 | err = -ENOMEM; | ||
3164 | goto err; | ||
3165 | } | ||
3166 | |||
3167 | - nft_target->refcnt = 0; | ||
3168 | - nft_target->ops.type = &nft_target_type; | ||
3169 | - nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | ||
3170 | - nft_target->ops.init = nft_target_init; | ||
3171 | - nft_target->ops.destroy = nft_target_destroy; | ||
3172 | - nft_target->ops.dump = nft_target_dump; | ||
3173 | - nft_target->ops.validate = nft_target_validate; | ||
3174 | - nft_target->ops.data = target; | ||
3175 | + ops->type = &nft_target_type; | ||
3176 | + ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | ||
3177 | + ops->init = nft_target_init; | ||
3178 | + ops->destroy = nft_target_destroy; | ||
3179 | + ops->dump = nft_target_dump; | ||
3180 | + ops->validate = nft_target_validate; | ||
3181 | + ops->data = target; | ||
3182 | |||
3183 | if (family == NFPROTO_BRIDGE) | ||
3184 | - nft_target->ops.eval = nft_target_eval_bridge; | ||
3185 | + ops->eval = nft_target_eval_bridge; | ||
3186 | else | ||
3187 | - nft_target->ops.eval = nft_target_eval_xt; | ||
3188 | - | ||
3189 | - list_add(&nft_target->head, &nft_target_list); | ||
3190 | + ops->eval = nft_target_eval_xt; | ||
3191 | |||
3192 | - return &nft_target->ops; | ||
3193 | + return ops; | ||
3194 | err: | ||
3195 | module_put(target->me); | ||
3196 | return ERR_PTR(err); | ||
3197 | } | ||
3198 | |||
3199 | +static void nft_target_release_ops(const struct nft_expr_ops *ops) | ||
3200 | +{ | ||
3201 | + struct xt_target *target = ops->data; | ||
3202 | + | ||
3203 | + module_put(target->me); | ||
3204 | + kfree(ops); | ||
3205 | +} | ||
3206 | + | ||
3207 | static struct nft_expr_type nft_target_type __read_mostly = { | ||
3208 | .name = "target", | ||
3209 | .select_ops = nft_target_select_ops, | ||
3210 | + .release_ops = nft_target_release_ops, | ||
3211 | .policy = nft_target_policy, | ||
3212 | .maxattr = NFTA_TARGET_MAX, | ||
3213 | .owner = THIS_MODULE, | ||
3214 | @@ -923,7 +862,6 @@ static int __init nft_compat_module_init(void) | ||
3215 | } | ||
3216 | |||
3217 | return ret; | ||
3218 | - | ||
3219 | err_target: | ||
3220 | nft_unregister_expr(&nft_target_type); | ||
3221 | err_match: | ||
3222 | @@ -933,32 +871,6 @@ err_match: | ||
3223 | |||
3224 | static void __exit nft_compat_module_exit(void) | ||
3225 | { | ||
3226 | - struct nft_xt *xt, *next; | ||
3227 | - | ||
3228 | - /* list should be empty here, it can be non-empty only in case there | ||
3229 | - * was an error that caused nft_xt expr to not be initialized fully | ||
3230 | - * and noone else requested the same expression later. | ||
3231 | - * | ||
3232 | - * In this case, the lists contain 0-refcount entries that still | ||
3233 | - * hold module reference. | ||
3234 | - */ | ||
3235 | - list_for_each_entry_safe(xt, next, &nft_target_list, head) { | ||
3236 | - struct xt_target *target = xt->ops.data; | ||
3237 | - | ||
3238 | - if (WARN_ON_ONCE(xt->refcnt)) | ||
3239 | - continue; | ||
3240 | - module_put(target->me); | ||
3241 | - kfree(xt); | ||
3242 | - } | ||
3243 | - | ||
3244 | - list_for_each_entry_safe(xt, next, &nft_match_list, head) { | ||
3245 | - struct xt_match *match = xt->ops.data; | ||
3246 | - | ||
3247 | - if (WARN_ON_ONCE(xt->refcnt)) | ||
3248 | - continue; | ||
3249 | - module_put(match->me); | ||
3250 | - kfree(xt); | ||
3251 | - } | ||
3252 | nfnetlink_subsys_unregister(&nfnl_compat_subsys); | ||
3253 | nft_unregister_expr(&nft_target_type); | ||
3254 | nft_unregister_expr(&nft_match_type); | ||
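The nft_compat rework drops the shared, refcounted nft_xt list entirely: every expression now gets its own kzalloc'd nft_expr_ops, freed either by ->destroy() once init has succeeded or by the new ->release_ops() hook when nft_expr_init() fails before init runs. A small user-space analogue of that ownership model (toy types, not the nft API):

#include <stdio.h>
#include <stdlib.h>

struct toy_ops { const char *name; };

/* Each caller gets a private ops allocation, as select_ops now does. */
static struct toy_ops *toy_select_ops(const char *name)
{
	struct toy_ops *ops = calloc(1, sizeof(*ops));	/* kzalloc() */

	if (!ops)
		return NULL;
	ops->name = name;
	return ops;			/* ownership moves to the caller */
}

/* Mirrors ->release_ops(): undoes select_ops when init never ran. */
static void toy_release_ops(struct toy_ops *ops)
{
	free(ops);
}

int main(void)
{
	struct toy_ops *ops = toy_select_ops("match");

	if (!ops)
		return 1;
	printf("using ops for %s\n", ops->name);
	toy_release_ops(ops);		/* or kfree(expr->ops) in ->destroy() */
	return 0;
}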
3255 | diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c | ||
3256 | index 6e91a37d57f2..eb7f9a5f2aeb 100644 | ||
3257 | --- a/net/netfilter/nft_dynset.c | ||
3258 | +++ b/net/netfilter/nft_dynset.c | ||
3259 | @@ -235,14 +235,32 @@ err1: | ||
3260 | return err; | ||
3261 | } | ||
3262 | |||
3263 | +static void nft_dynset_deactivate(const struct nft_ctx *ctx, | ||
3264 | + const struct nft_expr *expr, | ||
3265 | + enum nft_trans_phase phase) | ||
3266 | +{ | ||
3267 | + struct nft_dynset *priv = nft_expr_priv(expr); | ||
3268 | + | ||
3269 | + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase); | ||
3270 | +} | ||
3271 | + | ||
3272 | +static void nft_dynset_activate(const struct nft_ctx *ctx, | ||
3273 | + const struct nft_expr *expr) | ||
3274 | +{ | ||
3275 | + struct nft_dynset *priv = nft_expr_priv(expr); | ||
3276 | + | ||
3277 | + priv->set->use++; | ||
3278 | +} | ||
3279 | + | ||
3280 | static void nft_dynset_destroy(const struct nft_ctx *ctx, | ||
3281 | const struct nft_expr *expr) | ||
3282 | { | ||
3283 | struct nft_dynset *priv = nft_expr_priv(expr); | ||
3284 | |||
3285 | - nf_tables_unbind_set(ctx, priv->set, &priv->binding); | ||
3286 | if (priv->expr != NULL) | ||
3287 | nft_expr_destroy(ctx, priv->expr); | ||
3288 | + | ||
3289 | + nf_tables_destroy_set(ctx, priv->set); | ||
3290 | } | ||
3291 | |||
3292 | static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr) | ||
3293 | @@ -279,6 +297,8 @@ static const struct nft_expr_ops nft_dynset_ops = { | ||
3294 | .eval = nft_dynset_eval, | ||
3295 | .init = nft_dynset_init, | ||
3296 | .destroy = nft_dynset_destroy, | ||
3297 | + .activate = nft_dynset_activate, | ||
3298 | + .deactivate = nft_dynset_deactivate, | ||
3299 | .dump = nft_dynset_dump, | ||
3300 | }; | ||
3301 | |||
3302 | diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c | ||
3303 | index 0777a93211e2..3f6d1d2a6281 100644 | ||
3304 | --- a/net/netfilter/nft_immediate.c | ||
3305 | +++ b/net/netfilter/nft_immediate.c | ||
3306 | @@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, | ||
3307 | } | ||
3308 | |||
3309 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, | ||
3310 | - const struct nft_expr *expr) | ||
3311 | + const struct nft_expr *expr, | ||
3312 | + enum nft_trans_phase phase) | ||
3313 | { | ||
3314 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | ||
3315 | |||
3316 | + if (phase == NFT_TRANS_COMMIT) | ||
3317 | + return; | ||
3318 | + | ||
3319 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); | ||
3320 | } | ||
3321 | |||
3322 | diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c | ||
3323 | index ad13e8643599..161c3451a747 100644 | ||
3324 | --- a/net/netfilter/nft_lookup.c | ||
3325 | +++ b/net/netfilter/nft_lookup.c | ||
3326 | @@ -121,12 +121,29 @@ static int nft_lookup_init(const struct nft_ctx *ctx, | ||
3327 | return 0; | ||
3328 | } | ||
3329 | |||
3330 | +static void nft_lookup_deactivate(const struct nft_ctx *ctx, | ||
3331 | + const struct nft_expr *expr, | ||
3332 | + enum nft_trans_phase phase) | ||
3333 | +{ | ||
3334 | + struct nft_lookup *priv = nft_expr_priv(expr); | ||
3335 | + | ||
3336 | + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase); | ||
3337 | +} | ||
3338 | + | ||
3339 | +static void nft_lookup_activate(const struct nft_ctx *ctx, | ||
3340 | + const struct nft_expr *expr) | ||
3341 | +{ | ||
3342 | + struct nft_lookup *priv = nft_expr_priv(expr); | ||
3343 | + | ||
3344 | + priv->set->use++; | ||
3345 | +} | ||
3346 | + | ||
3347 | static void nft_lookup_destroy(const struct nft_ctx *ctx, | ||
3348 | const struct nft_expr *expr) | ||
3349 | { | ||
3350 | struct nft_lookup *priv = nft_expr_priv(expr); | ||
3351 | |||
3352 | - nf_tables_unbind_set(ctx, priv->set, &priv->binding); | ||
3353 | + nf_tables_destroy_set(ctx, priv->set); | ||
3354 | } | ||
3355 | |||
3356 | static int nft_lookup_dump(struct sk_buff *skb, const struct nft_expr *expr) | ||
3357 | @@ -209,6 +226,8 @@ static const struct nft_expr_ops nft_lookup_ops = { | ||
3358 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), | ||
3359 | .eval = nft_lookup_eval, | ||
3360 | .init = nft_lookup_init, | ||
3361 | + .activate = nft_lookup_activate, | ||
3362 | + .deactivate = nft_lookup_deactivate, | ||
3363 | .destroy = nft_lookup_destroy, | ||
3364 | .dump = nft_lookup_dump, | ||
3365 | .validate = nft_lookup_validate, | ||
3366 | diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c | ||
3367 | index cdf348f751ec..bf92a40dd1b2 100644 | ||
3368 | --- a/net/netfilter/nft_objref.c | ||
3369 | +++ b/net/netfilter/nft_objref.c | ||
3370 | @@ -64,21 +64,34 @@ nla_put_failure: | ||
3371 | return -1; | ||
3372 | } | ||
3373 | |||
3374 | -static void nft_objref_destroy(const struct nft_ctx *ctx, | ||
3375 | - const struct nft_expr *expr) | ||
3376 | +static void nft_objref_deactivate(const struct nft_ctx *ctx, | ||
3377 | + const struct nft_expr *expr, | ||
3378 | + enum nft_trans_phase phase) | ||
3379 | { | ||
3380 | struct nft_object *obj = nft_objref_priv(expr); | ||
3381 | |||
3382 | + if (phase == NFT_TRANS_COMMIT) | ||
3383 | + return; | ||
3384 | + | ||
3385 | obj->use--; | ||
3386 | } | ||
3387 | |||
3388 | +static void nft_objref_activate(const struct nft_ctx *ctx, | ||
3389 | + const struct nft_expr *expr) | ||
3390 | +{ | ||
3391 | + struct nft_object *obj = nft_objref_priv(expr); | ||
3392 | + | ||
3393 | + obj->use++; | ||
3394 | +} | ||
3395 | + | ||
3396 | static struct nft_expr_type nft_objref_type; | ||
3397 | static const struct nft_expr_ops nft_objref_ops = { | ||
3398 | .type = &nft_objref_type, | ||
3399 | .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)), | ||
3400 | .eval = nft_objref_eval, | ||
3401 | .init = nft_objref_init, | ||
3402 | - .destroy = nft_objref_destroy, | ||
3403 | + .activate = nft_objref_activate, | ||
3404 | + .deactivate = nft_objref_deactivate, | ||
3405 | .dump = nft_objref_dump, | ||
3406 | }; | ||
3407 | |||
3408 | @@ -155,12 +168,29 @@ nla_put_failure: | ||
3409 | return -1; | ||
3410 | } | ||
3411 | |||
3412 | +static void nft_objref_map_deactivate(const struct nft_ctx *ctx, | ||
3413 | + const struct nft_expr *expr, | ||
3414 | + enum nft_trans_phase phase) | ||
3415 | +{ | ||
3416 | + struct nft_objref_map *priv = nft_expr_priv(expr); | ||
3417 | + | ||
3418 | + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase); | ||
3419 | +} | ||
3420 | + | ||
3421 | +static void nft_objref_map_activate(const struct nft_ctx *ctx, | ||
3422 | + const struct nft_expr *expr) | ||
3423 | +{ | ||
3424 | + struct nft_objref_map *priv = nft_expr_priv(expr); | ||
3425 | + | ||
3426 | + priv->set->use++; | ||
3427 | +} | ||
3428 | + | ||
3429 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, | ||
3430 | const struct nft_expr *expr) | ||
3431 | { | ||
3432 | struct nft_objref_map *priv = nft_expr_priv(expr); | ||
3433 | |||
3434 | - nf_tables_unbind_set(ctx, priv->set, &priv->binding); | ||
3435 | + nf_tables_destroy_set(ctx, priv->set); | ||
3436 | } | ||
3437 | |||
3438 | static struct nft_expr_type nft_objref_type; | ||
3439 | @@ -169,6 +199,8 @@ static const struct nft_expr_ops nft_objref_map_ops = { | ||
3440 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), | ||
3441 | .eval = nft_objref_map_eval, | ||
3442 | .init = nft_objref_map_init, | ||
3443 | + .activate = nft_objref_map_activate, | ||
3444 | + .deactivate = nft_objref_map_deactivate, | ||
3445 | .destroy = nft_objref_map_destroy, | ||
3446 | .dump = nft_objref_map_dump, | ||
3447 | }; | ||
3448 | diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c | ||
3449 | index 1d3144d19903..71ffd1a6dc7c 100644 | ||
3450 | --- a/net/netrom/af_netrom.c | ||
3451 | +++ b/net/netrom/af_netrom.c | ||
3452 | @@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void) | ||
3453 | int i; | ||
3454 | int rc = proto_register(&nr_proto, 0); | ||
3455 | |||
3456 | - if (rc != 0) | ||
3457 | - goto out; | ||
3458 | + if (rc) | ||
3459 | + return rc; | ||
3460 | |||
3461 | if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) { | ||
3462 | - printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n"); | ||
3463 | - return -1; | ||
3464 | + pr_err("NET/ROM: %s - nr_ndevs parameter too large\n", | ||
3465 | + __func__); | ||
3466 | + rc = -EINVAL; | ||
3467 | + goto unregister_proto; | ||
3468 | } | ||
3469 | |||
3470 | dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL); | ||
3471 | - if (dev_nr == NULL) { | ||
3472 | - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); | ||
3473 | - return -1; | ||
3474 | + if (!dev_nr) { | ||
3475 | + pr_err("NET/ROM: %s - unable to allocate device array\n", | ||
3476 | + __func__); | ||
3477 | + rc = -ENOMEM; | ||
3478 | + goto unregister_proto; | ||
3479 | } | ||
3480 | |||
3481 | for (i = 0; i < nr_ndevs; i++) { | ||
3482 | @@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void) | ||
3483 | sprintf(name, "nr%d", i); | ||
3484 | dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup); | ||
3485 | if (!dev) { | ||
3486 | - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n"); | ||
3487 | + rc = -ENOMEM; | ||
3488 | goto fail; | ||
3489 | } | ||
3490 | |||
3491 | dev->base_addr = i; | ||
3492 | - if (register_netdev(dev)) { | ||
3493 | - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n"); | ||
3494 | + rc = register_netdev(dev); | ||
3495 | + if (rc) { | ||
3496 | free_netdev(dev); | ||
3497 | goto fail; | ||
3498 | } | ||
3499 | @@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void) | ||
3500 | dev_nr[i] = dev; | ||
3501 | } | ||
3502 | |||
3503 | - if (sock_register(&nr_family_ops)) { | ||
3504 | - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n"); | ||
3505 | + rc = sock_register(&nr_family_ops); | ||
3506 | + if (rc) | ||
3507 | goto fail; | ||
3508 | - } | ||
3509 | |||
3510 | - register_netdevice_notifier(&nr_dev_notifier); | ||
3511 | + rc = register_netdevice_notifier(&nr_dev_notifier); | ||
3512 | + if (rc) | ||
3513 | + goto out_sock; | ||
3514 | |||
3515 | ax25_register_pid(&nr_pid); | ||
3516 | ax25_linkfail_register(&nr_linkfail_notifier); | ||
3517 | |||
3518 | #ifdef CONFIG_SYSCTL | ||
3519 | - nr_register_sysctl(); | ||
3520 | + rc = nr_register_sysctl(); | ||
3521 | + if (rc) | ||
3522 | + goto out_sysctl; | ||
3523 | #endif | ||
3524 | |||
3525 | nr_loopback_init(); | ||
3526 | |||
3527 | - proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops); | ||
3528 | - proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops); | ||
3529 | - proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops); | ||
3530 | -out: | ||
3531 | - return rc; | ||
3532 | + rc = -ENOMEM; | ||
3533 | + if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops)) | ||
3534 | + goto proc_remove1; | ||
3535 | + if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net, | ||
3536 | + &nr_neigh_seqops)) | ||
3537 | + goto proc_remove2; | ||
3538 | + if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net, | ||
3539 | + &nr_node_seqops)) | ||
3540 | + goto proc_remove3; | ||
3541 | + | ||
3542 | + return 0; | ||
3543 | + | ||
3544 | +proc_remove3: | ||
3545 | + remove_proc_entry("nr_neigh", init_net.proc_net); | ||
3546 | +proc_remove2: | ||
3547 | + remove_proc_entry("nr", init_net.proc_net); | ||
3548 | +proc_remove1: | ||
3549 | + | ||
3550 | + nr_loopback_clear(); | ||
3551 | + nr_rt_free(); | ||
3552 | + | ||
3553 | +#ifdef CONFIG_SYSCTL | ||
3554 | + nr_unregister_sysctl(); | ||
3555 | +out_sysctl: | ||
3556 | +#endif | ||
3557 | + ax25_linkfail_release(&nr_linkfail_notifier); | ||
3558 | + ax25_protocol_release(AX25_P_NETROM); | ||
3559 | + unregister_netdevice_notifier(&nr_dev_notifier); | ||
3560 | +out_sock: | ||
3561 | + sock_unregister(PF_NETROM); | ||
3562 | fail: | ||
3563 | while (--i >= 0) { | ||
3564 | unregister_netdev(dev_nr[i]); | ||
3565 | free_netdev(dev_nr[i]); | ||
3566 | } | ||
3567 | kfree(dev_nr); | ||
3568 | +unregister_proto: | ||
3569 | proto_unregister(&nr_proto); | ||
3570 | - rc = -1; | ||
3571 | - goto out; | ||
3572 | + return rc; | ||
3573 | } | ||
3574 | |||
3575 | module_init(nr_proto_init); | ||
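nr_proto_init() now propagates real errno values and unwinds every step it completed, instead of returning -1 and leaking registrations. The pattern is the usual layered goto unwind: each successful step gains a matching label on the error path, torn down in reverse order. A compact sketch with made-up resource names:

#include <stdio.h>

static int step(const char *what, int ok)
{
	printf("%s: %s\n", what, ok ? "ok" : "failed");
	return ok ? 0 : -1;
}

static int toy_init(void)
{
	int rc;

	rc = step("register proto", 1);
	if (rc)
		return rc;
	rc = step("allocate devices", 1);
	if (rc)
		goto unregister_proto;
	rc = step("register socket family", 0);	/* simulate a late failure */
	if (rc)
		goto free_devices;
	return 0;

free_devices:
	step("free devices", 1);	/* undo in reverse order */
unregister_proto:
	step("unregister proto", 1);
	return rc;
}

int main(void)
{
	return toy_init() ? 1 : 0;
}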
3576 | diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c | ||
3577 | index 215ad22a9647..93d13f019981 100644 | ||
3578 | --- a/net/netrom/nr_loopback.c | ||
3579 | +++ b/net/netrom/nr_loopback.c | ||
3580 | @@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused) | ||
3581 | } | ||
3582 | } | ||
3583 | |||
3584 | -void __exit nr_loopback_clear(void) | ||
3585 | +void nr_loopback_clear(void) | ||
3586 | { | ||
3587 | del_timer_sync(&loopback_timer); | ||
3588 | skb_queue_purge(&loopback_queue); | ||
3589 | diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c | ||
3590 | index 6485f593e2f0..b76aa668a94b 100644 | ||
3591 | --- a/net/netrom/nr_route.c | ||
3592 | +++ b/net/netrom/nr_route.c | ||
3593 | @@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = { | ||
3594 | /* | ||
3595 | * Free all memory associated with the nodes and routes lists. | ||
3596 | */ | ||
3597 | -void __exit nr_rt_free(void) | ||
3598 | +void nr_rt_free(void) | ||
3599 | { | ||
3600 | struct nr_neigh *s = NULL; | ||
3601 | struct nr_node *t = NULL; | ||
3602 | diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c | ||
3603 | index ba1c368b3f18..771011b84270 100644 | ||
3604 | --- a/net/netrom/sysctl_net_netrom.c | ||
3605 | +++ b/net/netrom/sysctl_net_netrom.c | ||
3606 | @@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = { | ||
3607 | { } | ||
3608 | }; | ||
3609 | |||
3610 | -void __init nr_register_sysctl(void) | ||
3611 | +int __init nr_register_sysctl(void) | ||
3612 | { | ||
3613 | nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table); | ||
3614 | + if (!nr_table_header) | ||
3615 | + return -ENOMEM; | ||
3616 | + return 0; | ||
3617 | } | ||
3618 | |||
3619 | void nr_unregister_sysctl(void) | ||
3620 | diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c | ||
3621 | index 65387e1e6964..cd7e01ea8144 100644 | ||
3622 | --- a/net/rds/af_rds.c | ||
3623 | +++ b/net/rds/af_rds.c | ||
3624 | @@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr, | ||
3625 | struct rds_sock *rs = rds_sk_to_rs(sk); | ||
3626 | int ret = 0; | ||
3627 | |||
3628 | + if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
3629 | + return -EINVAL; | ||
3630 | + | ||
3631 | lock_sock(sk); | ||
3632 | |||
3633 | switch (uaddr->sa_family) { | ||
3634 | diff --git a/net/rds/bind.c b/net/rds/bind.c | ||
3635 | index 17c9d9f0c848..0f4398e7f2a7 100644 | ||
3636 | --- a/net/rds/bind.c | ||
3637 | +++ b/net/rds/bind.c | ||
3638 | @@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | ||
3639 | /* We allow an RDS socket to be bound to either IPv4 or IPv6 | ||
3640 | * address. | ||
3641 | */ | ||
3642 | + if (addr_len < offsetofend(struct sockaddr, sa_family)) | ||
3643 | + return -EINVAL; | ||
3644 | if (uaddr->sa_family == AF_INET) { | ||
3645 | struct sockaddr_in *sin = (struct sockaddr_in *)uaddr; | ||
3646 | |||
3647 | diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c | ||
3648 | index e0f70c4051b6..01e764f8f224 100644 | ||
3649 | --- a/net/rds/ib_fmr.c | ||
3650 | +++ b/net/rds/ib_fmr.c | ||
3651 | @@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages) | ||
3652 | else | ||
3653 | pool = rds_ibdev->mr_1m_pool; | ||
3654 | |||
3655 | + if (atomic_read(&pool->dirty_count) >= pool->max_items / 10) | ||
3656 | + queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); | ||
3657 | + | ||
3658 | + /* Switch pools if one of the pool is reaching upper limit */ | ||
3659 | + if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) { | ||
3660 | + if (pool->pool_type == RDS_IB_MR_8K_POOL) | ||
3661 | + pool = rds_ibdev->mr_1m_pool; | ||
3662 | + else | ||
3663 | + pool = rds_ibdev->mr_8k_pool; | ||
3664 | + } | ||
3665 | + | ||
3666 | ibmr = rds_ib_try_reuse_ibmr(pool); | ||
3667 | if (ibmr) | ||
3668 | return ibmr; | ||
3669 | diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c | ||
3670 | index 63c8d107adcf..d664e9ade74d 100644 | ||
3671 | --- a/net/rds/ib_rdma.c | ||
3672 | +++ b/net/rds/ib_rdma.c | ||
3673 | @@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool) | ||
3674 | struct rds_ib_mr *ibmr = NULL; | ||
3675 | int iter = 0; | ||
3676 | |||
3677 | - if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10) | ||
3678 | - queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10); | ||
3679 | - | ||
3680 | while (1) { | ||
3681 | ibmr = rds_ib_reuse_mr(pool); | ||
3682 | if (ibmr) | ||
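The two RDS hunks move the flush trigger out of rds_ib_try_reuse_ibmr() and add a pool switch: once a pool is 10% dirty the flush worker is kicked, and once it reaches 90% of max_items new allocations fall over to the other pool (8K <-> 1M). The thresholds in isolation, as a user-space sketch with toy pools:

#include <stdbool.h>
#include <stdio.h>

struct toy_pool { const char *name; int max_items; int dirty; };

static struct toy_pool *pick_pool(struct toy_pool *pool, struct toy_pool *other,
				  bool *kick_flush)
{
	*kick_flush = pool->dirty >= pool->max_items / 10;	/* 10%: flush */
	if (pool->dirty >= pool->max_items * 9 / 10)		/* 90%: switch */
		return other;
	return pool;
}

int main(void)
{
	struct toy_pool p8k = { "8k", 100, 95 };
	struct toy_pool p1m = { "1m", 100, 10 };
	bool kick;
	struct toy_pool *use = pick_pool(&p8k, &p1m, &kick);

	printf("allocate from %s pool, kick flush: %d\n", use->name, kick);
	return 0;
}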
3683 | diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c | ||
3684 | index 7af4f99c4a93..094a6621f8e8 100644 | ||
3685 | --- a/net/rose/rose_loopback.c | ||
3686 | +++ b/net/rose/rose_loopback.c | ||
3687 | @@ -16,6 +16,7 @@ | ||
3688 | #include <linux/init.h> | ||
3689 | |||
3690 | static struct sk_buff_head loopback_queue; | ||
3691 | +#define ROSE_LOOPBACK_LIMIT 1000 | ||
3692 | static struct timer_list loopback_timer; | ||
3693 | |||
3694 | static void rose_set_loopback_timer(void); | ||
3695 | @@ -35,29 +36,27 @@ static int rose_loopback_running(void) | ||
3696 | |||
3697 | int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) | ||
3698 | { | ||
3699 | - struct sk_buff *skbn; | ||
3700 | + struct sk_buff *skbn = NULL; | ||
3701 | |||
3702 | - skbn = skb_clone(skb, GFP_ATOMIC); | ||
3703 | + if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT) | ||
3704 | + skbn = skb_clone(skb, GFP_ATOMIC); | ||
3705 | |||
3706 | - kfree_skb(skb); | ||
3707 | - | ||
3708 | - if (skbn != NULL) { | ||
3709 | + if (skbn) { | ||
3710 | + consume_skb(skb); | ||
3711 | skb_queue_tail(&loopback_queue, skbn); | ||
3712 | |||
3713 | if (!rose_loopback_running()) | ||
3714 | rose_set_loopback_timer(); | ||
3715 | + } else { | ||
3716 | + kfree_skb(skb); | ||
3717 | } | ||
3718 | |||
3719 | return 1; | ||
3720 | } | ||
3721 | |||
3722 | - | ||
3723 | static void rose_set_loopback_timer(void) | ||
3724 | { | ||
3725 | - del_timer(&loopback_timer); | ||
3726 | - | ||
3727 | - loopback_timer.expires = jiffies + 10; | ||
3728 | - add_timer(&loopback_timer); | ||
3729 | + mod_timer(&loopback_timer, jiffies + 10); | ||
3730 | } | ||
3731 | |||
3732 | static void rose_loopback_timer(struct timer_list *unused) | ||
3733 | @@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused) | ||
3734 | struct sock *sk; | ||
3735 | unsigned short frametype; | ||
3736 | unsigned int lci_i, lci_o; | ||
3737 | + int count; | ||
3738 | |||
3739 | - while ((skb = skb_dequeue(&loopback_queue)) != NULL) { | ||
3740 | + for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) { | ||
3741 | + skb = skb_dequeue(&loopback_queue); | ||
3742 | + if (!skb) | ||
3743 | + return; | ||
3744 | if (skb->len < ROSE_MIN_LEN) { | ||
3745 | kfree_skb(skb); | ||
3746 | continue; | ||
3747 | @@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused) | ||
3748 | kfree_skb(skb); | ||
3749 | } | ||
3750 | } | ||
3751 | + if (!skb_queue_empty(&loopback_queue)) | ||
3752 | + mod_timer(&loopback_timer, jiffies + 1); | ||
3753 | } | ||
3754 | |||
3755 | void __exit rose_loopback_clear(void) | ||
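The ROSE loopback fix bounds the queue in both directions: rose_loopback_queue() drops frames once ROSE_LOOPBACK_LIMIT are already queued, and the timer handler processes at most that many frames per run, rearming itself if work remains so one busy peer cannot monopolise the softirq. A stripped-down model of the pattern (plain counters instead of sk_buff queues):

#include <stdio.h>

#define LIMIT 1000	/* stands in for ROSE_LOOPBACK_LIMIT */

static int queued;	/* stands in for skb_queue_len(&loopback_queue) */

static int enqueue(void)
{
	if (queued >= LIMIT)
		return 0;	/* drop instead of growing without bound */
	queued++;
	return 1;
}

static void timer_run(void)
{
	int count;

	for (count = 0; count < LIMIT && queued; count++)
		queued--;	/* process one frame */
	if (queued)		/* more work: rearm, don't hog this run */
		printf("rearm timer, %d frames left\n", queued);
}

int main(void)
{
	int accepted = 0, i;

	for (i = 0; i < 1500; i++)
		accepted += enqueue();
	printf("accepted %d of 1500\n", accepted);	/* 1000: cap holds */
	timer_run();
	return 0;
}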
3756 | diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c | ||
3757 | index 570b49d2da42..d591f54cb91f 100644 | ||
3758 | --- a/net/rxrpc/input.c | ||
3759 | +++ b/net/rxrpc/input.c | ||
3760 | @@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) | ||
3761 | * handle data received on the local endpoint | ||
3762 | * - may be called in interrupt context | ||
3763 | * | ||
3764 | - * The socket is locked by the caller and this prevents the socket from being | ||
3765 | - * shut down and the local endpoint from going away, thus sk_user_data will not | ||
3766 | - * be cleared until this function returns. | ||
3767 | + * [!] Note that as this is called from the encap_rcv hook, the socket is not | ||
3768 | + * held locked by the caller and nothing prevents sk_user_data on the UDP from | ||
3769 | + * being cleared in the middle of processing this function. | ||
3770 | * | ||
3771 | * Called with the RCU read lock held from the IP layer via UDP. | ||
3772 | */ | ||
3773 | int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) | ||
3774 | { | ||
3775 | + struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk); | ||
3776 | struct rxrpc_connection *conn; | ||
3777 | struct rxrpc_channel *chan; | ||
3778 | struct rxrpc_call *call = NULL; | ||
3779 | struct rxrpc_skb_priv *sp; | ||
3780 | - struct rxrpc_local *local = udp_sk->sk_user_data; | ||
3781 | struct rxrpc_peer *peer = NULL; | ||
3782 | struct rxrpc_sock *rx = NULL; | ||
3783 | unsigned int channel; | ||
3784 | @@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) | ||
3785 | |||
3786 | _enter("%p", udp_sk); | ||
3787 | |||
3788 | + if (unlikely(!local)) { | ||
3789 | + kfree_skb(skb); | ||
3790 | + return 0; | ||
3791 | + } | ||
3792 | if (skb->tstamp == 0) | ||
3793 | skb->tstamp = ktime_get_real(); | ||
3794 | |||
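As the rewritten comment in rxrpc_input_packet() explains, encap_rcv runs without the socket lock, so sk_user_data can be cleared concurrently; the fix reads it once via rcu_dereference_sk_user_data() and drops the packet if it is already NULL. A loose analogue using a C11 atomic pointer in place of RCU:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_local { int id; };

/* Analogue of sk_user_data: may be cleared by the owner at any time. */
static _Atomic(struct toy_local *) sk_user_data;

static int toy_input_packet(char *skb)
{
	struct toy_local *local = atomic_load(&sk_user_data);

	if (!local) {		/* endpoint already torn down */
		free(skb);	/* kfree_skb(skb); return 0; */
		return 0;
	}
	printf("deliver to local %d\n", local->id);
	free(skb);
	return 0;
}

int main(void)
{
	char *skb = malloc(16);

	atomic_store(&sk_user_data, NULL);	/* owner cleared it already */
	return skb ? toy_input_packet(skb) : 1;
}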
3795 | diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c | ||
3796 | index 0906e51d3cfb..10317dbdab5f 100644 | ||
3797 | --- a/net/rxrpc/local_object.c | ||
3798 | +++ b/net/rxrpc/local_object.c | ||
3799 | @@ -304,7 +304,8 @@ nomem: | ||
3800 | ret = -ENOMEM; | ||
3801 | sock_error: | ||
3802 | mutex_unlock(&rxnet->local_mutex); | ||
3803 | - kfree(local); | ||
3804 | + if (local) | ||
3805 | + call_rcu(&local->rcu, rxrpc_local_rcu); | ||
3806 | _leave(" = %d", ret); | ||
3807 | return ERR_PTR(ret); | ||
3808 | |||
3809 | diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c | ||
3810 | index b6e8eccf2a52..214440c5b14e 100644 | ||
3811 | --- a/net/sunrpc/cache.c | ||
3812 | +++ b/net/sunrpc/cache.c | ||
3813 | @@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail) | ||
3814 | h->last_refresh = now; | ||
3815 | } | ||
3816 | |||
3817 | +static inline int cache_is_valid(struct cache_head *h); | ||
3818 | static void cache_fresh_locked(struct cache_head *head, time_t expiry, | ||
3819 | struct cache_detail *detail); | ||
3820 | static void cache_fresh_unlocked(struct cache_head *head, | ||
3821 | @@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, | ||
3822 | if (cache_is_expired(detail, tmp)) { | ||
3823 | hlist_del_init(&tmp->cache_list); | ||
3824 | detail->entries --; | ||
3825 | + if (cache_is_valid(tmp) == -EAGAIN) | ||
3826 | + set_bit(CACHE_NEGATIVE, &tmp->flags); | ||
3827 | cache_fresh_locked(tmp, 0, detail); | ||
3828 | freeme = tmp; | ||
3829 | break; | ||
3830 | diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c | ||
3831 | index 0b21187d74df..588d5aa14c41 100644 | ||
3832 | --- a/net/tipc/netlink_compat.c | ||
3833 | +++ b/net/tipc/netlink_compat.c | ||
3834 | @@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, | ||
3835 | if (msg->rep_type) | ||
3836 | tipc_tlv_init(msg->rep, msg->rep_type); | ||
3837 | |||
3838 | - if (cmd->header) | ||
3839 | - (*cmd->header)(msg); | ||
3840 | + if (cmd->header) { | ||
3841 | + err = (*cmd->header)(msg); | ||
3842 | + if (err) { | ||
3843 | + kfree_skb(msg->rep); | ||
3844 | + msg->rep = NULL; | ||
3845 | + return err; | ||
3846 | + } | ||
3847 | + } | ||
3848 | |||
3849 | arg = nlmsg_new(0, GFP_KERNEL); | ||
3850 | if (!arg) { | ||
3851 | @@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd, | ||
3852 | if (!bearer) | ||
3853 | return -EMSGSIZE; | ||
3854 | |||
3855 | - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME); | ||
3856 | + len = TLV_GET_DATA_LEN(msg->req); | ||
3857 | + len -= offsetof(struct tipc_bearer_config, name); | ||
3858 | + if (len <= 0) | ||
3859 | + return -EINVAL; | ||
3860 | + | ||
3861 | + len = min_t(int, len, TIPC_MAX_BEARER_NAME); | ||
3862 | if (!string_is_valid(b->name, len)) | ||
3863 | return -EINVAL; | ||
3864 | |||
3865 | @@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd, | ||
3866 | |||
3867 | lc = (struct tipc_link_config *)TLV_DATA(msg->req); | ||
3868 | |||
3869 | - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME); | ||
3870 | + len = TLV_GET_DATA_LEN(msg->req); | ||
3871 | + len -= offsetof(struct tipc_link_config, name); | ||
3872 | + if (len <= 0) | ||
3873 | + return -EINVAL; | ||
3874 | + | ||
3875 | + len = min_t(int, len, TIPC_MAX_LINK_NAME); | ||
3876 | if (!string_is_valid(lc->name, len)) | ||
3877 | return -EINVAL; | ||
3878 | |||
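Both TIPC compat hunks apply the same bounds check: subtract the offset of the embedded name field from the TLV payload length and reject the message if nothing remains, so the later min_t() clamp never operates on a negative length. A sketch of that arithmetic with an illustrative struct layout:

#include <stddef.h>
#include <stdio.h>

#define TIPC_MAX_BEARER_NAME 32

/* Illustrative layout; the real struct lives in the TIPC uapi headers. */
struct toy_bearer_config {
	unsigned int priority;
	unsigned int disc_domain;
	char name[TIPC_MAX_BEARER_NAME];
};

static int bearer_name_len(int tlv_data_len)
{
	int len = tlv_data_len - (int)offsetof(struct toy_bearer_config, name);

	if (len <= 0)
		return -1;		/* -EINVAL: payload ends before the name */
	if (len > TIPC_MAX_BEARER_NAME)
		len = TIPC_MAX_BEARER_NAME;
	return len;			/* bounded length for string_is_valid() */
}

int main(void)
{
	printf("%d\n", bearer_name_len(4));	/* too short: -1 */
	printf("%d\n", bearer_name_len(64));	/* clamped to 32 */
	return 0;
}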
3879 | diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c | ||
3880 | index 961b07d4d41c..c9588b682db4 100644 | ||
3881 | --- a/net/tls/tls_device.c | ||
3882 | +++ b/net/tls/tls_device.c | ||
3883 | @@ -874,7 +874,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx) | ||
3884 | goto release_netdev; | ||
3885 | |||
3886 | free_sw_resources: | ||
3887 | + up_read(&device_offload_lock); | ||
3888 | tls_sw_free_resources_rx(sk); | ||
3889 | + down_read(&device_offload_lock); | ||
3890 | release_ctx: | ||
3891 | ctx->priv_ctx_rx = NULL; | ||
3892 | release_netdev: | ||
3893 | @@ -909,8 +911,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk) | ||
3894 | } | ||
3895 | out: | ||
3896 | up_read(&device_offload_lock); | ||
3897 | - kfree(tls_ctx->rx.rec_seq); | ||
3898 | - kfree(tls_ctx->rx.iv); | ||
3899 | tls_sw_release_resources_rx(sk); | ||
3900 | } | ||
3901 | |||
3902 | diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c | ||
3903 | index 450a6dbc5a88..ef8934fd8698 100644 | ||
3904 | --- a/net/tls/tls_device_fallback.c | ||
3905 | +++ b/net/tls/tls_device_fallback.c | ||
3906 | @@ -193,6 +193,9 @@ static void update_chksum(struct sk_buff *skb, int headln) | ||
3907 | |||
3908 | static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) | ||
3909 | { | ||
3910 | + struct sock *sk = skb->sk; | ||
3911 | + int delta; | ||
3912 | + | ||
3913 | skb_copy_header(nskb, skb); | ||
3914 | |||
3915 | skb_put(nskb, skb->len); | ||
3916 | @@ -200,11 +203,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln) | ||
3917 | update_chksum(nskb, headln); | ||
3918 | |||
3919 | nskb->destructor = skb->destructor; | ||
3920 | - nskb->sk = skb->sk; | ||
3921 | + nskb->sk = sk; | ||
3922 | skb->destructor = NULL; | ||
3923 | skb->sk = NULL; | ||
3924 | - refcount_add(nskb->truesize - skb->truesize, | ||
3925 | - &nskb->sk->sk_wmem_alloc); | ||
3926 | + | ||
3927 | + delta = nskb->truesize - skb->truesize; | ||
3928 | + if (likely(delta < 0)) | ||
3929 | + WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc)); | ||
3930 | + else if (delta) | ||
3931 | + refcount_add(delta, &sk->sk_wmem_alloc); | ||
3932 | } | ||
3933 | |||
3934 | /* This function may be called after the user socket is already | ||
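In complete_skb() the truesize difference between the clone and the original is usually negative, and feeding a negative value to refcount_add() would wrap to a huge unsigned increment on sk_wmem_alloc. The fix computes a signed delta and subtracts or adds accordingly; a user-space sketch with an atomic counter standing in for refcount_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint sk_wmem_alloc;	/* stands in for the refcount_t */

static void account_clone(unsigned int old_truesize, unsigned int new_truesize)
{
	int delta = (int)new_truesize - (int)old_truesize;

	if (delta < 0)			/* clone shrank: subtract, never "add" */
		atomic_fetch_sub(&sk_wmem_alloc, (unsigned int)-delta);
	else if (delta)
		atomic_fetch_add(&sk_wmem_alloc, (unsigned int)delta);
}

int main(void)
{
	atomic_store(&sk_wmem_alloc, 4096);
	account_clone(2048, 1024);	/* delta is -1024 */
	printf("%u\n", atomic_load(&sk_wmem_alloc));	/* 3072 */
	return 0;
}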
3935 | diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c | ||
3936 | index a091c03abcb2..25b3fb585777 100644 | ||
3937 | --- a/net/tls/tls_main.c | ||
3938 | +++ b/net/tls/tls_main.c | ||
3939 | @@ -290,11 +290,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout) | ||
3940 | tls_sw_free_resources_tx(sk); | ||
3941 | } | ||
3942 | |||
3943 | - if (ctx->rx_conf == TLS_SW) { | ||
3944 | - kfree(ctx->rx.rec_seq); | ||
3945 | - kfree(ctx->rx.iv); | ||
3946 | + if (ctx->rx_conf == TLS_SW) | ||
3947 | tls_sw_free_resources_rx(sk); | ||
3948 | - } | ||
3949 | |||
3950 | #ifdef CONFIG_TLS_DEVICE | ||
3951 | if (ctx->rx_conf == TLS_HW) | ||
3952 | diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c | ||
3953 | index b9c6ecfbcfea..6848a8196711 100644 | ||
3954 | --- a/net/tls/tls_sw.c | ||
3955 | +++ b/net/tls/tls_sw.c | ||
3956 | @@ -1118,6 +1118,9 @@ void tls_sw_release_resources_rx(struct sock *sk) | ||
3957 | struct tls_context *tls_ctx = tls_get_ctx(sk); | ||
3958 | struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx); | ||
3959 | |||
3960 | + kfree(tls_ctx->rx.rec_seq); | ||
3961 | + kfree(tls_ctx->rx.iv); | ||
3962 | + | ||
3963 | if (ctx->aead_recv) { | ||
3964 | kfree_skb(ctx->recv_pkt); | ||
3965 | ctx->recv_pkt = NULL; | ||
3966 | diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c | ||
3967 | index 3ae3a33da70b..602715fc9a75 100644 | ||
3968 | --- a/net/vmw_vsock/virtio_transport_common.c | ||
3969 | +++ b/net/vmw_vsock/virtio_transport_common.c | ||
3970 | @@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk, | ||
3971 | */ | ||
3972 | static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) | ||
3973 | { | ||
3974 | + const struct virtio_transport *t; | ||
3975 | + struct virtio_vsock_pkt *reply; | ||
3976 | struct virtio_vsock_pkt_info info = { | ||
3977 | .op = VIRTIO_VSOCK_OP_RST, | ||
3978 | .type = le16_to_cpu(pkt->hdr.type), | ||
3979 | @@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt) | ||
3980 | if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST) | ||
3981 | return 0; | ||
3982 | |||
3983 | - pkt = virtio_transport_alloc_pkt(&info, 0, | ||
3984 | - le64_to_cpu(pkt->hdr.dst_cid), | ||
3985 | - le32_to_cpu(pkt->hdr.dst_port), | ||
3986 | - le64_to_cpu(pkt->hdr.src_cid), | ||
3987 | - le32_to_cpu(pkt->hdr.src_port)); | ||
3988 | - if (!pkt) | ||
3989 | + reply = virtio_transport_alloc_pkt(&info, 0, | ||
3990 | + le64_to_cpu(pkt->hdr.dst_cid), | ||
3991 | + le32_to_cpu(pkt->hdr.dst_port), | ||
3992 | + le64_to_cpu(pkt->hdr.src_cid), | ||
3993 | + le32_to_cpu(pkt->hdr.src_port)); | ||
3994 | + if (!reply) | ||
3995 | return -ENOMEM; | ||
3996 | |||
3997 | - return virtio_transport_get_ops()->send_pkt(pkt); | ||
3998 | + t = virtio_transport_get_ops(); | ||
3999 | + if (!t) { | ||
4000 | + virtio_transport_free_pkt(reply); | ||
4001 | + return -ENOTCONN; | ||
4002 | + } | ||
4003 | + | ||
4004 | + return t->send_pkt(reply); | ||
4005 | } | ||
4006 | |||
4007 | static void virtio_transport_wait_close(struct sock *sk, long timeout) | ||
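virtio_transport_reset_no_sock() used to overwrite its pkt argument with the freshly allocated RST, even though the caller still owns and frees the original packet; building the response into a separate reply pointer (and freeing it when no transport is registered) removes that aliasing. A toy illustration, hypothetical types throughout:

#include <stdio.h>
#include <stdlib.h>

struct toy_pkt { int op; };

static struct toy_pkt *alloc_reply(const struct toy_pkt *req)
{
	struct toy_pkt *reply = malloc(sizeof(*reply));

	if (!reply)
		return NULL;
	reply->op = -req->op;		/* response derived from the request */
	return reply;
}

static int toy_reset_no_sock(const struct toy_pkt *pkt, int have_transport)
{
	struct toy_pkt *reply = alloc_reply(pkt);	/* never reuse 'pkt' */

	if (!reply)
		return -1;		/* -ENOMEM */
	if (!have_transport) {
		free(reply);		/* virtio_transport_free_pkt(reply) */
		return -2;		/* -ENOTCONN */
	}
	printf("send reply op %d; request op still %d\n", reply->op, pkt->op);
	free(reply);
	return 0;
}

int main(void)
{
	struct toy_pkt req = { .op = 7 };

	return toy_reset_no_sock(&req, 1) ? 1 : 0;
}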
4008 | diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c | ||
4009 | index 80f73810b21b..0436789e7cd8 100644 | ||
4010 | --- a/sound/pci/hda/patch_ca0132.c | ||
4011 | +++ b/sound/pci/hda/patch_ca0132.c | ||
4012 | @@ -7394,8 +7394,10 @@ static void ca0132_free(struct hda_codec *codec) | ||
4013 | ca0132_exit_chip(codec); | ||
4014 | |||
4015 | snd_hda_power_down(codec); | ||
4016 | - if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) | ||
4017 | +#ifdef CONFIG_PCI | ||
4018 | + if (spec->mem_base) | ||
4019 | pci_iounmap(codec->bus->pci, spec->mem_base); | ||
4020 | +#endif | ||
4021 | kfree(spec->spec_init_verbs); | ||
4022 | kfree(codec->spec); | ||
4023 | } |