Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0110-5.0.11-all-fixes.patch

Revision 3339
Thu May 2 14:04:48 2019 UTC by niro
File size: 114340 byte(s)
-linux-5.0.11
1 diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
2 index acdfb5d2bcaa..e2142fe40cda 100644
3 --- a/Documentation/networking/ip-sysctl.txt
4 +++ b/Documentation/networking/ip-sysctl.txt
5 @@ -422,6 +422,7 @@ tcp_min_rtt_wlen - INTEGER
6 minimum RTT when it is moved to a longer path (e.g., due to traffic
7 engineering). A longer window makes the filter more resistant to RTT
8 inflations such as transient congestion. The unit is seconds.
9 + Possible values: 0 - 86400 (1 day)
10 Default: 300
11
12 tcp_moderate_rcvbuf - BOOLEAN
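
The hunk above documents the accepted range for tcp_min_rtt_wlen (0 to 86400 seconds). As a minimal illustration, a userspace program can set it through procfs like this (sketch only; the path is the standard net.ipv4 sysctl location, and the written value must fall within the documented range):

/* Sketch: write tcp_min_rtt_wlen via procfs; value must be 0..86400. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_rtt_wlen", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 300);	/* the documented default */
	return fclose(f) ? 1 : 0;
}
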
13 diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
14 index 187ce4f599a2..e4dfaf0d6e87 100644
15 --- a/Documentation/sysctl/vm.txt
16 +++ b/Documentation/sysctl/vm.txt
17 @@ -866,14 +866,14 @@ The intent is that compaction has less work to do in the future and to
18 increase the success rate of future high-order allocations such as SLUB
19 allocations, THP and hugetlbfs pages.
20
21 -To make it sensible with respect to the watermark_scale_factor parameter,
22 -the unit is in fractions of 10,000. The default value of 15,000 means
23 -that up to 150% of the high watermark will be reclaimed in the event of
24 -a pageblock being mixed due to fragmentation. The level of reclaim is
25 -determined by the number of fragmentation events that occurred in the
26 -recent past. If this value is smaller than a pageblock then a pageblocks
27 -worth of pages will be reclaimed (e.g. 2MB on 64-bit x86). A boost factor
28 -of 0 will disable the feature.
29 +To make it sensible with respect to the watermark_scale_factor
30 +parameter, the unit is in fractions of 10,000. The default value of
31 +15,000 on !DISCONTIGMEM configurations means that up to 150% of the high
32 +watermark will be reclaimed in the event of a pageblock being mixed due
33 +to fragmentation. The level of reclaim is determined by the number of
34 +fragmentation events that occurred in the recent past. If this value is
35 +smaller than a pageblock then a pageblocks worth of pages will be reclaimed
36 +(e.g. 2MB on 64-bit x86). A boost factor of 0 will disable the feature.
37
38 =============================================================
39
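To make the "fractions of 10,000" unit above concrete, here is a minimal sketch of the scaling it describes (boost_pages() is an illustrative helper, not a kernel symbol):

/* Sketch: a factor expressed in fractions of 10,000 scales the
 * high watermark; the default of 15,000 means up to 150% of it. */
static unsigned long boost_pages(unsigned long high_wmark_pages,
				 unsigned long boost_factor)
{
	return high_wmark_pages * boost_factor / 10000;
}

With high_wmark_pages = 1000 and the default factor, this yields 1500 pages, i.e. 150% of the high watermark.
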
40 diff --git a/Makefile b/Makefile
41 index b282c4143b21..c3daaefa979c 100644
42 --- a/Makefile
43 +++ b/Makefile
44 @@ -1,7 +1,7 @@
45 # SPDX-License-Identifier: GPL-2.0
46 VERSION = 5
47 PATCHLEVEL = 0
48 -SUBLEVEL = 10
49 +SUBLEVEL = 11
50 EXTRAVERSION =
51 NAME = Shy Crocodile
52
53 diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
54 index 6c7ccb428c07..7135820f76d4 100644
55 --- a/arch/arm/boot/compressed/head.S
56 +++ b/arch/arm/boot/compressed/head.S
57 @@ -1438,7 +1438,21 @@ ENTRY(efi_stub_entry)
58
59 @ Preserve return value of efi_entry() in r4
60 mov r4, r0
61 - bl cache_clean_flush
62 +
63 + @ our cache maintenance code relies on CP15 barrier instructions
64 + @ but since we arrived here with the MMU and caches configured
65 + @ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
66 + @ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
67 + @ the enable path will be executed on v7+ only.
68 + mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
69 + tst r1, #(1 << 5) @ CP15BEN bit set?
70 + bne 0f
71 + orr r1, r1, #(1 << 5) @ CP15 barrier instructions
72 + mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
73 + ARM( .inst 0xf57ff06f @ v7+ isb )
74 + THUMB( isb )
75 +
76 +0: bl cache_clean_flush
77 bl cache_off
78
79 @ Set parameters for booting zImage according to boot protocol
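
The assembly added above amounts to the following C-like sketch, where read_sctlr(), write_sctlr() and isb() are stand-ins for the mrc/mcr CP15 accesses and the barrier shown in the hunk:

/* Sketch: make sure CP15 barrier instructions are enabled before
 * running the cache maintenance code (bit 5 of SCTLR). */
#define SCTLR_CP15BEN	(1u << 5)

static void ensure_cp15_barriers(void)
{
	unsigned int sctlr = read_sctlr();

	if (!(sctlr & SCTLR_CP15BEN)) {
		write_sctlr(sctlr | SCTLR_CP15BEN);
		isb();	/* synchronize the SCTLR update (v7+) */
	}
}
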
80 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
81 index 7205a9085b4d..c9411774555d 100644
82 --- a/arch/arm64/mm/init.c
83 +++ b/arch/arm64/mm/init.c
84 @@ -406,7 +406,7 @@ void __init arm64_memblock_init(void)
85 * Otherwise, this is a no-op
86 */
87 u64 base = phys_initrd_start & PAGE_MASK;
88 - u64 size = PAGE_ALIGN(phys_initrd_size);
89 + u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
90
91 /*
92 * We can only add back the initrd memory if we don't end up
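
A worked example of why the new expression is needed, assuming 4 KiB pages and an initrd that starts off a page boundary at 0x1000800 with size 0x2000 (so it ends at 0x1002800):

/* Old: size = PAGE_ALIGN(0x2000) = 0x2000, so the region
 * [0x1000000, 0x1002000) misses the initrd's last 0x800 bytes.
 * New: size = PAGE_ALIGN(0x1000800 + 0x2000) - 0x1000000
 *           = 0x1003000 - 0x1000000 = 0x3000,
 * which covers the initrd completely. */

Aligning only the size ignores an unaligned start, which is exactly the bug being fixed.
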
93 diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
94 index f158c5894a9a..feb2653490df 100644
95 --- a/arch/mips/kernel/scall64-o32.S
96 +++ b/arch/mips/kernel/scall64-o32.S
97 @@ -125,7 +125,7 @@ trace_a_syscall:
98 subu t1, v0, __NR_O32_Linux
99 move a1, v0
100 bnez t1, 1f /* __NR_syscall at offset 0 */
101 - lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
102 + ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
103 .set pop
104
105 1: jal syscall_trace_enter
106 diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
107 index cfdd08897a06..e2b0c5f15c7b 100644
108 --- a/arch/powerpc/configs/skiroot_defconfig
109 +++ b/arch/powerpc/configs/skiroot_defconfig
110 @@ -260,6 +260,7 @@ CONFIG_UDF_FS=m
111 CONFIG_MSDOS_FS=m
112 CONFIG_VFAT_FS=m
113 CONFIG_PROC_KCORE=y
114 +CONFIG_HUGETLBFS=y
115 # CONFIG_MISC_FILESYSTEMS is not set
116 # CONFIG_NETWORK_FILESYSTEMS is not set
117 CONFIG_NLS=y
118 diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
119 index 1e0bc5955a40..afd516b572f8 100644
120 --- a/arch/powerpc/kernel/vdso32/gettimeofday.S
121 +++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
122 @@ -98,7 +98,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
123 * can be used, r7 contains NSEC_PER_SEC.
124 */
125
126 - lwz r5,WTOM_CLOCK_SEC(r9)
127 + lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
128 lwz r6,WTOM_CLOCK_NSEC(r9)
129
130 /* We now have our offset in r5,r6. We create a fake dependency
131 diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
132 index 8c7464c3f27f..2782188a5ba1 100644
133 --- a/arch/powerpc/platforms/Kconfig.cputype
134 +++ b/arch/powerpc/platforms/Kconfig.cputype
135 @@ -318,7 +318,7 @@ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
136
137 config PPC_RADIX_MMU
138 bool "Radix MMU Support"
139 - depends on PPC_BOOK3S_64
140 + depends on PPC_BOOK3S_64 && HUGETLB_PAGE
141 select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA
142 default y
143 help
144 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
145 index 9c5a67d1b9c1..c0c7291d4ccf 100644
146 --- a/arch/x86/Makefile
147 +++ b/arch/x86/Makefile
148 @@ -217,6 +217,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
149 # Avoid indirect branches in kernel to deal with Spectre
150 ifdef CONFIG_RETPOLINE
151 KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
152 + # Additionally, avoid generating expensive indirect jumps which
153 + # are subject to retpolines for small number of switch cases.
154 + # clang turns off jump table generation by default when under
155 + # retpoline builds, however, gcc does not for x86. This has
156 + # only been fixed starting from gcc stable version 8.4.0 and
157 + # onwards, but not for older ones. See gcc bug #86952.
158 + ifndef CONFIG_CC_IS_CLANG
159 + KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
160 + endif
161 endif
162
163 archscripts: scripts_basic
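
For context on the comment above: a dense switch such as the sketch below is what gcc may lower to a jump table, i.e. an indirect jump, which is precisely what retpoline builds want to avoid; -fno-jump-tables forces compare-and-branch sequences instead (op_a() through op_d() are hypothetical):

/* A dense switch like this may become an indirect jump through
 * a table unless -fno-jump-tables is passed. */
int dispatch(int op)
{
	switch (op) {
	case 0: return op_a();
	case 1: return op_b();
	case 2: return op_c();
	case 3: return op_d();
	default: return -1;
	}
}
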
164 diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
165 index d2e780705c5a..56194c571299 100644
166 --- a/arch/x86/events/intel/cstate.c
167 +++ b/arch/x86/events/intel/cstate.c
168 @@ -76,15 +76,15 @@
169 * Scope: Package (physical package)
170 * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
171 * perf code: 0x04
172 - * Available model: HSW ULT,CNL
173 + * Available model: HSW ULT,KBL,CNL
174 * Scope: Package (physical package)
175 * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
176 * perf code: 0x05
177 - * Available model: HSW ULT,CNL
178 + * Available model: HSW ULT,KBL,CNL
179 * Scope: Package (physical package)
180 * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
181 * perf code: 0x06
182 - * Available model: HSW ULT,GLM,CNL
183 + * Available model: HSW ULT,KBL,GLM,CNL
184 * Scope: Package (physical package)
185 *
186 */
187 @@ -572,8 +572,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
188 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
189 X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_X, snb_cstates),
190
191 - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates),
192 - X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates),
193 + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, hswult_cstates),
194 + X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, hswult_cstates),
195
196 X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates),
197
198 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
199 index e5ed28629271..72510c470001 100644
200 --- a/block/bfq-iosched.c
201 +++ b/block/bfq-iosched.c
202 @@ -2804,7 +2804,7 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
203 bfq_remove_request(q, rq);
204 }
205
206 -static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
207 +static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
208 {
209 /*
210 * If this bfqq is shared between multiple processes, check
211 @@ -2837,9 +2837,11 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
212 /*
213 * All in-service entities must have been properly deactivated
214 * or requeued before executing the next function, which
215 - * resets all in-service entites as no more in service.
216 + * resets all in-service entities as no more in service. This
217 + * may cause bfqq to be freed. If this happens, the next
218 + * function returns true.
219 */
220 - __bfq_bfqd_reset_in_service(bfqd);
221 + return __bfq_bfqd_reset_in_service(bfqd);
222 }
223
224 /**
225 @@ -3244,7 +3246,6 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
226 bool slow;
227 unsigned long delta = 0;
228 struct bfq_entity *entity = &bfqq->entity;
229 - int ref;
230
231 /*
232 * Check whether the process is slow (see bfq_bfqq_is_slow).
233 @@ -3313,10 +3314,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
234 * reason.
235 */
236 __bfq_bfqq_recalc_budget(bfqd, bfqq, reason);
237 - ref = bfqq->ref;
238 - __bfq_bfqq_expire(bfqd, bfqq);
239 -
240 - if (ref == 1) /* bfqq is gone, no more actions on it */
241 + if (__bfq_bfqq_expire(bfqd, bfqq))
242 + /* bfqq is gone, no more actions on it */
243 return;
244
245 bfqq->injected_service = 0;
246 diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
247 index 746bd570b85a..ca98c98a8179 100644
248 --- a/block/bfq-iosched.h
249 +++ b/block/bfq-iosched.h
250 @@ -993,7 +993,7 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity,
251 bool ins_into_idle_tree);
252 bool next_queue_may_preempt(struct bfq_data *bfqd);
253 struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd);
254 -void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
255 +bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd);
256 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
257 bool ins_into_idle_tree, bool expiration);
258 void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
259 diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
260 index 4aab1a8191f0..8077bf71d2ac 100644
261 --- a/block/bfq-wf2q.c
262 +++ b/block/bfq-wf2q.c
263 @@ -1599,7 +1599,8 @@ struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
264 return bfqq;
265 }
266
267 -void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
268 +/* returns true if the in-service queue gets freed */
269 +bool __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
270 {
271 struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue;
272 struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity;
273 @@ -1623,8 +1624,20 @@ void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
274 * service tree either, then release the service reference to
275 * the queue it represents (taken with bfq_get_entity).
276 */
277 - if (!in_serv_entity->on_st)
278 + if (!in_serv_entity->on_st) {
279 + /*
280 + * If no process is referencing in_serv_bfqq any
281 + * longer, then the service reference may be the only
282 + * reference to the queue. If this is the case, then
283 + * bfqq gets freed here.
284 + */
285 + int ref = in_serv_bfqq->ref;
286 bfq_put_queue(in_serv_bfqq);
287 + if (ref == 1)
288 + return true;
289 + }
290 +
291 + return false;
292 }
293
294 void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
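
The change across the three BFQ files above relies on one idiom: snapshot the reference count before dropping a reference, and treat a snapshot of 1 as proof that the drop freed the object. In a generic sketch (struct obj and obj_put() are illustrative):

/* Sketch: report whether our put released the last reference.
 * The snapshot must be taken while the object is still safe to
 * read, e.g. under the lock that serializes its users. */
static bool put_and_check_freed(struct obj *o)
{
	int ref = o->ref;

	obj_put(o);		/* may free o */
	return ref == 1;	/* ours was the last reference */
}

This is why bfq_bfqq_expire() can simply return when __bfq_bfqq_expire() reports true: bfqq may no longer exist.
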
295 diff --git a/crypto/lrw.c b/crypto/lrw.c
296 index 0430ccd08728..08a0e458bc3e 100644
297 --- a/crypto/lrw.c
298 +++ b/crypto/lrw.c
299 @@ -212,8 +212,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
300 {
301 struct skcipher_request *req = areq->data;
302
303 - if (!err)
304 + if (!err) {
305 + struct rctx *rctx = skcipher_request_ctx(req);
306 +
307 + rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
308 err = xor_tweak_post(req);
309 + }
310
311 skcipher_request_complete(req, err);
312 }
313 diff --git a/crypto/xts.c b/crypto/xts.c
314 index 847f54f76789..2f948328cabb 100644
315 --- a/crypto/xts.c
316 +++ b/crypto/xts.c
317 @@ -137,8 +137,12 @@ static void crypt_done(struct crypto_async_request *areq, int err)
318 {
319 struct skcipher_request *req = areq->data;
320
321 - if (!err)
322 + if (!err) {
323 + struct rctx *rctx = skcipher_request_ctx(req);
324 +
325 + rctx->subreq.base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
326 err = xor_tweak_post(req);
327 + }
328
329 skcipher_request_complete(req, err);
330 }
331 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
332 index 022cd80e80cc..a6e556bf62df 100644
333 --- a/drivers/android/binder_alloc.c
334 +++ b/drivers/android/binder_alloc.c
335 @@ -959,14 +959,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
336
337 index = page - alloc->pages;
338 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
339 +
340 + mm = alloc->vma_vm_mm;
341 + if (!mmget_not_zero(mm))
342 + goto err_mmget;
343 + if (!down_write_trylock(&mm->mmap_sem))
344 + goto err_down_write_mmap_sem_failed;
345 vma = binder_alloc_get_vma(alloc);
346 - if (vma) {
347 - if (!mmget_not_zero(alloc->vma_vm_mm))
348 - goto err_mmget;
349 - mm = alloc->vma_vm_mm;
350 - if (!down_write_trylock(&mm->mmap_sem))
351 - goto err_down_write_mmap_sem_failed;
352 - }
353
354 list_lru_isolate(lru, item);
355 spin_unlock(lock);
356 @@ -979,10 +978,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
357 PAGE_SIZE);
358
359 trace_binder_unmap_user_end(alloc, index);
360 -
361 - up_write(&mm->mmap_sem);
362 - mmput(mm);
363 }
364 + up_write(&mm->mmap_sem);
365 + mmput(mm);
366
367 trace_binder_unmap_kernel_start(alloc, index);
368
369 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
370 index 9a8d83bc1e75..fc7aefd42ae0 100644
371 --- a/drivers/block/loop.c
372 +++ b/drivers/block/loop.c
373 @@ -1111,8 +1111,9 @@ out_unlock:
374 err = __blkdev_reread_part(bdev);
375 else
376 err = blkdev_reread_part(bdev);
377 - pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
378 - __func__, lo_number, err);
379 + if (err)
380 + pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
381 + __func__, lo_number, err);
382 /* Device is gone, no point in returning error */
383 err = 0;
384 }
385 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
386 index 684854d3b0ad..7e57f8f012c3 100644
387 --- a/drivers/block/zram/zram_drv.c
388 +++ b/drivers/block/zram/zram_drv.c
389 @@ -774,18 +774,18 @@ struct zram_work {
390 struct zram *zram;
391 unsigned long entry;
392 struct bio *bio;
393 + struct bio_vec bvec;
394 };
395
396 #if PAGE_SIZE != 4096
397 static void zram_sync_read(struct work_struct *work)
398 {
399 - struct bio_vec bvec;
400 struct zram_work *zw = container_of(work, struct zram_work, work);
401 struct zram *zram = zw->zram;
402 unsigned long entry = zw->entry;
403 struct bio *bio = zw->bio;
404
405 - read_from_bdev_async(zram, &bvec, entry, bio);
406 + read_from_bdev_async(zram, &zw->bvec, entry, bio);
407 }
408
409 /*
410 @@ -798,6 +798,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
411 {
412 struct zram_work work;
413
414 + work.bvec = *bvec;
415 work.zram = zram;
416 work.entry = entry;
417 work.bio = bio;
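
The bug fixed above is a lifetime/initialization one: the worker used a bio_vec on its own stack that was never filled in, instead of the caller's. Copying the bio_vec into the work descriptor, as the hunk does, is the standard pattern (sketch with illustrative names):

/* Sketch: hand data to a worker by value, inside the work item,
 * rather than through a pointer to someone else's stack. */
struct my_work {
	struct work_struct work;
	struct bio_vec bvec;		/* copied before queueing */
};

static void my_worker(struct work_struct *w)
{
	struct my_work *mw = container_of(w, struct my_work, work);

	consume_bvec(&mw->bvec);	/* hypothetical consumer */
}
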
418 diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
419 index 131f3974740d..814853842e29 100644
420 --- a/drivers/dma/mediatek/mtk-cqdma.c
421 +++ b/drivers/dma/mediatek/mtk-cqdma.c
422 @@ -253,7 +253,7 @@ static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
423 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
424 mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
425 #else
426 - mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
427 + mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
428 #endif
429
430 /* setup the length */
431 diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
432 index 2b4f25698169..e2a5398f89b5 100644
433 --- a/drivers/dma/sh/rcar-dmac.c
434 +++ b/drivers/dma/sh/rcar-dmac.c
435 @@ -1282,6 +1282,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
436 enum dma_status status;
437 unsigned int residue = 0;
438 unsigned int dptr = 0;
439 + unsigned int chcrb;
440 + unsigned int tcrb;
441 + unsigned int i;
442
443 if (!desc)
444 return 0;
445 @@ -1329,6 +1332,24 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
446 return 0;
447 }
448
449 + /*
450 + * We need to read two registers.
451 + * Make sure the control register does not skip to next chunk
452 + * while reading the counter.
453 + * Trying it 3 times should be enough: Initial read, retry, retry
454 + * for the paranoid.
455 + */
456 + for (i = 0; i < 3; i++) {
457 + chcrb = rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
458 + RCAR_DMACHCRB_DPTR_MASK;
459 + tcrb = rcar_dmac_chan_read(chan, RCAR_DMATCRB);
460 + /* Still the same? */
461 + if (chcrb == (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
462 + RCAR_DMACHCRB_DPTR_MASK))
463 + break;
464 + }
465 + WARN_ONCE(i >= 3, "residue might be not continuous!");
466 +
467 /*
468 * In descriptor mode the descriptor running pointer is not maintained
469 * by the interrupt handler, find the running descriptor from the
470 @@ -1336,8 +1357,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
471 * mode just use the running descriptor pointer.
472 */
473 if (desc->hwdescs.use) {
474 - dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
475 - RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
476 + dptr = chcrb >> RCAR_DMACHCRB_DPTR_SHIFT;
477 if (dptr == 0)
478 dptr = desc->nchunks;
479 dptr--;
480 @@ -1355,7 +1375,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
481 }
482
483 /* Add the residue for the current chunk. */
484 - residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift;
485 + residue += tcrb << desc->xfer_shift;
486
487 return residue;
488 }
489 @@ -1368,6 +1388,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
490 enum dma_status status;
491 unsigned long flags;
492 unsigned int residue;
493 + bool cyclic;
494
495 status = dma_cookie_status(chan, cookie, txstate);
496 if (status == DMA_COMPLETE || !txstate)
497 @@ -1375,10 +1396,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
498
499 spin_lock_irqsave(&rchan->lock, flags);
500 residue = rcar_dmac_chan_get_residue(rchan, cookie);
501 + cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
502 spin_unlock_irqrestore(&rchan->lock, flags);
503
504 /* if there's no residue, the cookie is complete */
505 - if (!residue)
506 + if (!residue && !cyclic)
507 return DMA_COMPLETE;
508
509 dma_set_residue(txstate, residue);
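
The retry loop added above is a lock-free consistency check in the spirit of a seqlock: read register A, read register B, then re-read A; if A changed in between, B may describe a different chunk, so retry (generic sketch; read_reg(), REG_A and REG_B are illustrative):

/* Sketch: read two hardware registers as a consistent pair. */
for (i = 0; i < 3; i++) {
	a = read_reg(REG_A);		/* chunk pointer */
	b = read_reg(REG_B);		/* byte counter */
	if (a == read_reg(REG_A))	/* still the same chunk? */
		break;			/* a and b belong together */
}

Three attempts suffice because a chunk transition between the two reads is rare, as the hunk's own comment notes.
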
510 diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
511 index e41223c05f6e..6cf2e2ce4093 100644
512 --- a/drivers/gpio/gpio-eic-sprd.c
513 +++ b/drivers/gpio/gpio-eic-sprd.c
514 @@ -414,6 +414,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
515 irq_set_handler_locked(data, handle_edge_irq);
516 break;
517 case IRQ_TYPE_EDGE_BOTH:
518 + sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
519 sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
520 irq_set_handler_locked(data, handle_edge_irq);
521 break;
522 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
523 index 4ee16b264dbe..7f365ac0b549 100644
524 --- a/drivers/gpu/drm/i915/intel_fbdev.c
525 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
526 @@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
527 bool *enabled, int width, int height)
528 {
529 struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
530 + unsigned long conn_configured, conn_seq, mask;
531 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
532 - unsigned long conn_configured, conn_seq;
533 int i, j;
534 bool *save_enabled;
535 bool fallback = true, ret = true;
536 @@ -355,9 +355,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
537 drm_modeset_backoff(&ctx);
538
539 memcpy(save_enabled, enabled, count);
540 - conn_seq = GENMASK(count - 1, 0);
541 + mask = GENMASK(count - 1, 0);
542 conn_configured = 0;
543 retry:
544 + conn_seq = conn_configured;
545 for (i = 0; i < count; i++) {
546 struct drm_fb_helper_connector *fb_conn;
547 struct drm_connector *connector;
548 @@ -370,8 +371,7 @@ retry:
549 if (conn_configured & BIT(i))
550 continue;
551
552 - /* First pass, only consider tiled connectors */
553 - if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
554 + if (conn_seq == 0 && !connector->has_tile)
555 continue;
556
557 if (connector->status == connector_status_connected)
558 @@ -475,10 +475,8 @@ retry:
559 conn_configured |= BIT(i);
560 }
561
562 - if (conn_configured != conn_seq) { /* repeat until no more are found */
563 - conn_seq = conn_configured;
564 + if ((conn_configured & mask) != mask && conn_configured != conn_seq)
565 goto retry;
566 - }
567
568 /*
569 * If the BIOS didn't enable everything it could, fall back to have the
570 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
571 index 0ec08394e17a..996cadd83f24 100644
572 --- a/drivers/gpu/drm/ttm/ttm_bo.c
573 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
574 @@ -49,9 +49,8 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj);
575 * ttm_global_mutex - protecting the global BO state
576 */
577 DEFINE_MUTEX(ttm_global_mutex);
578 -struct ttm_bo_global ttm_bo_glob = {
579 - .use_count = 0
580 -};
581 +unsigned ttm_bo_glob_use_count;
582 +struct ttm_bo_global ttm_bo_glob;
583
584 static struct attribute ttm_bo_count = {
585 .name = "bo_count",
586 @@ -1535,12 +1534,13 @@ static void ttm_bo_global_release(void)
587 struct ttm_bo_global *glob = &ttm_bo_glob;
588
589 mutex_lock(&ttm_global_mutex);
590 - if (--glob->use_count > 0)
591 + if (--ttm_bo_glob_use_count > 0)
592 goto out;
593
594 kobject_del(&glob->kobj);
595 kobject_put(&glob->kobj);
596 ttm_mem_global_release(&ttm_mem_glob);
597 + memset(glob, 0, sizeof(*glob));
598 out:
599 mutex_unlock(&ttm_global_mutex);
600 }
601 @@ -1552,7 +1552,7 @@ static int ttm_bo_global_init(void)
602 unsigned i;
603
604 mutex_lock(&ttm_global_mutex);
605 - if (++glob->use_count > 1)
606 + if (++ttm_bo_glob_use_count > 1)
607 goto out;
608
609 ret = ttm_mem_global_init(&ttm_mem_glob);
610 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
611 index f1567c353b54..9a0909decb36 100644
612 --- a/drivers/gpu/drm/ttm/ttm_memory.c
613 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
614 @@ -461,8 +461,8 @@ out_no_zone:
615
616 void ttm_mem_global_release(struct ttm_mem_global *glob)
617 {
618 - unsigned int i;
619 struct ttm_mem_zone *zone;
620 + unsigned int i;
621
622 /* let the page allocator first stop the shrink work. */
623 ttm_page_alloc_fini();
624 @@ -475,9 +475,10 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
625 zone = glob->zones[i];
626 kobject_del(&zone->kobj);
627 kobject_put(&zone->kobj);
628 - }
629 + }
630 kobject_del(&glob->kobj);
631 kobject_put(&glob->kobj);
632 + memset(glob, 0, sizeof(*glob));
633 }
634
635 static void ttm_check_swapping(struct ttm_mem_global *glob)
636 diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
637 index 3ce136ba8791..2ae4ece0dcea 100644
638 --- a/drivers/gpu/drm/vc4/vc4_crtc.c
639 +++ b/drivers/gpu/drm/vc4/vc4_crtc.c
640 @@ -999,7 +999,7 @@ static void
641 vc4_crtc_reset(struct drm_crtc *crtc)
642 {
643 if (crtc->state)
644 - __drm_atomic_helper_crtc_destroy_state(crtc->state);
645 + vc4_crtc_destroy_state(crtc, crtc->state);
646
647 crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
648 if (crtc->state)
649 diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
650 index cc287cf6eb29..edc52d75e6bd 100644
651 --- a/drivers/hwtracing/intel_th/gth.c
652 +++ b/drivers/hwtracing/intel_th/gth.c
653 @@ -616,7 +616,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
654 othdev->output.port = -1;
655 othdev->output.active = false;
656 gth->output[port].output = NULL;
657 - for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
658 + for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
659 if (gth->master[master] == port)
660 gth->master[master] = -1;
661 spin_unlock(&gth->gth_lock);
662 diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
663 index ea0bc6885517..32cc8fe7902f 100644
664 --- a/drivers/infiniband/core/uverbs.h
665 +++ b/drivers/infiniband/core/uverbs.h
666 @@ -160,6 +160,7 @@ struct ib_uverbs_file {
667
668 struct mutex umap_lock;
669 struct list_head umaps;
670 + struct page *disassociate_page;
671
672 struct idr idr;
673 /* spinlock protects write access to idr */
674 diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
675 index e2a4570a47e8..27ca4022ca70 100644
676 --- a/drivers/infiniband/core/uverbs_main.c
677 +++ b/drivers/infiniband/core/uverbs_main.c
678 @@ -208,6 +208,9 @@ void ib_uverbs_release_file(struct kref *ref)
679 kref_put(&file->async_file->ref,
680 ib_uverbs_release_async_event_file);
681 put_device(&file->device->dev);
682 +
683 + if (file->disassociate_page)
684 + __free_pages(file->disassociate_page, 0);
685 kfree(file);
686 }
687
688 @@ -876,9 +879,50 @@ static void rdma_umap_close(struct vm_area_struct *vma)
689 kfree(priv);
690 }
691
692 +/*
693 + * Once the zap_vma_ptes has been called touches to the VMA will come here and
694 + * we return a dummy writable zero page for all the pfns.
695 + */
696 +static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
697 +{
698 + struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
699 + struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
700 + vm_fault_t ret = 0;
701 +
702 + if (!priv)
703 + return VM_FAULT_SIGBUS;
704 +
705 + /* Read only pages can just use the system zero page. */
706 + if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
707 + vmf->page = ZERO_PAGE(vmf->address);
708 + get_page(vmf->page);
709 + return 0;
710 + }
711 +
712 + mutex_lock(&ufile->umap_lock);
713 + if (!ufile->disassociate_page)
714 + ufile->disassociate_page =
715 + alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);
716 +
717 + if (ufile->disassociate_page) {
718 + /*
719 + * This VMA is forced to always be shared so this doesn't have
720 + * to worry about COW.
721 + */
722 + vmf->page = ufile->disassociate_page;
723 + get_page(vmf->page);
724 + } else {
725 + ret = VM_FAULT_SIGBUS;
726 + }
727 + mutex_unlock(&ufile->umap_lock);
728 +
729 + return ret;
730 +}
731 +
732 static const struct vm_operations_struct rdma_umap_ops = {
733 .open = rdma_umap_open,
734 .close = rdma_umap_close,
735 + .fault = rdma_umap_fault,
736 };
737
738 static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
739 @@ -888,6 +932,9 @@ static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
740 struct ib_uverbs_file *ufile = ucontext->ufile;
741 struct rdma_umap_priv *priv;
742
743 + if (!(vma->vm_flags & VM_SHARED))
744 + return ERR_PTR(-EINVAL);
745 +
746 if (vma->vm_end - vma->vm_start != size)
747 return ERR_PTR(-EINVAL);
748
749 @@ -991,7 +1038,7 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
750 * at a time to get the lock ordering right. Typically there
751 * will only be one mm, so no big deal.
752 */
753 - down_write(&mm->mmap_sem);
754 + down_read(&mm->mmap_sem);
755 if (!mmget_still_valid(mm))
756 goto skip_mm;
757 mutex_lock(&ufile->umap_lock);
758 @@ -1005,11 +1052,10 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
759
760 zap_vma_ptes(vma, vma->vm_start,
761 vma->vm_end - vma->vm_start);
762 - vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
763 }
764 mutex_unlock(&ufile->umap_lock);
765 skip_mm:
766 - up_write(&mm->mmap_sem);
767 + up_read(&mm->mmap_sem);
768 mmput(mm);
769 }
770 }
771 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
772 index 94fe253d4956..497181f5ba09 100644
773 --- a/drivers/infiniband/hw/mlx5/main.c
774 +++ b/drivers/infiniband/hw/mlx5/main.c
775 @@ -1982,6 +1982,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
776
777 if (vma->vm_flags & VM_WRITE)
778 return -EPERM;
779 + vma->vm_flags &= ~VM_MAYWRITE;
780
781 if (!dev->mdev->clock_info_page)
782 return -EOPNOTSUPP;
783 @@ -2147,19 +2148,18 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
784
785 if (vma->vm_flags & VM_WRITE)
786 return -EPERM;
787 + vma->vm_flags &= ~VM_MAYWRITE;
788
789 /* Don't expose to user-space information it shouldn't have */
790 if (PAGE_SIZE > 4096)
791 return -EOPNOTSUPP;
792
793 - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
794 pfn = (dev->mdev->iseg_base +
795 offsetof(struct mlx5_init_seg, internal_timer_h)) >>
796 PAGE_SHIFT;
797 - if (io_remap_pfn_range(vma, vma->vm_start, pfn,
798 - PAGE_SIZE, vma->vm_page_prot))
799 - return -EAGAIN;
800 - break;
801 + return rdma_user_mmap_io(&context->ibucontext, vma, pfn,
802 + PAGE_SIZE,
803 + pgprot_noncached(vma->vm_page_prot));
804 case MLX5_IB_MMAP_CLOCK_INFO:
805 return mlx5_ib_mmap_clock_info_page(dev, vma, context);
806
807 diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
808 index 49c9541050d4..5819c9d6ffdc 100644
809 --- a/drivers/infiniband/sw/rdmavt/mr.c
810 +++ b/drivers/infiniband/sw/rdmavt/mr.c
811 @@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
812 if (unlikely(mapped_segs == mr->mr.max_segs))
813 return -ENOMEM;
814
815 - if (mr->mr.length == 0) {
816 - mr->mr.user_base = addr;
817 - mr->mr.iova = addr;
818 - }
819 -
820 m = mapped_segs / RVT_SEGSZ;
821 n = mapped_segs % RVT_SEGSZ;
822 mr->mr.map[m]->segs[n].vaddr = (void *)addr;
823 @@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
824 * @sg_nents: number of entries in sg
825 * @sg_offset: offset in bytes into sg
826 *
827 + * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
828 + *
829 * Return: number of sg elements mapped to the memory region
830 */
831 int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
832 int sg_nents, unsigned int *sg_offset)
833 {
834 struct rvt_mr *mr = to_imr(ibmr);
835 + int ret;
836
837 mr->mr.length = 0;
838 mr->mr.page_shift = PAGE_SHIFT;
839 - return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
840 - rvt_set_page);
841 + ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
842 + mr->mr.user_base = ibmr->iova;
843 + mr->mr.iova = ibmr->iova;
844 + mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
845 + mr->mr.length = (size_t)ibmr->length;
846 + return ret;
847 }
848
849 /**
850 @@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
851 ibmr->rkey = key;
852 mr->mr.lkey = key;
853 mr->mr.access_flags = access;
854 + mr->mr.iova = ibmr->iova;
855 atomic_set(&mr->mr.lkey_invalid, 0);
856
857 return 0;
858 diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
859 index df64d6aed4f7..93901ebd122a 100644
860 --- a/drivers/input/rmi4/rmi_f11.c
861 +++ b/drivers/input/rmi4/rmi_f11.c
862 @@ -1230,7 +1230,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
863 }
864
865 rc = f11_write_control_regs(fn, &f11->sens_query,
866 - &f11->dev_controls, fn->fd.query_base_addr);
867 + &f11->dev_controls, fn->fd.control_base_addr);
868 if (rc)
869 dev_warn(&fn->dev, "Failed to write control registers\n");
870
871 diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
872 index 6fd15a734324..58f02c85f2fe 100644
873 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
874 +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
875 @@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
876 /* create driver workqueue */
877 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
878 fm10k_driver_name);
879 + if (!fm10k_workqueue)
880 + return -ENOMEM;
881
882 fm10k_dbg_init();
883
884 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
885 index 03b2a9f9c589..cad34d6f5f45 100644
886 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
887 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
888 @@ -33,6 +33,26 @@
889 #include <linux/bpf_trace.h>
890 #include "en/xdp.h"
891
892 +int mlx5e_xdp_max_mtu(struct mlx5e_params *params)
893 +{
894 + int hr = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
895 +
896 + /* Let S := SKB_DATA_ALIGN(sizeof(struct skb_shared_info)).
897 + * The condition checked in mlx5e_rx_is_linear_skb is:
898 + * SKB_DATA_ALIGN(sw_mtu + hard_mtu + hr) + S <= PAGE_SIZE (1)
899 + * (Note that hw_mtu == sw_mtu + hard_mtu.)
900 + * What is returned from this function is:
901 + * max_mtu = PAGE_SIZE - S - hr - hard_mtu (2)
902 + * After assigning sw_mtu := max_mtu, the left side of (1) turns to
903 + * SKB_DATA_ALIGN(PAGE_SIZE - S) + S, which is equal to PAGE_SIZE,
904 + * because both PAGE_SIZE and S are already aligned. Any number greater
905 + * than max_mtu would make the left side of (1) greater than PAGE_SIZE,
906 + * so max_mtu is the maximum MTU allowed.
907 + */
908 +
909 + return MLX5E_HW2SW_MTU(params, SKB_MAX_HEAD(hr));
910 +}
911 +
912 static inline bool
913 mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di,
914 struct xdp_buff *xdp)
915 @@ -304,9 +324,9 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
916 mlx5e_xdpi_fifo_pop(xdpi_fifo);
917
918 if (is_redirect) {
919 - xdp_return_frame(xdpi.xdpf);
920 dma_unmap_single(sq->pdev, xdpi.dma_addr,
921 xdpi.xdpf->len, DMA_TO_DEVICE);
922 + xdp_return_frame(xdpi.xdpf);
923 } else {
924 /* Recycle RX page */
925 mlx5e_page_release(rq, &xdpi.di, true);
926 @@ -345,9 +365,9 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
927 mlx5e_xdpi_fifo_pop(xdpi_fifo);
928
929 if (is_redirect) {
930 - xdp_return_frame(xdpi.xdpf);
931 dma_unmap_single(sq->pdev, xdpi.dma_addr,
932 xdpi.xdpf->len, DMA_TO_DEVICE);
933 + xdp_return_frame(xdpi.xdpf);
934 } else {
935 /* Recycle RX page */
936 mlx5e_page_release(rq, &xdpi.di, false);
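
Plugging numbers into equation (2) above, assuming PAGE_SIZE = 4096, hr = 256 (XDP_PACKET_HEADROOM with NET_IP_ALIGN = 0) and S = 320 — both of the latter are config-dependent assumptions:

max_mtu = PAGE_SIZE - S - hr - hard_mtu
        = 4096 - 320 - 256 - hard_mtu
        = 3520 - hard_mtu

Any sw_mtu up to this bound keeps condition (1) within a single page, which is what mlx5e_rx_is_linear_skb() requires.
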
937 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
938 index ee27a7c8cd87..553956cadc8a 100644
939 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
940 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
941 @@ -34,13 +34,12 @@
942
943 #include "en.h"
944
945 -#define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \
946 - MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM)))
947 #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
948 #define MLX5E_XDP_TX_EMPTY_DS_COUNT \
949 (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
950 #define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
951
952 +int mlx5e_xdp_max_mtu(struct mlx5e_params *params);
953 bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
954 void *va, u16 *rx_headroom, u32 *len);
955 bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq);
956 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
957 index 3b9e5f0d0212..253496c4a3db 100644
958 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
959 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
960 @@ -1470,7 +1470,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
961 break;
962 case MLX5_MODULE_ID_SFP:
963 modinfo->type = ETH_MODULE_SFF_8472;
964 - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
965 + modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
966 break;
967 default:
968 netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
969 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
970 index 0cb19e4dd439..2d269acdbc8e 100644
971 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
972 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
973 @@ -3816,7 +3816,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
974 if (params->xdp_prog &&
975 !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
976 netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
977 - new_mtu, MLX5E_XDP_MAX_MTU);
978 + new_mtu, mlx5e_xdp_max_mtu(params));
979 err = -EINVAL;
980 goto out;
981 }
982 @@ -4280,7 +4280,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
983
984 if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
985 netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
986 - new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU);
987 + new_channels.params.sw_mtu,
988 + mlx5e_xdp_max_mtu(&new_channels.params));
989 return -EINVAL;
990 }
991
992 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
993 index 2b82f35f4c35..efce1fa37f6f 100644
994 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
995 +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
996 @@ -404,10 +404,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
997 size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
998
999 i2c_addr = MLX5_I2C_ADDR_LOW;
1000 - if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
1001 - i2c_addr = MLX5_I2C_ADDR_HIGH;
1002 - offset -= MLX5_EEPROM_PAGE_LENGTH;
1003 - }
1004
1005 MLX5_SET(mcia_reg, in, l, 0);
1006 MLX5_SET(mcia_reg, in, module, module_num);
1007 diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1008 index ffee38e36ce8..8648ca171254 100644
1009 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1010 +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
1011 @@ -27,7 +27,7 @@
1012
1013 #define MLXSW_PCI_SW_RESET 0xF0010
1014 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
1015 -#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 13000
1016 +#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 20000
1017 #define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
1018 #define MLXSW_PCI_FW_READY 0xA1844
1019 #define MLXSW_PCI_FW_READY_MASK 0xFFFF
1020 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1021 index cbdee5164be7..ce49504e1f9c 100644
1022 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1023 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
1024 @@ -2667,11 +2667,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
1025 if (err)
1026 return err;
1027
1028 + mlxsw_sp_port->link.autoneg = autoneg;
1029 +
1030 if (!netif_running(dev))
1031 return 0;
1032
1033 - mlxsw_sp_port->link.autoneg = autoneg;
1034 -
1035 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1036 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1037
1038 @@ -2961,7 +2961,7 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
1039 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
1040 MLXSW_REG_QEEC_HIERARCY_TC,
1041 i + 8, i,
1042 - false, 0);
1043 + true, 100);
1044 if (err)
1045 return err;
1046 }
1047 diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
1048 index a18149720aa2..cba5881b2746 100644
1049 --- a/drivers/net/ethernet/socionext/netsec.c
1050 +++ b/drivers/net/ethernet/socionext/netsec.c
1051 @@ -673,7 +673,8 @@ static void netsec_process_tx(struct netsec_priv *priv)
1052 }
1053
1054 static void *netsec_alloc_rx_data(struct netsec_priv *priv,
1055 - dma_addr_t *dma_handle, u16 *desc_len)
1056 + dma_addr_t *dma_handle, u16 *desc_len,
1057 + bool napi)
1058 {
1059 size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1060 size_t payload_len = NETSEC_RX_BUF_SZ;
1061 @@ -682,7 +683,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
1062
1063 total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
1064
1065 - buf = napi_alloc_frag(total_len);
1066 + buf = napi ? napi_alloc_frag(total_len) : netdev_alloc_frag(total_len);
1067 if (!buf)
1068 return NULL;
1069
1070 @@ -765,7 +766,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
1071 /* allocate a fresh buffer and map it to the hardware.
1072 * This will eventually replace the old buffer in the hardware
1073 */
1074 - buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
1075 + buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len,
1076 + true);
1077 if (unlikely(!buf_addr))
1078 break;
1079
1080 @@ -1069,7 +1071,8 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
1081 void *buf;
1082 u16 len;
1083
1084 - buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
1085 + buf = netsec_alloc_rx_data(priv, &dma_handle, &len,
1086 + false);
1087 if (!buf) {
1088 netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
1089 goto err_out;
1090 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1091 index 019ab99e65bb..1d8d6f2ddfd6 100644
1092 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1093 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1094 @@ -2590,8 +2590,6 @@ static int stmmac_open(struct net_device *dev)
1095 u32 chan;
1096 int ret;
1097
1098 - stmmac_check_ether_addr(priv);
1099 -
1100 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1101 priv->hw->pcs != STMMAC_PCS_TBI &&
1102 priv->hw->pcs != STMMAC_PCS_RTBI) {
1103 @@ -4265,6 +4263,8 @@ int stmmac_dvr_probe(struct device *device,
1104 if (ret)
1105 goto error_hw_init;
1106
1107 + stmmac_check_ether_addr(priv);
1108 +
1109 /* Configure real RX and TX queues */
1110 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
1111 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
1112 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1113 index d819e8eaba12..cc1e887e47b5 100644
1114 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1115 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
1116 @@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
1117 },
1118 .driver_data = (void *)&galileo_stmmac_dmi_data,
1119 },
1120 + /*
1121 + * There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
1122 + * The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
1123 + * has only one pci network device while other asset tags are
1124 + * for IOT2040 which has two.
1125 + */
1126 {
1127 .matches = {
1128 DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1129 @@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
1130 {
1131 .matches = {
1132 DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
1133 - DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
1134 - "6ES7647-0AA00-1YA2"),
1135 },
1136 .driver_data = (void *)&iot2040_stmmac_dmi_data,
1137 },
1138 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
1139 index f4e93f5fc204..ea90db3c7705 100644
1140 --- a/drivers/net/slip/slhc.c
1141 +++ b/drivers/net/slip/slhc.c
1142 @@ -153,7 +153,7 @@ out_fail:
1143 void
1144 slhc_free(struct slcompress *comp)
1145 {
1146 - if ( comp == NULLSLCOMPR )
1147 + if ( IS_ERR_OR_NULL(comp) )
1148 return;
1149
1150 if ( comp->tstate != NULLSLSTATE )
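
The slhc_free() change above hardens the destructor against callers that pass an ERR_PTR from a failed allocation rather than a valid pointer or NULL. The general shape (struct thing and my_free() are illustrative):

/* Sketch: a destructor tolerant of NULL and ERR_PTR inputs. */
static void my_free(struct thing *t)
{
	if (IS_ERR_OR_NULL(t))
		return;		/* nothing valid to release */
	kfree(t);
}
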
1151 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1152 index 1283632091d5..7dcda9364009 100644
1153 --- a/drivers/net/team/team.c
1154 +++ b/drivers/net/team/team.c
1155 @@ -1157,6 +1157,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
1156 return -EINVAL;
1157 }
1158
1159 + if (netdev_has_upper_dev(dev, port_dev)) {
1160 + NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
1161 + netdev_err(dev, "Device %s is already an upper device of the team interface\n",
1162 + portname);
1163 + return -EBUSY;
1164 + }
1165 +
1166 if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1167 vlan_uses_dev(dev)) {
1168 NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
1169 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
1170 index 6359053bd0c7..862fd2b92d12 100644
1171 --- a/drivers/net/wireless/mac80211_hwsim.c
1172 +++ b/drivers/net/wireless/mac80211_hwsim.c
1173 @@ -2642,7 +2642,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1174 enum nl80211_band band;
1175 const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
1176 struct net *net;
1177 - int idx;
1178 + int idx, i;
1179 int n_limits = 0;
1180
1181 if (WARN_ON(param->channels > 1 && !param->use_chanctx))
1182 @@ -2766,12 +2766,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
1183 goto failed_hw;
1184 }
1185
1186 + data->if_combination.max_interfaces = 0;
1187 + for (i = 0; i < n_limits; i++)
1188 + data->if_combination.max_interfaces +=
1189 + data->if_limits[i].max;
1190 +
1191 data->if_combination.n_limits = n_limits;
1192 - data->if_combination.max_interfaces = 2048;
1193 data->if_combination.limits = data->if_limits;
1194
1195 - hw->wiphy->iface_combinations = &data->if_combination;
1196 - hw->wiphy->n_iface_combinations = 1;
1197 + /*
1198 + * If we actually were asked to support combinations,
1199 + * advertise them - if there's only a single thing like
1200 + * only IBSS then don't advertise it as combinations.
1201 + */
1202 + if (data->if_combination.max_interfaces > 1) {
1203 + hw->wiphy->iface_combinations = &data->if_combination;
1204 + hw->wiphy->n_iface_combinations = 1;
1205 + }
1206
1207 if (param->ciphers) {
1208 memcpy(data->ciphers, param->ciphers,
1209 diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
1210 index 53564386ed57..8987cec9549d 100644
1211 --- a/drivers/usb/core/driver.c
1212 +++ b/drivers/usb/core/driver.c
1213 @@ -1896,14 +1896,11 @@ int usb_runtime_idle(struct device *dev)
1214 return -EBUSY;
1215 }
1216
1217 -int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1218 +static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1219 {
1220 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
1221 int ret = -EPERM;
1222
1223 - if (enable && !udev->usb2_hw_lpm_allowed)
1224 - return 0;
1225 -
1226 if (hcd->driver->set_usb2_hw_lpm) {
1227 ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
1228 if (!ret)
1229 @@ -1913,6 +1910,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1230 return ret;
1231 }
1232
1233 +int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
1234 +{
1235 + if (!udev->usb2_hw_lpm_capable ||
1236 + !udev->usb2_hw_lpm_allowed ||
1237 + udev->usb2_hw_lpm_enabled)
1238 + return 0;
1239 +
1240 + return usb_set_usb2_hardware_lpm(udev, 1);
1241 +}
1242 +
1243 +int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
1244 +{
1245 + if (!udev->usb2_hw_lpm_enabled)
1246 + return 0;
1247 +
1248 + return usb_set_usb2_hardware_lpm(udev, 0);
1249 +}
1250 +
1251 #endif /* CONFIG_PM */
1252
1253 struct bus_type usb_bus_type = {
1254 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
1255 index 1d1e61e980f3..55c87be5764c 100644
1256 --- a/drivers/usb/core/hub.c
1257 +++ b/drivers/usb/core/hub.c
1258 @@ -3220,8 +3220,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
1259 }
1260
1261 /* disable USB2 hardware LPM */
1262 - if (udev->usb2_hw_lpm_enabled == 1)
1263 - usb_set_usb2_hardware_lpm(udev, 0);
1264 + usb_disable_usb2_hardware_lpm(udev);
1265
1266 if (usb_disable_ltm(udev)) {
1267 dev_err(&udev->dev, "Failed to disable LTM before suspend\n");
1268 @@ -3259,8 +3258,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
1269 usb_enable_ltm(udev);
1270 err_ltm:
1271 /* Try to enable USB2 hardware LPM again */
1272 - if (udev->usb2_hw_lpm_capable == 1)
1273 - usb_set_usb2_hardware_lpm(udev, 1);
1274 + usb_enable_usb2_hardware_lpm(udev);
1275
1276 if (udev->do_remote_wakeup)
1277 (void) usb_disable_remote_wakeup(udev);
1278 @@ -3543,8 +3541,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
1279 hub_port_logical_disconnect(hub, port1);
1280 } else {
1281 /* Try to enable USB2 hardware LPM */
1282 - if (udev->usb2_hw_lpm_capable == 1)
1283 - usb_set_usb2_hardware_lpm(udev, 1);
1284 + usb_enable_usb2_hardware_lpm(udev);
1285
1286 /* Try to enable USB3 LTM */
1287 usb_enable_ltm(udev);
1288 @@ -4435,7 +4432,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
1289 if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
1290 connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
1291 udev->usb2_hw_lpm_allowed = 1;
1292 - usb_set_usb2_hardware_lpm(udev, 1);
1293 + usb_enable_usb2_hardware_lpm(udev);
1294 }
1295 }
1296
1297 @@ -5649,8 +5646,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1298 /* Disable USB2 hardware LPM.
1299 * It will be re-enabled by the enumeration process.
1300 */
1301 - if (udev->usb2_hw_lpm_enabled == 1)
1302 - usb_set_usb2_hardware_lpm(udev, 0);
1303 + usb_disable_usb2_hardware_lpm(udev);
1304
1305 /* Disable LPM while we reset the device and reinstall the alt settings.
1306 * Device-initiated LPM, and system exit latency settings are cleared
1307 @@ -5753,7 +5749,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
1308
1309 done:
1310 /* Now that the alt settings are re-installed, enable LTM and LPM. */
1311 - usb_set_usb2_hardware_lpm(udev, 1);
1312 + usb_enable_usb2_hardware_lpm(udev);
1313 usb_unlocked_enable_lpm(udev);
1314 usb_enable_ltm(udev);
1315 usb_release_bos_descriptor(udev);
1316 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1317 index bfa5eda0cc26..4f33eb632a88 100644
1318 --- a/drivers/usb/core/message.c
1319 +++ b/drivers/usb/core/message.c
1320 @@ -1243,8 +1243,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
1321 dev->actconfig->interface[i] = NULL;
1322 }
1323
1324 - if (dev->usb2_hw_lpm_enabled == 1)
1325 - usb_set_usb2_hardware_lpm(dev, 0);
1326 + usb_disable_usb2_hardware_lpm(dev);
1327 usb_unlocked_disable_lpm(dev);
1328 usb_disable_ltm(dev);
1329
1330 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
1331 index ea18284dfa9a..7e88fdfe3cf5 100644
1332 --- a/drivers/usb/core/sysfs.c
1333 +++ b/drivers/usb/core/sysfs.c
1334 @@ -528,7 +528,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
1335
1336 if (!ret) {
1337 udev->usb2_hw_lpm_allowed = value;
1338 - ret = usb_set_usb2_hardware_lpm(udev, value);
1339 + if (value)
1340 + ret = usb_enable_usb2_hardware_lpm(udev);
1341 + else
1342 + ret = usb_disable_usb2_hardware_lpm(udev);
1343 }
1344
1345 usb_unlock_device(udev);
1346 diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
1347 index 546a2219454b..d95a5358f73d 100644
1348 --- a/drivers/usb/core/usb.h
1349 +++ b/drivers/usb/core/usb.h
1350 @@ -92,7 +92,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
1351 extern int usb_runtime_suspend(struct device *dev);
1352 extern int usb_runtime_resume(struct device *dev);
1353 extern int usb_runtime_idle(struct device *dev);
1354 -extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
1355 +extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
1356 +extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
1357
1358 #else
1359
1360 @@ -112,7 +113,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
1361 return 0;
1362 }
1363
1364 -static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
1365 +static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
1366 +{
1367 + return 0;
1368 +}
1369 +
1370 +static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
1371 {
1372 return 0;
1373 }
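
The USB changes above are one refactor repeated at every call site: a boolean-parameter helper becomes an enable/disable pair that owns the state checks the callers used to duplicate. In miniature:

/* Before: each caller guarded the call itself. */
if (udev->usb2_hw_lpm_enabled == 1)
	usb_set_usb2_hardware_lpm(udev, 0);

/* After: the wrapper owns the check; callers just state intent. */
usb_disable_usb2_hardware_lpm(udev);

Centralizing the checks also lets usb_enable_usb2_hardware_lpm() verify usb2_hw_lpm_capable and usb2_hw_lpm_allowed in one place.
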
1374 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1375 index 73652e21efec..d0f731c9920a 100644
1376 --- a/drivers/vfio/vfio_iommu_type1.c
1377 +++ b/drivers/vfio/vfio_iommu_type1.c
1378 @@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
1379 MODULE_PARM_DESC(disable_hugepages,
1380 "Disable VFIO IOMMU support for IOMMU hugepages.");
1381
1382 +static unsigned int dma_entry_limit __read_mostly = U16_MAX;
1383 +module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
1384 +MODULE_PARM_DESC(dma_entry_limit,
1385 + "Maximum number of user DMA mappings per container (65535).");
1386 +
1387 struct vfio_iommu {
1388 struct list_head domain_list;
1389 struct vfio_domain *external_domain; /* domain for external user */
1390 struct mutex lock;
1391 struct rb_root dma_list;
1392 struct blocking_notifier_head notifier;
1393 + unsigned int dma_avail;
1394 bool v2;
1395 bool nesting;
1396 };
1397 @@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
1398 vfio_unlink_dma(iommu, dma);
1399 put_task_struct(dma->task);
1400 kfree(dma);
1401 + iommu->dma_avail++;
1402 }
1403
1404 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
1405 @@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
1406 goto out_unlock;
1407 }
1408
1409 + if (!iommu->dma_avail) {
1410 + ret = -ENOSPC;
1411 + goto out_unlock;
1412 + }
1413 +
1414 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1415 if (!dma) {
1416 ret = -ENOMEM;
1417 goto out_unlock;
1418 }
1419
1420 + iommu->dma_avail--;
1421 dma->iova = iova;
1422 dma->vaddr = vaddr;
1423 dma->prot = prot;
1424 @@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
1425
1426 INIT_LIST_HEAD(&iommu->domain_list);
1427 iommu->dma_list = RB_ROOT;
1428 + iommu->dma_avail = dma_entry_limit;
1429 mutex_init(&iommu->lock);
1430 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1431
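The accounting added above is a plain credit counter: each container starts with dma_entry_limit credits (default U16_MAX, i.e. 65535), spends one per mapping, refunds one in vfio_remove_dma(), and fails map requests with -ENOSPC once the credits run out:

/* Sketch of the pattern, using the names from the hunk. */
if (!iommu->dma_avail)
	return -ENOSPC;		/* per-container limit reached */
iommu->dma_avail--;		/* charge one mapping */
/* ... and vfio_remove_dma() does iommu->dma_avail++; to refund. */

Being a 0644 module parameter, the limit can also be raised when loading the vfio_iommu_type1 module via its dma_entry_limit option.
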
1432 diff --git a/fs/aio.c b/fs/aio.c
1433 index 3d9669d011b9..efa13410e04e 100644
1434 --- a/fs/aio.c
1435 +++ b/fs/aio.c
1436 @@ -181,7 +181,7 @@ struct poll_iocb {
1437 struct file *file;
1438 struct wait_queue_head *head;
1439 __poll_t events;
1440 - bool woken;
1441 + bool done;
1442 bool cancelled;
1443 struct wait_queue_entry wait;
1444 struct work_struct work;
1445 @@ -204,8 +204,7 @@ struct aio_kiocb {
1446 struct kioctx *ki_ctx;
1447 kiocb_cancel_fn *ki_cancel;
1448
1449 - struct iocb __user *ki_user_iocb; /* user's aiocb */
1450 - __u64 ki_user_data; /* user's data for completion */
1451 + struct io_event ki_res;
1452
1453 struct list_head ki_list; /* the aio core uses this
1454 * for cancellation */
1455 @@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
1456 /* aio_get_req
1457 * Allocate a slot for an aio request.
1458 * Returns NULL if no requests are free.
1459 + *
1460 + * The refcount is initialized to 2 - one for the async op completion,
1461 + * one for the synchronous code that does this.
1462 */
1463 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1464 {
1465 @@ -1034,7 +1036,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1466 percpu_ref_get(&ctx->reqs);
1467 req->ki_ctx = ctx;
1468 INIT_LIST_HEAD(&req->ki_list);
1469 - refcount_set(&req->ki_refcnt, 0);
1470 + refcount_set(&req->ki_refcnt, 2);
1471 req->ki_eventfd = NULL;
1472 return req;
1473 }
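
The comment added above describes a split-ownership scheme: one reference belongs to the submitter, one to the completion side, and whichever drop brings the count to zero both publishes the io_event and frees the iocb. Using the names from this patch:

/* Sketch: two owners, last one out completes and destroys. */
refcount_set(&req->ki_refcnt, 2);	/* submitter + completion */

static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (refcount_dec_and_test(&iocb->ki_refcnt)) {
		aio_complete(iocb);	/* deliver the io_event */
		iocb_destroy(iocb);	/* free the request */
	}
}

This removes the fragile refcount_read()-based special case that the old iocb_put() needed.
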
1474 @@ -1067,30 +1069,18 @@ out:
1475 return ret;
1476 }
1477
1478 -static inline void iocb_put(struct aio_kiocb *iocb)
1479 +static inline void iocb_destroy(struct aio_kiocb *iocb)
1480 {
1481 - if (refcount_read(&iocb->ki_refcnt) == 0 ||
1482 - refcount_dec_and_test(&iocb->ki_refcnt)) {
1483 - if (iocb->ki_filp)
1484 - fput(iocb->ki_filp);
1485 - percpu_ref_put(&iocb->ki_ctx->reqs);
1486 - kmem_cache_free(kiocb_cachep, iocb);
1487 - }
1488 -}
1489 -
1490 -static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
1491 - long res, long res2)
1492 -{
1493 - ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
1494 - ev->data = iocb->ki_user_data;
1495 - ev->res = res;
1496 - ev->res2 = res2;
1497 + if (iocb->ki_filp)
1498 + fput(iocb->ki_filp);
1499 + percpu_ref_put(&iocb->ki_ctx->reqs);
1500 + kmem_cache_free(kiocb_cachep, iocb);
1501 }
1502
1503 /* aio_complete
1504 * Called when the io request on the given iocb is complete.
1505 */
1506 -static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1507 +static void aio_complete(struct aio_kiocb *iocb)
1508 {
1509 struct kioctx *ctx = iocb->ki_ctx;
1510 struct aio_ring *ring;
1511 @@ -1114,14 +1104,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1512 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1513 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1514
1515 - aio_fill_event(event, iocb, res, res2);
1516 + *event = iocb->ki_res;
1517
1518 kunmap_atomic(ev_page);
1519 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1520
1521 - pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
1522 - ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
1523 - res, res2);
1524 + pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1525 + (void __user *)(unsigned long)iocb->ki_res.obj,
1526 + iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1527
1528 /* after flagging the request as done, we
1529 * must never even look at it again
1530 @@ -1163,7 +1153,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1531
1532 if (waitqueue_active(&ctx->wait))
1533 wake_up(&ctx->wait);
1534 - iocb_put(iocb);
1535 +}
1536 +
1537 +static inline void iocb_put(struct aio_kiocb *iocb)
1538 +{
1539 + if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1540 + aio_complete(iocb);
1541 + iocb_destroy(iocb);
1542 + }
1543 }
1544
1545 /* aio_read_events_ring
1546 @@ -1437,7 +1434,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1547 file_end_write(kiocb->ki_filp);
1548 }
1549
1550 - aio_complete(iocb, res, res2);
1551 + iocb->ki_res.res = res;
1552 + iocb->ki_res.res2 = res2;
1553 + iocb_put(iocb);
1554 }
1555
1556 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
1557 @@ -1585,11 +1584,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
1558
1559 static void aio_fsync_work(struct work_struct *work)
1560 {
1561 - struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
1562 - int ret;
1563 + struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1564
1565 - ret = vfs_fsync(req->file, req->datasync);
1566 - aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
1567 + iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1568 + iocb_put(iocb);
1569 }
1570
1571 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1572 @@ -1608,11 +1606,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1573 return 0;
1574 }
1575
1576 -static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
1577 -{
1578 - aio_complete(iocb, mangle_poll(mask), 0);
1579 -}
1580 -
1581 static void aio_poll_complete_work(struct work_struct *work)
1582 {
1583 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1584 @@ -1638,9 +1631,11 @@ static void aio_poll_complete_work(struct work_struct *work)
1585 return;
1586 }
1587 list_del_init(&iocb->ki_list);
1588 + iocb->ki_res.res = mangle_poll(mask);
1589 + req->done = true;
1590 spin_unlock_irq(&ctx->ctx_lock);
1591
1592 - aio_poll_complete(iocb, mask);
1593 + iocb_put(iocb);
1594 }
1595
1596 /* assumes we are called with irqs disabled */
1597 @@ -1668,31 +1663,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1598 __poll_t mask = key_to_poll(key);
1599 unsigned long flags;
1600
1601 - req->woken = true;
1602 -
1603 /* for instances that support it check for an event match first: */
1604 - if (mask) {
1605 - if (!(mask & req->events))
1606 - return 0;
1607 + if (mask && !(mask & req->events))
1608 + return 0;
1609 +
1610 + list_del_init(&req->wait.entry);
1611
1612 + if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1613 /*
1614 * Try to complete the iocb inline if we can. Use
1615 * irqsave/irqrestore because not all filesystems (e.g. fuse)
1616 * call this function with IRQs disabled and because IRQs
1617 * have to be disabled before ctx_lock is obtained.
1618 */
1619 - if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1620 - list_del(&iocb->ki_list);
1621 - spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
1622 -
1623 - list_del_init(&req->wait.entry);
1624 - aio_poll_complete(iocb, mask);
1625 - return 1;
1626 - }
1627 + list_del(&iocb->ki_list);
1628 + iocb->ki_res.res = mangle_poll(mask);
1629 + req->done = true;
1630 + spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
1631 + iocb_put(iocb);
1632 + } else {
1633 + schedule_work(&req->work);
1634 }
1635 -
1636 - list_del_init(&req->wait.entry);
1637 - schedule_work(&req->work);
1638 return 1;
1639 }
1640
1641 @@ -1724,6 +1715,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1642 struct kioctx *ctx = aiocb->ki_ctx;
1643 struct poll_iocb *req = &aiocb->poll;
1644 struct aio_poll_table apt;
1645 + bool cancel = false;
1646 __poll_t mask;
1647
1648 /* reject any unknown events outside the normal event mask. */
1649 @@ -1737,7 +1729,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1650 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1651
1652 req->head = NULL;
1653 - req->woken = false;
1654 + req->done = false;
1655 req->cancelled = false;
1656
1657 apt.pt._qproc = aio_poll_queue_proc;
1658 @@ -1749,41 +1741,34 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1659 INIT_LIST_HEAD(&req->wait.entry);
1660 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1661
1662 - /* one for removal from waitqueue, one for this function */
1663 - refcount_set(&aiocb->ki_refcnt, 2);
1664 -
1665 mask = vfs_poll(req->file, &apt.pt) & req->events;
1666 - if (unlikely(!req->head)) {
1667 - /* we did not manage to set up a waitqueue, done */
1668 - goto out;
1669 - }
1670 -
1671 spin_lock_irq(&ctx->ctx_lock);
1672 - spin_lock(&req->head->lock);
1673 - if (req->woken) {
1674 - /* wake_up context handles the rest */
1675 - mask = 0;
1676 + if (likely(req->head)) {
1677 + spin_lock(&req->head->lock);
1678 + if (unlikely(list_empty(&req->wait.entry))) {
1679 + if (apt.error)
1680 + cancel = true;
1681 + apt.error = 0;
1682 + mask = 0;
1683 + }
1684 + if (mask || apt.error) {
1685 + list_del_init(&req->wait.entry);
1686 + } else if (cancel) {
1687 + WRITE_ONCE(req->cancelled, true);
1688 + } else if (!req->done) { /* actually waiting for an event */
1689 + list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1690 + aiocb->ki_cancel = aio_poll_cancel;
1691 + }
1692 + spin_unlock(&req->head->lock);
1693 + }
1694 + if (mask) { /* no async, we'd stolen it */
1695 + aiocb->ki_res.res = mangle_poll(mask);
1696 apt.error = 0;
1697 - } else if (mask || apt.error) {
1698 - /* if we get an error or a mask we are done */
1699 - WARN_ON_ONCE(list_empty(&req->wait.entry));
1700 - list_del_init(&req->wait.entry);
1701 - } else {
1702 - /* actually waiting for an event */
1703 - list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1704 - aiocb->ki_cancel = aio_poll_cancel;
1705 }
1706 - spin_unlock(&req->head->lock);
1707 spin_unlock_irq(&ctx->ctx_lock);
1708 -
1709 -out:
1710 - if (unlikely(apt.error))
1711 - return apt.error;
1712 -
1713 if (mask)
1714 - aio_poll_complete(aiocb, mask);
1715 - iocb_put(aiocb);
1716 - return 0;
1717 + iocb_put(aiocb);
1718 + return apt.error;
1719 }
1720
1721 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1722 @@ -1842,8 +1827,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1723 goto out_put_req;
1724 }
1725
1726 - req->ki_user_iocb = user_iocb;
1727 - req->ki_user_data = iocb->aio_data;
1728 + req->ki_res.obj = (u64)(unsigned long)user_iocb;
1729 + req->ki_res.data = iocb->aio_data;
1730 + req->ki_res.res = 0;
1731 + req->ki_res.res2 = 0;
1732
1733 switch (iocb->aio_lio_opcode) {
1734 case IOCB_CMD_PREAD:
1735 @@ -1873,18 +1860,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1736 break;
1737 }
1738
1739 + /* Done with the synchronous reference */
1740 + iocb_put(req);
1741 +
1742 /*
1743 * If ret is 0, we'd either done aio_complete() ourselves or have
1744 * arranged for that to be done asynchronously. Anything non-zero
1745 * means that we need to destroy req ourselves.
1746 */
1747 - if (ret)
1748 - goto out_put_req;
1749 - return 0;
1750 + if (!ret)
1751 + return 0;
1752 +
1753 out_put_req:
1754 if (req->ki_eventfd)
1755 eventfd_ctx_put(req->ki_eventfd);
1756 - iocb_put(req);
1757 + iocb_destroy(req);
1758 out_put_reqs_available:
1759 put_reqs_available(ctx, 1);
1760 return ret;
1761 @@ -1997,24 +1987,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
1762 }
1763 #endif
1764
1765 -/* lookup_kiocb
1766 - * Finds a given iocb for cancellation.
1767 - */
1768 -static struct aio_kiocb *
1769 -lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
1770 -{
1771 - struct aio_kiocb *kiocb;
1772 -
1773 - assert_spin_locked(&ctx->ctx_lock);
1774 -
1775 - /* TODO: use a hash or array, this sucks. */
1776 - list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1777 - if (kiocb->ki_user_iocb == iocb)
1778 - return kiocb;
1779 - }
1780 - return NULL;
1781 -}
1782 -
1783 /* sys_io_cancel:
1784 * Attempts to cancel an iocb previously passed to io_submit. If
1785 * the operation is successfully cancelled, the resulting event is
1786 @@ -2032,6 +2004,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1787 struct aio_kiocb *kiocb;
1788 int ret = -EINVAL;
1789 u32 key;
1790 + u64 obj = (u64)(unsigned long)iocb;
1791
1792 if (unlikely(get_user(key, &iocb->aio_key)))
1793 return -EFAULT;
1794 @@ -2043,10 +2016,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1795 return -EINVAL;
1796
1797 spin_lock_irq(&ctx->ctx_lock);
1798 - kiocb = lookup_kiocb(ctx, iocb);
1799 - if (kiocb) {
1800 - ret = kiocb->ki_cancel(&kiocb->rw);
1801 - list_del_init(&kiocb->ki_list);
1802 + /* TODO: use a hash or array, this sucks. */
1803 + list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1804 + if (kiocb->ki_res.obj == obj) {
1805 + ret = kiocb->ki_cancel(&kiocb->rw);
1806 + list_del_init(&kiocb->ki_list);
1807 + break;
1808 + }
1809 }
1810 spin_unlock_irq(&ctx->ctx_lock);
1811
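
The fs/aio.c rework above replaces the old zero-initialized refcount with a
fixed ownership scheme: every request is born with two references, one for
the completion side and one for the submitting code, and whichever iocb_put()
runs last publishes the event and destroys the request. A minimal userspace
model of that scheme (hypothetical names; C11 atomics stand in for the
kernel's refcount_t):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical model of the two-reference scheme: the request starts
     * with refcount 2; submitter and completion each put one reference,
     * and only the last put completes and destroys the object. */
    struct req {
        atomic_int refcnt;
        long res;
    };

    static struct req *get_req(void)
    {
        struct req *r = malloc(sizeof(*r));

        atomic_init(&r->refcnt, 2);     /* like aio_get_req() above */
        return r;
    }

    static void req_put(struct req *r)
    {
        if (atomic_fetch_sub(&r->refcnt, 1) == 1) {
            printf("complete: res=%ld, then destroy\n", r->res);
            free(r);    /* mirrors aio_complete() + iocb_destroy() */
        }
    }

    int main(void)
    {
        struct req *r = get_req();

        r->res = 42;    /* the "completion" records its result... */
        req_put(r);     /* ...and drops its reference */
        req_put(r);     /* the submitter drops the last one: destroy */
        return 0;
    }
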
1812 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
1813 index 82928cea0209..7f3f64ba464f 100644
1814 --- a/fs/ceph/dir.c
1815 +++ b/fs/ceph/dir.c
1816 @@ -1470,6 +1470,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
1817 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1818 {
1819 struct ceph_inode_info *dci = ceph_inode(dir);
1820 + unsigned hash;
1821
1822 switch (dci->i_dir_layout.dl_dir_hash) {
1823 case 0: /* for backward compat */
1824 @@ -1477,8 +1478,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1825 return dn->d_name.hash;
1826
1827 default:
1828 - return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1829 + spin_lock(&dn->d_lock);
1830 + hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1831 dn->d_name.name, dn->d_name.len);
1832 + spin_unlock(&dn->d_lock);
1833 + return hash;
1834 }
1835 }
1836
1837 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
1838 index 163fc74bf221..5cec784e30f6 100644
1839 --- a/fs/ceph/mds_client.c
1840 +++ b/fs/ceph/mds_client.c
1841 @@ -1286,6 +1286,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1842 list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1843 ci->i_prealloc_cap_flush = NULL;
1844 }
1845 +
1846 + if (drop &&
1847 + ci->i_wrbuffer_ref_head == 0 &&
1848 + ci->i_wr_ref == 0 &&
1849 + ci->i_dirty_caps == 0 &&
1850 + ci->i_flushing_caps == 0) {
1851 + ceph_put_snap_context(ci->i_head_snapc);
1852 + ci->i_head_snapc = NULL;
1853 + }
1854 }
1855 spin_unlock(&ci->i_ceph_lock);
1856 while (!list_empty(&to_remove)) {
1857 @@ -1958,10 +1967,39 @@ retry:
1858 return path;
1859 }
1860
1861 +/* Duplicate the dentry->d_name.name safely */
1862 +static int clone_dentry_name(struct dentry *dentry, const char **ppath,
1863 + int *ppathlen)
1864 +{
1865 + u32 len;
1866 + char *name;
1867 +
1868 +retry:
1869 + len = READ_ONCE(dentry->d_name.len);
1870 + name = kmalloc(len + 1, GFP_NOFS);
1871 + if (!name)
1872 + return -ENOMEM;
1873 +
1874 + spin_lock(&dentry->d_lock);
1875 + if (dentry->d_name.len != len) {
1876 + spin_unlock(&dentry->d_lock);
1877 + kfree(name);
1878 + goto retry;
1879 + }
1880 + memcpy(name, dentry->d_name.name, len);
1881 + spin_unlock(&dentry->d_lock);
1882 +
1883 + name[len] = '\0';
1884 + *ppath = name;
1885 + *ppathlen = len;
1886 + return 0;
1887 +}
1888 +
1889 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1890 const char **ppath, int *ppathlen, u64 *pino,
1891 - int *pfreepath)
1892 + bool *pfreepath, bool parent_locked)
1893 {
1894 + int ret;
1895 char *path;
1896
1897 rcu_read_lock();
1898 @@ -1970,8 +2008,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1899 if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
1900 *pino = ceph_ino(dir);
1901 rcu_read_unlock();
1902 - *ppath = dentry->d_name.name;
1903 - *ppathlen = dentry->d_name.len;
1904 + if (parent_locked) {
1905 + *ppath = dentry->d_name.name;
1906 + *ppathlen = dentry->d_name.len;
1907 + } else {
1908 + ret = clone_dentry_name(dentry, ppath, ppathlen);
1909 + if (ret)
1910 + return ret;
1911 + *pfreepath = true;
1912 + }
1913 return 0;
1914 }
1915 rcu_read_unlock();
1916 @@ -1979,13 +2024,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
1917 if (IS_ERR(path))
1918 return PTR_ERR(path);
1919 *ppath = path;
1920 - *pfreepath = 1;
1921 + *pfreepath = true;
1922 return 0;
1923 }
1924
1925 static int build_inode_path(struct inode *inode,
1926 const char **ppath, int *ppathlen, u64 *pino,
1927 - int *pfreepath)
1928 + bool *pfreepath)
1929 {
1930 struct dentry *dentry;
1931 char *path;
1932 @@ -2001,7 +2046,7 @@ static int build_inode_path(struct inode *inode,
1933 if (IS_ERR(path))
1934 return PTR_ERR(path);
1935 *ppath = path;
1936 - *pfreepath = 1;
1937 + *pfreepath = true;
1938 return 0;
1939 }
1940
1941 @@ -2012,7 +2057,7 @@ static int build_inode_path(struct inode *inode,
1942 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1943 struct inode *rdiri, const char *rpath,
1944 u64 rino, const char **ppath, int *pathlen,
1945 - u64 *ino, int *freepath)
1946 + u64 *ino, bool *freepath, bool parent_locked)
1947 {
1948 int r = 0;
1949
1950 @@ -2022,7 +2067,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
1951 ceph_snap(rinode));
1952 } else if (rdentry) {
1953 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
1954 - freepath);
1955 + freepath, parent_locked);
1956 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
1957 *ppath);
1958 } else if (rpath || rino) {
1959 @@ -2048,7 +2093,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1960 const char *path2 = NULL;
1961 u64 ino1 = 0, ino2 = 0;
1962 int pathlen1 = 0, pathlen2 = 0;
1963 - int freepath1 = 0, freepath2 = 0;
1964 + bool freepath1 = false, freepath2 = false;
1965 int len;
1966 u16 releases;
1967 void *p, *end;
1968 @@ -2056,16 +2101,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
1969
1970 ret = set_request_path_attr(req->r_inode, req->r_dentry,
1971 req->r_parent, req->r_path1, req->r_ino1.ino,
1972 - &path1, &pathlen1, &ino1, &freepath1);
1973 + &path1, &pathlen1, &ino1, &freepath1,
1974 + test_bit(CEPH_MDS_R_PARENT_LOCKED,
1975 + &req->r_req_flags));
1976 if (ret < 0) {
1977 msg = ERR_PTR(ret);
1978 goto out;
1979 }
1980
1981 + /* If r_old_dentry is set, then assume that its parent is locked */
1982 ret = set_request_path_attr(NULL, req->r_old_dentry,
1983 req->r_old_dentry_dir,
1984 req->r_path2, req->r_ino2.ino,
1985 - &path2, &pathlen2, &ino2, &freepath2);
1986 + &path2, &pathlen2, &ino2, &freepath2, true);
1987 if (ret < 0) {
1988 msg = ERR_PTR(ret);
1989 goto out_free1;
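
clone_dentry_name() above copies a dentry name without holding d_lock across
the allocation: it snapshots the length, allocates, then re-checks the length
under the lock and retries if a concurrent rename changed it. A userspace
sketch of the same allocate-then-verify retry loop (hypothetical names; a
pthread mutex stands in for d_lock):

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical model of the retry loop: allocate outside the lock
     * from a racy length snapshot, verify the length under the lock,
     * and start over if it changed in between. */
    struct name {
        pthread_mutex_t lock;
        size_t len;
        const char *str;
    };

    static char *clone_name(struct name *n)
    {
        for (;;) {
            size_t len = n->len;            /* racy read, like READ_ONCE() */
            char *copy = malloc(len + 1);

            if (!copy)
                return NULL;

            pthread_mutex_lock(&n->lock);
            if (n->len != len) {            /* changed under us: retry */
                pthread_mutex_unlock(&n->lock);
                free(copy);
                continue;
            }
            memcpy(copy, n->str, len);
            pthread_mutex_unlock(&n->lock);
            copy[len] = '\0';
            return copy;
        }
    }

    int main(void)
    {
        struct name n = { PTHREAD_MUTEX_INITIALIZER, 5, "hello" };
        char *c = clone_name(&n);

        free(c);
        return 0;
    }
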
1990 diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
1991 index f74193da0e09..1f46b02f7314 100644
1992 --- a/fs/ceph/snap.c
1993 +++ b/fs/ceph/snap.c
1994 @@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
1995 old_snapc = NULL;
1996
1997 update_snapc:
1998 - if (ci->i_head_snapc) {
1999 + if (ci->i_wrbuffer_ref_head == 0 &&
2000 + ci->i_wr_ref == 0 &&
2001 + ci->i_dirty_caps == 0 &&
2002 + ci->i_flushing_caps == 0) {
2003 + ci->i_head_snapc = NULL;
2004 + } else {
2005 ci->i_head_snapc = ceph_get_snap_context(new_snapc);
2006 dout(" new snapc is %p\n", new_snapc);
2007 }
2008 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2009 index 7c05353b766c..7c3f9d00586e 100644
2010 --- a/fs/cifs/file.c
2011 +++ b/fs/cifs/file.c
2012 @@ -2796,7 +2796,6 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
2013 struct cifs_tcon *tcon;
2014 struct cifs_sb_info *cifs_sb;
2015 struct dentry *dentry = ctx->cfile->dentry;
2016 - unsigned int i;
2017 int rc;
2018
2019 tcon = tlink_tcon(ctx->cfile->tlink);
2020 @@ -2860,10 +2859,6 @@ restart_loop:
2021 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2022 }
2023
2024 - if (!ctx->direct_io)
2025 - for (i = 0; i < ctx->npages; i++)
2026 - put_page(ctx->bv[i].bv_page);
2027 -
2028 cifs_stats_bytes_written(tcon, ctx->total_len);
2029 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
2030
2031 @@ -3472,7 +3467,6 @@ collect_uncached_read_data(struct cifs_aio_ctx *ctx)
2032 struct iov_iter *to = &ctx->iter;
2033 struct cifs_sb_info *cifs_sb;
2034 struct cifs_tcon *tcon;
2035 - unsigned int i;
2036 int rc;
2037
2038 tcon = tlink_tcon(ctx->cfile->tlink);
2039 @@ -3556,15 +3550,8 @@ again:
2040 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2041 }
2042
2043 - if (!ctx->direct_io) {
2044 - for (i = 0; i < ctx->npages; i++) {
2045 - if (ctx->should_dirty)
2046 - set_page_dirty(ctx->bv[i].bv_page);
2047 - put_page(ctx->bv[i].bv_page);
2048 - }
2049 -
2050 + if (!ctx->direct_io)
2051 ctx->total_len = ctx->len - iov_iter_count(to);
2052 - }
2053
2054 cifs_stats_bytes_read(tcon, ctx->total_len);
2055
2056 diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
2057 index 53fdb5df0d2e..538fd7d807e4 100644
2058 --- a/fs/cifs/inode.c
2059 +++ b/fs/cifs/inode.c
2060 @@ -1735,6 +1735,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
2061 if (rc == 0 || rc != -EBUSY)
2062 goto do_rename_exit;
2063
2064 + /* Don't fall back to using SMB on SMB 2+ mount */
2065 + if (server->vals->protocol_id != 0)
2066 + goto do_rename_exit;
2067 +
2068 /* open-file renames don't work across directories */
2069 if (to_dentry->d_parent != from_dentry->d_parent)
2070 goto do_rename_exit;
2071 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
2072 index 1e1626a2cfc3..0dc6f08020ac 100644
2073 --- a/fs/cifs/misc.c
2074 +++ b/fs/cifs/misc.c
2075 @@ -789,6 +789,11 @@ cifs_aio_ctx_alloc(void)
2076 {
2077 struct cifs_aio_ctx *ctx;
2078
2079 + /*
2080 + * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
2081 + * to false so that we know when we have to unreference pages within
2082 + * cifs_aio_ctx_release()
2083 + */
2084 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
2085 if (!ctx)
2086 return NULL;
2087 @@ -807,7 +812,23 @@ cifs_aio_ctx_release(struct kref *refcount)
2088 struct cifs_aio_ctx, refcount);
2089
2090 cifsFileInfo_put(ctx->cfile);
2091 - kvfree(ctx->bv);
2092 +
2093 + /*
2094 +	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
2095 +	 * which means that iov_iter_get_pages() was a success and thus that
2096 +	 * we have taken a reference on the pages.
2097 + */
2098 + if (ctx->bv) {
2099 + unsigned i;
2100 +
2101 + for (i = 0; i < ctx->npages; i++) {
2102 + if (ctx->should_dirty)
2103 + set_page_dirty(ctx->bv[i].bv_page);
2104 + put_page(ctx->bv[i].bv_page);
2105 + }
2106 + kvfree(ctx->bv);
2107 + }
2108 +
2109 kfree(ctx);
2110 }
2111
2112 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2113 index 938e75cc3b66..85a3c051e622 100644
2114 --- a/fs/cifs/smb2pdu.c
2115 +++ b/fs/cifs/smb2pdu.c
2116 @@ -3402,6 +3402,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
2117 rc);
2118 }
2119 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
2120 + cifs_small_buf_release(req);
2121 return rc == -ENODATA ? 0 : rc;
2122 } else
2123 trace_smb3_read_done(xid, req->PersistentFileId,
2124 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
2125 index 86ed9c686249..dc82e7757f67 100644
2126 --- a/fs/ext4/xattr.c
2127 +++ b/fs/ext4/xattr.c
2128 @@ -829,6 +829,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
2129 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2130 if (IS_ERR(bh)) {
2131 ret = PTR_ERR(bh);
2132 + bh = NULL;
2133 goto out;
2134 }
2135
2136 @@ -2903,6 +2904,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2137 if (error == -EIO)
2138 EXT4_ERROR_INODE(inode, "block %llu read error",
2139 EXT4_I(inode)->i_file_acl);
2140 + bh = NULL;
2141 goto cleanup;
2142 }
2143 error = ext4_xattr_check_block(inode, bh);
2144 @@ -3059,6 +3061,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
2145 if (IS_ERR(bh)) {
2146 if (PTR_ERR(bh) == -ENOMEM)
2147 return NULL;
2148 + bh = NULL;
2149 EXT4_ERROR_INODE(inode, "block %lu read error",
2150 (unsigned long)ce->e_value);
2151 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
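
Each ext4 hunk above resets bh to NULL once ext4_sb_bread() has returned an
encoded error, so the shared cleanup path can release the buffer
unconditionally without touching an error pointer. A userspace sketch of the
idiom (the ERR_PTR/IS_ERR macros here are simplified stand-ins for the
kernel's):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO   4095
    #define ERR_PTR(e)  ((void *)(long)(e))
    #define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)

    /* Hypothetical stand-in for ext4_sb_bread(): returns either a real
     * buffer or an error encoded in the pointer value. */
    static void *read_block(int fail)
    {
        return fail ? ERR_PTR(-5) : malloc(16);     /* -EIO or a buffer */
    }

    int main(void)
    {
        void *bh = read_block(1);
        int ret = 0;

        if (IS_ERR(bh)) {
            ret = (int)(long)bh;
            bh = NULL;  /* the fix: never leave an error pointer behind */
        }
        /* shared cleanup: free(NULL) is safe, free(ERR_PTR(-5)) is not */
        free(bh);
        printf("ret=%d\n", ret);    /* ret=-5 */
        return 0;
    }
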
2152 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2153 index 0570391eaa16..15c025c1a305 100644
2154 --- a/fs/nfs/super.c
2155 +++ b/fs/nfs/super.c
2156 @@ -2041,7 +2041,8 @@ static int nfs23_validate_mount_data(void *options,
2157 memcpy(sap, &data->addr, sizeof(data->addr));
2158 args->nfs_server.addrlen = sizeof(data->addr);
2159 args->nfs_server.port = ntohs(data->addr.sin_port);
2160 - if (!nfs_verify_server_address(sap))
2161 + if (sap->sa_family != AF_INET ||
2162 + !nfs_verify_server_address(sap))
2163 goto out_no_address;
2164
2165 if (!(data->flags & NFS_MOUNT_TCP))
2166 diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
2167 index c74e4538d0eb..258f741d6a21 100644
2168 --- a/fs/nfsd/nfs4callback.c
2169 +++ b/fs/nfsd/nfs4callback.c
2170 @@ -1023,8 +1023,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
2171 cb->cb_seq_status = 1;
2172 cb->cb_status = 0;
2173 if (minorversion) {
2174 - if (!nfsd41_cb_get_slot(clp, task))
2175 + if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
2176 return;
2177 + cb->cb_holds_slot = true;
2178 }
2179 rpc_call_start(task);
2180 }
2181 @@ -1051,6 +1052,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
2182 return true;
2183 }
2184
2185 + if (!cb->cb_holds_slot)
2186 + goto need_restart;
2187 +
2188 switch (cb->cb_seq_status) {
2189 case 0:
2190 /*
2191 @@ -1089,6 +1093,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
2192 cb->cb_seq_status);
2193 }
2194
2195 + cb->cb_holds_slot = false;
2196 clear_bit(0, &clp->cl_cb_slot_busy);
2197 rpc_wake_up_next(&clp->cl_cb_waitq);
2198 dprintk("%s: freed slot, new seqid=%d\n", __func__,
2199 @@ -1296,6 +1301,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
2200 cb->cb_seq_status = 1;
2201 cb->cb_status = 0;
2202 cb->cb_need_restart = false;
2203 + cb->cb_holds_slot = false;
2204 }
2205
2206 void nfsd4_run_cb(struct nfsd4_callback *cb)
2207 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
2208 index 6a45fb00c5fc..f056b1d3fecd 100644
2209 --- a/fs/nfsd/nfs4state.c
2210 +++ b/fs/nfsd/nfs4state.c
2211 @@ -265,6 +265,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
2212 static void
2213 free_blocked_lock(struct nfsd4_blocked_lock *nbl)
2214 {
2215 + locks_delete_block(&nbl->nbl_lock);
2216 locks_release_private(&nbl->nbl_lock);
2217 kfree(nbl);
2218 }
2219 @@ -293,11 +294,18 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
2220 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
2221 nbl_lru);
2222 list_del_init(&nbl->nbl_lru);
2223 - locks_delete_block(&nbl->nbl_lock);
2224 free_blocked_lock(nbl);
2225 }
2226 }
2227
2228 +static void
2229 +nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
2230 +{
2231 + struct nfsd4_blocked_lock *nbl = container_of(cb,
2232 + struct nfsd4_blocked_lock, nbl_cb);
2233 + locks_delete_block(&nbl->nbl_lock);
2234 +}
2235 +
2236 static int
2237 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
2238 {
2239 @@ -325,6 +333,7 @@ nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
2240 }
2241
2242 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
2243 + .prepare = nfsd4_cb_notify_lock_prepare,
2244 .done = nfsd4_cb_notify_lock_done,
2245 .release = nfsd4_cb_notify_lock_release,
2246 };
2247 @@ -4863,7 +4872,6 @@ nfs4_laundromat(struct nfsd_net *nn)
2248 nbl = list_first_entry(&reaplist,
2249 struct nfsd4_blocked_lock, nbl_lru);
2250 list_del_init(&nbl->nbl_lru);
2251 - locks_delete_block(&nbl->nbl_lock);
2252 free_blocked_lock(nbl);
2253 }
2254 out:
2255 diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
2256 index 396c76755b03..9d6cb246c6c5 100644
2257 --- a/fs/nfsd/state.h
2258 +++ b/fs/nfsd/state.h
2259 @@ -70,6 +70,7 @@ struct nfsd4_callback {
2260 int cb_seq_status;
2261 int cb_status;
2262 bool cb_need_restart;
2263 + bool cb_holds_slot;
2264 };
2265
2266 struct nfsd4_callback_ops {
2267 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
2268 index d65390727541..7325baa8f9d4 100644
2269 --- a/fs/proc/proc_sysctl.c
2270 +++ b/fs/proc/proc_sysctl.c
2271 @@ -1626,9 +1626,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
2272 if (--header->nreg)
2273 return;
2274
2275 - if (parent)
2276 + if (parent) {
2277 put_links(header);
2278 - start_unregistering(header);
2279 + start_unregistering(header);
2280 + }
2281 +
2282 if (!--header->count)
2283 kfree_rcu(header, rcu);
2284
2285 diff --git a/fs/splice.c b/fs/splice.c
2286 index 90c29675d573..7da7d5437472 100644
2287 --- a/fs/splice.c
2288 +++ b/fs/splice.c
2289 @@ -333,8 +333,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
2290 .get = generic_pipe_buf_get,
2291 };
2292
2293 -static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
2294 - struct pipe_buffer *buf)
2295 +int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
2296 + struct pipe_buffer *buf)
2297 {
2298 return 1;
2299 }
2300 diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
2301 index 1021106438b2..c80e5833b1d6 100644
2302 --- a/include/drm/ttm/ttm_bo_driver.h
2303 +++ b/include/drm/ttm/ttm_bo_driver.h
2304 @@ -411,7 +411,6 @@ extern struct ttm_bo_global {
2305 /**
2306 * Protected by ttm_global_mutex.
2307 */
2308 - unsigned int use_count;
2309 struct list_head device_list;
2310
2311 /**
2312 diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
2313 index 2c0af7b00715..c94ab8b53a23 100644
2314 --- a/include/linux/etherdevice.h
2315 +++ b/include/linux/etherdevice.h
2316 @@ -447,6 +447,18 @@ static inline void eth_addr_dec(u8 *addr)
2317 u64_to_ether_addr(u, addr);
2318 }
2319
2320 +/**
2321 + * eth_addr_inc() - Increment the given MAC address.
2322 + * @addr: Pointer to a six-byte array containing Ethernet address to increment.
2323 + */
2324 +static inline void eth_addr_inc(u8 *addr)
2325 +{
2326 + u64 u = ether_addr_to_u64(addr);
2327 +
2328 + u++;
2329 + u64_to_ether_addr(u, addr);
2330 +}
2331 +
2332 /**
2333 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
2334 * @dev: Pointer to a device structure
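
eth_addr_inc() above widens the six octets into one integer before adding 1,
so the carry propagates across octet boundaries; the NCSI fix later in this
patch relies on that instead of the old saddr.sa_data[ETH_ALEN - 1]++, which
wraps 0xff back to 0x00 within the last octet alone. A userspace sketch of
the widening trick (the hand-rolled converters stand in for
ether_addr_to_u64()/u64_to_ether_addr()):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mac_to_u64(const uint8_t a[6])
    {
        uint64_t u = 0;

        for (int i = 0; i < 6; i++)
            u = (u << 8) | a[i];
        return u;
    }

    static void u64_to_mac(uint64_t u, uint8_t a[6])
    {
        for (int i = 5; i >= 0; i--) {
            a[i] = u & 0xff;
            u >>= 8;
        }
    }

    int main(void)
    {
        uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0xff };

        u64_to_mac(mac_to_u64(mac) + 1, mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        /* 02:00:00:00:01:00 -- the carry reaches the fifth octet */
        return 0;
    }
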
2335 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
2336 index 3ecd7ea212ae..66ee63cd5968 100644
2337 --- a/include/linux/pipe_fs_i.h
2338 +++ b/include/linux/pipe_fs_i.h
2339 @@ -181,6 +181,7 @@ void free_pipe_info(struct pipe_inode_info *);
2340 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
2341 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
2342 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
2343 +int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
2344 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
2345 void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
2346
2347 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
2348 index 0612439909dc..9e0b9ecb43db 100644
2349 --- a/include/net/netfilter/nf_tables.h
2350 +++ b/include/net/netfilter/nf_tables.h
2351 @@ -382,6 +382,7 @@ void nft_unregister_set(struct nft_set_type *type);
2352 * @dtype: data type (verdict or numeric type defined by userspace)
2353 * @objtype: object type (see NFT_OBJECT_* definitions)
2354 * @size: maximum set size
2355 + * @use: number of rule references to this set
2356 * @nelems: number of elements
2357 * @ndeact: number of deactivated elements queued for removal
2358 * @timeout: default timeout value in jiffies
2359 @@ -407,6 +408,7 @@ struct nft_set {
2360 u32 dtype;
2361 u32 objtype;
2362 u32 size;
2363 + u32 use;
2364 atomic_t nelems;
2365 u32 ndeact;
2366 u64 timeout;
2367 @@ -467,6 +469,10 @@ struct nft_set_binding {
2368 u32 flags;
2369 };
2370
2371 +enum nft_trans_phase;
2372 +void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
2373 + struct nft_set_binding *binding,
2374 + enum nft_trans_phase phase);
2375 int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2376 struct nft_set_binding *binding);
2377 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2378 diff --git a/include/net/netrom.h b/include/net/netrom.h
2379 index 5a0714ff500f..80f15b1c1a48 100644
2380 --- a/include/net/netrom.h
2381 +++ b/include/net/netrom.h
2382 @@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
2383 int nr_t1timer_running(struct sock *);
2384
2385 /* sysctl_net_netrom.c */
2386 -void nr_register_sysctl(void);
2387 +int nr_register_sysctl(void);
2388 void nr_unregister_sysctl(void);
2389
2390 #endif
2391 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
2392 index fb8b7b5d745d..451b1f9e80a6 100644
2393 --- a/kernel/sched/deadline.c
2394 +++ b/kernel/sched/deadline.c
2395 @@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
2396 if (dl_entity_is_special(dl_se))
2397 return;
2398
2399 - WARN_ON(hrtimer_active(&dl_se->inactive_timer));
2400 WARN_ON(dl_se->dl_non_contending);
2401
2402 zerolag_time = dl_se->deadline -
2403 @@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
2404 * If the "0-lag time" already passed, decrease the active
2405 * utilization now, instead of starting a timer
2406 */
2407 - if (zerolag_time < 0) {
2408 + if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
2409 if (dl_task(p))
2410 sub_running_bw(dl_se, dl_rq);
2411 if (!dl_task(p) || p->state == TASK_DEAD) {
2412 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2413 index eeb605656d59..be55a64748ba 100644
2414 --- a/kernel/sched/fair.c
2415 +++ b/kernel/sched/fair.c
2416 @@ -1994,6 +1994,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
2417 if (p->last_task_numa_placement) {
2418 delta = runtime - p->last_sum_exec_runtime;
2419 *period = now - p->last_task_numa_placement;
2420 +
2421 + /* Avoid time going backwards, prevent potential divide error: */
2422 + if (unlikely((s64)*period < 0))
2423 + *period = 0;
2424 } else {
2425 delta = p->se.avg.load_sum;
2426 *period = LOAD_AVG_MAX;
2427 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2428 index b49affb4666b..4463ae28bf1a 100644
2429 --- a/kernel/trace/ring_buffer.c
2430 +++ b/kernel/trace/ring_buffer.c
2431 @@ -776,7 +776,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
2432
2433 preempt_disable_notrace();
2434 time = rb_time_stamp(buffer);
2435 - preempt_enable_no_resched_notrace();
2436 + preempt_enable_notrace();
2437
2438 return time;
2439 }
2440 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2441 index 89158aa93fa6..d07fc2836786 100644
2442 --- a/kernel/trace/trace.c
2443 +++ b/kernel/trace/trace.c
2444 @@ -496,8 +496,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
2445 * not modified.
2446 */
2447 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
2448 - if (!pid_list)
2449 + if (!pid_list) {
2450 + trace_parser_put(&parser);
2451 return -ENOMEM;
2452 + }
2453
2454 pid_list->pid_max = READ_ONCE(pid_max);
2455
2456 @@ -507,6 +509,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
2457
2458 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
2459 if (!pid_list->pids) {
2460 + trace_parser_put(&parser);
2461 kfree(pid_list);
2462 return -ENOMEM;
2463 }
2464 @@ -6820,19 +6823,23 @@ struct buffer_ref {
2465 struct ring_buffer *buffer;
2466 void *page;
2467 int cpu;
2468 - int ref;
2469 + refcount_t refcount;
2470 };
2471
2472 +static void buffer_ref_release(struct buffer_ref *ref)
2473 +{
2474 + if (!refcount_dec_and_test(&ref->refcount))
2475 + return;
2476 + ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2477 + kfree(ref);
2478 +}
2479 +
2480 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
2481 struct pipe_buffer *buf)
2482 {
2483 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
2484
2485 - if (--ref->ref)
2486 - return;
2487 -
2488 - ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2489 - kfree(ref);
2490 + buffer_ref_release(ref);
2491 buf->private = 0;
2492 }
2493
2494 @@ -6841,7 +6848,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
2495 {
2496 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
2497
2498 - ref->ref++;
2499 + refcount_inc(&ref->refcount);
2500 }
2501
2502 /* Pipe buffer operations for a buffer. */
2503 @@ -6849,7 +6856,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
2504 .can_merge = 0,
2505 .confirm = generic_pipe_buf_confirm,
2506 .release = buffer_pipe_buf_release,
2507 - .steal = generic_pipe_buf_steal,
2508 + .steal = generic_pipe_buf_nosteal,
2509 .get = buffer_pipe_buf_get,
2510 };
2511
2512 @@ -6862,11 +6869,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2513 struct buffer_ref *ref =
2514 (struct buffer_ref *)spd->partial[i].private;
2515
2516 - if (--ref->ref)
2517 - return;
2518 -
2519 - ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
2520 - kfree(ref);
2521 + buffer_ref_release(ref);
2522 spd->partial[i].private = 0;
2523 }
2524
2525 @@ -6921,7 +6924,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2526 break;
2527 }
2528
2529 - ref->ref = 1;
2530 + refcount_set(&ref->refcount, 1);
2531 ref->buffer = iter->trace_buffer->buffer;
2532 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
2533 if (IS_ERR(ref->page)) {
2534 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2535 index fc5d23d752a5..e94d2b6bee7f 100644
2536 --- a/kernel/workqueue.c
2537 +++ b/kernel/workqueue.c
2538 @@ -2931,6 +2931,9 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
2539 if (WARN_ON(!wq_online))
2540 return false;
2541
2542 + if (WARN_ON(!work->func))
2543 + return false;
2544 +
2545 if (!from_cancel) {
2546 lock_map_acquire(&work->lockdep_map);
2547 lock_map_release(&work->lockdep_map);
2548 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
2549 index d4df5b24d75e..350d5328014f 100644
2550 --- a/lib/Kconfig.debug
2551 +++ b/lib/Kconfig.debug
2552 @@ -1952,6 +1952,7 @@ config TEST_KMOD
2553 depends on m
2554 depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
2555 depends on NETDEVICES && NET_CORE && INET # for TUN
2556 + depends on BLOCK
2557 select TEST_LKM
2558 select XFS_FS
2559 select TUN
2560 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2561 index 20dd3283bb1b..318ef6ccdb3b 100644
2562 --- a/mm/page_alloc.c
2563 +++ b/mm/page_alloc.c
2564 @@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
2565
2566 int min_free_kbytes = 1024;
2567 int user_min_free_kbytes = -1;
2568 +#ifdef CONFIG_DISCONTIGMEM
2569 +/*
2570 + * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
2571 + * are not on separate NUMA nodes. Functionally this works but with
2572 + * watermark_boost_factor, it can reclaim prematurely as the ranges can be
2573 + * quite small. By default, do not boost watermarks on discontigmem as in
2574 + * many cases very high-order allocations like THP are likely to be
2575 + * unsupported and the premature reclaim offsets the advantage of long-term
2576 + * fragmentation avoidance.
2577 + */
2578 +int watermark_boost_factor __read_mostly;
2579 +#else
2580 int watermark_boost_factor __read_mostly = 15000;
2581 +#endif
2582 int watermark_scale_factor = 10;
2583
2584 static unsigned long nr_kernel_pages __initdata;
2585 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2586 index f77888ec93f1..0bb4d712b80c 100644
2587 --- a/net/bridge/netfilter/ebtables.c
2588 +++ b/net/bridge/netfilter/ebtables.c
2589 @@ -2032,7 +2032,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2590 if (match_kern)
2591 match_kern->match_size = ret;
2592
2593 - if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
2594 + /* rule should have no remaining data after target */
2595 + if (type == EBT_COMPAT_TARGET && size_left)
2596 return -EINVAL;
2597
2598 match32 = (struct compat_ebt_entry_mwt *) buf;
2599 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2600 index 25d9bef27d03..3c89ca325947 100644
2601 --- a/net/ipv4/route.c
2602 +++ b/net/ipv4/route.c
2603 @@ -1183,25 +1183,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
2604 return dst;
2605 }
2606
2607 -static void ipv4_link_failure(struct sk_buff *skb)
2608 +static void ipv4_send_dest_unreach(struct sk_buff *skb)
2609 {
2610 struct ip_options opt;
2611 - struct rtable *rt;
2612 int res;
2613
2614 /* Recompile ip options since IPCB may not be valid anymore.
2615 + * Also check we have a reasonable ipv4 header.
2616 */
2617 - memset(&opt, 0, sizeof(opt));
2618 - opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
2619 + if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
2620 + ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
2621 + return;
2622
2623 - rcu_read_lock();
2624 - res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
2625 - rcu_read_unlock();
2626 + memset(&opt, 0, sizeof(opt));
2627 + if (ip_hdr(skb)->ihl > 5) {
2628 + if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
2629 + return;
2630 + opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
2631
2632 - if (res)
2633 - return;
2634 + rcu_read_lock();
2635 + res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
2636 + rcu_read_unlock();
2637
2638 + if (res)
2639 + return;
2640 + }
2641 __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
2642 +}
2643 +
2644 +static void ipv4_link_failure(struct sk_buff *skb)
2645 +{
2646 + struct rtable *rt;
2647 +
2648 + ipv4_send_dest_unreach(skb);
2649
2650 rt = skb_rtable(skb);
2651 if (rt)
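
ipv4_send_dest_unreach() above validates the header before trusting ihl: the
fixed 20 bytes must be pullable, the version must be 4, ihl must be at least
5, and options are recompiled only when ihl > 5 and the full header is
present. A userspace sketch of those bounds checks (a toy struct stands in
for struct iphdr, and plain byte counts replace pskb_network_may_pull()):

    #include <stdint.h>
    #include <stdio.h>

    struct iphdr_model {
        uint8_t version;
        uint8_t ihl;    /* header length in 32-bit words, 5..15 */
    };

    /* Returns the options length, or -1 for anything that is not a
     * plausible, fully-present IPv4 header. */
    static int options_len(const struct iphdr_model *h, size_t avail)
    {
        if (avail < 20 || h->version != 4 || h->ihl < 5)
            return -1;                  /* not a sane IPv4 header */
        if (avail < (size_t)h->ihl * 4)
            return -1;                  /* options not fully present */
        return h->ihl * 4 - 20;         /* 0 when there are no options */
    }

    int main(void)
    {
        struct iphdr_model plain = { 4, 5 }, opts = { 4, 7 };

        printf("%d %d %d\n",
               options_len(&plain, 20),     /* 0: nothing to recompile */
               options_len(&opts, 28),      /* 8 bytes of options */
               options_len(&opts, 24));     /* -1: truncated options */
        return 0;
    }
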
2652 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
2653 index ba0fc4b18465..eeb4041fa5f9 100644
2654 --- a/net/ipv4/sysctl_net_ipv4.c
2655 +++ b/net/ipv4/sysctl_net_ipv4.c
2656 @@ -49,6 +49,7 @@ static int ip_ping_group_range_min[] = { 0, 0 };
2657 static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
2658 static int comp_sack_nr_max = 255;
2659 static u32 u32_max_div_HZ = UINT_MAX / HZ;
2660 +static int one_day_secs = 24 * 3600;
2661
2662 /* obsolete */
2663 static int sysctl_tcp_low_latency __read_mostly;
2664 @@ -1151,7 +1152,9 @@ static struct ctl_table ipv4_net_table[] = {
2665 .data = &init_net.ipv4.sysctl_tcp_min_rtt_wlen,
2666 .maxlen = sizeof(int),
2667 .mode = 0644,
2668 - .proc_handler = proc_dointvec
2669 + .proc_handler = proc_dointvec_minmax,
2670 + .extra1 = &zero,
2671 + .extra2 = &one_day_secs
2672 },
2673 {
2674 .procname = "tcp_autocorking",
2675 diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
2676 index dc07fcc7938e..802db01e3075 100644
2677 --- a/net/ncsi/ncsi-rsp.c
2678 +++ b/net/ncsi/ncsi-rsp.c
2679 @@ -11,6 +11,7 @@
2680 #include <linux/kernel.h>
2681 #include <linux/init.h>
2682 #include <linux/netdevice.h>
2683 +#include <linux/etherdevice.h>
2684 #include <linux/skbuff.h>
2685
2686 #include <net/ncsi.h>
2687 @@ -667,7 +668,10 @@ static int ncsi_rsp_handler_oem_bcm_gma(struct ncsi_request *nr)
2688 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
2689 memcpy(saddr.sa_data, &rsp->data[BCM_MAC_ADDR_OFFSET], ETH_ALEN);
2690 /* Increase mac address by 1 for BMC's address */
2691 - saddr.sa_data[ETH_ALEN - 1]++;
2692 + eth_addr_inc((u8 *)saddr.sa_data);
2693 + if (!is_valid_ether_addr((const u8 *)saddr.sa_data))
2694 + return -ENXIO;
2695 +
2696 ret = ops->ndo_set_mac_address(ndev, &saddr);
2697 if (ret < 0)
2698 netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n");
2699 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2700 index acb124ce92ec..e2aac80f9b7b 100644
2701 --- a/net/netfilter/nf_tables_api.c
2702 +++ b/net/netfilter/nf_tables_api.c
2703 @@ -3624,6 +3624,9 @@ err1:
2704
2705 static void nft_set_destroy(struct nft_set *set)
2706 {
2707 + if (WARN_ON(set->use > 0))
2708 + return;
2709 +
2710 set->ops->destroy(set);
2711 module_put(to_set_type(set->ops)->owner);
2712 kfree(set->name);
2713 @@ -3664,7 +3667,7 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
2714 NL_SET_BAD_ATTR(extack, attr);
2715 return PTR_ERR(set);
2716 }
2717 - if (!list_empty(&set->bindings) ||
2718 + if (set->use ||
2719 (nlh->nlmsg_flags & NLM_F_NONREC && atomic_read(&set->nelems) > 0)) {
2720 NL_SET_BAD_ATTR(extack, attr);
2721 return -EBUSY;
2722 @@ -3694,6 +3697,9 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
2723 struct nft_set_binding *i;
2724 struct nft_set_iter iter;
2725
2726 + if (set->use == UINT_MAX)
2727 + return -EOVERFLOW;
2728 +
2729 if (!list_empty(&set->bindings) && nft_set_is_anonymous(set))
2730 return -EBUSY;
2731
2732 @@ -3721,6 +3727,7 @@ bind:
2733 binding->chain = ctx->chain;
2734 list_add_tail_rcu(&binding->list, &set->bindings);
2735 nft_set_trans_bind(ctx, set);
2736 + set->use++;
2737
2738 return 0;
2739 }
2740 @@ -3740,6 +3747,25 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
2741 }
2742 EXPORT_SYMBOL_GPL(nf_tables_unbind_set);
2743
2744 +void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
2745 + struct nft_set_binding *binding,
2746 + enum nft_trans_phase phase)
2747 +{
2748 + switch (phase) {
2749 + case NFT_TRANS_PREPARE:
2750 + set->use--;
2751 + return;
2752 + case NFT_TRANS_ABORT:
2753 + case NFT_TRANS_RELEASE:
2754 + set->use--;
2755 + /* fall through */
2756 + default:
2757 + nf_tables_unbind_set(ctx, set, binding,
2758 + phase == NFT_TRANS_COMMIT);
2759 + }
2760 +}
2761 +EXPORT_SYMBOL_GPL(nf_tables_deactivate_set);
2762 +
2763 void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
2764 {
2765 if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
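
The nf_tables hunks above make set references explicit: nf_tables_bind_set()
bumps set->use, and nf_tables_deactivate_set() drops it in the PREPARE phase,
or in ABORT/RELEASE where the binding is also torn down, while COMMIT only
unbinds. A compact userspace model of that phase switch (hypothetical names;
the enum mirrors the shape of nft_trans_phase):

    #include <stdio.h>

    enum phase { PREPARE, ABORT, COMMIT, RELEASE };

    struct set { unsigned int use; };

    static void unbind(struct set *s, int commit)
    {
        printf("unbind (commit=%d), use=%u\n", commit, s->use);
    }

    /* Hypothetical model of nf_tables_deactivate_set() above. */
    static void deactivate(struct set *s, enum phase p)
    {
        switch (p) {
        case PREPARE:
            s->use--;
            return;     /* keep the binding for a possible abort */
        case ABORT:
        case RELEASE:
            s->use--;
            /* fall through */
        default:
            unbind(s, p == COMMIT);
        }
    }

    int main(void)
    {
        struct set s = { .use = 2 };

        deactivate(&s, PREPARE);    /* use 2 -> 1, still bound */
        deactivate(&s, RELEASE);    /* use 1 -> 0, unbound */
        return 0;
    }
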
2766 diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
2767 index f1172f99752b..eb7f9a5f2aeb 100644
2768 --- a/net/netfilter/nft_dynset.c
2769 +++ b/net/netfilter/nft_dynset.c
2770 @@ -241,11 +241,15 @@ static void nft_dynset_deactivate(const struct nft_ctx *ctx,
2771 {
2772 struct nft_dynset *priv = nft_expr_priv(expr);
2773
2774 - if (phase == NFT_TRANS_PREPARE)
2775 - return;
2776 + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2777 +}
2778 +
2779 +static void nft_dynset_activate(const struct nft_ctx *ctx,
2780 + const struct nft_expr *expr)
2781 +{
2782 + struct nft_dynset *priv = nft_expr_priv(expr);
2783
2784 - nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2785 - phase == NFT_TRANS_COMMIT);
2786 + priv->set->use++;
2787 }
2788
2789 static void nft_dynset_destroy(const struct nft_ctx *ctx,
2790 @@ -293,6 +297,7 @@ static const struct nft_expr_ops nft_dynset_ops = {
2791 .eval = nft_dynset_eval,
2792 .init = nft_dynset_init,
2793 .destroy = nft_dynset_destroy,
2794 + .activate = nft_dynset_activate,
2795 .deactivate = nft_dynset_deactivate,
2796 .dump = nft_dynset_dump,
2797 };
2798 diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
2799 index 14496da5141d..161c3451a747 100644
2800 --- a/net/netfilter/nft_lookup.c
2801 +++ b/net/netfilter/nft_lookup.c
2802 @@ -127,11 +127,15 @@ static void nft_lookup_deactivate(const struct nft_ctx *ctx,
2803 {
2804 struct nft_lookup *priv = nft_expr_priv(expr);
2805
2806 - if (phase == NFT_TRANS_PREPARE)
2807 - return;
2808 + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2809 +}
2810 +
2811 +static void nft_lookup_activate(const struct nft_ctx *ctx,
2812 + const struct nft_expr *expr)
2813 +{
2814 + struct nft_lookup *priv = nft_expr_priv(expr);
2815
2816 - nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2817 - phase == NFT_TRANS_COMMIT);
2818 + priv->set->use++;
2819 }
2820
2821 static void nft_lookup_destroy(const struct nft_ctx *ctx,
2822 @@ -222,6 +226,7 @@ static const struct nft_expr_ops nft_lookup_ops = {
2823 .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
2824 .eval = nft_lookup_eval,
2825 .init = nft_lookup_init,
2826 + .activate = nft_lookup_activate,
2827 .deactivate = nft_lookup_deactivate,
2828 .destroy = nft_lookup_destroy,
2829 .dump = nft_lookup_dump,
2830 diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
2831 index ae178e914486..bf92a40dd1b2 100644
2832 --- a/net/netfilter/nft_objref.c
2833 +++ b/net/netfilter/nft_objref.c
2834 @@ -64,21 +64,34 @@ nla_put_failure:
2835 return -1;
2836 }
2837
2838 -static void nft_objref_destroy(const struct nft_ctx *ctx,
2839 - const struct nft_expr *expr)
2840 +static void nft_objref_deactivate(const struct nft_ctx *ctx,
2841 + const struct nft_expr *expr,
2842 + enum nft_trans_phase phase)
2843 {
2844 struct nft_object *obj = nft_objref_priv(expr);
2845
2846 + if (phase == NFT_TRANS_COMMIT)
2847 + return;
2848 +
2849 obj->use--;
2850 }
2851
2852 +static void nft_objref_activate(const struct nft_ctx *ctx,
2853 + const struct nft_expr *expr)
2854 +{
2855 + struct nft_object *obj = nft_objref_priv(expr);
2856 +
2857 + obj->use++;
2858 +}
2859 +
2860 static struct nft_expr_type nft_objref_type;
2861 static const struct nft_expr_ops nft_objref_ops = {
2862 .type = &nft_objref_type,
2863 .size = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
2864 .eval = nft_objref_eval,
2865 .init = nft_objref_init,
2866 - .destroy = nft_objref_destroy,
2867 + .activate = nft_objref_activate,
2868 + .deactivate = nft_objref_deactivate,
2869 .dump = nft_objref_dump,
2870 };
2871
2872 @@ -161,11 +174,15 @@ static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
2873 {
2874 struct nft_objref_map *priv = nft_expr_priv(expr);
2875
2876 - if (phase == NFT_TRANS_PREPARE)
2877 - return;
2878 + nf_tables_deactivate_set(ctx, priv->set, &priv->binding, phase);
2879 +}
2880 +
2881 +static void nft_objref_map_activate(const struct nft_ctx *ctx,
2882 + const struct nft_expr *expr)
2883 +{
2884 + struct nft_objref_map *priv = nft_expr_priv(expr);
2885
2886 - nf_tables_unbind_set(ctx, priv->set, &priv->binding,
2887 - phase == NFT_TRANS_COMMIT);
2888 + priv->set->use++;
2889 }
2890
2891 static void nft_objref_map_destroy(const struct nft_ctx *ctx,
2892 @@ -182,6 +199,7 @@ static const struct nft_expr_ops nft_objref_map_ops = {
2893 .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
2894 .eval = nft_objref_map_eval,
2895 .init = nft_objref_map_init,
2896 + .activate = nft_objref_map_activate,
2897 .deactivate = nft_objref_map_deactivate,
2898 .destroy = nft_objref_map_destroy,
2899 .dump = nft_objref_map_dump,
2900 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
2901 index 1d3144d19903..71ffd1a6dc7c 100644
2902 --- a/net/netrom/af_netrom.c
2903 +++ b/net/netrom/af_netrom.c
2904 @@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
2905 int i;
2906 int rc = proto_register(&nr_proto, 0);
2907
2908 - if (rc != 0)
2909 - goto out;
2910 + if (rc)
2911 + return rc;
2912
2913 if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
2914 - printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
2915 - return -1;
2916 + pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
2917 + __func__);
2918 + rc = -EINVAL;
2919 + goto unregister_proto;
2920 }
2921
2922 dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
2923 - if (dev_nr == NULL) {
2924 - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
2925 - return -1;
2926 + if (!dev_nr) {
2927 + pr_err("NET/ROM: %s - unable to allocate device array\n",
2928 + __func__);
2929 + rc = -ENOMEM;
2930 + goto unregister_proto;
2931 }
2932
2933 for (i = 0; i < nr_ndevs; i++) {
2934 @@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
2935 sprintf(name, "nr%d", i);
2936 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
2937 if (!dev) {
2938 - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
2939 + rc = -ENOMEM;
2940 goto fail;
2941 }
2942
2943 dev->base_addr = i;
2944 - if (register_netdev(dev)) {
2945 - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
2946 + rc = register_netdev(dev);
2947 + if (rc) {
2948 free_netdev(dev);
2949 goto fail;
2950 }
2951 @@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
2952 dev_nr[i] = dev;
2953 }
2954
2955 - if (sock_register(&nr_family_ops)) {
2956 - printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
2957 + rc = sock_register(&nr_family_ops);
2958 + if (rc)
2959 goto fail;
2960 - }
2961
2962 - register_netdevice_notifier(&nr_dev_notifier);
2963 + rc = register_netdevice_notifier(&nr_dev_notifier);
2964 + if (rc)
2965 + goto out_sock;
2966
2967 ax25_register_pid(&nr_pid);
2968 ax25_linkfail_register(&nr_linkfail_notifier);
2969
2970 #ifdef CONFIG_SYSCTL
2971 - nr_register_sysctl();
2972 + rc = nr_register_sysctl();
2973 + if (rc)
2974 + goto out_sysctl;
2975 #endif
2976
2977 nr_loopback_init();
2978
2979 - proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
2980 - proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
2981 - proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
2982 -out:
2983 - return rc;
2984 + rc = -ENOMEM;
2985 + if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
2986 + goto proc_remove1;
2987 + if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
2988 + &nr_neigh_seqops))
2989 + goto proc_remove2;
2990 + if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
2991 + &nr_node_seqops))
2992 + goto proc_remove3;
2993 +
2994 + return 0;
2995 +
2996 +proc_remove3:
2997 + remove_proc_entry("nr_neigh", init_net.proc_net);
2998 +proc_remove2:
2999 + remove_proc_entry("nr", init_net.proc_net);
3000 +proc_remove1:
3001 +
3002 + nr_loopback_clear();
3003 + nr_rt_free();
3004 +
3005 +#ifdef CONFIG_SYSCTL
3006 + nr_unregister_sysctl();
3007 +out_sysctl:
3008 +#endif
3009 + ax25_linkfail_release(&nr_linkfail_notifier);
3010 + ax25_protocol_release(AX25_P_NETROM);
3011 + unregister_netdevice_notifier(&nr_dev_notifier);
3012 +out_sock:
3013 + sock_unregister(PF_NETROM);
3014 fail:
3015 while (--i >= 0) {
3016 unregister_netdev(dev_nr[i]);
3017 free_netdev(dev_nr[i]);
3018 }
3019 kfree(dev_nr);
3020 +unregister_proto:
3021 proto_unregister(&nr_proto);
3022 - rc = -1;
3023 - goto out;
3024 + return rc;
3025 }
3026
3027 module_init(nr_proto_init);
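
nr_proto_init() above now unwinds with a ladder of goto labels, tearing down
completed steps in reverse order and returning the real errno instead of -1.
A userspace sketch of the pattern (hypothetical step names; -ENOMEM simulates
a late failure):

    #include <errno.h>
    #include <stdio.h>

    static int step(const char *what, int fail)
    {
        printf("setup %s\n", what);
        return fail ? -ENOMEM : 0;
    }

    /* Hypothetical init with the same unwind shape: each failing step
     * jumps to the label that undoes everything already done. */
    static int init(void)
    {
        int rc;

        rc = step("proto", 0);
        if (rc)
            return rc;
        rc = step("devices", 0);
        if (rc)
            goto unregister_proto;
        rc = step("procfs", 1);     /* simulate a late failure */
        if (rc)
            goto free_devices;
        return 0;

    free_devices:
        printf("teardown devices\n");
    unregister_proto:
        printf("teardown proto\n");
        return rc;
    }

    int main(void)
    {
        printf("init -> %d\n", init());     /* init -> -12 (-ENOMEM) */
        return 0;
    }
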
3028 diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
3029 index 215ad22a9647..93d13f019981 100644
3030 --- a/net/netrom/nr_loopback.c
3031 +++ b/net/netrom/nr_loopback.c
3032 @@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
3033 }
3034 }
3035
3036 -void __exit nr_loopback_clear(void)
3037 +void nr_loopback_clear(void)
3038 {
3039 del_timer_sync(&loopback_timer);
3040 skb_queue_purge(&loopback_queue);
3041 diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
3042 index 6485f593e2f0..b76aa668a94b 100644
3043 --- a/net/netrom/nr_route.c
3044 +++ b/net/netrom/nr_route.c
3045 @@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
3046 /*
3047 * Free all memory associated with the nodes and routes lists.
3048 */
3049 -void __exit nr_rt_free(void)
3050 +void nr_rt_free(void)
3051 {
3052 struct nr_neigh *s = NULL;
3053 struct nr_node *t = NULL;
3054 diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
3055 index ba1c368b3f18..771011b84270 100644
3056 --- a/net/netrom/sysctl_net_netrom.c
3057 +++ b/net/netrom/sysctl_net_netrom.c
3058 @@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
3059 { }
3060 };
3061
3062 -void __init nr_register_sysctl(void)
3063 +int __init nr_register_sysctl(void)
3064 {
3065 nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
3066 + if (!nr_table_header)
3067 + return -ENOMEM;
3068 + return 0;
3069 }
3070
3071 void nr_unregister_sysctl(void)
3072 diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
3073 index 65387e1e6964..cd7e01ea8144 100644
3074 --- a/net/rds/af_rds.c
3075 +++ b/net/rds/af_rds.c
3076 @@ -506,6 +506,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
3077 struct rds_sock *rs = rds_sk_to_rs(sk);
3078 int ret = 0;
3079
3080 + if (addr_len < offsetofend(struct sockaddr, sa_family))
3081 + return -EINVAL;
3082 +
3083 lock_sock(sk);
3084
3085 switch (uaddr->sa_family) {
3086 diff --git a/net/rds/bind.c b/net/rds/bind.c
3087 index 17c9d9f0c848..0f4398e7f2a7 100644
3088 --- a/net/rds/bind.c
3089 +++ b/net/rds/bind.c
3090 @@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3091 /* We allow an RDS socket to be bound to either IPv4 or IPv6
3092 * address.
3093 */
3094 + if (addr_len < offsetofend(struct sockaddr, sa_family))
3095 + return -EINVAL;
3096 if (uaddr->sa_family == AF_INET) {
3097 struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
3098
3099 diff --git a/net/rds/ib_fmr.c b/net/rds/ib_fmr.c
3100 index e0f70c4051b6..01e764f8f224 100644
3101 --- a/net/rds/ib_fmr.c
3102 +++ b/net/rds/ib_fmr.c
3103 @@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
3104 else
3105 pool = rds_ibdev->mr_1m_pool;
3106
3107 + if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
3108 + queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
3109 +
3110 + /* Switch pools if one of the pool is reaching upper limit */
3111 + if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
3112 + if (pool->pool_type == RDS_IB_MR_8K_POOL)
3113 + pool = rds_ibdev->mr_1m_pool;
3114 + else
3115 + pool = rds_ibdev->mr_8k_pool;
3116 + }
3117 +
3118 ibmr = rds_ib_try_reuse_ibmr(pool);
3119 if (ibmr)
3120 return ibmr;
3121 diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
3122 index 63c8d107adcf..d664e9ade74d 100644
3123 --- a/net/rds/ib_rdma.c
3124 +++ b/net/rds/ib_rdma.c
3125 @@ -454,9 +454,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
3126 struct rds_ib_mr *ibmr = NULL;
3127 int iter = 0;
3128
3129 - if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
3130 - queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
3131 -
3132 while (1) {
3133 ibmr = rds_ib_reuse_mr(pool);
3134 if (ibmr)
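
The two rds hunks above move the flush kick from rds_ib_try_reuse_ibmr() into
rds_ib_alloc_fmr() and add a spill-over: a flush is queued once a pool is 10%
dirty, and allocation falls back to the other pool once the preferred one is
90% dirty. A userspace sketch of the two thresholds (hypothetical names and
toy sizes):

    #include <stdio.h>

    struct pool { const char *name; int dirty, max_items; };

    /* Hypothetical model of the heuristic: kick the flusher early,
     * switch pools only when the preferred one is nearly exhausted. */
    static struct pool *pick(struct pool *pref, struct pool *other)
    {
        if (pref->dirty >= pref->max_items / 10)
            printf("queue flush of %s pool\n", pref->name);
        if (pref->dirty >= pref->max_items * 9 / 10)
            return other;       /* preferred pool nearly exhausted */
        return pref;
    }

    int main(void)
    {
        struct pool p8k = { "8k", 95, 100 }, p1m = { "1m", 0, 100 };

        printf("using %s pool\n", pick(&p8k, &p1m)->name);
        /* queue flush of 8k pool; using 1m pool */
        return 0;
    }
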
3135 diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
3136 index 7af4f99c4a93..094a6621f8e8 100644
3137 --- a/net/rose/rose_loopback.c
3138 +++ b/net/rose/rose_loopback.c
3139 @@ -16,6 +16,7 @@
3140 #include <linux/init.h>
3141
3142 static struct sk_buff_head loopback_queue;
3143 +#define ROSE_LOOPBACK_LIMIT 1000
3144 static struct timer_list loopback_timer;
3145
3146 static void rose_set_loopback_timer(void);
3147 @@ -35,29 +36,27 @@ static int rose_loopback_running(void)
3148
3149 int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
3150 {
3151 - struct sk_buff *skbn;
3152 + struct sk_buff *skbn = NULL;
3153
3154 - skbn = skb_clone(skb, GFP_ATOMIC);
3155 + if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
3156 + skbn = skb_clone(skb, GFP_ATOMIC);
3157
3158 - kfree_skb(skb);
3159 -
3160 - if (skbn != NULL) {
3161 + if (skbn) {
3162 + consume_skb(skb);
3163 skb_queue_tail(&loopback_queue, skbn);
3164
3165 if (!rose_loopback_running())
3166 rose_set_loopback_timer();
3167 + } else {
3168 + kfree_skb(skb);
3169 }
3170
3171 return 1;
3172 }
3173
3174 -
3175 static void rose_set_loopback_timer(void)
3176 {
3177 - del_timer(&loopback_timer);
3178 -
3179 - loopback_timer.expires = jiffies + 10;
3180 - add_timer(&loopback_timer);
3181 + mod_timer(&loopback_timer, jiffies + 10);
3182 }
3183
3184 static void rose_loopback_timer(struct timer_list *unused)
3185 @@ -68,8 +67,12 @@ static void rose_loopback_timer(struct timer_list *unused)
3186 struct sock *sk;
3187 unsigned short frametype;
3188 unsigned int lci_i, lci_o;
3189 + int count;
3190
3191 - while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
3192 + for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
3193 + skb = skb_dequeue(&loopback_queue);
3194 + if (!skb)
3195 + return;
3196 if (skb->len < ROSE_MIN_LEN) {
3197 kfree_skb(skb);
3198 continue;
3199 @@ -106,6 +109,8 @@ static void rose_loopback_timer(struct timer_list *unused)
3200 kfree_skb(skb);
3201 }
3202 }
3203 + if (!skb_queue_empty(&loopback_queue))
3204 + mod_timer(&loopback_timer, jiffies + 1);
3205 }
3206
3207 void __exit rose_loopback_clear(void)
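
The rose_loopback changes bound the loopback path in three ways: the queue is capped at ROSE_LOOPBACK_LIMIT, del_timer()/add_timer() is collapsed into mod_timer(), and the timer handler processes at most ROSE_LOOPBACK_LIMIT frames per run, rearming itself if frames remain. A simplified userspace sketch of the bounded-drain-and-rearm shape; the queue is reduced to a counter and the timer to a direct call:

#include <stdio.h>

#define LIMIT 1000

/* Hypothetical queue reduced to its length; each unit is one queued
 * loopback frame. */
static int queue_len = 2500;

/* mod_timer() equivalent: (re)arm the single pending timer shot. */
static void rearm_timer(void)
{
        printf("timer rearmed\n");
}

/* Pattern from the patch: handle at most LIMIT frames per timer run
 * so a flooded queue cannot monopolize the handler, and rearm the
 * timer if frames remain. */
static void loopback_timer(void)
{
        int count;

        for (count = 0; count < LIMIT && queue_len > 0; count++)
                queue_len--;

        printf("drained %d, %d left\n", count, queue_len);
        if (queue_len > 0)
                rearm_timer();
}

int main(void)
{
        loopback_timer();
        loopback_timer();
        loopback_timer();
        return 0;
}
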
3208 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3209 index 9128aa0e40aa..b4ffb81223ad 100644
3210 --- a/net/rxrpc/input.c
3211 +++ b/net/rxrpc/input.c
3212 @@ -1155,19 +1155,19 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
3213 * handle data received on the local endpoint
3214 * - may be called in interrupt context
3215 *
3216 - * The socket is locked by the caller and this prevents the socket from being
3217 - * shut down and the local endpoint from going away, thus sk_user_data will not
3218 - * be cleared until this function returns.
3219 + * [!] Note that as this is called from the encap_rcv hook, the socket is not
3220 + * held locked by the caller and nothing prevents sk_user_data on the UDP socket from
3221 + * being cleared in the middle of processing this function.
3222 *
3223 * Called with the RCU read lock held from the IP layer via UDP.
3224 */
3225 int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
3226 {
3227 + struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
3228 struct rxrpc_connection *conn;
3229 struct rxrpc_channel *chan;
3230 struct rxrpc_call *call = NULL;
3231 struct rxrpc_skb_priv *sp;
3232 - struct rxrpc_local *local = udp_sk->sk_user_data;
3233 struct rxrpc_peer *peer = NULL;
3234 struct rxrpc_sock *rx = NULL;
3235 unsigned int channel;
3236 @@ -1175,6 +1175,10 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
3237
3238 _enter("%p", udp_sk);
3239
3240 + if (unlikely(!local)) {
3241 + kfree_skb(skb);
3242 + return 0;
3243 + }
3244 if (skb->tstamp == 0)
3245 skb->tstamp = ktime_get_real();
3246
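
The input.c hunk follows directly from the corrected comment: since encap_rcv runs without the socket lock, sk_user_data can become NULL mid-flight, so the pointer is loaded once via rcu_dereference_sk_user_data() and the packet is dropped if the endpoint is already gone. A userspace sketch of the load-once-then-guard pattern; the lookup and packet types are stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct local_endpoint { const char *name; };
struct packet { int len; };

/* Hypothetical lookup standing in for rcu_dereference_sk_user_data();
 * in the kernel this may legitimately return NULL while the endpoint
 * is being torn down. */
static struct local_endpoint *lookup_endpoint(int alive)
{
        static struct local_endpoint ep = { "local0" };
        return alive ? &ep : NULL;
}

/* Pattern from the patch: load the pointer once, bail out early and
 * free the packet if it is NULL, and use only the local copy after. */
static int input_packet(struct packet *pkt, int alive)
{
        struct local_endpoint *local = lookup_endpoint(alive);

        if (!local) {
                free(pkt);      /* kfree_skb() equivalent */
                return 0;
        }
        printf("delivering %d bytes to %s\n", pkt->len, local->name);
        free(pkt);
        return 0;
}

int main(void)
{
        struct packet *p1 = malloc(sizeof(*p1));
        struct packet *p2 = malloc(sizeof(*p2));

        p1->len = 100;
        p2->len = 200;
        input_packet(p1, 1);    /* endpoint alive: delivered */
        input_packet(p2, 0);    /* endpoint gone: dropped, not crashed */
        return 0;
}
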
3247 diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
3248 index 0906e51d3cfb..10317dbdab5f 100644
3249 --- a/net/rxrpc/local_object.c
3250 +++ b/net/rxrpc/local_object.c
3251 @@ -304,7 +304,8 @@ nomem:
3252 ret = -ENOMEM;
3253 sock_error:
3254 mutex_unlock(&rxnet->local_mutex);
3255 - kfree(local);
3256 + if (local)
3257 + call_rcu(&local->rcu, rxrpc_local_rcu);
3258 _leave(" = %d", ret);
3259 return ERR_PTR(ret);
3260
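
In local_object.c the error path stops calling kfree() on an object that may already be reachable by RCU readers and instead routes it through the same rxrpc_local_rcu() deferred-free callback as normal teardown, tolerating NULL. A userspace sketch of the shape; note that the real call_rcu() defers the callback until a grace period has elapsed, whereas this sketch simply invokes it right away:

#include <stdio.h>
#include <stdlib.h>

struct rx_local { int id; };

/* Stand-in for the rxrpc_local_rcu() callback; in the kernel it only
 * runs once no reader can still hold a reference. */
static void rx_local_rcu(struct rx_local *local)
{
        printf("freeing local endpoint %d\n", local->id);
        free(local);
}

/* Pattern from the patch: on the error path, never free directly an
 * object that may already be visible to lockless readers; route it
 * through the deferred-free callback, and tolerate NULL (failure
 * before allocation). */
static void error_unwind(struct rx_local *local)
{
        if (local)
                rx_local_rcu(local);    /* call_rcu() equivalent */
}

int main(void)
{
        struct rx_local *l = malloc(sizeof(*l));

        l->id = 1;
        error_unwind(l);
        error_unwind(NULL);
        return 0;
}
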
3261 diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
3262 index 12bb23b8e0c5..261131dfa1f1 100644
3263 --- a/net/sunrpc/cache.c
3264 +++ b/net/sunrpc/cache.c
3265 @@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
3266 h->last_refresh = now;
3267 }
3268
3269 +static inline int cache_is_valid(struct cache_head *h);
3270 static void cache_fresh_locked(struct cache_head *head, time_t expiry,
3271 struct cache_detail *detail);
3272 static void cache_fresh_unlocked(struct cache_head *head,
3273 @@ -105,6 +106,8 @@ static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
3274 if (cache_is_expired(detail, tmp)) {
3275 hlist_del_init_rcu(&tmp->cache_list);
3276 detail->entries --;
3277 + if (cache_is_valid(tmp) == -EAGAIN)
3278 + set_bit(CACHE_NEGATIVE, &tmp->flags);
3279 cache_fresh_locked(tmp, 0, detail);
3280 freeme = tmp;
3281 break;
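
The sunrpc change marks an expired entry CACHE_NEGATIVE if it is still in the -EAGAIN (not yet valid) state before it is retired, so waiters get a definitive negative answer rather than retrying against an entry that will never be filled in. A toy sketch of the mark-before-discard idea with the cache states reduced to an enum:

#include <stdio.h>

/* Toy cache entry states: PENDING corresponds to cache_is_valid()
 * returning -EAGAIN, NEGATIVE to the CACHE_NEGATIVE flag. */
enum state { PENDING, VALID, NEGATIVE };

struct entry { enum state state; };

/* Pattern from the patch: before an expired entry is retired, a
 * still-pending entry is flipped to a definitive negative so waiters
 * stop retrying against it. */
static void retire(struct entry *e)
{
        if (e->state == PENDING)
                e->state = NEGATIVE;
        printf("retired entry in state %d\n", e->state);
}

int main(void)
{
        struct entry pending = { PENDING };
        struct entry valid = { VALID };

        retire(&pending);       /* becomes NEGATIVE */
        retire(&valid);         /* stays VALID */
        return 0;
}
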
3282 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
3283 index 4ad3586da8f0..340a6e7c43a7 100644
3284 --- a/net/tipc/netlink_compat.c
3285 +++ b/net/tipc/netlink_compat.c
3286 @@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
3287 if (msg->rep_type)
3288 tipc_tlv_init(msg->rep, msg->rep_type);
3289
3290 - if (cmd->header)
3291 - (*cmd->header)(msg);
3292 + if (cmd->header) {
3293 + err = (*cmd->header)(msg);
3294 + if (err) {
3295 + kfree_skb(msg->rep);
3296 + msg->rep = NULL;
3297 + return err;
3298 + }
3299 + }
3300
3301 arg = nlmsg_new(0, GFP_KERNEL);
3302 if (!arg) {
3303 @@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
3304 if (!bearer)
3305 return -EMSGSIZE;
3306
3307 - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
3308 + len = TLV_GET_DATA_LEN(msg->req);
3309 + len -= offsetof(struct tipc_bearer_config, name);
3310 + if (len <= 0)
3311 + return -EINVAL;
3312 +
3313 + len = min_t(int, len, TIPC_MAX_BEARER_NAME);
3314 if (!string_is_valid(b->name, len))
3315 return -EINVAL;
3316
3317 @@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
3318
3319 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
3320
3321 - len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
3322 + len = TLV_GET_DATA_LEN(msg->req);
3323 + len -= offsetof(struct tipc_link_config, name);
3324 + if (len <= 0)
3325 + return -EINVAL;
3326 +
3327 + len = min_t(int, len, TIPC_MAX_LINK_NAME);
3328 if (!string_is_valid(lc->name, len))
3329 return -EINVAL;
3330
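
Both tipc_nl_compat hunks fix the same over-trusting length calculation: the usable name length is the TLV payload length minus the fixed leading fields of the config struct, and a non-positive remainder must be rejected before the min_t() clamp. A userspace sketch with an illustrative struct standing in for tipc_bearer_config:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_NAME 32

/* Hypothetical wire struct mirroring the shape of tipc_bearer_config:
 * fixed fields followed by a name whose length the peer claims. */
struct bearer_config {
        unsigned int priority;
        unsigned int disc_domain;
        char name[MAX_NAME];
};

/* Pattern from the patch: subtract the fixed header from the TLV
 * payload length, reject non-positive remainders, then clamp. */
static int name_len_from_tlv(int tlv_data_len)
{
        int len = tlv_data_len - (int)offsetof(struct bearer_config, name);

        if (len <= 0)
                return -EINVAL;
        return len < MAX_NAME ? len : MAX_NAME;  /* min_t() equivalent */
}

int main(void)
{
        printf("tlv len 4  -> %d (rejected)\n", name_len_from_tlv(4));
        printf("tlv len 20 -> %d bytes of name\n", name_len_from_tlv(20));
        return 0;
}

The dumpit hunk in the same file is complementary: when the header callback fails, msg->rep is freed and NULLed so a malformed request no longer leaks the reply buffer.
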
3331 diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
3332 index 4b5ff3d44912..5f1d937c4be9 100644
3333 --- a/net/tls/tls_device.c
3334 +++ b/net/tls/tls_device.c
3335 @@ -884,7 +884,9 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
3336 goto release_netdev;
3337
3338 free_sw_resources:
3339 + up_read(&device_offload_lock);
3340 tls_sw_free_resources_rx(sk);
3341 + down_read(&device_offload_lock);
3342 release_ctx:
3343 ctx->priv_ctx_rx = NULL;
3344 release_netdev:
3345 @@ -919,8 +921,6 @@ void tls_device_offload_cleanup_rx(struct sock *sk)
3346 }
3347 out:
3348 up_read(&device_offload_lock);
3349 - kfree(tls_ctx->rx.rec_seq);
3350 - kfree(tls_ctx->rx.iv);
3351 tls_sw_release_resources_rx(sk);
3352 }
3353
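
The tls_device.c error path now releases device_offload_lock around tls_sw_free_resources_rx() and re-takes it afterwards, keeping the common unwind path balanced; the rx.rec_seq/rx.iv frees also move out of tls_device_offload_cleanup_rx() entirely (see the tls_sw.c hunk below). A small pthread sketch of the drop-call-reacquire shape, assuming the callee must not run under the lock:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t offload_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Hypothetical cleanup that must not run under offload_lock, e.g.
 * because it can block or take the same lock itself. */
static void free_sw_resources(void)
{
        printf("freeing software resources outside the lock\n");
}

static void error_path(void)
{
        /* ... failure detected while holding the read lock ... */

        /* Pattern from the patch: release the lock around the call,
         * then reacquire it so the shared unwind path unlocks once. */
        pthread_rwlock_unlock(&offload_lock);
        free_sw_resources();
        pthread_rwlock_rdlock(&offload_lock);
}

int main(void)
{
        pthread_rwlock_rdlock(&offload_lock);
        error_path();
        pthread_rwlock_unlock(&offload_lock);
        return 0;
}

Build with -pthread.
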
3354 diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
3355 index 450a6dbc5a88..ef8934fd8698 100644
3356 --- a/net/tls/tls_device_fallback.c
3357 +++ b/net/tls/tls_device_fallback.c
3358 @@ -193,6 +193,9 @@ static void update_chksum(struct sk_buff *skb, int headln)
3359
3360 static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
3361 {
3362 + struct sock *sk = skb->sk;
3363 + int delta;
3364 +
3365 skb_copy_header(nskb, skb);
3366
3367 skb_put(nskb, skb->len);
3368 @@ -200,11 +203,15 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
3369 update_chksum(nskb, headln);
3370
3371 nskb->destructor = skb->destructor;
3372 - nskb->sk = skb->sk;
3373 + nskb->sk = sk;
3374 skb->destructor = NULL;
3375 skb->sk = NULL;
3376 - refcount_add(nskb->truesize - skb->truesize,
3377 - &nskb->sk->sk_wmem_alloc);
3378 +
3379 + delta = nskb->truesize - skb->truesize;
3380 + if (likely(delta < 0))
3381 + WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
3382 + else if (delta)
3383 + refcount_add(delta, &sk->sk_wmem_alloc);
3384 }
3385
3386 /* This function may be called after the user socket is already
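
In tls_device_fallback.c the truesize delta between the new and old skb is usually negative, which a refcount_add() of the raw difference cannot express; the fix computes a signed delta and applies it with the matching sub or add. A userspace sketch of the same arithmetic with a plain C11 atomic standing in for sk_wmem_alloc:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for sk->sk_wmem_alloc. */
static atomic_int wmem_alloc = 1000;

/* Pattern from the patch: a truesize delta can have either sign, and
 * an unsigned refcount API must be given the magnitude with the
 * matching add/sub operation. */
static void adjust_wmem(int new_truesize, int old_truesize)
{
        int delta = new_truesize - old_truesize;

        if (delta < 0)
                atomic_fetch_sub(&wmem_alloc, -delta);
        else if (delta)
                atomic_fetch_add(&wmem_alloc, delta);
}

int main(void)
{
        adjust_wmem(700, 800);  /* replacement skb is smaller */
        printf("wmem after shrink: %d\n", atomic_load(&wmem_alloc));
        adjust_wmem(900, 700);  /* replacement skb is larger */
        printf("wmem after grow:   %d\n", atomic_load(&wmem_alloc));
        return 0;
}

The likely(delta < 0) annotation in the patch reflects that the fallback skb is normally smaller than the original.
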
3387 diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
3388 index 96dbac91ac6e..ce5dd79365a7 100644
3389 --- a/net/tls/tls_main.c
3390 +++ b/net/tls/tls_main.c
3391 @@ -304,11 +304,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
3392 #endif
3393 }
3394
3395 - if (ctx->rx_conf == TLS_SW) {
3396 - kfree(ctx->rx.rec_seq);
3397 - kfree(ctx->rx.iv);
3398 + if (ctx->rx_conf == TLS_SW)
3399 tls_sw_free_resources_rx(sk);
3400 - }
3401
3402 #ifdef CONFIG_TLS_DEVICE
3403 if (ctx->rx_conf == TLS_HW)
3404 diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3405 index d2d4f7c0d4be..839a0a0b5dfa 100644
3406 --- a/net/tls/tls_sw.c
3407 +++ b/net/tls/tls_sw.c
3408 @@ -1830,6 +1830,9 @@ void tls_sw_release_resources_rx(struct sock *sk)
3409 struct tls_context *tls_ctx = tls_get_ctx(sk);
3410 struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
3411
3412 + kfree(tls_ctx->rx.rec_seq);
3413 + kfree(tls_ctx->rx.iv);
3414 +
3415 if (ctx->aead_recv) {
3416 kfree_skb(ctx->recv_pkt);
3417 ctx->recv_pkt = NULL;
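
The tls_main.c and tls_sw.c hunks (together with the tls_device.c one above) converge on a single owner for rx.rec_seq and rx.iv: every teardown path now reaches tls_sw_release_resources_rx(), which frees them exactly once. A toy sketch of the consolidation; the NULL-ing after free is an extra safety step in this sketch, not something the patch itself adds:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical context holding the two buffers that several teardown
 * paths used to free individually. */
struct ctx {
        void *rec_seq;
        void *iv;
};

/* Pattern from the patch: one release function owns these frees, so
 * no teardown path can double-free or forget them. */
static void release_resources(struct ctx *c)
{
        free(c->rec_seq);
        free(c->iv);
        c->rec_seq = NULL;      /* extra safety in this sketch only */
        c->iv = NULL;
}

static void close_path(struct ctx *c)       { release_resources(c); }
static void offload_cleanup(struct ctx *c)  { release_resources(c); }

int main(void)
{
        struct ctx c = { malloc(8), malloc(8) };

        close_path(&c);
        offload_cleanup(&c);    /* free(NULL) is a no-op */
        printf("released cleanly\n");
        return 0;
}
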
3418 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3419 index f061167062bc..a9f69c3a3e0b 100644
3420 --- a/sound/pci/hda/patch_realtek.c
3421 +++ b/sound/pci/hda/patch_realtek.c
3422 @@ -5490,7 +5490,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
3423 jack->jack->button_state = report;
3424 }
3425
3426 -static void alc295_fixup_chromebook(struct hda_codec *codec,
3427 +static void alc_fixup_headset_jack(struct hda_codec *codec,
3428 const struct hda_fixup *fix, int action)
3429 {
3430
3431 @@ -5500,16 +5500,6 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
3432 alc_headset_btn_callback);
3433 snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
3434 SND_JACK_HEADSET, alc_headset_btn_keymap);
3435 - switch (codec->core.vendor_id) {
3436 - case 0x10ec0295:
3437 - alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
3438 - alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
3439 - break;
3440 - case 0x10ec0236:
3441 - alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
3442 - alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
3443 - break;
3444 - }
3445 break;
3446 case HDA_FIXUP_ACT_INIT:
3447 switch (codec->core.vendor_id) {
3448 @@ -5530,6 +5520,25 @@ static void alc295_fixup_chromebook(struct hda_codec *codec,
3449 }
3450 }
3451
3452 +static void alc295_fixup_chromebook(struct hda_codec *codec,
3453 + const struct hda_fixup *fix, int action)
3454 +{
3455 + switch (action) {
3456 + case HDA_FIXUP_ACT_INIT:
3457 + switch (codec->core.vendor_id) {
3458 + case 0x10ec0295:
3459 + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
3460 + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
3461 + break;
3462 + case 0x10ec0236:
3463 + alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
3464 + alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
3465 + break;
3466 + }
3467 + break;
3468 + }
3469 +}
3470 +
3471 static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
3472 const struct hda_fixup *fix, int action)
3473 {
3474 @@ -5684,6 +5693,7 @@ enum {
3475 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
3476 ALC255_FIXUP_ACER_HEADSET_MIC,
3477 ALC295_FIXUP_CHROME_BOOK,
3478 + ALC225_FIXUP_HEADSET_JACK,
3479 ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
3480 ALC225_FIXUP_WYSE_AUTO_MUTE,
3481 ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
3482 @@ -6645,6 +6655,12 @@ static const struct hda_fixup alc269_fixups[] = {
3483 [ALC295_FIXUP_CHROME_BOOK] = {
3484 .type = HDA_FIXUP_FUNC,
3485 .v.func = alc295_fixup_chromebook,
3486 + .chained = true,
3487 + .chain_id = ALC225_FIXUP_HEADSET_JACK
3488 + },
3489 + [ALC225_FIXUP_HEADSET_JACK] = {
3490 + .type = HDA_FIXUP_FUNC,
3491 + .v.func = alc_fixup_headset_jack,
3492 },
3493 [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
3494 .type = HDA_FIXUP_PINS,
3495 @@ -7143,7 +7159,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
3496 {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
3497 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
3498 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
3499 - {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
3500 + {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-headset-jack"},
3501 + {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-chrome-book"},
3502 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
3503 {}
3504 };
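
The patch_realtek.c rework splits the old combined fixup: the generic headset-jack setup becomes alc_fixup_headset_jack() (new ALC225_FIXUP_HEADSET_JACK entry, model name "alc-headset-jack"), while alc295_fixup_chromebook() keeps only the chromebook-specific HP jack-detect coefficient reset and chains to the generic part via .chained/.chain_id. A miniature sketch of such a chained fixup table; the enum, structs, and runner are illustrative, not the HDA core API:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical miniature of the HDA fixup table mechanism. */
enum { FIXUP_CHROME_BOOK, FIXUP_HEADSET_JACK, FIXUP_COUNT };

struct fixup {
        void (*func)(void);
        bool chained;
        int chain_id;
};

static void chromebook_func(void)   { printf("chromebook-specific init\n"); }
static void headset_jack_func(void) { printf("generic headset jack setup\n"); }

static const struct fixup fixups[FIXUP_COUNT] = {
        [FIXUP_CHROME_BOOK]  = { chromebook_func, true, FIXUP_HEADSET_JACK },
        [FIXUP_HEADSET_JACK] = { headset_jack_func, false, 0 },
};

/* Pattern from the patch: applying one fixup follows its chain, so
 * shared behaviour is written once and specific fixups link to it. */
static void apply_fixup(int id)
{
        for (;;) {
                const struct fixup *f = &fixups[id];

                f->func();
                if (!f->chained)
                        break;
                id = f->chain_id;
        }
}

int main(void)
{
        apply_fixup(FIXUP_CHROME_BOOK);
        return 0;
}

In this sketch, applying FIXUP_CHROME_BOOK runs the chromebook-specific function and then follows the chain to the shared jack setup, mirroring how the chained entries in alc269_fixups[] compose.
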