Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0290-5.4.191-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3635 - (show annotations) (download)
Mon Oct 24 12:34:12 2022 UTC (19 months, 1 week ago) by niro
File size: 68854 byte(s)
-sync kernel patches
1 diff --git a/Documentation/filesystems/ext4/attributes.rst b/Documentation/filesystems/ext4/attributes.rst
2 index 54386a010a8d7..871d2da7a0a91 100644
3 --- a/Documentation/filesystems/ext4/attributes.rst
4 +++ b/Documentation/filesystems/ext4/attributes.rst
5 @@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
6 - Checksum of the extended attribute block.
7 * - 0x14
8 - \_\_u32
9 - - h\_reserved[2]
10 + - h\_reserved[3]
11 - Zero.
12
13 The checksum is calculated against the FS UUID, the 64-bit block number
14 diff --git a/Makefile b/Makefile
15 index fd239ec16278b..365b487e50d7f 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,7 +1,7 @@
19 # SPDX-License-Identifier: GPL-2.0
20 VERSION = 5
21 PATCHLEVEL = 4
22 -SUBLEVEL = 190
23 +SUBLEVEL = 191
24 EXTRAVERSION =
25 NAME = Kleptomaniac Octopus
26
27 diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
28 index cef1d3f2656f6..beb39930eedbe 100644
29 --- a/arch/arc/kernel/entry.S
30 +++ b/arch/arc/kernel/entry.S
31 @@ -199,6 +199,7 @@ tracesys_exit:
32 st r0, [sp, PT_r0] ; sys call return value in pt_regs
33
34 ;POST Sys Call Ptrace Hook
35 + mov r0, sp ; pt_regs needed
36 bl @syscall_trace_exit
37 b ret_from_exception ; NOT ret_from_system_call at is saves r0 which
38 ; we'd done before calling post hook above
39 diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
40 index 1da11bdb1dfbd..1c6500c4e6a17 100644
41 --- a/arch/arm/mach-vexpress/spc.c
42 +++ b/arch/arm/mach-vexpress/spc.c
43 @@ -580,7 +580,7 @@ static int __init ve_spc_clk_init(void)
44 }
45
46 cluster = topology_physical_package_id(cpu_dev->id);
47 - if (init_opp_table[cluster])
48 + if (cluster < 0 || init_opp_table[cluster])
49 continue;
50
51 if (ve_init_opp_table(cpu_dev))
52 diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
53 index 03b947429e4de..4518a0f2d6c69 100644
54 --- a/arch/powerpc/kvm/book3s_64_vio.c
55 +++ b/arch/powerpc/kvm/book3s_64_vio.c
56 @@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
57 tbl[idx % TCES_PER_PAGE] = tce;
58 }
59
60 -static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
61 - unsigned long entry)
62 +static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
63 + struct iommu_table *tbl, unsigned long entry)
64 {
65 - unsigned long hpa = 0;
66 - enum dma_data_direction dir = DMA_NONE;
67 + unsigned long i;
68 + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
69 + unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
70 +
71 + for (i = 0; i < subpages; ++i) {
72 + unsigned long hpa = 0;
73 + enum dma_data_direction dir = DMA_NONE;
74
75 - iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
76 + iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
77 + }
78 }
79
80 static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
81 @@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
82 break;
83 }
84
85 + iommu_tce_kill(tbl, io_entry, subpages);
86 +
87 return ret;
88 }
89
90 @@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
91 break;
92 }
93
94 + iommu_tce_kill(tbl, io_entry, subpages);
95 +
96 return ret;
97 }
98
99 @@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
100 ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
101 entry, ua, dir);
102
103 - iommu_tce_kill(stit->tbl, entry, 1);
104
105 if (ret != H_SUCCESS) {
106 - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
107 + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
108 goto unlock_exit;
109 }
110 }
111 @@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
112 */
113 if (get_user(tce, tces + i)) {
114 ret = H_TOO_HARD;
115 - goto invalidate_exit;
116 + goto unlock_exit;
117 }
118 tce = be64_to_cpu(tce);
119
120 if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
121 ret = H_PARAMETER;
122 - goto invalidate_exit;
123 + goto unlock_exit;
124 }
125
126 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
127 @@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
128 iommu_tce_direction(tce));
129
130 if (ret != H_SUCCESS) {
131 - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
132 - entry);
133 - goto invalidate_exit;
134 + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
135 + entry + i);
136 + goto unlock_exit;
137 }
138 }
139
140 kvmppc_tce_put(stt, entry + i, tce);
141 }
142
143 -invalidate_exit:
144 - list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
145 - iommu_tce_kill(stit->tbl, entry, npages);
146 -
147 unlock_exit:
148 srcu_read_unlock(&vcpu->kvm->srcu, idx);
149
150 @@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
151 continue;
152
153 if (ret == H_TOO_HARD)
154 - goto invalidate_exit;
155 + return ret;
156
157 WARN_ON_ONCE(1);
158 - kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
159 + kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
160 }
161 }
162
163 for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
164 kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
165
166 -invalidate_exit:
167 - list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
168 - iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
169 -
170 return ret;
171 }
172 EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
173 diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
174 index 35fd67b4ceb41..abb49d8633298 100644
175 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
176 +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
177 @@ -251,13 +251,19 @@ extern void iommu_tce_kill_rm(struct iommu_table *tbl,
178 tbl->it_ops->tce_kill(tbl, entry, pages, true);
179 }
180
181 -static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
182 - unsigned long entry)
183 +static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
184 + struct iommu_table *tbl, unsigned long entry)
185 {
186 - unsigned long hpa = 0;
187 - enum dma_data_direction dir = DMA_NONE;
188 + unsigned long i;
189 + unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
190 + unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
191 +
192 + for (i = 0; i < subpages; ++i) {
193 + unsigned long hpa = 0;
194 + enum dma_data_direction dir = DMA_NONE;
195
196 - iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
197 + iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
198 + }
199 }
200
201 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
202 @@ -320,6 +326,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
203 break;
204 }
205
206 + iommu_tce_kill_rm(tbl, io_entry, subpages);
207 +
208 return ret;
209 }
210
211 @@ -383,6 +391,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
212 break;
213 }
214
215 + iommu_tce_kill_rm(tbl, io_entry, subpages);
216 +
217 return ret;
218 }
219
220 @@ -428,10 +438,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
221 ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
222 stit->tbl, entry, ua, dir);
223
224 - iommu_tce_kill_rm(stit->tbl, entry, 1);
225 -
226 if (ret != H_SUCCESS) {
227 - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
228 + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
229 return ret;
230 }
231 }
232 @@ -571,7 +579,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
233 ua = 0;
234 if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
235 ret = H_PARAMETER;
236 - goto invalidate_exit;
237 + goto unlock_exit;
238 }
239
240 list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
241 @@ -580,19 +588,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
242 iommu_tce_direction(tce));
243
244 if (ret != H_SUCCESS) {
245 - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
246 - entry);
247 - goto invalidate_exit;
248 + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
249 + entry + i);
250 + goto unlock_exit;
251 }
252 }
253
254 kvmppc_rm_tce_put(stt, entry + i, tce);
255 }
256
257 -invalidate_exit:
258 - list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
259 - iommu_tce_kill_rm(stit->tbl, entry, npages);
260 -
261 unlock_exit:
262 if (rmap)
263 unlock_rmap(rmap);
264 @@ -635,20 +639,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
265 continue;
266
267 if (ret == H_TOO_HARD)
268 - goto invalidate_exit;
269 + return ret;
270
271 WARN_ON_ONCE_RM(1);
272 - kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
273 + kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
274 }
275 }
276
277 for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
278 kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
279
280 -invalidate_exit:
281 - list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
282 - iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
283 -
284 return ret;
285 }
286
287 diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
288 index 08c3ef7961982..1225f53609a44 100644
289 --- a/arch/powerpc/perf/power9-pmu.c
290 +++ b/arch/powerpc/perf/power9-pmu.c
291 @@ -131,11 +131,11 @@ int p9_dd22_bl_ev[] = {
292
293 /* Table of alternatives, sorted by column 0 */
294 static const unsigned int power9_event_alternatives[][MAX_ALT] = {
295 - { PM_INST_DISP, PM_INST_DISP_ALT },
296 - { PM_RUN_CYC_ALT, PM_RUN_CYC },
297 - { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
298 - { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
299 { PM_BR_2PATH, PM_BR_2PATH_ALT },
300 + { PM_INST_DISP, PM_INST_DISP_ALT },
301 + { PM_RUN_CYC_ALT, PM_RUN_CYC },
302 + { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
303 + { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
304 };
305
306 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
307 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
308 index 22c4dfe659923..b4dd6ab0fdfce 100644
309 --- a/arch/x86/include/asm/compat.h
310 +++ b/arch/x86/include/asm/compat.h
311 @@ -31,15 +31,13 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
312 typedef u64 __attribute__((aligned(4))) compat_u64;
313
314 struct compat_stat {
315 - compat_dev_t st_dev;
316 - u16 __pad1;
317 + u32 st_dev;
318 compat_ino_t st_ino;
319 compat_mode_t st_mode;
320 compat_nlink_t st_nlink;
321 __compat_uid_t st_uid;
322 __compat_gid_t st_gid;
323 - compat_dev_t st_rdev;
324 - u16 __pad2;
325 + u32 st_rdev;
326 u32 st_size;
327 u32 st_blksize;
328 u32 st_blocks;
329 diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
330 index d956f87fcb095..6a6cc8dae5653 100644
331 --- a/arch/xtensa/kernel/coprocessor.S
332 +++ b/arch/xtensa/kernel/coprocessor.S
333 @@ -37,7 +37,7 @@
334 .if XTENSA_HAVE_COPROCESSOR(x); \
335 .align 4; \
336 .Lsave_cp_regs_cp##x: \
337 - xchal_cp##x##_store a2 a4 a5 a6 a7; \
338 + xchal_cp##x##_store a2 a3 a4 a5 a6; \
339 jx a0; \
340 .endif
341
342 @@ -54,7 +54,7 @@
343 .if XTENSA_HAVE_COPROCESSOR(x); \
344 .align 4; \
345 .Lload_cp_regs_cp##x: \
346 - xchal_cp##x##_load a2 a4 a5 a6 a7; \
347 + xchal_cp##x##_load a2 a3 a4 a5 a6; \
348 jx a0; \
349 .endif
350
351 diff --git a/arch/xtensa/kernel/jump_label.c b/arch/xtensa/kernel/jump_label.c
352 index 0dde21e0d3de4..ad1841cecdfb7 100644
353 --- a/arch/xtensa/kernel/jump_label.c
354 +++ b/arch/xtensa/kernel/jump_label.c
355 @@ -40,7 +40,7 @@ static int patch_text_stop_machine(void *data)
356 {
357 struct patch *patch = data;
358
359 - if (atomic_inc_return(&patch->cpu_count) == 1) {
360 + if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
361 local_patch_text(patch->addr, patch->data, patch->sz);
362 atomic_inc(&patch->cpu_count);
363 } else {
364 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
365 index 7f053468b50d7..d490ac220ba86 100644
366 --- a/block/compat_ioctl.c
367 +++ b/block/compat_ioctl.c
368 @@ -393,7 +393,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
369 return 0;
370 case BLKGETSIZE:
371 size = i_size_read(bdev->bd_inode);
372 - if ((size >> 9) > ~0UL)
373 + if ((size >> 9) > ~(compat_ulong_t)0)
374 return -EFBIG;
375 return compat_put_ulong(arg, size >> 9);
376
377 diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
378 index b066809ba9a11..c56f4043b0cc0 100644
379 --- a/drivers/ata/pata_marvell.c
380 +++ b/drivers/ata/pata_marvell.c
381 @@ -83,6 +83,8 @@ static int marvell_cable_detect(struct ata_port *ap)
382 switch(ap->port_no)
383 {
384 case 0:
385 + if (!ap->ioaddr.bmdma_addr)
386 + return ATA_CBL_PATA_UNK;
387 if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
388 return ATA_CBL_PATA40;
389 return ATA_CBL_PATA80;
390 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
391 index 9aae6b3da356b..6473a4a81d58b 100644
392 --- a/drivers/dma/at_xdmac.c
393 +++ b/drivers/dma/at_xdmac.c
394 @@ -1390,7 +1390,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
395 {
396 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
397 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
398 - struct at_xdmac_desc *desc, *_desc;
399 + struct at_xdmac_desc *desc, *_desc, *iter;
400 struct list_head *descs_list;
401 enum dma_status ret;
402 int residue, retry;
403 @@ -1505,11 +1505,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
404 * microblock.
405 */
406 descs_list = &desc->descs_list;
407 - list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
408 - dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
409 - residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
410 - if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
411 + list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
412 + dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
413 + residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
414 + if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
415 + desc = iter;
416 break;
417 + }
418 }
419 residue += cur_ubc << dwidth;
420
421 diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
422 index cc70da05db4b5..801bef83df2a5 100644
423 --- a/drivers/dma/imx-sdma.c
424 +++ b/drivers/dma/imx-sdma.c
425 @@ -1784,7 +1784,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
426 u32 reg, val, shift, num_map, i;
427 int ret = 0;
428
429 - if (IS_ERR(np) || IS_ERR(gpr_np))
430 + if (IS_ERR(np) || !gpr_np)
431 goto out;
432
433 event_remap = of_find_property(np, propname, NULL);
434 @@ -1832,7 +1832,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
435 }
436
437 out:
438 - if (!IS_ERR(gpr_np))
439 + if (gpr_np)
440 of_node_put(gpr_np);
441
442 return ret;
443 diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
444 index 9c0ea13ca7883..7718d09e3d29f 100644
445 --- a/drivers/dma/mediatek/mtk-uart-apdma.c
446 +++ b/drivers/dma/mediatek/mtk-uart-apdma.c
447 @@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
448 unsigned int status;
449 int ret;
450
451 - ret = pm_runtime_get_sync(mtkd->ddev.dev);
452 + ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
453 if (ret < 0) {
454 pm_runtime_put_noidle(chan->device->dev);
455 return ret;
456 @@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
457 ret = readx_poll_timeout(readl, c->base + VFF_EN,
458 status, !status, 10, 100);
459 if (ret)
460 - return ret;
461 + goto err_pm;
462
463 ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
464 IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
465 if (ret < 0) {
466 dev_err(chan->device->dev, "Can't request dma IRQ\n");
467 - return -EINVAL;
468 + ret = -EINVAL;
469 + goto err_pm;
470 }
471
472 if (mtkd->support_33bits)
473 mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
474
475 +err_pm:
476 + pm_runtime_put_noidle(mtkd->ddev.dev);
477 return ret;
478 }
479
480 diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
481 index d23a0782fb49c..4d2387f8e511b 100644
482 --- a/drivers/edac/synopsys_edac.c
483 +++ b/drivers/edac/synopsys_edac.c
484 @@ -163,6 +163,11 @@
485 #define ECC_STAT_CECNT_SHIFT 8
486 #define ECC_STAT_BITNUM_MASK 0x7F
487
488 +/* ECC error count register definitions */
489 +#define ECC_ERRCNT_UECNT_MASK 0xFFFF0000
490 +#define ECC_ERRCNT_UECNT_SHIFT 16
491 +#define ECC_ERRCNT_CECNT_MASK 0xFFFF
492 +
493 /* DDR QOS Interrupt register definitions */
494 #define DDR_QOS_IRQ_STAT_OFST 0x20200
495 #define DDR_QOSUE_MASK 0x4
496 @@ -418,15 +423,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
497 base = priv->baseaddr;
498 p = &priv->stat;
499
500 + regval = readl(base + ECC_ERRCNT_OFST);
501 + p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
502 + p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
503 + if (!p->ce_cnt)
504 + goto ue_err;
505 +
506 regval = readl(base + ECC_STAT_OFST);
507 if (!regval)
508 return 1;
509
510 - p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
511 - p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
512 - if (!p->ce_cnt)
513 - goto ue_err;
514 -
515 p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
516
517 regval = readl(base + ECC_CEADDR0_OFST);
518 diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
519 index 83423092de2ff..da07993339702 100644
520 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
521 +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
522 @@ -179,7 +179,10 @@ static void mdp5_plane_reset(struct drm_plane *plane)
523 drm_framebuffer_put(plane->state->fb);
524
525 kfree(to_mdp5_plane_state(plane->state));
526 + plane->state = NULL;
527 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
528 + if (!mdp5_state)
529 + return;
530
531 /* assign default blend parameters */
532 mdp5_state->alpha = 255;
533 diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
534 index bdb4d59c81277..a621dd28ff70d 100644
535 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
536 +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
537 @@ -232,7 +232,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
538
539 ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
540 if (ret)
541 - dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
542 + dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
543 }
544
545 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
546 @@ -268,7 +268,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
547 return 0;
548 }
549
550 -static int rpi_touchscreen_enable(struct drm_panel *panel)
551 +static int rpi_touchscreen_prepare(struct drm_panel *panel)
552 {
553 struct rpi_touchscreen *ts = panel_to_ts(panel);
554 int i;
555 @@ -298,6 +298,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
556 rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
557 msleep(100);
558
559 + return 0;
560 +}
561 +
562 +static int rpi_touchscreen_enable(struct drm_panel *panel)
563 +{
564 + struct rpi_touchscreen *ts = panel_to_ts(panel);
565 +
566 /* Turn on the backlight. */
567 rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
568
569 @@ -352,7 +359,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel)
570 static const struct drm_panel_funcs rpi_touchscreen_funcs = {
571 .disable = rpi_touchscreen_disable,
572 .unprepare = rpi_touchscreen_noop,
573 - .prepare = rpi_touchscreen_noop,
574 + .prepare = rpi_touchscreen_prepare,
575 .enable = rpi_touchscreen_enable,
576 .get_modes = rpi_touchscreen_get_modes,
577 };
578 diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
579 index c78fa8144776e..0983949cc8c98 100644
580 --- a/drivers/gpu/drm/vc4/vc4_dsi.c
581 +++ b/drivers/gpu/drm/vc4/vc4_dsi.c
582 @@ -831,7 +831,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
583 unsigned long phy_clock;
584 int ret;
585
586 - ret = pm_runtime_get_sync(dev);
587 + ret = pm_runtime_resume_and_get(dev);
588 if (ret) {
589 DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port);
590 return;
591 diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
592 index c43e98bb6e2d7..b514b2eaa3180 100644
593 --- a/drivers/net/can/usb/usb_8dev.c
594 +++ b/drivers/net/can/usb/usb_8dev.c
595 @@ -670,9 +670,20 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb,
596 atomic_inc(&priv->active_tx_urbs);
597
598 err = usb_submit_urb(urb, GFP_ATOMIC);
599 - if (unlikely(err))
600 - goto failed;
601 - else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
602 + if (unlikely(err)) {
603 + can_free_echo_skb(netdev, context->echo_index);
604 +
605 + usb_unanchor_urb(urb);
606 + usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
607 +
608 + atomic_dec(&priv->active_tx_urbs);
609 +
610 + if (err == -ENODEV)
611 + netif_device_detach(netdev);
612 + else
613 + netdev_warn(netdev, "failed tx_urb %d\n", err);
614 + stats->tx_dropped++;
615 + } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS)
616 /* Slow down tx path */
617 netif_stop_queue(netdev);
618
619 @@ -691,19 +702,6 @@ nofreecontext:
620
621 return NETDEV_TX_BUSY;
622
623 -failed:
624 - can_free_echo_skb(netdev, context->echo_index);
625 -
626 - usb_unanchor_urb(urb);
627 - usb_free_coherent(priv->udev, size, buf, urb->transfer_dma);
628 -
629 - atomic_dec(&priv->active_tx_urbs);
630 -
631 - if (err == -ENODEV)
632 - netif_device_detach(netdev);
633 - else
634 - netdev_warn(netdev, "failed tx_urb %d\n", err);
635 -
636 nomembuf:
637 usb_free_urb(urb);
638
639 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
640 index 480d2ca369e6b..002a374f197bd 100644
641 --- a/drivers/net/ethernet/cadence/macb_main.c
642 +++ b/drivers/net/ethernet/cadence/macb_main.c
643 @@ -1378,6 +1378,7 @@ static void macb_tx_restart(struct macb_queue *queue)
644 unsigned int head = queue->tx_head;
645 unsigned int tail = queue->tx_tail;
646 struct macb *bp = queue->bp;
647 + unsigned int head_idx, tbqp;
648
649 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
650 queue_writel(queue, ISR, MACB_BIT(TXUBR));
651 @@ -1385,6 +1386,13 @@ static void macb_tx_restart(struct macb_queue *queue)
652 if (head == tail)
653 return;
654
655 + tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
656 + tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
657 + head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, head));
658 +
659 + if (tbqp == head_idx)
660 + return;
661 +
662 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
663 }
664
665 diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
666 index 7ce2e99b594d6..0a186d16e73f7 100644
667 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
668 +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
669 @@ -506,11 +506,15 @@ static int dpaa_get_ts_info(struct net_device *net_dev,
670 info->phc_index = -1;
671
672 fman_node = of_get_parent(mac_node);
673 - if (fman_node)
674 + if (fman_node) {
675 ptp_node = of_parse_phandle(fman_node, "ptimer-handle", 0);
676 + of_node_put(fman_node);
677 + }
678
679 - if (ptp_node)
680 + if (ptp_node) {
681 ptp_dev = of_find_device_by_node(ptp_node);
682 + of_node_put(ptp_node);
683 + }
684
685 if (ptp_dev)
686 ptp = platform_get_drvdata(ptp_dev);
687 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
688 index 58ff747a42ae6..1241b4734896f 100644
689 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
690 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
691 @@ -995,8 +995,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
692 {
693 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
694 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
695 - u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
696 - u16 lat_enc_d = 0; /* latency decoded */
697 + u32 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */
698 + u32 lat_enc_d = 0; /* latency decoded */
699 u16 lat_enc = 0; /* latency encoded */
700
701 if (link) {
702 diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c
703 index ed5d09c11c389..79252ca9e2133 100644
704 --- a/drivers/net/ethernet/intel/igc/igc_i225.c
705 +++ b/drivers/net/ethernet/intel/igc/igc_i225.c
706 @@ -156,8 +156,15 @@ void igc_release_swfw_sync_i225(struct igc_hw *hw, u16 mask)
707 {
708 u32 swfw_sync;
709
710 - while (igc_get_hw_semaphore_i225(hw))
711 - ; /* Empty */
712 + /* Releasing the resource requires first getting the HW semaphore.
713 + * If we fail to get the semaphore, there is nothing we can do,
714 + * except log an error and quit. We are not allowed to hang here
715 + * indefinitely, as it may cause denial of service or system crash.
716 + */
717 + if (igc_get_hw_semaphore_i225(hw)) {
718 + hw_dbg("Failed to release SW_FW_SYNC.\n");
719 + return;
720 + }
721
722 swfw_sync = rd32(IGC_SW_FW_SYNC);
723 swfw_sync &= ~mask;
724 diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
725 index 1a4947e6933c3..6156c76d765ff 100644
726 --- a/drivers/net/ethernet/intel/igc/igc_phy.c
727 +++ b/drivers/net/ethernet/intel/igc/igc_phy.c
728 @@ -569,7 +569,7 @@ static s32 igc_read_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 *data)
729 * the lower time out
730 */
731 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
732 - usleep_range(500, 1000);
733 + udelay(50);
734 mdic = rd32(IGC_MDIC);
735 if (mdic & IGC_MDIC_READY)
736 break;
737 @@ -626,7 +626,7 @@ static s32 igc_write_phy_reg_mdic(struct igc_hw *hw, u32 offset, u16 data)
738 * the lower time out
739 */
740 for (i = 0; i < IGC_GEN_POLL_TIMEOUT; i++) {
741 - usleep_range(500, 1000);
742 + udelay(50);
743 mdic = rd32(IGC_MDIC);
744 if (mdic & IGC_MDIC_READY)
745 break;
746 diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
747 index 120ed4633a096..b9c4d48e28e42 100644
748 --- a/drivers/net/ethernet/micrel/Kconfig
749 +++ b/drivers/net/ethernet/micrel/Kconfig
750 @@ -37,7 +37,6 @@ config KS8851
751 config KS8851_MLL
752 tristate "Micrel KS8851 MLL"
753 depends on HAS_IOMEM
754 - depends on PTP_1588_CLOCK_OPTIONAL
755 select MII
756 ---help---
757 This platform driver is for Micrel KS8851 Address/data bus
758 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
759 index c5991e31c557e..f4869b1836f30 100644
760 --- a/drivers/net/vxlan.c
761 +++ b/drivers/net/vxlan.c
762 @@ -679,11 +679,11 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
763
764 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
765 if (rd == NULL)
766 - return -ENOBUFS;
767 + return -ENOMEM;
768
769 if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) {
770 kfree(rd);
771 - return -ENOBUFS;
772 + return -ENOMEM;
773 }
774
775 rd->remote_ip = *ip;
776 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
777 index ef5521b9b3577..ddc999670484f 100644
778 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
779 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
780 @@ -550,7 +550,7 @@ enum brcmf_sdio_frmtype {
781 BRCMF_SDIO_FT_SUB,
782 };
783
784 -#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
785 +#define SDIOD_DRVSTR_KEY(chip, pmu) (((unsigned int)(chip) << 16) | (pmu))
786
787 /* SDIO Pad drive strength to select value mappings */
788 struct sdiod_drive_str {
789 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
790 index cf611d1b817c0..e6d7646a0d9ca 100644
791 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
792 +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
793 @@ -76,7 +76,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
794 mt76_rmw_field(dev, 0x15a10, 0x1f << 16, 0x9);
795
796 /* RG_SSUSB_G1_CDR_BIC_LTR = 0xf */
797 - mt76_rmw_field(dev, 0x15a0c, 0xf << 28, 0xf);
798 + mt76_rmw_field(dev, 0x15a0c, 0xfU << 28, 0xf);
799
800 /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
801 mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
802 diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
803 index df352b334ea77..b377872a8f9d6 100644
804 --- a/drivers/perf/arm_pmu.c
805 +++ b/drivers/perf/arm_pmu.c
806 @@ -322,6 +322,9 @@ validate_group(struct perf_event *event)
807 if (!validate_event(event->pmu, &fake_pmu, leader))
808 return -EINVAL;
809
810 + if (event == leader)
811 + return 0;
812 +
813 for_each_sibling_event(sibling, leader) {
814 if (!validate_event(event->pmu, &fake_pmu, sibling))
815 return -EINVAL;
816 @@ -411,12 +414,7 @@ __hw_perf_event_init(struct perf_event *event)
817 local64_set(&hwc->period_left, hwc->sample_period);
818 }
819
820 - if (event->group_leader != event) {
821 - if (validate_group(event) != 0)
822 - return -EINVAL;
823 - }
824 -
825 - return 0;
826 + return validate_group(event);
827 }
828
829 static int armpmu_event_init(struct perf_event *event)
830 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
831 index 9b6a93ff41ffb..91e468fcaf7cc 100644
832 --- a/drivers/platform/x86/samsung-laptop.c
833 +++ b/drivers/platform/x86/samsung-laptop.c
834 @@ -1121,8 +1121,6 @@ static void kbd_led_set(struct led_classdev *led_cdev,
835
836 if (value > samsung->kbd_led.max_brightness)
837 value = samsung->kbd_led.max_brightness;
838 - else if (value < 0)
839 - value = 0;
840
841 samsung->kbd_led_wk = value;
842 queue_work(samsung->led_workqueue, &samsung->kbd_led_work);
843 diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
844 index 24d3395964cc4..4c5bba52b1059 100644
845 --- a/drivers/reset/tegra/reset-bpmp.c
846 +++ b/drivers/reset/tegra/reset-bpmp.c
847 @@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
848 struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
849 struct mrq_reset_request request;
850 struct tegra_bpmp_message msg;
851 + int err;
852
853 memset(&request, 0, sizeof(request));
854 request.cmd = command;
855 @@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
856 msg.tx.data = &request;
857 msg.tx.size = sizeof(request);
858
859 - return tegra_bpmp_transfer(bpmp, &msg);
860 + err = tegra_bpmp_transfer(bpmp, &msg);
861 + if (err)
862 + return err;
863 + if (msg.rx.ret)
864 + return -EINVAL;
865 +
866 + return 0;
867 }
868
869 static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
870 diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
871 index 755f66b1ff9c7..f05fb4ddeaff0 100644
872 --- a/drivers/scsi/qedi/qedi_iscsi.c
873 +++ b/drivers/scsi/qedi/qedi_iscsi.c
874 @@ -797,6 +797,37 @@ static int qedi_task_xmit(struct iscsi_task *task)
875 return qedi_iscsi_send_ioreq(task);
876 }
877
878 +static void qedi_offload_work(struct work_struct *work)
879 +{
880 + struct qedi_endpoint *qedi_ep =
881 + container_of(work, struct qedi_endpoint, offload_work);
882 + struct qedi_ctx *qedi;
883 + int wait_delay = 5 * HZ;
884 + int ret;
885 +
886 + qedi = qedi_ep->qedi;
887 +
888 + ret = qedi_iscsi_offload_conn(qedi_ep);
889 + if (ret) {
890 + QEDI_ERR(&qedi->dbg_ctx,
891 + "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
892 + qedi_ep->iscsi_cid, qedi_ep, ret);
893 + qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
894 + return;
895 + }
896 +
897 + ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
898 + (qedi_ep->state ==
899 + EP_STATE_OFLDCONN_COMPL),
900 + wait_delay);
901 + if (ret <= 0 || qedi_ep->state != EP_STATE_OFLDCONN_COMPL) {
902 + qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
903 + QEDI_ERR(&qedi->dbg_ctx,
904 + "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
905 + qedi_ep->iscsi_cid, qedi_ep);
906 + }
907 +}
908 +
909 static struct iscsi_endpoint *
910 qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
911 int non_blocking)
912 @@ -840,6 +871,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
913 }
914 qedi_ep = ep->dd_data;
915 memset(qedi_ep, 0, sizeof(struct qedi_endpoint));
916 + INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
917 qedi_ep->state = EP_STATE_IDLE;
918 qedi_ep->iscsi_cid = (u32)-1;
919 qedi_ep->qedi = qedi;
920 @@ -996,12 +1028,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
921 qedi_ep = ep->dd_data;
922 qedi = qedi_ep->qedi;
923
924 + flush_work(&qedi_ep->offload_work);
925 +
926 if (qedi_ep->state == EP_STATE_OFLDCONN_START)
927 goto ep_exit_recover;
928
929 - if (qedi_ep->state != EP_STATE_OFLDCONN_NONE)
930 - flush_work(&qedi_ep->offload_work);
931 -
932 if (qedi_ep->conn) {
933 qedi_conn = qedi_ep->conn;
934 conn = qedi_conn->cls_conn->dd_data;
935 @@ -1161,37 +1192,6 @@ static int qedi_data_avail(struct qedi_ctx *qedi, u16 vlanid)
936 return rc;
937 }
938
939 -static void qedi_offload_work(struct work_struct *work)
940 -{
941 - struct qedi_endpoint *qedi_ep =
942 - container_of(work, struct qedi_endpoint, offload_work);
943 - struct qedi_ctx *qedi;
944 - int wait_delay = 5 * HZ;
945 - int ret;
946 -
947 - qedi = qedi_ep->qedi;
948 -
949 - ret = qedi_iscsi_offload_conn(qedi_ep);
950 - if (ret) {
951 - QEDI_ERR(&qedi->dbg_ctx,
952 - "offload error: iscsi_cid=%u, qedi_ep=%p, ret=%d\n",
953 - qedi_ep->iscsi_cid, qedi_ep, ret);
954 - qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
955 - return;
956 - }
957 -
958 - ret = wait_event_interruptible_timeout(qedi_ep->tcp_ofld_wait,
959 - (qedi_ep->state ==
960 - EP_STATE_OFLDCONN_COMPL),
961 - wait_delay);
962 - if ((ret <= 0) || (qedi_ep->state != EP_STATE_OFLDCONN_COMPL)) {
963 - qedi_ep->state = EP_STATE_OFLDCONN_FAILED;
964 - QEDI_ERR(&qedi->dbg_ctx,
965 - "Offload conn TIMEOUT iscsi_cid=%u, qedi_ep=%p\n",
966 - qedi_ep->iscsi_cid, qedi_ep);
967 - }
968 -}
969 -
970 static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
971 {
972 struct qedi_ctx *qedi;
973 @@ -1307,7 +1307,6 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
974 qedi_ep->dst_addr, qedi_ep->dst_port);
975 }
976
977 - INIT_WORK(&qedi_ep->offload_work, qedi_offload_work);
978 queue_work(qedi->offload_thread, &qedi_ep->offload_work);
979
980 ret = 0;
981 diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
982 index 5fd929e023e18..b4d85fd62ce91 100644
983 --- a/drivers/spi/atmel-quadspi.c
984 +++ b/drivers/spi/atmel-quadspi.c
985 @@ -202,6 +202,9 @@ static int atmel_qspi_find_mode(const struct spi_mem_op *op)
986 static bool atmel_qspi_supports_op(struct spi_mem *mem,
987 const struct spi_mem_op *op)
988 {
989 + if (!spi_mem_default_supports_op(mem, op))
990 + return false;
991 +
992 if (atmel_qspi_find_mode(op) < 0)
993 return false;
994
995 diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
996 index e6b1ca141b930..88888cc5d1932 100644
997 --- a/drivers/staging/android/ion/ion.c
998 +++ b/drivers/staging/android/ion/ion.c
999 @@ -114,6 +114,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
1000 void *vaddr;
1001
1002 if (buffer->kmap_cnt) {
1003 + if (buffer->kmap_cnt == INT_MAX)
1004 + return ERR_PTR(-EOVERFLOW);
1005 +
1006 buffer->kmap_cnt++;
1007 return buffer->vaddr;
1008 }
1009 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1010 index f44b6f9d07776..79a18692b84c5 100644
1011 --- a/fs/cifs/cifsfs.c
1012 +++ b/fs/cifs/cifsfs.c
1013 @@ -889,7 +889,7 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
1014 ssize_t rc;
1015 struct inode *inode = file_inode(iocb->ki_filp);
1016
1017 - if (iocb->ki_filp->f_flags & O_DIRECT)
1018 + if (iocb->ki_flags & IOCB_DIRECT)
1019 return cifs_user_readv(iocb, iter);
1020
1021 rc = cifs_revalidate_mapping(inode);
1022 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1023 index ae2cb15d95407..e932e84823714 100644
1024 --- a/fs/ext4/ext4.h
1025 +++ b/fs/ext4/ext4.h
1026 @@ -1966,6 +1966,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
1027 * Structure of a directory entry
1028 */
1029 #define EXT4_NAME_LEN 255
1030 +/*
1031 + * Base length of the ext4 directory entry excluding the name length
1032 + */
1033 +#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
1034
1035 struct ext4_dir_entry {
1036 __le32 inode; /* Inode number */
1037 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1038 index 0d62f05f89256..00686fbe3c27d 100644
1039 --- a/fs/ext4/inode.c
1040 +++ b/fs/ext4/inode.c
1041 @@ -4311,7 +4311,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1042 struct super_block *sb = inode->i_sb;
1043 ext4_lblk_t first_block, stop_block;
1044 struct address_space *mapping = inode->i_mapping;
1045 - loff_t first_block_offset, last_block_offset;
1046 + loff_t first_block_offset, last_block_offset, max_length;
1047 + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1048 handle_t *handle;
1049 unsigned int credits;
1050 int ret = 0;
1051 @@ -4357,6 +4358,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
1052 offset;
1053 }
1054
1055 + /*
1056 + * For punch hole the length + offset needs to be within one block
1057 + * before last range. Adjust the length if it goes beyond that limit.
1058 + */
1059 + max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
1060 + if (offset + length > max_length)
1061 + length = max_length - offset;
1062 +
1063 if (offset & (sb->s_blocksize - 1) ||
1064 (offset + length) & (sb->s_blocksize - 1)) {
1065 /*
1066 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1067 index 9905720df9248..f10307215d583 100644
1068 --- a/fs/ext4/namei.c
1069 +++ b/fs/ext4/namei.c
1070 @@ -1385,10 +1385,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
1071
1072 de = (struct ext4_dir_entry_2 *)search_buf;
1073 dlimit = search_buf + buf_size;
1074 - while ((char *) de < dlimit) {
1075 + while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
1076 /* this code is executed quadratically often */
1077 /* do minimal checking `by hand' */
1078 - if ((char *) de + de->name_len <= dlimit &&
1079 + if (de->name + de->name_len <= dlimit &&
1080 ext4_match(dir, fname, de)) {
1081 /* found a match - just to be sure, do
1082 * a full check */
1083 diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
1084 index 2cc9f2168b9e4..b66b335a0ca6f 100644
1085 --- a/fs/ext4/page-io.c
1086 +++ b/fs/ext4/page-io.c
1087 @@ -100,8 +100,10 @@ static void ext4_finish_bio(struct bio *bio)
1088 continue;
1089 }
1090 clear_buffer_async_write(bh);
1091 - if (bio->bi_status)
1092 + if (bio->bi_status) {
1093 + set_buffer_write_io_error(bh);
1094 buffer_io_error(bh);
1095 + }
1096 } while ((bh = bh->b_this_page) != head);
1097 bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
1098 local_irq_restore(flags);
1099 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1100 index 5bc7fd0240a19..c13879bd21683 100644
1101 --- a/fs/ext4/super.c
1102 +++ b/fs/ext4/super.c
1103 @@ -3485,9 +3485,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
1104 ext4_fsblk_t first_block, last_block, b;
1105 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1106 int s, j, count = 0;
1107 + int has_super = ext4_bg_has_super(sb, grp);
1108
1109 if (!ext4_has_feature_bigalloc(sb))
1110 - return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
1111 + return (has_super + ext4_bg_num_gdb(sb, grp) +
1112 + (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
1113 sbi->s_itb_per_group + 2);
1114
1115 first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
1116 @@ -4512,9 +4514,18 @@ no_journal:
1117 * Get the # of file system overhead blocks from the
1118 * superblock if present.
1119 */
1120 - if (es->s_overhead_clusters)
1121 - sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1122 - else {
1123 + sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
1124 + /* ignore the precalculated value if it is ridiculous */
1125 + if (sbi->s_overhead > ext4_blocks_count(es))
1126 + sbi->s_overhead = 0;
1127 + /*
1128 + * If the bigalloc feature is not enabled recalculating the
1129 + * overhead doesn't take long, so we might as well just redo
1130 + * it to make sure we are using the correct value.
1131 + */
1132 + if (!ext4_has_feature_bigalloc(sb))
1133 + sbi->s_overhead = 0;
1134 + if (sbi->s_overhead == 0) {
1135 err = ext4_calculate_overhead(sb);
1136 if (err)
1137 goto failed_mount_wq;
1138 diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
1139 index d7ec0ac87fc03..8153a3eac540a 100644
1140 --- a/fs/gfs2/rgrp.c
1141 +++ b/fs/gfs2/rgrp.c
1142 @@ -925,15 +925,15 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1143 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
1144 spin_lock_init(&rgd->rd_rsspin);
1145
1146 - error = compute_bitstructs(rgd);
1147 - if (error)
1148 - goto fail;
1149 -
1150 error = gfs2_glock_get(sdp, rgd->rd_addr,
1151 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
1152 if (error)
1153 goto fail;
1154
1155 + error = compute_bitstructs(rgd);
1156 + if (error)
1157 + goto fail_glock;
1158 +
1159 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
1160 rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
1161 if (rgd->rd_data > sdp->sd_max_rg_data)
1162 @@ -950,6 +950,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
1163 }
1164
1165 error = 0; /* someone else read in the rgrp; free it and ignore it */
1166 +fail_glock:
1167 gfs2_glock_put(rgd->rd_gl);
1168
1169 fail:
1170 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
1171 index 88146008b3e36..d45ceb2e21492 100644
1172 --- a/fs/jbd2/commit.c
1173 +++ b/fs/jbd2/commit.c
1174 @@ -451,7 +451,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1175 }
1176 spin_unlock(&commit_transaction->t_handle_lock);
1177 commit_transaction->t_state = T_SWITCH;
1178 - write_unlock(&journal->j_state_lock);
1179
1180 J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
1181 journal->j_max_transaction_buffers);
1182 @@ -471,6 +470,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1183 * has reserved. This is consistent with the existing behaviour
1184 * that multiple jbd2_journal_get_write_access() calls to the same
1185 * buffer are perfectly permissible.
1186 + * We use journal->j_state_lock here to serialize processing of
1187 + * t_reserved_list with eviction of buffers from journal_unmap_buffer().
1188 */
1189 while (commit_transaction->t_reserved_list) {
1190 jh = commit_transaction->t_reserved_list;
1191 @@ -490,6 +491,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
1192 jbd2_journal_refile_buffer(journal, jh);
1193 }
1194
1195 + write_unlock(&journal->j_state_lock);
1196 /*
1197 * Now try to drop any written-back buffers from the journal's
1198 * checkpoint lists. We do this *before* commit because it potentially
1199 diff --git a/fs/stat.c b/fs/stat.c
1200 index c38e4c2e1221c..268c9eb896564 100644
1201 --- a/fs/stat.c
1202 +++ b/fs/stat.c
1203 @@ -290,9 +290,6 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
1204 # define choose_32_64(a,b) b
1205 #endif
1206
1207 -#define valid_dev(x) choose_32_64(old_valid_dev(x),true)
1208 -#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
1209 -
1210 #ifndef INIT_STRUCT_STAT_PADDING
1211 # define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
1212 #endif
1213 @@ -301,7 +298,9 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1214 {
1215 struct stat tmp;
1216
1217 - if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
1218 + if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1219 + return -EOVERFLOW;
1220 + if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1221 return -EOVERFLOW;
1222 #if BITS_PER_LONG == 32
1223 if (stat->size > MAX_NON_LFS)
1224 @@ -309,7 +308,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1225 #endif
1226
1227 INIT_STRUCT_STAT_PADDING(tmp);
1228 - tmp.st_dev = encode_dev(stat->dev);
1229 + tmp.st_dev = new_encode_dev(stat->dev);
1230 tmp.st_ino = stat->ino;
1231 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1232 return -EOVERFLOW;
1233 @@ -319,7 +318,7 @@ static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
1234 return -EOVERFLOW;
1235 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1236 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1237 - tmp.st_rdev = encode_dev(stat->rdev);
1238 + tmp.st_rdev = new_encode_dev(stat->rdev);
1239 tmp.st_size = stat->size;
1240 tmp.st_atime = stat->atime.tv_sec;
1241 tmp.st_mtime = stat->mtime.tv_sec;
1242 @@ -593,11 +592,13 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1243 {
1244 struct compat_stat tmp;
1245
1246 - if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
1247 + if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
1248 + return -EOVERFLOW;
1249 + if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
1250 return -EOVERFLOW;
1251
1252 memset(&tmp, 0, sizeof(tmp));
1253 - tmp.st_dev = old_encode_dev(stat->dev);
1254 + tmp.st_dev = new_encode_dev(stat->dev);
1255 tmp.st_ino = stat->ino;
1256 if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
1257 return -EOVERFLOW;
1258 @@ -607,7 +608,7 @@ static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
1259 return -EOVERFLOW;
1260 SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
1261 SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
1262 - tmp.st_rdev = old_encode_dev(stat->rdev);
1263 + tmp.st_rdev = new_encode_dev(stat->rdev);
1264 if ((u64) stat->size > MAX_NON_LFS)
1265 return -EOVERFLOW;
1266 tmp.st_size = stat->size;
1267 diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
1268 index f6564b572d779..0f1e95240c0c0 100644
1269 --- a/include/linux/etherdevice.h
1270 +++ b/include/linux/etherdevice.h
1271 @@ -127,7 +127,7 @@ static inline bool is_multicast_ether_addr(const u8 *addr)
1272 #endif
1273 }
1274
1275 -static inline bool is_multicast_ether_addr_64bits(const u8 addr[6+2])
1276 +static inline bool is_multicast_ether_addr_64bits(const u8 *addr)
1277 {
1278 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1279 #ifdef __BIG_ENDIAN
1280 @@ -341,8 +341,7 @@ static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
1281 * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
1282 */
1283
1284 -static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
1285 - const u8 addr2[6+2])
1286 +static inline bool ether_addr_equal_64bits(const u8 *addr1, const u8 *addr2)
1287 {
1288 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
1289 u64 fold = (*(const u64 *)addr1) ^ (*(const u64 *)addr2);
1290 diff --git a/include/linux/sched.h b/include/linux/sched.h
1291 index b341471de9d60..171cb7475b450 100644
1292 --- a/include/linux/sched.h
1293 +++ b/include/linux/sched.h
1294 @@ -1247,6 +1247,7 @@ struct task_struct {
1295 int pagefault_disabled;
1296 #ifdef CONFIG_MMU
1297 struct task_struct *oom_reaper_list;
1298 + struct timer_list oom_reaper_timer;
1299 #endif
1300 #ifdef CONFIG_VMAP_STACK
1301 struct vm_struct *stack_vm_area;
1302 diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
1303 index 59802eb8d2cc1..a1869a6789448 100644
1304 --- a/include/net/inet_hashtables.h
1305 +++ b/include/net/inet_hashtables.h
1306 @@ -247,8 +247,9 @@ void inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
1307 unsigned long high_limit);
1308 int inet_hashinfo2_init_mod(struct inet_hashinfo *h);
1309
1310 -bool inet_ehash_insert(struct sock *sk, struct sock *osk);
1311 -bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
1312 +bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk);
1313 +bool inet_ehash_nolisten(struct sock *sk, struct sock *osk,
1314 + bool *found_dup_sk);
1315 int __inet_hash(struct sock *sk, struct sock *osk);
1316 int inet_hash(struct sock *sk);
1317 void inet_unhash(struct sock *sk);
1318 diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
1319 index 3105dbf6c0e96..82580f7ffad95 100644
1320 --- a/kernel/trace/trace_events_trigger.c
1321 +++ b/kernel/trace/trace_events_trigger.c
1322 @@ -1219,7 +1219,14 @@ static void
1323 stacktrace_trigger(struct event_trigger_data *data, void *rec,
1324 struct ring_buffer_event *event)
1325 {
1326 - trace_dump_stack(STACK_SKIP);
1327 + struct trace_event_file *file = data->private_data;
1328 + unsigned long flags;
1329 +
1330 + if (file) {
1331 + local_save_flags(flags);
1332 + __trace_stack(file->tr, flags, STACK_SKIP, preempt_count());
1333 + } else
1334 + trace_dump_stack(STACK_SKIP);
1335 }
1336
1337 static void
1338 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1339 index dcbb9a28706fc..ee927ffeb718d 100644
1340 --- a/mm/oom_kill.c
1341 +++ b/mm/oom_kill.c
1342 @@ -631,7 +631,7 @@ done:
1343 */
1344 set_bit(MMF_OOM_SKIP, &mm->flags);
1345
1346 - /* Drop a reference taken by wake_oom_reaper */
1347 + /* Drop a reference taken by queue_oom_reaper */
1348 put_task_struct(tsk);
1349 }
1350
1351 @@ -641,12 +641,12 @@ static int oom_reaper(void *unused)
1352 struct task_struct *tsk = NULL;
1353
1354 wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
1355 - spin_lock(&oom_reaper_lock);
1356 + spin_lock_irq(&oom_reaper_lock);
1357 if (oom_reaper_list != NULL) {
1358 tsk = oom_reaper_list;
1359 oom_reaper_list = tsk->oom_reaper_list;
1360 }
1361 - spin_unlock(&oom_reaper_lock);
1362 + spin_unlock_irq(&oom_reaper_lock);
1363
1364 if (tsk)
1365 oom_reap_task(tsk);
1366 @@ -655,22 +655,48 @@ static int oom_reaper(void *unused)
1367 return 0;
1368 }
1369
1370 -static void wake_oom_reaper(struct task_struct *tsk)
1371 +static void wake_oom_reaper(struct timer_list *timer)
1372 {
1373 - /* mm is already queued? */
1374 - if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
1375 - return;
1376 + struct task_struct *tsk = container_of(timer, struct task_struct,
1377 + oom_reaper_timer);
1378 + struct mm_struct *mm = tsk->signal->oom_mm;
1379 + unsigned long flags;
1380
1381 - get_task_struct(tsk);
1382 + /* The victim managed to terminate on its own - see exit_mmap */
1383 + if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
1384 + put_task_struct(tsk);
1385 + return;
1386 + }
1387
1388 - spin_lock(&oom_reaper_lock);
1389 + spin_lock_irqsave(&oom_reaper_lock, flags);
1390 tsk->oom_reaper_list = oom_reaper_list;
1391 oom_reaper_list = tsk;
1392 - spin_unlock(&oom_reaper_lock);
1393 + spin_unlock_irqrestore(&oom_reaper_lock, flags);
1394 trace_wake_reaper(tsk->pid);
1395 wake_up(&oom_reaper_wait);
1396 }
1397
1398 +/*
1399 + * Give the OOM victim time to exit naturally before invoking the oom_reaping.
1400 + * The timers timeout is arbitrary... the longer it is, the longer the worst
1401 + * case scenario for the OOM can take. If it is too small, the oom_reaper can
1402 + * get in the way and release resources needed by the process exit path.
1403 + * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
1404 + * before the exit path is able to wake the futex waiters.
1405 + */
1406 +#define OOM_REAPER_DELAY (2*HZ)
1407 +static void queue_oom_reaper(struct task_struct *tsk)
1408 +{
1409 + /* mm is already queued? */
1410 + if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
1411 + return;
1412 +
1413 + get_task_struct(tsk);
1414 + timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
1415 + tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
1416 + add_timer(&tsk->oom_reaper_timer);
1417 +}
1418 +
1419 static int __init oom_init(void)
1420 {
1421 oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
1422 @@ -678,7 +704,7 @@ static int __init oom_init(void)
1423 }
1424 subsys_initcall(oom_init)
1425 #else
1426 -static inline void wake_oom_reaper(struct task_struct *tsk)
1427 +static inline void queue_oom_reaper(struct task_struct *tsk)
1428 {
1429 }
1430 #endif /* CONFIG_MMU */
1431 @@ -927,7 +953,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
1432 rcu_read_unlock();
1433
1434 if (can_oom_reap)
1435 - wake_oom_reaper(victim);
1436 + queue_oom_reaper(victim);
1437
1438 mmdrop(mm);
1439 put_task_struct(victim);
1440 @@ -963,7 +989,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
1441 task_lock(victim);
1442 if (task_will_free_mem(victim)) {
1443 mark_oom_victim(victim);
1444 - wake_oom_reaper(victim);
1445 + queue_oom_reaper(victim);
1446 task_unlock(victim);
1447 put_task_struct(victim);
1448 return;
1449 @@ -1061,7 +1087,7 @@ bool out_of_memory(struct oom_control *oc)
1450 */
1451 if (task_will_free_mem(current)) {
1452 mark_oom_victim(current);
1453 - wake_oom_reaper(current);
1454 + queue_oom_reaper(current);
1455 return true;
1456 }
1457
1458 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1459 index 7048ea59d58bd..f08ce248af2a9 100644
1460 --- a/mm/page_alloc.c
1461 +++ b/mm/page_alloc.c
1462 @@ -7588,7 +7588,7 @@ void __init mem_init_print_info(const char *str)
1463 */
1464 #define adj_init_size(start, end, size, pos, adj) \
1465 do { \
1466 - if (start <= pos && pos < end && size > adj) \
1467 + if (&start[0] <= &pos[0] && &pos[0] < &end[0] && size > adj) \
1468 size -= adj; \
1469 } while (0)
1470
1471 diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
1472 index d19557c6d04b5..7cf903f9e29a9 100644
1473 --- a/net/dccp/ipv4.c
1474 +++ b/net/dccp/ipv4.c
1475 @@ -427,7 +427,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
1476
1477 if (__inet_inherit_port(sk, newsk) < 0)
1478 goto put_and_exit;
1479 - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1480 + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1481 if (*own_req)
1482 ireq->ireq_opt = NULL;
1483 else
1484 diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1485 index 9f73ccf46c9b1..7c24927e9c2c2 100644
1486 --- a/net/dccp/ipv6.c
1487 +++ b/net/dccp/ipv6.c
1488 @@ -538,7 +538,7 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
1489 dccp_done(newsk);
1490 goto out;
1491 }
1492 - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1493 + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
1494 /* Clone pktoptions received with SYN, if we own the req */
1495 if (*own_req && ireq->pktopts) {
1496 newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
1497 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
1498 index 85a88425edc48..6cbf0db57ad06 100644
1499 --- a/net/ipv4/inet_connection_sock.c
1500 +++ b/net/ipv4/inet_connection_sock.c
1501 @@ -791,7 +791,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
1502 timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
1503 mod_timer(&req->rsk_timer, jiffies + timeout);
1504
1505 - inet_ehash_insert(req_to_sk(req), NULL);
1506 + inet_ehash_insert(req_to_sk(req), NULL, NULL);
1507 /* before letting lookups find us, make sure all req fields
1508 * are committed to memory and refcnt initialized.
1509 */
1510 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
1511 index 72fdf1fcbcaa9..cbbeb0eea0c35 100644
1512 --- a/net/ipv4/inet_hashtables.c
1513 +++ b/net/ipv4/inet_hashtables.c
1514 @@ -20,6 +20,9 @@
1515 #include <net/addrconf.h>
1516 #include <net/inet_connection_sock.h>
1517 #include <net/inet_hashtables.h>
1518 +#if IS_ENABLED(CONFIG_IPV6)
1519 +#include <net/inet6_hashtables.h>
1520 +#endif
1521 #include <net/secure_seq.h>
1522 #include <net/ip.h>
1523 #include <net/tcp.h>
1524 @@ -470,10 +473,52 @@ static u32 inet_sk_port_offset(const struct sock *sk)
1525 inet->inet_dport);
1526 }
1527
1528 -/* insert a socket into ehash, and eventually remove another one
1529 - * (The another one can be a SYN_RECV or TIMEWAIT
1530 +/* Searches for an exsiting socket in the ehash bucket list.
1531 + * Returns true if found, false otherwise.
1532 */
1533 -bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1534 +static bool inet_ehash_lookup_by_sk(struct sock *sk,
1535 + struct hlist_nulls_head *list)
1536 +{
1537 + const __portpair ports = INET_COMBINED_PORTS(sk->sk_dport, sk->sk_num);
1538 + const int sdif = sk->sk_bound_dev_if;
1539 + const int dif = sk->sk_bound_dev_if;
1540 + const struct hlist_nulls_node *node;
1541 + struct net *net = sock_net(sk);
1542 + struct sock *esk;
1543 +
1544 + INET_ADDR_COOKIE(acookie, sk->sk_daddr, sk->sk_rcv_saddr);
1545 +
1546 + sk_nulls_for_each_rcu(esk, node, list) {
1547 + if (esk->sk_hash != sk->sk_hash)
1548 + continue;
1549 + if (sk->sk_family == AF_INET) {
1550 + if (unlikely(INET_MATCH(esk, net, acookie,
1551 + sk->sk_daddr,
1552 + sk->sk_rcv_saddr,
1553 + ports, dif, sdif))) {
1554 + return true;
1555 + }
1556 + }
1557 +#if IS_ENABLED(CONFIG_IPV6)
1558 + else if (sk->sk_family == AF_INET6) {
1559 + if (unlikely(INET6_MATCH(esk, net,
1560 + &sk->sk_v6_daddr,
1561 + &sk->sk_v6_rcv_saddr,
1562 + ports, dif, sdif))) {
1563 + return true;
1564 + }
1565 + }
1566 +#endif
1567 + }
1568 + return false;
1569 +}
1570 +
1571 +/* Insert a socket into ehash, and eventually remove another one
1572 + * (the other one can be a SYN_RECV or TIMEWAIT socket).
1573 + * If a duplicate socket already exists, sk is not inserted
1574 + * and the found_dup_sk parameter is set to true.
1575 + */
1576 +bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1577 {
1578 struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
1579 struct hlist_nulls_head *list;
1580 @@ -492,16 +537,23 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk)
1581 if (osk) {
1582 WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
1583 ret = sk_nulls_del_node_init_rcu(osk);
1584 + } else if (found_dup_sk) {
1585 + *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
1586 + if (*found_dup_sk)
1587 + ret = false;
1588 }
1589 +
1590 if (ret)
1591 __sk_nulls_add_node_rcu(sk, list);
1592 +
1593 spin_unlock(lock);
1594 +
1595 return ret;
1596 }
1597
1598 -bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
1599 +bool inet_ehash_nolisten(struct sock *sk, struct sock *osk, bool *found_dup_sk)
1600 {
1601 - bool ok = inet_ehash_insert(sk, osk);
1602 + bool ok = inet_ehash_insert(sk, osk, found_dup_sk);
1603
1604 if (ok) {
1605 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
1606 @@ -545,7 +597,7 @@ int __inet_hash(struct sock *sk, struct sock *osk)
1607 int err = 0;
1608
1609 if (sk->sk_state != TCP_LISTEN) {
1610 - inet_ehash_nolisten(sk, osk);
1611 + inet_ehash_nolisten(sk, osk, NULL);
1612 return 0;
1613 }
1614 WARN_ON(!sk_unhashed(sk));
1615 @@ -641,7 +693,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
1616 tb = inet_csk(sk)->icsk_bind_hash;
1617 spin_lock_bh(&head->lock);
1618 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
1619 - inet_ehash_nolisten(sk, NULL);
1620 + inet_ehash_nolisten(sk, NULL, NULL);
1621 spin_unlock_bh(&head->lock);
1622 return 0;
1623 }
1624 @@ -720,7 +772,7 @@ ok:
1625 inet_bind_hash(sk, tb, port);
1626 if (sk_unhashed(sk)) {
1627 inet_sk(sk)->inet_sport = htons(port);
1628 - inet_ehash_nolisten(sk, (struct sock *)tw);
1629 + inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
1630 }
1631 if (tw)
1632 inet_twsk_bind_unhash(tw, hinfo);
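
The core of the ehash change is a check-then-insert under the bucket lock: before linking a syncookie child, the chain is scanned for a socket with the same identity, and a duplicate is reported through the new found_dup_sk out-parameter instead of being inserted alongside. A minimal userspace sketch of that pattern, with a mutex standing in for the bucket spinlock; names and structure are illustrative, not the kernel's:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
    unsigned long key;          /* stands in for the hashed 4-tuple */
    struct entry *next;
};

struct bucket {
    pthread_mutex_t lock;
    struct entry *head;
};

static bool bucket_contains(const struct bucket *b, unsigned long key)
{
    for (const struct entry *e = b->head; e; e = e->next)
        if (e->key == key)
            return true;
    return false;
}

/* Returns true if inserted. If found_dup is non-NULL and an equivalent
 * entry already exists, *found_dup is set and nothing is inserted. */
static bool bucket_insert(struct bucket *b, struct entry *e, bool *found_dup)
{
    bool ret = true;

    pthread_mutex_lock(&b->lock);
    if (found_dup) {
        *found_dup = bucket_contains(b, e->key);
        if (*found_dup)
            ret = false;
    }
    if (ret) {
        e->next = b->head;
        b->head = e;
    }
    pthread_mutex_unlock(&b->lock);
    return ret;
}

int main(void)
{
    struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };
    struct entry first = { .key = 42 }, second = { .key = 42 };
    bool dup = false;

    bucket_insert(&b, &first, NULL);
    if (!bucket_insert(&b, &second, &dup) && dup)
        printf("duplicate detected; caller drops the losing object\n");
    return 0;
}

As in the tcp_v4_syn_recv_sock() hunk below, the caller that loses the race is responsible for disposing of its own object.
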
1633 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1634 index 2ce85e52aea7c..426d70d45eda4 100644
1635 --- a/net/ipv4/tcp_ipv4.c
1636 +++ b/net/ipv4/tcp_ipv4.c
1637 @@ -1426,6 +1426,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1638 bool *own_req)
1639 {
1640 struct inet_request_sock *ireq;
1641 + bool found_dup_sk = false;
1642 struct inet_sock *newinet;
1643 struct tcp_sock *newtp;
1644 struct sock *newsk;
1645 @@ -1496,12 +1497,22 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1646
1647 if (__inet_inherit_port(sk, newsk) < 0)
1648 goto put_and_exit;
1649 - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1650 + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1651 + &found_dup_sk);
1652 if (likely(*own_req)) {
1653 tcp_move_syn(newtp, req);
1654 ireq->ireq_opt = NULL;
1655 } else {
1656 newinet->inet_opt = NULL;
1657 +
1658 + if (!req_unhash && found_dup_sk) {
1659 + /* This code path should only be executed in the
1660 + * syncookie case
1661 + */
1662 + bh_unlock_sock(newsk);
1663 + sock_put(newsk);
1664 + newsk = NULL;
1665 + }
1666 }
1667 return newsk;
1668
1669 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1670 index 3903cc0ab1883..51c900e9bfe20 100644
1671 --- a/net/ipv6/tcp_ipv6.c
1672 +++ b/net/ipv6/tcp_ipv6.c
1673 @@ -1142,6 +1142,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1674 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1675 struct ipv6_txoptions *opt;
1676 struct inet_sock *newinet;
1677 + bool found_dup_sk = false;
1678 struct tcp_sock *newtp;
1679 struct sock *newsk;
1680 #ifdef CONFIG_TCP_MD5SIG
1681 @@ -1308,7 +1309,8 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1682 tcp_done(newsk);
1683 goto out;
1684 }
1685 - *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1686 + *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1687 + &found_dup_sk);
1688 if (*own_req) {
1689 tcp_move_syn(newtp, req);
1690
1691 @@ -1323,6 +1325,15 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1692 skb_set_owner_r(newnp->pktoptions, newsk);
1693 }
1694 }
1695 + } else {
1696 + if (!req_unhash && found_dup_sk) {
1697 + /* This code path should only be executed in the
1698 + * syncookie case
1699 + */
1700 + bh_unlock_sock(newsk);
1701 + sock_put(newsk);
1702 + newsk = NULL;
1703 + }
1704 }
1705
1706 return newsk;
1707 diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
1708 index f35899d45a9af..ff4352f6d168a 100644
1709 --- a/net/l3mdev/l3mdev.c
1710 +++ b/net/l3mdev/l3mdev.c
1711 @@ -54,7 +54,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
1712
1713 dev = dev_get_by_index_rcu(net, ifindex);
1714 while (dev && !netif_is_l3_master(dev))
1715 - dev = netdev_master_upper_dev_get(dev);
1716 + dev = netdev_master_upper_dev_get_rcu(dev);
1717
1718 return dev ? dev->ifindex : 0;
1719 }
1720 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1721 index fb28969899af0..8aefc52542a00 100644
1722 --- a/net/netlink/af_netlink.c
1723 +++ b/net/netlink/af_netlink.c
1724 @@ -2253,6 +2253,13 @@ static int netlink_dump(struct sock *sk)
1725 * single netdev. The outcome is MSG_TRUNC error.
1726 */
1727 skb_reserve(skb, skb_tailroom(skb) - alloc_size);
1728 +
1729 + /* Make sure malicious BPF programs cannot read uninitialized memory
1730 + * from skb->head -> skb->data
1731 + */
1732 + skb_reset_network_header(skb);
1733 + skb_reset_mac_header(skb);
1734 +
1735 netlink_skb_set_owner_r(skb, sk);
1736
1737 if (nlk->dump_done_errno > 0) {
1738 diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
1739 index d3f068ad154cb..8461de79f67b4 100644
1740 --- a/net/openvswitch/flow_netlink.c
1741 +++ b/net/openvswitch/flow_netlink.c
1742 @@ -2329,7 +2329,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
1743 new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
1744
1745 if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
1746 - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
1747 + if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) {
1748 OVS_NLERR(log, "Flow action size exceeds max %u",
1749 MAX_ACTIONS_BUFSIZE);
1750 return ERR_PTR(-EMSGSIZE);
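
The one-line openvswitch change sidesteps a mixed signed/unsigned comparison: if the current offset already exceeds the cap, the remaining space computes to a negative value that is converted to a huge unsigned number, so the old bound never fires. A small demonstration, assuming (as in this sketch) a signed offset checked against an unsigned request size; the constant and names are illustrative:

#include <stdio.h>
#include <stddef.h>

#define MAX_BUFSIZE (32 * 1024)

static int old_check(int off, size_t req)
{
    /* (MAX_BUFSIZE - off) is a negative int when off > MAX_BUFSIZE;
     * comparing it with a size_t converts it to a huge unsigned value,
     * so the oversized request is never rejected. */
    return (MAX_BUFSIZE - off) < req;
}

static int new_check(int off, size_t req)
{
    /* Folding off into the unsigned sum keeps the bound honest. */
    return (off + req) > MAX_BUFSIZE;
}

int main(void)
{
    int off = MAX_BUFSIZE + 8;   /* offset already past the cap */
    size_t req = 16;

    printf("old check rejects: %d\n", old_check(off, req)); /* 0: the bug */
    printf("new check rejects: %d\n", new_check(off, req)); /* 1 */
    return 0;
}
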
1751 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1752 index 70c102359bfef..a2696acbcd9d2 100644
1753 --- a/net/packet/af_packet.c
1754 +++ b/net/packet/af_packet.c
1755 @@ -2791,8 +2791,9 @@ tpacket_error:
1756
1757 status = TP_STATUS_SEND_REQUEST;
1758 err = po->xmit(skb);
1759 - if (unlikely(err > 0)) {
1760 - err = net_xmit_errno(err);
1761 + if (unlikely(err != 0)) {
1762 + if (err > 0)
1763 + err = net_xmit_errno(err);
1764 if (err && __packet_get_status(po, ph) ==
1765 TP_STATUS_AVAILABLE) {
1766 /* skb was destructed already */
1767 @@ -2993,8 +2994,12 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1768 skb->no_fcs = 1;
1769
1770 err = po->xmit(skb);
1771 - if (err > 0 && (err = net_xmit_errno(err)) != 0)
1772 - goto out_unlock;
1773 + if (unlikely(err != 0)) {
1774 + if (err > 0)
1775 + err = net_xmit_errno(err);
1776 + if (err)
1777 + goto out_unlock;
1778 + }
1779
1780 dev_put(dev);
1781
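
Both af_packet hunks converge on the same shape: the transmit hook can return either a positive congestion code that must be mapped to an errno or a negative errno directly, and the old tests on err > 0 let negative returns fall through as success. A hedged sketch of the corrected flow; the mapping function and codes below are illustrative stand-ins, not the kernel's:

#include <errno.h>
#include <stdio.h>

#define XMIT_SUCCESS 0
#define XMIT_DROP    1   /* stand-in for a positive congestion code */

static int map_xmit_code(int code)
{
    return code == XMIT_DROP ? -ENOBUFS : 0;
}

static int send_one(int xmit_ret)
{
    int err = xmit_ret;

    if (err != 0) {          /* handle both return conventions */
        if (err > 0)
            err = map_xmit_code(err);
        if (err)
            return err;      /* propagate the failure */
    }
    return 0;                /* sent, or congestion mapped to success */
}

int main(void)
{
    printf("%d\n", send_one(XMIT_SUCCESS));  /* 0 */
    printf("%d\n", send_one(XMIT_DROP));     /* -ENOBUFS */
    printf("%d\n", send_one(-ENETDOWN));     /* no longer swallowed */
    return 0;
}
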
1782 diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
1783 index 9a76b74af37bc..91a503871116b 100644
1784 --- a/net/rxrpc/net_ns.c
1785 +++ b/net/rxrpc/net_ns.c
1786 @@ -116,7 +116,9 @@ static __net_exit void rxrpc_exit_net(struct net *net)
1787 struct rxrpc_net *rxnet = rxrpc_net(net);
1788
1789 rxnet->live = false;
1790 + del_timer_sync(&rxnet->peer_keepalive_timer);
1791 cancel_work_sync(&rxnet->peer_keepalive_work);
1792 + /* Remove the timer again as the worker may have restarted it. */
1793 del_timer_sync(&rxnet->peer_keepalive_timer);
1794 rxrpc_destroy_all_calls(rxnet);
1795 rxrpc_destroy_all_connections(rxnet);
1796 diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1797 index e15ff335953de..ed8d26e6468ca 100644
1798 --- a/net/sched/cls_u32.c
1799 +++ b/net/sched/cls_u32.c
1800 @@ -386,14 +386,19 @@ static int u32_init(struct tcf_proto *tp)
1801 return 0;
1802 }
1803
1804 -static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1805 +static void __u32_destroy_key(struct tc_u_knode *n)
1806 {
1807 struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
1808
1809 tcf_exts_destroy(&n->exts);
1810 - tcf_exts_put_net(&n->exts);
1811 if (ht && --ht->refcnt == 0)
1812 kfree(ht);
1813 + kfree(n);
1814 +}
1815 +
1816 +static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1817 +{
1818 + tcf_exts_put_net(&n->exts);
1819 #ifdef CONFIG_CLS_U32_PERF
1820 if (free_pf)
1821 free_percpu(n->pf);
1822 @@ -402,8 +407,7 @@ static int u32_destroy_key(struct tc_u_knode *n, bool free_pf)
1823 if (free_pf)
1824 free_percpu(n->pcpu_success);
1825 #endif
1826 - kfree(n);
1827 - return 0;
1828 + __u32_destroy_key(n);
1829 }
1830
1831 /* u32_delete_key_rcu should be called when free'ing a copied
1832 @@ -812,10 +816,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
1833 new->flags = n->flags;
1834 RCU_INIT_POINTER(new->ht_down, ht);
1835
1836 - /* bump reference count as long as we hold pointer to structure */
1837 - if (ht)
1838 - ht->refcnt++;
1839 -
1840 #ifdef CONFIG_CLS_U32_PERF
1841 /* Statistics may be incremented by readers during update
1842 * so we must keep them intact. When the node is later destroyed
1843 @@ -837,6 +837,10 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
1844 return NULL;
1845 }
1846
1847 + /* bump reference count as long as we hold pointer to structure */
1848 + if (ht)
1849 + ht->refcnt++;
1850 +
1851 return new;
1852 }
1853
1854 @@ -903,13 +907,13 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
1855 tca[TCA_RATE], ovr, extack);
1856
1857 if (err) {
1858 - u32_destroy_key(new, false);
1859 + __u32_destroy_key(new);
1860 return err;
1861 }
1862
1863 err = u32_replace_hw_knode(tp, new, flags, extack);
1864 if (err) {
1865 - u32_destroy_key(new, false);
1866 + __u32_destroy_key(new);
1867 return err;
1868 }
1869
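
The cls_u32 hunks fix a reference-count leak by moving the ht->refcnt increment past the last point at which u32_init_knode() can still fail, so the error paths have nothing to unwind. A minimal userspace sketch of that ordering rule, with illustrative names:

#include <stdatomic.h>
#include <stdlib.h>

struct shared {
    atomic_int refcnt;
};

struct node {
    struct shared *down;
};

static struct node *clone_node(struct shared *down, int later_step_ok)
{
    struct node *n = malloc(sizeof(*n));

    if (!n)
        return NULL;
    n->down = down;

    if (!later_step_ok) {   /* e.g. a failed nested allocation */
        free(n);
        return NULL;        /* no reference was taken, nothing leaks */
    }

    /* Success is now guaranteed: take the reference last. */
    if (down)
        atomic_fetch_add(&down->refcnt, 1);
    return n;
}

int main(void)
{
    struct shared s = { 1 };
    struct node *ok = clone_node(&s, 1);
    struct node *bad = clone_node(&s, 0);   /* fails without leaking a ref */

    (void)bad;
    free(ok);
    return 0;
}
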
1870 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
1871 index 06684ac346abd..5221092cc66d4 100644
1872 --- a/net/smc/af_smc.c
1873 +++ b/net/smc/af_smc.c
1874 @@ -1698,8 +1698,10 @@ static int smc_shutdown(struct socket *sock, int how)
1875 if (smc->use_fallback) {
1876 rc = kernel_sock_shutdown(smc->clcsock, how);
1877 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
1878 - if (sk->sk_shutdown == SHUTDOWN_MASK)
1879 + if (sk->sk_shutdown == SHUTDOWN_MASK) {
1880 sk->sk_state = SMC_CLOSED;
1881 + sock_put(sk);
1882 + }
1883 goto out;
1884 }
1885 switch (how) {
1886 diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
1887 index 05277a88e20d8..d1579896f3a11 100644
1888 --- a/sound/soc/atmel/sam9g20_wm8731.c
1889 +++ b/sound/soc/atmel/sam9g20_wm8731.c
1890 @@ -46,35 +46,6 @@
1891 */
1892 #undef ENABLE_MIC_INPUT
1893
1894 -static struct clk *mclk;
1895 -
1896 -static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
1897 - struct snd_soc_dapm_context *dapm,
1898 - enum snd_soc_bias_level level)
1899 -{
1900 - static int mclk_on;
1901 - int ret = 0;
1902 -
1903 - switch (level) {
1904 - case SND_SOC_BIAS_ON:
1905 - case SND_SOC_BIAS_PREPARE:
1906 - if (!mclk_on)
1907 - ret = clk_enable(mclk);
1908 - if (ret == 0)
1909 - mclk_on = 1;
1910 - break;
1911 -
1912 - case SND_SOC_BIAS_OFF:
1913 - case SND_SOC_BIAS_STANDBY:
1914 - if (mclk_on)
1915 - clk_disable(mclk);
1916 - mclk_on = 0;
1917 - break;
1918 - }
1919 -
1920 - return ret;
1921 -}
1922 -
1923 static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
1924 SND_SOC_DAPM_MIC("Int Mic", NULL),
1925 SND_SOC_DAPM_SPK("Ext Spk", NULL),
1926 @@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
1927 .owner = THIS_MODULE,
1928 .dai_link = &at91sam9g20ek_dai,
1929 .num_links = 1,
1930 - .set_bias_level = at91sam9g20ek_set_bias_level,
1931
1932 .dapm_widgets = at91sam9g20ek_dapm_widgets,
1933 .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
1934 @@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1935 {
1936 struct device_node *np = pdev->dev.of_node;
1937 struct device_node *codec_np, *cpu_np;
1938 - struct clk *pllb;
1939 struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
1940 int ret;
1941
1942 @@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1943 return -EINVAL;
1944 }
1945
1946 - /*
1947 - * Codec MCLK is supplied by PCK0 - set it up.
1948 - */
1949 - mclk = clk_get(NULL, "pck0");
1950 - if (IS_ERR(mclk)) {
1951 - dev_err(&pdev->dev, "Failed to get MCLK\n");
1952 - ret = PTR_ERR(mclk);
1953 - goto err;
1954 - }
1955 -
1956 - pllb = clk_get(NULL, "pllb");
1957 - if (IS_ERR(pllb)) {
1958 - dev_err(&pdev->dev, "Failed to get PLLB\n");
1959 - ret = PTR_ERR(pllb);
1960 - goto err_mclk;
1961 - }
1962 - ret = clk_set_parent(mclk, pllb);
1963 - clk_put(pllb);
1964 - if (ret != 0) {
1965 - dev_err(&pdev->dev, "Failed to set MCLK parent\n");
1966 - goto err_mclk;
1967 - }
1968 -
1969 - clk_set_rate(mclk, MCLK_RATE);
1970 -
1971 card->dev = &pdev->dev;
1972
1973 /* Parse device node info */
1974 @@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
1975
1976 return ret;
1977
1978 -err_mclk:
1979 - clk_put(mclk);
1980 - mclk = NULL;
1981 err:
1982 atmel_ssc_put_audio(0);
1983 return ret;
1984 @@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
1985 {
1986 struct snd_soc_card *card = platform_get_drvdata(pdev);
1987
1988 - clk_disable(mclk);
1989 - mclk = NULL;
1990 snd_soc_unregister_card(card);
1991 atmel_ssc_put_audio(0);
1992
1993 diff --git a/sound/soc/codecs/msm8916-wcd-digital.c b/sound/soc/codecs/msm8916-wcd-digital.c
1994 index d5269ab5f91c5..e4cde214b7b2d 100644
1995 --- a/sound/soc/codecs/msm8916-wcd-digital.c
1996 +++ b/sound/soc/codecs/msm8916-wcd-digital.c
1997 @@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
1998
1999 dev_set_drvdata(dev, priv);
2000
2001 - return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2002 + ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
2003 msm8916_wcd_digital_dai,
2004 ARRAY_SIZE(msm8916_wcd_digital_dai));
2005 + if (ret)
2006 + goto err_mclk;
2007 +
2008 + return 0;
2009 +
2010 +err_mclk:
2011 + clk_disable_unprepare(priv->mclk);
2012 err_clk:
2013 clk_disable_unprepare(priv->ahbclk);
2014 return ret;
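
The msm8916-wcd-digital hunk completes the probe error path: component registration was the one fallible step that returned without unwinding the already-enabled MCLK. The usual goto-ladder shape, sketched with illustrative stand-in functions:

#include <stdio.h>

static int enable_clock(const char *name) { printf("enable %s\n", name); return 0; }
static void disable_clock(const char *name) { printf("disable %s\n", name); }
static int register_component(int should_fail) { return should_fail ? -1 : 0; }

static int probe(int fail_register)
{
    int ret;

    ret = enable_clock("ahbclk");
    if (ret)
        return ret;
    ret = enable_clock("mclk");
    if (ret)
        goto err_ahb;

    ret = register_component(fail_register);
    if (ret)
        goto err_mclk;      /* the fix: unwind mclk as well */

    return 0;

err_mclk:
    disable_clock("mclk");
err_ahb:
    disable_clock("ahbclk");
    return ret;
}

int main(void)
{
    probe(1);   /* exercise the error path */
    return 0;
}
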
2015 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2016 index 5876be5dd9bae..1c09dfb0c0f09 100644
2017 --- a/sound/soc/soc-dapm.c
2018 +++ b/sound/soc/soc-dapm.c
2019 @@ -1676,8 +1676,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2020 switch (w->id) {
2021 case snd_soc_dapm_pre:
2022 if (!w->event)
2023 - list_for_each_entry_safe_continue(w, n, list,
2024 - power_list);
2025 + continue;
2026
2027 if (event == SND_SOC_DAPM_STREAM_START)
2028 ret = w->event(w,
2029 @@ -1689,8 +1688,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
2030
2031 case snd_soc_dapm_post:
2032 if (!w->event)
2033 - list_for_each_entry_safe_continue(w, n, list,
2034 - power_list);
2035 + continue;
2036
2037 if (event == SND_SOC_DAPM_STREAM_START)
2038 ret = w->event(w,
2039 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
2040 index 33e9a7f6246f7..ce501200e592f 100644
2041 --- a/sound/usb/midi.c
2042 +++ b/sound/usb/midi.c
2043 @@ -1210,6 +1210,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
2044 } while (drain_urbs && timeout);
2045 finish_wait(&ep->drain_wait, &wait);
2046 }
2047 + port->active = 0;
2048 spin_unlock_irq(&ep->buffer_lock);
2049 }
2050
2051 diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
2052 index ff97fdcf63bd5..b1959e04cbb14 100644
2053 --- a/sound/usb/usbaudio.h
2054 +++ b/sound/usb/usbaudio.h
2055 @@ -8,7 +8,7 @@
2056 */
2057
2058 /* handling of USB vendor/product ID pairs as 32-bit numbers */
2059 -#define USB_ID(vendor, product) (((vendor) << 16) | (product))
2060 +#define USB_ID(vendor, product) (((unsigned int)(vendor) << 16) | (product))
2061 #define USB_ID_VENDOR(id) ((id) >> 16)
2062 #define USB_ID_PRODUCT(id) ((u16)(id))
2063
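
The usbaudio.h change casts the vendor ID to unsigned before the shift: a 16-bit vendor with its top bit set, shifted left by 16 as a signed int, lands in the sign bit, which is undefined behaviour, is flagged by sanitizers, and sign-extends if the result is widened. A small standalone comparison of the two macro forms; the vendor/product values are arbitrary examples:

#include <stdio.h>

#define USB_ID_OLD(vendor, product) (((vendor) << 16) | (product))
#define USB_ID_NEW(vendor, product) (((unsigned int)(vendor) << 16) | (product))

int main(void)
{
    /* On typical platforms both yield the same 32-bit pattern, but the
     * old form is undefined behaviour (a shift into the sign bit) and
     * would sign-extend if widened to a 64-bit type. */
    unsigned int old_id = USB_ID_OLD(0x8086, 0x1234);  /* UB in the shift */
    unsigned int new_id = USB_ID_NEW(0x8086, 0x1234);  /* well-defined */

    printf("old: 0x%08x\nnew: 0x%08x\n", old_id, new_id);
    return 0;
}
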
2064 diff --git a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2065 index fedcb7b35af9f..af5ea50ed5c0e 100755
2066 --- a/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2067 +++ b/tools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh
2068 @@ -172,6 +172,17 @@ flooding_filters_add()
2069 local lsb
2070 local i
2071
2072 + # Prevent unwanted packets from entering the bridge and interfering
2073 + # with the test.
2074 + tc qdisc add dev br0 clsact
2075 + tc filter add dev br0 egress protocol all pref 1 handle 1 \
2076 + matchall skip_hw action drop
2077 + tc qdisc add dev $h1 clsact
2078 + tc filter add dev $h1 egress protocol all pref 1 handle 1 \
2079 + flower skip_hw dst_mac de:ad:be:ef:13:37 action pass
2080 + tc filter add dev $h1 egress protocol all pref 2 handle 2 \
2081 + matchall skip_hw action drop
2082 +
2083 tc qdisc add dev $rp2 clsact
2084
2085 for i in $(eval echo {1..$num_remotes}); do
2086 @@ -194,6 +205,12 @@ flooding_filters_del()
2087 done
2088
2089 tc qdisc del dev $rp2 clsact
2090 +
2091 + tc filter del dev $h1 egress protocol all pref 2 handle 2 matchall
2092 + tc filter del dev $h1 egress protocol all pref 1 handle 1 flower
2093 + tc qdisc del dev $h1 clsact
2094 + tc filter del dev br0 egress protocol all pref 1 handle 1 matchall
2095 + tc qdisc del dev br0 clsact
2096 }
2097
2098 flooding_check_packets()