Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0262-5.4.163-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 156717 bytes
-sync kernel patches
1 diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
2 index 38dc56a577604..ecec514b31550 100644
3 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
4 +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-37xx-pinctrl.txt
5 @@ -43,19 +43,19 @@ group emmc_nb
6
7 group pwm0
8 - pin 11 (GPIO1-11)
9 - - functions pwm, gpio
10 + - functions pwm, led, gpio
11
12 group pwm1
13 - pin 12
14 - - functions pwm, gpio
15 + - functions pwm, led, gpio
16
17 group pwm2
18 - pin 13
19 - - functions pwm, gpio
20 + - functions pwm, led, gpio
21
22 group pwm3
23 - pin 14
24 - - functions pwm, gpio
25 + - functions pwm, led, gpio
26
27 group pmic1
28 - pin 7
29 diff --git a/Documentation/networking/ipvs-sysctl.txt b/Documentation/networking/ipvs-sysctl.txt
30 index 056898685d408..fc531c29a2e83 100644
31 --- a/Documentation/networking/ipvs-sysctl.txt
32 +++ b/Documentation/networking/ipvs-sysctl.txt
33 @@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER
34
35 0: disable any special handling on port reuse. The new
36 connection will be delivered to the same real server that was
37 - servicing the previous connection. This will effectively
38 - disable expire_nodest_conn.
39 + servicing the previous connection.
40
41 bit 1: enable rescheduling of new connections when it is safe.
42 That is, whenever expire_nodest_conn and for TCP sockets, when
43 diff --git a/Makefile b/Makefile
44 index e8b05f7d3b238..91d77df0128b4 100644
45 --- a/Makefile
46 +++ b/Makefile
47 @@ -1,7 +1,7 @@
48 # SPDX-License-Identifier: GPL-2.0
49 VERSION = 5
50 PATCHLEVEL = 4
51 -SUBLEVEL = 162
52 +SUBLEVEL = 163
53 EXTRAVERSION =
54 NAME = Kleptomaniac Octopus
55
56 diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
57 index 9711170649b69..05d67f9769118 100644
58 --- a/arch/arm/boot/dts/bcm5301x.dtsi
59 +++ b/arch/arm/boot/dts/bcm5301x.dtsi
60 @@ -242,6 +242,8 @@
61
62 gpio-controller;
63 #gpio-cells = <2>;
64 + interrupt-controller;
65 + #interrupt-cells = <2>;
66 };
67
68 pcie0: pcie@12000 {
69 @@ -387,7 +389,7 @@
70 i2c0: i2c@18009000 {
71 compatible = "brcm,iproc-i2c";
72 reg = <0x18009000 0x50>;
73 - interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
74 + interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
75 #address-cells = <1>;
76 #size-cells = <0>;
77 clock-frequency = <100000>;
78 diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
79 index fc2608b18a0d0..18f01190dcfd4 100644
80 --- a/arch/arm/mach-socfpga/core.h
81 +++ b/arch/arm/mach-socfpga/core.h
82 @@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
83 u32 socfpga_sdram_self_refresh(u32 sdr_base);
84 extern unsigned int socfpga_sdram_self_refresh_sz;
85
86 -extern char secondary_trampoline, secondary_trampoline_end;
87 +extern char secondary_trampoline[], secondary_trampoline_end[];
88
89 extern unsigned long socfpga_cpu1start_addr;
90
91 diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
92 index fbb80b883e5dd..201191cf68f32 100644
93 --- a/arch/arm/mach-socfpga/platsmp.c
94 +++ b/arch/arm/mach-socfpga/platsmp.c
95 @@ -20,14 +20,14 @@
96
97 static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
98 {
99 - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
100 + int trampoline_size = secondary_trampoline_end - secondary_trampoline;
101
102 if (socfpga_cpu1start_addr) {
103 /* This will put CPU #1 into reset. */
104 writel(RSTMGR_MPUMODRST_CPU1,
105 rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
106
107 - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
108 + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
109
110 writel(__pa_symbol(secondary_startup),
111 sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
112 @@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
113
114 static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
115 {
116 - int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
117 + int trampoline_size = secondary_trampoline_end - secondary_trampoline;
118
119 if (socfpga_cpu1start_addr) {
120 writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
121 SOCFPGA_A10_RSTMGR_MODMPURST);
122 - memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
123 + memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
124
125 writel(__pa_symbol(secondary_startup),
126 sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
127 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-db.dts b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
128 index f2cc00594d64a..3e5789f372069 100644
129 --- a/arch/arm64/boot/dts/marvell/armada-3720-db.dts
130 +++ b/arch/arm64/boot/dts/marvell/armada-3720-db.dts
131 @@ -128,6 +128,9 @@
132
133 /* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
134 &pcie0 {
135 + pinctrl-names = "default";
136 + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
137 + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
138 status = "okay";
139 };
140
141 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
142 index 6226e7e809807..a75bb2ea3506d 100644
143 --- a/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
144 +++ b/arch/arm64/boot/dts/marvell/armada-3720-espressobin.dts
145 @@ -59,6 +59,7 @@
146 phys = <&comphy1 0>;
147 pinctrl-names = "default";
148 pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
149 + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
150 };
151
152 /* J6 */
153 diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
154 index de0eabff29353..16e73597bb78c 100644
155 --- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
156 +++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
157 @@ -127,10 +127,6 @@
158 };
159 };
160
161 -&pcie_reset_pins {
162 - function = "gpio";
163 -};
164 -
165 &pcie0 {
166 pinctrl-names = "default";
167 pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
168 diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
169 index c28611c1c251a..3d15e4ab3f53a 100644
170 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
171 +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
172 @@ -318,7 +318,7 @@
173
174 pcie_reset_pins: pcie-reset-pins {
175 groups = "pcie1";
176 - function = "pcie";
177 + function = "gpio";
178 };
179
180 pcie_clkreq_pins: pcie-clkreq-pins {
181 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
182 index 9749818eed6d6..2811ecc1f3c71 100644
183 --- a/arch/mips/Kconfig
184 +++ b/arch/mips/Kconfig
185 @@ -3059,7 +3059,7 @@ config STACKTRACE_SUPPORT
186 config PGTABLE_LEVELS
187 int
188 default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
189 - default 3 if 64BIT && !PAGE_SIZE_64KB
190 + default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
191 default 2
192
193 config MIPS_AUTO_PFN_OFFSET
194 diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
195 index 164483b37d854..99cd24f2ea01b 100644
196 --- a/arch/parisc/kernel/vmlinux.lds.S
197 +++ b/arch/parisc/kernel/vmlinux.lds.S
198 @@ -56,8 +56,6 @@ SECTIONS
199 {
200 . = KERNEL_BINARY_TEXT_START;
201
202 - _stext = .; /* start of kernel text, includes init code & data */
203 -
204 __init_begin = .;
205 HEAD_TEXT_SECTION
206 MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
207 @@ -81,6 +79,7 @@ SECTIONS
208 /* freed after init ends here */
209
210 _text = .; /* Text and read-only data */
211 + _stext = .;
212 MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
213 .text ALIGN(PAGE_SIZE) : {
214 TEXT_TEXT
215 diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
216 index 4a91b543a8540..6d34b69729854 100644
217 --- a/arch/powerpc/kvm/book3s_hv_builtin.c
218 +++ b/arch/powerpc/kvm/book3s_hv_builtin.c
219 @@ -821,6 +821,7 @@ static void flush_guest_tlb(struct kvm *kvm)
220 "r" (0) : "memory");
221 }
222 asm volatile("ptesync": : :"memory");
223 + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
224 asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
225 } else {
226 for (set = 0; set < kvm->arch.tlb_sets; ++set) {
227 @@ -831,7 +832,9 @@ static void flush_guest_tlb(struct kvm *kvm)
228 rb += PPC_BIT(51); /* increment set number */
229 }
230 asm volatile("ptesync": : :"memory");
231 - asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
232 + // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
233 + if (cpu_has_feature(CPU_FTR_ARCH_300))
234 + asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
235 }
236 }
237
238 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
239 index 9ebd01219812c..4438c00acb656 100644
240 --- a/arch/s390/mm/pgtable.c
241 +++ b/arch/s390/mm/pgtable.c
242 @@ -970,6 +970,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
243 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
244 unsigned long *oldpte, unsigned long *oldpgste)
245 {
246 + struct vm_area_struct *vma;
247 unsigned long pgstev;
248 spinlock_t *ptl;
249 pgste_t pgste;
250 @@ -979,6 +980,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
251 WARN_ON_ONCE(orc > ESSA_MAX);
252 if (unlikely(orc > ESSA_MAX))
253 return -EINVAL;
254 +
255 + vma = find_vma(mm, hva);
256 + if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
257 + return -EFAULT;
258 ptep = get_locked_pte(mm, hva, &ptl);
259 if (unlikely(!ptep))
260 return -EFAULT;
261 @@ -1071,10 +1076,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
262 int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
263 unsigned long bits, unsigned long value)
264 {
265 + struct vm_area_struct *vma;
266 spinlock_t *ptl;
267 pgste_t new;
268 pte_t *ptep;
269
270 + vma = find_vma(mm, hva);
271 + if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
272 + return -EFAULT;
273 ptep = get_locked_pte(mm, hva, &ptl);
274 if (unlikely(!ptep))
275 return -EFAULT;
276 @@ -1099,9 +1108,13 @@ EXPORT_SYMBOL(set_pgste_bits);
277 */
278 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
279 {
280 + struct vm_area_struct *vma;
281 spinlock_t *ptl;
282 pte_t *ptep;
283
284 + vma = find_vma(mm, hva);
285 + if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
286 + return -EFAULT;
287 ptep = get_locked_pte(mm, hva, &ptl);
288 if (unlikely(!ptep))
289 return -EFAULT;
290 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
291 index 47f839bc0234f..1cdc7426bd033 100644
292 --- a/drivers/android/binder.c
293 +++ b/drivers/android/binder.c
294 @@ -3095,7 +3095,7 @@ static void binder_transaction(struct binder_proc *proc,
295 t->from = thread;
296 else
297 t->from = NULL;
298 - t->sender_euid = proc->cred->euid;
299 + t->sender_euid = task_euid(proc->tsk);
300 t->to_proc = target_proc;
301 t->to_thread = target_thread;
302 t->code = tr->code;
303 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
304 index def41e1bd7364..baf10b73675e2 100644
305 --- a/drivers/block/xen-blkfront.c
306 +++ b/drivers/block/xen-blkfront.c
307 @@ -80,6 +80,7 @@ enum blkif_state {
308 BLKIF_STATE_DISCONNECTED,
309 BLKIF_STATE_CONNECTED,
310 BLKIF_STATE_SUSPENDED,
311 + BLKIF_STATE_ERROR,
312 };
313
314 struct grant {
315 @@ -89,6 +90,7 @@ struct grant {
316 };
317
318 enum blk_req_status {
319 + REQ_PROCESSING,
320 REQ_WAITING,
321 REQ_DONE,
322 REQ_ERROR,
323 @@ -533,10 +535,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
324
325 id = get_id_from_freelist(rinfo);
326 rinfo->shadow[id].request = req;
327 - rinfo->shadow[id].status = REQ_WAITING;
328 + rinfo->shadow[id].status = REQ_PROCESSING;
329 rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
330
331 - (*ring_req)->u.rw.id = id;
332 + rinfo->shadow[id].req.u.rw.id = id;
333
334 return id;
335 }
336 @@ -544,11 +546,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
337 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
338 {
339 struct blkfront_info *info = rinfo->dev_info;
340 - struct blkif_request *ring_req;
341 + struct blkif_request *ring_req, *final_ring_req;
342 unsigned long id;
343
344 /* Fill out a communications ring structure. */
345 - id = blkif_ring_get_request(rinfo, req, &ring_req);
346 + id = blkif_ring_get_request(rinfo, req, &final_ring_req);
347 + ring_req = &rinfo->shadow[id].req;
348
349 ring_req->operation = BLKIF_OP_DISCARD;
350 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
351 @@ -559,8 +562,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
352 else
353 ring_req->u.discard.flag = 0;
354
355 - /* Keep a private copy so we can reissue requests when recovering. */
356 - rinfo->shadow[id].req = *ring_req;
357 + /* Copy the request to the ring page. */
358 + *final_ring_req = *ring_req;
359 + rinfo->shadow[id].status = REQ_WAITING;
360
361 return 0;
362 }
363 @@ -693,6 +697,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
364 {
365 struct blkfront_info *info = rinfo->dev_info;
366 struct blkif_request *ring_req, *extra_ring_req = NULL;
367 + struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
368 unsigned long id, extra_id = NO_ASSOCIATED_ID;
369 bool require_extra_req = false;
370 int i;
371 @@ -737,7 +742,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
372 }
373
374 /* Fill out a communications ring structure. */
375 - id = blkif_ring_get_request(rinfo, req, &ring_req);
376 + id = blkif_ring_get_request(rinfo, req, &final_ring_req);
377 + ring_req = &rinfo->shadow[id].req;
378
379 num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
380 num_grant = 0;
381 @@ -788,7 +794,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
382 ring_req->u.rw.nr_segments = num_grant;
383 if (unlikely(require_extra_req)) {
384 extra_id = blkif_ring_get_request(rinfo, req,
385 - &extra_ring_req);
386 + &final_extra_ring_req);
387 + extra_ring_req = &rinfo->shadow[extra_id].req;
388 +
389 /*
390 * Only the first request contains the scatter-gather
391 * list.
392 @@ -830,10 +838,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
393 if (setup.segments)
394 kunmap_atomic(setup.segments);
395
396 - /* Keep a private copy so we can reissue requests when recovering. */
397 - rinfo->shadow[id].req = *ring_req;
398 - if (unlikely(require_extra_req))
399 - rinfo->shadow[extra_id].req = *extra_ring_req;
400 + /* Copy request(s) to the ring page. */
401 + *final_ring_req = *ring_req;
402 + rinfo->shadow[id].status = REQ_WAITING;
403 + if (unlikely(require_extra_req)) {
404 + *final_extra_ring_req = *extra_ring_req;
405 + rinfo->shadow[extra_id].status = REQ_WAITING;
406 + }
407
408 if (new_persistent_gnts)
409 gnttab_free_grant_references(setup.gref_head);
410 @@ -1407,8 +1418,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
411 static int blkif_get_final_status(enum blk_req_status s1,
412 enum blk_req_status s2)
413 {
414 - BUG_ON(s1 == REQ_WAITING);
415 - BUG_ON(s2 == REQ_WAITING);
416 + BUG_ON(s1 < REQ_DONE);
417 + BUG_ON(s2 < REQ_DONE);
418
419 if (s1 == REQ_ERROR || s2 == REQ_ERROR)
420 return BLKIF_RSP_ERROR;
421 @@ -1441,7 +1452,7 @@ static bool blkif_completion(unsigned long *id,
422 s->status = blkif_rsp_to_req_status(bret->status);
423
424 /* Wait the second response if not yet here. */
425 - if (s2->status == REQ_WAITING)
426 + if (s2->status < REQ_DONE)
427 return false;
428
429 bret->status = blkif_get_final_status(s->status,
430 @@ -1549,7 +1560,7 @@ static bool blkif_completion(unsigned long *id,
431 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
432 {
433 struct request *req;
434 - struct blkif_response *bret;
435 + struct blkif_response bret;
436 RING_IDX i, rp;
437 unsigned long flags;
438 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
439 @@ -1560,54 +1571,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
440
441 spin_lock_irqsave(&rinfo->ring_lock, flags);
442 again:
443 - rp = rinfo->ring.sring->rsp_prod;
444 - rmb(); /* Ensure we see queued responses up to 'rp'. */
445 + rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
446 + virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
447 + if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
448 + pr_alert("%s: illegal number of responses %u\n",
449 + info->gd->disk_name, rp - rinfo->ring.rsp_cons);
450 + goto err;
451 + }
452
453 for (i = rinfo->ring.rsp_cons; i != rp; i++) {
454 unsigned long id;
455 + unsigned int op;
456 +
457 + RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
458 + id = bret.id;
459
460 - bret = RING_GET_RESPONSE(&rinfo->ring, i);
461 - id = bret->id;
462 /*
463 * The backend has messed up and given us an id that we would
464 * never have given to it (we stamp it up to BLK_RING_SIZE -
465 * look in get_id_from_freelist.
466 */
467 if (id >= BLK_RING_SIZE(info)) {
468 - WARN(1, "%s: response to %s has incorrect id (%ld)\n",
469 - info->gd->disk_name, op_name(bret->operation), id);
470 - /* We can't safely get the 'struct request' as
471 - * the id is busted. */
472 - continue;
473 + pr_alert("%s: response has incorrect id (%ld)\n",
474 + info->gd->disk_name, id);
475 + goto err;
476 }
477 + if (rinfo->shadow[id].status != REQ_WAITING) {
478 + pr_alert("%s: response references no pending request\n",
479 + info->gd->disk_name);
480 + goto err;
481 + }
482 +
483 + rinfo->shadow[id].status = REQ_PROCESSING;
484 req = rinfo->shadow[id].request;
485
486 - if (bret->operation != BLKIF_OP_DISCARD) {
487 + op = rinfo->shadow[id].req.operation;
488 + if (op == BLKIF_OP_INDIRECT)
489 + op = rinfo->shadow[id].req.u.indirect.indirect_op;
490 + if (bret.operation != op) {
491 + pr_alert("%s: response has wrong operation (%u instead of %u)\n",
492 + info->gd->disk_name, bret.operation, op);
493 + goto err;
494 + }
495 +
496 + if (bret.operation != BLKIF_OP_DISCARD) {
497 /*
498 * We may need to wait for an extra response if the
499 * I/O request is split in 2
500 */
501 - if (!blkif_completion(&id, rinfo, bret))
502 + if (!blkif_completion(&id, rinfo, &bret))
503 continue;
504 }
505
506 if (add_id_to_freelist(rinfo, id)) {
507 WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
508 - info->gd->disk_name, op_name(bret->operation), id);
509 + info->gd->disk_name, op_name(bret.operation), id);
510 continue;
511 }
512
513 - if (bret->status == BLKIF_RSP_OKAY)
514 + if (bret.status == BLKIF_RSP_OKAY)
515 blkif_req(req)->error = BLK_STS_OK;
516 else
517 blkif_req(req)->error = BLK_STS_IOERR;
518
519 - switch (bret->operation) {
520 + switch (bret.operation) {
521 case BLKIF_OP_DISCARD:
522 - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
523 + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
524 struct request_queue *rq = info->rq;
525 - printk(KERN_WARNING "blkfront: %s: %s op failed\n",
526 - info->gd->disk_name, op_name(bret->operation));
527 +
528 + pr_warn_ratelimited("blkfront: %s: %s op failed\n",
529 + info->gd->disk_name, op_name(bret.operation));
530 blkif_req(req)->error = BLK_STS_NOTSUPP;
531 info->feature_discard = 0;
532 info->feature_secdiscard = 0;
533 @@ -1617,15 +1650,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
534 break;
535 case BLKIF_OP_FLUSH_DISKCACHE:
536 case BLKIF_OP_WRITE_BARRIER:
537 - if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
538 - printk(KERN_WARNING "blkfront: %s: %s op failed\n",
539 - info->gd->disk_name, op_name(bret->operation));
540 + if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
541 + pr_warn_ratelimited("blkfront: %s: %s op failed\n",
542 + info->gd->disk_name, op_name(bret.operation));
543 blkif_req(req)->error = BLK_STS_NOTSUPP;
544 }
545 - if (unlikely(bret->status == BLKIF_RSP_ERROR &&
546 + if (unlikely(bret.status == BLKIF_RSP_ERROR &&
547 rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
548 - printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
549 - info->gd->disk_name, op_name(bret->operation));
550 + pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
551 + info->gd->disk_name, op_name(bret.operation));
552 blkif_req(req)->error = BLK_STS_NOTSUPP;
553 }
554 if (unlikely(blkif_req(req)->error)) {
555 @@ -1638,9 +1671,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
556 /* fall through */
557 case BLKIF_OP_READ:
558 case BLKIF_OP_WRITE:
559 - if (unlikely(bret->status != BLKIF_RSP_OKAY))
560 - dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
561 - "request: %x\n", bret->status);
562 + if (unlikely(bret.status != BLKIF_RSP_OKAY))
563 + dev_dbg_ratelimited(&info->xbdev->dev,
564 + "Bad return from blkdev data request: %#x\n",
565 + bret.status);
566
567 break;
568 default:
569 @@ -1665,6 +1699,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
570 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
571
572 return IRQ_HANDLED;
573 +
574 + err:
575 + info->connected = BLKIF_STATE_ERROR;
576 +
577 + spin_unlock_irqrestore(&rinfo->ring_lock, flags);
578 +
579 + pr_alert("%s disabled for further use\n", info->gd->disk_name);
580 + return IRQ_HANDLED;
581 }
582
583
584 diff --git a/drivers/firmware/arm_scmi/scmi_pm_domain.c b/drivers/firmware/arm_scmi/scmi_pm_domain.c
585 index 041f8152272bf..177874adccf0d 100644
586 --- a/drivers/firmware/arm_scmi/scmi_pm_domain.c
587 +++ b/drivers/firmware/arm_scmi/scmi_pm_domain.c
588 @@ -106,9 +106,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
589 scmi_pd_data->domains = domains;
590 scmi_pd_data->num_domains = num_domains;
591
592 - of_genpd_add_provider_onecell(np, scmi_pd_data);
593 -
594 - return 0;
595 + return of_genpd_add_provider_onecell(np, scmi_pd_data);
596 }
597
598 static const struct scmi_device_id scmi_id_table[] = {
599 diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
600 index 72d30d90b856c..0af246a5609ca 100644
601 --- a/drivers/gpu/drm/vc4/vc4_bo.c
602 +++ b/drivers/gpu/drm/vc4/vc4_bo.c
603 @@ -389,7 +389,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
604
605 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
606 if (!bo)
607 - return ERR_PTR(-ENOMEM);
608 + return NULL;
609
610 bo->madv = VC4_MADV_WILLNEED;
611 refcount_set(&bo->usecnt, 0);
612 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
613 index f6be2e70a4967..e011839f19f89 100644
614 --- a/drivers/hid/wacom_wac.c
615 +++ b/drivers/hid/wacom_wac.c
616 @@ -2578,6 +2578,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
617 return;
618
619 switch (equivalent_usage) {
620 + case HID_DG_CONFIDENCE:
621 + wacom_wac->hid_data.confidence = value;
622 + break;
623 case HID_GD_X:
624 wacom_wac->hid_data.x = value;
625 break;
626 @@ -2610,7 +2613,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
627 }
628
629 if (usage->usage_index + 1 == field->report_count) {
630 - if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
631 + if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
632 + wacom_wac->hid_data.confidence)
633 wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
634 }
635 }
636 @@ -2625,6 +2629,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
637
638 wacom_wac->is_invalid_bt_frame = false;
639
640 + hid_data->confidence = true;
641 +
642 for (i = 0; i < report->maxfield; i++) {
643 struct hid_field *field = report->field[i];
644 int j;
645 diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
646 index e3835407e8d23..8dea7cb298e69 100644
647 --- a/drivers/hid/wacom_wac.h
648 +++ b/drivers/hid/wacom_wac.h
649 @@ -300,6 +300,7 @@ struct hid_data {
650 bool tipswitch;
651 bool barrelswitch;
652 bool barrelswitch2;
653 + bool confidence;
654 int x;
655 int y;
656 int pressure;
657 diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
658 index 06383b26712b6..56857ac0a0be2 100644
659 --- a/drivers/media/cec/cec-adap.c
660 +++ b/drivers/media/cec/cec-adap.c
661 @@ -1191,6 +1191,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
662 if (abort)
663 dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
664 msg->flags = dst->flags;
665 + msg->sequence = dst->sequence;
666 /* Remove it from the wait_queue */
667 list_del_init(&data->list);
668
669 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
670 index cb54fa2120d72..deafcc56adee6 100644
671 --- a/drivers/mmc/host/sdhci.c
672 +++ b/drivers/mmc/host/sdhci.c
673 @@ -749,7 +749,19 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
674 len -= offset;
675 }
676
677 - BUG_ON(len > 65536);
678 + /*
679 + * The block layer forces a minimum segment size of PAGE_SIZE,
680 + * so 'len' can be too big here if PAGE_SIZE >= 64KiB. Write
681 + * multiple descriptors, noting that the ADMA table is sized
682 + * for 4KiB chunks anyway, so it will be big enough.
683 + */
684 + while (len > host->max_adma) {
685 + int n = 32 * 1024; /* 32KiB*/
686 +
687 + __sdhci_adma_write_desc(host, &desc, addr, n, ADMA2_TRAN_VALID);
688 + addr += n;
689 + len -= n;
690 + }
691
692 /* tran, valid */
693 if (len)
694 @@ -3568,6 +3580,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
695 * descriptor for each segment, plus 1 for a nop end descriptor.
696 */
697 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
698 + host->max_adma = 65536;
699
700 return host;
701 }
702 @@ -4221,10 +4234,12 @@ int sdhci_setup_host(struct sdhci_host *host)
703 * be larger than 64 KiB though.
704 */
705 if (host->flags & SDHCI_USE_ADMA) {
706 - if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
707 + if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) {
708 + host->max_adma = 65532; /* 32-bit alignment */
709 mmc->max_seg_size = 65535;
710 - else
711 + } else {
712 mmc->max_seg_size = 65536;
713 + }
714 } else {
715 mmc->max_seg_size = mmc->max_req_size;
716 }
717 diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
718 index 96a0a8f97f559..54f9d6720f132 100644
719 --- a/drivers/mmc/host/sdhci.h
720 +++ b/drivers/mmc/host/sdhci.h
721 @@ -349,7 +349,8 @@ struct sdhci_adma2_64_desc {
722
723 /*
724 * Maximum segments assuming a 512KiB maximum requisition size and a minimum
725 - * 4KiB page size.
726 + * 4KiB page size. Note this also allows enough for multiple descriptors in
727 + * case of PAGE_SIZE >= 64KiB.
728 */
729 #define SDHCI_MAX_SEGS 128
730
731 @@ -547,6 +548,7 @@ struct sdhci_host {
732 unsigned int blocks; /* remaining PIO blocks */
733
734 int sg_count; /* Mapped sg entries */
735 + int max_adma; /* Max. length in ADMA descriptor */
736
737 void *adma_table; /* ADMA descriptor table */
738 void *align_buffer; /* Bounce buffer */
739 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
740 index db2e9dd5681eb..ce6a4e1965e1d 100644
741 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
742 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
743 @@ -644,9 +644,9 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
744 roundup_size = ilog2(roundup_size);
745
746 for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
747 - tc_valid[i] = !!(hdev->hw_tc_map & BIT(i));
748 + tc_valid[i] = 1;
749 tc_size[i] = roundup_size;
750 - tc_offset[i] = rss_size * i;
751 + tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
752 }
753
754 hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
755 diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
756 index ad1e796e5544a..4e0e1b02d615e 100644
757 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
758 +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
759 @@ -719,12 +719,31 @@ static int iavf_get_per_queue_coalesce(struct net_device *netdev, u32 queue,
760 *
761 * Change the ITR settings for a specific queue.
762 **/
763 -static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
764 - struct ethtool_coalesce *ec, int queue)
765 +static int iavf_set_itr_per_queue(struct iavf_adapter *adapter,
766 + struct ethtool_coalesce *ec, int queue)
767 {
768 struct iavf_ring *rx_ring = &adapter->rx_rings[queue];
769 struct iavf_ring *tx_ring = &adapter->tx_rings[queue];
770 struct iavf_q_vector *q_vector;
771 + u16 itr_setting;
772 +
773 + itr_setting = rx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
774 +
775 + if (ec->rx_coalesce_usecs != itr_setting &&
776 + ec->use_adaptive_rx_coalesce) {
777 + netif_info(adapter, drv, adapter->netdev,
778 + "Rx interrupt throttling cannot be changed if adaptive-rx is enabled\n");
779 + return -EINVAL;
780 + }
781 +
782 + itr_setting = tx_ring->itr_setting & ~IAVF_ITR_DYNAMIC;
783 +
784 + if (ec->tx_coalesce_usecs != itr_setting &&
785 + ec->use_adaptive_tx_coalesce) {
786 + netif_info(adapter, drv, adapter->netdev,
787 + "Tx interrupt throttling cannot be changed if adaptive-tx is enabled\n");
788 + return -EINVAL;
789 + }
790
791 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
792 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
793 @@ -747,6 +766,7 @@ static void iavf_set_itr_per_queue(struct iavf_adapter *adapter,
794 * the Tx and Rx ITR values based on the values we have entered
795 * into the q_vector, no need to write the values now.
796 */
797 + return 0;
798 }
799
800 /**
801 @@ -788,9 +808,11 @@ static int __iavf_set_coalesce(struct net_device *netdev,
802 */
803 if (queue < 0) {
804 for (i = 0; i < adapter->num_active_queues; i++)
805 - iavf_set_itr_per_queue(adapter, ec, i);
806 + if (iavf_set_itr_per_queue(adapter, ec, i))
807 + return -EINVAL;
808 } else if (queue < adapter->num_active_queues) {
809 - iavf_set_itr_per_queue(adapter, ec, queue);
810 + if (iavf_set_itr_per_queue(adapter, ec, queue))
811 + return -EINVAL;
812 } else {
813 netif_info(adapter, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
814 adapter->num_active_queues - 1);
815 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
816 index 158feb0ab2739..c11244a9b7e69 100644
817 --- a/drivers/net/ethernet/intel/igb/igb_main.c
818 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
819 @@ -7752,7 +7752,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
820 if (likely(napi_complete_done(napi, work_done)))
821 igb_ring_irq_enable(q_vector);
822
823 - return min(work_done, budget - 1);
824 + return work_done;
825 }
826
827 /**
828 diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
829 index 6030c90d50ccb..bf7832b34a000 100644
830 --- a/drivers/net/ethernet/mscc/ocelot.c
831 +++ b/drivers/net/ethernet/mscc/ocelot.c
832 @@ -1024,12 +1024,6 @@ static int ocelot_hwstamp_set(struct ocelot_port *port, struct ifreq *ifr)
833 switch (cfg.rx_filter) {
834 case HWTSTAMP_FILTER_NONE:
835 break;
836 - case HWTSTAMP_FILTER_ALL:
837 - case HWTSTAMP_FILTER_SOME:
838 - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
839 - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
840 - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
841 - case HWTSTAMP_FILTER_NTP_ALL:
842 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
843 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
844 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
845 @@ -1189,7 +1183,10 @@ static int ocelot_get_ts_info(struct net_device *dev,
846 SOF_TIMESTAMPING_RAW_HARDWARE;
847 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON) |
848 BIT(HWTSTAMP_TX_ONESTEP_SYNC);
849 - info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
850 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
851 + BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
852 + BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
853 + BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
854
855 return 0;
856 }
857 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
858 index 250f510b1d212..3dcb09f17b77f 100644
859 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
860 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
861 @@ -557,7 +557,6 @@ struct nfp_net_dp {
862 * @exn_name: Name for Exception interrupt
863 * @shared_handler: Handler for shared interrupts
864 * @shared_name: Name for shared interrupt
865 - * @me_freq_mhz: ME clock_freq (MHz)
866 * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
867 * @reconfig_sync_present and HW reconfiguration request
868 * regs/machinery from async requests (sync must take
869 @@ -639,8 +638,6 @@ struct nfp_net {
870 irq_handler_t shared_handler;
871 char shared_name[IFNAMSIZ + 8];
872
873 - u32 me_freq_mhz;
874 -
875 bool link_up;
876 spinlock_t link_status_lock;
877
878 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
879 index 2354dec994184..89e578e25ff8f 100644
880 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
881 +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
882 @@ -1269,7 +1269,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
883 * ME timestamp ticks. There are 16 ME clock cycles for each timestamp
884 * count.
885 */
886 - factor = nn->me_freq_mhz / 16;
887 + factor = nn->tlv_caps.me_freq_mhz / 16;
888
889 /* Each pair of (usecs, max_frames) fields specifies that interrupts
890 * should be coalesced until
891 diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/phy/mdio-aspeed.c
892 index cad820568f751..966c3b4ad59d1 100644
893 --- a/drivers/net/phy/mdio-aspeed.c
894 +++ b/drivers/net/phy/mdio-aspeed.c
895 @@ -61,6 +61,13 @@ static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
896
897 iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
898
899 + rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
900 + !(ctrl & ASPEED_MDIO_CTRL_FIRE),
901 + ASPEED_MDIO_INTERVAL_US,
902 + ASPEED_MDIO_TIMEOUT_US);
903 + if (rc < 0)
904 + return rc;
905 +
906 rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
907 data & ASPEED_MDIO_DATA_IDLE,
908 ASPEED_MDIO_INTERVAL_US,
909 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
910 index 7d389c2cc9026..d6f44343213cc 100644
911 --- a/drivers/net/xen-netfront.c
912 +++ b/drivers/net/xen-netfront.c
913 @@ -121,21 +121,17 @@ struct netfront_queue {
914
915 /*
916 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
917 - * are linked from tx_skb_freelist through skb_entry.link.
918 - *
919 - * NB. Freelist index entries are always going to be less than
920 - * PAGE_OFFSET, whereas pointers to skbs will always be equal or
921 - * greater than PAGE_OFFSET: we use this property to distinguish
922 - * them.
923 + * are linked from tx_skb_freelist through tx_link.
924 */
925 - union skb_entry {
926 - struct sk_buff *skb;
927 - unsigned long link;
928 - } tx_skbs[NET_TX_RING_SIZE];
929 + struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
930 + unsigned short tx_link[NET_TX_RING_SIZE];
931 +#define TX_LINK_NONE 0xffff
932 +#define TX_PENDING 0xfffe
933 grant_ref_t gref_tx_head;
934 grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
935 struct page *grant_tx_page[NET_TX_RING_SIZE];
936 unsigned tx_skb_freelist;
937 + unsigned int tx_pend_queue;
938
939 spinlock_t rx_lock ____cacheline_aligned_in_smp;
940 struct xen_netif_rx_front_ring rx;
941 @@ -161,6 +157,9 @@ struct netfront_info {
942 struct netfront_stats __percpu *rx_stats;
943 struct netfront_stats __percpu *tx_stats;
944
945 + /* Is device behaving sane? */
946 + bool broken;
947 +
948 atomic_t rx_gso_checksum_fixup;
949 };
950
951 @@ -169,33 +168,25 @@ struct netfront_rx_info {
952 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
953 };
954
955 -static void skb_entry_set_link(union skb_entry *list, unsigned short id)
956 -{
957 - list->link = id;
958 -}
959 -
960 -static int skb_entry_is_link(const union skb_entry *list)
961 -{
962 - BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
963 - return (unsigned long)list->skb < PAGE_OFFSET;
964 -}
965 -
966 /*
967 * Access macros for acquiring freeing slots in tx_skbs[].
968 */
969
970 -static void add_id_to_freelist(unsigned *head, union skb_entry *list,
971 - unsigned short id)
972 +static void add_id_to_list(unsigned *head, unsigned short *list,
973 + unsigned short id)
974 {
975 - skb_entry_set_link(&list[id], *head);
976 + list[id] = *head;
977 *head = id;
978 }
979
980 -static unsigned short get_id_from_freelist(unsigned *head,
981 - union skb_entry *list)
982 +static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
983 {
984 unsigned int id = *head;
985 - *head = list[id].link;
986 +
987 + if (id != TX_LINK_NONE) {
988 + *head = list[id];
989 + list[id] = TX_LINK_NONE;
990 + }
991 return id;
992 }
993
994 @@ -351,7 +342,7 @@ static int xennet_open(struct net_device *dev)
995 unsigned int i = 0;
996 struct netfront_queue *queue = NULL;
997
998 - if (!np->queues)
999 + if (!np->queues || np->broken)
1000 return -ENODEV;
1001
1002 for (i = 0; i < num_queues; ++i) {
1003 @@ -379,27 +370,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1004 unsigned short id;
1005 struct sk_buff *skb;
1006 bool more_to_do;
1007 + const struct device *dev = &queue->info->netdev->dev;
1008
1009 BUG_ON(!netif_carrier_ok(queue->info->netdev));
1010
1011 do {
1012 prod = queue->tx.sring->rsp_prod;
1013 + if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
1014 + dev_alert(dev, "Illegal number of responses %u\n",
1015 + prod - queue->tx.rsp_cons);
1016 + goto err;
1017 + }
1018 rmb(); /* Ensure we see responses up to 'rp'. */
1019
1020 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
1021 - struct xen_netif_tx_response *txrsp;
1022 + struct xen_netif_tx_response txrsp;
1023
1024 - txrsp = RING_GET_RESPONSE(&queue->tx, cons);
1025 - if (txrsp->status == XEN_NETIF_RSP_NULL)
1026 + RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
1027 + if (txrsp.status == XEN_NETIF_RSP_NULL)
1028 continue;
1029
1030 - id = txrsp->id;
1031 - skb = queue->tx_skbs[id].skb;
1032 + id = txrsp.id;
1033 + if (id >= RING_SIZE(&queue->tx)) {
1034 + dev_alert(dev,
1035 + "Response has incorrect id (%u)\n",
1036 + id);
1037 + goto err;
1038 + }
1039 + if (queue->tx_link[id] != TX_PENDING) {
1040 + dev_alert(dev,
1041 + "Response for inactive request\n");
1042 + goto err;
1043 + }
1044 +
1045 + queue->tx_link[id] = TX_LINK_NONE;
1046 + skb = queue->tx_skbs[id];
1047 + queue->tx_skbs[id] = NULL;
1048 if (unlikely(gnttab_query_foreign_access(
1049 queue->grant_tx_ref[id]) != 0)) {
1050 - pr_alert("%s: warning -- grant still in use by backend domain\n",
1051 - __func__);
1052 - BUG();
1053 + dev_alert(dev,
1054 + "Grant still in use by backend domain\n");
1055 + goto err;
1056 }
1057 gnttab_end_foreign_access_ref(
1058 queue->grant_tx_ref[id], GNTMAP_readonly);
1059 @@ -407,7 +418,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1060 &queue->gref_tx_head, queue->grant_tx_ref[id]);
1061 queue->grant_tx_ref[id] = GRANT_INVALID_REF;
1062 queue->grant_tx_page[id] = NULL;
1063 - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
1064 + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
1065 dev_kfree_skb_irq(skb);
1066 }
1067
1068 @@ -417,13 +428,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
1069 } while (more_to_do);
1070
1071 xennet_maybe_wake_tx(queue);
1072 +
1073 + return;
1074 +
1075 + err:
1076 + queue->info->broken = true;
1077 + dev_alert(dev, "Disabled for further use\n");
1078 }
1079
1080 struct xennet_gnttab_make_txreq {
1081 struct netfront_queue *queue;
1082 struct sk_buff *skb;
1083 struct page *page;
1084 - struct xen_netif_tx_request *tx; /* Last request */
1085 + struct xen_netif_tx_request *tx; /* Last request on ring page */
1086 + struct xen_netif_tx_request tx_local; /* Last request local copy*/
1087 unsigned int size;
1088 };
1089
1090 @@ -439,7 +457,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1091 struct netfront_queue *queue = info->queue;
1092 struct sk_buff *skb = info->skb;
1093
1094 - id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
1095 + id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
1096 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1097 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
1098 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
1099 @@ -447,34 +465,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
1100 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
1101 gfn, GNTMAP_readonly);
1102
1103 - queue->tx_skbs[id].skb = skb;
1104 + queue->tx_skbs[id] = skb;
1105 queue->grant_tx_page[id] = page;
1106 queue->grant_tx_ref[id] = ref;
1107
1108 - tx->id = id;
1109 - tx->gref = ref;
1110 - tx->offset = offset;
1111 - tx->size = len;
1112 - tx->flags = 0;
1113 + info->tx_local.id = id;
1114 + info->tx_local.gref = ref;
1115 + info->tx_local.offset = offset;
1116 + info->tx_local.size = len;
1117 + info->tx_local.flags = 0;
1118 +
1119 + *tx = info->tx_local;
1120 +
1121 + /*
1122 + * Put the request in the pending queue, it will be set to be pending
1123 + * when the producer index is about to be raised.
1124 + */
1125 + add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
1126
1127 info->tx = tx;
1128 - info->size += tx->size;
1129 + info->size += info->tx_local.size;
1130 }
1131
1132 static struct xen_netif_tx_request *xennet_make_first_txreq(
1133 - struct netfront_queue *queue, struct sk_buff *skb,
1134 - struct page *page, unsigned int offset, unsigned int len)
1135 + struct xennet_gnttab_make_txreq *info,
1136 + unsigned int offset, unsigned int len)
1137 {
1138 - struct xennet_gnttab_make_txreq info = {
1139 - .queue = queue,
1140 - .skb = skb,
1141 - .page = page,
1142 - .size = 0,
1143 - };
1144 + info->size = 0;
1145
1146 - gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
1147 + gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
1148
1149 - return info.tx;
1150 + return info->tx;
1151 }
1152
1153 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1154 @@ -487,35 +508,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
1155 xennet_tx_setup_grant(gfn, offset, len, data);
1156 }
1157
1158 -static struct xen_netif_tx_request *xennet_make_txreqs(
1159 - struct netfront_queue *queue, struct xen_netif_tx_request *tx,
1160 - struct sk_buff *skb, struct page *page,
1161 +static void xennet_make_txreqs(
1162 + struct xennet_gnttab_make_txreq *info,
1163 + struct page *page,
1164 unsigned int offset, unsigned int len)
1165 {
1166 - struct xennet_gnttab_make_txreq info = {
1167 - .queue = queue,
1168 - .skb = skb,
1169 - .tx = tx,
1170 - };
1171 -
1172 /* Skip unused frames from start of page */
1173 page += offset >> PAGE_SHIFT;
1174 offset &= ~PAGE_MASK;
1175
1176 while (len) {
1177 - info.page = page;
1178 - info.size = 0;
1179 + info->page = page;
1180 + info->size = 0;
1181
1182 gnttab_foreach_grant_in_range(page, offset, len,
1183 xennet_make_one_txreq,
1184 - &info);
1185 + info);
1186
1187 page++;
1188 offset = 0;
1189 - len -= info.size;
1190 + len -= info->size;
1191 }
1192 -
1193 - return info.tx;
1194 }
1195
1196 /*
1197 @@ -562,13 +575,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
1198 return queue_idx;
1199 }
1200
1201 +static void xennet_mark_tx_pending(struct netfront_queue *queue)
1202 +{
1203 + unsigned int i;
1204 +
1205 + while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
1206 + TX_LINK_NONE)
1207 + queue->tx_link[i] = TX_PENDING;
1208 +}
1209 +
1210 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
1211
1212 static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
1213 {
1214 struct netfront_info *np = netdev_priv(dev);
1215 struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
1216 - struct xen_netif_tx_request *tx, *first_tx;
1217 + struct xen_netif_tx_request *first_tx;
1218 unsigned int i;
1219 int notify;
1220 int slots;
1221 @@ -577,6 +599,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1222 unsigned int len;
1223 unsigned long flags;
1224 struct netfront_queue *queue = NULL;
1225 + struct xennet_gnttab_make_txreq info = { };
1226 unsigned int num_queues = dev->real_num_tx_queues;
1227 u16 queue_index;
1228 struct sk_buff *nskb;
1229 @@ -584,6 +607,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1230 /* Drop the packet if no queues are set up */
1231 if (num_queues < 1)
1232 goto drop;
1233 + if (unlikely(np->broken))
1234 + goto drop;
1235 /* Determine which queue to transmit this SKB on */
1236 queue_index = skb_get_queue_mapping(skb);
1237 queue = &np->queues[queue_index];
1238 @@ -634,21 +659,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1239 }
1240
1241 /* First request for the linear area. */
1242 - first_tx = tx = xennet_make_first_txreq(queue, skb,
1243 - page, offset, len);
1244 - offset += tx->size;
1245 + info.queue = queue;
1246 + info.skb = skb;
1247 + info.page = page;
1248 + first_tx = xennet_make_first_txreq(&info, offset, len);
1249 + offset += info.tx_local.size;
1250 if (offset == PAGE_SIZE) {
1251 page++;
1252 offset = 0;
1253 }
1254 - len -= tx->size;
1255 + len -= info.tx_local.size;
1256
1257 if (skb->ip_summed == CHECKSUM_PARTIAL)
1258 /* local packet? */
1259 - tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
1260 + first_tx->flags |= XEN_NETTXF_csum_blank |
1261 + XEN_NETTXF_data_validated;
1262 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1263 /* remote but checksummed. */
1264 - tx->flags |= XEN_NETTXF_data_validated;
1265 + first_tx->flags |= XEN_NETTXF_data_validated;
1266
1267 /* Optional extra info after the first request. */
1268 if (skb_shinfo(skb)->gso_size) {
1269 @@ -657,7 +685,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1270 gso = (struct xen_netif_extra_info *)
1271 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
1272
1273 - tx->flags |= XEN_NETTXF_extra_info;
1274 + first_tx->flags |= XEN_NETTXF_extra_info;
1275
1276 gso->u.gso.size = skb_shinfo(skb)->gso_size;
1277 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
1278 @@ -671,12 +699,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1279 }
1280
1281 /* Requests for the rest of the linear area. */
1282 - tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
1283 + xennet_make_txreqs(&info, page, offset, len);
1284
1285 /* Requests for all the frags. */
1286 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1287 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1288 - tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
1289 + xennet_make_txreqs(&info, skb_frag_page(frag),
1290 skb_frag_off(frag),
1291 skb_frag_size(frag));
1292 }
1293 @@ -684,6 +712,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
1294 /* First request has the packet length. */
1295 first_tx->size = skb->len;
1296
1297 + xennet_mark_tx_pending(queue);
1298 +
1299 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
1300 if (notify)
1301 notify_remote_via_irq(queue->tx_irq);
1302 @@ -741,7 +771,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
1303 RING_IDX rp)
1304
1305 {
1306 - struct xen_netif_extra_info *extra;
1307 + struct xen_netif_extra_info extra;
1308 struct device *dev = &queue->info->netdev->dev;
1309 RING_IDX cons = queue->rx.rsp_cons;
1310 int err = 0;
1311 @@ -757,24 +787,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
1312 break;
1313 }
1314
1315 - extra = (struct xen_netif_extra_info *)
1316 - RING_GET_RESPONSE(&queue->rx, ++cons);
1317 + RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
1318
1319 - if (unlikely(!extra->type ||
1320 - extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1321 + if (unlikely(!extra.type ||
1322 + extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1323 if (net_ratelimit())
1324 dev_warn(dev, "Invalid extra type: %d\n",
1325 - extra->type);
1326 + extra.type);
1327 err = -EINVAL;
1328 } else {
1329 - memcpy(&extras[extra->type - 1], extra,
1330 - sizeof(*extra));
1331 + extras[extra.type - 1] = extra;
1332 }
1333
1334 skb = xennet_get_rx_skb(queue, cons);
1335 ref = xennet_get_rx_ref(queue, cons);
1336 xennet_move_rx_slot(queue, skb, ref);
1337 - } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1338 + } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1339
1340 queue->rx.rsp_cons = cons;
1341 return err;
1342 @@ -784,7 +812,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
1343 struct netfront_rx_info *rinfo, RING_IDX rp,
1344 struct sk_buff_head *list)
1345 {
1346 - struct xen_netif_rx_response *rx = &rinfo->rx;
1347 + struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
1348 struct xen_netif_extra_info *extras = rinfo->extras;
1349 struct device *dev = &queue->info->netdev->dev;
1350 RING_IDX cons = queue->rx.rsp_cons;
1351 @@ -842,7 +870,8 @@ next:
1352 break;
1353 }
1354
1355 - rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
1356 + RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
1357 + rx = &rx_local;
1358 skb = xennet_get_rx_skb(queue, cons + slots);
1359 ref = xennet_get_rx_ref(queue, cons + slots);
1360 slots++;
1361 @@ -897,10 +926,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1362 struct sk_buff *nskb;
1363
1364 while ((nskb = __skb_dequeue(list))) {
1365 - struct xen_netif_rx_response *rx =
1366 - RING_GET_RESPONSE(&queue->rx, ++cons);
1367 + struct xen_netif_rx_response rx;
1368 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
1369
1370 + RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
1371 +
1372 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
1373 unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1374
1375 @@ -915,7 +945,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
1376
1377 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1378 skb_frag_page(nfrag),
1379 - rx->offset, rx->status, PAGE_SIZE);
1380 + rx.offset, rx.status, PAGE_SIZE);
1381
1382 skb_shinfo(nskb)->nr_frags = 0;
1383 kfree_skb(nskb);
1384 @@ -1008,12 +1038,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
1385 skb_queue_head_init(&tmpq);
1386
1387 rp = queue->rx.sring->rsp_prod;
1388 + if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1389 + dev_alert(&dev->dev, "Illegal number of responses %u\n",
1390 + rp - queue->rx.rsp_cons);
1391 + queue->info->broken = true;
1392 + spin_unlock(&queue->rx_lock);
1393 + return 0;
1394 + }
1395 rmb(); /* Ensure we see queued responses up to 'rp'. */
1396
1397 i = queue->rx.rsp_cons;
1398 work_done = 0;
1399 while ((i != rp) && (work_done < budget)) {
1400 - memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
1401 + RING_COPY_RESPONSE(&queue->rx, i, rx);
1402 memset(extras, 0, sizeof(rinfo.extras));
1403
1404 err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1405 @@ -1135,17 +1172,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
1406
1407 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1408 /* Skip over entries which are actually freelist references */
1409 - if (skb_entry_is_link(&queue->tx_skbs[i]))
1410 + if (!queue->tx_skbs[i])
1411 continue;
1412
1413 - skb = queue->tx_skbs[i].skb;
1414 + skb = queue->tx_skbs[i];
1415 + queue->tx_skbs[i] = NULL;
1416 get_page(queue->grant_tx_page[i]);
1417 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1418 GNTMAP_readonly,
1419 (unsigned long)page_address(queue->grant_tx_page[i]));
1420 queue->grant_tx_page[i] = NULL;
1421 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1422 - add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1423 + add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1424 dev_kfree_skb_irq(skb);
1425 }
1426 }
1427 @@ -1225,6 +1263,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1428 struct netfront_queue *queue = dev_id;
1429 unsigned long flags;
1430
1431 + if (queue->info->broken)
1432 + return IRQ_HANDLED;
1433 +
1434 spin_lock_irqsave(&queue->tx_lock, flags);
1435 xennet_tx_buf_gc(queue);
1436 spin_unlock_irqrestore(&queue->tx_lock, flags);
1437 @@ -1237,6 +1278,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1438 struct netfront_queue *queue = dev_id;
1439 struct net_device *dev = queue->info->netdev;
1440
1441 + if (queue->info->broken)
1442 + return IRQ_HANDLED;
1443 +
1444 if (likely(netif_carrier_ok(dev) &&
1445 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1446 napi_schedule(&queue->napi);
1447 @@ -1258,6 +1302,10 @@ static void xennet_poll_controller(struct net_device *dev)
1448 struct netfront_info *info = netdev_priv(dev);
1449 unsigned int num_queues = dev->real_num_tx_queues;
1450 unsigned int i;
1451 +
1452 + if (info->broken)
1453 + return;
1454 +
1455 for (i = 0; i < num_queues; ++i)
1456 xennet_interrupt(0, &info->queues[i]);
1457 }
1458 @@ -1627,13 +1675,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
1459 snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1460 devid, queue->id);
1461
1462 - /* Initialise tx_skbs as a free chain containing every entry. */
1463 + /* Initialise tx_skb_freelist as a free chain containing every entry. */
1464 queue->tx_skb_freelist = 0;
1465 + queue->tx_pend_queue = TX_LINK_NONE;
1466 for (i = 0; i < NET_TX_RING_SIZE; i++) {
1467 - skb_entry_set_link(&queue->tx_skbs[i], i+1);
1468 + queue->tx_link[i] = i + 1;
1469 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1470 queue->grant_tx_page[i] = NULL;
1471 }
1472 + queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1473
1474 /* Clear out rx_skbs */
1475 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1476 @@ -1838,6 +1888,9 @@ static int talk_to_netback(struct xenbus_device *dev,
1477 if (info->queues)
1478 xennet_destroy_queues(info);
1479
1480 + /* For the case of a reconnect reset the "broken" indicator. */
1481 + info->broken = false;
1482 +
1483 err = xennet_create_queues(info, &num_queues);
1484 if (err < 0) {
1485 xenbus_dev_fatal(dev, err, "creating queues");
1486 diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
1487 index 6ca17a0babae2..1c8d16b0245b1 100644
1488 --- a/drivers/nvme/target/io-cmd-file.c
1489 +++ b/drivers/nvme/target/io-cmd-file.c
1490 @@ -8,6 +8,7 @@
1491 #include <linux/uio.h>
1492 #include <linux/falloc.h>
1493 #include <linux/file.h>
1494 +#include <linux/fs.h>
1495 #include "nvmet.h"
1496
1497 #define NVMET_MAX_MPOOL_BVEC 16
1498 @@ -254,7 +255,8 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
1499
1500 if (req->ns->buffered_io) {
1501 if (likely(!req->f.mpool_alloc) &&
1502 - nvmet_file_execute_io(req, IOCB_NOWAIT))
1503 + (req->ns->file->f_mode & FMODE_NOWAIT) &&
1504 + nvmet_file_execute_io(req, IOCB_NOWAIT))
1505 return;
1506 nvmet_file_submit_buffered_io(req);
1507 } else
1508 diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
1509 index fac1985870765..4341c72446628 100644
1510 --- a/drivers/nvme/target/tcp.c
1511 +++ b/drivers/nvme/target/tcp.c
1512 @@ -631,10 +631,11 @@ static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
1513 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
1514 {
1515 struct nvmet_tcp_queue *queue = cmd->queue;
1516 + int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
1517 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1518 struct kvec iov = {
1519 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
1520 - .iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
1521 + .iov_len = left
1522 };
1523 int ret;
1524
1525 @@ -643,6 +644,10 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
1526 return ret;
1527
1528 cmd->offset += ret;
1529 + left -= ret;
1530 +
1531 + if (left)
1532 + return -EAGAIN;
1533
1534 if (queue->nvme_sq.sqhd_disabled) {
1535 cmd->queue->snd_cmd = NULL;
1536 diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
1537 index 45794ba643d40..9e208294946cd 100644
1538 --- a/drivers/pci/controller/pci-aardvark.c
1539 +++ b/drivers/pci/controller/pci-aardvark.c
1540 @@ -9,6 +9,7 @@
1541 */
1542
1543 #include <linux/delay.h>
1544 +#include <linux/gpio/consumer.h>
1545 #include <linux/interrupt.h>
1546 #include <linux/irq.h>
1547 #include <linux/irqdomain.h>
1548 @@ -17,6 +18,7 @@
1549 #include <linux/init.h>
1550 #include <linux/platform_device.h>
1551 #include <linux/of_address.h>
1552 +#include <linux/of_gpio.h>
1553 #include <linux/of_pci.h>
1554
1555 #include "../pci.h"
1556 @@ -25,21 +27,8 @@
1557 /* PCIe core registers */
1558 #define PCIE_CORE_DEV_ID_REG 0x0
1559 #define PCIE_CORE_CMD_STATUS_REG 0x4
1560 -#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
1561 -#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
1562 -#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
1563 #define PCIE_CORE_DEV_REV_REG 0x8
1564 #define PCIE_CORE_PCIEXP_CAP 0xc0
1565 -#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
1566 -#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
1567 -#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
1568 -#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
1569 -#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
1570 -#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
1571 -#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
1572 -#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
1573 -#define PCIE_CORE_LINK_TRAINING BIT(5)
1574 -#define PCIE_CORE_LINK_WIDTH_SHIFT 20
1575 #define PCIE_CORE_ERR_CAPCTL_REG 0x118
1576 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
1577 #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
1578 @@ -122,6 +111,46 @@
1579 #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
1580 #define PCIE_MSI_DATA_MASK GENMASK(15, 0)
1581
1582 +/* PCIe window configuration */
1583 +#define OB_WIN_BASE_ADDR 0x4c00
1584 +#define OB_WIN_BLOCK_SIZE 0x20
1585 +#define OB_WIN_COUNT 8
1586 +#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
1587 + OB_WIN_BLOCK_SIZE * (win) + \
1588 + (offset))
1589 +#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
1590 +#define OB_WIN_ENABLE BIT(0)
1591 +#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
1592 +#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
1593 +#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
1594 +#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
1595 +#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
1596 +#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)
1597 +#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
1598 +#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
1599 +#define OB_WIN_FUNC_NUM_SHIFT 24
1600 +#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
1601 +#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
1602 +#define OB_WIN_BUS_NUM_BITS_SHIFT 20
1603 +#define OB_WIN_MSG_CODE_ENABLE BIT(22)
1604 +#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
1605 +#define OB_WIN_MSG_CODE_SHIFT 14
1606 +#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
1607 +#define OB_WIN_ATTR_ENABLE BIT(11)
1608 +#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
1609 +#define OB_WIN_ATTR_TC_SHIFT 8
1610 +#define OB_WIN_ATTR_RELAXED BIT(7)
1611 +#define OB_WIN_ATTR_NOSNOOP BIT(6)
1612 +#define OB_WIN_ATTR_POISON BIT(5)
1613 +#define OB_WIN_ATTR_IDO BIT(4)
1614 +#define OB_WIN_TYPE_MASK GENMASK(3, 0)
1615 +#define OB_WIN_TYPE_SHIFT 0
1616 +#define OB_WIN_TYPE_MEM 0x0
1617 +#define OB_WIN_TYPE_IO 0x4
1618 +#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
1619 +#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
1620 +#define OB_WIN_TYPE_MSG 0xc
1621 +
1622 /* LMI registers base address and register offsets */
1623 #define LMI_BASE_ADDR 0x6000
1624 #define CFG_REG (LMI_BASE_ADDR + 0x0)
1625 @@ -237,6 +266,13 @@ struct advk_pcie {
1626 struct platform_device *pdev;
1627 void __iomem *base;
1628 struct list_head resources;
1629 + struct {
1630 + phys_addr_t match;
1631 + phys_addr_t remap;
1632 + phys_addr_t mask;
1633 + u32 actions;
1634 + } wins[OB_WIN_COUNT];
1635 + u8 wins_count;
1636 struct irq_domain *irq_domain;
1637 struct irq_chip irq_chip;
1638 raw_spinlock_t irq_lock;
1639 @@ -249,7 +285,9 @@ struct advk_pcie {
1640 struct mutex msi_used_lock;
1641 u16 msi_msg;
1642 int root_bus_nr;
1643 + int link_gen;
1644 struct pci_bridge_emul bridge;
1645 + struct gpio_desc *reset_gpio;
1646 };
1647
1648 static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
1649 @@ -309,20 +347,16 @@ static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
1650
1651 static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
1652 {
1653 - struct device *dev = &pcie->pdev->dev;
1654 int retries;
1655
1656 /* check if the link is up or not */
1657 for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
1658 - if (advk_pcie_link_up(pcie)) {
1659 - dev_info(dev, "link up\n");
1660 + if (advk_pcie_link_up(pcie))
1661 return 0;
1662 - }
1663
1664 usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
1665 }
1666
1667 - dev_err(dev, "link never came up\n");
1668 return -ETIMEDOUT;
1669 }
1670
1671 @@ -337,9 +371,115 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
1672 }
1673 }
1674
1675 +static void advk_pcie_issue_perst(struct advk_pcie *pcie)
1676 +{
1677 + if (!pcie->reset_gpio)
1678 + return;
1679 +
1680 + /* 10ms delay is needed for some cards */
1681 + dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
1682 + gpiod_set_value_cansleep(pcie->reset_gpio, 1);
1683 + usleep_range(10000, 11000);
1684 + gpiod_set_value_cansleep(pcie->reset_gpio, 0);
1685 +}
1686 +
1687 +static void advk_pcie_train_link(struct advk_pcie *pcie)
1688 +{
1689 + struct device *dev = &pcie->pdev->dev;
1690 + u32 reg;
1691 + int ret;
1692 +
1693 + /*
1694 + * Setup PCIe rev / gen compliance based on device tree property
1695 + * 'max-link-speed' which also forces maximal link speed.
1696 + */
1697 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1698 + reg &= ~PCIE_GEN_SEL_MSK;
1699 + if (pcie->link_gen == 3)
1700 + reg |= SPEED_GEN_3;
1701 + else if (pcie->link_gen == 2)
1702 + reg |= SPEED_GEN_2;
1703 + else
1704 + reg |= SPEED_GEN_1;
1705 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1706 +
1707 + /*
1708 + * Set maximal link speed value also into PCIe Link Control 2 register.
1709 + * Armada 3700 Functional Specification says that default value is based
1710 + * on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
1711 + */
1712 + reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1713 + reg &= ~PCI_EXP_LNKCTL2_TLS;
1714 + if (pcie->link_gen == 3)
1715 + reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
1716 + else if (pcie->link_gen == 2)
1717 + reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
1718 + else
1719 + reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
1720 + advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
1721 +
1722 + /* Enable link training after selecting PCIe generation */
1723 + reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1724 + reg |= LINK_TRAINING_EN;
1725 + advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1726 +
1727 + /*
1728 + * Reset PCIe card via PERST# signal. Some cards are not detected
1729 + * during link training when they are in some non-initial state.
1730 + */
1731 + advk_pcie_issue_perst(pcie);
1732 +
1733 + /*
1734 + * PERST# signal could have been asserted by pinctrl subsystem before
1735 + * probe() callback has been called or issued explicitly by reset gpio
1736 + * function advk_pcie_issue_perst(), making the endpoint going into
1737 + * fundamental reset. As required by PCI Express spec (PCI Express
1738 + * Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
1739 + * Conventional Reset) a delay for at least 100ms after such a reset
1740 + * before sending a Configuration Request to the device is needed.
1741 + * So wait until PCIe link is up. Function advk_pcie_wait_for_link()
1742 + * waits for link at least 900ms.
1743 + */
1744 + ret = advk_pcie_wait_for_link(pcie);
1745 + if (ret < 0)
1746 + dev_err(dev, "link never came up\n");
1747 + else
1748 + dev_info(dev, "link up\n");
1749 +}
1750 +
1751 +/*
1752 + * Set PCIe address window register which could be used for memory
1753 + * mapping.
1754 + */
1755 +static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
1756 + phys_addr_t match, phys_addr_t remap,
1757 + phys_addr_t mask, u32 actions)
1758 +{
1759 + advk_writel(pcie, OB_WIN_ENABLE |
1760 + lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
1761 + advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
1762 + advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
1763 + advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
1764 + advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
1765 + advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
1766 + advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
1767 +}
1768 +
1769 +static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
1770 +{
1771 + advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
1772 + advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
1773 + advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
1774 + advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
1775 + advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
1776 + advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
1777 + advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
1778 +}
1779 +
1780 static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1781 {
1782 u32 reg;
1783 + int i;
1784
1785 /* Set to Direct mode */
1786 reg = advk_readl(pcie, CTRL_CONFIG_REG);
1787 @@ -362,6 +502,31 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1788 reg = (PCI_VENDOR_ID_MARVELL << 16) | PCI_VENDOR_ID_MARVELL;
1789 advk_writel(pcie, reg, VENDOR_ID_REG);
1790
1791 + /*
1792 + * Change Class Code of PCI Bridge device to PCI Bridge (0x600400),
1793 + * because the default value is Mass storage controller (0x010400).
1794 + *
1795 + * Note that this Aardvark PCI Bridge does not have compliant Type 1
1796 + * Configuration Space and it even cannot be accessed via Aardvark's
1797 + * PCI config space access method. Something like config space is
1798 + * available in internal Aardvark registers starting at offset 0x0
1799 + * and is reported as Type 0. In range 0x10 - 0x34 it has totally
1800 + * different registers.
1801 + *
1802 + * Therefore driver uses emulation of PCI Bridge which emulates
1803 + * access to configuration space via internal Aardvark registers or
1804 + * emulated configuration buffer.
1805 + */
1806 + reg = advk_readl(pcie, PCIE_CORE_DEV_REV_REG);
1807 + reg &= ~0xffffff00;
1808 + reg |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
1809 + advk_writel(pcie, reg, PCIE_CORE_DEV_REV_REG);
1810 +
1811 + /* Disable Root Bridge I/O space, memory space and bus mastering */
1812 + reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1813 + reg &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
1814 + advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
1815 +
1816 /* Set Advanced Error Capabilities and Control PF0 register */
1817 reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX |
1818 PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN |
1819 @@ -369,36 +534,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1820 PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
1821 advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);
1822
1823 - /* Set PCIe Device Control and Status 1 PF0 register */
1824 - reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
1825 - (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
1826 - PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
1827 - (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
1828 - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
1829 - advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
1830 + /* Set PCIe Device Control register */
1831 + reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1832 + reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
1833 + reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
1834 + reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
1835 + reg &= ~PCI_EXP_DEVCTL_READRQ;
1836 + reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
1837 + reg |= PCI_EXP_DEVCTL_READRQ_512B;
1838 + advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
1839
1840 /* Program PCIe Control 2 to disable strict ordering */
1841 reg = PCIE_CORE_CTRL2_RESERVED |
1842 PCIE_CORE_CTRL2_TD_ENABLE;
1843 advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1844
1845 - /* Set GEN2 */
1846 - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1847 - reg &= ~PCIE_GEN_SEL_MSK;
1848 - reg |= SPEED_GEN_2;
1849 - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1850 -
1851 /* Set lane X1 */
1852 reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1853 reg &= ~LANE_CNT_MSK;
1854 reg |= LANE_COUNT_1;
1855 advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1856
1857 - /* Enable link training */
1858 - reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
1859 - reg |= LINK_TRAINING_EN;
1860 - advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
1861 -
1862 /* Enable MSI */
1863 reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1864 reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
1865 @@ -423,27 +579,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1866 reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
1867 advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);
1868
1869 + /*
1870 + * Enable AXI address window location generation:
1871 + * When it is enabled, the default outbound window
1872 + * configurations (Default User Field: 0xD0074CFC)
1873 + * are used to transparent address translation for
1874 + * the outbound transactions. Thus, PCIe address
1875 + * windows are not required for transparent memory
1876 + * access when default outbound window configuration
1877 + * is set for memory access.
1878 + */
1879 reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
1880 reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
1881 advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
1882
1883 - /* Bypass the address window mapping for PIO */
1884 + /*
1885 + * Set memory access in Default User Field so it
1886 + * is not required to configure PCIe address for
1887 + * transparent memory access.
1888 + */
1889 + advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);
1890 +
1891 + /*
1892 + * Bypass the address window mapping for PIO:
1893 + * Since PIO access already contains all required
1894 + * info over AXI interface by PIO registers, the
1895 + * address window is not required.
1896 + */
1897 reg = advk_readl(pcie, PIO_CTRL);
1898 reg |= PIO_CTRL_ADDR_WIN_DISABLE;
1899 advk_writel(pcie, reg, PIO_CTRL);
1900
1901 - /* Start link training */
1902 - reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
1903 - reg |= PCIE_CORE_LINK_TRAINING;
1904 - advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
1905 + /*
1906 + * Configure PCIe address windows for non-memory or
1907 + * non-transparent access as by default PCIe uses
1908 + * transparent memory access.
1909 + */
1910 + for (i = 0; i < pcie->wins_count; i++)
1911 + advk_pcie_set_ob_win(pcie, i,
1912 + pcie->wins[i].match, pcie->wins[i].remap,
1913 + pcie->wins[i].mask, pcie->wins[i].actions);
1914
1915 - advk_pcie_wait_for_link(pcie);
1916 + /* Disable remaining PCIe outbound windows */
1917 + for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
1918 + advk_pcie_disable_ob_win(pcie, i);
1919
1920 - reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
1921 - reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
1922 - PCIE_CORE_CMD_IO_ACCESS_EN |
1923 - PCIE_CORE_CMD_MEM_IO_REQ_EN;
1924 - advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG);
1925 + advk_pcie_train_link(pcie);
1926 }
1927
1928 static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u32 *val)
1929 @@ -452,6 +633,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1930 u32 reg;
1931 unsigned int status;
1932 char *strcomp_status, *str_posted;
1933 + int ret;
1934
1935 reg = advk_readl(pcie, PIO_STAT);
1936 status = (reg & PIO_COMPLETION_STATUS_MASK) >>
1937 @@ -476,6 +658,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1938 case PIO_COMPLETION_STATUS_OK:
1939 if (reg & PIO_ERR_STATUS) {
1940 strcomp_status = "COMP_ERR";
1941 + ret = -EFAULT;
1942 break;
1943 }
1944 /* Get the read result */
1945 @@ -483,9 +666,11 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1946 *val = advk_readl(pcie, PIO_RD_DATA);
1947 /* No error */
1948 strcomp_status = NULL;
1949 + ret = 0;
1950 break;
1951 case PIO_COMPLETION_STATUS_UR:
1952 strcomp_status = "UR";
1953 + ret = -EOPNOTSUPP;
1954 break;
1955 case PIO_COMPLETION_STATUS_CRS:
1956 if (allow_crs && val) {
1957 @@ -503,6 +688,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1958 */
1959 *val = CFG_RD_CRS_VAL;
1960 strcomp_status = NULL;
1961 + ret = 0;
1962 break;
1963 }
1964 /* PCIe r4.0, sec 2.3.2, says:
1965 @@ -518,21 +704,24 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1966 * Request and taking appropriate action, e.g., complete the
1967 * Request to the host as a failed transaction.
1968 *
1969 - * To simplify implementation do not re-issue the Configuration
1970 - * Request and complete the Request as a failed transaction.
1971 + * So return -EAGAIN and caller (pci-aardvark.c driver) will
1972 + * re-issue request again up to the PIO_RETRY_CNT retries.
1973 */
1974 strcomp_status = "CRS";
1975 + ret = -EAGAIN;
1976 break;
1977 case PIO_COMPLETION_STATUS_CA:
1978 strcomp_status = "CA";
1979 + ret = -ECANCELED;
1980 break;
1981 default:
1982 strcomp_status = "Unknown";
1983 + ret = -EINVAL;
1984 break;
1985 }
1986
1987 if (!strcomp_status)
1988 - return 0;
1989 + return ret;
1990
1991 if (reg & PIO_NON_POSTED_REQ)
1992 str_posted = "Non-posted";
1993 @@ -542,7 +731,7 @@ static int advk_pcie_check_pio_status(struct advk_pcie *pcie, bool allow_crs, u3
1994 dev_dbg(dev, "%s PIO Response Status: %s, %#x @ %#x\n",
1995 str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS));
1996
1997 - return -EFAULT;
1998 + return ret;
1999 }
2000
2001 static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2002 @@ -550,13 +739,13 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2003 struct device *dev = &pcie->pdev->dev;
2004 int i;
2005
2006 - for (i = 0; i < PIO_RETRY_CNT; i++) {
2007 + for (i = 1; i <= PIO_RETRY_CNT; i++) {
2008 u32 start, isr;
2009
2010 start = advk_readl(pcie, PIO_START);
2011 isr = advk_readl(pcie, PIO_ISR);
2012 if (!start && isr)
2013 - return 0;
2014 + return i;
2015 udelay(PIO_RETRY_DELAY);
2016 }
2017
2018 @@ -564,6 +753,64 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2019 return -ETIMEDOUT;
2020 }
2021
2022 +static pci_bridge_emul_read_status_t
2023 +advk_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
2024 + int reg, u32 *value)
2025 +{
2026 + struct advk_pcie *pcie = bridge->data;
2027 +
2028 + switch (reg) {
2029 + case PCI_COMMAND:
2030 + *value = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
2031 + return PCI_BRIDGE_EMUL_HANDLED;
2032 +
2033 + case PCI_INTERRUPT_LINE: {
2034 + /*
2035 + * From the whole 32bit register we support reading from HW only
2036 + * one bit: PCI_BRIDGE_CTL_BUS_RESET.
2037 + * Other bits are retrieved only from emulated config buffer.
2038 + */
2039 + __le32 *cfgspace = (__le32 *)&bridge->conf;
2040 + u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
2041 + if (advk_readl(pcie, PCIE_CORE_CTRL1_REG) & HOT_RESET_GEN)
2042 + val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
2043 + else
2044 + val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
2045 + *value = val;
2046 + return PCI_BRIDGE_EMUL_HANDLED;
2047 + }
2048 +
2049 + default:
2050 + return PCI_BRIDGE_EMUL_NOT_HANDLED;
2051 + }
2052 +}
2053 +
2054 +static void
2055 +advk_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
2056 + int reg, u32 old, u32 new, u32 mask)
2057 +{
2058 + struct advk_pcie *pcie = bridge->data;
2059 +
2060 + switch (reg) {
2061 + case PCI_COMMAND:
2062 + advk_writel(pcie, new, PCIE_CORE_CMD_STATUS_REG);
2063 + break;
2064 +
2065 + case PCI_INTERRUPT_LINE:
2066 + if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
2067 + u32 val = advk_readl(pcie, PCIE_CORE_CTRL1_REG);
2068 + if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
2069 + val |= HOT_RESET_GEN;
2070 + else
2071 + val &= ~HOT_RESET_GEN;
2072 + advk_writel(pcie, val, PCIE_CORE_CTRL1_REG);
2073 + }
2074 + break;
2075 +
2076 + default:
2077 + break;
2078 + }
2079 +}
2080
2081 static pci_bridge_emul_read_status_t
2082 advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
2083 @@ -665,6 +912,8 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
2084 }
2085
2086 static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
2087 + .read_base = advk_pci_bridge_emul_base_conf_read,
2088 + .write_base = advk_pci_bridge_emul_base_conf_write,
2089 .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
2090 .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
2091 };
2092 @@ -676,37 +925,33 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
2093 static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
2094 {
2095 struct pci_bridge_emul *bridge = &pcie->bridge;
2096 - int ret;
2097
2098 - bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
2099 - bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
2100 + bridge->conf.vendor =
2101 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff);
2102 + bridge->conf.device =
2103 + cpu_to_le16(advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16);
2104 bridge->conf.class_revision =
2105 - advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
2106 + cpu_to_le32(advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff);
2107
2108 /* Support 32 bits I/O addressing */
2109 bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
2110 bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
2111
2112 /* Support 64 bits memory pref */
2113 - bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
2114 - bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
2115 + bridge->conf.pref_mem_base = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2116 + bridge->conf.pref_mem_limit = cpu_to_le16(PCI_PREF_RANGE_TYPE_64);
2117
2118 /* Support interrupt A for MSI feature */
2119 bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
2120
2121 + /* Indicates supports for Completion Retry Status */
2122 + bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2123 +
2124 bridge->has_pcie = true;
2125 bridge->data = pcie;
2126 bridge->ops = &advk_pci_bridge_emul_ops;
2127
2128 - /* PCIe config space can be initialized after pci_bridge_emul_init() */
2129 - ret = pci_bridge_emul_init(bridge, 0);
2130 - if (ret < 0)
2131 - return ret;
2132 -
2133 - /* Indicates supports for Completion Retry Status */
2134 - bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
2135 -
2136 - return 0;
2137 + return pci_bridge_emul_init(bridge, 0);
2138 }
2139
2140 static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2141 @@ -715,6 +960,13 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2142 if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
2143 return false;
2144
2145 + /*
2146 + * If the link goes down after we check for link-up, nothing bad
2147 + * happens but the config access times out.
2148 + */
2149 + if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
2150 + return false;
2151 +
2152 return true;
2153 }
2154
2155 @@ -751,6 +1003,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2156 int where, int size, u32 *val)
2157 {
2158 struct advk_pcie *pcie = bus->sysdata;
2159 + int retry_count;
2160 bool allow_crs;
2161 u32 reg;
2162 int ret;
2163 @@ -773,18 +1026,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2164 (le16_to_cpu(pcie->bridge.pcie_conf.rootctl) &
2165 PCI_EXP_RTCTL_CRSSVE);
2166
2167 - if (advk_pcie_pio_is_running(pcie)) {
2168 - /*
2169 - * If it is possible return Completion Retry Status so caller
2170 - * tries to issue the request again instead of failing.
2171 - */
2172 - if (allow_crs) {
2173 - *val = CFG_RD_CRS_VAL;
2174 - return PCIBIOS_SUCCESSFUL;
2175 - }
2176 - *val = 0xffffffff;
2177 - return PCIBIOS_SET_FAILED;
2178 - }
2179 + if (advk_pcie_pio_is_running(pcie))
2180 + goto try_crs;
2181
2182 /* Program the control register */
2183 reg = advk_readl(pcie, PIO_CTRL);
2184 @@ -803,30 +1046,24 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2185 /* Program the data strobe */
2186 advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
2187
2188 - /* Clear PIO DONE ISR and start the transfer */
2189 - advk_writel(pcie, 1, PIO_ISR);
2190 - advk_writel(pcie, 1, PIO_START);
2191 + retry_count = 0;
2192 + do {
2193 + /* Clear PIO DONE ISR and start the transfer */
2194 + advk_writel(pcie, 1, PIO_ISR);
2195 + advk_writel(pcie, 1, PIO_START);
2196
2197 - ret = advk_pcie_wait_pio(pcie);
2198 - if (ret < 0) {
2199 - /*
2200 - * If it is possible return Completion Retry Status so caller
2201 - * tries to issue the request again instead of failing.
2202 - */
2203 - if (allow_crs) {
2204 - *val = CFG_RD_CRS_VAL;
2205 - return PCIBIOS_SUCCESSFUL;
2206 - }
2207 - *val = 0xffffffff;
2208 - return PCIBIOS_SET_FAILED;
2209 - }
2210 + ret = advk_pcie_wait_pio(pcie);
2211 + if (ret < 0)
2212 + goto try_crs;
2213
2214 - /* Check PIO status and get the read result */
2215 - ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2216 - if (ret < 0) {
2217 - *val = 0xffffffff;
2218 - return PCIBIOS_SET_FAILED;
2219 - }
2220 + retry_count += ret;
2221 +
2222 + /* Check PIO status and get the read result */
2223 + ret = advk_pcie_check_pio_status(pcie, allow_crs, val);
2224 + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2225 +
2226 + if (ret < 0)
2227 + goto fail;
2228
2229 if (size == 1)
2230 *val = (*val >> (8 * (where & 3))) & 0xff;
2231 @@ -834,6 +1071,20 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2232 *val = (*val >> (8 * (where & 3))) & 0xffff;
2233
2234 return PCIBIOS_SUCCESSFUL;
2235 +
2236 +try_crs:
2237 + /*
2238 + * If it is possible, return Completion Retry Status so that caller
2239 + * tries to issue the request again instead of failing.
2240 + */
2241 + if (allow_crs) {
2242 + *val = CFG_RD_CRS_VAL;
2243 + return PCIBIOS_SUCCESSFUL;
2244 + }
2245 +
2246 +fail:
2247 + *val = 0xffffffff;
2248 + return PCIBIOS_SET_FAILED;
2249 }
2250
2251 static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2252 @@ -842,6 +1093,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2253 struct advk_pcie *pcie = bus->sysdata;
2254 u32 reg;
2255 u32 data_strobe = 0x0;
2256 + int retry_count;
2257 int offset;
2258 int ret;
2259
2260 @@ -883,19 +1135,22 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2261 /* Program the data strobe */
2262 advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
2263
2264 - /* Clear PIO DONE ISR and start the transfer */
2265 - advk_writel(pcie, 1, PIO_ISR);
2266 - advk_writel(pcie, 1, PIO_START);
2267 + retry_count = 0;
2268 + do {
2269 + /* Clear PIO DONE ISR and start the transfer */
2270 + advk_writel(pcie, 1, PIO_ISR);
2271 + advk_writel(pcie, 1, PIO_START);
2272
2273 - ret = advk_pcie_wait_pio(pcie);
2274 - if (ret < 0)
2275 - return PCIBIOS_SET_FAILED;
2276 + ret = advk_pcie_wait_pio(pcie);
2277 + if (ret < 0)
2278 + return PCIBIOS_SET_FAILED;
2279
2280 - ret = advk_pcie_check_pio_status(pcie, false, NULL);
2281 - if (ret < 0)
2282 - return PCIBIOS_SET_FAILED;
2283 + retry_count += ret;
2284
2285 - return PCIBIOS_SUCCESSFUL;
2286 + ret = advk_pcie_check_pio_status(pcie, false, NULL);
2287 + } while (ret == -EAGAIN && retry_count < PIO_RETRY_CNT);
2288 +
2289 + return ret < 0 ? PCIBIOS_SET_FAILED : PCIBIOS_SUCCESSFUL;
2290 }
2291
2292 static struct pci_ops advk_pcie_ops = {
2293 @@ -1244,6 +1499,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
2294 struct advk_pcie *pcie;
2295 struct resource *res;
2296 struct pci_host_bridge *bridge;
2297 + struct resource_entry *entry;
2298 int ret, irq;
2299
2300 bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
2301 @@ -1273,6 +1529,102 @@ static int advk_pcie_probe(struct platform_device *pdev)
2302 return ret;
2303 }
2304
2305 + resource_list_for_each_entry(entry, &pcie->resources) {
2306 + resource_size_t start = entry->res->start;
2307 + resource_size_t size = resource_size(entry->res);
2308 + unsigned long type = resource_type(entry->res);
2309 + u64 win_size;
2310 +
2311 + /*
2312 + * Aardvark hardware allows to configure also PCIe window
2313 + * for config type 0 and type 1 mapping, but driver uses
2314 + * only PIO for issuing configuration transfers which does
2315 + * not use PCIe window configuration.
2316 + */
2317 + if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
2318 + type != IORESOURCE_IO)
2319 + continue;
2320 +
2321 + /*
2322 + * Skip transparent memory resources. Default outbound access
2323 + * configuration is set to transparent memory access so it
2324 + * does not need window configuration.
2325 + */
2326 + if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
2327 + entry->offset == 0)
2328 + continue;
2329 +
2330 + /*
2331 + * The n-th PCIe window is configured by tuple (match, remap, mask)
2332 + * and an access to address A uses this window if A matches the
2333 + * match with given mask.
2334 + * So every PCIe window size must be a power of two and every start
2335 + * address must be aligned to window size. Minimal size is 64 KiB
2336 + * because lower 16 bits of mask must be zero. Remapped address
2337 + * may have set only bits from the mask.
2338 + */
2339 + while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
2340 + /* Calculate the largest aligned window size */
2341 + win_size = (1ULL << (fls64(size)-1)) |
2342 + (start ? (1ULL << __ffs64(start)) : 0);
2343 + win_size = 1ULL << __ffs64(win_size);
2344 + if (win_size < 0x10000)
2345 + break;
2346 +
2347 + dev_dbg(dev,
2348 + "Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
2349 + pcie->wins_count, (unsigned long long)start,
2350 + (unsigned long long)start + win_size, type);
2351 +
2352 + if (type == IORESOURCE_IO) {
2353 + pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
2354 + pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
2355 + } else {
2356 + pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
2357 + pcie->wins[pcie->wins_count].match = start;
2358 + }
2359 + pcie->wins[pcie->wins_count].remap = start - entry->offset;
2360 + pcie->wins[pcie->wins_count].mask = ~(win_size - 1);
2361 +
2362 + if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
2363 + break;
2364 +
2365 + start += win_size;
2366 + size -= win_size;
2367 + pcie->wins_count++;
2368 + }
2369 +
2370 + if (size > 0) {
2371 + dev_err(&pcie->pdev->dev,
2372 + "Invalid PCIe region [0x%llx-0x%llx]\n",
2373 + (unsigned long long)entry->res->start,
2374 + (unsigned long long)entry->res->end + 1);
2375 + return -EINVAL;
2376 + }
2377 + }
2378 +
2379 + pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node,
2380 + "reset-gpios", 0,
2381 + GPIOD_OUT_LOW,
2382 + "pcie1-reset");
2383 + ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
2384 + if (ret) {
2385 + if (ret == -ENOENT) {
2386 + pcie->reset_gpio = NULL;
2387 + } else {
2388 + if (ret != -EPROBE_DEFER)
2389 + dev_err(dev, "Failed to get reset-gpio: %i\n",
2390 + ret);
2391 + return ret;
2392 + }
2393 + }
2394 +
2395 + ret = of_pci_get_max_link_speed(dev->of_node);
2396 + if (ret <= 0 || ret > 3)
2397 + pcie->link_gen = 3;
2398 + else
2399 + pcie->link_gen = ret;
2400 +
2401 advk_pcie_setup_hw(pcie);
2402
2403 ret = advk_sw_pci_bridge_init(pcie);
2404 diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
2405 index b3d63e319bb39..3026346ccb18c 100644
2406 --- a/drivers/pci/pci-bridge-emul.c
2407 +++ b/drivers/pci/pci-bridge-emul.c
2408 @@ -21,8 +21,9 @@
2409 #include "pci-bridge-emul.h"
2410
2411 #define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
2412 +#define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2)
2413 #define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
2414 -#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
2415 +#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_CAP_PCIE_SIZEOF)
2416
2417 struct pci_bridge_reg_behavior {
2418 /* Read-only bits */
2419 @@ -38,7 +39,8 @@ struct pci_bridge_reg_behavior {
2420 u32 rsvd;
2421 };
2422
2423 -static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
2424 +static const
2425 +struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
2426 [PCI_VENDOR_ID / 4] = { .ro = ~0 },
2427 [PCI_COMMAND / 4] = {
2428 .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
2429 @@ -173,7 +175,8 @@ static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
2430 },
2431 };
2432
2433 -static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
2434 +static const
2435 +struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = {
2436 [PCI_CAP_LIST_ID / 4] = {
2437 /*
2438 * Capability ID, Next Capability Pointer and
2439 @@ -270,6 +273,8 @@ static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
2440 int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
2441 unsigned int flags)
2442 {
2443 + BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
2444 +
2445 bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
2446 bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
2447 bridge->conf.cache_line_size = 0x10;
2448 diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2449 index 83e585c5a6132..f56add78d58ce 100644
2450 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2451 +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
2452 @@ -166,10 +166,14 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2453 PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
2454 PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
2455 PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
2456 - PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
2457 - PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
2458 - PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
2459 - PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
2460 + PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
2461 + "pwm", "led"),
2462 + PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
2463 + "pwm", "led"),
2464 + PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
2465 + "pwm", "led"),
2466 + PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
2467 + "pwm", "led"),
2468 PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
2469 PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
2470 PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
2471 @@ -183,11 +187,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
2472 PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
2473 BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
2474 18, 2, "gpio", "uart"),
2475 - PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
2476 - PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
2477 - PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
2478 - PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
2479 -
2480 };
2481
2482 static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
2483 diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2484 index 3654cfc4376fa..97c1f242ef0a3 100644
2485 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2486 +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
2487 @@ -3387,7 +3387,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
2488
2489 shost_for_each_device(sdev, ioc->shost) {
2490 sas_device_priv_data = sdev->hostdata;
2491 - if (!sas_device_priv_data)
2492 + if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
2493 continue;
2494 if (sas_device_priv_data->sas_target->sas_address
2495 != sas_address)
2496 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
2497 index 16432d42a50aa..6faf1d6451b0c 100644
2498 --- a/drivers/scsi/scsi_sysfs.c
2499 +++ b/drivers/scsi/scsi_sysfs.c
2500 @@ -796,7 +796,7 @@ store_state_field(struct device *dev, struct device_attribute *attr,
2501
2502 mutex_lock(&sdev->state_mutex);
2503 if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) {
2504 - ret = count;
2505 + ret = 0;
2506 } else {
2507 ret = scsi_device_set_state(sdev, state);
2508 if (ret == 0 && state == SDEV_RUNNING)
2509 diff --git a/drivers/staging/fbtft/fb_ssd1351.c b/drivers/staging/fbtft/fb_ssd1351.c
2510 index cf263a58a1489..6fd549a424d53 100644
2511 --- a/drivers/staging/fbtft/fb_ssd1351.c
2512 +++ b/drivers/staging/fbtft/fb_ssd1351.c
2513 @@ -187,7 +187,6 @@ static struct fbtft_display display = {
2514 },
2515 };
2516
2517 -#ifdef CONFIG_FB_BACKLIGHT
2518 static int update_onboard_backlight(struct backlight_device *bd)
2519 {
2520 struct fbtft_par *par = bl_get_data(bd);
2521 @@ -231,9 +230,6 @@ static void register_onboard_backlight(struct fbtft_par *par)
2522 if (!par->fbtftops.unregister_backlight)
2523 par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2524 }
2525 -#else
2526 -static void register_onboard_backlight(struct fbtft_par *par) { };
2527 -#endif
2528
2529 FBTFT_REGISTER_DRIVER(DRVNAME, "solomon,ssd1351", &display);
2530
2531 diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
2532 index bc53d68bfcaa3..771697508cec8 100644
2533 --- a/drivers/staging/fbtft/fbtft-core.c
2534 +++ b/drivers/staging/fbtft/fbtft-core.c
2535 @@ -136,7 +136,6 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
2536 }
2537 #endif
2538
2539 -#ifdef CONFIG_FB_BACKLIGHT
2540 static int fbtft_backlight_update_status(struct backlight_device *bd)
2541 {
2542 struct fbtft_par *par = bl_get_data(bd);
2543 @@ -169,6 +168,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par)
2544 par->info->bl_dev = NULL;
2545 }
2546 }
2547 +EXPORT_SYMBOL(fbtft_unregister_backlight);
2548
2549 static const struct backlight_ops fbtft_bl_ops = {
2550 .get_brightness = fbtft_backlight_get_brightness,
2551 @@ -206,12 +206,7 @@ void fbtft_register_backlight(struct fbtft_par *par)
2552 if (!par->fbtftops.unregister_backlight)
2553 par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
2554 }
2555 -#else
2556 -void fbtft_register_backlight(struct fbtft_par *par) { };
2557 -void fbtft_unregister_backlight(struct fbtft_par *par) { };
2558 -#endif
2559 EXPORT_SYMBOL(fbtft_register_backlight);
2560 -EXPORT_SYMBOL(fbtft_unregister_backlight);
2561
2562 static void fbtft_set_addr_win(struct fbtft_par *par, int xs, int ys, int xe,
2563 int ye)
2564 @@ -860,13 +855,11 @@ int fbtft_register_framebuffer(struct fb_info *fb_info)
2565 fb_info->fix.smem_len >> 10, text1,
2566 HZ / fb_info->fbdefio->delay, text2);
2567
2568 -#ifdef CONFIG_FB_BACKLIGHT
2569 /* Turn on backlight if available */
2570 if (fb_info->bl_dev) {
2571 fb_info->bl_dev->props.power = FB_BLANK_UNBLANK;
2572 fb_info->bl_dev->ops->update_status(fb_info->bl_dev);
2573 }
2574 -#endif
2575
2576 return 0;
2577
2578 diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2579 index c702ee9691b1d..bcbf0c8cd4209 100644
2580 --- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2581 +++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
2582 @@ -2559,13 +2559,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
2583 free_irq(dev->irq, dev);
2584 priv->irq = 0;
2585 }
2586 - free_rtllib(dev);
2587
2588 if (dev->mem_start != 0) {
2589 iounmap((void __iomem *)dev->mem_start);
2590 release_mem_region(pci_resource_start(pdev, 1),
2591 pci_resource_len(pdev, 1));
2592 }
2593 +
2594 + free_rtllib(dev);
2595 } else {
2596 priv = rtllib_priv(dev);
2597 }
2598 diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
2599 index 5ef08905fe05c..15da02aeee948 100644
2600 --- a/drivers/tty/hvc/hvc_xen.c
2601 +++ b/drivers/tty/hvc/hvc_xen.c
2602 @@ -86,7 +86,11 @@ static int __write_console(struct xencons_info *xencons,
2603 cons = intf->out_cons;
2604 prod = intf->out_prod;
2605 mb(); /* update queue values before going on */
2606 - BUG_ON((prod - cons) > sizeof(intf->out));
2607 +
2608 + if ((prod - cons) > sizeof(intf->out)) {
2609 + pr_err_once("xencons: Illegal ring page indices");
2610 + return -EINVAL;
2611 + }
2612
2613 while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
2614 intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
2615 @@ -114,7 +118,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
2616 */
2617 while (len) {
2618 int sent = __write_console(cons, data, len);
2619 -
2620 +
2621 + if (sent < 0)
2622 + return sent;
2623 +
2624 data += sent;
2625 len -= sent;
2626
2627 @@ -138,7 +145,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
2628 cons = intf->in_cons;
2629 prod = intf->in_prod;
2630 mb(); /* get pointers before reading ring */
2631 - BUG_ON((prod - cons) > sizeof(intf->in));
2632 +
2633 + if ((prod - cons) > sizeof(intf->in)) {
2634 + pr_err_once("xencons: Illegal ring page indices");
2635 + return -EINVAL;
2636 + }
2637
2638 while (cons != prod && recv < len)
2639 buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
2640 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2641 index 303e8b3c1bdae..d7ab2e88631a0 100644
2642 --- a/drivers/usb/core/hub.c
2643 +++ b/drivers/usb/core/hub.c
2644 @@ -4609,8 +4609,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
2645 if (oldspeed == USB_SPEED_LOW)
2646 delay = HUB_LONG_RESET_TIME;
2647
2648 - mutex_lock(hcd->address0_mutex);
2649 -
2650 /* Reset the device; full speed may morph to high speed */
2651 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2652 retval = hub_port_reset(hub, port1, udev, delay, false);
2653 @@ -4925,7 +4923,6 @@ fail:
2654 hub_port_disable(hub, port1, 0);
2655 update_devnum(udev, devnum); /* for disconnect processing */
2656 }
2657 - mutex_unlock(hcd->address0_mutex);
2658 return retval;
2659 }
2660
2661 @@ -5015,6 +5012,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2662 struct usb_port *port_dev = hub->ports[port1 - 1];
2663 struct usb_device *udev = port_dev->child;
2664 static int unreliable_port = -1;
2665 + bool retry_locked;
2666
2667 /* Disconnect any existing devices under this port */
2668 if (udev) {
2669 @@ -5070,7 +5068,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2670 unit_load = 100;
2671
2672 status = 0;
2673 +
2674 for (i = 0; i < SET_CONFIG_TRIES; i++) {
2675 + usb_lock_port(port_dev);
2676 + mutex_lock(hcd->address0_mutex);
2677 + retry_locked = true;
2678
2679 /* reallocate for each attempt, since references
2680 * to the previous one can escape in various ways
2681 @@ -5079,6 +5081,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2682 if (!udev) {
2683 dev_err(&port_dev->dev,
2684 "couldn't allocate usb_device\n");
2685 + mutex_unlock(hcd->address0_mutex);
2686 + usb_unlock_port(port_dev);
2687 goto done;
2688 }
2689
2690 @@ -5100,12 +5104,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
2691 }
2692
2693 /* reset (non-USB 3.0 devices) and get descriptor */
2694 - usb_lock_port(port_dev);
2695 status = hub_port_init(hub, udev, port1, i);
2696 - usb_unlock_port(port_dev);
2697 if (status < 0)
2698 goto loop;
2699
2700 + mutex_unlock(hcd->address0_mutex);
2701 + usb_unlock_port(port_dev);
2702 + retry_locked = false;
2703 +
2704 if (udev->quirks & USB_QUIRK_DELAY_INIT)
2705 msleep(2000);
2706
2707 @@ -5198,6 +5204,10 @@ loop:
2708 usb_ep0_reinit(udev);
2709 release_devnum(udev);
2710 hub_free_dev(udev);
2711 + if (retry_locked) {
2712 + mutex_unlock(hcd->address0_mutex);
2713 + usb_unlock_port(port_dev);
2714 + }
2715 usb_put_dev(udev);
2716 if ((status == -ENOTCONN) || (status == -ENOTSUPP))
2717 break;
2718 @@ -5794,6 +5804,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2719 bos = udev->bos;
2720 udev->bos = NULL;
2721
2722 + mutex_lock(hcd->address0_mutex);
2723 +
2724 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
2725
2726 /* ep0 maxpacket size may change; let the HCD know about it.
2727 @@ -5803,6 +5815,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
2728 if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
2729 break;
2730 }
2731 + mutex_unlock(hcd->address0_mutex);
2732
2733 if (ret < 0)
2734 goto re_enumerate;
2735 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
2736 index e8b25dae09499..249e8e6aa9282 100644
2737 --- a/drivers/usb/dwc2/gadget.c
2738 +++ b/drivers/usb/dwc2/gadget.c
2739 @@ -1198,6 +1198,8 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
2740 }
2741 ctrl |= DXEPCTL_CNAK;
2742 } else {
2743 + hs_req->req.frame_number = hs_ep->target_frame;
2744 + hs_req->req.actual = 0;
2745 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
2746 return;
2747 }
2748 @@ -2855,9 +2857,12 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
2749
2750 do {
2751 hs_req = get_ep_head(hs_ep);
2752 - if (hs_req)
2753 + if (hs_req) {
2754 + hs_req->req.frame_number = hs_ep->target_frame;
2755 + hs_req->req.actual = 0;
2756 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
2757 -ENODATA);
2758 + }
2759 dwc2_gadget_incr_frame_num(hs_ep);
2760 /* Update current frame number value. */
2761 hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
2762 @@ -2910,8 +2915,11 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
2763
2764 while (dwc2_gadget_target_frame_elapsed(ep)) {
2765 hs_req = get_ep_head(ep);
2766 - if (hs_req)
2767 + if (hs_req) {
2768 + hs_req->req.frame_number = ep->target_frame;
2769 + hs_req->req.actual = 0;
2770 dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
2771 + }
2772
2773 dwc2_gadget_incr_frame_num(ep);
2774 /* Update current frame number value. */
2775 @@ -3000,8 +3008,11 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
2776
2777 while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
2778 hs_req = get_ep_head(hs_ep);
2779 - if (hs_req)
2780 + if (hs_req) {
2781 + hs_req->req.frame_number = hs_ep->target_frame;
2782 + hs_req->req.actual = 0;
2783 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
2784 + }
2785
2786 dwc2_gadget_incr_frame_num(hs_ep);
2787 /* Update current frame number value. */
2788 diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
2789 index 68bbac64b7536..94af71e9856f2 100644
2790 --- a/drivers/usb/dwc2/hcd_queue.c
2791 +++ b/drivers/usb/dwc2/hcd_queue.c
2792 @@ -59,7 +59,7 @@
2793 #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
2794
2795 /* If we get a NAK, wait this long before retrying */
2796 -#define DWC2_RETRY_WAIT_DELAY 1*1E6L
2797 +#define DWC2_RETRY_WAIT_DELAY (1 * NSEC_PER_MSEC)
2798
2799 /**
2800 * dwc2_periodic_channel_available() - Checks that a channel is available for a
2801 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2802 index a1e9cbe518c74..74203ed5479fa 100644
2803 --- a/drivers/usb/serial/option.c
2804 +++ b/drivers/usb/serial/option.c
2805 @@ -1267,6 +1267,8 @@ static const struct usb_device_id option_ids[] = {
2806 .driver_info = NCTRL(2) },
2807 { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
2808 .driver_info = NCTRL(0) | ZLP },
2809 + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
2810 + .driver_info = NCTRL(0) | ZLP },
2811 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
2812 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
2813 .driver_info = RSVD(1) },
2814 @@ -2094,6 +2096,9 @@ static const struct usb_device_id option_ids[] = {
2815 { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
2816 { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
2817 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
2818 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
2819 + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
2820 + .driver_info = RSVD(4) },
2821 { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
2822 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
2823 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
2824 diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
2825 index b498960ff72b5..5e661bae39972 100644
2826 --- a/drivers/usb/typec/tcpm/fusb302.c
2827 +++ b/drivers/usb/typec/tcpm/fusb302.c
2828 @@ -669,25 +669,27 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
2829 ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
2830 FUSB_REG_MASK_BC_LVL |
2831 FUSB_REG_MASK_COMP_CHNG,
2832 - FUSB_REG_MASK_COMP_CHNG);
2833 + FUSB_REG_MASK_BC_LVL);
2834 if (ret < 0) {
2835 fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
2836 ret);
2837 goto done;
2838 }
2839 chip->intr_comp_chng = true;
2840 + chip->intr_bc_lvl = false;
2841 break;
2842 case TYPEC_CC_RD:
2843 ret = fusb302_i2c_mask_write(chip, FUSB_REG_MASK,
2844 FUSB_REG_MASK_BC_LVL |
2845 FUSB_REG_MASK_COMP_CHNG,
2846 - FUSB_REG_MASK_BC_LVL);
2847 + FUSB_REG_MASK_COMP_CHNG);
2848 if (ret < 0) {
2849 fusb302_log(chip, "cannot set SRC interrupt, ret=%d",
2850 ret);
2851 goto done;
2852 }
2853 chip->intr_bc_lvl = true;
2854 + chip->intr_comp_chng = false;
2855 break;
2856 default:
2857 break;
2858 diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
2859 index f21f5bfbb78dc..2bf7cb01da9a3 100644
2860 --- a/drivers/vhost/vsock.c
2861 +++ b/drivers/vhost/vsock.c
2862 @@ -491,7 +491,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
2863 virtio_transport_free_pkt(pkt);
2864
2865 len += sizeof(pkt->hdr);
2866 - vhost_add_used(vq, head, len);
2867 + vhost_add_used(vq, head, 0);
2868 total_len += len;
2869 added = true;
2870 } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
2871 diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
2872 index 652894d619677..b911a91bce6b7 100644
2873 --- a/drivers/xen/xenbus/xenbus_probe.c
2874 +++ b/drivers/xen/xenbus/xenbus_probe.c
2875 @@ -846,7 +846,7 @@ static struct notifier_block xenbus_resume_nb = {
2876
2877 static int __init xenbus_init(void)
2878 {
2879 - int err = 0;
2880 + int err;
2881 uint64_t v = 0;
2882 xen_store_domain_type = XS_UNKNOWN;
2883
2884 @@ -886,6 +886,29 @@ static int __init xenbus_init(void)
2885 err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
2886 if (err)
2887 goto out_error;
2888 + /*
2889 + * Uninitialized hvm_params are zero and return no error.
2890 + * Although it is theoretically possible to have
2891 + * HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
2892 + * not zero when valid. If zero, it means that Xenstore hasn't
2893 + * been properly initialized. Instead of attempting to map a
2894 + * wrong guest physical address return error.
2895 + *
2896 + * Also recognize all bits set as an invalid value.
2897 + */
2898 + if (!v || !~v) {
2899 + err = -ENOENT;
2900 + goto out_error;
2901 + }
2902 + /* Avoid truncation on 32-bit. */
2903 +#if BITS_PER_LONG == 32
2904 + if (v > ULONG_MAX) {
2905 + pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
2906 + __func__, v);
2907 + err = -EINVAL;
2908 + goto out_error;
2909 + }
2910 +#endif
2911 xen_store_gfn = (unsigned long)v;
2912 xen_store_interface =
2913 xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
2914 @@ -920,8 +943,10 @@ static int __init xenbus_init(void)
2915 */
2916 proc_create_mount_point("xen");
2917 #endif
2918 + return 0;
2919
2920 out_error:
2921 + xen_store_domain_type = XS_UNKNOWN;
2922 return err;
2923 }
2924
2925 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2926 index a9746af5a44db..03c85beecec10 100644
2927 --- a/fs/cifs/file.c
2928 +++ b/fs/cifs/file.c
2929 @@ -2577,12 +2577,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2930 tcon = tlink_tcon(smbfile->tlink);
2931 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2932 server = tcon->ses->server;
2933 - if (server->ops->flush)
2934 - rc = server->ops->flush(xid, tcon, &smbfile->fid);
2935 - else
2936 + if (server->ops->flush == NULL) {
2937 rc = -ENOSYS;
2938 + goto strict_fsync_exit;
2939 + }
2940 +
2941 + if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2942 + smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2943 + if (smbfile) {
2944 + rc = server->ops->flush(xid, tcon, &smbfile->fid);
2945 + cifsFileInfo_put(smbfile);
2946 + } else
2947 + cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2948 + } else
2949 + rc = server->ops->flush(xid, tcon, &smbfile->fid);
2950 }
2951
2952 +strict_fsync_exit:
2953 free_xid(xid);
2954 return rc;
2955 }
2956 @@ -2594,6 +2605,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2957 struct cifs_tcon *tcon;
2958 struct TCP_Server_Info *server;
2959 struct cifsFileInfo *smbfile = file->private_data;
2960 + struct inode *inode = file_inode(file);
2961 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2962
2963 rc = file_write_and_wait_range(file, start, end);
2964 @@ -2608,12 +2620,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2965 tcon = tlink_tcon(smbfile->tlink);
2966 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2967 server = tcon->ses->server;
2968 - if (server->ops->flush)
2969 - rc = server->ops->flush(xid, tcon, &smbfile->fid);
2970 - else
2971 + if (server->ops->flush == NULL) {
2972 rc = -ENOSYS;
2973 + goto fsync_exit;
2974 + }
2975 +
2976 + if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2977 + smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2978 + if (smbfile) {
2979 + rc = server->ops->flush(xid, tcon, &smbfile->fid);
2980 + cifsFileInfo_put(smbfile);
2981 + } else
2982 + cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2983 + } else
2984 + rc = server->ops->flush(xid, tcon, &smbfile->fid);
2985 }
2986
2987 +fsync_exit:
2988 free_xid(xid);
2989 return rc;
2990 }
2991 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
2992 index 4cb182c20eedd..0cd1d51dde06d 100644
2993 --- a/fs/f2fs/node.c
2994 +++ b/fs/f2fs/node.c
2995 @@ -1385,6 +1385,7 @@ page_hit:
2996 nid, nid_of_node(page), ino_of_node(page),
2997 ofs_of_node(page), cpver_of_node(page),
2998 next_blkaddr_of_node(page));
2999 + set_sbi_flag(sbi, SBI_NEED_FSCK);
3000 err = -EINVAL;
3001 out_err:
3002 ClearPageUptodate(page);
3003 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
3004 index fa4d2aba5a701..64d6c8c9f1ff2 100644
3005 --- a/fs/fuse/dev.c
3006 +++ b/fs/fuse/dev.c
3007 @@ -839,17 +839,17 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
3008 goto out_put_old;
3009 }
3010
3011 + get_page(newpage);
3012 +
3013 + if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3014 + lru_cache_add_file(newpage);
3015 +
3016 /*
3017 * Release while we have extra ref on stolen page. Otherwise
3018 * anon_pipe_buf_release() might think the page can be reused.
3019 */
3020 pipe_buf_release(cs->pipe, buf);
3021
3022 - get_page(newpage);
3023 -
3024 - if (!(buf->flags & PIPE_BUF_FLAG_LRU))
3025 - lru_cache_add_file(newpage);
3026 -
3027 err = 0;
3028 spin_lock(&cs->req->waitq.lock);
3029 if (test_bit(FR_ABORTED, &cs->req->flags))
3030 diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
3031 index aed865a846296..2b78f7b8d5467 100644
3032 --- a/fs/nfs/nfs42xdr.c
3033 +++ b/fs/nfs/nfs42xdr.c
3034 @@ -769,8 +769,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
3035 status = decode_clone(xdr);
3036 if (status)
3037 goto out;
3038 - status = decode_getfattr(xdr, res->dst_fattr, res->server);
3039 -
3040 + decode_getfattr(xdr, res->dst_fattr, res->server);
3041 out:
3042 res->rpc_status = status;
3043 return status;
3044 diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
3045 index 080ca9d5eccbb..b1102a31a1085 100644
3046 --- a/fs/proc/vmcore.c
3047 +++ b/fs/proc/vmcore.c
3048 @@ -125,9 +125,13 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3049 nr_bytes = count;
3050
3051 /* If pfn is not ram, return zeros for sparse dump files */
3052 - if (pfn_is_ram(pfn) == 0)
3053 - memset(buf, 0, nr_bytes);
3054 - else {
3055 + if (pfn_is_ram(pfn) == 0) {
3056 + tmp = 0;
3057 + if (!userbuf)
3058 + memset(buf, 0, nr_bytes);
3059 + else if (clear_user(buf, nr_bytes))
3060 + tmp = -EFAULT;
3061 + } else {
3062 if (encrypted)
3063 tmp = copy_oldmem_page_encrypted(pfn, buf,
3064 nr_bytes,
3065 @@ -136,10 +140,10 @@ ssize_t read_from_oldmem(char *buf, size_t count,
3066 else
3067 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
3068 offset, userbuf);
3069 -
3070 - if (tmp < 0)
3071 - return tmp;
3072 }
3073 + if (tmp < 0)
3074 + return tmp;
3075 +
3076 *ppos += nr_bytes;
3077 count -= nr_bytes;
3078 buf += nr_bytes;
3079 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
3080 index c309f43bde45e..f8c4d9f97819f 100644
3081 --- a/include/linux/ipc_namespace.h
3082 +++ b/include/linux/ipc_namespace.h
3083 @@ -130,6 +130,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3084 return ns;
3085 }
3086
3087 +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3088 +{
3089 + if (ns) {
3090 + if (refcount_inc_not_zero(&ns->count))
3091 + return ns;
3092 + }
3093 +
3094 + return NULL;
3095 +}
3096 +
3097 extern void put_ipc_ns(struct ipc_namespace *ns);
3098 #else
3099 static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
3100 @@ -146,6 +156,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
3101 return ns;
3102 }
3103
3104 +static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
3105 +{
3106 + return ns;
3107 +}
3108 +
3109 static inline void put_ipc_ns(struct ipc_namespace *ns)
3110 {
3111 }
3112 diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
3113 index 4b1c3b664f517..36f3011ab6013 100644
3114 --- a/include/linux/sched/task.h
3115 +++ b/include/linux/sched/task.h
3116 @@ -157,7 +157,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
3117 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
3118 * subscriptions and synchronises with wait4(). Also used in procfs. Also
3119 * pins the final release of task.io_context. Also protects ->cpuset and
3120 - * ->cgroup.subsys[]. And ->vfork_done.
3121 + * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
3122 *
3123 * Nests both inside and outside of read_lock(&tasklist_lock).
3124 * It must not be nested with write_lock_irq(&tasklist_lock),
3125 diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
3126 index bd0f1595bdc71..05ecaefeb6322 100644
3127 --- a/include/net/ip6_fib.h
3128 +++ b/include/net/ip6_fib.h
3129 @@ -451,6 +451,7 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3130 struct fib6_config *cfg, gfp_t gfp_flags,
3131 struct netlink_ext_ack *extack);
3132 void fib6_nh_release(struct fib6_nh *fib6_nh);
3133 +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh);
3134
3135 int call_fib6_entry_notifiers(struct net *net,
3136 enum fib_event_type event_type,
3137 diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
3138 index 3e7d2c0e79ca1..af9e127779adf 100644
3139 --- a/include/net/ipv6_stubs.h
3140 +++ b/include/net/ipv6_stubs.h
3141 @@ -47,6 +47,7 @@ struct ipv6_stub {
3142 struct fib6_config *cfg, gfp_t gfp_flags,
3143 struct netlink_ext_ack *extack);
3144 void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
3145 + void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
3146 void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
3147 int (*ip6_del_rt)(struct net *net, struct fib6_info *rt);
3148 void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
3149 diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
3150 index 33979017b7824..004e49f748419 100644
3151 --- a/include/net/nfc/nci_core.h
3152 +++ b/include/net/nfc/nci_core.h
3153 @@ -30,6 +30,7 @@ enum nci_flag {
3154 NCI_UP,
3155 NCI_DATA_EXCHANGE,
3156 NCI_DATA_EXCHANGE_TO,
3157 + NCI_UNREG,
3158 };
3159
3160 /* NCI device states */
3161 diff --git a/include/net/nl802154.h b/include/net/nl802154.h
3162 index ddcee128f5d9a..145acb8f25095 100644
3163 --- a/include/net/nl802154.h
3164 +++ b/include/net/nl802154.h
3165 @@ -19,6 +19,8 @@
3166 *
3167 */
3168
3169 +#include <linux/types.h>
3170 +
3171 #define NL802154_GENL_NAME "nl802154"
3172
3173 enum nl802154_commands {
3174 @@ -150,10 +152,9 @@ enum nl802154_attrs {
3175 };
3176
3177 enum nl802154_iftype {
3178 - /* for backwards compatibility TODO */
3179 - NL802154_IFTYPE_UNSPEC = -1,
3180 + NL802154_IFTYPE_UNSPEC = (~(__u32)0),
3181
3182 - NL802154_IFTYPE_NODE,
3183 + NL802154_IFTYPE_NODE = 0,
3184 NL802154_IFTYPE_MONITOR,
3185 NL802154_IFTYPE_COORD,
3186
3187 diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
3188 index 3f40501fc60b1..b39cdbc522ec7 100644
3189 --- a/include/xen/interface/io/ring.h
3190 +++ b/include/xen/interface/io/ring.h
3191 @@ -1,21 +1,53 @@
3192 -/* SPDX-License-Identifier: GPL-2.0 */
3193 /******************************************************************************
3194 * ring.h
3195 *
3196 * Shared producer-consumer ring macros.
3197 *
3198 + * Permission is hereby granted, free of charge, to any person obtaining a copy
3199 + * of this software and associated documentation files (the "Software"), to
3200 + * deal in the Software without restriction, including without limitation the
3201 + * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
3202 + * sell copies of the Software, and to permit persons to whom the Software is
3203 + * furnished to do so, subject to the following conditions:
3204 + *
3205 + * The above copyright notice and this permission notice shall be included in
3206 + * all copies or substantial portions of the Software.
3207 + *
3208 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3209 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3210 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
3211 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
3212 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
3213 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
3214 + * DEALINGS IN THE SOFTWARE.
3215 + *
3216 * Tim Deegan and Andrew Warfield November 2004.
3217 */
3218
3219 #ifndef __XEN_PUBLIC_IO_RING_H__
3220 #define __XEN_PUBLIC_IO_RING_H__
3221
3222 +/*
3223 + * When #include'ing this header, you need to provide the following
3224 + * declaration upfront:
3225 + *   - standard integer types (uint8_t, uint16_t, etc)
3226 + * They are provided by stdint.h of the standard headers.
3227 + *
3228 + * In addition, if you intend to use the FLEX macros, you also need to
3229 + * provide the following, before invoking the FLEX macros:
3230 + * - size_t
3231 + * - memcpy
3232 + * - grant_ref_t
3233 + * These declarations are provided by string.h of the standard headers,
3234 + * and grant_table.h from the Xen public headers.
3235 + */
3236 +
3237 #include <xen/interface/grant_table.h>
3238
3239 typedef unsigned int RING_IDX;
3240
3241 /* Round a 32-bit unsigned constant down to the nearest power of two. */
3242 -#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3243 +#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
3244 #define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
3245 #define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
3246 #define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))
3247 @@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
3248 * A ring contains as many entries as will fit, rounded down to the nearest
3249 * power of two (so we can mask with (size-1) to loop around).
3250 */
3251 -#define __CONST_RING_SIZE(_s, _sz) \
3252 - (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3253 - sizeof(((struct _s##_sring *)0)->ring[0])))
3254 -
3255 +#define __CONST_RING_SIZE(_s, _sz) \
3256 + (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
3257 + sizeof(((struct _s##_sring *)0)->ring[0])))
3258 /*
3259 * The same for passing in an actual pointer instead of a name tag.
3260 */
3261 -#define __RING_SIZE(_s, _sz) \
3262 - (__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3263 +#define __RING_SIZE(_s, _sz) \
3264 + (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
3265
3266 /*
3267 * Macros to make the correct C datatypes for a new kind of ring.
3268 *
3269 * To make a new ring datatype, you need to have two message structures,
3270 - * let's say struct request, and struct response already defined.
3271 + * let's say request_t, and response_t already defined.
3272 *
3273 * In a header where you want the ring datatype declared, you then do:
3274 *
3275 - * DEFINE_RING_TYPES(mytag, struct request, struct response);
3276 + * DEFINE_RING_TYPES(mytag, request_t, response_t);
3277 *
3278 * These expand out to give you a set of types, as you can see below.
3279 * The most important of these are:
3280 *
3281 - * struct mytag_sring - The shared ring.
3282 - * struct mytag_front_ring - The 'front' half of the ring.
3283 - * struct mytag_back_ring - The 'back' half of the ring.
3284 + * mytag_sring_t - The shared ring.
3285 + * mytag_front_ring_t - The 'front' half of the ring.
3286 + * mytag_back_ring_t - The 'back' half of the ring.
3287 *
3288 * To initialize a ring in your code you need to know the location and size
3289 * of the shared memory area (PAGE_SIZE, for instance). To initialise
3290 * the front half:
3291 *
3292 - * struct mytag_front_ring front_ring;
3293 - * SHARED_RING_INIT((struct mytag_sring *)shared_page);
3294 - * FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
3295 - * PAGE_SIZE);
3296 + * mytag_front_ring_t front_ring;
3297 + * SHARED_RING_INIT((mytag_sring_t *)shared_page);
3298 + * FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3299 *
3300 * Initializing the back follows similarly (note that only the front
3301 * initializes the shared ring):
3302 *
3303 - * struct mytag_back_ring back_ring;
3304 - * BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
3305 - * PAGE_SIZE);
3306 + * mytag_back_ring_t back_ring;
3307 + * BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
3308 */
3309
3310 -#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3311 - \
3312 -/* Shared ring entry */ \
3313 -union __name##_sring_entry { \
3314 - __req_t req; \
3315 - __rsp_t rsp; \
3316 -}; \
3317 - \
3318 -/* Shared ring page */ \
3319 -struct __name##_sring { \
3320 - RING_IDX req_prod, req_event; \
3321 - RING_IDX rsp_prod, rsp_event; \
3322 - uint8_t pad[48]; \
3323 - union __name##_sring_entry ring[1]; /* variable-length */ \
3324 -}; \
3325 - \
3326 -/* "Front" end's private variables */ \
3327 -struct __name##_front_ring { \
3328 - RING_IDX req_prod_pvt; \
3329 - RING_IDX rsp_cons; \
3330 - unsigned int nr_ents; \
3331 - struct __name##_sring *sring; \
3332 -}; \
3333 - \
3334 -/* "Back" end's private variables */ \
3335 -struct __name##_back_ring { \
3336 - RING_IDX rsp_prod_pvt; \
3337 - RING_IDX req_cons; \
3338 - unsigned int nr_ents; \
3339 - struct __name##_sring *sring; \
3340 -};
3341 -
3342 +#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
3343 + \
3344 +/* Shared ring entry */ \
3345 +union __name##_sring_entry { \
3346 + __req_t req; \
3347 + __rsp_t rsp; \
3348 +}; \
3349 + \
3350 +/* Shared ring page */ \
3351 +struct __name##_sring { \
3352 + RING_IDX req_prod, req_event; \
3353 + RING_IDX rsp_prod, rsp_event; \
3354 + uint8_t __pad[48]; \
3355 + union __name##_sring_entry ring[1]; /* variable-length */ \
3356 +}; \
3357 + \
3358 +/* "Front" end's private variables */ \
3359 +struct __name##_front_ring { \
3360 + RING_IDX req_prod_pvt; \
3361 + RING_IDX rsp_cons; \
3362 + unsigned int nr_ents; \
3363 + struct __name##_sring *sring; \
3364 +}; \
3365 + \
3366 +/* "Back" end's private variables */ \
3367 +struct __name##_back_ring { \
3368 + RING_IDX rsp_prod_pvt; \
3369 + RING_IDX req_cons; \
3370 + unsigned int nr_ents; \
3371 + struct __name##_sring *sring; \
3372 +}; \
3373 + \
3374 /*
3375 * Macros for manipulating rings.
3376 *
3377 @@ -119,105 +148,99 @@ struct __name##_back_ring { \
3378 */
3379
3380 /* Initialising empty rings */
3381 -#define SHARED_RING_INIT(_s) do { \
3382 - (_s)->req_prod = (_s)->rsp_prod = 0; \
3383 - (_s)->req_event = (_s)->rsp_event = 1; \
3384 - memset((_s)->pad, 0, sizeof((_s)->pad)); \
3385 +#define SHARED_RING_INIT(_s) do { \
3386 + (_s)->req_prod = (_s)->rsp_prod = 0; \
3387 + (_s)->req_event = (_s)->rsp_event = 1; \
3388 + (void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
3389 } while(0)
3390
3391 -#define FRONT_RING_INIT(_r, _s, __size) do { \
3392 - (_r)->req_prod_pvt = 0; \
3393 - (_r)->rsp_cons = 0; \
3394 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3395 - (_r)->sring = (_s); \
3396 +#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
3397 + (_r)->req_prod_pvt = (_i); \
3398 + (_r)->rsp_cons = (_i); \
3399 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3400 + (_r)->sring = (_s); \
3401 } while (0)
3402
3403 -#define BACK_RING_INIT(_r, _s, __size) do { \
3404 - (_r)->rsp_prod_pvt = 0; \
3405 - (_r)->req_cons = 0; \
3406 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3407 - (_r)->sring = (_s); \
3408 -} while (0)
3409 +#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
3410
3411 -/* Initialize to existing shared indexes -- for recovery */
3412 -#define FRONT_RING_ATTACH(_r, _s, __size) do { \
3413 - (_r)->sring = (_s); \
3414 - (_r)->req_prod_pvt = (_s)->req_prod; \
3415 - (_r)->rsp_cons = (_s)->rsp_prod; \
3416 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3417 +#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
3418 + (_r)->rsp_prod_pvt = (_i); \
3419 + (_r)->req_cons = (_i); \
3420 + (_r)->nr_ents = __RING_SIZE(_s, __size); \
3421 + (_r)->sring = (_s); \
3422 } while (0)
3423
3424 -#define BACK_RING_ATTACH(_r, _s, __size) do { \
3425 - (_r)->sring = (_s); \
3426 - (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
3427 - (_r)->req_cons = (_s)->req_prod; \
3428 - (_r)->nr_ents = __RING_SIZE(_s, __size); \
3429 -} while (0)
3430 +#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
3431
3432 /* How big is this ring? */
3433 -#define RING_SIZE(_r) \
3434 +#define RING_SIZE(_r) \
3435 ((_r)->nr_ents)
3436
3437 /* Number of free requests (for use on front side only). */
3438 -#define RING_FREE_REQUESTS(_r) \
3439 +#define RING_FREE_REQUESTS(_r) \
3440 (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))
3441
3442 /* Test if there is an empty slot available on the front ring.
3443 * (This is only meaningful from the front. )
3444 */
3445 -#define RING_FULL(_r) \
3446 +#define RING_FULL(_r) \
3447 (RING_FREE_REQUESTS(_r) == 0)
3448
3449 /* Test if there are outstanding messages to be processed on a ring. */
3450 -#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3451 +#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
3452 ((_r)->sring->rsp_prod - (_r)->rsp_cons)
3453
3454 -#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
3455 - ({ \
3456 - unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3457 - unsigned int rsp = RING_SIZE(_r) - \
3458 - ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3459 - req < rsp ? req : rsp; \
3460 - })
3461 +#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
3462 + unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
3463 + unsigned int rsp = RING_SIZE(_r) - \
3464 + ((_r)->req_cons - (_r)->rsp_prod_pvt); \
3465 + req < rsp ? req : rsp; \
3466 +})
3467
3468 /* Direct access to individual ring elements, by index. */
3469 -#define RING_GET_REQUEST(_r, _idx) \
3470 +#define RING_GET_REQUEST(_r, _idx) \
3471 (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))
3472
3473 +#define RING_GET_RESPONSE(_r, _idx) \
3474 + (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3475 +
3476 /*
3477 - * Get a local copy of a request.
3478 + * Get a local copy of a request/response.
3479 *
3480 - * Use this in preference to RING_GET_REQUEST() so all processing is
3481 + * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
3482 * done on a local copy that cannot be modified by the other end.
3483 *
3484 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
3485 - * to be ineffective where _req is a struct which consists of only bitfields.
3486 + * to be ineffective where dest is a struct which consists of only bitfields.
3487 */
3488 -#define RING_COPY_REQUEST(_r, _idx, _req) do { \
3489 - /* Use volatile to force the copy into _req. */ \
3490 - *(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
3491 +#define RING_COPY_(type, r, idx, dest) do { \
3492 + /* Use volatile to force the copy into dest. */ \
3493 + *(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
3494 } while (0)
3495
3496 -#define RING_GET_RESPONSE(_r, _idx) \
3497 - (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))
3498 +#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
3499 +#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
3500
3501 /* Loop termination condition: Would the specified index overflow the ring? */
3502 -#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3503 +#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
3504 (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
3505
3506 /* Ill-behaved frontend determination: Can there be this many requests? */
3507 -#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
3508 +#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
3509 (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))
3510
3511 +/* Ill-behaved backend determination: Can there be this many responses? */
3512 +#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
3513 + (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))
3514
3515 -#define RING_PUSH_REQUESTS(_r) do { \
3516 - virt_wmb(); /* back sees requests /before/ updated producer index */ \
3517 - (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3518 +#define RING_PUSH_REQUESTS(_r) do { \
3519 + virt_wmb(); /* back sees requests /before/ updated producer index */\
3520 + (_r)->sring->req_prod = (_r)->req_prod_pvt; \
3521 } while (0)
3522
3523 -#define RING_PUSH_RESPONSES(_r) do { \
3524 - virt_wmb(); /* front sees responses /before/ updated producer index */ \
3525 - (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3526 +#define RING_PUSH_RESPONSES(_r) do { \
3527 + virt_wmb(); /* front sees resps /before/ updated producer index */ \
3528 + (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
3529 } while (0)
3530
3531 /*
3532 @@ -250,40 +273,40 @@ struct __name##_back_ring { \
3533 * field appropriately.
3534 */
3535
3536 -#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3537 - RING_IDX __old = (_r)->sring->req_prod; \
3538 - RING_IDX __new = (_r)->req_prod_pvt; \
3539 - virt_wmb(); /* back sees requests /before/ updated producer index */ \
3540 - (_r)->sring->req_prod = __new; \
3541 - virt_mb(); /* back sees new requests /before/ we check req_event */ \
3542 - (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3543 - (RING_IDX)(__new - __old)); \
3544 +#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
3545 + RING_IDX __old = (_r)->sring->req_prod; \
3546 + RING_IDX __new = (_r)->req_prod_pvt; \
3547 + virt_wmb(); /* back sees requests /before/ updated producer index */\
3548 + (_r)->sring->req_prod = __new; \
3549 + virt_mb(); /* back sees new requests /before/ we check req_event */ \
3550 + (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
3551 + (RING_IDX)(__new - __old)); \
3552 } while (0)
3553
3554 -#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3555 - RING_IDX __old = (_r)->sring->rsp_prod; \
3556 - RING_IDX __new = (_r)->rsp_prod_pvt; \
3557 - virt_wmb(); /* front sees responses /before/ updated producer index */ \
3558 - (_r)->sring->rsp_prod = __new; \
3559 - virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
3560 - (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3561 - (RING_IDX)(__new - __old)); \
3562 +#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
3563 + RING_IDX __old = (_r)->sring->rsp_prod; \
3564 + RING_IDX __new = (_r)->rsp_prod_pvt; \
3565 + virt_wmb(); /* front sees resps /before/ updated producer index */ \
3566 + (_r)->sring->rsp_prod = __new; \
3567 + virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
3568 + (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
3569 + (RING_IDX)(__new - __old)); \
3570 } while (0)
3571
3572 -#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3573 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3574 - if (_work_to_do) break; \
3575 - (_r)->sring->req_event = (_r)->req_cons + 1; \
3576 - virt_mb(); \
3577 - (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3578 +#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
3579 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3580 + if (_work_to_do) break; \
3581 + (_r)->sring->req_event = (_r)->req_cons + 1; \
3582 + virt_mb(); \
3583 + (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
3584 } while (0)
3585
3586 -#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3587 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3588 - if (_work_to_do) break; \
3589 - (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3590 - virt_mb(); \
3591 - (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3592 +#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
3593 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3594 + if (_work_to_do) break; \
3595 + (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
3596 + virt_mb(); \
3597 + (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
3598 } while (0)
3599
3600
3601 diff --git a/ipc/shm.c b/ipc/shm.c
3602 index ce1ca9f7c6e97..984addb5aeb5e 100644
3603 --- a/ipc/shm.c
3604 +++ b/ipc/shm.c
3605 @@ -62,9 +62,18 @@ struct shmid_kernel /* private to the kernel */
3606 struct pid *shm_lprid;
3607 struct user_struct *mlock_user;
3608
3609 - /* The task created the shm object. NULL if the task is dead. */
3610 + /*
3611 +	 * The task that created the shm object; used for
3612 +	 * task_lock(shp->shm_creator).
3613 + */
3614 struct task_struct *shm_creator;
3615 - struct list_head shm_clist; /* list by creator */
3616 +
3617 + /*
3618 + * List by creator. task_lock(->shm_creator) required for read/write.
3619 + * If list_empty(), then the creator is dead already.
3620 + */
3621 + struct list_head shm_clist;
3622 + struct ipc_namespace *ns;
3623 } __randomize_layout;
3624
3625 /* shm_mode upper byte flags */
3626 @@ -115,6 +124,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
3627 struct shmid_kernel *shp;
3628
3629 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
3630 + WARN_ON(ns != shp->ns);
3631
3632 if (shp->shm_nattch) {
3633 shp->shm_perm.mode |= SHM_DEST;
3634 @@ -225,10 +235,43 @@ static void shm_rcu_free(struct rcu_head *head)
3635 kvfree(shp);
3636 }
3637
3638 -static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
3639 +/*
3640 + * It has to be called with shp locked.
3641 + * It must be called before ipc_rmid()
3642 + */
3643 +static inline void shm_clist_rm(struct shmid_kernel *shp)
3644 {
3645 - list_del(&s->shm_clist);
3646 - ipc_rmid(&shm_ids(ns), &s->shm_perm);
3647 + struct task_struct *creator;
3648 +
3649 + /* ensure that shm_creator does not disappear */
3650 + rcu_read_lock();
3651 +
3652 + /*
3653 + * A concurrent exit_shm may do a list_del_init() as well.
3654 + * Just do nothing if exit_shm already did the work
3655 + */
3656 + if (!list_empty(&shp->shm_clist)) {
3657 + /*
3658 + * shp->shm_creator is guaranteed to be valid *only*
3659 + * if shp->shm_clist is not empty.
3660 + */
3661 + creator = shp->shm_creator;
3662 +
3663 + task_lock(creator);
3664 + /*
3665 + * list_del_init() is a nop if the entry was already removed
3666 + * from the list.
3667 + */
3668 + list_del_init(&shp->shm_clist);
3669 + task_unlock(creator);
3670 + }
3671 + rcu_read_unlock();
3672 +}
3673 +
3674 +static inline void shm_rmid(struct shmid_kernel *s)
3675 +{
3676 + shm_clist_rm(s);
3677 + ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
3678 }
3679
3680
3681 @@ -283,7 +326,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3682 shm_file = shp->shm_file;
3683 shp->shm_file = NULL;
3684 ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
3685 - shm_rmid(ns, shp);
3686 + shm_rmid(shp);
3687 shm_unlock(shp);
3688 if (!is_file_hugepages(shm_file))
3689 shmem_lock(shm_file, 0, shp->mlock_user);
3690 @@ -306,10 +349,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3691 *
3692 * 2) sysctl kernel.shm_rmid_forced is set to 1.
3693 */
3694 -static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
3695 +static bool shm_may_destroy(struct shmid_kernel *shp)
3696 {
3697 return (shp->shm_nattch == 0) &&
3698 - (ns->shm_rmid_forced ||
3699 + (shp->ns->shm_rmid_forced ||
3700 (shp->shm_perm.mode & SHM_DEST));
3701 }
3702
3703 @@ -340,7 +383,7 @@ static void shm_close(struct vm_area_struct *vma)
3704 ipc_update_pid(&shp->shm_lprid, task_tgid(current));
3705 shp->shm_dtim = ktime_get_real_seconds();
3706 shp->shm_nattch--;
3707 - if (shm_may_destroy(ns, shp))
3708 + if (shm_may_destroy(shp))
3709 shm_destroy(ns, shp);
3710 else
3711 shm_unlock(shp);
3712 @@ -361,10 +404,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
3713 *
3714 * As shp->* are changed under rwsem, it's safe to skip shp locking.
3715 */
3716 - if (shp->shm_creator != NULL)
3717 + if (!list_empty(&shp->shm_clist))
3718 return 0;
3719
3720 - if (shm_may_destroy(ns, shp)) {
3721 + if (shm_may_destroy(shp)) {
3722 shm_lock_by_ptr(shp);
3723 shm_destroy(ns, shp);
3724 }
3725 @@ -382,48 +425,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
3726 /* Locking assumes this will only be called with task == current */
3727 void exit_shm(struct task_struct *task)
3728 {
3729 - struct ipc_namespace *ns = task->nsproxy->ipc_ns;
3730 - struct shmid_kernel *shp, *n;
3731 + for (;;) {
3732 + struct shmid_kernel *shp;
3733 + struct ipc_namespace *ns;
3734
3735 - if (list_empty(&task->sysvshm.shm_clist))
3736 - return;
3737 + task_lock(task);
3738 +
3739 + if (list_empty(&task->sysvshm.shm_clist)) {
3740 + task_unlock(task);
3741 + break;
3742 + }
3743 +
3744 + shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
3745 + shm_clist);
3746
3747 - /*
3748 - * If kernel.shm_rmid_forced is not set then only keep track of
3749 - * which shmids are orphaned, so that a later set of the sysctl
3750 - * can clean them up.
3751 - */
3752 - if (!ns->shm_rmid_forced) {
3753 - down_read(&shm_ids(ns).rwsem);
3754 - list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
3755 - shp->shm_creator = NULL;
3756 /*
3757 - * Only under read lock but we are only called on current
3758 - * so no entry on the list will be shared.
3759 +		 * 1) Get a pointer to the ipc namespace. This pointer is
3760 +		 * guaranteed to be valid because the shp lifetime is
3761 +		 * always shorter than the lifetime of the namespace in
3762 +		 * which shp lives.
3763 +		 * Since we hold task_lock(), shp won't be freed.
3764 */
3765 - list_del(&task->sysvshm.shm_clist);
3766 - up_read(&shm_ids(ns).rwsem);
3767 - return;
3768 - }
3769 + ns = shp->ns;
3770
3771 - /*
3772 - * Destroy all already created segments, that were not yet mapped,
3773 - * and mark any mapped as orphan to cover the sysctl toggling.
3774 - * Destroy is skipped if shm_may_destroy() returns false.
3775 - */
3776 - down_write(&shm_ids(ns).rwsem);
3777 - list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
3778 - shp->shm_creator = NULL;
3779 + /*
3780 + * 2) If kernel.shm_rmid_forced is not set then only keep track of
3781 + * which shmids are orphaned, so that a later set of the sysctl
3782 + * can clean them up.
3783 + */
3784 + if (!ns->shm_rmid_forced)
3785 + goto unlink_continue;
3786
3787 - if (shm_may_destroy(ns, shp)) {
3788 - shm_lock_by_ptr(shp);
3789 - shm_destroy(ns, shp);
3790 + /*
3791 + * 3) get a reference to the namespace.
3792 +		 * The refcount could already be 0. If it is 0, then
3793 +		 * the shm objects will be freed by free_ipc_work().
3794 + */
3795 + ns = get_ipc_ns_not_zero(ns);
3796 + if (!ns) {
3797 +unlink_continue:
3798 + list_del_init(&shp->shm_clist);
3799 + task_unlock(task);
3800 + continue;
3801 }
3802 - }
3803
3804 - /* Remove the list head from any segments still attached. */
3805 - list_del(&task->sysvshm.shm_clist);
3806 - up_write(&shm_ids(ns).rwsem);
3807 + /*
3808 + * 4) get a reference to shp.
3809 + * This cannot fail: shm_clist_rm() is called before
3810 + * ipc_rmid(), thus the refcount cannot be 0.
3811 + */
3812 + WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
3813 +
3814 + /*
3815 + * 5) unlink the shm segment from the list of segments
3816 + * created by current.
3817 + * This must be done last. After unlinking,
3818 + * only the refcounts obtained above prevent IPC_RMID
3819 + * from destroying the segment or the namespace.
3820 + */
3821 + list_del_init(&shp->shm_clist);
3822 +
3823 + task_unlock(task);
3824 +
3825 + /*
3826 +		 * 6) We have all references.
3827 +		 * Thus lock shp and, if needed, destroy it.
3828 + */
3829 + down_write(&shm_ids(ns).rwsem);
3830 + shm_lock_by_ptr(shp);
3831 + /*
3832 +		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, so it's
3833 +		 * safe to call ipc_rcu_putref here.
3834 + */
3835 + ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
3836 +
3837 + if (ipc_valid_object(&shp->shm_perm)) {
3838 + if (shm_may_destroy(shp))
3839 + shm_destroy(ns, shp);
3840 + else
3841 + shm_unlock(shp);
3842 + } else {
3843 + /*
3844 +			 * Someone else deleted the shp from the namespace
3845 +			 * idr/kht while we were waiting.
3846 + * Just unlock and continue.
3847 + */
3848 + shm_unlock(shp);
3849 + }
3850 +
3851 + up_write(&shm_ids(ns).rwsem);
3852 + put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
3853 + }
3854 }
3855
3856 static vm_fault_t shm_fault(struct vm_fault *vmf)
3857 @@ -680,7 +772,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
3858 if (error < 0)
3859 goto no_id;
3860
3861 + shp->ns = ns;
3862 +
3863 + task_lock(current);
3864 list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
3865 + task_unlock(current);
3866
3867 /*
3868 * shmid gets reported as "inode#" in /proc/pid/maps.
3869 @@ -1575,7 +1671,8 @@ out_nattch:
3870 down_write(&shm_ids(ns).rwsem);
3871 shp = shm_lock(ns, shmid);
3872 shp->shm_nattch--;
3873 - if (shm_may_destroy(ns, shp))
3874 +
3875 + if (shm_may_destroy(shp))
3876 shm_destroy(ns, shp);
3877 else
3878 shm_unlock(shp);
3879 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
3880 index 69c4cd472def3..6cafb2e910a11 100644
3881 --- a/kernel/power/hibernate.c
3882 +++ b/kernel/power/hibernate.c
3883 @@ -676,7 +676,7 @@ static int load_image_and_restore(void)
3884 goto Unlock;
3885
3886 error = swsusp_read(&flags);
3887 - swsusp_close(FMODE_READ);
3888 + swsusp_close(FMODE_READ | FMODE_EXCL);
3889 if (!error)
3890 hibernation_restore(flags & SF_PLATFORM_MODE);
3891
3892 @@ -871,7 +871,7 @@ static int software_resume(void)
3893 /* The snapshot device should not be opened while we're running */
3894 if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
3895 error = -EBUSY;
3896 - swsusp_close(FMODE_READ);
3897 + swsusp_close(FMODE_READ | FMODE_EXCL);
3898 goto Unlock;
3899 }
3900
3901 @@ -907,7 +907,7 @@ static int software_resume(void)
3902 pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
3903 return error;
3904 Close_Finish:
3905 - swsusp_close(FMODE_READ);
3906 + swsusp_close(FMODE_READ | FMODE_EXCL);
3907 goto Finish;
3908 }
3909
3910 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
3911 index 35e9a01b54800..1d514a1a31554 100644
3912 --- a/kernel/trace/trace.h
3913 +++ b/kernel/trace/trace.h
3914 @@ -1423,14 +1423,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
3915 if (eflags & EVENT_FILE_FL_TRIGGER_COND)
3916 *tt = event_triggers_call(file, entry, event);
3917
3918 - if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
3919 - (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
3920 - !filter_match_preds(file->filter, entry))) {
3921 - __trace_event_discard_commit(buffer, event);
3922 - return true;
3923 - }
3924 + if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
3925 + EVENT_FILE_FL_FILTERED |
3926 + EVENT_FILE_FL_PID_FILTER))))
3927 + return false;
3928 +
3929 + if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
3930 + goto discard;
3931 +
3932 + if (file->flags & EVENT_FILE_FL_FILTERED &&
3933 + !filter_match_preds(file->filter, entry))
3934 + goto discard;
3935 +
3936 + if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
3937 + trace_event_ignore_this_pid(file))
3938 + goto discard;
3939
3940 return false;
3941 + discard:
3942 + __trace_event_discard_commit(buffer, event);
3943 + return true;
3944 }
3945
3946 /**
3947 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3948 index e31ee325dad16..4acc77e049e5f 100644
3949 --- a/kernel/trace/trace_events.c
3950 +++ b/kernel/trace/trace_events.c
3951 @@ -2247,12 +2247,19 @@ static struct trace_event_file *
3952 trace_create_new_event(struct trace_event_call *call,
3953 struct trace_array *tr)
3954 {
3955 + struct trace_pid_list *pid_list;
3956 struct trace_event_file *file;
3957
3958 file = kmem_cache_alloc(file_cachep, GFP_TRACE);
3959 if (!file)
3960 return NULL;
3961
3962 + pid_list = rcu_dereference_protected(tr->filtered_pids,
3963 + lockdep_is_held(&event_mutex));
3964 +
3965 + if (pid_list)
3966 + file->flags |= EVENT_FILE_FL_PID_FILTER;
3967 +
3968 file->event_call = call;
3969 file->tr = tr;
3970 atomic_set(&file->sm_ref, 0);
3971 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
3972 index b515db036becc..efb51a23a14f2 100644
3973 --- a/kernel/trace/trace_uprobe.c
3974 +++ b/kernel/trace/trace_uprobe.c
3975 @@ -1299,6 +1299,7 @@ static int uprobe_perf_open(struct trace_event_call *call,
3976 return 0;
3977
3978 list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
3979 + tu = container_of(pos, struct trace_uprobe, tp);
3980 err = uprobe_apply(tu->inode, tu->offset, &tu->consumer, true);
3981 if (err) {
3982 uprobe_perf_close(call, event);
3983 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
3984 index cd7c0429cddf8..796d95797ab40 100644
3985 --- a/net/8021q/vlan.c
3986 +++ b/net/8021q/vlan.c
3987 @@ -177,9 +177,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
3988 if (err)
3989 goto out_unregister_netdev;
3990
3991 - /* Account for reference in struct vlan_dev_priv */
3992 - dev_hold(real_dev);
3993 -
3994 vlan_stacked_transfer_operstate(real_dev, dev, vlan);
3995 linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */
3996
3997 diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
3998 index 415a29d42cdf0..589615ec490bb 100644
3999 --- a/net/8021q/vlan_dev.c
4000 +++ b/net/8021q/vlan_dev.c
4001 @@ -583,6 +583,9 @@ static int vlan_dev_init(struct net_device *dev)
4002 if (!vlan->vlan_pcpu_stats)
4003 return -ENOMEM;
4004
4005 + /* Get vlan's reference to real_dev */
4006 + dev_hold(real_dev);
4007 +
4008 return 0;
4009 }
4010
4011 diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
4012 index 858bb10d8341e..4d69b3de980a6 100644
4013 --- a/net/ipv4/nexthop.c
4014 +++ b/net/ipv4/nexthop.c
4015 @@ -839,15 +839,36 @@ static void remove_nexthop(struct net *net, struct nexthop *nh,
4016 /* if any FIB entries reference this nexthop, any dst entries
4017 * need to be regenerated
4018 */
4019 -static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
4020 +static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
4021 + struct nexthop *replaced_nh)
4022 {
4023 struct fib6_info *f6i;
4024 + struct nh_group *nhg;
4025 + int i;
4026
4027 if (!list_empty(&nh->fi_list))
4028 rt_cache_flush(net);
4029
4030 list_for_each_entry(f6i, &nh->f6i_list, nh_list)
4031 ipv6_stub->fib6_update_sernum(net, f6i);
4032 +
4033 + /* if an IPv6 group was replaced, we have to release all old
4034 + * dsts to make sure all refcounts are released
4035 + */
4036 + if (!replaced_nh->is_group)
4037 + return;
4038 +
4039 + /* new dsts must use only the new nexthop group */
4040 + synchronize_net();
4041 +
4042 + nhg = rtnl_dereference(replaced_nh->nh_grp);
4043 + for (i = 0; i < nhg->num_nh; i++) {
4044 + struct nh_grp_entry *nhge = &nhg->nh_entries[i];
4045 + struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);
4046 +
4047 + if (nhi->family == AF_INET6)
4048 + ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
4049 + }
4050 }
4051
4052 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
4053 @@ -994,7 +1015,7 @@ static int replace_nexthop(struct net *net, struct nexthop *old,
4054 err = replace_nexthop_single(net, old, new, extack);
4055
4056 if (!err) {
4057 - nh_rt_cache_flush(net, old);
4058 + nh_rt_cache_flush(net, old, new);
4059
4060 __remove_nexthop(net, new, NULL);
4061 nexthop_put(new);
4062 @@ -1231,11 +1252,15 @@ static int nh_create_ipv6(struct net *net, struct nexthop *nh,
4063 /* sets nh_dev if successful */
4064 err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
4065 extack);
4066 - if (err)
4067 + if (err) {
4068 + /* IPv6 is not enabled, don't call fib6_nh_release */
4069 + if (err == -EAFNOSUPPORT)
4070 + goto out;
4071 ipv6_stub->fib6_nh_release(fib6_nh);
4072 - else
4073 + } else {
4074 nh->nh_flags = fib6_nh->fib_nh_flags;
4075 -
4076 + }
4077 +out:
4078 return err;
4079 }
4080
4081 diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
4082 index ee6c38a73325d..44be7a5a13911 100644
4083 --- a/net/ipv4/tcp_cubic.c
4084 +++ b/net/ipv4/tcp_cubic.c
4085 @@ -341,8 +341,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
4086 return;
4087
4088 if (tcp_in_slow_start(tp)) {
4089 - if (hystart && after(ack, ca->end_seq))
4090 - bictcp_hystart_reset(sk);
4091 acked = tcp_slow_start(tp, acked);
4092 if (!acked)
4093 return;
4094 @@ -384,6 +382,9 @@ static void hystart_update(struct sock *sk, u32 delay)
4095 if (ca->found & hystart_detect)
4096 return;
4097
4098 + if (after(tp->snd_una, ca->end_seq))
4099 + bictcp_hystart_reset(sk);
4100 +
4101 if (hystart_detect & HYSTART_ACK_TRAIN) {
4102 u32 now = bictcp_clock();
4103
4104 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
4105 index 14ac1d9112877..942da168f18fb 100644
4106 --- a/net/ipv6/af_inet6.c
4107 +++ b/net/ipv6/af_inet6.c
4108 @@ -955,6 +955,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
4109 .ip6_mtu_from_fib6 = ip6_mtu_from_fib6,
4110 .fib6_nh_init = fib6_nh_init,
4111 .fib6_nh_release = fib6_nh_release,
4112 + .fib6_nh_release_dsts = fib6_nh_release_dsts,
4113 .fib6_update_sernum = fib6_update_sernum_stub,
4114 .fib6_rt_update = fib6_rt_update,
4115 .ip6_del_rt = ip6_del_rt,
4116 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4117 index fc913f09606db..d847aa32628da 100644
4118 --- a/net/ipv6/ip6_output.c
4119 +++ b/net/ipv6/ip6_output.c
4120 @@ -192,7 +192,7 @@ static int __ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff
4121 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
4122 /* Policy lookup after SNAT yielded a new policy */
4123 if (skb_dst(skb)->xfrm) {
4124 - IPCB(skb)->flags |= IPSKB_REROUTED;
4125 + IP6CB(skb)->flags |= IP6SKB_REROUTED;
4126 return dst_output(net, sk, skb);
4127 }
4128 #endif
4129 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4130 index daa876c6ae8db..f36db3dd97346 100644
4131 --- a/net/ipv6/route.c
4132 +++ b/net/ipv6/route.c
4133 @@ -3585,6 +3585,25 @@ void fib6_nh_release(struct fib6_nh *fib6_nh)
4134 fib_nh_common_release(&fib6_nh->nh_common);
4135 }
4136
4137 +void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
4138 +{
4139 + int cpu;
4140 +
4141 + if (!fib6_nh->rt6i_pcpu)
4142 + return;
4143 +
4144 + for_each_possible_cpu(cpu) {
4145 + struct rt6_info *pcpu_rt, **ppcpu_rt;
4146 +
4147 + ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
4148 + pcpu_rt = xchg(ppcpu_rt, NULL);
4149 + if (pcpu_rt) {
4150 + dst_dev_put(&pcpu_rt->dst);
4151 + dst_release(&pcpu_rt->dst);
4152 + }
4153 + }
4154 +}
4155 +
4156 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
4157 gfp_t gfp_flags,
4158 struct netlink_ext_ack *extack)
4159 diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c
4160 index 0187e65176c05..114ef47db76d3 100644
4161 --- a/net/ncsi/ncsi-cmd.c
4162 +++ b/net/ncsi/ncsi-cmd.c
4163 @@ -18,6 +18,8 @@
4164 #include "internal.h"
4165 #include "ncsi-pkt.h"
4166
4167 +static const int padding_bytes = 26;
4168 +
4169 u32 ncsi_calculate_checksum(unsigned char *data, int len)
4170 {
4171 u32 checksum = 0;
4172 @@ -213,12 +215,17 @@ static int ncsi_cmd_handler_oem(struct sk_buff *skb,
4173 {
4174 struct ncsi_cmd_oem_pkt *cmd;
4175 unsigned int len;
4176 + int payload;
4177 + /* NC-SI spec DSP_0222_1.2.0, section 8.2.2.2
4178 + * requires payload to be padded with 0 to
4179 + * 32-bit boundary before the checksum field.
4180 + * Ensure the padding bytes are accounted for in
4181 + * skb allocation
4182 + */
4183
4184 + payload = ALIGN(nca->payload, 4);
4185 len = sizeof(struct ncsi_cmd_pkt_hdr) + 4;
4186 - if (nca->payload < 26)
4187 - len += 26;
4188 - else
4189 - len += nca->payload;
4190 + len += max(payload, padding_bytes);
4191
4192 cmd = skb_put_zero(skb, len);
4193 memcpy(&cmd->mfr_id, nca->data, nca->payload);
4194 @@ -272,6 +279,7 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
4195 struct net_device *dev = nd->dev;
4196 int hlen = LL_RESERVED_SPACE(dev);
4197 int tlen = dev->needed_tailroom;
4198 + int payload;
4199 int len = hlen + tlen;
4200 struct sk_buff *skb;
4201 struct ncsi_request *nr;
4202 @@ -281,14 +289,14 @@ static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca)
4203 return NULL;
4204
4205 /* NCSI command packet has 16-bytes header, payload, 4 bytes checksum.
4206 + * Payload needs padding so that the checksum field following payload is
4207 + * aligned to 32-bit boundary.
4208 * The packet needs padding if its payload is less than 26 bytes to
4209 * meet 64 bytes minimal ethernet frame length.
4210 */
4211 len += sizeof(struct ncsi_cmd_pkt_hdr) + 4;
4212 - if (nca->payload < 26)
4213 - len += 26;
4214 - else
4215 - len += nca->payload;
4216 + payload = ALIGN(nca->payload, 4);
4217 + len += max(payload, padding_bytes);
4218
4219 /* Allocate skb */
4220 skb = alloc_skb(len, GFP_ATOMIC);
4221 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
4222 index 89aa1fc334b19..ccd6af1440745 100644
4223 --- a/net/netfilter/ipvs/ip_vs_core.c
4224 +++ b/net/netfilter/ipvs/ip_vs_core.c
4225 @@ -1982,7 +1982,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
4226 struct ip_vs_proto_data *pd;
4227 struct ip_vs_conn *cp;
4228 int ret, pkts;
4229 - int conn_reuse_mode;
4230 struct sock *sk;
4231
4232 /* Already marked as IPVS request or reply? */
4233 @@ -2059,15 +2058,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
4234 cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
4235 ipvs, af, skb, &iph);
4236
4237 - conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
4238 - if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
4239 + if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
4240 + int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
4241 bool old_ct = false, resched = false;
4242
4243 if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
4244 unlikely(!atomic_read(&cp->dest->weight))) {
4245 resched = true;
4246 old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
4247 - } else if (is_new_conn_expected(cp, conn_reuse_mode)) {
4248 + } else if (conn_reuse_mode &&
4249 + is_new_conn_expected(cp, conn_reuse_mode)) {
4250 old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
4251 if (!atomic_read(&cp->n_control)) {
4252 resched = true;
4253 diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
4254 index 1d0aa9e6044bf..b8ecb002e6238 100644
4255 --- a/net/nfc/nci/core.c
4256 +++ b/net/nfc/nci/core.c
4257 @@ -473,6 +473,11 @@ static int nci_open_device(struct nci_dev *ndev)
4258
4259 mutex_lock(&ndev->req_lock);
4260
4261 + if (test_bit(NCI_UNREG, &ndev->flags)) {
4262 + rc = -ENODEV;
4263 + goto done;
4264 + }
4265 +
4266 if (test_bit(NCI_UP, &ndev->flags)) {
4267 rc = -EALREADY;
4268 goto done;
4269 @@ -536,6 +541,10 @@ done:
4270 static int nci_close_device(struct nci_dev *ndev)
4271 {
4272 nci_req_cancel(ndev, ENODEV);
4273 +
4274 + /* This mutex needs to be held as a barrier for
4275 + * caller nci_unregister_device
4276 + */
4277 mutex_lock(&ndev->req_lock);
4278
4279 if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
4280 @@ -573,8 +582,8 @@ static int nci_close_device(struct nci_dev *ndev)
4281 /* Flush cmd wq */
4282 flush_workqueue(ndev->cmd_wq);
4283
4284 - /* Clear flags */
4285 - ndev->flags = 0;
4286 + /* Clear flags except NCI_UNREG */
4287 + ndev->flags &= BIT(NCI_UNREG);
4288
4289 mutex_unlock(&ndev->req_lock);
4290
4291 @@ -1256,6 +1265,12 @@ void nci_unregister_device(struct nci_dev *ndev)
4292 {
4293 struct nci_conn_info *conn_info, *n;
4294
4295 +	/* This set_bit is not protected by a specialized barrier.
4296 +	 * However, it is fine because the mutex_lock(&ndev->req_lock)
4297 +	 * in nci_close_device() will help to emit one.
4298 + */
4299 + set_bit(NCI_UNREG, &ndev->flags);
4300 +
4301 nci_close_device(ndev);
4302
4303 destroy_workqueue(ndev->cmd_wq);
4304 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
4305 index 6b0f09c5b195f..5e1493f8deba7 100644
4306 --- a/net/smc/af_smc.c
4307 +++ b/net/smc/af_smc.c
4308 @@ -1658,8 +1658,10 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
4309 static int smc_shutdown(struct socket *sock, int how)
4310 {
4311 struct sock *sk = sock->sk;
4312 + bool do_shutdown = true;
4313 struct smc_sock *smc;
4314 int rc = -EINVAL;
4315 + int old_state;
4316 int rc1 = 0;
4317
4318 smc = smc_sk(sk);
4319 @@ -1686,7 +1688,11 @@ static int smc_shutdown(struct socket *sock, int how)
4320 }
4321 switch (how) {
4322 case SHUT_RDWR: /* shutdown in both directions */
4323 + old_state = sk->sk_state;
4324 rc = smc_close_active(smc);
4325 + if (old_state == SMC_ACTIVE &&
4326 + sk->sk_state == SMC_PEERCLOSEWAIT1)
4327 + do_shutdown = false;
4328 break;
4329 case SHUT_WR:
4330 rc = smc_close_shutdown_write(smc);
4331 @@ -1696,7 +1702,7 @@ static int smc_shutdown(struct socket *sock, int how)
4332 /* nothing more to do because peer is not involved */
4333 break;
4334 }
4335 - if (smc->clcsock)
4336 + if (do_shutdown && smc->clcsock)
4337 rc1 = kernel_sock_shutdown(smc->clcsock, how);
4338 /* map sock_shutdown_cmd constants to sk_shutdown value range */
4339 sk->sk_shutdown |= how + 1;
4340 diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
4341 index fc06720b53c14..2eabf39dee74d 100644
4342 --- a/net/smc/smc_close.c
4343 +++ b/net/smc/smc_close.c
4344 @@ -218,6 +218,12 @@ again:
4345 if (rc)
4346 break;
4347 sk->sk_state = SMC_PEERCLOSEWAIT1;
4348 +
4349 +		/* actively shut down clcsock before the peer closes it,
4350 +		 * to prevent the peer from entering TIME_WAIT state.
4351 + */
4352 + if (smc->clcsock && smc->clcsock->sk)
4353 + rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
4354 } else {
4355 /* peer event has changed the state */
4356 goto again;
4357 diff --git a/sound/pci/ctxfi/ctamixer.c b/sound/pci/ctxfi/ctamixer.c
4358 index d4ff377eb3a34..6d636bdcaa5a3 100644
4359 --- a/sound/pci/ctxfi/ctamixer.c
4360 +++ b/sound/pci/ctxfi/ctamixer.c
4361 @@ -23,16 +23,15 @@
4362
4363 #define BLANK_SLOT 4094
4364
4365 -static int amixer_master(struct rsc *rsc)
4366 +static void amixer_master(struct rsc *rsc)
4367 {
4368 rsc->conj = 0;
4369 - return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
4370 + rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
4371 }
4372
4373 -static int amixer_next_conj(struct rsc *rsc)
4374 +static void amixer_next_conj(struct rsc *rsc)
4375 {
4376 rsc->conj++;
4377 - return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
4378 }
4379
4380 static int amixer_index(const struct rsc *rsc)
4381 @@ -331,16 +330,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)
4382
4383 /* SUM resource management */
4384
4385 -static int sum_master(struct rsc *rsc)
4386 +static void sum_master(struct rsc *rsc)
4387 {
4388 rsc->conj = 0;
4389 - return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
4390 + rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
4391 }
4392
4393 -static int sum_next_conj(struct rsc *rsc)
4394 +static void sum_next_conj(struct rsc *rsc)
4395 {
4396 rsc->conj++;
4397 - return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
4398 }
4399
4400 static int sum_index(const struct rsc *rsc)
4401 diff --git a/sound/pci/ctxfi/ctdaio.c b/sound/pci/ctxfi/ctdaio.c
4402 index 27441d498968d..b5e1296af09ee 100644
4403 --- a/sound/pci/ctxfi/ctdaio.c
4404 +++ b/sound/pci/ctxfi/ctdaio.c
4405 @@ -51,12 +51,12 @@ static struct daio_rsc_idx idx_20k2[NUM_DAIOTYP] = {
4406 [SPDIFIO] = {.left = 0x05, .right = 0x85},
4407 };
4408
4409 -static int daio_master(struct rsc *rsc)
4410 +static void daio_master(struct rsc *rsc)
4411 {
4412 /* Actually, this is not the resource index of DAIO.
4413 * For DAO, it is the input mapper index. And, for DAI,
4414 * it is the output time-slot index. */
4415 - return rsc->conj = rsc->idx;
4416 + rsc->conj = rsc->idx;
4417 }
4418
4419 static int daio_index(const struct rsc *rsc)
4420 @@ -64,19 +64,19 @@ static int daio_index(const struct rsc *rsc)
4421 return rsc->conj;
4422 }
4423
4424 -static int daio_out_next_conj(struct rsc *rsc)
4425 +static void daio_out_next_conj(struct rsc *rsc)
4426 {
4427 - return rsc->conj += 2;
4428 + rsc->conj += 2;
4429 }
4430
4431 -static int daio_in_next_conj_20k1(struct rsc *rsc)
4432 +static void daio_in_next_conj_20k1(struct rsc *rsc)
4433 {
4434 - return rsc->conj += 0x200;
4435 + rsc->conj += 0x200;
4436 }
4437
4438 -static int daio_in_next_conj_20k2(struct rsc *rsc)
4439 +static void daio_in_next_conj_20k2(struct rsc *rsc)
4440 {
4441 - return rsc->conj += 0x100;
4442 + rsc->conj += 0x100;
4443 }
4444
4445 static const struct rsc_ops daio_out_rsc_ops = {
4446 diff --git a/sound/pci/ctxfi/ctresource.c b/sound/pci/ctxfi/ctresource.c
4447 index 0bb5696e44b37..ec5f597b580ad 100644
4448 --- a/sound/pci/ctxfi/ctresource.c
4449 +++ b/sound/pci/ctxfi/ctresource.c
4450 @@ -109,18 +109,17 @@ static int audio_ring_slot(const struct rsc *rsc)
4451 return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
4452 }
4453
4454 -static int rsc_next_conj(struct rsc *rsc)
4455 +static void rsc_next_conj(struct rsc *rsc)
4456 {
4457 unsigned int i;
4458 for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
4459 i++;
4460 rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
4461 - return rsc->conj;
4462 }
4463
4464 -static int rsc_master(struct rsc *rsc)
4465 +static void rsc_master(struct rsc *rsc)
4466 {
4467 - return rsc->conj = rsc->idx;
4468 + rsc->conj = rsc->idx;
4469 }
4470
4471 static const struct rsc_ops rsc_generic_ops = {
4472 diff --git a/sound/pci/ctxfi/ctresource.h b/sound/pci/ctxfi/ctresource.h
4473 index 93e47488a1c1c..92146054af582 100644
4474 --- a/sound/pci/ctxfi/ctresource.h
4475 +++ b/sound/pci/ctxfi/ctresource.h
4476 @@ -39,8 +39,8 @@ struct rsc {
4477 };
4478
4479 struct rsc_ops {
4480 - int (*master)(struct rsc *rsc); /* Move to master resource */
4481 - int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
4482 + void (*master)(struct rsc *rsc); /* Move to master resource */
4483 + void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
4484 int (*index)(const struct rsc *rsc); /* Return the index of resource */
4485 /* Return the output slot number */
4486 int (*output_slot)(const struct rsc *rsc);
4487 diff --git a/sound/pci/ctxfi/ctsrc.c b/sound/pci/ctxfi/ctsrc.c
4488 index 37c18ce84974a..7d2bda0c3d3de 100644
4489 --- a/sound/pci/ctxfi/ctsrc.c
4490 +++ b/sound/pci/ctxfi/ctsrc.c
4491 @@ -590,16 +590,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
4492
4493 /* SRCIMP resource manager operations */
4494
4495 -static int srcimp_master(struct rsc *rsc)
4496 +static void srcimp_master(struct rsc *rsc)
4497 {
4498 rsc->conj = 0;
4499 - return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
4500 + rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
4501 }
4502
4503 -static int srcimp_next_conj(struct rsc *rsc)
4504 +static void srcimp_next_conj(struct rsc *rsc)
4505 {
4506 rsc->conj++;
4507 - return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
4508 }
4509
4510 static int srcimp_index(const struct rsc *rsc)
4511 diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
4512 index 745cc9dd14f38..16f26dd2d59ed 100644
4513 --- a/sound/soc/qcom/qdsp6/q6routing.c
4514 +++ b/sound/soc/qcom/qdsp6/q6routing.c
4515 @@ -443,7 +443,11 @@ static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
4516 session->port_id = be_id;
4517 snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, update);
4518 } else {
4519 - session->port_id = -1;
4520 + if (session->port_id == be_id) {
4521 + session->port_id = -1;
4522 + return 0;
4523 + }
4524 +
4525 snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, update);
4526 }
4527
4528 diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
4529 index c367609433bfc..21f859e56b700 100644
4530 --- a/sound/soc/soc-topology.c
4531 +++ b/sound/soc/soc-topology.c
4532 @@ -2777,6 +2777,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
4533 /* remove dynamic controls from the component driver */
4534 int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4535 {
4536 + struct snd_card *card = comp->card->snd_card;
4537 struct snd_soc_dobj *dobj, *next_dobj;
4538 int pass = SOC_TPLG_PASS_END;
4539
4540 @@ -2784,6 +2785,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4541 while (pass >= SOC_TPLG_PASS_START) {
4542
4543 /* remove mixer controls */
4544 + down_write(&card->controls_rwsem);
4545 list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
4546 list) {
4547
4548 @@ -2827,6 +2829,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
4549 break;
4550 }
4551 }
4552 + up_write(&card->controls_rwsem);
4553 pass--;
4554 }
4555