Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0211-5.4.112-all-fixes.patch



Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 146536 bytes
-add missing
1 diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
2 index 4f78e9a6da609..fcafce635ff01 100644
3 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
4 +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
5 @@ -51,7 +51,7 @@ properties:
6 description:
7 Reference to an nvmem node for the MAC address
8
9 - nvmem-cells-names:
10 + nvmem-cell-names:
11 const: mac-address
12
13 phy-connection-type:
14 diff --git a/Makefile b/Makefile
15 index 25680098f51b2..ba8ee5e806627 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,7 +1,7 @@
19 # SPDX-License-Identifier: GPL-2.0
20 VERSION = 5
21 PATCHLEVEL = 4
22 -SUBLEVEL = 111
23 +SUBLEVEL = 112
24 EXTRAVERSION =
25 NAME = Kleptomaniac Octopus
26
27 diff --git a/arch/arm/boot/dts/armada-385-turris-omnia.dts b/arch/arm/boot/dts/armada-385-turris-omnia.dts
28 index 768b6c5d2129a..fde4c302f08ec 100644
29 --- a/arch/arm/boot/dts/armada-385-turris-omnia.dts
30 +++ b/arch/arm/boot/dts/armada-385-turris-omnia.dts
31 @@ -236,6 +236,7 @@
32 status = "okay";
33 compatible = "ethernet-phy-id0141.0DD1", "ethernet-phy-ieee802.3-c22";
34 reg = <1>;
35 + marvell,reg-init = <3 18 0 0x4985>;
36
37 /* irq is connected to &pcawan pin 7 */
38 };
39 diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
40 index bc43c75f17450..6678b97b10076 100644
41 --- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
42 +++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
43 @@ -432,6 +432,7 @@
44 pinctrl-0 = <&pinctrl_usdhc2>;
45 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
46 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
47 + vmmc-supply = <&vdd_sd1_reg>;
48 status = "disabled";
49 };
50
51 @@ -441,5 +442,6 @@
52 &pinctrl_usdhc3_cdwp>;
53 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
54 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
55 + vmmc-supply = <&vdd_sd0_reg>;
56 status = "disabled";
57 };
58 diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
59 index cffa8991880d1..93b44efdbc527 100644
60 --- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
61 +++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
62 @@ -124,7 +124,7 @@
63 #define MX8MM_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
64 #define MX8MM_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
65 #define MX8MM_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
66 -#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
67 +#define MX8MM_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
68 #define MX8MM_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
69 #define MX8MM_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
70 #define MX8MM_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
71 diff --git a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
72 index b94b02080a344..68e8fa1729741 100644
73 --- a/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
74 +++ b/arch/arm64/boot/dts/freescale/imx8mq-pinfunc.h
75 @@ -130,7 +130,7 @@
76 #define MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0x0A4 0x30C 0x000 0x0 0x0
77 #define MX8MQ_IOMUXC_SD1_CMD_GPIO2_IO1 0x0A4 0x30C 0x000 0x5 0x0
78 #define MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0x0A8 0x310 0x000 0x0 0x0
79 -#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x31 0x000 0x5 0x0
80 +#define MX8MQ_IOMUXC_SD1_DATA0_GPIO2_IO2 0x0A8 0x310 0x000 0x5 0x0
81 #define MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0x0AC 0x314 0x000 0x0 0x0
82 #define MX8MQ_IOMUXC_SD1_DATA1_GPIO2_IO3 0x0AC 0x314 0x000 0x5 0x0
83 #define MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0x0B0 0x318 0x000 0x0 0x0
84 diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
85 index 7ff574d56429c..f31e07fc936d9 100644
86 --- a/arch/ia64/include/asm/ptrace.h
87 +++ b/arch/ia64/include/asm/ptrace.h
88 @@ -54,8 +54,7 @@
89
90 static inline unsigned long user_stack_pointer(struct pt_regs *regs)
91 {
92 - /* FIXME: should this be bspstore + nr_dirty regs? */
93 - return regs->ar_bspstore;
94 + return regs->r12;
95 }
96
97 static inline int is_syscall_success(struct pt_regs *regs)
98 @@ -79,11 +78,6 @@ static inline long regs_return_value(struct pt_regs *regs)
99 unsigned long __ip = instruction_pointer(regs); \
100 (__ip & ~3UL) + ((__ip & 3UL) << 2); \
101 })
102 -/*
103 - * Why not default? Because user_stack_pointer() on ia64 gives register
104 - * stack backing store instead...
105 - */
106 -#define current_user_stack_pointer() (current_pt_regs()->r12)
107
108 /* given a pointer to a task_struct, return the user's pt_regs */
109 # define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
110 diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
111 index 254703653b6f5..f34dc9bc6758e 100644
112 --- a/arch/nds32/mm/cacheflush.c
113 +++ b/arch/nds32/mm/cacheflush.c
114 @@ -239,7 +239,7 @@ void flush_dcache_page(struct page *page)
115 {
116 struct address_space *mapping;
117
118 - mapping = page_mapping(page);
119 + mapping = page_mapping_file(page);
120 if (mapping && !mapping_mapped(mapping))
121 set_bit(PG_dcache_dirty, &page->flags);
122 else {
123 diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
124 index 0689585758717..a736dc59bbef8 100644
125 --- a/arch/parisc/include/asm/cmpxchg.h
126 +++ b/arch/parisc/include/asm/cmpxchg.h
127 @@ -72,7 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
128 #endif
129 case 4: return __cmpxchg_u32((unsigned int *)ptr,
130 (unsigned int)old, (unsigned int)new_);
131 - case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
132 + case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
133 }
134 __cmpxchg_called_with_bad_pointer();
135 return old;
136 diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
137 index af013b4244d34..2da0273597989 100644
138 --- a/arch/s390/kernel/cpcmd.c
139 +++ b/arch/s390/kernel/cpcmd.c
140 @@ -37,10 +37,12 @@ static int diag8_noresponse(int cmdlen)
141
142 static int diag8_response(int cmdlen, char *response, int *rlen)
143 {
144 + unsigned long _cmdlen = cmdlen | 0x40000000L;
145 + unsigned long _rlen = *rlen;
146 register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
147 register unsigned long reg3 asm ("3") = (addr_t) response;
148 - register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
149 - register unsigned long reg5 asm ("5") = *rlen;
150 + register unsigned long reg4 asm ("4") = _cmdlen;
151 + register unsigned long reg5 asm ("5") = _rlen;
152
153 asm volatile(
154 " diag %2,%0,0x8\n"
155 diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
156 index 812d6aa6e0136..f2d015a8ff57f 100644
157 --- a/drivers/char/agp/Kconfig
158 +++ b/drivers/char/agp/Kconfig
159 @@ -125,7 +125,7 @@ config AGP_HP_ZX1
160
161 config AGP_PARISC
162 tristate "HP Quicksilver AGP support"
163 - depends on AGP && PARISC && 64BIT
164 + depends on AGP && PARISC && 64BIT && IOMMU_SBA
165 help
166 This option gives you AGP GART support for the HP Quicksilver
167 AGP bus adapter on HP PA-RISC machines (Ok, just on the C8000
168 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
169 index 36e9f38a38824..6ff87cd867121 100644
170 --- a/drivers/clk/clk.c
171 +++ b/drivers/clk/clk.c
172 @@ -4151,20 +4151,19 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
173 /* search the list of notifiers for this clk */
174 list_for_each_entry(cn, &clk_notifier_list, node)
175 if (cn->clk == clk)
176 - break;
177 + goto found;
178
179 /* if clk wasn't in the notifier list, allocate new clk_notifier */
180 - if (cn->clk != clk) {
181 - cn = kzalloc(sizeof(*cn), GFP_KERNEL);
182 - if (!cn)
183 - goto out;
184 + cn = kzalloc(sizeof(*cn), GFP_KERNEL);
185 + if (!cn)
186 + goto out;
187
188 - cn->clk = clk;
189 - srcu_init_notifier_head(&cn->notifier_head);
190 + cn->clk = clk;
191 + srcu_init_notifier_head(&cn->notifier_head);
192
193 - list_add(&cn->node, &clk_notifier_list);
194 - }
195 + list_add(&cn->node, &clk_notifier_list);
196
197 +found:
198 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
199
200 clk->core->notifier_count++;
201 @@ -4189,32 +4188,28 @@ EXPORT_SYMBOL_GPL(clk_notifier_register);
202 */
203 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
204 {
205 - struct clk_notifier *cn = NULL;
206 - int ret = -EINVAL;
207 + struct clk_notifier *cn;
208 + int ret = -ENOENT;
209
210 if (!clk || !nb)
211 return -EINVAL;
212
213 clk_prepare_lock();
214
215 - list_for_each_entry(cn, &clk_notifier_list, node)
216 - if (cn->clk == clk)
217 - break;
218 -
219 - if (cn->clk == clk) {
220 - ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
221 + list_for_each_entry(cn, &clk_notifier_list, node) {
222 + if (cn->clk == clk) {
223 + ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
224
225 - clk->core->notifier_count--;
226 + clk->core->notifier_count--;
227
228 - /* XXX the notifier code should handle this better */
229 - if (!cn->notifier_head.head) {
230 - srcu_cleanup_notifier_head(&cn->notifier_head);
231 - list_del(&cn->node);
232 - kfree(cn);
233 + /* XXX the notifier code should handle this better */
234 + if (!cn->notifier_head.head) {
235 + srcu_cleanup_notifier_head(&cn->notifier_head);
236 + list_del(&cn->node);
237 + kfree(cn);
238 + }
239 + break;
240 }
241 -
242 - } else {
243 - ret = -ENOENT;
244 }
245
246 clk_prepare_unlock();
247 diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
248 index 43ecd507bf836..cf94a12459ea4 100644
249 --- a/drivers/clk/socfpga/clk-gate.c
250 +++ b/drivers/clk/socfpga/clk-gate.c
251 @@ -99,7 +99,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
252 val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift;
253 val &= GENMASK(socfpgaclk->width - 1, 0);
254 /* Check for GPIO_DB_CLK by its offset */
255 - if ((int) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
256 + if ((uintptr_t) socfpgaclk->div_reg & SOCFPGA_GPIO_DB_CLK_OFFSET)
257 div = val + 1;
258 else
259 div = (1 << val);
260 diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
261 index 75e08a98d09be..889ea7a6ed630 100644
262 --- a/drivers/counter/stm32-timer-cnt.c
263 +++ b/drivers/counter/stm32-timer-cnt.c
264 @@ -24,7 +24,6 @@ struct stm32_timer_cnt {
265 struct counter_device counter;
266 struct regmap *regmap;
267 struct clk *clk;
268 - u32 ceiling;
269 u32 max_arr;
270 };
271
272 @@ -67,14 +66,15 @@ static int stm32_count_write(struct counter_device *counter,
273 struct counter_count_write_value *val)
274 {
275 struct stm32_timer_cnt *const priv = counter->priv;
276 - u32 cnt;
277 + u32 cnt, ceiling;
278 int err;
279
280 err = counter_count_write_value_get(&cnt, COUNTER_COUNT_POSITION, val);
281 if (err)
282 return err;
283
284 - if (cnt > priv->ceiling)
285 + regmap_read(priv->regmap, TIM_ARR, &ceiling);
286 + if (cnt > ceiling)
287 return -EINVAL;
288
289 return regmap_write(priv->regmap, TIM_CNT, cnt);
290 @@ -136,10 +136,6 @@ static int stm32_count_function_set(struct counter_device *counter,
291
292 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, 0);
293
294 - /* TIMx_ARR register shouldn't be buffered (ARPE=0) */
295 - regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
296 - regmap_write(priv->regmap, TIM_ARR, priv->ceiling);
297 -
298 regmap_update_bits(priv->regmap, TIM_SMCR, TIM_SMCR_SMS, sms);
299
300 /* Make sure that registers are updated */
301 @@ -197,7 +193,6 @@ static ssize_t stm32_count_ceiling_write(struct counter_device *counter,
302 regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_ARPE, 0);
303 regmap_write(priv->regmap, TIM_ARR, ceiling);
304
305 - priv->ceiling = ceiling;
306 return len;
307 }
308
309 @@ -369,7 +364,6 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
310
311 priv->regmap = ddata->regmap;
312 priv->clk = ddata->clk;
313 - priv->ceiling = ddata->max_arr;
314 priv->max_arr = ddata->max_arr;
315
316 priv->counter.name = dev_name(dev);
317 diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
318 index 3456d33feb46a..ce8182bd0b558 100644
319 --- a/drivers/gpu/drm/i915/display/intel_acpi.c
320 +++ b/drivers/gpu/drm/i915/display/intel_acpi.c
321 @@ -83,13 +83,31 @@ static void intel_dsm_platform_mux_info(acpi_handle dhandle)
322 return;
323 }
324
325 + if (!pkg->package.count) {
326 + DRM_DEBUG_DRIVER("no connection in _DSM\n");
327 + return;
328 + }
329 +
330 connector_count = &pkg->package.elements[0];
331 DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
332 (unsigned long long)connector_count->integer.value);
333 for (i = 1; i < pkg->package.count; i++) {
334 union acpi_object *obj = &pkg->package.elements[i];
335 - union acpi_object *connector_id = &obj->package.elements[0];
336 - union acpi_object *info = &obj->package.elements[1];
337 + union acpi_object *connector_id;
338 + union acpi_object *info;
339 +
340 + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
341 + DRM_DEBUG_DRIVER("Invalid object for MUX #%d\n", i);
342 + continue;
343 + }
344 +
345 + connector_id = &obj->package.elements[0];
346 + info = &obj->package.elements[1];
347 + if (info->type != ACPI_TYPE_BUFFER || info->buffer.length < 4) {
348 + DRM_DEBUG_DRIVER("Invalid info for MUX obj #%d\n", i);
349 + continue;
350 + }
351 +
352 DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
353 (unsigned long long)connector_id->integer.value);
354 DRM_DEBUG_DRIVER(" port id: %s\n",
355 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
356 index 896d6f95a9604..7443df77cadb5 100644
357 --- a/drivers/gpu/drm/msm/msm_drv.c
358 +++ b/drivers/gpu/drm/msm/msm_drv.c
359 @@ -567,6 +567,7 @@ err_free_priv:
360 kfree(priv);
361 err_put_drm_dev:
362 drm_dev_put(ddev);
363 + platform_set_drvdata(pdev, NULL);
364 return ret;
365 }
366
367 diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
368 index 2dfe2ffcf8825..840f59650c7c4 100644
369 --- a/drivers/i2c/i2c-core-base.c
370 +++ b/drivers/i2c/i2c-core-base.c
371 @@ -254,13 +254,14 @@ EXPORT_SYMBOL_GPL(i2c_recover_bus);
372 static void i2c_init_recovery(struct i2c_adapter *adap)
373 {
374 struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
375 - char *err_str;
376 + char *err_str, *err_level = KERN_ERR;
377
378 if (!bri)
379 return;
380
381 if (!bri->recover_bus) {
382 - err_str = "no recover_bus() found";
383 + err_str = "no suitable method provided";
384 + err_level = KERN_DEBUG;
385 goto err;
386 }
387
388 @@ -290,7 +291,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap)
389
390 return;
391 err:
392 - dev_err(&adap->dev, "Not using recovery: %s\n", err_str);
393 + dev_printk(err_level, &adap->dev, "Not using recovery: %s\n", err_str);
394 adap->bus_recovery_info = NULL;
395 }
396
397 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
398 index 8beed4197e73e..c9e63c692b6e8 100644
399 --- a/drivers/infiniband/core/addr.c
400 +++ b/drivers/infiniband/core/addr.c
401 @@ -76,7 +76,9 @@ static struct workqueue_struct *addr_wq;
402
403 static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
404 [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
405 - .len = sizeof(struct rdma_nla_ls_gid)},
406 + .len = sizeof(struct rdma_nla_ls_gid),
407 + .validation_type = NLA_VALIDATE_MIN,
408 + .min = sizeof(struct rdma_nla_ls_gid)},
409 };
410
411 static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
412 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
413 index 3c78f8c32d12b..535ee41ee4214 100644
414 --- a/drivers/infiniband/hw/cxgb4/cm.c
415 +++ b/drivers/infiniband/hw/cxgb4/cm.c
416 @@ -3616,7 +3616,8 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
417 c4iw_init_wr_wait(ep->com.wr_waitp);
418 err = cxgb4_remove_server(
419 ep->com.dev->rdev.lldi.ports[0], ep->stid,
420 - ep->com.dev->rdev.lldi.rxq_ids[0], true);
421 + ep->com.dev->rdev.lldi.rxq_ids[0],
422 + ep->com.local_addr.ss_family == AF_INET6);
423 if (err)
424 goto done;
425 err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
426 diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
427 index f22089101cdda..4b18f37beb4c5 100644
428 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
429 +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
430 @@ -856,7 +856,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
431 if (dev->adapter->dev_set_bus) {
432 err = dev->adapter->dev_set_bus(dev, 0);
433 if (err)
434 - goto lbl_unregister_candev;
435 + goto adap_dev_free;
436 }
437
438 /* get device number early */
439 @@ -868,6 +868,10 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
440
441 return 0;
442
443 +adap_dev_free:
444 + if (dev->adapter->dev_free)
445 + dev->adapter->dev_free(dev);
446 +
447 lbl_unregister_candev:
448 unregister_candev(netdev);
449
450 diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
451 index ee1e67df1e7b4..dc75e798dbff8 100644
452 --- a/drivers/net/dsa/lantiq_gswip.c
453 +++ b/drivers/net/dsa/lantiq_gswip.c
454 @@ -93,8 +93,12 @@
455
456 /* GSWIP MII Registers */
457 #define GSWIP_MII_CFGp(p) (0x2 * (p))
458 +#define GSWIP_MII_CFG_RESET BIT(15)
459 #define GSWIP_MII_CFG_EN BIT(14)
460 +#define GSWIP_MII_CFG_ISOLATE BIT(13)
461 #define GSWIP_MII_CFG_LDCLKDIS BIT(12)
462 +#define GSWIP_MII_CFG_RGMII_IBS BIT(8)
463 +#define GSWIP_MII_CFG_RMII_CLK BIT(7)
464 #define GSWIP_MII_CFG_MODE_MIIP 0x0
465 #define GSWIP_MII_CFG_MODE_MIIM 0x1
466 #define GSWIP_MII_CFG_MODE_RMIIP 0x2
467 @@ -190,6 +194,23 @@
468 #define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
469
470 #define GSWIP_MAC_FLEN 0x8C5
471 +#define GSWIP_MAC_CTRL_0p(p) (0x903 + ((p) * 0xC))
472 +#define GSWIP_MAC_CTRL_0_PADEN BIT(8)
473 +#define GSWIP_MAC_CTRL_0_FCS_EN BIT(7)
474 +#define GSWIP_MAC_CTRL_0_FCON_MASK 0x0070
475 +#define GSWIP_MAC_CTRL_0_FCON_AUTO 0x0000
476 +#define GSWIP_MAC_CTRL_0_FCON_RX 0x0010
477 +#define GSWIP_MAC_CTRL_0_FCON_TX 0x0020
478 +#define GSWIP_MAC_CTRL_0_FCON_RXTX 0x0030
479 +#define GSWIP_MAC_CTRL_0_FCON_NONE 0x0040
480 +#define GSWIP_MAC_CTRL_0_FDUP_MASK 0x000C
481 +#define GSWIP_MAC_CTRL_0_FDUP_AUTO 0x0000
482 +#define GSWIP_MAC_CTRL_0_FDUP_EN 0x0004
483 +#define GSWIP_MAC_CTRL_0_FDUP_DIS 0x000C
484 +#define GSWIP_MAC_CTRL_0_GMII_MASK 0x0003
485 +#define GSWIP_MAC_CTRL_0_GMII_AUTO 0x0000
486 +#define GSWIP_MAC_CTRL_0_GMII_MII 0x0001
487 +#define GSWIP_MAC_CTRL_0_GMII_RGMII 0x0002
488 #define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
489 #define GSWIP_MAC_CTRL_2_MLEN BIT(3) /* Maximum Untagged Frame Lnegth */
490
491 @@ -653,16 +674,13 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
492 GSWIP_SDMA_PCTRLp(port));
493
494 if (!dsa_is_cpu_port(ds, port)) {
495 - u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
496 - GSWIP_MDIO_PHY_SPEED_AUTO |
497 - GSWIP_MDIO_PHY_FDUP_AUTO |
498 - GSWIP_MDIO_PHY_FCONTX_AUTO |
499 - GSWIP_MDIO_PHY_FCONRX_AUTO |
500 - (phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK);
501 -
502 - gswip_mdio_w(priv, macconf, GSWIP_MDIO_PHYp(port));
503 - /* Activate MDIO auto polling */
504 - gswip_mdio_mask(priv, 0, BIT(port), GSWIP_MDIO_MDC_CFG0);
505 + u32 mdio_phy = 0;
506 +
507 + if (phydev)
508 + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
509 +
510 + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
511 + GSWIP_MDIO_PHYp(port));
512 }
513
514 return 0;
515 @@ -675,14 +693,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
516 if (!dsa_is_user_port(ds, port))
517 return;
518
519 - if (!dsa_is_cpu_port(ds, port)) {
520 - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_DOWN,
521 - GSWIP_MDIO_PHY_LINK_MASK,
522 - GSWIP_MDIO_PHYp(port));
523 - /* Deactivate MDIO auto polling */
524 - gswip_mdio_mask(priv, BIT(port), 0, GSWIP_MDIO_MDC_CFG0);
525 - }
526 -
527 gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
528 GSWIP_FDMA_PCTRLp(port));
529 gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
530 @@ -790,14 +800,32 @@ static int gswip_setup(struct dsa_switch *ds)
531 gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP2);
532 gswip_switch_w(priv, BIT(cpu_port), GSWIP_PCE_PMAP3);
533
534 - /* disable PHY auto polling */
535 + /* Deactivate MDIO PHY auto polling. Some PHYs as the AR8030 have an
536 + * interoperability problem with this auto polling mechanism because
537 + * their status registers think that the link is in a different state
538 + * than it actually is. For the AR8030 it has the BMSR_ESTATEN bit set
539 + * as well as ESTATUS_1000_TFULL and ESTATUS_1000_XFULL. This makes the
540 + * auto polling state machine consider the link being negotiated with
541 + * 1Gbit/s. Since the PHY itself is a Fast Ethernet RMII PHY this leads
542 + * to the switch port being completely dead (RX and TX are both not
543 + * working).
544 + * Also with various other PHY / port combinations (PHY11G GPHY, PHY22F
545 + * GPHY, external RGMII PEF7071/7072) any traffic would stop. Sometimes
546 + * it would work fine for a few minutes to hours and then stop, on
547 + * other device it would no traffic could be sent or received at all.
548 + * Testing shows that when PHY auto polling is disabled these problems
549 + * go away.
550 + */
551 gswip_mdio_w(priv, 0x0, GSWIP_MDIO_MDC_CFG0);
552 +
553 /* Configure the MDIO Clock 2.5 MHz */
554 gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
555
556 - /* Disable the xMII link */
557 + /* Disable the xMII interface and clear it's isolation bit */
558 for (i = 0; i < priv->hw_info->max_ports; i++)
559 - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
560 + gswip_mii_mask_cfg(priv,
561 + GSWIP_MII_CFG_EN | GSWIP_MII_CFG_ISOLATE,
562 + 0, i);
563
564 /* enable special tag insertion on cpu port */
565 gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
566 @@ -1447,6 +1475,112 @@ unsupported:
567 return;
568 }
569
570 +static void gswip_port_set_link(struct gswip_priv *priv, int port, bool link)
571 +{
572 + u32 mdio_phy;
573 +
574 + if (link)
575 + mdio_phy = GSWIP_MDIO_PHY_LINK_UP;
576 + else
577 + mdio_phy = GSWIP_MDIO_PHY_LINK_DOWN;
578 +
579 + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_LINK_MASK, mdio_phy,
580 + GSWIP_MDIO_PHYp(port));
581 +}
582 +
583 +static void gswip_port_set_speed(struct gswip_priv *priv, int port, int speed,
584 + phy_interface_t interface)
585 +{
586 + u32 mdio_phy = 0, mii_cfg = 0, mac_ctrl_0 = 0;
587 +
588 + switch (speed) {
589 + case SPEED_10:
590 + mdio_phy = GSWIP_MDIO_PHY_SPEED_M10;
591 +
592 + if (interface == PHY_INTERFACE_MODE_RMII)
593 + mii_cfg = GSWIP_MII_CFG_RATE_M50;
594 + else
595 + mii_cfg = GSWIP_MII_CFG_RATE_M2P5;
596 +
597 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
598 + break;
599 +
600 + case SPEED_100:
601 + mdio_phy = GSWIP_MDIO_PHY_SPEED_M100;
602 +
603 + if (interface == PHY_INTERFACE_MODE_RMII)
604 + mii_cfg = GSWIP_MII_CFG_RATE_M50;
605 + else
606 + mii_cfg = GSWIP_MII_CFG_RATE_M25;
607 +
608 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_MII;
609 + break;
610 +
611 + case SPEED_1000:
612 + mdio_phy = GSWIP_MDIO_PHY_SPEED_G1;
613 +
614 + mii_cfg = GSWIP_MII_CFG_RATE_M125;
615 +
616 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_GMII_RGMII;
617 + break;
618 + }
619 +
620 + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_SPEED_MASK, mdio_phy,
621 + GSWIP_MDIO_PHYp(port));
622 + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_RATE_MASK, mii_cfg, port);
623 + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_GMII_MASK, mac_ctrl_0,
624 + GSWIP_MAC_CTRL_0p(port));
625 +}
626 +
627 +static void gswip_port_set_duplex(struct gswip_priv *priv, int port, int duplex)
628 +{
629 + u32 mac_ctrl_0, mdio_phy;
630 +
631 + if (duplex == DUPLEX_FULL) {
632 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_EN;
633 + mdio_phy = GSWIP_MDIO_PHY_FDUP_EN;
634 + } else {
635 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FDUP_DIS;
636 + mdio_phy = GSWIP_MDIO_PHY_FDUP_DIS;
637 + }
638 +
639 + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FDUP_MASK, mac_ctrl_0,
640 + GSWIP_MAC_CTRL_0p(port));
641 + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_FDUP_MASK, mdio_phy,
642 + GSWIP_MDIO_PHYp(port));
643 +}
644 +
645 +static void gswip_port_set_pause(struct gswip_priv *priv, int port,
646 + bool tx_pause, bool rx_pause)
647 +{
648 + u32 mac_ctrl_0, mdio_phy;
649 +
650 + if (tx_pause && rx_pause) {
651 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RXTX;
652 + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
653 + GSWIP_MDIO_PHY_FCONRX_EN;
654 + } else if (tx_pause) {
655 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_TX;
656 + mdio_phy = GSWIP_MDIO_PHY_FCONTX_EN |
657 + GSWIP_MDIO_PHY_FCONRX_DIS;
658 + } else if (rx_pause) {
659 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_RX;
660 + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
661 + GSWIP_MDIO_PHY_FCONRX_EN;
662 + } else {
663 + mac_ctrl_0 = GSWIP_MAC_CTRL_0_FCON_NONE;
664 + mdio_phy = GSWIP_MDIO_PHY_FCONTX_DIS |
665 + GSWIP_MDIO_PHY_FCONRX_DIS;
666 + }
667 +
668 + gswip_switch_mask(priv, GSWIP_MAC_CTRL_0_FCON_MASK,
669 + mac_ctrl_0, GSWIP_MAC_CTRL_0p(port));
670 + gswip_mdio_mask(priv,
671 + GSWIP_MDIO_PHY_FCONTX_MASK |
672 + GSWIP_MDIO_PHY_FCONRX_MASK,
673 + mdio_phy, GSWIP_MDIO_PHYp(port));
674 +}
675 +
676 static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
677 unsigned int mode,
678 const struct phylink_link_state *state)
679 @@ -1466,6 +1600,9 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
680 break;
681 case PHY_INTERFACE_MODE_RMII:
682 miicfg |= GSWIP_MII_CFG_MODE_RMIIM;
683 +
684 + /* Configure the RMII clock as output: */
685 + miicfg |= GSWIP_MII_CFG_RMII_CLK;
686 break;
687 case PHY_INTERFACE_MODE_RGMII:
688 case PHY_INTERFACE_MODE_RGMII_ID:
689 @@ -1478,7 +1615,16 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
690 "Unsupported interface: %d\n", state->interface);
691 return;
692 }
693 - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_MODE_MASK, miicfg, port);
694 +
695 + gswip_mii_mask_cfg(priv,
696 + GSWIP_MII_CFG_MODE_MASK | GSWIP_MII_CFG_RMII_CLK |
697 + GSWIP_MII_CFG_RGMII_IBS | GSWIP_MII_CFG_LDCLKDIS,
698 + miicfg, port);
699 +
700 + gswip_port_set_speed(priv, port, state->speed, state->interface);
701 + gswip_port_set_duplex(priv, port, state->duplex);
702 + gswip_port_set_pause(priv, port, !!(state->pause & MLO_PAUSE_TX),
703 + !!(state->pause & MLO_PAUSE_RX));
704
705 switch (state->interface) {
706 case PHY_INTERFACE_MODE_RGMII_ID:
707 @@ -1503,6 +1649,9 @@ static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
708 struct gswip_priv *priv = ds->priv;
709
710 gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
711 +
712 + if (!dsa_is_cpu_port(ds, port))
713 + gswip_port_set_link(priv, port, false);
714 }
715
716 static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
717 @@ -1512,6 +1661,9 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
718 {
719 struct gswip_priv *priv = ds->priv;
720
721 + if (!dsa_is_cpu_port(ds, port))
722 + gswip_port_set_link(priv, port, true);
723 +
724 gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
725 }
726
727 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
728 index 47bcbcf58048c..0c93a552b921d 100644
729 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h
730 +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
731 @@ -181,9 +181,9 @@
732 #define XGBE_DMA_SYS_AWCR 0x30303030
733
734 /* DMA cache settings - PCI device */
735 -#define XGBE_DMA_PCI_ARCR 0x00000003
736 -#define XGBE_DMA_PCI_AWCR 0x13131313
737 -#define XGBE_DMA_PCI_AWARCR 0x00000313
738 +#define XGBE_DMA_PCI_ARCR 0x000f0f0f
739 +#define XGBE_DMA_PCI_AWCR 0x0f0f0f0f
740 +#define XGBE_DMA_PCI_AWARCR 0x00000f0f
741
742 /* DMA channel interrupt modes */
743 #define XGBE_IRQ_MODE_EDGE 0
744 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
745 index 3f74416bb8744..bdef5b3dd848c 100644
746 --- a/drivers/net/ethernet/cadence/macb_main.c
747 +++ b/drivers/net/ethernet/cadence/macb_main.c
748 @@ -2915,6 +2915,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
749 bool cmp_b = false;
750 bool cmp_c = false;
751
752 + if (!macb_is_gem(bp))
753 + return;
754 +
755 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
756 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
757
758 @@ -3286,6 +3289,7 @@ static void macb_restore_features(struct macb *bp)
759 {
760 struct net_device *netdev = bp->dev;
761 netdev_features_t features = netdev->features;
762 + struct ethtool_rx_fs_item *item;
763
764 /* TX checksum offload */
765 macb_set_txcsum_feature(bp, features);
766 @@ -3294,6 +3298,9 @@ static void macb_restore_features(struct macb *bp)
767 macb_set_rxcsum_feature(bp, features);
768
769 /* RX Flow Filters */
770 + list_for_each_entry(item, &bp->rx_fs_list.list, list)
771 + gem_prog_cmp_regs(bp, &item->fs);
772 +
773 macb_set_rxflow_feature(bp, features);
774 }
775
776 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
777 index e26ae298a080a..7801425e2726c 100644
778 --- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
779 +++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
780 @@ -1393,11 +1393,25 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
781 struct cudbg_buffer temp_buff = { 0 };
782 struct sge_qbase_reg_field *sge_qbase;
783 struct ireg_buf *ch_sge_dbg;
784 + u8 padap_running = 0;
785 int i, rc;
786 + u32 size;
787
788 - rc = cudbg_get_buff(pdbg_init, dbg_buff,
789 - sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
790 - &temp_buff);
791 + /* Accessing SGE_QBASE_MAP[0-3] and SGE_QBASE_INDEX regs can
792 + * lead to SGE missing doorbells under heavy traffic. So, only
793 + * collect them when adapter is idle.
794 + */
795 + for_each_port(padap, i) {
796 + padap_running = netif_running(padap->port[i]);
797 + if (padap_running)
798 + break;
799 + }
800 +
801 + size = sizeof(*ch_sge_dbg) * 2;
802 + if (!padap_running)
803 + size += sizeof(*sge_qbase);
804 +
805 + rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
806 if (rc)
807 return rc;
808
809 @@ -1419,7 +1433,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
810 ch_sge_dbg++;
811 }
812
813 - if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
814 + if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5 &&
815 + !padap_running) {
816 sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
817 /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
818 * SGE_QBASE_MAP[0-3]
819 diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
820 index 588b63473c473..42374859b9d35 100644
821 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
822 +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
823 @@ -2093,7 +2093,8 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
824 0x1190, 0x1194,
825 0x11a0, 0x11a4,
826 0x11b0, 0x11b4,
827 - 0x11fc, 0x1274,
828 + 0x11fc, 0x123c,
829 + 0x1254, 0x1274,
830 0x1280, 0x133c,
831 0x1800, 0x18fc,
832 0x3000, 0x302c,
833 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
834 index a8959a092344f..382a45d84cc32 100644
835 --- a/drivers/net/ethernet/freescale/gianfar.c
836 +++ b/drivers/net/ethernet/freescale/gianfar.c
837 @@ -366,7 +366,11 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
838
839 static int gfar_set_mac_addr(struct net_device *dev, void *p)
840 {
841 - eth_mac_addr(dev, p);
842 + int ret;
843 +
844 + ret = eth_mac_addr(dev, p);
845 + if (ret)
846 + return ret;
847
848 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
849
850 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
851 index 9b09dd95e8781..fc275d4f484c5 100644
852 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
853 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
854 @@ -2140,14 +2140,14 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
855 {
856 struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
857
858 + clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
859 +
860 hclgevf_reset_tqp_stats(handle);
861
862 hclgevf_request_link_info(hdev);
863
864 hclgevf_update_link_mode(hdev);
865
866 - clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
867 -
868 return 0;
869 }
870
871 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
872 index 678e4190b8a8c..e571c6116c4b7 100644
873 --- a/drivers/net/ethernet/intel/i40e/i40e.h
874 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
875 @@ -152,6 +152,7 @@ enum i40e_state_t {
876 __I40E_VIRTCHNL_OP_PENDING,
877 __I40E_RECOVERY_MODE,
878 __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
879 + __I40E_VFS_RELEASING,
880 /* This must be last as it determines the size of the BITMAP */
881 __I40E_STATE_SIZE__,
882 };
883 diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
884 index 82c62e4678705..b519e5af5ed94 100644
885 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
886 +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
887 @@ -232,6 +232,8 @@ static void __i40e_add_stat_strings(u8 **p, const struct i40e_stats stats[],
888 I40E_STAT(struct i40e_vsi, _name, _stat)
889 #define I40E_VEB_STAT(_name, _stat) \
890 I40E_STAT(struct i40e_veb, _name, _stat)
891 +#define I40E_VEB_TC_STAT(_name, _stat) \
892 + I40E_STAT(struct i40e_cp_veb_tc_stats, _name, _stat)
893 #define I40E_PFC_STAT(_name, _stat) \
894 I40E_STAT(struct i40e_pfc_stats, _name, _stat)
895 #define I40E_QUEUE_STAT(_name, _stat) \
896 @@ -266,11 +268,18 @@ static const struct i40e_stats i40e_gstrings_veb_stats[] = {
897 I40E_VEB_STAT("veb.rx_unknown_protocol", stats.rx_unknown_protocol),
898 };
899
900 +struct i40e_cp_veb_tc_stats {
901 + u64 tc_rx_packets;
902 + u64 tc_rx_bytes;
903 + u64 tc_tx_packets;
904 + u64 tc_tx_bytes;
905 +};
906 +
907 static const struct i40e_stats i40e_gstrings_veb_tc_stats[] = {
908 - I40E_VEB_STAT("veb.tc_%u_tx_packets", tc_stats.tc_tx_packets),
909 - I40E_VEB_STAT("veb.tc_%u_tx_bytes", tc_stats.tc_tx_bytes),
910 - I40E_VEB_STAT("veb.tc_%u_rx_packets", tc_stats.tc_rx_packets),
911 - I40E_VEB_STAT("veb.tc_%u_rx_bytes", tc_stats.tc_rx_bytes),
912 + I40E_VEB_TC_STAT("veb.tc_%u_tx_packets", tc_tx_packets),
913 + I40E_VEB_TC_STAT("veb.tc_%u_tx_bytes", tc_tx_bytes),
914 + I40E_VEB_TC_STAT("veb.tc_%u_rx_packets", tc_rx_packets),
915 + I40E_VEB_TC_STAT("veb.tc_%u_rx_bytes", tc_rx_bytes),
916 };
917
918 static const struct i40e_stats i40e_gstrings_misc_stats[] = {
919 @@ -1098,6 +1107,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
920
921 /* Set flow control settings */
922 ethtool_link_ksettings_add_link_mode(ks, supported, Pause);
923 + ethtool_link_ksettings_add_link_mode(ks, supported, Asym_Pause);
924
925 switch (hw->fc.requested_mode) {
926 case I40E_FC_FULL:
927 @@ -2212,6 +2222,29 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
928 }
929 }
930
931 +/**
932 + * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
933 + * @tc: the TC statistics in VEB structure (veb->tc_stats)
934 + * @i: the index of traffic class in (veb->tc_stats) structure to copy
935 + *
936 + * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
937 + * one dimensional structure i40e_cp_veb_tc_stats.
938 + * Produce formatted i40e_cp_veb_tc_stats structure of the VEB TC
939 + * statistics for the given TC.
940 + **/
941 +static struct i40e_cp_veb_tc_stats
942 +i40e_get_veb_tc_stats(struct i40e_veb_tc_stats *tc, unsigned int i)
943 +{
944 + struct i40e_cp_veb_tc_stats veb_tc = {
945 + .tc_rx_packets = tc->tc_rx_packets[i],
946 + .tc_rx_bytes = tc->tc_rx_bytes[i],
947 + .tc_tx_packets = tc->tc_tx_packets[i],
948 + .tc_tx_bytes = tc->tc_tx_bytes[i],
949 + };
950 +
951 + return veb_tc;
952 +}
953 +
954 /**
955 * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
956 * @pf: the PF device structure
957 @@ -2296,8 +2329,16 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
958 i40e_gstrings_veb_stats);
959
960 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
961 - i40e_add_ethtool_stats(&data, veb_stats ? veb : NULL,
962 - i40e_gstrings_veb_tc_stats);
963 + if (veb_stats) {
964 + struct i40e_cp_veb_tc_stats veb_tc =
965 + i40e_get_veb_tc_stats(&veb->tc_stats, i);
966 +
967 + i40e_add_ethtool_stats(&data, &veb_tc,
968 + i40e_gstrings_veb_tc_stats);
969 + } else {
970 + i40e_add_ethtool_stats(&data, NULL,
971 + i40e_gstrings_veb_tc_stats);
972 + }
973
974 i40e_add_ethtool_stats(&data, pf, i40e_gstrings_stats);
975
976 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
977 index 58211590229b1..a69aace057925 100644
978 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
979 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
980 @@ -2547,8 +2547,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
981 i40e_stat_str(hw, aq_ret),
982 i40e_aq_str(hw, hw->aq.asq_last_status));
983 } else {
984 - dev_info(&pf->pdev->dev, "%s is %s allmulti mode.\n",
985 - vsi->netdev->name,
986 + dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
987 cur_multipromisc ? "entering" : "leaving");
988 }
989 }
990 @@ -14701,12 +14700,16 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
991 * in order to register the netdev
992 */
993 v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
994 - if (v_idx < 0)
995 + if (v_idx < 0) {
996 + err = v_idx;
997 goto err_switch_setup;
998 + }
999 pf->lan_vsi = v_idx;
1000 vsi = pf->vsi[v_idx];
1001 - if (!vsi)
1002 + if (!vsi) {
1003 + err = -EFAULT;
1004 goto err_switch_setup;
1005 + }
1006 vsi->alloc_queue_pairs = 1;
1007 err = i40e_config_netdev(vsi);
1008 if (err)
1009 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1010 index 5acd599d6b9af..e561073054865 100644
1011 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1012 +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
1013 @@ -137,6 +137,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
1014 **/
1015 static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
1016 {
1017 + struct i40e_pf *pf = vf->pf;
1018 int i;
1019
1020 i40e_vc_notify_vf_reset(vf);
1021 @@ -147,6 +148,11 @@ static inline void i40e_vc_disable_vf(struct i40e_vf *vf)
1022 * ensure a reset.
1023 */
1024 for (i = 0; i < 20; i++) {
1025 + /* If PF is in VFs releasing state reset VF is impossible,
1026 + * so leave it.
1027 + */
1028 + if (test_bit(__I40E_VFS_RELEASING, pf->state))
1029 + return;
1030 if (i40e_reset_vf(vf, false))
1031 return;
1032 usleep_range(10000, 20000);
1033 @@ -1506,6 +1512,8 @@ void i40e_free_vfs(struct i40e_pf *pf)
1034
1035 if (!pf->vf)
1036 return;
1037 +
1038 + set_bit(__I40E_VFS_RELEASING, pf->state);
1039 while (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
1040 usleep_range(1000, 2000);
1041
1042 @@ -1563,6 +1571,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
1043 }
1044 }
1045 clear_bit(__I40E_VF_DISABLE, pf->state);
1046 + clear_bit(__I40E_VFS_RELEASING, pf->state);
1047 }
1048
1049 #ifdef CONFIG_PCI_IOV
1050 diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
1051 index 4df9da3591359..3b1d35365ef0f 100644
1052 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h
1053 +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
1054 @@ -31,8 +31,8 @@ enum ice_ctl_q {
1055 ICE_CTL_Q_MAILBOX,
1056 };
1057
1058 -/* Control Queue timeout settings - max delay 250ms */
1059 -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
1060 +/* Control Queue timeout settings - max delay 1s */
1061 +#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */
1062 #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
1063
1064 struct ice_ctl_q_ring {
1065 diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
1066 index 1acdd43a2eddd..7ff2e07f6d38a 100644
1067 --- a/drivers/net/ethernet/intel/ice/ice_switch.c
1068 +++ b/drivers/net/ethernet/intel/ice/ice_switch.c
1069 @@ -1279,6 +1279,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
1070 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
1071 vsi_list_id);
1072
1073 + if (!m_entry->vsi_list_info)
1074 + return ICE_ERR_NO_MEMORY;
1075 +
1076 /* If this entry was large action then the large action needs
1077 * to be updated to point to FWD to VSI list
1078 */
1079 @@ -2266,6 +2269,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
1080 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
1081 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
1082 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
1083 + fm_entry->vsi_list_info &&
1084 (test_bit(vsi_handle, fm_entry->vsi_list_info->vsi_map))));
1085 }
1086
1087 @@ -2338,14 +2342,12 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
1088 return ICE_ERR_PARAM;
1089
1090 list_for_each_entry(fm_entry, lkup_list_head, list_entry) {
1091 - struct ice_fltr_info *fi;
1092 -
1093 - fi = &fm_entry->fltr_info;
1094 - if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
1095 + if (!ice_vsi_uses_fltr(fm_entry, vsi_handle))
1096 continue;
1097
1098 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
1099 - vsi_list_head, fi);
1100 + vsi_list_head,
1101 + &fm_entry->fltr_info);
1102 if (status)
1103 return status;
1104 }
1105 @@ -2663,7 +2665,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
1106 &remove_list_head);
1107 mutex_unlock(rule_lock);
1108 if (status)
1109 - return;
1110 + goto free_fltr_list;
1111
1112 switch (lkup) {
1113 case ICE_SW_LKUP_MAC:
1114 @@ -2686,6 +2688,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
1115 break;
1116 }
1117
1118 +free_fltr_list:
1119 list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
1120 list_del(&fm_entry->list_entry);
1121 devm_kfree(ice_hw_to_dev(hw), fm_entry);
1122 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1123 index e09b4a96a1d5f..e3dc2cbdc9f6c 100644
1124 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1125 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1126 @@ -700,11 +700,11 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
1127 return 0;
1128 }
1129
1130 -static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
1131 - u32 eth_proto_cap,
1132 - u8 connector_type, bool ext)
1133 +static void ptys2ethtool_supported_advertised_port(struct mlx5_core_dev *mdev,
1134 + struct ethtool_link_ksettings *link_ksettings,
1135 + u32 eth_proto_cap, u8 connector_type)
1136 {
1137 - if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
1138 + if (!MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type)) {
1139 if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
1140 | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
1141 | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
1142 @@ -836,9 +836,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
1143 [MLX5E_PORT_OTHER] = PORT_OTHER,
1144 };
1145
1146 -static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
1147 +static u8 get_connector_port(struct mlx5_core_dev *mdev, u32 eth_proto, u8 connector_type)
1148 {
1149 - if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
1150 + if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_connector_type))
1151 return ptys2connector_type[connector_type];
1152
1153 if (eth_proto &
1154 @@ -937,11 +937,11 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
1155 link_ksettings);
1156
1157 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
1158 -
1159 - link_ksettings->base.port = get_connector_port(eth_proto_oper,
1160 - connector_type, ext);
1161 - ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
1162 - connector_type, ext);
1163 + connector_type = connector_type < MLX5E_CONNECTOR_TYPE_NUMBER ?
1164 + connector_type : MLX5E_PORT_UNKNOWN;
1165 + link_ksettings->base.port = get_connector_port(mdev, eth_proto_oper, connector_type);
1166 + ptys2ethtool_supported_advertised_port(mdev, link_ksettings, eth_proto_admin,
1167 + connector_type);
1168 get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
1169
1170 if (an_status == MLX5_AN_COMPLETE)
1171 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1172 index 0a20938b4aadb..30a2ee3c40a00 100644
1173 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1174 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1175 @@ -926,13 +926,24 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
1176 mutex_unlock(&table->lock);
1177 }
1178
1179 +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
1180 +#define MLX5_MAX_ASYNC_EQS 4
1181 +#else
1182 +#define MLX5_MAX_ASYNC_EQS 3
1183 +#endif
1184 +
1185 int mlx5_eq_table_create(struct mlx5_core_dev *dev)
1186 {
1187 struct mlx5_eq_table *eq_table = dev->priv.eq_table;
1188 + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
1189 + MLX5_CAP_GEN(dev, max_num_eqs) :
1190 + 1 << MLX5_CAP_GEN(dev, log_max_eq);
1191 int err;
1192
1193 eq_table->num_comp_eqs =
1194 - mlx5_irq_get_num_comp(eq_table->irq_table);
1195 + min_t(int,
1196 + mlx5_irq_get_num_comp(eq_table->irq_table),
1197 + num_eqs - MLX5_MAX_ASYNC_EQS);
1198
1199 err = create_async_eqs(dev);
1200 if (err) {
1201 diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
1202 index 0e2db6ea79e96..2ec62c8d86e1c 100644
1203 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
1204 +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
1205 @@ -454,6 +454,7 @@ void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
1206 dev_consume_skb_any(skb);
1207 else
1208 dev_kfree_skb_any(skb);
1209 + return;
1210 }
1211
1212 nfp_ccm_rx(&bpf->ccm, skb);
1213 diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
1214 index 31d94592a7c02..2d99533ad3e0d 100644
1215 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h
1216 +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
1217 @@ -164,6 +164,7 @@ struct nfp_fl_internal_ports {
1218 * @qos_rate_limiters: Current active qos rate limiters
1219 * @qos_stats_lock: Lock on qos stats updates
1220 * @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
1221 + * @merge_table: Hash table to store merged flows
1222 */
1223 struct nfp_flower_priv {
1224 struct nfp_app *app;
1225 @@ -196,6 +197,7 @@ struct nfp_flower_priv {
1226 unsigned int qos_rate_limiters;
1227 spinlock_t qos_stats_lock; /* Protect the qos stats */
1228 int pre_tun_rule_cnt;
1229 + struct rhashtable merge_table;
1230 };
1231
1232 /**
1233 @@ -310,6 +312,12 @@ struct nfp_fl_payload_link {
1234 };
1235
1236 extern const struct rhashtable_params nfp_flower_table_params;
1237 +extern const struct rhashtable_params merge_table_params;
1238 +
1239 +struct nfp_merge_info {
1240 + u64 parent_ctx;
1241 + struct rhash_head ht_node;
1242 +};
1243
1244 struct nfp_fl_stats_frame {
1245 __be32 stats_con_id;
1246 diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1247 index aa06fcb38f8b9..327bb56b3ef56 100644
1248 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1249 +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
1250 @@ -490,6 +490,12 @@ const struct rhashtable_params nfp_flower_table_params = {
1251 .automatic_shrinking = true,
1252 };
1253
1254 +const struct rhashtable_params merge_table_params = {
1255 + .key_offset = offsetof(struct nfp_merge_info, parent_ctx),
1256 + .head_offset = offsetof(struct nfp_merge_info, ht_node),
1257 + .key_len = sizeof(u64),
1258 +};
1259 +
1260 int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
1261 unsigned int host_num_mems)
1262 {
1263 @@ -506,6 +512,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
1264 if (err)
1265 goto err_free_flow_table;
1266
1267 + err = rhashtable_init(&priv->merge_table, &merge_table_params);
1268 + if (err)
1269 + goto err_free_stats_ctx_table;
1270 +
1271 get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
1272
1273 /* Init ring buffer and unallocated mask_ids. */
1274 @@ -513,7 +523,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
1275 kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
1276 NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
1277 if (!priv->mask_ids.mask_id_free_list.buf)
1278 - goto err_free_stats_ctx_table;
1279 + goto err_free_merge_table;
1280
1281 priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
1282
1283 @@ -550,6 +560,8 @@ err_free_last_used:
1284 kfree(priv->mask_ids.last_used);
1285 err_free_mask_id:
1286 kfree(priv->mask_ids.mask_id_free_list.buf);
1287 +err_free_merge_table:
1288 + rhashtable_destroy(&priv->merge_table);
1289 err_free_stats_ctx_table:
1290 rhashtable_destroy(&priv->stats_ctx_table);
1291 err_free_flow_table:
1292 @@ -568,6 +580,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
1293 nfp_check_rhashtable_empty, NULL);
1294 rhashtable_free_and_destroy(&priv->stats_ctx_table,
1295 nfp_check_rhashtable_empty, NULL);
1296 + rhashtable_free_and_destroy(&priv->merge_table,
1297 + nfp_check_rhashtable_empty, NULL);
1298 kvfree(priv->stats);
1299 kfree(priv->mask_ids.mask_id_free_list.buf);
1300 kfree(priv->mask_ids.last_used);
1301 diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
1302 index 4dd3f8a5a9b88..f57e7f3370124 100644
1303 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
1304 +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
1305 @@ -923,6 +923,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
1306 struct netlink_ext_ack *extack = NULL;
1307 struct nfp_fl_payload *merge_flow;
1308 struct nfp_fl_key_ls merge_key_ls;
1309 + struct nfp_merge_info *merge_info;
1310 + u64 parent_ctx = 0;
1311 int err;
1312
1313 ASSERT_RTNL();
1314 @@ -933,6 +935,15 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
1315 nfp_flower_is_merge_flow(sub_flow2))
1316 return -EINVAL;
1317
1318 + /* check if the two flows are already merged */
1319 + parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
1320 + parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
1321 + if (rhashtable_lookup_fast(&priv->merge_table,
1322 + &parent_ctx, merge_table_params)) {
1323 + nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
1324 + return 0;
1325 + }
1326 +
1327 err = nfp_flower_can_merge(sub_flow1, sub_flow2);
1328 if (err)
1329 return err;
1330 @@ -974,16 +985,33 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
1331 if (err)
1332 goto err_release_metadata;
1333
1334 + merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
1335 + if (!merge_info) {
1336 + err = -ENOMEM;
1337 + goto err_remove_rhash;
1338 + }
1339 + merge_info->parent_ctx = parent_ctx;
1340 + err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
1341 + merge_table_params);
1342 + if (err)
1343 + goto err_destroy_merge_info;
1344 +
1345 err = nfp_flower_xmit_flow(app, merge_flow,
1346 NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
1347 if (err)
1348 - goto err_remove_rhash;
1349 + goto err_remove_merge_info;
1350
1351 merge_flow->in_hw = true;
1352 sub_flow1->in_hw = false;
1353
1354 return 0;
1355
1356 +err_remove_merge_info:
1357 + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
1358 + &merge_info->ht_node,
1359 + merge_table_params));
1360 +err_destroy_merge_info:
1361 + kfree(merge_info);
1362 err_remove_rhash:
1363 WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1364 &merge_flow->fl_node,
1365 @@ -1211,7 +1239,9 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
1366 {
1367 struct nfp_flower_priv *priv = app->priv;
1368 struct nfp_fl_payload_link *link, *temp;
1369 + struct nfp_merge_info *merge_info;
1370 struct nfp_fl_payload *origin;
1371 + u64 parent_ctx = 0;
1372 bool mod = false;
1373 int err;
1374
1375 @@ -1248,8 +1278,22 @@ nfp_flower_remove_merge_flow(struct nfp_app *app,
1376 err_free_links:
1377 /* Clean any links connected with the merged flow. */
1378 list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
1379 - merge_flow.list)
1380 + merge_flow.list) {
1381 + u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);
1382 +
1383 + parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
1384 nfp_flower_unlink_flow(link);
1385 + }
1386 +
1387 + merge_info = rhashtable_lookup_fast(&priv->merge_table,
1388 + &parent_ctx,
1389 + merge_table_params);
1390 + if (merge_info) {
1391 + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
1392 + &merge_info->ht_node,
1393 + merge_table_params));
1394 + kfree(merge_info);
1395 + }
1396
1397 kfree(merge_flow->action_data);
1398 kfree(merge_flow->mask_data);
1399 diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
1400 index 0dd0ba915ab97..23ee0b14cbfa1 100644
1401 --- a/drivers/net/ieee802154/atusb.c
1402 +++ b/drivers/net/ieee802154/atusb.c
1403 @@ -365,6 +365,7 @@ static int atusb_alloc_urbs(struct atusb *atusb, int n)
1404 return -ENOMEM;
1405 }
1406 usb_anchor_urb(urb, &atusb->idle_urbs);
1407 + usb_free_urb(urb);
1408 n--;
1409 }
1410 return 0;
1411 diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
1412 index e0d3310957ffa..c99883120556c 100644
1413 --- a/drivers/net/phy/bcm-phy-lib.c
1414 +++ b/drivers/net/phy/bcm-phy-lib.c
1415 @@ -190,7 +190,7 @@ EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);
1416
1417 int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
1418 {
1419 - int val;
1420 + int val, mask = 0;
1421
1422 /* Enable EEE at PHY level */
1423 val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
1424 @@ -209,10 +209,17 @@ int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
1425 if (val < 0)
1426 return val;
1427
1428 + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1429 + phydev->supported))
1430 + mask |= MDIO_EEE_1000T;
1431 + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1432 + phydev->supported))
1433 + mask |= MDIO_EEE_100TX;
1434 +
1435 if (enable)
1436 - val |= (MDIO_EEE_100TX | MDIO_EEE_1000T);
1437 + val |= mask;
1438 else
1439 - val &= ~(MDIO_EEE_100TX | MDIO_EEE_1000T);
1440 + val &= ~mask;
1441
1442 phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);
1443
1444 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1445 index fe6ec22bf3d51..7c40ae058e6d1 100644
1446 --- a/drivers/net/tun.c
1447 +++ b/drivers/net/tun.c
1448 @@ -68,6 +68,14 @@
1449 #include <linux/bpf.h>
1450 #include <linux/bpf_trace.h>
1451 #include <linux/mutex.h>
1452 +#include <linux/ieee802154.h>
1453 +#include <linux/if_ltalk.h>
1454 +#include <uapi/linux/if_fddi.h>
1455 +#include <uapi/linux/if_hippi.h>
1456 +#include <uapi/linux/if_fc.h>
1457 +#include <net/ax25.h>
1458 +#include <net/rose.h>
1459 +#include <net/6lowpan.h>
1460
1461 #include <linux/uaccess.h>
1462 #include <linux/proc_fs.h>
1463 @@ -3043,6 +3051,45 @@ static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
1464 return __tun_set_ebpf(tun, prog_p, prog);
1465 }
1466
1467 +/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
1468 +static unsigned char tun_get_addr_len(unsigned short type)
1469 +{
1470 + switch (type) {
1471 + case ARPHRD_IP6GRE:
1472 + case ARPHRD_TUNNEL6:
1473 + return sizeof(struct in6_addr);
1474 + case ARPHRD_IPGRE:
1475 + case ARPHRD_TUNNEL:
1476 + case ARPHRD_SIT:
1477 + return 4;
1478 + case ARPHRD_ETHER:
1479 + return ETH_ALEN;
1480 + case ARPHRD_IEEE802154:
1481 + case ARPHRD_IEEE802154_MONITOR:
1482 + return IEEE802154_EXTENDED_ADDR_LEN;
1483 + case ARPHRD_PHONET_PIPE:
1484 + case ARPHRD_PPP:
1485 + case ARPHRD_NONE:
1486 + return 0;
1487 + case ARPHRD_6LOWPAN:
1488 + return EUI64_ADDR_LEN;
1489 + case ARPHRD_FDDI:
1490 + return FDDI_K_ALEN;
1491 + case ARPHRD_HIPPI:
1492 + return HIPPI_ALEN;
1493 + case ARPHRD_IEEE802:
1494 + return FC_ALEN;
1495 + case ARPHRD_ROSE:
1496 + return ROSE_ADDR_LEN;
1497 + case ARPHRD_NETROM:
1498 + return AX25_ADDR_LEN;
1499 + case ARPHRD_LOCALTLK:
1500 + return LTALK_ALEN;
1501 + default:
1502 + return 0;
1503 + }
1504 +}
1505 +
1506 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1507 unsigned long arg, int ifreq_len)
1508 {
1509 @@ -3198,6 +3245,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1510 ret = -EBUSY;
1511 } else {
1512 tun->dev->type = (int) arg;
1513 + tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
1514 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1515 tun->dev->type);
1516 ret = 0;
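
The tun change makes TUNSETLINK keep dev->addr_len consistent with dev->type; previously the address length stayed at its old value after a link-type change, so a later hardware-address query could read past the real address. A hedged userspace sketch of the ioctl sequence this path serves; the device name and ARPHRD type are illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <net/if_arp.h>
    #include <linux/if_tun.h>

    int main(void)
    {
            struct ifreq ifr;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
            strncpy(ifr.ifr_name, "tun-test", IFNAMSIZ - 1);
            if (ioctl(fd, TUNSETIFF, &ifr) < 0)
                    return 1;
            /* The device is still down, so TUNSETLINK is allowed; with the
             * fix this also sets dev->addr_len (4 for ARPHRD_IPGRE).
             */
            if (ioctl(fd, TUNSETLINK, ARPHRD_IPGRE) < 0)
                    perror("TUNSETLINK");
            close(fd);
            return 0;
    }
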
1517 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
1518 index 7449b97a3c89b..38f39154a6433 100644
1519 --- a/drivers/net/usb/hso.c
1520 +++ b/drivers/net/usb/hso.c
1521 @@ -611,7 +611,7 @@ static struct hso_serial *get_serial_by_index(unsigned index)
1522 return serial;
1523 }
1524
1525 -static int get_free_serial_index(void)
1526 +static int obtain_minor(struct hso_serial *serial)
1527 {
1528 int index;
1529 unsigned long flags;
1530 @@ -619,8 +619,10 @@ static int get_free_serial_index(void)
1531 spin_lock_irqsave(&serial_table_lock, flags);
1532 for (index = 0; index < HSO_SERIAL_TTY_MINORS; index++) {
1533 if (serial_table[index] == NULL) {
1534 + serial_table[index] = serial->parent;
1535 + serial->minor = index;
1536 spin_unlock_irqrestore(&serial_table_lock, flags);
1537 - return index;
1538 + return 0;
1539 }
1540 }
1541 spin_unlock_irqrestore(&serial_table_lock, flags);
1542 @@ -629,15 +631,12 @@ static int get_free_serial_index(void)
1543 return -1;
1544 }
1545
1546 -static void set_serial_by_index(unsigned index, struct hso_serial *serial)
1547 +static void release_minor(struct hso_serial *serial)
1548 {
1549 unsigned long flags;
1550
1551 spin_lock_irqsave(&serial_table_lock, flags);
1552 - if (serial)
1553 - serial_table[index] = serial->parent;
1554 - else
1555 - serial_table[index] = NULL;
1556 + serial_table[serial->minor] = NULL;
1557 spin_unlock_irqrestore(&serial_table_lock, flags);
1558 }
1559
1560 @@ -2230,6 +2229,7 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
1561 static void hso_serial_tty_unregister(struct hso_serial *serial)
1562 {
1563 tty_unregister_device(tty_drv, serial->minor);
1564 + release_minor(serial);
1565 }
1566
1567 static void hso_serial_common_free(struct hso_serial *serial)
1568 @@ -2253,24 +2253,22 @@ static void hso_serial_common_free(struct hso_serial *serial)
1569 static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
1570 int rx_size, int tx_size)
1571 {
1572 - int minor;
1573 int i;
1574
1575 tty_port_init(&serial->port);
1576
1577 - minor = get_free_serial_index();
1578 - if (minor < 0)
1579 + if (obtain_minor(serial))
1580 goto exit2;
1581
1582 /* register our minor number */
1583 serial->parent->dev = tty_port_register_device_attr(&serial->port,
1584 - tty_drv, minor, &serial->parent->interface->dev,
1585 + tty_drv, serial->minor, &serial->parent->interface->dev,
1586 serial->parent, hso_serial_dev_groups);
1587 - if (IS_ERR(serial->parent->dev))
1588 + if (IS_ERR(serial->parent->dev)) {
1589 + release_minor(serial);
1590 goto exit2;
1591 + }
1592
1593 - /* fill in specific data for later use */
1594 - serial->minor = minor;
1595 serial->magic = HSO_SERIAL_MAGIC;
1596 spin_lock_init(&serial->serial_lock);
1597 serial->num_rx_urbs = num_urbs;
1598 @@ -2668,9 +2666,6 @@ static struct hso_device *hso_create_bulk_serial_device(
1599
1600 serial->write_data = hso_std_serial_write_data;
1601
1602 - /* and record this serial */
1603 - set_serial_by_index(serial->minor, serial);
1604 -
1605 /* setup the proc dirs and files if needed */
1606 hso_log_port(hso_dev);
1607
1608 @@ -2727,9 +2722,6 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
1609 serial->shared_int->ref_count++;
1610 mutex_unlock(&serial->shared_int->shared_int_lock);
1611
1612 - /* and record this serial */
1613 - set_serial_by_index(serial->minor, serial);
1614 -
1615 /* setup the proc dirs and files if needed */
1616 hso_log_port(hso_dev);
1617
1618 @@ -3114,7 +3106,6 @@ static void hso_free_interface(struct usb_interface *interface)
1619 cancel_work_sync(&serial_table[i]->async_get_intf);
1620 hso_serial_tty_unregister(serial);
1621 kref_put(&serial_table[i]->ref, hso_serial_ref_free);
1622 - set_serial_by_index(i, NULL);
1623 }
1624 }
1625
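
The hso rework closes a race in minor allocation: get_free_serial_index() returned a free index and only later did set_serial_by_index() publish it, so two probing devices could claim the same slot. obtain_minor() instead writes the table entry while serial_table_lock is still held. The distilled claim-under-lock pattern, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    #define NSLOTS 8

    static void *slot_table[NSLOTS];
    static DEFINE_SPINLOCK(slot_lock);

    /* Claim a free slot for @owner atomically. Returning a "free" index
     * and filling it in later would let a second caller win the same slot.
     */
    static int obtain_slot(void *owner)
    {
            unsigned long flags;
            int i;

            spin_lock_irqsave(&slot_lock, flags);
            for (i = 0; i < NSLOTS; i++) {
                    if (!slot_table[i]) {
                            slot_table[i] = owner;  /* claimed under the lock */
                            spin_unlock_irqrestore(&slot_lock, flags);
                            return i;
                    }
            }
            spin_unlock_irqrestore(&slot_lock, flags);
            return -EBUSY;
    }
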
1626 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1627 index 0ef85819665c8..b67460864b3c2 100644
1628 --- a/drivers/net/virtio_net.c
1629 +++ b/drivers/net/virtio_net.c
1630 @@ -376,7 +376,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
1631 struct receive_queue *rq,
1632 struct page *page, unsigned int offset,
1633 unsigned int len, unsigned int truesize,
1634 - bool hdr_valid)
1635 + bool hdr_valid, unsigned int metasize)
1636 {
1637 struct sk_buff *skb;
1638 struct virtio_net_hdr_mrg_rxbuf *hdr;
1639 @@ -398,6 +398,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
1640 else
1641 hdr_padded_len = sizeof(struct padded_vnet_hdr);
1642
1643 + /* hdr_valid means no XDP, so we can copy the vnet header */
1644 if (hdr_valid)
1645 memcpy(hdr, p, hdr_len);
1646
1647 @@ -410,6 +411,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
1648 copy = skb_tailroom(skb);
1649 skb_put_data(skb, p, copy);
1650
1651 + if (metasize) {
1652 + __skb_pull(skb, metasize);
1653 + skb_metadata_set(skb, metasize);
1654 + }
1655 +
1656 len -= copy;
1657 offset += copy;
1658
1659 @@ -455,10 +461,6 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1660 struct virtio_net_hdr_mrg_rxbuf *hdr;
1661 int err;
1662
1663 - /* virtqueue want to use data area in-front of packet */
1664 - if (unlikely(xdpf->metasize > 0))
1665 - return -EOPNOTSUPP;
1666 -
1667 if (unlikely(xdpf->headroom < vi->hdr_len))
1668 return -EOVERFLOW;
1669
1670 @@ -649,6 +651,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
1671 unsigned int delta = 0;
1672 struct page *xdp_page;
1673 int err;
1674 + unsigned int metasize = 0;
1675
1676 len -= vi->hdr_len;
1677 stats->bytes += len;
1678 @@ -688,8 +691,8 @@ static struct sk_buff *receive_small(struct net_device *dev,
1679
1680 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
1681 xdp.data = xdp.data_hard_start + xdp_headroom;
1682 - xdp_set_data_meta_invalid(&xdp);
1683 xdp.data_end = xdp.data + len;
1684 + xdp.data_meta = xdp.data;
1685 xdp.rxq = &rq->xdp_rxq;
1686 orig_data = xdp.data;
1687 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1688 @@ -700,6 +703,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
1689 /* Recalculate length in case bpf program changed it */
1690 delta = orig_data - xdp.data;
1691 len = xdp.data_end - xdp.data;
1692 + metasize = xdp.data - xdp.data_meta;
1693 break;
1694 case XDP_TX:
1695 stats->xdp_tx++;
1696 @@ -745,6 +749,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
1697 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
1698 } /* keep zeroed vnet hdr since packet was changed by bpf */
1699
1700 + if (metasize)
1701 + skb_metadata_set(skb, metasize);
1702 +
1703 err:
1704 return skb;
1705
1706 @@ -765,8 +772,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
1707 struct virtnet_rq_stats *stats)
1708 {
1709 struct page *page = buf;
1710 - struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
1711 - PAGE_SIZE, true);
1712 + struct sk_buff *skb =
1713 + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
1714
1715 stats->bytes += len - vi->hdr_len;
1716 if (unlikely(!skb))
1717 @@ -798,6 +805,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1718 unsigned int truesize;
1719 unsigned int headroom = mergeable_ctx_to_headroom(ctx);
1720 int err;
1721 + unsigned int metasize = 0;
1722
1723 head_skb = NULL;
1724 stats->bytes += len - vi->hdr_len;
1725 @@ -844,8 +852,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1726 data = page_address(xdp_page) + offset;
1727 xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
1728 xdp.data = data + vi->hdr_len;
1729 - xdp_set_data_meta_invalid(&xdp);
1730 xdp.data_end = xdp.data + (len - vi->hdr_len);
1731 + xdp.data_meta = xdp.data;
1732 xdp.rxq = &rq->xdp_rxq;
1733
1734 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1735 @@ -853,24 +861,27 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1736
1737 switch (act) {
1738 case XDP_PASS:
1739 + metasize = xdp.data - xdp.data_meta;
1740 +
1741 /* recalculate offset to account for any header
1742 - * adjustments. Note other cases do not build an
1743 - * skb and avoid using offset
1744 + * adjustments and minus the metasize to copy the
1745 + * metadata in page_to_skb(). Note other cases do not
1746 + * build an skb and avoid using offset
1747 */
1748 - offset = xdp.data -
1749 - page_address(xdp_page) - vi->hdr_len;
1750 + offset = xdp.data - page_address(xdp_page) -
1751 + vi->hdr_len - metasize;
1752
1753 - /* recalculate len if xdp.data or xdp.data_end were
1754 - * adjusted
1755 + /* recalculate len if xdp.data, xdp.data_end or
1756 + * xdp.data_meta were adjusted
1757 */
1758 - len = xdp.data_end - xdp.data + vi->hdr_len;
1759 + len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
1760 /* We can only create skb based on xdp_page. */
1761 if (unlikely(xdp_page != page)) {
1762 rcu_read_unlock();
1763 put_page(page);
1764 - head_skb = page_to_skb(vi, rq, xdp_page,
1765 - offset, len,
1766 - PAGE_SIZE, false);
1767 + head_skb = page_to_skb(vi, rq, xdp_page, offset,
1768 + len, PAGE_SIZE, false,
1769 + metasize);
1770 return head_skb;
1771 }
1772 break;
1773 @@ -926,7 +937,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
1774 goto err_skb;
1775 }
1776
1777 - head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
1778 + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
1779 + metasize);
1780 curr_skb = head_skb;
1781
1782 if (unlikely(!curr_skb))
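
The virtio_net hunks add XDP metadata support: instead of marking the metadata area invalid, xdp.data_meta starts at xdp.data, and whatever an XDP program reserves with bpf_xdp_adjust_meta() is carried into the skb via skb_metadata_set(), including through page_to_skb() on the mergeable path. A sketch of an XDP program that would exercise this; the 4-byte mark value is illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_set_meta(struct xdp_md *ctx)
    {
            __u32 *meta, *data;

            /* Reserve 4 bytes of metadata in front of the packet. */
            if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
                    return XDP_PASS;
            meta = (void *)(long)ctx->data_meta;
            data = (void *)(long)ctx->data;
            if ((void *)(meta + 1) > (void *)data)  /* verifier bounds check */
                    return XDP_PASS;
            *meta = 0xdeadbeef;     /* example mark for a tc classifier */
            return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

On XDP_PASS the driver now propagates that area into skb metadata, where a tc BPF classifier can read it via __sk_buff data_meta; dropping the metasize check in __virtnet_xdp_xmit_one likewise lets such frames be transmitted.
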
1783 diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
1784 index c09cf55e2d204..40334e55bbc74 100644
1785 --- a/drivers/ras/cec.c
1786 +++ b/drivers/ras/cec.c
1787 @@ -309,11 +309,20 @@ static bool sanity_check(struct ce_array *ca)
1788 return ret;
1789 }
1790
1791 +/**
1792 + * cec_add_elem - Add an element to the CEC array.
1793 + * @pfn: page frame number to insert
1794 + *
1795 + * Return values:
1796 + * - <0: on error
1797 + * - 0: on success
1798 + * - >0: when the inserted pfn was offlined
1799 + */
1800 int cec_add_elem(u64 pfn)
1801 {
1802 struct ce_array *ca = &ce_arr;
1803 + int count, err, ret = 0;
1804 unsigned int to = 0;
1805 - int count, ret = 0;
1806
1807 /*
1808 * We can be called very early on the identify_cpu() path where we are
1809 @@ -330,8 +339,8 @@ int cec_add_elem(u64 pfn)
1810 if (ca->n == MAX_ELEMS)
1811 WARN_ON(!del_lru_elem_unlocked(ca));
1812
1813 - ret = find_elem(ca, pfn, &to);
1814 - if (ret < 0) {
1815 + err = find_elem(ca, pfn, &to);
1816 + if (err < 0) {
1817 /*
1818 * Shift range [to-end] to make room for one more element.
1819 */
1820 diff --git a/drivers/regulator/bd9571mwv-regulator.c b/drivers/regulator/bd9571mwv-regulator.c
1821 index e690c2ce5b3c5..25e33028871c0 100644
1822 --- a/drivers/regulator/bd9571mwv-regulator.c
1823 +++ b/drivers/regulator/bd9571mwv-regulator.c
1824 @@ -124,7 +124,7 @@ static const struct regulator_ops vid_ops = {
1825
1826 static const struct regulator_desc regulators[] = {
1827 BD9571MWV_REG("VD09", "vd09", VD09, avs_ops, 0, 0x7f,
1828 - 0x80, 600000, 10000, 0x3c),
1829 + 0x6f, 600000, 10000, 0x3c),
1830 BD9571MWV_REG("VD18", "vd18", VD18, vid_ops, BD9571MWV_VD18_VID, 0xf,
1831 16, 1625000, 25000, 0),
1832 BD9571MWV_REG("VD25", "vd25", VD25, vid_ops, BD9571MWV_VD25_VID, 0xf,
1833 @@ -133,7 +133,7 @@ static const struct regulator_desc regulators[] = {
1834 11, 2800000, 100000, 0),
1835 BD9571MWV_REG("DVFS", "dvfs", DVFS, reg_ops,
1836 BD9571MWV_DVFS_MONIVDAC, 0x7f,
1837 - 0x80, 600000, 10000, 0x3c),
1838 + 0x6f, 600000, 10000, 0x3c),
1839 };
1840
1841 #ifdef CONFIG_PM_SLEEP
1842 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1843 index 476ef8044ae59..b81eebc7e2df2 100644
1844 --- a/drivers/scsi/ufs/ufshcd.c
1845 +++ b/drivers/scsi/ufs/ufshcd.c
1846 @@ -239,7 +239,7 @@ static struct ufs_dev_fix ufs_fixups[] = {
1847 END_FIX
1848 };
1849
1850 -static void ufshcd_tmc_handler(struct ufs_hba *hba);
1851 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
1852 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
1853 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
1854 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
1855 @@ -496,8 +496,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
1856 static void ufshcd_print_host_state(struct ufs_hba *hba)
1857 {
1858 dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
1859 - dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
1860 - hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
1861 + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
1862 + hba->outstanding_reqs, hba->outstanding_tasks);
1863 dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
1864 hba->saved_err, hba->saved_uic_err);
1865 dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
1866 @@ -644,40 +644,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
1867 return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
1868 }
1869
1870 -/**
1871 - * ufshcd_get_tm_free_slot - get a free slot for task management request
1872 - * @hba: per adapter instance
1873 - * @free_slot: pointer to variable with available slot value
1874 - *
1875 - * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
1876 - * Returns 0 if free slot is not available, else return 1 with tag value
1877 - * in @free_slot.
1878 - */
1879 -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
1880 -{
1881 - int tag;
1882 - bool ret = false;
1883 -
1884 - if (!free_slot)
1885 - goto out;
1886 -
1887 - do {
1888 - tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
1889 - if (tag >= hba->nutmrs)
1890 - goto out;
1891 - } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
1892 -
1893 - *free_slot = tag;
1894 - ret = true;
1895 -out:
1896 - return ret;
1897 -}
1898 -
1899 -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
1900 -{
1901 - clear_bit_unlock(slot, &hba->tm_slots_in_use);
1902 -}
1903 -
1904 /**
1905 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
1906 * @hba: per adapter instance
1907 @@ -1279,6 +1245,24 @@ out:
1908 return ret;
1909 }
1910
1911 +static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
1912 +{
1913 + int *busy = priv;
1914 +
1915 + WARN_ON_ONCE(reserved);
1916 + (*busy)++;
1917 + return false;
1918 +}
1919 +
1920 +/* Whether or not any tag is in use by a request that is in progress. */
1921 +static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
1922 +{
1923 + struct request_queue *q = hba->cmd_queue;
1924 + int busy = 0;
1925 +
1926 + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
1927 + return busy;
1928 +}
1929
1930 static int ufshcd_devfreq_get_dev_status(struct device *dev,
1931 struct devfreq_dev_status *stat)
1932 @@ -1633,7 +1617,7 @@ static void ufshcd_gate_work(struct work_struct *work)
1933
1934 if (hba->clk_gating.active_reqs
1935 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1936 - || hba->lrb_in_use || hba->outstanding_tasks
1937 + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1938 || hba->active_uic_cmd || hba->uic_async_done)
1939 goto rel_lock;
1940
1941 @@ -1687,7 +1671,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
1942
1943 if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
1944 || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
1945 - || hba->lrb_in_use || hba->outstanding_tasks
1946 + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
1947 || hba->active_uic_cmd || hba->uic_async_done
1948 || ufshcd_eh_in_progress(hba))
1949 return;
1950 @@ -2457,22 +2441,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1951
1952 hba->req_abort_count = 0;
1953
1954 - /* acquire the tag to make sure device cmds don't use it */
1955 - if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
1956 - /*
1957 - * Dev manage command in progress, requeue the command.
1958 - * Requeuing the command helps in cases where the request *may*
1959 - * find different tag instead of waiting for dev manage command
1960 - * completion.
1961 - */
1962 - err = SCSI_MLQUEUE_HOST_BUSY;
1963 - goto out;
1964 - }
1965 -
1966 err = ufshcd_hold(hba, true);
1967 if (err) {
1968 err = SCSI_MLQUEUE_HOST_BUSY;
1969 - clear_bit_unlock(tag, &hba->lrb_in_use);
1970 goto out;
1971 }
1972 WARN_ON(hba->clk_gating.state != CLKS_ON);
1973 @@ -2494,7 +2465,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1974 if (err) {
1975 ufshcd_release(hba);
1976 lrbp->cmd = NULL;
1977 - clear_bit_unlock(tag, &hba->lrb_in_use);
1978 goto out;
1979 }
1980 /* Make sure descriptors are ready before ringing the doorbell */
1981 @@ -2641,44 +2611,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
1982 return err;
1983 }
1984
1985 -/**
1986 - * ufshcd_get_dev_cmd_tag - Get device management command tag
1987 - * @hba: per-adapter instance
1988 - * @tag_out: pointer to variable with available slot value
1989 - *
1990 - * Get a free slot and lock it until device management command
1991 - * completes.
1992 - *
1993 - * Returns false if free slot is unavailable for locking, else
1994 - * return true with tag value in @tag.
1995 - */
1996 -static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
1997 -{
1998 - int tag;
1999 - bool ret = false;
2000 - unsigned long tmp;
2001 -
2002 - if (!tag_out)
2003 - goto out;
2004 -
2005 - do {
2006 - tmp = ~hba->lrb_in_use;
2007 - tag = find_last_bit(&tmp, hba->nutrs);
2008 - if (tag >= hba->nutrs)
2009 - goto out;
2010 - } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2011 -
2012 - *tag_out = tag;
2013 - ret = true;
2014 -out:
2015 - return ret;
2016 -}
2017 -
2018 -static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2019 -{
2020 - clear_bit_unlock(tag, &hba->lrb_in_use);
2021 -}
2022 -
2023 /**
2024 * ufshcd_exec_dev_cmd - API for sending device management requests
2025 * @hba: UFS hba
2026 @@ -2691,6 +2623,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2027 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2028 enum dev_cmd_type cmd_type, int timeout)
2029 {
2030 + struct request_queue *q = hba->cmd_queue;
2031 + struct request *req;
2032 struct ufshcd_lrb *lrbp;
2033 int err;
2034 int tag;
2035 @@ -2704,7 +2638,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2036 * Even though we use wait_event() which sleeps indefinitely,
2037 * the maximum wait time is bounded by SCSI request timeout.
2038 */
2039 - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2040 + req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2041 + if (IS_ERR(req))
2042 + return PTR_ERR(req);
2043 + tag = req->tag;
2044 + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2045
2046 init_completion(&wait);
2047 lrbp = &hba->lrb[tag];
2048 @@ -2729,8 +2667,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2049 err ? "query_complete_err" : "query_complete");
2050
2051 out_put_tag:
2052 - ufshcd_put_dev_cmd_tag(hba, tag);
2053 - wake_up(&hba->dev_cmd.tag_wq);
2054 + blk_put_request(req);
2055 up_read(&hba->clk_scaling_lock);
2056 return err;
2057 }
2058 @@ -4815,19 +4752,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2059 * ufshcd_uic_cmd_compl - handle completion of uic command
2060 * @hba: per adapter instance
2061 * @intr_status: interrupt status generated by the controller
2062 + *
2063 + * Returns
2064 + * IRQ_HANDLED - If interrupt is valid
2065 + * IRQ_NONE - If invalid interrupt
2066 */
2067 -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
2068 +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
2069 {
2070 + irqreturn_t retval = IRQ_NONE;
2071 +
2072 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
2073 hba->active_uic_cmd->argument2 |=
2074 ufshcd_get_uic_cmd_result(hba);
2075 hba->active_uic_cmd->argument3 =
2076 ufshcd_get_dme_attr_val(hba);
2077 complete(&hba->active_uic_cmd->done);
2078 + retval = IRQ_HANDLED;
2079 }
2080
2081 - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
2082 + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
2083 complete(hba->uic_async_done);
2084 + retval = IRQ_HANDLED;
2085 + }
2086 + return retval;
2087 }
2088
2089 /**
2090 @@ -4853,7 +4800,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
2091 cmd->result = result;
2092 /* Mark completed command as NULL in LRB */
2093 lrbp->cmd = NULL;
2094 - clear_bit_unlock(index, &hba->lrb_in_use);
2095 /* Do not touch lrbp after scsi done */
2096 cmd->scsi_done(cmd);
2097 __ufshcd_release(hba);
2098 @@ -4875,16 +4821,17 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
2099 hba->outstanding_reqs ^= completed_reqs;
2100
2101 ufshcd_clk_scaling_update_busy(hba);
2102 -
2103 - /* we might have free'd some tags above */
2104 - wake_up(&hba->dev_cmd.tag_wq);
2105 }
2106
2107 /**
2108 * ufshcd_transfer_req_compl - handle SCSI and query command completion
2109 * @hba: per adapter instance
2110 + *
2111 + * Returns
2112 + * IRQ_HANDLED - If interrupt is valid
2113 + * IRQ_NONE - If invalid interrupt
2114 */
2115 -static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
2116 +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
2117 {
2118 unsigned long completed_reqs;
2119 u32 tr_doorbell;
2120 @@ -4903,7 +4850,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
2121 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
2122 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
2123
2124 - __ufshcd_transfer_req_compl(hba, completed_reqs);
2125 + if (completed_reqs) {
2126 + __ufshcd_transfer_req_compl(hba, completed_reqs);
2127 + return IRQ_HANDLED;
2128 + } else {
2129 + return IRQ_NONE;
2130 + }
2131 }
2132
2133 /**
2134 @@ -5424,61 +5376,77 @@ out:
2135 /**
2136 * ufshcd_update_uic_error - check and set fatal UIC error flags.
2137 * @hba: per-adapter instance
2138 + *
2139 + * Returns
2140 + * IRQ_HANDLED - If interrupt is valid
2141 + * IRQ_NONE - If invalid interrupt
2142 */
2143 -static void ufshcd_update_uic_error(struct ufs_hba *hba)
2144 +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
2145 {
2146 u32 reg;
2147 + irqreturn_t retval = IRQ_NONE;
2148
2149 /* PHY layer lane error */
2150 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
2151 /* Ignore LINERESET indication, as this is not an error */
2152 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
2153 - (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
2154 + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
2155 /*
2156 * To know whether this error is fatal or not, DB timeout
2157 * must be checked but this error is handled separately.
2158 */
2159 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
2160 ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
2161 + retval |= IRQ_HANDLED;
2162 }
2163
2164 /* PA_INIT_ERROR is fatal and needs UIC reset */
2165 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
2166 - if (reg)
2167 + if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
2168 + (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
2169 ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg);
2170
2171 - if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
2172 - hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
2173 - else if (hba->dev_quirks &
2174 - UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
2175 - if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
2176 - hba->uic_error |=
2177 - UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
2178 - else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
2179 - hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
2180 + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
2181 + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
2182 + else if (hba->dev_quirks &
2183 + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
2184 + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
2185 + hba->uic_error |=
2186 + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
2187 + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
2188 + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
2189 + }
2190 + retval |= IRQ_HANDLED;
2191 }
2192
2193 /* UIC NL/TL/DME errors needs software retry */
2194 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
2195 - if (reg) {
2196 + if ((reg & UIC_NETWORK_LAYER_ERROR) &&
2197 + (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
2198 ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg);
2199 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
2200 + retval |= IRQ_HANDLED;
2201 }
2202
2203 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
2204 - if (reg) {
2205 + if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
2206 + (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
2207 ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg);
2208 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
2209 + retval |= IRQ_HANDLED;
2210 }
2211
2212 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
2213 - if (reg) {
2214 + if ((reg & UIC_DME_ERROR) &&
2215 + (reg & UIC_DME_ERROR_CODE_MASK)) {
2216 ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg);
2217 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
2218 + retval |= IRQ_HANDLED;
2219 }
2220
2221 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
2222 __func__, hba->uic_error);
2223 + return retval;
2224 }
2225
2226 static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
2227 @@ -5502,10 +5470,15 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
2228 /**
2229 * ufshcd_check_errors - Check for errors that need s/w attention
2230 * @hba: per-adapter instance
2231 + *
2232 + * Returns
2233 + * IRQ_HANDLED - If interrupt is valid
2234 + * IRQ_NONE - If invalid interrupt
2235 */
2236 -static void ufshcd_check_errors(struct ufs_hba *hba)
2237 +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
2238 {
2239 bool queue_eh_work = false;
2240 + irqreturn_t retval = IRQ_NONE;
2241
2242 if (hba->errors & INT_FATAL_ERRORS) {
2243 ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors);
2244 @@ -5514,7 +5487,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
2245
2246 if (hba->errors & UIC_ERROR) {
2247 hba->uic_error = 0;
2248 - ufshcd_update_uic_error(hba);
2249 + retval = ufshcd_update_uic_error(hba);
2250 if (hba->uic_error)
2251 queue_eh_work = true;
2252 }
2253 @@ -5562,6 +5535,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
2254 }
2255 schedule_work(&hba->eh_work);
2256 }
2257 + retval |= IRQ_HANDLED;
2258 }
2259 /*
2260 * if (!queue_eh_work) -
2261 @@ -5569,44 +5543,81 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
2262 * itself without s/w intervention or errors that will be
2263 * handled by the SCSI core layer.
2264 */
2265 + return retval;
2266 +}
2267 +
2268 +struct ctm_info {
2269 + struct ufs_hba *hba;
2270 + unsigned long pending;
2271 + unsigned int ncpl;
2272 +};
2273 +
2274 +static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved)
2275 +{
2276 + struct ctm_info *const ci = priv;
2277 + struct completion *c;
2278 +
2279 + WARN_ON_ONCE(reserved);
2280 + if (test_bit(req->tag, &ci->pending))
2281 + return true;
2282 + ci->ncpl++;
2283 + c = req->end_io_data;
2284 + if (c)
2285 + complete(c);
2286 + return true;
2287 }
2288
2289 /**
2290 * ufshcd_tmc_handler - handle task management function completion
2291 * @hba: per adapter instance
2292 + *
2293 + * Returns
2294 + * IRQ_HANDLED - If interrupt is valid
2295 + * IRQ_NONE - If invalid interrupt
2296 */
2297 -static void ufshcd_tmc_handler(struct ufs_hba *hba)
2298 +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
2299 {
2300 - u32 tm_doorbell;
2301 + struct request_queue *q = hba->tmf_queue;
2302 + struct ctm_info ci = {
2303 + .hba = hba,
2304 + .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL),
2305 + };
2306
2307 - tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
2308 - hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
2309 - wake_up(&hba->tm_wq);
2310 + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci);
2311 + return ci.ncpl ? IRQ_HANDLED : IRQ_NONE;
2312 }
2313
2314 /**
2315 * ufshcd_sl_intr - Interrupt service routine
2316 * @hba: per adapter instance
2317 * @intr_status: contains interrupts generated by the controller
2318 + *
2319 + * Returns
2320 + * IRQ_HANDLED - If interrupt is valid
2321 + * IRQ_NONE - If invalid interrupt
2322 */
2323 -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
2324 +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
2325 {
2326 + irqreturn_t retval = IRQ_NONE;
2327 +
2328 hba->errors = UFSHCD_ERROR_MASK & intr_status;
2329
2330 if (ufshcd_is_auto_hibern8_error(hba, intr_status))
2331 hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
2332
2333 if (hba->errors)
2334 - ufshcd_check_errors(hba);
2335 + retval |= ufshcd_check_errors(hba);
2336
2337 if (intr_status & UFSHCD_UIC_MASK)
2338 - ufshcd_uic_cmd_compl(hba, intr_status);
2339 + retval |= ufshcd_uic_cmd_compl(hba, intr_status);
2340
2341 if (intr_status & UTP_TASK_REQ_COMPL)
2342 - ufshcd_tmc_handler(hba);
2343 + retval |= ufshcd_tmc_handler(hba);
2344
2345 if (intr_status & UTP_TRANSFER_REQ_COMPL)
2346 - ufshcd_transfer_req_compl(hba);
2347 + retval |= ufshcd_transfer_req_compl(hba);
2348 +
2349 + return retval;
2350 }
2351
2352 /**
2353 @@ -5614,8 +5625,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
2354 * @irq: irq number
2355 * @__hba: pointer to adapter instance
2356 *
2357 - * Returns IRQ_HANDLED - If interrupt is valid
2358 - * IRQ_NONE - If invalid interrupt
2359 + * Returns
2360 + * IRQ_HANDLED - If interrupt is valid
2361 + * IRQ_NONE - If invalid interrupt
2362 */
2363 static irqreturn_t ufshcd_intr(int irq, void *__hba)
2364 {
2365 @@ -5638,14 +5650,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
2366 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2367 if (intr_status)
2368 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
2369 - if (enabled_intr_status) {
2370 - ufshcd_sl_intr(hba, enabled_intr_status);
2371 - retval = IRQ_HANDLED;
2372 - }
2373 + if (enabled_intr_status)
2374 + retval |= ufshcd_sl_intr(hba, enabled_intr_status);
2375
2376 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
2377 }
2378
2379 + if (retval == IRQ_NONE) {
2380 + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
2381 + __func__, intr_status);
2382 + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
2383 + }
2384 +
2385 spin_unlock(hba->host->host_lock);
2386 return retval;
2387 }
2388 @@ -5674,33 +5690,36 @@ out:
2389 static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
2390 struct utp_task_req_desc *treq, u8 tm_function)
2391 {
2392 + struct request_queue *q = hba->tmf_queue;
2393 struct Scsi_Host *host = hba->host;
2394 + DECLARE_COMPLETION_ONSTACK(wait);
2395 + struct request *req;
2396 unsigned long flags;
2397 - int free_slot, task_tag, err;
2398 + int task_tag, err;
2399
2400 /*
2401 - * Get free slot, sleep if slots are unavailable.
2402 - * Even though we use wait_event() which sleeps indefinitely,
2403 - * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
2404 + * blk_get_request() is used here only to get a free tag.
2405 */
2406 - wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
2407 + req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED);
2408 + req->end_io_data = &wait;
2409 ufshcd_hold(hba, false);
2410
2411 spin_lock_irqsave(host->host_lock, flags);
2412 - task_tag = hba->nutrs + free_slot;
2413 + blk_mq_start_request(req);
2414
2415 + task_tag = req->tag;
2416 treq->req_header.dword_0 |= cpu_to_be32(task_tag);
2417
2418 - memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
2419 - ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
2420 + memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
2421 + ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
2422
2423 /* send command to the controller */
2424 - __set_bit(free_slot, &hba->outstanding_tasks);
2425 + __set_bit(task_tag, &hba->outstanding_tasks);
2426
2427 /* Make sure descriptors are ready before ringing the task doorbell */
2428 wmb();
2429
2430 - ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
2431 + ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
2432 /* Make sure that doorbell is committed immediately */
2433 wmb();
2434
2435 @@ -5709,33 +5728,35 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
2436 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
2437
2438 /* wait until the task management command is completed */
2439 - err = wait_event_timeout(hba->tm_wq,
2440 - test_bit(free_slot, &hba->tm_condition),
2441 + err = wait_for_completion_io_timeout(&wait,
2442 msecs_to_jiffies(TM_CMD_TIMEOUT));
2443 if (!err) {
2444 + /*
2445 + * Make sure that ufshcd_compl_tm() does not trigger a
2446 + * use-after-free.
2447 + */
2448 + req->end_io_data = NULL;
2449 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
2450 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
2451 __func__, tm_function);
2452 - if (ufshcd_clear_tm_cmd(hba, free_slot))
2453 - dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
2454 - __func__, free_slot);
2455 + if (ufshcd_clear_tm_cmd(hba, task_tag))
2456 + dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
2457 + __func__, task_tag);
2458 err = -ETIMEDOUT;
2459 } else {
2460 err = 0;
2461 - memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
2462 + memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
2463
2464 ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
2465 }
2466
2467 spin_lock_irqsave(hba->host->host_lock, flags);
2468 - __clear_bit(free_slot, &hba->outstanding_tasks);
2469 + __clear_bit(task_tag, &hba->outstanding_tasks);
2470 spin_unlock_irqrestore(hba->host->host_lock, flags);
2471
2472 - clear_bit(free_slot, &hba->tm_condition);
2473 - ufshcd_put_tm_slot(hba, free_slot);
2474 - wake_up(&hba->tm_tag_wq);
2475 -
2476 ufshcd_release(hba);
2477 + blk_put_request(req);
2478 +
2479 return err;
2480 }
2481
2482 @@ -5809,6 +5830,8 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
2483 int cmd_type,
2484 enum query_opcode desc_op)
2485 {
2486 + struct request_queue *q = hba->cmd_queue;
2487 + struct request *req;
2488 struct ufshcd_lrb *lrbp;
2489 int err = 0;
2490 int tag;
2491 @@ -5818,7 +5841,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
2492
2493 down_read(&hba->clk_scaling_lock);
2494
2495 - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2496 + req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
2497 + if (IS_ERR(req))
2498 + return PTR_ERR(req);
2499 + tag = req->tag;
2500 + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
2501
2502 init_completion(&wait);
2503 lrbp = &hba->lrb[tag];
2504 @@ -5892,8 +5919,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
2505 }
2506 }
2507
2508 - ufshcd_put_dev_cmd_tag(hba, tag);
2509 - wake_up(&hba->dev_cmd.tag_wq);
2510 + blk_put_request(req);
2511 up_read(&hba->clk_scaling_lock);
2512 return err;
2513 }
2514 @@ -6186,9 +6212,6 @@ cleanup:
2515 hba->lrb[tag].cmd = NULL;
2516 spin_unlock_irqrestore(host->host_lock, flags);
2517
2518 - clear_bit_unlock(tag, &hba->lrb_in_use);
2519 - wake_up(&hba->dev_cmd.tag_wq);
2520 -
2521 out:
2522 if (!err) {
2523 err = SUCCESS;
2524 @@ -8184,6 +8207,9 @@ void ufshcd_remove(struct ufs_hba *hba)
2525 {
2526 ufs_bsg_remove(hba);
2527 ufs_sysfs_remove_nodes(hba->dev);
2528 + blk_cleanup_queue(hba->tmf_queue);
2529 + blk_mq_free_tag_set(&hba->tmf_tag_set);
2530 + blk_cleanup_queue(hba->cmd_queue);
2531 scsi_remove_host(hba->host);
2532 /* disable interrupts */
2533 ufshcd_disable_intr(hba, hba->intr_mask);
2534 @@ -8262,6 +8288,18 @@ out_error:
2535 }
2536 EXPORT_SYMBOL(ufshcd_alloc_host);
2537
2538 +/* This function exists because blk_mq_alloc_tag_set() requires this. */
2539 +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
2540 + const struct blk_mq_queue_data *qd)
2541 +{
2542 + WARN_ON_ONCE(true);
2543 + return BLK_STS_NOTSUPP;
2544 +}
2545 +
2546 +static const struct blk_mq_ops ufshcd_tmf_ops = {
2547 + .queue_rq = ufshcd_queue_tmf,
2548 +};
2549 +
2550 /**
2551 * ufshcd_init - Driver initialization routine
2552 * @hba: per-adapter instance
2553 @@ -8331,10 +8369,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2554
2555 hba->max_pwr_info.is_valid = false;
2556
2557 - /* Initailize wait queue for task management */
2558 - init_waitqueue_head(&hba->tm_wq);
2559 - init_waitqueue_head(&hba->tm_tag_wq);
2560 -
2561 /* Initialize work queues */
2562 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
2563 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
2564 @@ -8347,9 +8381,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2565
2566 init_rwsem(&hba->clk_scaling_lock);
2567
2568 - /* Initialize device management tag acquire wait queue */
2569 - init_waitqueue_head(&hba->dev_cmd.tag_wq);
2570 -
2571 ufshcd_init_clk_gating(hba);
2572
2573 ufshcd_init_clk_scaling(hba);
2574 @@ -8383,6 +8414,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2575 goto exit_gating;
2576 }
2577
2578 + hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
2579 + if (IS_ERR(hba->cmd_queue)) {
2580 + err = PTR_ERR(hba->cmd_queue);
2581 + goto out_remove_scsi_host;
2582 + }
2583 +
2584 + hba->tmf_tag_set = (struct blk_mq_tag_set) {
2585 + .nr_hw_queues = 1,
2586 + .queue_depth = hba->nutmrs,
2587 + .ops = &ufshcd_tmf_ops,
2588 + .flags = BLK_MQ_F_NO_SCHED,
2589 + };
2590 + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
2591 + if (err < 0)
2592 + goto free_cmd_queue;
2593 + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
2594 + if (IS_ERR(hba->tmf_queue)) {
2595 + err = PTR_ERR(hba->tmf_queue);
2596 + goto free_tmf_tag_set;
2597 + }
2598 +
2599 /* Reset the attached device */
2600 ufshcd_vops_device_reset(hba);
2601
2602 @@ -8392,7 +8444,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2603 dev_err(hba->dev, "Host controller enable failed\n");
2604 ufshcd_print_host_regs(hba);
2605 ufshcd_print_host_state(hba);
2606 - goto out_remove_scsi_host;
2607 + goto free_tmf_queue;
2608 }
2609
2610 /*
2611 @@ -8429,6 +8481,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
2612
2613 return 0;
2614
2615 +free_tmf_queue:
2616 + blk_cleanup_queue(hba->tmf_queue);
2617 +free_tmf_tag_set:
2618 + blk_mq_free_tag_set(&hba->tmf_tag_set);
2619 +free_cmd_queue:
2620 + blk_cleanup_queue(hba->cmd_queue);
2621 out_remove_scsi_host:
2622 scsi_remove_host(hba->host);
2623 exit_gating:
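
The UFS rework replaces the driver's private lrb_in_use/tm_slots_in_use bitmaps and their wait queues with block-layer tags: device-management commands borrow a tag from hba->cmd_queue, and task-management functions draw reserved tags from the new tmf_tag_set, so arbitration, sleeping, and wakeups come from blk-mq instead of open-coded test_and_set_bit loops. A minimal sketch of the borrow/return idiom against the 5.4-era API, with the queue and the actual command issue assumed:

    #include <linux/blkdev.h>
    #include <linux/err.h>

    /* Borrow a free tag for a driver-internal command and return it.
     * blk_get_request() sleeps until a tag is available, replacing the
     * old wait_event() on a private bitmap.
     */
    static int with_borrowed_tag(struct request_queue *q)
    {
            struct request *req;
            int tag;

            req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
            if (IS_ERR(req))
                    return PTR_ERR(req);
            tag = req->tag;         /* unique until blk_put_request() */

            /* ... build and issue the command in the slot for @tag ... */

            blk_put_request(req);
            return 0;
    }
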
2624 diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
2625 index 4f1dec68a853f..92ef6e6a3e511 100644
2626 --- a/drivers/scsi/ufs/ufshcd.h
2627 +++ b/drivers/scsi/ufs/ufshcd.h
2628 @@ -213,13 +213,11 @@ struct ufs_query {
2629 * @type: device management command type - Query, NOP OUT
2630 * @lock: lock to allow one command at a time
2631 * @complete: internal commands completion
2632 - * @tag_wq: wait queue until free command slot is available
2633 */
2634 struct ufs_dev_cmd {
2635 enum dev_cmd_type type;
2636 struct mutex lock;
2637 struct completion *complete;
2638 - wait_queue_head_t tag_wq;
2639 struct ufs_query query;
2640 };
2641
2642 @@ -484,7 +482,7 @@ struct ufs_stats {
2643 * @host: Scsi_Host instance of the driver
2644 * @dev: device handle
2645 * @lrb: local reference block
2646 - * @lrb_in_use: lrb in use
2647 + * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
2648 * @outstanding_tasks: Bits representing outstanding task requests
2649 * @outstanding_reqs: Bits representing outstanding transfer requests
2650 * @capabilities: UFS Controller Capabilities
2651 @@ -496,11 +494,9 @@ struct ufs_stats {
2652 * @irq: Irq number of the controller
2653 * @active_uic_cmd: handle of active UIC command
2654 * @uic_cmd_mutex: mutex for uic command
2655 - * @tm_wq: wait queue for task management
2656 - * @tm_tag_wq: wait queue for free task management slots
2657 - * @tm_slots_in_use: bit map of task management request slots in use
2658 + * @tmf_tag_set: TMF tag set.
2659 + * @tmf_queue: Used to allocate TMF tags.
2660 * @pwr_done: completion for power mode change
2661 - * @tm_condition: condition variable for task management
2662 * @ufshcd_state: UFSHCD states
2663 * @eh_flags: Error handling flags
2664 * @intr_mask: Interrupt Mask Bits
2665 @@ -543,6 +539,7 @@ struct ufs_hba {
2666
2667 struct Scsi_Host *host;
2668 struct device *dev;
2669 + struct request_queue *cmd_queue;
2670 /*
2671 * This field is to keep a reference to "scsi_device" corresponding to
2672 * "UFS device" W-LU.
2673 @@ -563,7 +560,6 @@ struct ufs_hba {
2674 u32 ahit;
2675
2676 struct ufshcd_lrb *lrb;
2677 - unsigned long lrb_in_use;
2678
2679 unsigned long outstanding_tasks;
2680 unsigned long outstanding_reqs;
2681 @@ -645,10 +641,8 @@ struct ufs_hba {
2682 /* Device deviations from standard UFS device spec. */
2683 unsigned int dev_quirks;
2684
2685 - wait_queue_head_t tm_wq;
2686 - wait_queue_head_t tm_tag_wq;
2687 - unsigned long tm_condition;
2688 - unsigned long tm_slots_in_use;
2689 + struct blk_mq_tag_set tmf_tag_set;
2690 + struct request_queue *tmf_queue;
2691
2692 struct uic_command *active_uic_cmd;
2693 struct mutex uic_cmd_mutex;
2694 diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
2695 index dbb75cd28dc8a..c2961d37cc1cf 100644
2696 --- a/drivers/scsi/ufs/ufshci.h
2697 +++ b/drivers/scsi/ufs/ufshci.h
2698 @@ -195,7 +195,7 @@ enum {
2699
2700 /* UECDL - Host UIC Error Code Data Link Layer 3Ch */
2701 #define UIC_DATA_LINK_LAYER_ERROR 0x80000000
2702 -#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
2703 +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF
2704 #define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2
2705 #define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4
2706 #define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8
2707 diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
2708 index bf68d86d80ee5..95f9e48052452 100644
2709 --- a/drivers/soc/fsl/qbman/qman.c
2710 +++ b/drivers/soc/fsl/qbman/qman.c
2711 @@ -186,7 +186,7 @@ struct qm_eqcr_entry {
2712 __be32 tag;
2713 struct qm_fd fd;
2714 u8 __reserved3[32];
2715 -} __packed;
2716 +} __packed __aligned(8);
2717 #define QM_EQCR_VERB_VBIT 0x80
2718 #define QM_EQCR_VERB_CMD_MASK 0x61 /* but only one value; */
2719 #define QM_EQCR_VERB_CMD_ENQUEUE 0x01
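
Adding __aligned(8) to the qman entry matters because __packed alone reduces the structure's alignment to 1; the compiler may then assume an entry can sit at any address and, on strict-alignment architectures, emit byte-by-byte accesses to the hardware portal. Restoring 8-byte alignment keeps the layout packed while enforcing the alignment the hardware provides. The effect, shown with compile-time checks (plain C with GNU attributes, field names illustrative):

    #include <stdint.h>

    struct packed_only {
            uint8_t  verb;
            uint32_t tag;
    } __attribute__((packed));

    struct packed_aligned {
            uint8_t  verb;
            uint32_t tag;
    } __attribute__((packed, aligned(8)));

    _Static_assert(_Alignof(struct packed_only) == 1,
                   "packed drops natural alignment");
    _Static_assert(_Alignof(struct packed_aligned) == 8,
                   "aligned(8) restores it");
    _Static_assert(sizeof(struct packed_only) == 5, "no padding");
    _Static_assert(sizeof(struct packed_aligned) == 8, "padded to alignment");
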
2720 diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
2721 index 8f1de1fbbeedf..d8d3892e5a69a 100644
2722 --- a/drivers/usb/usbip/stub_dev.c
2723 +++ b/drivers/usb/usbip/stub_dev.c
2724 @@ -63,6 +63,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
2725
2726 dev_info(dev, "stub up\n");
2727
2728 + mutex_lock(&sdev->ud.sysfs_lock);
2729 spin_lock_irq(&sdev->ud.lock);
2730
2731 if (sdev->ud.status != SDEV_ST_AVAILABLE) {
2732 @@ -87,13 +88,13 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
2733 tcp_rx = kthread_create(stub_rx_loop, &sdev->ud, "stub_rx");
2734 if (IS_ERR(tcp_rx)) {
2735 sockfd_put(socket);
2736 - return -EINVAL;
2737 + goto unlock_mutex;
2738 }
2739 tcp_tx = kthread_create(stub_tx_loop, &sdev->ud, "stub_tx");
2740 if (IS_ERR(tcp_tx)) {
2741 kthread_stop(tcp_rx);
2742 sockfd_put(socket);
2743 - return -EINVAL;
2744 + goto unlock_mutex;
2745 }
2746
2747 /* get task structs now */
2748 @@ -112,6 +113,8 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
2749 wake_up_process(sdev->ud.tcp_rx);
2750 wake_up_process(sdev->ud.tcp_tx);
2751
2752 + mutex_unlock(&sdev->ud.sysfs_lock);
2753 +
2754 } else {
2755 dev_info(dev, "stub down\n");
2756
2757 @@ -122,6 +125,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
2758 spin_unlock_irq(&sdev->ud.lock);
2759
2760 usbip_event_add(&sdev->ud, SDEV_EVENT_DOWN);
2761 + mutex_unlock(&sdev->ud.sysfs_lock);
2762 }
2763
2764 return count;
2765 @@ -130,6 +134,8 @@ sock_err:
2766 sockfd_put(socket);
2767 err:
2768 spin_unlock_irq(&sdev->ud.lock);
2769 +unlock_mutex:
2770 + mutex_unlock(&sdev->ud.sysfs_lock);
2771 return -EINVAL;
2772 }
2773 static DEVICE_ATTR_WO(usbip_sockfd);
2774 @@ -270,6 +276,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
2775 sdev->ud.side = USBIP_STUB;
2776 sdev->ud.status = SDEV_ST_AVAILABLE;
2777 spin_lock_init(&sdev->ud.lock);
2778 + mutex_init(&sdev->ud.sysfs_lock);
2779 sdev->ud.tcp_socket = NULL;
2780 sdev->ud.sockfd = -1;
2781
2782 diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
2783 index 8be857a4fa132..a7e6ce96f62c7 100644
2784 --- a/drivers/usb/usbip/usbip_common.h
2785 +++ b/drivers/usb/usbip/usbip_common.h
2786 @@ -263,6 +263,9 @@ struct usbip_device {
2787 /* lock for status */
2788 spinlock_t lock;
2789
2790 + /* mutex for synchronizing sysfs store paths */
2791 + struct mutex sysfs_lock;
2792 +
2793 int sockfd;
2794 struct socket *tcp_socket;
2795
2796 diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
2797 index 5d88917c96314..086ca76dd0531 100644
2798 --- a/drivers/usb/usbip/usbip_event.c
2799 +++ b/drivers/usb/usbip/usbip_event.c
2800 @@ -70,6 +70,7 @@ static void event_handler(struct work_struct *work)
2801 while ((ud = get_event()) != NULL) {
2802 usbip_dbg_eh("pending event %lx\n", ud->event);
2803
2804 + mutex_lock(&ud->sysfs_lock);
2805 /*
2806 * NOTE: shutdown must come first.
2807 * Shutdown the device.
2808 @@ -90,6 +91,7 @@ static void event_handler(struct work_struct *work)
2809 ud->eh_ops.unusable(ud);
2810 unset_event(ud, USBIP_EH_UNUSABLE);
2811 }
2812 + mutex_unlock(&ud->sysfs_lock);
2813
2814 wake_up(&ud->eh_waitq);
2815 }
2816 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
2817 index 1e0b618e2e6ec..98636fbf71882 100644
2818 --- a/drivers/usb/usbip/vhci_hcd.c
2819 +++ b/drivers/usb/usbip/vhci_hcd.c
2820 @@ -1096,6 +1096,7 @@ static void vhci_device_init(struct vhci_device *vdev)
2821 vdev->ud.side = USBIP_VHCI;
2822 vdev->ud.status = VDEV_ST_NULL;
2823 spin_lock_init(&vdev->ud.lock);
2824 + mutex_init(&vdev->ud.sysfs_lock);
2825
2826 INIT_LIST_HEAD(&vdev->priv_rx);
2827 INIT_LIST_HEAD(&vdev->priv_tx);
2828 diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
2829 index e64ea314930be..ebc7be1d98207 100644
2830 --- a/drivers/usb/usbip/vhci_sysfs.c
2831 +++ b/drivers/usb/usbip/vhci_sysfs.c
2832 @@ -185,6 +185,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
2833
2834 usbip_dbg_vhci_sysfs("enter\n");
2835
2836 + mutex_lock(&vdev->ud.sysfs_lock);
2837 +
2838 /* lock */
2839 spin_lock_irqsave(&vhci->lock, flags);
2840 spin_lock(&vdev->ud.lock);
2841 @@ -195,6 +197,7 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
2842 /* unlock */
2843 spin_unlock(&vdev->ud.lock);
2844 spin_unlock_irqrestore(&vhci->lock, flags);
2845 + mutex_unlock(&vdev->ud.sysfs_lock);
2846
2847 return -EINVAL;
2848 }
2849 @@ -205,6 +208,8 @@ static int vhci_port_disconnect(struct vhci_hcd *vhci_hcd, __u32 rhport)
2850
2851 usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
2852
2853 + mutex_unlock(&vdev->ud.sysfs_lock);
2854 +
2855 return 0;
2856 }
2857
2858 @@ -349,30 +354,36 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
2859 else
2860 vdev = &vhci->vhci_hcd_hs->vdev[rhport];
2861
2862 + mutex_lock(&vdev->ud.sysfs_lock);
2863 +
2864 /* Extract socket from fd. */
2865 socket = sockfd_lookup(sockfd, &err);
2866 if (!socket) {
2867 dev_err(dev, "failed to lookup sock");
2868 - return -EINVAL;
2869 + err = -EINVAL;
2870 + goto unlock_mutex;
2871 }
2872 if (socket->type != SOCK_STREAM) {
2873 dev_err(dev, "Expecting SOCK_STREAM - found %d",
2874 socket->type);
2875 sockfd_put(socket);
2876 - return -EINVAL;
2877 + err = -EINVAL;
2878 + goto unlock_mutex;
2879 }
2880
2881 /* create threads before locking */
2882 tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
2883 if (IS_ERR(tcp_rx)) {
2884 sockfd_put(socket);
2885 - return -EINVAL;
2886 + err = -EINVAL;
2887 + goto unlock_mutex;
2888 }
2889 tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
2890 if (IS_ERR(tcp_tx)) {
2891 kthread_stop(tcp_rx);
2892 sockfd_put(socket);
2893 - return -EINVAL;
2894 + err = -EINVAL;
2895 + goto unlock_mutex;
2896 }
2897
2898 /* get task structs now */
2899 @@ -397,7 +408,8 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
2900 * Will be retried from userspace
2901 * if there's another free port.
2902 */
2903 - return -EBUSY;
2904 + err = -EBUSY;
2905 + goto unlock_mutex;
2906 }
2907
2908 dev_info(dev, "pdev(%u) rhport(%u) sockfd(%d)\n",
2909 @@ -422,7 +434,15 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
2910
2911 rh_port_connect(vdev, speed);
2912
2913 + dev_info(dev, "Device attached\n");
2914 +
2915 + mutex_unlock(&vdev->ud.sysfs_lock);
2916 +
2917 return count;
2918 +
2919 +unlock_mutex:
2920 + mutex_unlock(&vdev->ud.sysfs_lock);
2921 + return err;
2922 }
2923 static DEVICE_ATTR_WO(attach);
2924
2925 diff --git a/drivers/usb/usbip/vudc_dev.c b/drivers/usb/usbip/vudc_dev.c
2926 index c8eeabdd9b568..2bc428f2e2610 100644
2927 --- a/drivers/usb/usbip/vudc_dev.c
2928 +++ b/drivers/usb/usbip/vudc_dev.c
2929 @@ -572,6 +572,7 @@ static int init_vudc_hw(struct vudc *udc)
2930 init_waitqueue_head(&udc->tx_waitq);
2931
2932 spin_lock_init(&ud->lock);
2933 + mutex_init(&ud->sysfs_lock);
2934 ud->status = SDEV_ST_AVAILABLE;
2935 ud->side = USBIP_VUDC;
2936
2937 diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
2938 index 7383a543c6d12..f7633ee655a17 100644
2939 --- a/drivers/usb/usbip/vudc_sysfs.c
2940 +++ b/drivers/usb/usbip/vudc_sysfs.c
2941 @@ -112,6 +112,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
2942 dev_err(dev, "no device");
2943 return -ENODEV;
2944 }
2945 + mutex_lock(&udc->ud.sysfs_lock);
2946 spin_lock_irqsave(&udc->lock, flags);
2947 /* Don't export what we don't have */
2948 if (!udc->driver || !udc->pullup) {
2949 @@ -187,6 +188,8 @@ static ssize_t usbip_sockfd_store(struct device *dev,
2950
2951 wake_up_process(udc->ud.tcp_rx);
2952 wake_up_process(udc->ud.tcp_tx);
2953 +
2954 + mutex_unlock(&udc->ud.sysfs_lock);
2955 return count;
2956
2957 } else {
2958 @@ -207,6 +210,7 @@ static ssize_t usbip_sockfd_store(struct device *dev,
2959 }
2960
2961 spin_unlock_irqrestore(&udc->lock, flags);
2962 + mutex_unlock(&udc->ud.sysfs_lock);
2963
2964 return count;
2965
2966 @@ -216,6 +220,7 @@ unlock_ud:
2967 spin_unlock_irq(&udc->ud.lock);
2968 unlock:
2969 spin_unlock_irqrestore(&udc->lock, flags);
2970 + mutex_unlock(&udc->ud.sysfs_lock);
2971
2972 return ret;
2973 }
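
The usbip series adds ud->sysfs_lock so that the sysfs store handlers (attach/detach, usbip_sockfd) and the event handler's shutdown/reset/unusable callbacks cannot interleave; the spinlock ud->lock only protects the status word, not the multi-step attach sequence of thread creation, socket hand-off, and status update. The locking shape, reduced to a sketch with assumed fields:

    #include <linux/mutex.h>
    #include <linux/spinlock.h>

    struct example_ud {
            spinlock_t lock;                /* protects @status only */
            struct mutex sysfs_lock;        /* serializes whole sequences */
            int status;
    };

    static int example_attach(struct example_ud *ud)
    {
            mutex_lock(&ud->sysfs_lock);    /* excludes the event handler */
            spin_lock_irq(&ud->lock);
            /* ... check status, bail out under both locks on error ... */
            spin_unlock_irq(&ud->lock);
            /* ... create rx/tx threads, publish socket, set status ... */
            mutex_unlock(&ud->sysfs_lock);
            return 0;
    }

Note the error paths in the real diffs: every early return is converted into a goto that drops the mutex, since a store handler returning with sysfs_lock held would wedge the device.
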
2974 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
2975 index e492f5fe5be62..883abc46da6e2 100644
2976 --- a/drivers/xen/events/events_base.c
2977 +++ b/drivers/xen/events/events_base.c
2978 @@ -222,7 +222,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
2979 info->evtchn = evtchn;
2980 info->cpu = cpu;
2981 info->mask_reason = EVT_MASK_REASON_EXPLICIT;
2982 - spin_lock_init(&info->lock);
2983 + raw_spin_lock_init(&info->lock);
2984
2985 ret = set_evtchn_to_irq(evtchn, irq);
2986 if (ret < 0)
2987 @@ -374,28 +374,28 @@ static void do_mask(struct irq_info *info, u8 reason)
2988 {
2989 unsigned long flags;
2990
2991 - spin_lock_irqsave(&info->lock, flags);
2992 + raw_spin_lock_irqsave(&info->lock, flags);
2993
2994 if (!info->mask_reason)
2995 mask_evtchn(info->evtchn);
2996
2997 info->mask_reason |= reason;
2998
2999 - spin_unlock_irqrestore(&info->lock, flags);
3000 + raw_spin_unlock_irqrestore(&info->lock, flags);
3001 }
3002
3003 static void do_unmask(struct irq_info *info, u8 reason)
3004 {
3005 unsigned long flags;
3006
3007 - spin_lock_irqsave(&info->lock, flags);
3008 + raw_spin_lock_irqsave(&info->lock, flags);
3009
3010 info->mask_reason &= ~reason;
3011
3012 if (!info->mask_reason)
3013 unmask_evtchn(info->evtchn);
3014
3015 - spin_unlock_irqrestore(&info->lock, flags);
3016 + raw_spin_unlock_irqrestore(&info->lock, flags);
3017 }
3018
3019 #ifdef CONFIG_X86
3020 diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
3021 index d3a89b4646b8b..eb012fbb62e7b 100644
3022 --- a/drivers/xen/events/events_internal.h
3023 +++ b/drivers/xen/events/events_internal.h
3024 @@ -45,7 +45,7 @@ struct irq_info {
3025 unsigned short eoi_cpu; /* EOI must happen on this cpu */
3026 unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
3027 u64 eoi_time; /* Time in jiffies when to EOI. */
3028 - spinlock_t lock;
3029 + raw_spinlock_t lock;
3030
3031 union {
3032 unsigned short virq;
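
Switching info->lock to raw_spinlock_t is needed because do_mask()/do_unmask() run from the event-channel dispatch path; on PREEMPT_RT a plain spinlock_t becomes a sleeping lock and cannot be taken there. A sketch of the declaration and IRQ-safe usage:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(evt_lock);

    /* raw_spinlock_t keeps spinning even on PREEMPT_RT, so it remains
     * usable in hard-interrupt dispatch paths.
     */
    static void mask_path(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&evt_lock, flags);
            /* ... update mask_reason, mask/unmask the channel ... */
            raw_spin_unlock_irqrestore(&evt_lock, flags);
    }
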
3033 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3034 index 67c2e6487479a..ab9eeb5ff8e57 100644
3035 --- a/fs/cifs/connect.c
3036 +++ b/fs/cifs/connect.c
3037 @@ -4198,7 +4198,6 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3038 cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL);
3039 if (cifs_sb->prepath == NULL)
3040 return -ENOMEM;
3041 - cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
3042 }
3043
3044 return 0;
3045 diff --git a/fs/direct-io.c b/fs/direct-io.c
3046 index 9329ced91f1d8..434cffcc0391f 100644
3047 --- a/fs/direct-io.c
3048 +++ b/fs/direct-io.c
3049 @@ -848,6 +848,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
3050 struct buffer_head *map_bh)
3051 {
3052 int ret = 0;
3053 + int boundary = sdio->boundary; /* dio_send_cur_page may clear it */
3054
3055 if (dio->op == REQ_OP_WRITE) {
3056 /*
3057 @@ -886,10 +887,10 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
3058 sdio->cur_page_fs_offset = sdio->block_in_file << sdio->blkbits;
3059 out:
3060 /*
3061 - * If sdio->boundary then we want to schedule the IO now to
3062 + * If boundary then we want to schedule the IO now to
3063 * avoid metadata seeks.
3064 */
3065 - if (sdio->boundary) {
3066 + if (boundary) {
3067 ret = dio_send_cur_page(dio, sdio, map_bh);
3068 if (sdio->bio)
3069 dio_bio_submit(dio, sdio);
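
The direct-io fix latches sdio->boundary before the calls that can clear it: dio_send_cur_page() may submit the current page and reset the submission state, so testing the live field afterwards silently dropped the "schedule the IO now to avoid a metadata seek" hint. The bug class in miniature (plain C, names illustrative):

    #include <stdio.h>

    struct submit_ctx {
            int boundary;
    };

    /* Stands in for dio_send_cur_page(), which can reset submission state. */
    static void send_cur_page(struct submit_ctx *s)
    {
            s->boundary = 0;
    }

    int main(void)
    {
            struct submit_ctx sdio = { .boundary = 1 };
            int boundary = sdio.boundary;   /* latch first, as the fix does */

            send_cur_page(&sdio);
            /* Testing sdio.boundary here would miss the hint; the latched
             * copy still says the IO should be pushed out now.
             */
            printf("latched=%d live=%d\n", boundary, sdio.boundary);
            return 0;
    }
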
3070 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
3071 index 5a7eb0c79839e..58a972667bf8e 100644
3072 --- a/fs/hostfs/hostfs_kern.c
3073 +++ b/fs/hostfs/hostfs_kern.c
3074 @@ -139,10 +139,10 @@ static char *inode_name(struct inode *ino)
3075
3076 static char *follow_link(char *link)
3077 {
3078 - int len, n;
3079 char *name, *resolved, *end;
3080 + int n;
3081
3082 - name = __getname();
3083 + name = kmalloc(PATH_MAX, GFP_KERNEL);
3084 if (!name) {
3085 n = -ENOMEM;
3086 goto out_free;
3087 @@ -164,21 +164,18 @@ static char *follow_link(char *link)
3088 return name;
3089
3090 *(end + 1) = '\0';
3091 - len = strlen(link) + strlen(name) + 1;
3092
3093 - resolved = kmalloc(len, GFP_KERNEL);
3094 + resolved = kasprintf(GFP_KERNEL, "%s%s", link, name);
3095 if (resolved == NULL) {
3096 n = -ENOMEM;
3097 goto out_free;
3098 }
3099
3100 - sprintf(resolved, "%s%s", link, name);
3101 - __putname(name);
3102 - kfree(link);
3103 + kfree(name);
3104 return resolved;
3105
3106 out_free:
3107 - __putname(name);
3108 + kfree(name);
3109 return ERR_PTR(n);
3110 }
3111
3112 @@ -918,18 +915,16 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
3113 sb->s_d_op = &simple_dentry_operations;
3114 sb->s_maxbytes = MAX_LFS_FILESIZE;
3115
3116 - /* NULL is printed as <NULL> by sprintf: avoid that. */
3117 + /* NULL is printed as '(null)' by printf(): avoid that. */
3118 if (req_root == NULL)
3119 req_root = "";
3120
3121 err = -ENOMEM;
3122 sb->s_fs_info = host_root_path =
3123 - kmalloc(strlen(root_ino) + strlen(req_root) + 2, GFP_KERNEL);
3124 + kasprintf(GFP_KERNEL, "%s/%s", root_ino, req_root);
3125 if (host_root_path == NULL)
3126 goto out;
3127
3128 - sprintf(host_root_path, "%s/%s", root_ino, req_root);
3129 -
3130 root_inode = new_inode(sb);
3131 if (!root_inode)
3132 goto out;
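
Both hostfs hunks replace manual strlen()+kmalloc()+sprintf() sequences with kasprintf(), which computes the required size and formats in one step, removing any chance of an undersized buffer; follow_link() also stops mixing __getname()/__putname() with kfree() by allocating the name with plain kmalloc(). The idiom, as a kernel-context sketch with a hypothetical helper:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* kasprintf() allocates exactly the right size and formats in one
     * call; the caller owns the result and releases it with kfree().
     */
    static char *join_path(const char *root, const char *rel)
    {
            return kasprintf(GFP_KERNEL, "%s/%s", root, rel);
    }
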
3133 diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
3134 index 9cd0a68159337..7f66e33424750 100644
3135 --- a/fs/ocfs2/aops.c
3136 +++ b/fs/ocfs2/aops.c
3137 @@ -2304,7 +2304,7 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
3138 struct ocfs2_alloc_context *meta_ac = NULL;
3139 handle_t *handle = NULL;
3140 loff_t end = offset + bytes;
3141 - int ret = 0, credits = 0, locked = 0;
3142 + int ret = 0, credits = 0;
3143
3144 ocfs2_init_dealloc_ctxt(&dealloc);
3145
3146 @@ -2315,13 +2315,6 @@ static int ocfs2_dio_end_io_write(struct inode *inode,
3147 !dwc->dw_orphaned)
3148 goto out;
3149
3150 - /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we
3151 - * are in that context. */
3152 - if (dwc->dw_writer_pid != task_pid_nr(current)) {
3153 - inode_lock(inode);
3154 - locked = 1;
3155 - }
3156 -
3157 ret = ocfs2_inode_lock(inode, &di_bh, 1);
3158 if (ret < 0) {
3159 mlog_errno(ret);
3160 @@ -2402,8 +2395,6 @@ out:
3161 if (meta_ac)
3162 ocfs2_free_alloc_context(meta_ac);
3163 ocfs2_run_deallocs(osb, &dealloc);
3164 - if (locked)
3165 - inode_unlock(inode);
3166 ocfs2_dio_free_write_ctx(inode, dwc);
3167
3168 return ret;
3169 diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
3170 index 6cd5e4924e4d2..ab2b0d74ad03e 100644
3171 --- a/fs/ocfs2/file.c
3172 +++ b/fs/ocfs2/file.c
3173 @@ -1244,22 +1244,24 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
3174 goto bail_unlock;
3175 }
3176 }
3177 + down_write(&OCFS2_I(inode)->ip_alloc_sem);
3178 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
3179 2 * ocfs2_quota_trans_credits(sb));
3180 if (IS_ERR(handle)) {
3181 status = PTR_ERR(handle);
3182 mlog_errno(status);
3183 - goto bail_unlock;
3184 + goto bail_unlock_alloc;
3185 }
3186 status = __dquot_transfer(inode, transfer_to);
3187 if (status < 0)
3188 goto bail_commit;
3189 } else {
3190 + down_write(&OCFS2_I(inode)->ip_alloc_sem);
3191 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
3192 if (IS_ERR(handle)) {
3193 status = PTR_ERR(handle);
3194 mlog_errno(status);
3195 - goto bail_unlock;
3196 + goto bail_unlock_alloc;
3197 }
3198 }
3199
3200 @@ -1272,6 +1274,8 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
3201
3202 bail_commit:
3203 ocfs2_commit_trans(osb, handle);
3204 +bail_unlock_alloc:
3205 + up_write(&OCFS2_I(inode)->ip_alloc_sem);
3206 bail_unlock:
3207 if (status && inode_locked) {
3208 ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
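
The two ocfs2 hunks move the serialization out of the dio completion path and instead make ocfs2_setattr() take ip_alloc_sem before opening the transaction, adding a bail_unlock_alloc label so the error path releases resources in exact reverse order of acquisition. A minimal sketch of that goto-unwind shape (illustrative names; a pthread rwlock stands in for the kernel rw_semaphore):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

static int start_trans(void) { return 0; }   /* 0 = ok, <0 = error */
static void commit_trans(void) { }

static int do_setattr(void)
{
	int status;

	pthread_rwlock_wrlock(&alloc_sem);  /* take the lock first ... */

	status = start_trans();             /* ... then open the handle */
	if (status < 0)
		goto bail_unlock_alloc;     /* nothing to commit yet */

	/* ... modify the inode under both the lock and the handle ... */

	commit_trans();
bail_unlock_alloc:
	pthread_rwlock_unlock(&alloc_sem);
	return status;
}

int main(void)
{
	printf("setattr: %d\n", do_setattr());
	return 0;
}

Each failure jumps to the label that undoes exactly what has been taken so far, which is what keeps the added lock easy to audit.
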
3209 diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
3210 index 75e5a7fe341fd..36516fe86fe7c 100644
3211 --- a/include/linux/mlx5/mlx5_ifc.h
3212 +++ b/include/linux/mlx5/mlx5_ifc.h
3213 @@ -415,11 +415,11 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
3214 u8 reserved_at_60[0x18];
3215 u8 log_max_ft_num[0x8];
3216
3217 - u8 reserved_at_80[0x18];
3218 + u8 reserved_at_80[0x10];
3219 + u8 log_max_flow_counter[0x8];
3220 u8 log_max_destination[0x8];
3221
3222 - u8 log_max_flow_counter[0x8];
3223 - u8 reserved_at_a8[0x10];
3224 + u8 reserved_at_a0[0x18];
3225 u8 log_max_flow[0x8];
3226
3227 u8 reserved_at_c0[0x40];
3228 @@ -9669,7 +9669,7 @@ struct mlx5_ifc_pbmc_reg_bits {
3229
3230 struct mlx5_ifc_bufferx_reg_bits buffer[10];
3231
3232 - u8 reserved_at_2e0[0x40];
3233 + u8 reserved_at_2e0[0x80];
3234 };
3235
3236 struct mlx5_ifc_qtct_reg_bits {
3237 diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
3238 index 20f3550b0b11b..613d04526032b 100644
3239 --- a/include/linux/skmsg.h
3240 +++ b/include/linux/skmsg.h
3241 @@ -355,13 +355,17 @@ static inline void sk_psock_update_proto(struct sock *sk,
3242 static inline void sk_psock_restore_proto(struct sock *sk,
3243 struct sk_psock *psock)
3244 {
3245 - sk->sk_prot->unhash = psock->saved_unhash;
3246 -
3247 if (psock->sk_proto) {
3248 struct inet_connection_sock *icsk = inet_csk(sk);
3249 bool has_ulp = !!icsk->icsk_ulp_data;
3250
3251 if (has_ulp) {
3252 + /* TLS does not have an unhash proto in SW cases, but we need
3253 + * to ensure we stop using the sock_map unhash routine because
3254 + * the associated psock is being removed. So use the original
3255 + * unhash handler.
3256 + */
3257 + WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
3258 tcp_update_ulp(sk, psock->sk_proto,
3259 psock->saved_write_space);
3260 } else {
3261 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
3262 index 6b5fcfa1e5553..98775d7fa6963 100644
3263 --- a/include/linux/virtio_net.h
3264 +++ b/include/linux/virtio_net.h
3265 @@ -62,6 +62,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
3266 return -EINVAL;
3267 }
3268
3269 + skb_reset_mac_header(skb);
3270 +
3271 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
3272 u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
3273 u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
3274 diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
3275 index 59f45b1e9dac0..b59d73d529ba7 100644
3276 --- a/include/net/netns/xfrm.h
3277 +++ b/include/net/netns/xfrm.h
3278 @@ -72,7 +72,9 @@ struct netns_xfrm {
3279 #if IS_ENABLED(CONFIG_IPV6)
3280 struct dst_ops xfrm6_dst_ops;
3281 #endif
3282 - spinlock_t xfrm_state_lock;
3283 + spinlock_t xfrm_state_lock;
3284 + seqcount_t xfrm_state_hash_generation;
3285 +
3286 spinlock_t xfrm_policy_lock;
3287 struct mutex xfrm_cfg_mutex;
3288 };
3289 diff --git a/include/net/red.h b/include/net/red.h
3290 index 8fe55b8b2fb81..ff07a7cedf685 100644
3291 --- a/include/net/red.h
3292 +++ b/include/net/red.h
3293 @@ -171,9 +171,9 @@ static inline void red_set_vars(struct red_vars *v)
3294 static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
3295 u8 Scell_log, u8 *stab)
3296 {
3297 - if (fls(qth_min) + Wlog > 32)
3298 + if (fls(qth_min) + Wlog >= 32)
3299 return false;
3300 - if (fls(qth_max) + Wlog > 32)
3301 + if (fls(qth_max) + Wlog >= 32)
3302 return false;
3303 if (Scell_log >= 32)
3304 return false;
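
The net/red.h change tightens the bound from "> 32" to ">= 32": fls(q) is the 1-based index of the highest set bit, so fls(q) + Wlog == 32 means the scaled threshold q << Wlog can land in [2^31, 2^32) and set bit 31, headroom the 32-bit fixed-point averaging arithmetic appears to rely on. A small demonstration (userspace fls() built from __builtin_clz, a GCC/Clang builtin):

#include <stdio.h>
#include <stdint.h>

static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int red_params_ok(uint32_t qth, unsigned int Wlog)
{
	return fls32(qth) + (int)Wlog < 32;   /* the stricter check */
}

int main(void)
{
	/* fls(0x01000000) == 25; with Wlog == 7 the old "> 32" test
	 * passed, yet 0x01000000 << 7 already sets bit 31. */
	printf("ok=%d scaled=0x%08x\n",
	       red_params_ok(0x01000000, 7),
	       (uint32_t)(0x01000000u << 7));
	return 0;
}
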
3305 diff --git a/include/net/sock.h b/include/net/sock.h
3306 index e6a48ebb22aa4..4137fa1787903 100644
3307 --- a/include/net/sock.h
3308 +++ b/include/net/sock.h
3309 @@ -2150,6 +2150,15 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
3310 sk_mem_charge(sk, skb->truesize);
3311 }
3312
3313 +static inline void skb_set_owner_sk_safe(struct sk_buff *skb, struct sock *sk)
3314 +{
3315 + if (sk && refcount_inc_not_zero(&sk->sk_refcnt)) {
3316 + skb_orphan(skb);
3317 + skb->destructor = sock_efree;
3318 + skb->sk = sk;
3319 + }
3320 +}
3321 +
3322 void sk_reset_timer(struct sock *sk, struct timer_list *timer,
3323 unsigned long expires);
3324
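
The new skb_set_owner_sk_safe() helper adopts an owner only when refcount_inc_not_zero() succeeds, i.e. only while the socket is still live; once the refcount has reached zero the socket is being torn down and must not be re-referenced. A userspace sketch of that "increment unless zero" pattern with C11 atomics (a refcount_t stand-in; names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sock_demo { atomic_uint refcnt; };

static bool get_sock_not_zero(struct sock_demo *sk)
{
	unsigned int c = atomic_load(&sk->refcnt);

	while (c != 0) {
		/* CAS retries if another thread changed the count */
		if (atomic_compare_exchange_weak(&sk->refcnt, &c, c + 1))
			return true;   /* reference taken: safe to adopt */
	}
	return false;                  /* already dying: leave it alone */
}

int main(void)
{
	struct sock_demo live = { 1 }, dying = { 0 };

	printf("live: %d dying: %d\n",
	       get_sock_not_zero(&live), get_sock_not_zero(&dying));
	return 0;
}
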
3325 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3326 index c00b9ae71ae40..614f19bbad74f 100644
3327 --- a/include/net/xfrm.h
3328 +++ b/include/net/xfrm.h
3329 @@ -1098,7 +1098,7 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
3330 return __xfrm_policy_check(sk, ndir, skb, family);
3331
3332 return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
3333 - (skb_dst(skb)->flags & DST_NOPOLICY) ||
3334 + (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
3335 __xfrm_policy_check(sk, ndir, skb, family);
3336 }
3337
3338 diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c
3339 index 8743150db2acc..c466c7fbdece5 100644
3340 --- a/kernel/gcov/clang.c
3341 +++ b/kernel/gcov/clang.c
3342 @@ -70,7 +70,9 @@ struct gcov_fn_info {
3343
3344 u32 ident;
3345 u32 checksum;
3346 +#if CONFIG_CLANG_VERSION < 110000
3347 u8 use_extra_checksum;
3348 +#endif
3349 u32 cfg_checksum;
3350
3351 u32 num_counters;
3352 @@ -145,10 +147,8 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
3353
3354 list_add_tail(&info->head, &current_info->functions);
3355 }
3356 -EXPORT_SYMBOL(llvm_gcda_emit_function);
3357 #else
3358 -void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
3359 - u8 use_extra_checksum, u32 cfg_checksum)
3360 +void llvm_gcda_emit_function(u32 ident, u32 func_checksum, u32 cfg_checksum)
3361 {
3362 struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
3363
3364 @@ -158,12 +158,11 @@ void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
3365 INIT_LIST_HEAD(&info->head);
3366 info->ident = ident;
3367 info->checksum = func_checksum;
3368 - info->use_extra_checksum = use_extra_checksum;
3369 info->cfg_checksum = cfg_checksum;
3370 list_add_tail(&info->head, &current_info->functions);
3371 }
3372 -EXPORT_SYMBOL(llvm_gcda_emit_function);
3373 #endif
3374 +EXPORT_SYMBOL(llvm_gcda_emit_function);
3375
3376 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
3377 {
3378 @@ -293,11 +292,16 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2)
3379 !list_is_last(&fn_ptr2->head, &info2->functions)) {
3380 if (fn_ptr1->checksum != fn_ptr2->checksum)
3381 return false;
3382 +#if CONFIG_CLANG_VERSION < 110000
3383 if (fn_ptr1->use_extra_checksum != fn_ptr2->use_extra_checksum)
3384 return false;
3385 if (fn_ptr1->use_extra_checksum &&
3386 fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
3387 return false;
3388 +#else
3389 + if (fn_ptr1->cfg_checksum != fn_ptr2->cfg_checksum)
3390 + return false;
3391 +#endif
3392 fn_ptr1 = list_next_entry(fn_ptr1, head);
3393 fn_ptr2 = list_next_entry(fn_ptr2, head);
3394 }
3395 @@ -529,17 +533,22 @@ static size_t convert_to_gcda(char *buffer, struct gcov_info *info)
3396
3397 list_for_each_entry(fi_ptr, &info->functions, head) {
3398 u32 i;
3399 - u32 len = 2;
3400 -
3401 - if (fi_ptr->use_extra_checksum)
3402 - len++;
3403
3404 pos += store_gcov_u32(buffer, pos, GCOV_TAG_FUNCTION);
3405 - pos += store_gcov_u32(buffer, pos, len);
3406 +#if CONFIG_CLANG_VERSION < 110000
3407 + pos += store_gcov_u32(buffer, pos,
3408 + fi_ptr->use_extra_checksum ? 3 : 2);
3409 +#else
3410 + pos += store_gcov_u32(buffer, pos, 3);
3411 +#endif
3412 pos += store_gcov_u32(buffer, pos, fi_ptr->ident);
3413 pos += store_gcov_u32(buffer, pos, fi_ptr->checksum);
3414 +#if CONFIG_CLANG_VERSION < 110000
3415 if (fi_ptr->use_extra_checksum)
3416 pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
3417 +#else
3418 + pos += store_gcov_u32(buffer, pos, fi_ptr->cfg_checksum);
3419 +#endif
3420
3421 pos += store_gcov_u32(buffer, pos, GCOV_TAG_COUNTER_BASE);
3422 pos += store_gcov_u32(buffer, pos, fi_ptr->num_counters * 2);
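
The gcov/clang.c hunks track an ABI change: Clang 11 dropped the use_extra_checksum flag from the function record, so both the struct field and every consumer are now gated on CONFIG_CLANG_VERSION and one source file can emit either record layout. A compact sketch of the idiom (TOOL_VERSION is a hypothetical stand-in for CONFIG_CLANG_VERSION):

#include <stdio.h>

#define TOOL_VERSION 110000          /* pretend: new layout */

struct fn_info {
	unsigned int checksum;
#if TOOL_VERSION < 110000
	unsigned char use_extra_checksum;
#endif
	unsigned int cfg_checksum;
};

static unsigned int record_len(const struct fn_info *fi)
{
#if TOOL_VERSION < 110000
	return fi->use_extra_checksum ? 3 : 2;  /* old: flag-dependent */
#else
	(void)fi;
	return 3;                               /* new: always 3 words */
#endif
}

int main(void)
{
	struct fn_info fi = { .checksum = 1, .cfg_checksum = 2 };

	printf("len=%u\n", record_len(&fi));
	return 0;
}
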
3423 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
3424 index 29c36c0290623..5d7092e32912e 100644
3425 --- a/kernel/workqueue.c
3426 +++ b/kernel/workqueue.c
3427 @@ -1411,7 +1411,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
3428 */
3429 lockdep_assert_irqs_disabled();
3430
3431 - debug_work_activate(work);
3432
3433 /* if draining, only works from the same workqueue are allowed */
3434 if (unlikely(wq->flags & __WQ_DRAINING) &&
3435 @@ -1493,6 +1492,7 @@ retry:
3436 worklist = &pwq->delayed_works;
3437 }
3438
3439 + debug_work_activate(work);
3440 insert_work(pwq, work, worklist, work_flags);
3441
3442 out:
3443 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
3444 index 8a482c5ec67bb..c5271ea4dc832 100644
3445 --- a/net/batman-adv/translation-table.c
3446 +++ b/net/batman-adv/translation-table.c
3447 @@ -891,6 +891,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
3448 hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
3449 tt_vlan->vid = htons(vlan->vid);
3450 tt_vlan->crc = htonl(vlan->tt.crc);
3451 + tt_vlan->reserved = 0;
3452
3453 tt_vlan++;
3454 }
3455 @@ -974,6 +975,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
3456
3457 tt_vlan->vid = htons(vlan->vid);
3458 tt_vlan->crc = htonl(vlan->tt.crc);
3459 + tt_vlan->reserved = 0;
3460
3461 tt_vlan++;
3462 }
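
The batman-adv hunks plug an information leak: the buffer holding the tt_vlan records is not zero-allocated, and the structure's reserved field was copied to the wire uninitialized, exposing stale heap bytes to the network. The rule the fix illustrates, in a standalone sketch (simplified wire struct, illustrative values):

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

struct tt_vlan_wire {
	uint16_t vid;
	uint32_t crc;
	uint16_t reserved;   /* still must be written before sending */
} __attribute__((packed));

int main(void)
{
	struct tt_vlan_wire *tt = malloc(sizeof(*tt)); /* not zeroed */

	if (!tt)
		return 1;
	tt->vid = 42;
	tt->crc = 0xdeadbeef;
	tt->reserved = 0;    /* the fix: no uninitialized bytes on the wire */

	printf("reserved=%u\n", tt->reserved);
	free(tt);
	return 0;
}
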
3463 diff --git a/net/can/bcm.c b/net/can/bcm.c
3464 index c96fa0f33db39..d3aac6a2479b5 100644
3465 --- a/net/can/bcm.c
3466 +++ b/net/can/bcm.c
3467 @@ -88,6 +88,8 @@ MODULE_LICENSE("Dual BSD/GPL");
3468 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
3469 MODULE_ALIAS("can-proto-2");
3470
3471 +#define BCM_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
3472 +
3473 /*
3474 * easy access to the first 64 bit of can(fd)_frame payload. cp->data is
3475 * 64 bit aligned so the offset has to be multiples of 8 which is ensured
3476 @@ -1294,7 +1296,7 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3477 /* no bound device as default => check msg_name */
3478 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
3479
3480 - if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
3481 + if (msg->msg_namelen < BCM_MIN_NAMELEN)
3482 return -EINVAL;
3483
3484 if (addr->can_family != AF_CAN)
3485 @@ -1536,7 +1538,7 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
3486 struct net *net = sock_net(sk);
3487 int ret = 0;
3488
3489 - if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
3490 + if (len < BCM_MIN_NAMELEN)
3491 return -EINVAL;
3492
3493 lock_sock(sk);
3494 @@ -1618,8 +1620,8 @@ static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3495 sock_recv_ts_and_drops(msg, sk, skb);
3496
3497 if (msg->msg_name) {
3498 - __sockaddr_check_size(sizeof(struct sockaddr_can));
3499 - msg->msg_namelen = sizeof(struct sockaddr_can);
3500 + __sockaddr_check_size(BCM_MIN_NAMELEN);
3501 + msg->msg_namelen = BCM_MIN_NAMELEN;
3502 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
3503 }
3504
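
BCM_MIN_NAMELEN (and RAW_MIN_NAMELEN in the next file) express "the address is valid up to and including can_ifindex" as one named constant built from CAN_REQUIRED_SIZE(), the offsetof-plus-sizeof helper from the CAN uapi headers; recvmsg() then reports that length instead of the full sizeof(struct sockaddr_can). The construction, in a simplified standalone form (demo struct, not the real sockaddr_can):

#include <stddef.h>
#include <stdio.h>

struct sockaddr_can_demo {
	unsigned short can_family;
	int can_ifindex;
	union { char pad[10]; } can_addr;  /* protocol-specific tail */
};

#define REQUIRED_SIZE(type, member) \
	(offsetof(type, member) + sizeof(((type *)0)->member))

#define MIN_NAMELEN REQUIRED_SIZE(struct sockaddr_can_demo, can_ifindex)

int main(void)
{
	printf("min=%zu full=%zu\n",
	       (size_t)MIN_NAMELEN, sizeof(struct sockaddr_can_demo));
	return 0;
}
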
3505 diff --git a/net/can/raw.c b/net/can/raw.c
3506 index 59c039d73c6d5..af513d0957c74 100644
3507 --- a/net/can/raw.c
3508 +++ b/net/can/raw.c
3509 @@ -62,6 +62,8 @@ MODULE_LICENSE("Dual BSD/GPL");
3510 MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
3511 MODULE_ALIAS("can-proto-1");
3512
3513 +#define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
3514 +
3515 #define MASK_ALL 0
3516
3517 /* A raw socket has a list of can_filters attached to it, each receiving
3518 @@ -396,7 +398,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
3519 int err = 0;
3520 int notify_enetdown = 0;
3521
3522 - if (len < CAN_REQUIRED_SIZE(*addr, can_ifindex))
3523 + if (len < RAW_MIN_NAMELEN)
3524 return -EINVAL;
3525 if (addr->can_family != AF_CAN)
3526 return -EINVAL;
3527 @@ -477,11 +479,11 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
3528 if (peer)
3529 return -EOPNOTSUPP;
3530
3531 - memset(addr, 0, sizeof(*addr));
3532 + memset(addr, 0, RAW_MIN_NAMELEN);
3533 addr->can_family = AF_CAN;
3534 addr->can_ifindex = ro->ifindex;
3535
3536 - return sizeof(*addr);
3537 + return RAW_MIN_NAMELEN;
3538 }
3539
3540 static int raw_setsockopt(struct socket *sock, int level, int optname,
3541 @@ -733,7 +735,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3542 if (msg->msg_name) {
3543 DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);
3544
3545 - if (msg->msg_namelen < CAN_REQUIRED_SIZE(*addr, can_ifindex))
3546 + if (msg->msg_namelen < RAW_MIN_NAMELEN)
3547 return -EINVAL;
3548
3549 if (addr->can_family != AF_CAN)
3550 @@ -822,8 +824,8 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
3551 sock_recv_ts_and_drops(msg, sk, skb);
3552
3553 if (msg->msg_name) {
3554 - __sockaddr_check_size(sizeof(struct sockaddr_can));
3555 - msg->msg_namelen = sizeof(struct sockaddr_can);
3556 + __sockaddr_check_size(RAW_MIN_NAMELEN);
3557 + msg->msg_namelen = RAW_MIN_NAMELEN;
3558 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
3559 }
3560
3561 diff --git a/net/core/sock.c b/net/core/sock.c
3562 index 33543d59a10d6..19c178aac0ae8 100644
3563 --- a/net/core/sock.c
3564 +++ b/net/core/sock.c
3565 @@ -2026,16 +2026,10 @@ void skb_orphan_partial(struct sk_buff *skb)
3566 if (skb_is_tcp_pure_ack(skb))
3567 return;
3568
3569 - if (can_skb_orphan_partial(skb)) {
3570 - struct sock *sk = skb->sk;
3571 -
3572 - if (refcount_inc_not_zero(&sk->sk_refcnt)) {
3573 - WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
3574 - skb->destructor = sock_efree;
3575 - }
3576 - } else {
3577 + if (can_skb_orphan_partial(skb))
3578 + skb_set_owner_sk_safe(skb, skb->sk);
3579 + else
3580 skb_orphan(skb);
3581 - }
3582 }
3583 EXPORT_SYMBOL(skb_orphan_partial);
3584
3585 diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
3586 index c7bd6c49fadff..5dd463a18e4cf 100644
3587 --- a/net/hsr/hsr_device.c
3588 +++ b/net/hsr/hsr_device.c
3589 @@ -229,6 +229,7 @@ static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
3590 master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
3591 if (master) {
3592 skb->dev = master->dev;
3593 + skb_reset_mac_header(skb);
3594 hsr_forward_skb(skb, master);
3595 } else {
3596 atomic_long_inc(&dev->tx_dropped);
3597 diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
3598 index ddd9605bad04c..bf3ecf792688e 100644
3599 --- a/net/hsr/hsr_forward.c
3600 +++ b/net/hsr/hsr_forward.c
3601 @@ -349,12 +349,6 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
3602 {
3603 struct hsr_frame_info frame;
3604
3605 - if (skb_mac_header(skb) != skb->data) {
3606 - WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
3607 - __FILE__, __LINE__, port->dev->name);
3608 - goto out_drop;
3609 - }
3610 -
3611 if (hsr_fill_frame_info(&frame, skb, port) < 0)
3612 goto out_drop;
3613 hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
3614 diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
3615 index 6d091e419d3ee..d19c40c684e80 100644
3616 --- a/net/ieee802154/nl-mac.c
3617 +++ b/net/ieee802154/nl-mac.c
3618 @@ -551,9 +551,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
3619 desc->mode = nla_get_u8(info->attrs[IEEE802154_ATTR_LLSEC_KEY_MODE]);
3620
3621 if (desc->mode == IEEE802154_SCF_KEY_IMPLICIT) {
3622 - if (!info->attrs[IEEE802154_ATTR_PAN_ID] &&
3623 - !(info->attrs[IEEE802154_ATTR_SHORT_ADDR] ||
3624 - info->attrs[IEEE802154_ATTR_HW_ADDR]))
3625 + if (!info->attrs[IEEE802154_ATTR_PAN_ID])
3626 return -EINVAL;
3627
3628 desc->device_addr.pan_id = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_PAN_ID]);
3629 @@ -562,6 +560,9 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
3630 desc->device_addr.mode = IEEE802154_ADDR_SHORT;
3631 desc->device_addr.short_addr = nla_get_shortaddr(info->attrs[IEEE802154_ATTR_SHORT_ADDR]);
3632 } else {
3633 + if (!info->attrs[IEEE802154_ATTR_HW_ADDR])
3634 + return -EINVAL;
3635 +
3636 desc->device_addr.mode = IEEE802154_ADDR_LONG;
3637 desc->device_addr.extended_addr = nla_get_hwaddr(info->attrs[IEEE802154_ATTR_HW_ADDR]);
3638 }
3639 diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
3640 index ffcfcef762912..f03958fcb5be1 100644
3641 --- a/net/ieee802154/nl802154.c
3642 +++ b/net/ieee802154/nl802154.c
3643 @@ -836,8 +836,13 @@ nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags,
3644 goto nla_put_failure;
3645
3646 #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
3647 + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3648 + goto out;
3649 +
3650 if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0)
3651 goto nla_put_failure;
3652 +
3653 +out:
3654 #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */
3655
3656 genlmsg_end(msg, hdr);
3657 @@ -1400,6 +1405,9 @@ static int nl802154_set_llsec_params(struct sk_buff *skb,
3658 u32 changed = 0;
3659 int ret;
3660
3661 + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3662 + return -EOPNOTSUPP;
3663 +
3664 if (info->attrs[NL802154_ATTR_SEC_ENABLED]) {
3665 u8 enabled;
3666
3667 @@ -1560,7 +1568,8 @@ static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info)
3668 struct ieee802154_llsec_key_id id = { };
3669 u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { };
3670
3671 - if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3672 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
3673 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3674 return -EINVAL;
3675
3676 if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] ||
3677 @@ -1608,7 +1617,8 @@ static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info)
3678 struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1];
3679 struct ieee802154_llsec_key_id id;
3680
3681 - if (nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3682 + if (!info->attrs[NL802154_ATTR_SEC_KEY] ||
3683 + nla_parse_nested_deprecated(attrs, NL802154_KEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_KEY], nl802154_key_policy, info->extack))
3684 return -EINVAL;
3685
3686 if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0)
3687 @@ -1773,7 +1783,8 @@ static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info)
3688 struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1];
3689 __le64 extended_addr;
3690
3691 - if (nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
3692 + if (!info->attrs[NL802154_ATTR_SEC_DEVICE] ||
3693 + nla_parse_nested_deprecated(attrs, NL802154_DEV_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVICE], nl802154_dev_policy, info->extack))
3694 return -EINVAL;
3695
3696 if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR])
3697 @@ -1929,7 +1940,8 @@ static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info
3698 struct ieee802154_llsec_device_key key;
3699 __le64 extended_addr;
3700
3701 - if (nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
3702 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] ||
3703 + nla_parse_nested_deprecated(attrs, NL802154_DEVKEY_ATTR_MAX, info->attrs[NL802154_ATTR_SEC_DEVKEY], nl802154_devkey_policy, info->extack))
3704 return -EINVAL;
3705
3706 if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR])
3707 @@ -2101,6 +2113,9 @@ static int nl802154_del_llsec_seclevel(struct sk_buff *skb,
3708 struct wpan_dev *wpan_dev = dev->ieee802154_ptr;
3709 struct ieee802154_llsec_seclevel sl;
3710
3711 + if (wpan_dev->iftype == NL802154_IFTYPE_MONITOR)
3712 + return -EOPNOTSUPP;
3713 +
3714 if (!info->attrs[NL802154_ATTR_SEC_LEVEL] ||
3715 llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL],
3716 &sl) < 0)
3717 diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
3718 index 25c8ba6732df0..8c0af30fb0679 100644
3719 --- a/net/ipv4/esp4_offload.c
3720 +++ b/net/ipv4/esp4_offload.c
3721 @@ -177,10 +177,12 @@ static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
3722
3723 if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
3724 !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
3725 - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
3726 + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
3727 + NETIF_F_SCTP_CRC);
3728 else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
3729 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
3730 - esp_features = features & ~NETIF_F_CSUM_MASK;
3731 + esp_features = features & ~(NETIF_F_CSUM_MASK |
3732 + NETIF_F_SCTP_CRC);
3733
3734 xo->flags |= XFRM_GSO_SEGMENT;
3735
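
esp4_gso_segment() builds the feature set software must emulate once ESP encapsulation is applied; the hunk adds NETIF_F_SCTP_CRC to the bits stripped when the device cannot offload ESP (the esp6_offload.c counterpart below receives the same change), so SCTP CRC32c is computed in software rather than silently skipped. The masking style, in a toy form (bit values illustrative, not the kernel's netdev_features_t):

#include <stdint.h>
#include <stdio.h>

#define F_SG        (1u << 0)
#define F_CSUM_MASK (1u << 1)
#define F_SCTP_CRC  (1u << 2)
#define F_HW_ESP    (1u << 3)

int main(void)
{
	/* device advertises SG + checksum + SCTP CRC, but no ESP offload */
	uint32_t features = F_SG | F_CSUM_MASK | F_SCTP_CRC;
	uint32_t esp_features = features;

	if (!(features & F_HW_ESP))   /* ESP done in software ... */
		esp_features = features &
			~(F_SG | F_CSUM_MASK | F_SCTP_CRC);

	printf("features=0x%x esp_features=0x%x\n", features, esp_features);
	return 0;
}
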
3736 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3737 index 994a150ae3e90..24841a9e99668 100644
3738 --- a/net/ipv4/udp.c
3739 +++ b/net/ipv4/udp.c
3740 @@ -2692,6 +2692,10 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
3741 val = up->gso_size;
3742 break;
3743
3744 + case UDP_GRO:
3745 + val = up->gro_enabled;
3746 + break;
3747 +
3748 /* The following two cannot be changed on UDP sockets, the return is
3749 * always 0 (which corresponds to the full checksum coverage of UDP). */
3750 case UDPLITE_SEND_CSCOV:
3751 diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
3752 index 93e086cf058a6..1c532638b2adf 100644
3753 --- a/net/ipv6/esp6_offload.c
3754 +++ b/net/ipv6/esp6_offload.c
3755 @@ -210,9 +210,11 @@ static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
3756 skb->encap_hdr_csum = 1;
3757
3758 if (!(features & NETIF_F_HW_ESP) || x->xso.dev != skb->dev)
3759 - esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
3760 + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK |
3761 + NETIF_F_SCTP_CRC);
3762 else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
3763 - esp_features = features & ~NETIF_F_CSUM_MASK;
3764 + esp_features = features & ~(NETIF_F_CSUM_MASK |
3765 + NETIF_F_SCTP_CRC);
3766
3767 xo->flags |= XFRM_GSO_SEGMENT;
3768
3769 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
3770 index dfe5e603ffe16..828dd95840b47 100644
3771 --- a/net/ipv6/raw.c
3772 +++ b/net/ipv6/raw.c
3773 @@ -298,7 +298,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
3774 */
3775 v4addr = LOOPBACK4_IPV6;
3776 if (!(addr_type & IPV6_ADDR_MULTICAST) &&
3777 - !sock_net(sk)->ipv6.sysctl.ip_nonlocal_bind) {
3778 + !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
3779 err = -EADDRNOTAVAIL;
3780 if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
3781 dev, 0)) {
3782 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3783 index 46df6345bb99c..3a9bd9687e7d1 100644
3784 --- a/net/ipv6/route.c
3785 +++ b/net/ipv6/route.c
3786 @@ -5183,9 +5183,11 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
3787 * nexthops have been replaced by first new, the rest should
3788 * be added to it.
3789 */
3790 - cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3791 - NLM_F_REPLACE);
3792 - cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
3793 + if (cfg->fc_nlinfo.nlh) {
3794 + cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
3795 + NLM_F_REPLACE);
3796 + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
3797 + }
3798 nhn++;
3799 }
3800
3801 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
3802 index da0e285302f9d..538722522ffe9 100644
3803 --- a/net/mac80211/tx.c
3804 +++ b/net/mac80211/tx.c
3805 @@ -3582,7 +3582,7 @@ begin:
3806 test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
3807 goto out;
3808
3809 - if (vif->txqs_stopped[ieee80211_ac_from_tid(txq->tid)]) {
3810 + if (vif->txqs_stopped[txq->ac]) {
3811 set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
3812 goto out;
3813 }
3814 diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
3815 index c079ee69d3d03..346a9c86bcf88 100644
3816 --- a/net/mac802154/llsec.c
3817 +++ b/net/mac802154/llsec.c
3818 @@ -152,7 +152,7 @@ err_tfm0:
3819 crypto_free_sync_skcipher(key->tfm0);
3820 err_tfm:
3821 for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
3822 - if (key->tfm[i])
3823 + if (!IS_ERR_OR_NULL(key->tfm[i]))
3824 crypto_free_aead(key->tfm[i]);
3825
3826 kzfree(key);
3827 diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
3828 index 4910e61622329..9bd12f7517ed5 100644
3829 --- a/net/ncsi/ncsi-manage.c
3830 +++ b/net/ncsi/ncsi-manage.c
3831 @@ -103,13 +103,20 @@ static void ncsi_channel_monitor(struct timer_list *t)
3832 monitor_state = nc->monitor.state;
3833 spin_unlock_irqrestore(&nc->lock, flags);
3834
3835 - if (!enabled || chained) {
3836 - ncsi_stop_channel_monitor(nc);
3837 - return;
3838 - }
3839 + if (!enabled)
3840 + return; /* expected race disabling timer */
3841 + if (WARN_ON_ONCE(chained))
3842 + goto bad_state;
3843 +
3844 if (state != NCSI_CHANNEL_INACTIVE &&
3845 state != NCSI_CHANNEL_ACTIVE) {
3846 - ncsi_stop_channel_monitor(nc);
3847 +bad_state:
3848 + netdev_warn(ndp->ndev.dev,
3849 + "Bad NCSI monitor state channel %d 0x%x %s queue\n",
3850 + nc->id, state, chained ? "on" : "off");
3851 + spin_lock_irqsave(&nc->lock, flags);
3852 + nc->monitor.enabled = false;
3853 + spin_unlock_irqrestore(&nc->lock, flags);
3854 return;
3855 }
3856
3857 @@ -134,10 +141,9 @@ static void ncsi_channel_monitor(struct timer_list *t)
3858 ncsi_report_link(ndp, true);
3859 ndp->flags |= NCSI_DEV_RESHUFFLE;
3860
3861 - ncsi_stop_channel_monitor(nc);
3862 -
3863 ncm = &nc->modes[NCSI_MODE_LINK];
3864 spin_lock_irqsave(&nc->lock, flags);
3865 + nc->monitor.enabled = false;
3866 nc->state = NCSI_CHANNEL_INVISIBLE;
3867 ncm->data[2] &= ~0x1;
3868 spin_unlock_irqrestore(&nc->lock, flags);
3869 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
3870 index 28604414dec1b..170c342b11dae 100644
3871 --- a/net/nfc/llcp_sock.c
3872 +++ b/net/nfc/llcp_sock.c
3873 @@ -108,11 +108,13 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
3874 llcp_sock->service_name_len,
3875 GFP_KERNEL);
3876 if (!llcp_sock->service_name) {
3877 + nfc_llcp_local_put(llcp_sock->local);
3878 ret = -ENOMEM;
3879 goto put_dev;
3880 }
3881 llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
3882 if (llcp_sock->ssap == LLCP_SAP_MAX) {
3883 + nfc_llcp_local_put(llcp_sock->local);
3884 kfree(llcp_sock->service_name);
3885 llcp_sock->service_name = NULL;
3886 ret = -EADDRINUSE;
3887 @@ -671,6 +673,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
3888 ret = -EISCONN;
3889 goto error;
3890 }
3891 + if (sk->sk_state == LLCP_CONNECTING) {
3892 + ret = -EINPROGRESS;
3893 + goto error;
3894 + }
3895
3896 dev = nfc_get_device(addr->dev_idx);
3897 if (dev == NULL) {
3898 @@ -702,6 +708,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
3899 llcp_sock->local = nfc_llcp_local_get(local);
3900 llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
3901 if (llcp_sock->ssap == LLCP_SAP_MAX) {
3902 + nfc_llcp_local_put(llcp_sock->local);
3903 ret = -ENOMEM;
3904 goto put_dev;
3905 }
3906 @@ -743,9 +750,12 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
3907
3908 sock_unlink:
3909 nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
3910 + kfree(llcp_sock->service_name);
3911 + llcp_sock->service_name = NULL;
3912
3913 sock_llcp_release:
3914 nfc_llcp_put_ssap(local, llcp_sock->ssap);
3915 + nfc_llcp_local_put(llcp_sock->local);
3916
3917 put_dev:
3918 nfc_put_device(dev);
3919 diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
3920 index d06d7d58eaf27..b6f98eba71f1b 100644
3921 --- a/net/openvswitch/conntrack.c
3922 +++ b/net/openvswitch/conntrack.c
3923 @@ -2019,16 +2019,12 @@ static int ovs_ct_limit_del_zone_limit(struct nlattr *nla_zone_limit,
3924 static int ovs_ct_limit_get_default_limit(struct ovs_ct_limit_info *info,
3925 struct sk_buff *reply)
3926 {
3927 - struct ovs_zone_limit zone_limit;
3928 - int err;
3929 + struct ovs_zone_limit zone_limit = {
3930 + .zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE,
3931 + .limit = info->default_limit,
3932 + };
3933
3934 - zone_limit.zone_id = OVS_ZONE_LIMIT_DEFAULT_ZONE;
3935 - zone_limit.limit = info->default_limit;
3936 - err = nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
3937 - if (err)
3938 - return err;
3939 -
3940 - return 0;
3941 + return nla_put_nohdr(reply, sizeof(zone_limit), &zone_limit);
3942 }
3943
3944 static int __ovs_ct_limit_get_zone_limit(struct net *net,
3945 diff --git a/net/rds/message.c b/net/rds/message.c
3946 index 2d43e13d6dd59..92b6b22884d4c 100644
3947 --- a/net/rds/message.c
3948 +++ b/net/rds/message.c
3949 @@ -347,8 +347,9 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
3950 rm->data.op_nents = DIV_ROUND_UP(total_len, PAGE_SIZE);
3951 rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
3952 if (IS_ERR(rm->data.op_sg)) {
3953 + void *err = ERR_CAST(rm->data.op_sg);
3954 rds_message_put(rm);
3955 - return ERR_CAST(rm->data.op_sg);
3956 + return err;
3957 }
3958
3959 for (i = 0; i < rm->data.op_nents; ++i) {
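
In rds_message_map_pages(), rds_message_put() may drop the last reference and free rm, so dereferencing rm->data.op_sg after the put is a use-after-free; the hunk copies the ERR_PTR-encoded error out first. The general rule, sketched with plain malloc/free standing in for refcounting (names hypothetical):

#include <stdlib.h>
#include <stdio.h>

struct msg { void *op_sg; };

static void msg_put(struct msg *m) { free(m); } /* may be the last ref */

static void *alloc_fail_path(void)
{
	struct msg *m = calloc(1, sizeof(*m));
	void *err;

	if (!m)
		return NULL;
	m->op_sg = (void *)-12L;   /* encoded -ENOMEM, as with ERR_PTR() */

	err = m->op_sg;            /* capture first ... */
	msg_put(m);                /* ... then drop the reference */
	return err;                /* not "return m->op_sg" after the put */
}

int main(void)
{
	printf("err=%ld\n", (long)alloc_fail_path());
	return 0;
}
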
3960 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
3961 index 43c10a85e8813..716cad6773184 100644
3962 --- a/net/sched/act_api.c
3963 +++ b/net/sched/act_api.c
3964 @@ -935,6 +935,9 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
3965 if (err != ACT_P_CREATED)
3966 module_put(a_o->owner);
3967
3968 + if (!bind && ovr && err == ACT_P_CREATED)
3969 + refcount_set(&a->tcfa_refcnt, 2);
3970 +
3971 return a;
3972
3973 err_mod:
3974 diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
3975 index 2f1f0a3784083..6af6b95bdb672 100644
3976 --- a/net/sched/sch_teql.c
3977 +++ b/net/sched/sch_teql.c
3978 @@ -134,6 +134,9 @@ teql_destroy(struct Qdisc *sch)
3979 struct teql_sched_data *dat = qdisc_priv(sch);
3980 struct teql_master *master = dat->m;
3981
3982 + if (!master)
3983 + return;
3984 +
3985 prev = master->slaves;
3986 if (prev) {
3987 do {
3988 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
3989 index c87af430107ae..52c92b8d827fd 100644
3990 --- a/net/sctp/ipv6.c
3991 +++ b/net/sctp/ipv6.c
3992 @@ -643,8 +643,8 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
3993 if (!(type & IPV6_ADDR_UNICAST))
3994 return 0;
3995
3996 - return sp->inet.freebind || net->ipv6.sysctl.ip_nonlocal_bind ||
3997 - ipv6_chk_addr(net, in6, NULL, 0);
3998 + return ipv6_can_nonlocal_bind(net, &sp->inet) ||
3999 + ipv6_chk_addr(net, in6, NULL, 0);
4000 }
4001
4002 /* This function checks if the address is a valid address to be used for
4003 @@ -933,8 +933,7 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
4004 net = sock_net(&opt->inet.sk);
4005 rcu_read_lock();
4006 dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
4007 - if (!dev || !(opt->inet.freebind ||
4008 - net->ipv6.sysctl.ip_nonlocal_bind ||
4009 + if (!dev || !(ipv6_can_nonlocal_bind(net, &opt->inet) ||
4010 ipv6_chk_addr(net, &addr->v6.sin6_addr,
4011 dev, 0))) {
4012 rcu_read_unlock();
4013 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4014 index 66e8f89bce534..b2c36dcfc8e2f 100644
4015 --- a/net/tipc/socket.c
4016 +++ b/net/tipc/socket.c
4017 @@ -1210,7 +1210,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
4018 spin_lock_bh(&inputq->lock);
4019 if (skb_peek(arrvq) == skb) {
4020 skb_queue_splice_tail_init(&tmpq, inputq);
4021 - kfree_skb(__skb_dequeue(arrvq));
4022 + __skb_dequeue(arrvq);
4023 }
4024 spin_unlock_bh(&inputq->lock);
4025 __skb_queue_purge(&tmpq);
4026 diff --git a/net/wireless/sme.c b/net/wireless/sme.c
4027 index d32a2ec4d96ac..63f89687a018b 100644
4028 --- a/net/wireless/sme.c
4029 +++ b/net/wireless/sme.c
4030 @@ -530,7 +530,7 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
4031 cfg80211_sme_free(wdev);
4032 }
4033
4034 - if (WARN_ON(wdev->conn))
4035 + if (wdev->conn)
4036 return -EINPROGRESS;
4037
4038 wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
4039 diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
4040 index 01c65f96d2832..74e90d78c3b46 100644
4041 --- a/net/xfrm/xfrm_interface.c
4042 +++ b/net/xfrm/xfrm_interface.c
4043 @@ -302,6 +302,8 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
4044
4045 icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
4046 } else {
4047 + if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
4048 + goto xmit;
4049 icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
4050 htonl(mtu));
4051 }
4052 @@ -310,6 +312,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
4053 return -EMSGSIZE;
4054 }
4055
4056 +xmit:
4057 xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
4058 skb_dst_set(skb, dst);
4059 skb->dev = tdev;
4060 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
4061 index 61fd0569d3934..1423e2b7cb42a 100644
4062 --- a/net/xfrm/xfrm_state.c
4063 +++ b/net/xfrm/xfrm_state.c
4064 @@ -44,7 +44,6 @@ static void xfrm_state_gc_task(struct work_struct *work);
4065 */
4066
4067 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
4068 -static __read_mostly seqcount_t xfrm_state_hash_generation = SEQCNT_ZERO(xfrm_state_hash_generation);
4069 static struct kmem_cache *xfrm_state_cache __ro_after_init;
4070
4071 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
4072 @@ -140,7 +139,7 @@ static void xfrm_hash_resize(struct work_struct *work)
4073 }
4074
4075 spin_lock_bh(&net->xfrm.xfrm_state_lock);
4076 - write_seqcount_begin(&xfrm_state_hash_generation);
4077 + write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
4078
4079 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
4080 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
4081 @@ -156,7 +155,7 @@ static void xfrm_hash_resize(struct work_struct *work)
4082 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
4083 net->xfrm.state_hmask = nhashmask;
4084
4085 - write_seqcount_end(&xfrm_state_hash_generation);
4086 + write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
4087 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
4088
4089 osize = (ohashmask + 1) * sizeof(struct hlist_head);
4090 @@ -1058,7 +1057,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
4091
4092 to_put = NULL;
4093
4094 - sequence = read_seqcount_begin(&xfrm_state_hash_generation);
4095 + sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
4096
4097 rcu_read_lock();
4098 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
4099 @@ -1171,7 +1170,7 @@ out:
4100 if (to_put)
4101 xfrm_state_put(to_put);
4102
4103 - if (read_seqcount_retry(&xfrm_state_hash_generation, sequence)) {
4104 + if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
4105 *err = -EAGAIN;
4106 if (x) {
4107 xfrm_state_put(x);
4108 @@ -2588,6 +2587,7 @@ int __net_init xfrm_state_init(struct net *net)
4109 net->xfrm.state_num = 0;
4110 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
4111 spin_lock_init(&net->xfrm.xfrm_state_lock);
4112 + seqcount_init(&net->xfrm.xfrm_state_hash_generation);
4113 return 0;
4114
4115 out_byspi:
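
The xfrm_state.c conversion pairs with the netns_xfrm hunk earlier in this patch: the hash-generation seqcount becomes per-namespace and is initialized in xfrm_state_init(), so its writers are serialized by the same per-namespace xfrm_state_lock that protects the tables; with the old file-scope global, resizes in two namespaces could update one shared counter under two different locks. The read-begin/retry shape, reduced to a single-threaded sketch of the API (the kernel's seqcount_t additionally handles memory ordering and lockdep):

#include <stdio.h>

struct netns_demo {
	unsigned int gen;     /* even: stable, odd: write in progress */
	int hash_size;
};

static unsigned int read_begin(struct netns_demo *ns) { return ns->gen; }

static int read_retry(struct netns_demo *ns, unsigned int s)
{
	return ns->gen != s;  /* counter moved: result is stale, retry */
}

static void resize(struct netns_demo *ns, int n)
{
	ns->gen++;            /* odd: readers started now will retry */
	ns->hash_size = n;
	ns->gen++;            /* even again: table stable */
}

int main(void)
{
	struct netns_demo ns = { .gen = 0, .hash_size = 16 };
	unsigned int seq = read_begin(&ns);

	resize(&ns, 32);      /* a writer ran between begin and retry */
	printf("retry=%d size=%d\n", read_retry(&ns, seq), ns.hash_size);
	return 0;
}
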
4116 diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
4117 index 9ccdad89c288d..452b9eaca815b 100644
4118 --- a/sound/drivers/aloop.c
4119 +++ b/sound/drivers/aloop.c
4120 @@ -1035,6 +1035,14 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
4121 return -ENOMEM;
4122 kctl->id.device = dev;
4123 kctl->id.subdevice = substr;
4124 +
4125 + /* Add the control before copying the id so that
4126 + * the numid field of the id is set in the copy.
4127 + */
4128 + err = snd_ctl_add(card, kctl);
4129 + if (err < 0)
4130 + return err;
4131 +
4132 switch (idx) {
4133 case ACTIVE_IDX:
4134 setup->active_id = kctl->id;
4135 @@ -1051,9 +1059,6 @@ static int loopback_mixer_new(struct loopback *loopback, int notify)
4136 default:
4137 break;
4138 }
4139 - err = snd_ctl_add(card, kctl);
4140 - if (err < 0)
4141 - return err;
4142 }
4143 }
4144 }
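
The aloop fix is an ordering bug: snd_ctl_add() is what assigns the control's numid, so the setup->*_id = kctl->id copies taken before registration saved an id whose numid field was still 0, and later operations through the saved ids could miss the control. The dependency, in a toy registration model (all names hypothetical):

#include <stdio.h>

struct ctl_id { unsigned int numid; int device; };
struct kctl_demo { struct ctl_id id; };

static unsigned int next_numid = 1;

static int ctl_add(struct kctl_demo *k)
{
	k->id.numid = next_numid++;   /* assigned only at registration */
	return 0;
}

int main(void)
{
	struct kctl_demo k = { .id = { .numid = 0, .device = 3 } };
	struct ctl_id saved;

	if (ctl_add(&k))              /* register first ... */
		return 1;
	saved = k.id;                 /* ... then copy: numid is valid */

	printf("saved numid=%u\n", saved.numid);
	return 0;
}
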
4145 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4146 index 3c9e072db3538..e3fab993b3395 100644
4147 --- a/sound/pci/hda/patch_realtek.c
4148 +++ b/sound/pci/hda/patch_realtek.c
4149 @@ -3917,6 +3917,15 @@ static void alc271_fixup_dmic(struct hda_codec *codec,
4150 snd_hda_sequence_write(codec, verbs);
4151 }
4152
4153 +/* Fix the speaker amp after resume, etc */
4154 +static void alc269vb_fixup_aspire_e1_coef(struct hda_codec *codec,
4155 + const struct hda_fixup *fix,
4156 + int action)
4157 +{
4158 + if (action == HDA_FIXUP_ACT_INIT)
4159 + alc_update_coef_idx(codec, 0x0d, 0x6000, 0x6000);
4160 +}
4161 +
4162 static void alc269_fixup_pcm_44k(struct hda_codec *codec,
4163 const struct hda_fixup *fix, int action)
4164 {
4165 @@ -6220,6 +6229,7 @@ enum {
4166 ALC283_FIXUP_HEADSET_MIC,
4167 ALC255_FIXUP_MIC_MUTE_LED,
4168 ALC282_FIXUP_ASPIRE_V5_PINS,
4169 + ALC269VB_FIXUP_ASPIRE_E1_COEF,
4170 ALC280_FIXUP_HP_GPIO4,
4171 ALC286_FIXUP_HP_GPIO_LED,
4172 ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY,
4173 @@ -6890,6 +6900,10 @@ static const struct hda_fixup alc269_fixups[] = {
4174 { },
4175 },
4176 },
4177 + [ALC269VB_FIXUP_ASPIRE_E1_COEF] = {
4178 + .type = HDA_FIXUP_FUNC,
4179 + .v.func = alc269vb_fixup_aspire_e1_coef,
4180 + },
4181 [ALC280_FIXUP_HP_GPIO4] = {
4182 .type = HDA_FIXUP_FUNC,
4183 .v.func = alc280_fixup_hp_gpio4,
4184 @@ -7764,6 +7778,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4185 SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4186 SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
4187 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4188 + SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
4189 SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
4190 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4191 SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
4192 @@ -8240,6 +8255,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4193 {.id = ALC283_FIXUP_HEADSET_MIC, .name = "alc283-headset"},
4194 {.id = ALC255_FIXUP_MIC_MUTE_LED, .name = "alc255-dell-mute"},
4195 {.id = ALC282_FIXUP_ASPIRE_V5_PINS, .name = "aspire-v5"},
4196 + {.id = ALC269VB_FIXUP_ASPIRE_E1_COEF, .name = "aspire-e1-coef"},
4197 {.id = ALC280_FIXUP_HP_GPIO4, .name = "hp-gpio4"},
4198 {.id = ALC286_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
4199 {.id = ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, .name = "hp-gpio2-hotkey"},
4200 diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
4201 index 6cf0f6612bdaf..708fc4ed54eda 100644
4202 --- a/sound/soc/codecs/wm8960.c
4203 +++ b/sound/soc/codecs/wm8960.c
4204 @@ -707,7 +707,13 @@ int wm8960_configure_pll(struct snd_soc_component *component, int freq_in,
4205 best_freq_out = -EINVAL;
4206 *sysclk_idx = *dac_idx = *bclk_idx = -1;
4207
4208 - for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
4209 + /*
4210 + * Per the datasheet, the PLL performs best when f2 is between
4211 + * 90MHz and 100MHz; since the desired sysclk output is 11.2896MHz
4212 + * or 12.288MHz, sysclkdiv = 2 is the best choice.
4213 + * So search sysclk_divs from 2 down to 1 instead of from 1 to 2.
4214 + */
4215 + for (i = ARRAY_SIZE(sysclk_divs) - 1; i >= 0; --i) {
4216 if (sysclk_divs[i] == -1)
4217 continue;
4218 for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
4219 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4220 index 31f1dd6541aa1..c3ff203c3f447 100644
4221 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4222 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
4223 @@ -500,14 +500,14 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
4224 .channels_min = SST_STEREO,
4225 .channels_max = SST_STEREO,
4226 .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
4227 - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
4228 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
4229 },
4230 .capture = {
4231 .stream_name = "Headset Capture",
4232 .channels_min = 1,
4233 .channels_max = 2,
4234 .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
4235 - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
4236 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
4237 },
4238 },
4239 {
4240 @@ -518,7 +518,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
4241 .channels_min = SST_STEREO,
4242 .channels_max = SST_STEREO,
4243 .rates = SNDRV_PCM_RATE_44100|SNDRV_PCM_RATE_48000,
4244 - .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
4245 + .formats = SNDRV_PCM_FMTBIT_S16_LE,
4246 },
4247 },
4248 {
4249 diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
4250 index 94b093b370e2f..06715b3d8c319 100644
4251 --- a/sound/soc/sof/intel/hda-dsp.c
4252 +++ b/sound/soc/sof/intel/hda-dsp.c
4253 @@ -192,10 +192,17 @@ bool hda_dsp_core_is_enabled(struct snd_sof_dev *sdev,
4254
4255 val = snd_sof_dsp_read(sdev, HDA_DSP_BAR, HDA_DSP_REG_ADSPCS);
4256
4257 - is_enable = ((val & HDA_DSP_ADSPCS_CPA_MASK(core_mask)) &&
4258 - (val & HDA_DSP_ADSPCS_SPA_MASK(core_mask)) &&
4259 - !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
4260 - !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask)));
4261 +#define MASK_IS_EQUAL(v, m, field) ({ \
4262 + u32 _m = field(m); \
4263 + ((v) & _m) == _m; \
4264 +})
4265 +
4266 + is_enable = MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_CPA_MASK) &&
4267 + MASK_IS_EQUAL(val, core_mask, HDA_DSP_ADSPCS_SPA_MASK) &&
4268 + !(val & HDA_DSP_ADSPCS_CRST_MASK(core_mask)) &&
4269 + !(val & HDA_DSP_ADSPCS_CSTALL_MASK(core_mask));
4270 +
4271 +#undef MASK_IS_EQUAL
4272
4273 dev_dbg(sdev->dev, "DSP core(s) enabled? %d : core_mask %x\n",
4274 is_enable, core_mask);
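
The hda-dsp.c hunk fixes an any-vs-all bit test: with more than one core in core_mask, (val & MASK(core_mask)) is true as soon as a single core's bit is set, while the new MASK_IS_EQUAL() compares against the whole mask so every requested core must report powered. The macro uses a ({ ... }) statement expression, a GCC/Clang extension common in kernel code:

#include <stdio.h>

#define MASK_IS_EQUAL(v, m) ({            \
	unsigned int _m = (m);            \
	((v) & _m) == _m;                 \
})

int main(void)
{
	unsigned int cpa = 0x1;      /* only core 0 reports powered */
	unsigned int mask = 0x3;     /* cores 0 and 1 were requested */

	printf("any-bit=%d all-bits=%d\n",
	       !!(cpa & mask),              /* old test: 1 (wrong) */
	       MASK_IS_EQUAL(cpa, mask));   /* new test: 0 */
	return 0;
}
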
4275 diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c
4276 index ee448d5e07a6d..c4021d6ac9dfb 100644
4277 --- a/sound/soc/sunxi/sun4i-codec.c
4278 +++ b/sound/soc/sunxi/sun4i-codec.c
4279 @@ -1364,6 +1364,7 @@ static struct snd_soc_card *sun4i_codec_create_card(struct device *dev)
4280 return ERR_PTR(-ENOMEM);
4281
4282 card->dev = dev;
4283 + card->owner = THIS_MODULE;
4284 card->name = "sun4i-codec";
4285 card->dapm_widgets = sun4i_codec_card_dapm_widgets;
4286 card->num_dapm_widgets = ARRAY_SIZE(sun4i_codec_card_dapm_widgets);
4287 @@ -1396,6 +1397,7 @@ static struct snd_soc_card *sun6i_codec_create_card(struct device *dev)
4288 return ERR_PTR(-ENOMEM);
4289
4290 card->dev = dev;
4291 + card->owner = THIS_MODULE;
4292 card->name = "A31 Audio Codec";
4293 card->dapm_widgets = sun6i_codec_card_dapm_widgets;
4294 card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
4295 @@ -1449,6 +1451,7 @@ static struct snd_soc_card *sun8i_a23_codec_create_card(struct device *dev)
4296 return ERR_PTR(-ENOMEM);
4297
4298 card->dev = dev;
4299 + card->owner = THIS_MODULE;
4300 card->name = "A23 Audio Codec";
4301 card->dapm_widgets = sun6i_codec_card_dapm_widgets;
4302 card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
4303 @@ -1487,6 +1490,7 @@ static struct snd_soc_card *sun8i_h3_codec_create_card(struct device *dev)
4304 return ERR_PTR(-ENOMEM);
4305
4306 card->dev = dev;
4307 + card->owner = THIS_MODULE;
4308 card->name = "H3 Audio Codec";
4309 card->dapm_widgets = sun6i_codec_card_dapm_widgets;
4310 card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
4311 @@ -1525,6 +1529,7 @@ static struct snd_soc_card *sun8i_v3s_codec_create_card(struct device *dev)
4312 return ERR_PTR(-ENOMEM);
4313
4314 card->dev = dev;
4315 + card->owner = THIS_MODULE;
4316 card->name = "V3s Audio Codec";
4317 card->dapm_widgets = sun6i_codec_card_dapm_widgets;
4318 card->num_dapm_widgets = ARRAY_SIZE(sun6i_codec_card_dapm_widgets);
4319 diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
4320 index 372ecb3e2c06f..0d524ef3606d7 100644
4321 --- a/tools/perf/builtin-inject.c
4322 +++ b/tools/perf/builtin-inject.c
4323 @@ -835,7 +835,7 @@ int cmd_inject(int argc, const char **argv)
4324 inject.tool.ordered_events = inject.sched_stat;
4325
4326 data.path = inject.input_name;
4327 - inject.session = perf_session__new(&data, true, &inject.tool);
4328 + inject.session = perf_session__new(&data, inject.output.is_pipe, &inject.tool);
4329 if (IS_ERR(inject.session))
4330 return PTR_ERR(inject.session);
4331