Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0188-5.4.89-all-fixes.patch

Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 98848 bytes
-sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index 450ebe1528062..95848875110ef 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 88
10 +SUBLEVEL = 89
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 @@ -436,7 +436,7 @@ LEX = flex
15 YACC = bison
16 AWK = awk
17 INSTALLKERNEL := installkernel
18 -DEPMOD = /sbin/depmod
19 +DEPMOD = depmod
20 PERL = perl
21 PYTHON = python
22 PYTHON3 = python3
23 diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
24 index 5229eeac8946d..4def51c12e1bf 100644
25 --- a/arch/powerpc/kernel/vmlinux.lds.S
26 +++ b/arch/powerpc/kernel/vmlinux.lds.S
27 @@ -98,7 +98,7 @@ SECTIONS
28 ALIGN_FUNCTION();
29 #endif
30 /* careful! __ftr_alt_* sections need to be close to .text */
31 - *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
32 + *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
33 #ifdef CONFIG_PPC64
34 *(.tramp.ftrace.text);
35 #endif
36 diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
37 index aa5c064a6a227..4ea906fe1c351 100644
38 --- a/arch/x86/kernel/cpu/mtrr/generic.c
39 +++ b/arch/x86/kernel/cpu/mtrr/generic.c
40 @@ -167,9 +167,6 @@ static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
41 *repeat = 0;
42 *uniform = 1;
43
44 - /* Make end inclusive instead of exclusive */
45 - end--;
46 -
47 prev_match = MTRR_TYPE_INVALID;
48 for (i = 0; i < num_var_ranges; ++i) {
49 unsigned short start_state, end_state, inclusive;
50 @@ -261,6 +258,9 @@ u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
51 int repeat;
52 u64 partial_end;
53
54 + /* Make end inclusive instead of exclusive */
55 + end--;
56 +
57 if (!mtrr_state_set)
58 return MTRR_TYPE_INVALID;
59
60 diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
61 index 6f2208cf30df3..ea9945a05b831 100644
62 --- a/arch/x86/kvm/mmu.h
63 +++ b/arch/x86/kvm/mmu.h
64 @@ -48,7 +48,7 @@ static inline u64 rsvd_bits(int s, int e)
65 if (e < s)
66 return 0;
67
68 - return ((1ULL << (e - s + 1)) - 1) << s;
69 + return ((2ULL << (e - s)) - 1) << s;
70 }
71
72 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask);
73 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
74 index 7bd2c3a52297f..7982f13807aa0 100644
75 --- a/arch/x86/mm/pgtable.c
76 +++ b/arch/x86/mm/pgtable.c
77 @@ -826,6 +826,8 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
78 }
79
80 free_page((unsigned long)pmd_sv);
81 +
82 + pgtable_pmd_page_dtor(virt_to_page(pmd));
83 free_page((unsigned long)pmd);
84
85 return 1;
86 diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
87 index 5154e280ada22..08baa10a254bf 100644
88 --- a/crypto/asymmetric_keys/asym_tpm.c
89 +++ b/crypto/asymmetric_keys/asym_tpm.c
90 @@ -370,7 +370,7 @@ static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
91 memcpy(cur, e, sizeof(e));
92 cur += sizeof(e);
93 /* Zero parameters to satisfy set_pub_key ABI. */
94 - memset(cur, 0, SETKEY_PARAMS_SIZE);
95 + memzero_explicit(cur, SETKEY_PARAMS_SIZE);
96
97 return cur - buf;
98 }
99 diff --git a/crypto/ecdh.c b/crypto/ecdh.c
100 index efa4ee72301f8..46570b517175a 100644
101 --- a/crypto/ecdh.c
102 +++ b/crypto/ecdh.c
103 @@ -39,7 +39,8 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
104 struct ecdh params;
105 unsigned int ndigits;
106
107 - if (crypto_ecdh_decode_key(buf, len, &params) < 0)
108 + if (crypto_ecdh_decode_key(buf, len, &params) < 0 ||
109 + params.key_size > sizeof(ctx->private_key))
110 return -EINVAL;
111
112 ndigits = ecdh_supported_curve(params.curve_id);
113 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
114 index df51680e89319..363073e7b6538 100644
115 --- a/drivers/atm/idt77252.c
116 +++ b/drivers/atm/idt77252.c
117 @@ -3606,7 +3606,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
118
119 if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
120 printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
121 - return err;
122 + goto err_out_disable_pdev;
123 }
124
125 card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
126 diff --git a/drivers/base/core.c b/drivers/base/core.c
127 index ddfbd62d8bfc2..c5edb00938f69 100644
128 --- a/drivers/base/core.c
129 +++ b/drivers/base/core.c
130 @@ -3414,7 +3414,7 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
131 if (fwnode_is_primary(fn)) {
132 dev->fwnode = fn->secondary;
133 if (!(parent && fn == parent->fwnode))
134 - fn->secondary = ERR_PTR(-ENODEV);
135 + fn->secondary = NULL;
136 } else {
137 dev->fwnode = NULL;
138 }
139 diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
140 index 17b0f1b793ec8..e11af747395dd 100644
141 --- a/drivers/bluetooth/hci_h5.c
142 +++ b/drivers/bluetooth/hci_h5.c
143 @@ -250,12 +250,8 @@ static int h5_close(struct hci_uart *hu)
144 if (h5->vnd && h5->vnd->close)
145 h5->vnd->close(h5);
146
147 - if (hu->serdev)
148 - serdev_device_close(hu->serdev);
149 -
150 - kfree_skb(h5->rx_skb);
151 - kfree(h5);
152 - h5 = NULL;
153 + if (!hu->serdev)
154 + kfree(h5);
155
156 return 0;
157 }
158 diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
159 index f6df6ef1b0fbe..758de0e9b2ddc 100644
160 --- a/drivers/dma-buf/dma-buf.c
161 +++ b/drivers/dma-buf/dma-buf.c
162 @@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
163
164 dmabuf->ops->release(dmabuf);
165
166 - mutex_lock(&db_list.lock);
167 - list_del(&dmabuf->list_node);
168 - mutex_unlock(&db_list.lock);
169 -
170 if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
171 dma_resv_fini(dmabuf->resv);
172
173 @@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
174 kfree(dmabuf);
175 }
176
177 +static int dma_buf_file_release(struct inode *inode, struct file *file)
178 +{
179 + struct dma_buf *dmabuf;
180 +
181 + if (!is_dma_buf_file(file))
182 + return -EINVAL;
183 +
184 + dmabuf = file->private_data;
185 +
186 + mutex_lock(&db_list.lock);
187 + list_del(&dmabuf->list_node);
188 + mutex_unlock(&db_list.lock);
189 +
190 + return 0;
191 +}
192 +
193 static const struct dentry_operations dma_buf_dentry_ops = {
194 .d_dname = dmabuffs_dname,
195 .d_release = dma_buf_release,
196 @@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
197 }
198
199 static const struct file_operations dma_buf_fops = {
200 + .release = dma_buf_file_release,
201 .mmap = dma_buf_mmap_internal,
202 .llseek = dma_buf_llseek,
203 .poll = dma_buf_poll,
204 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
205 index 7f7d59445faed..198a91c765314 100644
206 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
207 +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
208 @@ -936,7 +936,7 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
209 GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
210 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
211
212 - __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
213 + i915_gem_object_flush_map(obj);
214 i915_gem_object_unpin_map(obj);
215
216 intel_gt_chipset_flush(cache->rq->engine->gt);
217 @@ -1163,6 +1163,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
218 goto out_pool;
219 }
220
221 + memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
222 +
223 batch = i915_vma_instance(pool->obj, vma->vm, NULL);
224 if (IS_ERR(batch)) {
225 err = PTR_ERR(batch);
226 diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
227 index 80bc3bf82f4d7..775fd34132abb 100644
228 --- a/drivers/ide/ide-atapi.c
229 +++ b/drivers/ide/ide-atapi.c
230 @@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
231 sense_rq->rq_disk = rq->rq_disk;
232 sense_rq->cmd_flags = REQ_OP_DRV_IN;
233 ide_req(sense_rq)->type = ATA_PRIV_SENSE;
234 - sense_rq->rq_flags |= RQF_PREEMPT;
235
236 req->cmd[0] = GPCMD_REQUEST_SENSE;
237 req->cmd[4] = cmd_len;
238 diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
239 index b137f27a34d58..b32a013d827a0 100644
240 --- a/drivers/ide/ide-io.c
241 +++ b/drivers/ide/ide-io.c
242 @@ -512,11 +512,6 @@ repeat:
243 * above to return us whatever is in the queue. Since we call
244 * ide_do_request() ourselves, we end up taking requests while
245 * the queue is blocked...
246 - *
247 - * We let requests forced at head of queue with ide-preempt
248 - * though. I hope that doesn't happen too much, hopefully not
249 - * unless the subdriver triggers such a thing in its own PM
250 - * state machine.
251 */
252 if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
253 ata_pm_request(rq) == 0 &&
254 diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
255 index ef5d5cc529693..0d9b3fa7bd94e 100644
256 --- a/drivers/net/dsa/lantiq_gswip.c
257 +++ b/drivers/net/dsa/lantiq_gswip.c
258 @@ -92,9 +92,7 @@
259 GSWIP_MDIO_PHY_FDUP_MASK)
260
261 /* GSWIP MII Registers */
262 -#define GSWIP_MII_CFG0 0x00
263 -#define GSWIP_MII_CFG1 0x02
264 -#define GSWIP_MII_CFG5 0x04
265 +#define GSWIP_MII_CFGp(p) (0x2 * (p))
266 #define GSWIP_MII_CFG_EN BIT(14)
267 #define GSWIP_MII_CFG_LDCLKDIS BIT(12)
268 #define GSWIP_MII_CFG_MODE_MIIP 0x0
269 @@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
270 static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
271 int port)
272 {
273 - switch (port) {
274 - case 0:
275 - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
276 - break;
277 - case 1:
278 - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
279 - break;
280 - case 5:
281 - gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
282 - break;
283 - }
284 + /* There's no MII_CFG register for the CPU port */
285 + if (!dsa_is_cpu_port(priv->ds, port))
286 + gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
287 }
288
289 static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
290 @@ -806,9 +796,8 @@ static int gswip_setup(struct dsa_switch *ds)
291 gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
292
293 /* Disable the xMII link */
294 - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
295 - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
296 - gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
297 + for (i = 0; i < priv->hw_info->max_ports; i++)
298 + gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
299
300 /* enable special tag insertion on cpu port */
301 gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
302 @@ -1522,9 +1511,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
303 {
304 struct gswip_priv *priv = ds->priv;
305
306 - /* Enable the xMII interface only for the external PHY */
307 - if (interface != PHY_INTERFACE_MODE_INTERNAL)
308 - gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
309 + gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
310 }
311
312 static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
313 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
314 index 71eb8914e620b..470d12e308814 100644
315 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
316 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
317 @@ -2520,6 +2520,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
318 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
319 dev->hw_features |= dev->features;
320 dev->vlan_features |= dev->features;
321 + dev->max_mtu = UMAC_MAX_MTU_SIZE;
322
323 /* Request the WOL interrupt and advertise suspend if available */
324 priv->wol_irq_disabled = 1;
325 diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
326 index ea4f17f5cce77..590d20ca891cb 100644
327 --- a/drivers/net/ethernet/ethoc.c
328 +++ b/drivers/net/ethernet/ethoc.c
329 @@ -1207,7 +1207,7 @@ static int ethoc_probe(struct platform_device *pdev)
330 ret = mdiobus_register(priv->mdio);
331 if (ret) {
332 dev_err(&netdev->dev, "failed to register MDIO bus\n");
333 - goto free2;
334 + goto free3;
335 }
336
337 ret = ethoc_mdio_probe(netdev);
338 @@ -1239,6 +1239,7 @@ error2:
339 netif_napi_del(&priv->napi);
340 error:
341 mdiobus_unregister(priv->mdio);
342 +free3:
343 mdiobus_free(priv->mdio);
344 free2:
345 clk_disable_unprepare(priv->clk);
346 diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
347 index d3b8ce734c1b9..beaf35c585d28 100644
348 --- a/drivers/net/ethernet/freescale/ucc_geth.c
349 +++ b/drivers/net/ethernet/freescale/ucc_geth.c
350 @@ -3890,6 +3890,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
351 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
352 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
353 dev->mtu = 1500;
354 + dev->max_mtu = 1518;
355
356 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
357 ugeth->phy_interface = phy_interface;
358 @@ -3935,12 +3936,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
359 struct device_node *np = ofdev->dev.of_node;
360
361 unregister_netdev(dev);
362 - free_netdev(dev);
363 ucc_geth_memclean(ugeth);
364 if (of_phy_is_fixed_link(np))
365 of_phy_deregister_fixed_link(np);
366 of_node_put(ugeth->ug_info->tbi_node);
367 of_node_put(ugeth->ug_info->phy_node);
368 + free_netdev(dev);
369
370 return 0;
371 }
372 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
373 index 717fccc2efba9..78b2f4e01bd8e 100644
374 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
375 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
376 @@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
377 /* for mutl buffer*/
378 new_skb = skb_copy(skb, GFP_ATOMIC);
379 dev_kfree_skb_any(skb);
380 + if (!new_skb) {
381 + netdev_err(ndev, "skb alloc failed\n");
382 + return;
383 + }
384 skb = new_skb;
385
386 check_ok = 0;
387 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
388 index 47b8ce7822c09..9040340fad198 100644
389 --- a/drivers/net/ethernet/ibm/ibmvnic.c
390 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
391 @@ -2152,8 +2152,7 @@ static void __ibmvnic_reset(struct work_struct *work)
392 rc = do_hard_reset(adapter, rwi, reset_state);
393 rtnl_unlock();
394 }
395 - } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
396 - adapter->from_passive_init)) {
397 + } else {
398 rc = do_reset(adapter, rwi, reset_state);
399 }
400 kfree(rwi);
401 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
402 index cfe99bae8e362..678e4190b8a8c 100644
403 --- a/drivers/net/ethernet/intel/i40e/i40e.h
404 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
405 @@ -129,6 +129,7 @@ enum i40e_state_t {
406 __I40E_RESET_INTR_RECEIVED,
407 __I40E_REINIT_REQUESTED,
408 __I40E_PF_RESET_REQUESTED,
409 + __I40E_PF_RESET_AND_REBUILD_REQUESTED,
410 __I40E_CORE_RESET_REQUESTED,
411 __I40E_GLOBAL_RESET_REQUESTED,
412 __I40E_EMP_RESET_INTR_RECEIVED,
413 @@ -156,6 +157,8 @@ enum i40e_state_t {
414 };
415
416 #define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
417 +#define I40E_PF_RESET_AND_REBUILD_FLAG \
418 + BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
419
420 /* VSI state flags */
421 enum i40e_vsi_state_t {
422 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
423 index 2b4327416457d..c19b45a90fcd2 100644
424 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
425 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
426 @@ -44,6 +44,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
427 static void i40e_determine_queue_usage(struct i40e_pf *pf);
428 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
429 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
430 +static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
431 + bool lock_acquired);
432 static int i40e_reset(struct i40e_pf *pf);
433 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
434 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
435 @@ -8484,6 +8486,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
436 "FW LLDP is disabled\n" :
437 "FW LLDP is enabled\n");
438
439 + } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
440 + /* Request a PF Reset
441 + *
442 + * Resets PF and reinitializes PFs VSI.
443 + */
444 + i40e_prep_for_reset(pf, lock_acquired);
445 + i40e_reset_and_rebuild(pf, true, lock_acquired);
446 +
447 } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
448 int v;
449
450 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
451 index 09ff3f335ffa6..c952212900fcf 100644
452 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
453 +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
454 @@ -1704,7 +1704,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
455 if (num_vfs) {
456 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
457 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
458 - i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
459 + i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
460 }
461 ret = i40e_pci_sriov_enable(pdev, num_vfs);
462 goto sriov_configure_out;
463 @@ -1713,7 +1713,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
464 if (!pci_vfs_assigned(pf->pdev)) {
465 i40e_free_vfs(pf);
466 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
467 - i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
468 + i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
469 } else {
470 dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
471 ret = -EINVAL;
472 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
473 index cd95d6af8fc1b..56e6bec9af797 100644
474 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
475 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
476 @@ -1844,11 +1844,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
477 netif_tx_stop_all_queues(netdev);
478 if (CLIENT_ALLOWED(adapter)) {
479 err = iavf_lan_add_device(adapter);
480 - if (err) {
481 - rtnl_unlock();
482 + if (err)
483 dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
484 err);
485 - }
486 }
487 dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
488 if (netdev->features & NETIF_F_GRO)
489 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
490 index ccb2abd18d6c7..94e3f8b869be4 100644
491 --- a/drivers/net/ethernet/marvell/mvneta.c
492 +++ b/drivers/net/ethernet/marvell/mvneta.c
493 @@ -4694,7 +4694,7 @@ static int mvneta_probe(struct platform_device *pdev)
494 err = mvneta_port_power_up(pp, pp->phy_interface);
495 if (err < 0) {
496 dev_err(&pdev->dev, "can't power up port\n");
497 - return err;
498 + goto err_netdev;
499 }
500
501 /* Armada3700 network controller does not support per-cpu
502 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
503 index 931d1a56b79ca..8827ab4b4932e 100644
504 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
505 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
506 @@ -1129,7 +1129,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
507
508 regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
509 if (port->gop_id == 2)
510 - val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
511 + val |= GENCONF_CTRL0_PORT0_RGMII;
512 else if (port->gop_id == 3)
513 val |= GENCONF_CTRL0_PORT1_RGMII_MII;
514 regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
515 @@ -2161,17 +2161,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
516 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
517 struct mvpp2_tx_queue *txq)
518 {
519 - unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
520 + unsigned int thread;
521 u32 val;
522
523 if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
524 txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
525
526 val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
527 - mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
528 - mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
529 -
530 - put_cpu();
531 + /* PKT-coalescing registers are per-queue + per-thread */
532 + for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
533 + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
534 + mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
535 + }
536 }
537
538 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
539 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
540 index 5692c6087bbb0..a30eb90ba3d28 100644
541 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
542 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
543 @@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
544 return -EINVAL;
545 }
546
547 +/* Drop flow control pause frames */
548 +static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
549 +{
550 + unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
551 + struct mvpp2_prs_entry pe;
552 + unsigned int len;
553 +
554 + memset(&pe, 0, sizeof(pe));
555 +
556 + /* For all ports - drop flow control frames */
557 + pe.index = MVPP2_PE_FC_DROP;
558 + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
559 +
560 + /* Set match on DA */
561 + len = ETH_ALEN;
562 + while (len--)
563 + mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
564 +
565 + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
566 + MVPP2_PRS_RI_DROP_MASK);
567 +
568 + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
569 + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
570 +
571 + /* Mask all ports */
572 + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
573 +
574 + /* Update shadow table and hw entry */
575 + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
576 + mvpp2_prs_hw_write(priv, &pe);
577 +}
578 +
579 /* Enable/disable dropping all mac da's */
580 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
581 {
582 @@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
583 mvpp2_prs_hw_write(priv, &pe);
584
585 /* Create dummy entries for drop all and promiscuous modes */
586 + mvpp2_prs_drop_fc(priv);
587 mvpp2_prs_mac_drop_all_set(priv, 0, false);
588 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
589 mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
590 @@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
591 mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
592 mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
593 MVPP2_PRS_RI_L3_PROTO_MASK);
594 - /* Skip eth_type + 4 bytes of IPv6 header */
595 - mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
596 + /* Jump to DIP of IPV6 header */
597 + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
598 + MVPP2_MAX_L3_ADDR_SIZE,
599 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
600 /* Set L3 offset */
601 mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
602 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
603 index e22f6c85d3803..4b68dd3747338 100644
604 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
605 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
606 @@ -129,7 +129,7 @@
607 #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
608 #define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
609 #define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
610 -/* reserved */
611 +#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
612 #define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
613 #define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
614 #define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
615 diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
616 index ab6663d94f424..c818d24a8b24f 100644
617 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
618 +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
619 @@ -257,7 +257,7 @@ void ionic_rx_fill(struct ionic_queue *q)
620 unsigned int len;
621 unsigned int i;
622
623 - len = netdev->mtu + ETH_HLEN;
624 + len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
625
626 for (i = ionic_q_space_avail(q); i; i--) {
627 skb = ionic_rx_skb_alloc(q, len, &dma_addr);
628 diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
629 index 004c0bfec41d7..f310a94e04898 100644
630 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
631 +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
632 @@ -1737,6 +1737,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
633 ntohs(udp_hdr(skb)->dest) != gnv_port))
634 return features & ~(NETIF_F_CSUM_MASK |
635 NETIF_F_GSO_MASK);
636 + } else if (l4_proto == IPPROTO_IPIP) {
637 + /* IPIP tunnels are unknown to the device or at least unsupported natively,
638 + * offloads for them can't be done trivially, so disable them for such skb.
639 + */
640 + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
641 }
642 }
643
644 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
645 index fd5adb0c54d29..366ca1b5da5cc 100644
646 --- a/drivers/net/ethernet/realtek/r8169_main.c
647 +++ b/drivers/net/ethernet/realtek/r8169_main.c
648 @@ -3958,7 +3958,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
649 }
650
651 switch (tp->mac_version) {
652 - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
653 + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
654 + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
655 case RTL_GIGA_MAC_VER_37:
656 case RTL_GIGA_MAC_VER_39:
657 case RTL_GIGA_MAC_VER_43:
658 @@ -3987,7 +3988,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
659 static void rtl_pll_power_up(struct rtl8169_private *tp)
660 {
661 switch (tp->mac_version) {
662 - case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
663 + case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
664 + case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
665 case RTL_GIGA_MAC_VER_37:
666 case RTL_GIGA_MAC_VER_39:
667 case RTL_GIGA_MAC_VER_43:
668 diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
669 index 61136428e2c0e..26cfe3f7ed8df 100644
670 --- a/drivers/net/ethernet/ti/cpts.c
671 +++ b/drivers/net/ethernet/ti/cpts.c
672 @@ -485,6 +485,7 @@ void cpts_unregister(struct cpts *cpts)
673
674 ptp_clock_unregister(cpts->clock);
675 cpts->clock = NULL;
676 + cpts->phc_index = -1;
677
678 cpts_write32(cpts, 0, int_enable);
679 cpts_write32(cpts, 0, control);
680 @@ -667,6 +668,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
681 cpts->cc.read = cpts_systim_read;
682 cpts->cc.mask = CLOCKSOURCE_MASK(32);
683 cpts->info = cpts_info;
684 + cpts->phc_index = -1;
685
686 cpts_calc_mult_shift(cpts);
687 /* save cc.mult original value as it can be modified
688 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
689 index e72d273999834..84e779f93f0a4 100644
690 --- a/drivers/net/tun.c
691 +++ b/drivers/net/tun.c
692 @@ -1469,7 +1469,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
693 int i;
694
695 if (it->nr_segs > MAX_SKB_FRAGS + 1)
696 - return ERR_PTR(-ENOMEM);
697 + return ERR_PTR(-EMSGSIZE);
698
699 local_bh_disable();
700 skb = napi_get_frags(&tfile->napi);
701 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
702 index c2c82e6391b4f..d407489cec904 100644
703 --- a/drivers/net/usb/cdc_ncm.c
704 +++ b/drivers/net/usb/cdc_ncm.c
705 @@ -1625,9 +1625,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
706 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
707 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
708 */
709 - netif_info(dev, link, dev->net,
710 - "network connection: %sconnected\n",
711 - !!event->wValue ? "" : "dis");
712 usbnet_link_change(dev, !!event->wValue, 0);
713 break;
714
715 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
716 index 74de621fa3504..b0d748a614a9e 100644
717 --- a/drivers/net/usb/qmi_wwan.c
718 +++ b/drivers/net/usb/qmi_wwan.c
719 @@ -1058,6 +1058,7 @@ static const struct usb_device_id products[] = {
720 {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
721 {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
722 {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
723 + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
724 {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
725
726 /* 3. Combined interface devices matching on interface number */
727 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
728 index 7cc8f405be1ad..0ef85819665c8 100644
729 --- a/drivers/net/virtio_net.c
730 +++ b/drivers/net/virtio_net.c
731 @@ -2072,14 +2072,16 @@ static int virtnet_set_channels(struct net_device *dev,
732
733 get_online_cpus();
734 err = _virtnet_set_queues(vi, queue_pairs);
735 - if (!err) {
736 - netif_set_real_num_tx_queues(dev, queue_pairs);
737 - netif_set_real_num_rx_queues(dev, queue_pairs);
738 -
739 - virtnet_set_affinity(vi);
740 + if (err) {
741 + put_online_cpus();
742 + goto err;
743 }
744 + virtnet_set_affinity(vi);
745 put_online_cpus();
746
747 + netif_set_real_num_tx_queues(dev, queue_pairs);
748 + netif_set_real_num_rx_queues(dev, queue_pairs);
749 + err:
750 return err;
751 }
752
753 diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
754 index 64f8556513369..261b53fc8e04c 100644
755 --- a/drivers/net/wan/hdlc_ppp.c
756 +++ b/drivers/net/wan/hdlc_ppp.c
757 @@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
758 unsigned long flags;
759
760 spin_lock_irqsave(&ppp->lock, flags);
761 + /* mod_timer could be called after we entered this function but
762 + * before we got the lock.
763 + */
764 + if (timer_pending(&proto->timer)) {
765 + spin_unlock_irqrestore(&ppp->lock, flags);
766 + return;
767 + }
768 switch (proto->state) {
769 case STOPPING:
770 case REQ_SENT:
771 diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
772 index f3d5b1bbd5aa7..c37dd15d16d24 100644
773 --- a/drivers/scsi/scsi_transport_spi.c
774 +++ b/drivers/scsi/scsi_transport_spi.c
775 @@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
776 sshdr = &sshdr_tmp;
777
778 for(i = 0; i < DV_RETRIES; i++) {
779 + /*
780 + * The purpose of the RQF_PM flag below is to bypass the
781 + * SDEV_QUIESCE state.
782 + */
783 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
784 sshdr, DV_TIMEOUT, /* retries */ 1,
785 REQ_FAILFAST_DEV |
786 REQ_FAILFAST_TRANSPORT |
787 REQ_FAILFAST_DRIVER,
788 - 0, NULL);
789 + RQF_PM, NULL);
790 if (driver_byte(result) != DRIVER_SENSE ||
791 sshdr->sense_key != UNIT_ATTENTION)
792 break;
793 @@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
794 */
795 lock_system_sleep();
796
797 + if (scsi_autopm_get_device(sdev))
798 + goto unlock_system_sleep;
799 +
800 if (unlikely(spi_dv_in_progress(starget)))
801 - goto unlock;
802 + goto put_autopm;
803
804 if (unlikely(scsi_device_get(sdev)))
805 - goto unlock;
806 + goto put_autopm;
807
808 spi_dv_in_progress(starget) = 1;
809
810 buffer = kzalloc(len, GFP_KERNEL);
811
812 if (unlikely(!buffer))
813 - goto out_put;
814 + goto put_sdev;
815
816 /* We need to verify that the actual device will quiesce; the
817 * later target quiesce is just a nice to have */
818 if (unlikely(scsi_device_quiesce(sdev)))
819 - goto out_free;
820 + goto free_buffer;
821
822 scsi_target_quiesce(starget);
823
824 @@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
825
826 spi_initial_dv(starget) = 1;
827
828 - out_free:
829 +free_buffer:
830 kfree(buffer);
831 - out_put:
832 +
833 +put_sdev:
834 spi_dv_in_progress(starget) = 0;
835 scsi_device_put(sdev);
836 -unlock:
837 +put_autopm:
838 + scsi_autopm_put_device(sdev);
839 +
840 +unlock_system_sleep:
841 unlock_system_sleep();
842 }
843 EXPORT_SYMBOL(spi_dv_device);
844 diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
845 index 3b19de3ae9a30..e4ba2445ff56f 100644
846 --- a/drivers/scsi/ufs/ufshcd-pci.c
847 +++ b/drivers/scsi/ufs/ufshcd-pci.c
848 @@ -96,6 +96,30 @@ static int ufshcd_pci_resume(struct device *dev)
849 {
850 return ufshcd_system_resume(dev_get_drvdata(dev));
851 }
852 +
853 +/**
854 + * ufshcd_pci_poweroff - suspend-to-disk poweroff function
855 + * @dev: pointer to PCI device handle
856 + *
857 + * Returns 0 if successful
858 + * Returns non-zero otherwise
859 + */
860 +static int ufshcd_pci_poweroff(struct device *dev)
861 +{
862 + struct ufs_hba *hba = dev_get_drvdata(dev);
863 + int spm_lvl = hba->spm_lvl;
864 + int ret;
865 +
866 + /*
867 + * For poweroff we need to set the UFS device to PowerDown mode.
868 + * Force spm_lvl to ensure that.
869 + */
870 + hba->spm_lvl = 5;
871 + ret = ufshcd_system_suspend(hba);
872 + hba->spm_lvl = spm_lvl;
873 + return ret;
874 +}
875 +
876 #endif /* !CONFIG_PM_SLEEP */
877
878 #ifdef CONFIG_PM
879 @@ -190,8 +214,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
880 }
881
882 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
883 - SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
884 - ufshcd_pci_resume)
885 +#ifdef CONFIG_PM_SLEEP
886 + .suspend = ufshcd_pci_suspend,
887 + .resume = ufshcd_pci_resume,
888 + .freeze = ufshcd_pci_suspend,
889 + .thaw = ufshcd_pci_resume,
890 + .poweroff = ufshcd_pci_poweroff,
891 + .restore = ufshcd_pci_resume,
892 +#endif
893 SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
894 ufshcd_pci_runtime_resume,
895 ufshcd_pci_runtime_idle)
896 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
897 index 675e16e61ebdd..b888117f4ecd3 100644
898 --- a/drivers/scsi/ufs/ufshcd.c
899 +++ b/drivers/scsi/ufs/ufshcd.c
900 @@ -3593,7 +3593,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
901 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
902 if (ret)
903 dev_err(hba->dev,
904 - "dme-reset: error code %d\n", ret);
905 + "dme-enable: error code %d\n", ret);
906
907 return ret;
908 }
909 diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
910 index d964642d95a3c..bf2772af1045f 100644
911 --- a/drivers/staging/mt7621-dma/mtk-hsdma.c
912 +++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
913 @@ -714,7 +714,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
914 ret = dma_async_device_register(dd);
915 if (ret) {
916 dev_err(&pdev->dev, "failed to register dma device\n");
917 - return ret;
918 + goto err_uninit_hsdma;
919 }
920
921 ret = of_dma_controller_register(pdev->dev.of_node,
922 @@ -730,6 +730,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
923
924 err_unregister:
925 dma_async_device_unregister(dd);
926 +err_uninit_hsdma:
927 + mtk_hsdma_uninit(hsdma);
928 return ret;
929 }
930
931 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
932 index 9d24e85b08631..596ad3edec9c0 100644
933 --- a/drivers/target/target_core_xcopy.c
934 +++ b/drivers/target/target_core_xcopy.c
935 @@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
936 return 0;
937 }
938
939 -struct xcopy_dev_search_info {
940 - const unsigned char *dev_wwn;
941 - struct se_device *found_dev;
942 -};
943 -
944 +/**
945 + * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
946 + *
947 + * @se_dev: device being considered for match
948 + * @dev_wwn: XCOPY requested NAA dev_wwn
949 + * @return: 1 on match, 0 on no-match
950 + */
951 static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
952 - void *data)
953 + const unsigned char *dev_wwn)
954 {
955 - struct xcopy_dev_search_info *info = data;
956 unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
957 int rc;
958
959 - if (!se_dev->dev_attrib.emulate_3pc)
960 + if (!se_dev->dev_attrib.emulate_3pc) {
961 + pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
962 return 0;
963 + }
964
965 memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
966 target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
967
968 - rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
969 - if (rc != 0)
970 - return 0;
971 -
972 - info->found_dev = se_dev;
973 - pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
974 -
975 - rc = target_depend_item(&se_dev->dev_group.cg_item);
976 + rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
977 if (rc != 0) {
978 - pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
979 - rc, se_dev);
980 - return rc;
981 + pr_debug("XCOPY: skip non-matching: %*ph\n",
982 + XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
983 + return 0;
984 }
985 + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
986
987 - pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
988 - se_dev, &se_dev->dev_group);
989 return 1;
990 }
991
992 -static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
993 - struct se_device **found_dev)
994 +static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
995 + const unsigned char *dev_wwn,
996 + struct se_device **_found_dev,
997 + struct percpu_ref **_found_lun_ref)
998 {
999 - struct xcopy_dev_search_info info;
1000 - int ret;
1001 -
1002 - memset(&info, 0, sizeof(info));
1003 - info.dev_wwn = dev_wwn;
1004 -
1005 - ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
1006 - if (ret == 1) {
1007 - *found_dev = info.found_dev;
1008 - return 0;
1009 - } else {
1010 - pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
1011 - return -EINVAL;
1012 + struct se_dev_entry *deve;
1013 + struct se_node_acl *nacl;
1014 + struct se_lun *this_lun = NULL;
1015 + struct se_device *found_dev = NULL;
1016 +
1017 + /* cmd with NULL sess indicates no associated $FABRIC_MOD */
1018 + if (!sess)
1019 + goto err_out;
1020 +
1021 + pr_debug("XCOPY 0xe4: searching for: %*ph\n",
1022 + XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
1023 +
1024 + nacl = sess->se_node_acl;
1025 + rcu_read_lock();
1026 + hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
1027 + struct se_device *this_dev;
1028 + int rc;
1029 +
1030 + this_lun = rcu_dereference(deve->se_lun);
1031 + this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
1032 +
1033 + rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
1034 + if (rc) {
1035 + if (percpu_ref_tryget_live(&this_lun->lun_ref))
1036 + found_dev = this_dev;
1037 + break;
1038 + }
1039 }
1040 + rcu_read_unlock();
1041 + if (found_dev == NULL)
1042 + goto err_out;
1043 +
1044 + pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
1045 + found_dev, &found_dev->dev_group);
1046 + *_found_dev = found_dev;
1047 + *_found_lun_ref = &this_lun->lun_ref;
1048 + return 0;
1049 +err_out:
1050 + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
1051 + return -EINVAL;
1052 }
1053
1054 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
1055 @@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
1056
1057 switch (xop->op_origin) {
1058 case XCOL_SOURCE_RECV_OP:
1059 - rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
1060 - &xop->dst_dev);
1061 + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
1062 + xop->dst_tid_wwn,
1063 + &xop->dst_dev,
1064 + &xop->remote_lun_ref);
1065 break;
1066 case XCOL_DEST_RECV_OP:
1067 - rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
1068 - &xop->src_dev);
1069 + rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
1070 + xop->src_tid_wwn,
1071 + &xop->src_dev,
1072 + &xop->remote_lun_ref);
1073 break;
1074 default:
1075 pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
1076 @@ -396,18 +423,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
1077
1078 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
1079 {
1080 - struct se_device *remote_dev;
1081 -
1082 if (xop->op_origin == XCOL_SOURCE_RECV_OP)
1083 - remote_dev = xop->dst_dev;
1084 + pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
1085 else
1086 - remote_dev = xop->src_dev;
1087 -
1088 - pr_debug("Calling configfs_undepend_item for"
1089 - " remote_dev: %p remote_dev->dev_group: %p\n",
1090 - remote_dev, &remote_dev->dev_group.cg_item);
1091 + pr_debug("putting src lun_ref for %p\n", xop->src_dev);
1092
1093 - target_undepend_item(&remote_dev->dev_group.cg_item);
1094 + percpu_ref_put(xop->remote_lun_ref);
1095 }
1096
1097 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
1098 diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
1099 index 26ba4c3c9cffd..974bc1e19ff2b 100644
1100 --- a/drivers/target/target_core_xcopy.h
1101 +++ b/drivers/target/target_core_xcopy.h
1102 @@ -29,6 +29,7 @@ struct xcopy_op {
1103 struct se_device *dst_dev;
1104 unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
1105 unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
1106 + struct percpu_ref *remote_lun_ref;
1107
1108 sector_t src_lba;
1109 sector_t dst_lba;
1110 diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
1111 index c08bcce04276e..85561b3194a16 100644
1112 --- a/drivers/usb/chipidea/ci_hdrc_imx.c
1113 +++ b/drivers/usb/chipidea/ci_hdrc_imx.c
1114 @@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
1115 misc_pdev = of_find_device_by_node(args.np);
1116 of_node_put(args.np);
1117
1118 - if (!misc_pdev || !platform_get_drvdata(misc_pdev))
1119 + if (!misc_pdev)
1120 return ERR_PTR(-EPROBE_DEFER);
1121
1122 + if (!platform_get_drvdata(misc_pdev)) {
1123 + put_device(&misc_pdev->dev);
1124 + return ERR_PTR(-EPROBE_DEFER);
1125 + }
1126 data->dev = &misc_pdev->dev;
1127
1128 /*
1129 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1130 index 16c98e718001b..681374a3b3684 100644
1131 --- a/drivers/usb/class/cdc-acm.c
1132 +++ b/drivers/usb/class/cdc-acm.c
1133 @@ -1907,6 +1907,10 @@ static const struct usb_device_id acm_ids[] = {
1134 { USB_DEVICE(0x04d8, 0x0083), /* Bootloader mode */
1135 .driver_info = IGNORE_DEVICE,
1136 },
1137 +
1138 + { USB_DEVICE(0x04d8, 0xf58b),
1139 + .driver_info = IGNORE_DEVICE,
1140 + },
1141 #endif
1142
1143 /*Samsung phone in firmware update mode */
1144 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1145 index 9875e2fe33db2..fc1a219ad0a76 100644
1146 --- a/drivers/usb/class/cdc-wdm.c
1147 +++ b/drivers/usb/class/cdc-wdm.c
1148 @@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
1149 if (!desc->resp_count || !--desc->resp_count)
1150 goto out;
1151
1152 + if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
1153 + rv = -ENODEV;
1154 + goto out;
1155 + }
1156 + if (test_bit(WDM_RESETTING, &desc->flags)) {
1157 + rv = -EIO;
1158 + goto out;
1159 + }
1160 +
1161 set_bit(WDM_RESPONDING, &desc->flags);
1162 spin_unlock_irq(&desc->iuspin);
1163 rv = usb_submit_urb(desc->response, GFP_KERNEL);
1164 spin_lock_irq(&desc->iuspin);
1165 if (rv) {
1166 - dev_err(&desc->intf->dev,
1167 - "usb_submit_urb failed with result %d\n", rv);
1168 + if (!test_bit(WDM_DISCONNECTING, &desc->flags))
1169 + dev_err(&desc->intf->dev,
1170 + "usb_submit_urb failed with result %d\n", rv);
1171
1172 /* make sure the next notification trigger a submit */
1173 clear_bit(WDM_RESPONDING, &desc->flags);
1174 @@ -1026,9 +1036,9 @@ static void wdm_disconnect(struct usb_interface *intf)
1175 wake_up_all(&desc->wait);
1176 mutex_lock(&desc->rlock);
1177 mutex_lock(&desc->wlock);
1178 - kill_urbs(desc);
1179 cancel_work_sync(&desc->rxwork);
1180 cancel_work_sync(&desc->service_outs_intr);
1181 + kill_urbs(desc);
1182 mutex_unlock(&desc->wlock);
1183 mutex_unlock(&desc->rlock);
1184
1185 diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
1186 index 67cbd42421bee..134dc2005ce97 100644
1187 --- a/drivers/usb/class/usblp.c
1188 +++ b/drivers/usb/class/usblp.c
1189 @@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
1190 #define usblp_reset(usblp)\
1191 usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
1192
1193 -#define usblp_hp_channel_change_request(usblp, channel, buffer) \
1194 - usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
1195 +static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
1196 +{
1197 + u8 *buf;
1198 + int ret;
1199 +
1200 + buf = kzalloc(1, GFP_KERNEL);
1201 + if (!buf)
1202 + return -ENOMEM;
1203 +
1204 + ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
1205 + USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
1206 + channel, buf, 1);
1207 + if (ret == 0)
1208 + *new_channel = buf[0];
1209 +
1210 + kfree(buf);
1211 +
1212 + return ret;
1213 +}
1214
1215 /*
1216 * See the description for usblp_select_alts() below for the usage
1217 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
1218 index c848f9164f929..da296f888f45d 100644
1219 --- a/drivers/usb/dwc3/core.h
1220 +++ b/drivers/usb/dwc3/core.h
1221 @@ -283,6 +283,7 @@
1222
1223 /* Global USB2 PHY Vendor Control Register */
1224 #define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
1225 +#define DWC3_GUSB2PHYACC_DONE BIT(24)
1226 #define DWC3_GUSB2PHYACC_BUSY BIT(23)
1227 #define DWC3_GUSB2PHYACC_WRITE BIT(22)
1228 #define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
1229 diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
1230 index f62b5f3c2d67d..bb8271531da70 100644
1231 --- a/drivers/usb/dwc3/ulpi.c
1232 +++ b/drivers/usb/dwc3/ulpi.c
1233 @@ -24,7 +24,7 @@ static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
1234
1235 while (count--) {
1236 reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
1237 - if (!(reg & DWC3_GUSB2PHYACC_BUSY))
1238 + if (reg & DWC3_GUSB2PHYACC_DONE)
1239 return 0;
1240 cpu_relax();
1241 }
1242 diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
1243 index 02ff850278b17..09ba3af1234ee 100644
1244 --- a/drivers/usb/gadget/Kconfig
1245 +++ b/drivers/usb/gadget/Kconfig
1246 @@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
1247 depends on NET
1248 select USB_U_ETHER
1249 select USB_F_NCM
1250 + select CRC32
1251 help
1252 NCM is an advanced protocol for Ethernet encapsulation, allows
1253 grouping of several ethernet frames into one USB transfer and
1254 @@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
1255 depends on NET
1256 select USB_U_ETHER
1257 select USB_F_EEM
1258 + select CRC32
1259 help
1260 CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
1261 and therefore can be supported by more hardware. Technically ECM and
1262 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1263 index f75ff1a75dc45..ed46fd74a292a 100644
1264 --- a/drivers/usb/gadget/composite.c
1265 +++ b/drivers/usb/gadget/composite.c
1266 @@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
1267
1268 spin_lock_irqsave(&cdev->lock, flags);
1269
1270 - if (cdev->deactivations == 0)
1271 + if (cdev->deactivations == 0) {
1272 + spin_unlock_irqrestore(&cdev->lock, flags);
1273 status = usb_gadget_deactivate(cdev->gadget);
1274 + spin_lock_irqsave(&cdev->lock, flags);
1275 + }
1276 if (status == 0)
1277 cdev->deactivations++;
1278
1279 @@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
1280 status = -EINVAL;
1281 else {
1282 cdev->deactivations--;
1283 - if (cdev->deactivations == 0)
1284 + if (cdev->deactivations == 0) {
1285 + spin_unlock_irqrestore(&cdev->lock, flags);
1286 status = usb_gadget_activate(cdev->gadget);
1287 + spin_lock_irqsave(&cdev->lock, flags);
1288 + }
1289 }
1290
1291 spin_unlock_irqrestore(&cdev->lock, flags);
1292 diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
1293 index a7709d126b29b..5b8b2ca4376cb 100644
1294 --- a/drivers/usb/gadget/configfs.c
1295 +++ b/drivers/usb/gadget/configfs.c
1296 @@ -233,9 +233,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
1297
1298 static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
1299 {
1300 - char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
1301 + struct gadget_info *gi = to_gadget_info(item);
1302 + char *udc_name;
1303 + int ret;
1304 +
1305 + mutex_lock(&gi->lock);
1306 + udc_name = gi->composite.gadget_driver.udc_name;
1307 + ret = sprintf(page, "%s\n", udc_name ?: "");
1308 + mutex_unlock(&gi->lock);
1309
1310 - return sprintf(page, "%s\n", udc_name ?: "");
1311 + return ret;
1312 }
1313
1314 static int unregister_gadget(struct gadget_info *gi)
1315 @@ -1217,9 +1224,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
1316
1317 cfg = container_of(c, struct config_usb_cfg, c);
1318
1319 - list_for_each_entry_safe(f, tmp, &c->functions, list) {
1320 + list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
1321
1322 - list_move_tail(&f->list, &cfg->func_list);
1323 + list_move(&f->list, &cfg->func_list);
1324 if (f->unbind) {
1325 dev_dbg(&gi->cdev.gadget->dev,
1326 "unbind function '%s'/%p\n",
1327 @@ -1505,7 +1512,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
1328 .suspend = configfs_composite_suspend,
1329 .resume = configfs_composite_resume,
1330
1331 - .max_speed = USB_SPEED_SUPER,
1332 + .max_speed = USB_SPEED_SUPER_PLUS,
1333 .driver = {
1334 .owner = THIS_MODULE,
1335 .name = "configfs-gadget",
1336 @@ -1545,7 +1552,7 @@ static struct config_group *gadgets_make(
1337 gi->composite.unbind = configfs_do_nothing;
1338 gi->composite.suspend = NULL;
1339 gi->composite.resume = NULL;
1340 - gi->composite.max_speed = USB_SPEED_SUPER;
1341 + gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
1342
1343 spin_lock_init(&gi->spinlock);
1344 mutex_init(&gi->lock);
1345 diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
1346 index 8ed1295d7e350..0f47cd398d60a 100644
1347 --- a/drivers/usb/gadget/function/f_printer.c
1348 +++ b/drivers/usb/gadget/function/f_printer.c
1349 @@ -1126,6 +1126,7 @@ fail_tx_reqs:
1350 printer_req_free(dev->in_ep, req);
1351 }
1352
1353 + usb_free_all_descriptors(f);
1354 return ret;
1355
1356 }
1357 diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
1358 index 3633df6d7610f..5d960b6603b6f 100644
1359 --- a/drivers/usb/gadget/function/f_uac2.c
1360 +++ b/drivers/usb/gadget/function/f_uac2.c
1361 @@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
1362
1363 .bEndpointAddress = USB_DIR_OUT,
1364 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
1365 - .wMaxPacketSize = cpu_to_le16(1023),
1366 + /* .wMaxPacketSize = DYNAMIC */
1367 .bInterval = 1,
1368 };
1369
1370 @@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
1371 .bDescriptorType = USB_DT_ENDPOINT,
1372
1373 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
1374 - .wMaxPacketSize = cpu_to_le16(1024),
1375 + /* .wMaxPacketSize = DYNAMIC */
1376 .bInterval = 4,
1377 };
1378
1379 @@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
1380
1381 .bEndpointAddress = USB_DIR_IN,
1382 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
1383 - .wMaxPacketSize = cpu_to_le16(1023),
1384 + /* .wMaxPacketSize = DYNAMIC */
1385 .bInterval = 1,
1386 };
1387
1388 @@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
1389 .bDescriptorType = USB_DT_ENDPOINT,
1390
1391 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
1392 - .wMaxPacketSize = cpu_to_le16(1024),
1393 + /* .wMaxPacketSize = DYNAMIC */
1394 .bInterval = 4,
1395 };
1396
1397 @@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
1398 __le32 dRES;
1399 } __packed;
1400
1401 -static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
1402 +static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
1403 struct usb_endpoint_descriptor *ep_desc,
1404 - unsigned int factor, bool is_playback)
1405 + enum usb_device_speed speed, bool is_playback)
1406 {
1407 int chmask, srate, ssize;
1408 - u16 max_packet_size;
1409 + u16 max_size_bw, max_size_ep;
1410 + unsigned int factor;
1411 +
1412 + switch (speed) {
1413 + case USB_SPEED_FULL:
1414 + max_size_ep = 1023;
1415 + factor = 1000;
1416 + break;
1417 +
1418 + case USB_SPEED_HIGH:
1419 + max_size_ep = 1024;
1420 + factor = 8000;
1421 + break;
1422 +
1423 + default:
1424 + return -EINVAL;
1425 + }
1426
1427 if (is_playback) {
1428 chmask = uac2_opts->p_chmask;
1429 @@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
1430 ssize = uac2_opts->c_ssize;
1431 }
1432
1433 - max_packet_size = num_channels(chmask) * ssize *
1434 + max_size_bw = num_channels(chmask) * ssize *
1435 DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
1436 - ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
1437 - le16_to_cpu(ep_desc->wMaxPacketSize)));
1438 + ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
1439 + max_size_ep));
1440 +
1441 + return 0;
1442 }
1443
1444 /* Use macro to overcome line length limitation */
1445 @@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1446 }
1447
1448 /* Calculate wMaxPacketSize according to audio bandwidth */
1449 - set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
1450 - set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
1451 - set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
1452 - set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
1453 + ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
1454 + true);
1455 + if (ret < 0) {
1456 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1457 + return ret;
1458 + }
1459 +
1460 + ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
1461 + false);
1462 + if (ret < 0) {
1463 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1464 + return ret;
1465 + }
1466 +
1467 + ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
1468 + true);
1469 + if (ret < 0) {
1470 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1471 + return ret;
1472 + }
1473 +
1474 + ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
1475 + false);
1476 + if (ret < 0) {
1477 + dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
1478 + return ret;
1479 + }
1480
1481 if (EPOUT_EN(uac2_opts)) {
1482 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
1483 diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
1484 index 891e9f7f40d59..99b840daf3d94 100644
1485 --- a/drivers/usb/gadget/function/u_ether.c
1486 +++ b/drivers/usb/gadget/function/u_ether.c
1487 @@ -45,9 +45,10 @@
1488 #define UETH__VERSION "29-May-2008"
1489
1490 /* Experiments show that both Linux and Windows hosts allow up to 16k
1491 - * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
1492 + * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
1493 * blocks and still have efficient handling. */
1494 -#define GETHER_MAX_ETH_FRAME_LEN 15412
1495 +#define GETHER_MAX_MTU_SIZE 15412
1496 +#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
1497
1498 struct eth_dev {
1499 /* lock is held while accessing port_usb
1500 @@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
1501
1502 /* MTU range: 14 - 15412 */
1503 net->min_mtu = ETH_HLEN;
1504 - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
1505 + net->max_mtu = GETHER_MAX_MTU_SIZE;
1506
1507 dev->gadget = g;
1508 SET_NETDEV_DEV(net, &g->dev);
1509 @@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
1510
1511 /* MTU range: 14 - 15412 */
1512 net->min_mtu = ETH_HLEN;
1513 - net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
1514 + net->max_mtu = GETHER_MAX_MTU_SIZE;
1515
1516 return net;
1517 }
1518 diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
1519 index af16672d51187..6680dcfe660ea 100644
1520 --- a/drivers/usb/gadget/legacy/acm_ms.c
1521 +++ b/drivers/usb/gadget/legacy/acm_ms.c
1522 @@ -203,8 +203,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
1523 struct usb_descriptor_header *usb_desc;
1524
1525 usb_desc = usb_otg_descriptor_alloc(gadget);
1526 - if (!usb_desc)
1527 + if (!usb_desc) {
1528 + status = -ENOMEM;
1529 goto fail_string_ids;
1530 + }
1531 usb_otg_descriptor_init(gadget, usb_desc);
1532 otg_desc[0] = usb_desc;
1533 otg_desc[1] = NULL;
1534 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
1535 index 7123ab44671b2..70aa3055c41e7 100644
1536 --- a/drivers/usb/host/xhci.c
1537 +++ b/drivers/usb/host/xhci.c
1538 @@ -4642,19 +4642,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
1539 {
1540 unsigned long long timeout_ns;
1541
1542 + if (xhci->quirks & XHCI_INTEL_HOST)
1543 + timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
1544 + else
1545 + timeout_ns = udev->u1_params.sel;
1546 +
1547 /* Prevent U1 if service interval is shorter than U1 exit latency */
1548 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
1549 - if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
1550 + if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
1551 dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
1552 return USB3_LPM_DISABLED;
1553 }
1554 }
1555
1556 - if (xhci->quirks & XHCI_INTEL_HOST)
1557 - timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
1558 - else
1559 - timeout_ns = udev->u1_params.sel;
1560 -
1561 /* The U1 timeout is encoded in 1us intervals.
1562 * Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
1563 */
1564 @@ -4706,19 +4706,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
1565 {
1566 unsigned long long timeout_ns;
1567
1568 + if (xhci->quirks & XHCI_INTEL_HOST)
1569 + timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
1570 + else
1571 + timeout_ns = udev->u2_params.sel;
1572 +
1573 /* Prevent U2 if service interval is shorter than U2 exit latency */
1574 if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
1575 - if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
1576 + if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
1577 dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
1578 return USB3_LPM_DISABLED;
1579 }
1580 }
1581
1582 - if (xhci->quirks & XHCI_INTEL_HOST)
1583 - timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
1584 - else
1585 - timeout_ns = udev->u2_params.sel;
1586 -
1587 /* The U2 timeout is encoded in 256us intervals */
1588 timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
1589 /* If the necessary timeout value is bigger than what we can set in the
1590 diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
1591 index 785080f790738..08b72bb22b7ef 100644
1592 --- a/drivers/usb/misc/yurex.c
1593 +++ b/drivers/usb/misc/yurex.c
1594 @@ -497,6 +497,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
1595 timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
1596 finish_wait(&dev->waitq, &wait);
1597
1598 + /* make sure URB is idle after timeout or (spurious) CMD_ACK */
1599 + usb_kill_urb(dev->cntl_urb);
1600 +
1601 mutex_unlock(&dev->io_mutex);
1602
1603 if (retval < 0) {
1604 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
1605 index ffbb2a8901b2b..7e4ec0b8d01f8 100644
1606 --- a/drivers/usb/serial/iuu_phoenix.c
1607 +++ b/drivers/usb/serial/iuu_phoenix.c
1608 @@ -536,23 +536,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
1609 struct device *dev = &port->dev;
1610 int i;
1611 int status;
1612 - u8 rxcmd = IUU_UART_RX;
1613 + u8 *rxcmd;
1614 struct iuu_private *priv = usb_get_serial_port_data(port);
1615
1616 if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
1617 return -EIO;
1618
1619 + rxcmd = kmalloc(1, GFP_KERNEL);
1620 + if (!rxcmd)
1621 + return -ENOMEM;
1622 +
1623 + rxcmd[0] = IUU_UART_RX;
1624 +
1625 for (i = 0; i < 2; i++) {
1626 - status = bulk_immediate(port, &rxcmd, 1);
1627 + status = bulk_immediate(port, rxcmd, 1);
1628 if (status != IUU_OPERATION_OK) {
1629 dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
1630 - return status;
1631 + goto out_free;
1632 }
1633
1634 status = read_immediate(port, &priv->len, 1);
1635 if (status != IUU_OPERATION_OK) {
1636 dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
1637 - return status;
1638 + goto out_free;
1639 }
1640
1641 if (priv->len > 0) {
1642 @@ -560,12 +566,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
1643 status = read_immediate(port, priv->buf, priv->len);
1644 if (status != IUU_OPERATION_OK) {
1645 dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
1646 - return status;
1647 + goto out_free;
1648 }
1649 }
1650 }
1651 dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
1652 iuu_led(port, 0, 0xF000, 0, 0xFF);
1653 +
1654 +out_free:
1655 + kfree(rxcmd);
1656 +
1657 return status;
1658 }
1659
1660 diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
1661 index fab6aa8a676aa..5dd228da5bdd5 100644
1662 --- a/drivers/usb/serial/keyspan_pda.c
1663 +++ b/drivers/usb/serial/keyspan_pda.c
1664 @@ -555,10 +555,8 @@ exit:
1665 static void keyspan_pda_write_bulk_callback(struct urb *urb)
1666 {
1667 struct usb_serial_port *port = urb->context;
1668 - struct keyspan_pda_private *priv;
1669
1670 set_bit(0, &port->write_urbs_free);
1671 - priv = usb_get_serial_port_data(port);
1672
1673 /* queue up a wakeup at scheduler time */
1674 usb_serial_port_softint(port);
1675 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1676 index 531744049e7f0..fd41b07b5aaf1 100644
1677 --- a/drivers/usb/serial/option.c
1678 +++ b/drivers/usb/serial/option.c
1679 @@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
1680 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
1681 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1682 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
1683 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
1684 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
1685 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
1686 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
1687 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
1688 @@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
1689 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
1690 .driver_info = RSVD(6) },
1691 { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
1692 + { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
1693 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
1694 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
1695 { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
1696 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
1697 index 749c69be091cc..cb7b15ecb7aba 100644
1698 --- a/drivers/usb/storage/unusual_uas.h
1699 +++ b/drivers/usb/storage/unusual_uas.h
1700 @@ -90,6 +90,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
1701 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1702 US_FL_BROKEN_FUA),
1703
1704 +/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
1705 +UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
1706 + "PNY",
1707 + "Pro Elite SSD",
1708 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1709 + US_FL_NO_ATA_1X),
1710 +
1711 /* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
1712 UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
1713 "PNY",
1714 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1715 index 65850e9c71905..fee511437abe3 100644
1716 --- a/drivers/usb/usbip/vhci_hcd.c
1717 +++ b/drivers/usb/usbip/vhci_hcd.c
1718 @@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1719 default:
1720 usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
1721 wValue);
1722 + if (wValue >= 32)
1723 + goto error;
1724 vhci_hcd->port_status[rhport] &= ~(1 << wValue);
1725 break;
1726 }
1727 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
1728 index b53b6528d6ce2..48e574ae60330 100644
1729 --- a/drivers/vhost/net.c
1730 +++ b/drivers/vhost/net.c
1731 @@ -860,6 +860,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
1732 size_t len, total_len = 0;
1733 int err;
1734 struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
1735 + struct ubuf_info *ubuf;
1736 bool zcopy_used;
1737 int sent_pkts = 0;
1738
1739 @@ -892,9 +893,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
1740
1741 /* use msg_control to pass vhost zerocopy ubuf info to skb */
1742 if (zcopy_used) {
1743 - struct ubuf_info *ubuf;
1744 ubuf = nvq->ubuf_info + nvq->upend_idx;
1745 -
1746 vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
1747 vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
1748 ubuf->callback = vhost_zerocopy_callback;
1749 @@ -924,7 +923,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
1750 err = sock->ops->sendmsg(sock, &msg, len);
1751 if (unlikely(err < 0)) {
1752 if (zcopy_used) {
1753 - vhost_net_ubuf_put(ubufs);
1754 + if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
1755 + vhost_net_ubuf_put(ubufs);
1756 nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
1757 % UIO_MAXIOV;
1758 }
1759 diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
1760 index 81671272aa58f..1c6ae98710a01 100644
1761 --- a/drivers/video/fbdev/hyperv_fb.c
1762 +++ b/drivers/video/fbdev/hyperv_fb.c
1763 @@ -704,11 +704,9 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
1764 }
1765
1766 /*
1767 - * Map the VRAM cacheable for performance. This is also required for
1768 - * VM Connect to display properly for ARM64 Linux VM, as the host also
1769 - * maps the VRAM cacheable.
1770 + * Map the VRAM cacheable for performance.
1771 */
1772 - fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
1773 + fb_virt = ioremap_wc(par->mem->start, screen_fb_size);
1774 if (!fb_virt)
1775 goto err2;
1776
1777 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
1778 index 88940f494428a..903136ceac34a 100644
1779 --- a/fs/btrfs/send.c
1780 +++ b/fs/btrfs/send.c
1781 @@ -238,6 +238,7 @@ struct waiting_dir_move {
1782 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
1783 */
1784 u64 rmdir_ino;
1785 + u64 rmdir_gen;
1786 bool orphanized;
1787 };
1788
1789 @@ -323,7 +324,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
1790 static struct waiting_dir_move *
1791 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
1792
1793 -static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
1794 +static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
1795
1796 static int need_send_hole(struct send_ctx *sctx)
1797 {
1798 @@ -2306,7 +2307,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
1799
1800 fs_path_reset(name);
1801
1802 - if (is_waiting_for_rm(sctx, ino)) {
1803 + if (is_waiting_for_rm(sctx, ino, gen)) {
1804 ret = gen_unique_name(sctx, ino, gen, name);
1805 if (ret < 0)
1806 goto out;
1807 @@ -2865,8 +2866,8 @@ out:
1808 return ret;
1809 }
1810
1811 -static struct orphan_dir_info *
1812 -add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
1813 +static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
1814 + u64 dir_ino, u64 dir_gen)
1815 {
1816 struct rb_node **p = &sctx->orphan_dirs.rb_node;
1817 struct rb_node *parent = NULL;
1818 @@ -2875,20 +2876,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
1819 while (*p) {
1820 parent = *p;
1821 entry = rb_entry(parent, struct orphan_dir_info, node);
1822 - if (dir_ino < entry->ino) {
1823 + if (dir_ino < entry->ino)
1824 p = &(*p)->rb_left;
1825 - } else if (dir_ino > entry->ino) {
1826 + else if (dir_ino > entry->ino)
1827 p = &(*p)->rb_right;
1828 - } else {
1829 + else if (dir_gen < entry->gen)
1830 + p = &(*p)->rb_left;
1831 + else if (dir_gen > entry->gen)
1832 + p = &(*p)->rb_right;
1833 + else
1834 return entry;
1835 - }
1836 }
1837
1838 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
1839 if (!odi)
1840 return ERR_PTR(-ENOMEM);
1841 odi->ino = dir_ino;
1842 - odi->gen = 0;
1843 + odi->gen = dir_gen;
1844 odi->last_dir_index_offset = 0;
1845
1846 rb_link_node(&odi->node, parent, p);
1847 @@ -2896,8 +2900,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
1848 return odi;
1849 }
1850
1851 -static struct orphan_dir_info *
1852 -get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
1853 +static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
1854 + u64 dir_ino, u64 gen)
1855 {
1856 struct rb_node *n = sctx->orphan_dirs.rb_node;
1857 struct orphan_dir_info *entry;
1858 @@ -2908,15 +2912,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
1859 n = n->rb_left;
1860 else if (dir_ino > entry->ino)
1861 n = n->rb_right;
1862 + else if (gen < entry->gen)
1863 + n = n->rb_left;
1864 + else if (gen > entry->gen)
1865 + n = n->rb_right;
1866 else
1867 return entry;
1868 }
1869 return NULL;
1870 }
1871
1872 -static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
1873 +static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
1874 {
1875 - struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
1876 + struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
1877
1878 return odi != NULL;
1879 }
1880 @@ -2961,7 +2969,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1881 key.type = BTRFS_DIR_INDEX_KEY;
1882 key.offset = 0;
1883
1884 - odi = get_orphan_dir_info(sctx, dir);
1885 + odi = get_orphan_dir_info(sctx, dir, dir_gen);
1886 if (odi)
1887 key.offset = odi->last_dir_index_offset;
1888
1889 @@ -2992,7 +3000,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1890
1891 dm = get_waiting_dir_move(sctx, loc.objectid);
1892 if (dm) {
1893 - odi = add_orphan_dir_info(sctx, dir);
1894 + odi = add_orphan_dir_info(sctx, dir, dir_gen);
1895 if (IS_ERR(odi)) {
1896 ret = PTR_ERR(odi);
1897 goto out;
1898 @@ -3000,12 +3008,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1899 odi->gen = dir_gen;
1900 odi->last_dir_index_offset = found_key.offset;
1901 dm->rmdir_ino = dir;
1902 + dm->rmdir_gen = dir_gen;
1903 ret = 0;
1904 goto out;
1905 }
1906
1907 if (loc.objectid > send_progress) {
1908 - odi = add_orphan_dir_info(sctx, dir);
1909 + odi = add_orphan_dir_info(sctx, dir, dir_gen);
1910 if (IS_ERR(odi)) {
1911 ret = PTR_ERR(odi);
1912 goto out;
1913 @@ -3045,6 +3054,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
1914 return -ENOMEM;
1915 dm->ino = ino;
1916 dm->rmdir_ino = 0;
1917 + dm->rmdir_gen = 0;
1918 dm->orphanized = orphanized;
1919
1920 while (*p) {
1921 @@ -3190,7 +3200,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
1922 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
1923 fs_path_reset(name);
1924
1925 - if (is_waiting_for_rm(sctx, ino))
1926 + if (is_waiting_for_rm(sctx, ino, gen))
1927 break;
1928 if (is_waiting_for_move(sctx, ino)) {
1929 if (*ancestor_ino == 0)
1930 @@ -3230,6 +3240,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
1931 u64 parent_ino, parent_gen;
1932 struct waiting_dir_move *dm = NULL;
1933 u64 rmdir_ino = 0;
1934 + u64 rmdir_gen;
1935 u64 ancestor;
1936 bool is_orphan;
1937 int ret;
1938 @@ -3244,6 +3255,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
1939 dm = get_waiting_dir_move(sctx, pm->ino);
1940 ASSERT(dm);
1941 rmdir_ino = dm->rmdir_ino;
1942 + rmdir_gen = dm->rmdir_gen;
1943 is_orphan = dm->orphanized;
1944 free_waiting_dir_move(sctx, dm);
1945
1946 @@ -3280,6 +3292,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
1947 dm = get_waiting_dir_move(sctx, pm->ino);
1948 ASSERT(dm);
1949 dm->rmdir_ino = rmdir_ino;
1950 + dm->rmdir_gen = rmdir_gen;
1951 }
1952 goto out;
1953 }
1954 @@ -3298,7 +3311,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
1955 struct orphan_dir_info *odi;
1956 u64 gen;
1957
1958 - odi = get_orphan_dir_info(sctx, rmdir_ino);
1959 + odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
1960 if (!odi) {
1961 /* already deleted */
1962 goto finish;
1963 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
1964 index 64e9ee1b129e2..8c3dbe13e647c 100644
1965 --- a/fs/proc/generic.c
1966 +++ b/fs/proc/generic.c
1967 @@ -138,8 +138,12 @@ static int proc_getattr(const struct path *path, struct kstat *stat,
1968 {
1969 struct inode *inode = d_inode(path->dentry);
1970 struct proc_dir_entry *de = PDE(inode);
1971 - if (de && de->nlink)
1972 - set_nlink(inode, de->nlink);
1973 + if (de) {
1974 + nlink_t nlink = READ_ONCE(de->nlink);
1975 + if (nlink > 0) {
1976 + set_nlink(inode, nlink);
1977 + }
1978 + }
1979
1980 generic_fillattr(inode, stat);
1981 return 0;
1982 @@ -338,6 +342,16 @@ static const struct file_operations proc_dir_operations = {
1983 .iterate_shared = proc_readdir,
1984 };
1985
1986 +static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
1987 +{
1988 + return 0;
1989 +}
1990 +
1991 +const struct dentry_operations proc_net_dentry_ops = {
1992 + .d_revalidate = proc_net_d_revalidate,
1993 + .d_delete = always_delete_dentry,
1994 +};
1995 +
1996 /*
1997 * proc directories can do almost nothing..
1998 */
1999 @@ -362,6 +376,7 @@ struct proc_dir_entry *proc_register(struct proc_dir_entry *dir,
2000 write_unlock(&proc_subdir_lock);
2001 goto out_free_inum;
2002 }
2003 + dir->nlink++;
2004 write_unlock(&proc_subdir_lock);
2005
2006 return dp;
2007 @@ -459,8 +474,8 @@ struct proc_dir_entry *proc_symlink(const char *name,
2008 }
2009 EXPORT_SYMBOL(proc_symlink);
2010
2011 -struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
2012 - struct proc_dir_entry *parent, void *data)
2013 +struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
2014 + struct proc_dir_entry *parent, void *data, bool force_lookup)
2015 {
2016 struct proc_dir_entry *ent;
2017
2018 @@ -472,13 +487,20 @@ struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
2019 ent->data = data;
2020 ent->proc_fops = &proc_dir_operations;
2021 ent->proc_iops = &proc_dir_inode_operations;
2022 - parent->nlink++;
2023 + if (force_lookup) {
2024 + pde_force_lookup(ent);
2025 + }
2026 ent = proc_register(parent, ent);
2027 - if (!ent)
2028 - parent->nlink--;
2029 }
2030 return ent;
2031 }
2032 +EXPORT_SYMBOL_GPL(_proc_mkdir);
2033 +
2034 +struct proc_dir_entry *proc_mkdir_data(const char *name, umode_t mode,
2035 + struct proc_dir_entry *parent, void *data)
2036 +{
2037 + return _proc_mkdir(name, mode, parent, data, false);
2038 +}
2039 EXPORT_SYMBOL_GPL(proc_mkdir_data);
2040
2041 struct proc_dir_entry *proc_mkdir_mode(const char *name, umode_t mode,
2042 @@ -505,10 +527,7 @@ struct proc_dir_entry *proc_create_mount_point(const char *name)
2043 ent->data = NULL;
2044 ent->proc_fops = NULL;
2045 ent->proc_iops = NULL;
2046 - parent->nlink++;
2047 ent = proc_register(parent, ent);
2048 - if (!ent)
2049 - parent->nlink--;
2050 }
2051 return ent;
2052 }
2053 @@ -666,8 +685,12 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
2054 len = strlen(fn);
2055
2056 de = pde_subdir_find(parent, fn, len);
2057 - if (de)
2058 + if (de) {
2059 rb_erase(&de->subdir_node, &parent->subdir);
2060 + if (S_ISDIR(de->mode)) {
2061 + parent->nlink--;
2062 + }
2063 + }
2064 write_unlock(&proc_subdir_lock);
2065 if (!de) {
2066 WARN(1, "name '%s'\n", name);
2067 @@ -676,9 +699,6 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
2068
2069 proc_entry_rundown(de);
2070
2071 - if (S_ISDIR(de->mode))
2072 - parent->nlink--;
2073 - de->nlink = 0;
2074 WARN(pde_subdir_first(de),
2075 "%s: removing non-empty directory '%s/%s', leaking at least '%s'\n",
2076 __func__, de->parent->name, de->name, pde_subdir_first(de)->name);
2077 @@ -714,13 +734,12 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
2078 de = next;
2079 continue;
2080 }
2081 - write_unlock(&proc_subdir_lock);
2082 -
2083 - proc_entry_rundown(de);
2084 next = de->parent;
2085 if (S_ISDIR(de->mode))
2086 next->nlink--;
2087 - de->nlink = 0;
2088 + write_unlock(&proc_subdir_lock);
2089 +
2090 + proc_entry_rundown(de);
2091 if (de == root)
2092 break;
2093 pde_put(de);
2094 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
2095 index cd0c8d5ce9a13..269acc165055d 100644
2096 --- a/fs/proc/internal.h
2097 +++ b/fs/proc/internal.h
2098 @@ -299,3 +299,10 @@ extern unsigned long task_statm(struct mm_struct *,
2099 unsigned long *, unsigned long *,
2100 unsigned long *, unsigned long *);
2101 extern void task_mem(struct seq_file *, struct mm_struct *);
2102 +
2103 +extern const struct dentry_operations proc_net_dentry_ops;
2104 +static inline void pde_force_lookup(struct proc_dir_entry *pde)
2105 +{
2106 + /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
2107 + pde->proc_dops = &proc_net_dentry_ops;
2108 +}
2109 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
2110 index 76ae278df1c47..313b7c751867f 100644
2111 --- a/fs/proc/proc_net.c
2112 +++ b/fs/proc/proc_net.c
2113 @@ -39,22 +39,6 @@ static struct net *get_proc_net(const struct inode *inode)
2114 return maybe_get_net(PDE_NET(PDE(inode)));
2115 }
2116
2117 -static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
2118 -{
2119 - return 0;
2120 -}
2121 -
2122 -static const struct dentry_operations proc_net_dentry_ops = {
2123 - .d_revalidate = proc_net_d_revalidate,
2124 - .d_delete = always_delete_dentry,
2125 -};
2126 -
2127 -static void pde_force_lookup(struct proc_dir_entry *pde)
2128 -{
2129 - /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
2130 - pde->proc_dops = &proc_net_dentry_ops;
2131 -}
2132 -
2133 static int seq_open_net(struct inode *inode, struct file *file)
2134 {
2135 unsigned int state_size = PDE(inode)->state_size;
2136 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
2137 index a705aa2d03f91..865d02c224ada 100644
2138 --- a/include/linux/proc_fs.h
2139 +++ b/include/linux/proc_fs.h
2140 @@ -21,6 +21,7 @@ extern void proc_flush_task(struct task_struct *);
2141
2142 extern struct proc_dir_entry *proc_symlink(const char *,
2143 struct proc_dir_entry *, const char *);
2144 +struct proc_dir_entry *_proc_mkdir(const char *, umode_t, struct proc_dir_entry *, void *, bool);
2145 extern struct proc_dir_entry *proc_mkdir(const char *, struct proc_dir_entry *);
2146 extern struct proc_dir_entry *proc_mkdir_data(const char *, umode_t,
2147 struct proc_dir_entry *, void *);
2148 @@ -99,6 +100,11 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
2149 static inline struct proc_dir_entry *proc_mkdir(const char *name,
2150 struct proc_dir_entry *parent) {return NULL;}
2151 static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; }
2152 +static inline struct proc_dir_entry *_proc_mkdir(const char *name, umode_t mode,
2153 + struct proc_dir_entry *parent, void *data, bool force_lookup)
2154 +{
2155 + return NULL;
2156 +}
2157 static inline struct proc_dir_entry *proc_mkdir_data(const char *name,
2158 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; }
2159 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
2160 @@ -136,7 +142,7 @@ struct net;
2161 static inline struct proc_dir_entry *proc_net_mkdir(
2162 struct net *net, const char *name, struct proc_dir_entry *parent)
2163 {
2164 - return proc_mkdir_data(name, 0, parent, net);
2165 + return _proc_mkdir(name, 0, parent, net, true);
2166 }
2167
2168 struct ns_common;
2169 diff --git a/include/net/red.h b/include/net/red.h
2170 index 9665582c4687e..e21e7fd4fe077 100644
2171 --- a/include/net/red.h
2172 +++ b/include/net/red.h
2173 @@ -168,12 +168,14 @@ static inline void red_set_vars(struct red_vars *v)
2174 v->qcount = -1;
2175 }
2176
2177 -static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog)
2178 +static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
2179 {
2180 if (fls(qth_min) + Wlog > 32)
2181 return false;
2182 if (fls(qth_max) + Wlog > 32)
2183 return false;
2184 + if (Scell_log >= 32)
2185 + return false;
2186 if (qth_max < qth_min)
2187 return false;
2188 return true;
2189 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2190 index 4aa268582a225..28e52657e0930 100644
2191 --- a/kernel/workqueue.c
2192 +++ b/kernel/workqueue.c
2193 @@ -3718,17 +3718,24 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
2194 * is updated and visible.
2195 */
2196 if (!freezable || !workqueue_freezing) {
2197 + bool kick = false;
2198 +
2199 pwq->max_active = wq->saved_max_active;
2200
2201 while (!list_empty(&pwq->delayed_works) &&
2202 - pwq->nr_active < pwq->max_active)
2203 + pwq->nr_active < pwq->max_active) {
2204 pwq_activate_first_delayed(pwq);
2205 + kick = true;
2206 + }
2207
2208 /*
2209 * Need to kick a worker after thawed or an unbound wq's
2210 - * max_active is bumped. It's a slow path. Do it always.
2211 + * max_active is bumped. In realtime scenarios, always kicking a
2212 + * worker will cause interference on the isolated cpu cores, so
2213 + * let's kick iff work items were activated.
2214 */
2215 - wake_up_worker(pwq->pool);
2216 + if (kick)
2217 + wake_up_worker(pwq->pool);
2218 } else {
2219 pwq->max_active = 0;
2220 }
2221 diff --git a/lib/genalloc.c b/lib/genalloc.c
2222 index 9fc31292cfa1d..80d10d02cf388 100644
2223 --- a/lib/genalloc.c
2224 +++ b/lib/genalloc.c
2225 @@ -81,14 +81,14 @@ static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
2226 * users set the same bit, one user will return remain bits, otherwise
2227 * return 0.
2228 */
2229 -static int bitmap_set_ll(unsigned long *map, int start, int nr)
2230 +static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
2231 {
2232 unsigned long *p = map + BIT_WORD(start);
2233 - const int size = start + nr;
2234 + const unsigned long size = start + nr;
2235 int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
2236 unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
2237
2238 - while (nr - bits_to_set >= 0) {
2239 + while (nr >= bits_to_set) {
2240 if (set_bits_ll(p, mask_to_set))
2241 return nr;
2242 nr -= bits_to_set;
2243 @@ -116,14 +116,15 @@ static int bitmap_set_ll(unsigned long *map, int start, int nr)
2244 * users clear the same bit, one user will return remain bits,
2245 * otherwise return 0.
2246 */
2247 -static int bitmap_clear_ll(unsigned long *map, int start, int nr)
2248 +static unsigned long
2249 +bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
2250 {
2251 unsigned long *p = map + BIT_WORD(start);
2252 - const int size = start + nr;
2253 + const unsigned long size = start + nr;
2254 int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
2255 unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
2256
2257 - while (nr - bits_to_clear >= 0) {
2258 + while (nr >= bits_to_clear) {
2259 if (clear_bits_ll(p, mask_to_clear))
2260 return nr;
2261 nr -= bits_to_clear;
2262 @@ -183,8 +184,8 @@ int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t ph
2263 size_t size, int nid, void *owner)
2264 {
2265 struct gen_pool_chunk *chunk;
2266 - int nbits = size >> pool->min_alloc_order;
2267 - int nbytes = sizeof(struct gen_pool_chunk) +
2268 + unsigned long nbits = size >> pool->min_alloc_order;
2269 + unsigned long nbytes = sizeof(struct gen_pool_chunk) +
2270 BITS_TO_LONGS(nbits) * sizeof(long);
2271
2272 chunk = vzalloc_node(nbytes, nid);
2273 @@ -242,7 +243,7 @@ void gen_pool_destroy(struct gen_pool *pool)
2274 struct list_head *_chunk, *_next_chunk;
2275 struct gen_pool_chunk *chunk;
2276 int order = pool->min_alloc_order;
2277 - int bit, end_bit;
2278 + unsigned long bit, end_bit;
2279
2280 list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
2281 chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
2282 @@ -278,7 +279,7 @@ unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
2283 struct gen_pool_chunk *chunk;
2284 unsigned long addr = 0;
2285 int order = pool->min_alloc_order;
2286 - int nbits, start_bit, end_bit, remain;
2287 + unsigned long nbits, start_bit, end_bit, remain;
2288
2289 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
2290 BUG_ON(in_nmi());
2291 @@ -487,7 +488,7 @@ void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
2292 {
2293 struct gen_pool_chunk *chunk;
2294 int order = pool->min_alloc_order;
2295 - int start_bit, nbits, remain;
2296 + unsigned long start_bit, nbits, remain;
2297
2298 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
2299 BUG_ON(in_nmi());
2300 @@ -754,7 +755,7 @@ unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
2301 index = bitmap_find_next_zero_area(map, size, start, nr, 0);
2302
2303 while (index < size) {
2304 - int next_bit = find_next_bit(map, size, index + nr);
2305 + unsigned long next_bit = find_next_bit(map, size, index + nr);
2306 if ((next_bit - index) < len) {
2307 len = next_bit - index;
2308 start_bit = index;
2309 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
2310 index 2ebf9b252779d..98474d85fb51f 100644
2311 --- a/net/core/net-sysfs.c
2312 +++ b/net/core/net-sysfs.c
2313 @@ -1235,8 +1235,8 @@ static const struct attribute_group dql_group = {
2314 static ssize_t xps_cpus_show(struct netdev_queue *queue,
2315 char *buf)
2316 {
2317 + int cpu, len, ret, num_tc = 1, tc = 0;
2318 struct net_device *dev = queue->dev;
2319 - int cpu, len, num_tc = 1, tc = 0;
2320 struct xps_dev_maps *dev_maps;
2321 cpumask_var_t mask;
2322 unsigned long index;
2323 @@ -1246,22 +1246,31 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
2324
2325 index = get_netdev_queue_index(queue);
2326
2327 + if (!rtnl_trylock())
2328 + return restart_syscall();
2329 +
2330 if (dev->num_tc) {
2331 /* Do not allow XPS on subordinate device directly */
2332 num_tc = dev->num_tc;
2333 - if (num_tc < 0)
2334 - return -EINVAL;
2335 + if (num_tc < 0) {
2336 + ret = -EINVAL;
2337 + goto err_rtnl_unlock;
2338 + }
2339
2340 /* If queue belongs to subordinate dev use its map */
2341 dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev;
2342
2343 tc = netdev_txq_to_tc(dev, index);
2344 - if (tc < 0)
2345 - return -EINVAL;
2346 + if (tc < 0) {
2347 + ret = -EINVAL;
2348 + goto err_rtnl_unlock;
2349 + }
2350 }
2351
2352 - if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
2353 - return -ENOMEM;
2354 + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2355 + ret = -ENOMEM;
2356 + goto err_rtnl_unlock;
2357 + }
2358
2359 rcu_read_lock();
2360 dev_maps = rcu_dereference(dev->xps_cpus_map);
2361 @@ -1284,9 +1293,15 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
2362 }
2363 rcu_read_unlock();
2364
2365 + rtnl_unlock();
2366 +
2367 len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask));
2368 free_cpumask_var(mask);
2369 return len < PAGE_SIZE ? len : -EINVAL;
2370 +
2371 +err_rtnl_unlock:
2372 + rtnl_unlock();
2373 + return ret;
2374 }
2375
2376 static ssize_t xps_cpus_store(struct netdev_queue *queue,
2377 @@ -1314,7 +1329,13 @@ static ssize_t xps_cpus_store(struct netdev_queue *queue,
2378 return err;
2379 }
2380
2381 + if (!rtnl_trylock()) {
2382 + free_cpumask_var(mask);
2383 + return restart_syscall();
2384 + }
2385 +
2386 err = netif_set_xps_queue(dev, mask, index);
2387 + rtnl_unlock();
2388
2389 free_cpumask_var(mask);
2390
2391 @@ -1326,22 +1347,29 @@ static struct netdev_queue_attribute xps_cpus_attribute __ro_after_init
2392
2393 static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
2394 {
2395 + int j, len, ret, num_tc = 1, tc = 0;
2396 struct net_device *dev = queue->dev;
2397 struct xps_dev_maps *dev_maps;
2398 unsigned long *mask, index;
2399 - int j, len, num_tc = 1, tc = 0;
2400
2401 index = get_netdev_queue_index(queue);
2402
2403 + if (!rtnl_trylock())
2404 + return restart_syscall();
2405 +
2406 if (dev->num_tc) {
2407 num_tc = dev->num_tc;
2408 tc = netdev_txq_to_tc(dev, index);
2409 - if (tc < 0)
2410 - return -EINVAL;
2411 + if (tc < 0) {
2412 + ret = -EINVAL;
2413 + goto err_rtnl_unlock;
2414 + }
2415 }
2416 mask = bitmap_zalloc(dev->num_rx_queues, GFP_KERNEL);
2417 - if (!mask)
2418 - return -ENOMEM;
2419 + if (!mask) {
2420 + ret = -ENOMEM;
2421 + goto err_rtnl_unlock;
2422 + }
2423
2424 rcu_read_lock();
2425 dev_maps = rcu_dereference(dev->xps_rxqs_map);
2426 @@ -1367,10 +1395,16 @@ static ssize_t xps_rxqs_show(struct netdev_queue *queue, char *buf)
2427 out_no_maps:
2428 rcu_read_unlock();
2429
2430 + rtnl_unlock();
2431 +
2432 len = bitmap_print_to_pagebuf(false, buf, mask, dev->num_rx_queues);
2433 bitmap_free(mask);
2434
2435 return len < PAGE_SIZE ? len : -EINVAL;
2436 +
2437 +err_rtnl_unlock:
2438 + rtnl_unlock();
2439 + return ret;
2440 }
2441
2442 static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
2443 @@ -1396,10 +1430,17 @@ static ssize_t xps_rxqs_store(struct netdev_queue *queue, const char *buf,
2444 return err;
2445 }
2446
2447 + if (!rtnl_trylock()) {
2448 + bitmap_free(mask);
2449 + return restart_syscall();
2450 + }
2451 +
2452 cpus_read_lock();
2453 err = __netif_set_xps_queue(dev, mask, index, true);
2454 cpus_read_unlock();
2455
2456 + rtnl_unlock();
2457 +
2458 bitmap_free(mask);
2459 return err ? : len;
2460 }
2461 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2462 index da994f7e3def9..2ce191019526e 100644
2463 --- a/net/ipv4/fib_frontend.c
2464 +++ b/net/ipv4/fib_frontend.c
2465 @@ -302,7 +302,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
2466 .flowi4_iif = LOOPBACK_IFINDEX,
2467 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
2468 .daddr = ip_hdr(skb)->saddr,
2469 - .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
2470 + .flowi4_tos = ip_hdr(skb)->tos & IPTOS_RT_MASK,
2471 .flowi4_scope = scope,
2472 .flowi4_mark = vmark ? skb->mark : 0,
2473 };
2474 diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2475 index 66fdbfe5447cd..5d1e6fe9d8387 100644
2476 --- a/net/ipv4/gre_demux.c
2477 +++ b/net/ipv4/gre_demux.c
2478 @@ -128,7 +128,7 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2479 * to 0 and sets the configured key in the
2480 * inner erspan header field
2481 */
2482 - if (greh->protocol == htons(ETH_P_ERSPAN) ||
2483 + if ((greh->protocol == htons(ETH_P_ERSPAN) && hdr_len != 4) ||
2484 greh->protocol == htons(ETH_P_ERSPAN2)) {
2485 struct erspan_base_hdr *ershdr;
2486
2487 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2488 index 8a6a4384e7916..12d242fedffdc 100644
2489 --- a/net/ipv4/netfilter/arp_tables.c
2490 +++ b/net/ipv4/netfilter/arp_tables.c
2491 @@ -1406,7 +1406,7 @@ static int compat_get_entries(struct net *net,
2492 xt_compat_lock(NFPROTO_ARP);
2493 t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
2494 if (!IS_ERR(t)) {
2495 - const struct xt_table_info *private = t->private;
2496 + const struct xt_table_info *private = xt_table_get_private_protected(t);
2497 struct xt_table_info info;
2498
2499 ret = compat_table_info(private, &info);
2500 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2501 index 4852769995440..cbbc8a7b82788 100644
2502 --- a/net/ipv4/netfilter/ip_tables.c
2503 +++ b/net/ipv4/netfilter/ip_tables.c
2504 @@ -1616,7 +1616,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
2505 xt_compat_lock(AF_INET);
2506 t = xt_find_table_lock(net, AF_INET, get.name);
2507 if (!IS_ERR(t)) {
2508 - const struct xt_table_info *private = t->private;
2509 + const struct xt_table_info *private = xt_table_get_private_protected(t);
2510 struct xt_table_info info;
2511 ret = compat_table_info(private, &info);
2512 if (!ret && get.size == info.size)
2513 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2514 index 12735ee7713a7..01cdde25eb16d 100644
2515 --- a/net/ipv6/netfilter/ip6_tables.c
2516 +++ b/net/ipv6/netfilter/ip6_tables.c
2517 @@ -1625,7 +1625,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
2518 xt_compat_lock(AF_INET6);
2519 t = xt_find_table_lock(net, AF_INET6, get.name);
2520 if (!IS_ERR(t)) {
2521 - const struct xt_table_info *private = t->private;
2522 + const struct xt_table_info *private = xt_table_get_private_protected(t);
2523 struct xt_table_info info;
2524 ret = compat_table_info(private, &info);
2525 if (!ret && get.size == info.size)
2526 diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c
2527 index d5611f04926de..7c893c3799202 100644
2528 --- a/net/ncsi/ncsi-rsp.c
2529 +++ b/net/ncsi/ncsi-rsp.c
2530 @@ -1114,7 +1114,7 @@ int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev,
2531 int payload, i, ret;
2532
2533 /* Find the NCSI device */
2534 - nd = ncsi_find_dev(dev);
2535 + nd = ncsi_find_dev(orig_dev);
2536 ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
2537 if (!ndp)
2538 return -ENODEV;
2539 diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
2540 index a7a982a3e6761..1a58cfdb862d6 100644
2541 --- a/net/netfilter/ipset/ip_set_hash_gen.h
2542 +++ b/net/netfilter/ipset/ip_set_hash_gen.h
2543 @@ -143,20 +143,6 @@ htable_size(u8 hbits)
2544 return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
2545 }
2546
2547 -/* Compute htable_bits from the user input parameter hashsize */
2548 -static u8
2549 -htable_bits(u32 hashsize)
2550 -{
2551 - /* Assume that hashsize == 2^htable_bits */
2552 - u8 bits = fls(hashsize - 1);
2553 -
2554 - if (jhash_size(bits) != hashsize)
2555 - /* Round up to the first 2^n value */
2556 - bits = fls(hashsize);
2557 -
2558 - return bits;
2559 -}
2560 -
2561 #ifdef IP_SET_HASH_WITH_NETS
2562 #if IPSET_NET_COUNT > 1
2563 #define __CIDR(cidr, i) (cidr[i])
2564 @@ -1520,7 +1506,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
2565 if (!h)
2566 return -ENOMEM;
2567
2568 - hbits = htable_bits(hashsize);
2569 + /* Compute htable_bits from the user input parameter hashsize.
2570 + * Assume that hashsize == 2^htable_bits,
2571 + * otherwise round up to the first 2^n value.
2572 + */
2573 + hbits = fls(hashsize - 1);
2574 hsize = htable_size(hbits);
2575 if (hsize == 0) {
2576 kfree(h);
2577 diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
2578 index 217fd1bdc55e7..60236cc316d03 100644
2579 --- a/net/netfilter/nft_dynset.c
2580 +++ b/net/netfilter/nft_dynset.c
2581 @@ -146,7 +146,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
2582 u32 flags = ntohl(nla_get_be32(tb[NFTA_DYNSET_FLAGS]));
2583
2584 if (flags & ~NFT_DYNSET_F_INV)
2585 - return -EINVAL;
2586 + return -EOPNOTSUPP;
2587 if (flags & NFT_DYNSET_F_INV)
2588 priv->invert = true;
2589 }
2590 @@ -179,7 +179,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
2591 timeout = 0;
2592 if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
2593 if (!(set->flags & NFT_SET_TIMEOUT))
2594 - return -EINVAL;
2595 + return -EOPNOTSUPP;
2596
2597 err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
2598 if (err)
2599 @@ -193,7 +193,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
2600
2601 if (tb[NFTA_DYNSET_SREG_DATA] != NULL) {
2602 if (!(set->flags & NFT_SET_MAP))
2603 - return -EINVAL;
2604 + return -EOPNOTSUPP;
2605 if (set->dtype == NFT_DATA_VERDICT)
2606 return -EOPNOTSUPP;
2607
2608 diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
2609 index 2236455b10a36..182c1285b4ada 100644
2610 --- a/net/netfilter/xt_RATEEST.c
2611 +++ b/net/netfilter/xt_RATEEST.c
2612 @@ -115,6 +115,9 @@ static int xt_rateest_tg_checkentry(const struct xt_tgchk_param *par)
2613 } cfg;
2614 int ret;
2615
2616 + if (strnlen(info->name, sizeof(est->name)) >= sizeof(est->name))
2617 + return -ENAMETOOLONG;
2618 +
2619 net_get_random_once(&jhash_rnd, sizeof(jhash_rnd));
2620
2621 mutex_lock(&xn->hash_lock);
2622 diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
2623 index 4021f726b58fd..d856b395ee8eb 100644
2624 --- a/net/sched/sch_choke.c
2625 +++ b/net/sched/sch_choke.c
2626 @@ -368,7 +368,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
2627
2628 ctl = nla_data(tb[TCA_CHOKE_PARMS]);
2629
2630 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
2631 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2632 return -EINVAL;
2633
2634 if (ctl->limit > CHOKE_MAX_QUEUE)
2635 diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
2636 index 8599c6f31b057..e0bc77533acc3 100644
2637 --- a/net/sched/sch_gred.c
2638 +++ b/net/sched/sch_gred.c
2639 @@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
2640 struct gred_sched *table = qdisc_priv(sch);
2641 struct gred_sched_data *q = table->tab[dp];
2642
2643 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
2644 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
2645 NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
2646 return -EINVAL;
2647 }
2648 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
2649 index 1695421333e38..71e167e91a48d 100644
2650 --- a/net/sched/sch_red.c
2651 +++ b/net/sched/sch_red.c
2652 @@ -213,7 +213,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
2653 max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
2654
2655 ctl = nla_data(tb[TCA_RED_PARMS]);
2656 - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
2657 + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
2658 return -EINVAL;
2659
2660 if (ctl->limit > 0) {
2661 diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2662 index 5a6def5e4e6df..6e13e137883c3 100644
2663 --- a/net/sched/sch_sfq.c
2664 +++ b/net/sched/sch_sfq.c
2665 @@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
2666 }
2667
2668 if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
2669 - ctl_v1->Wlog))
2670 + ctl_v1->Wlog, ctl_v1->Scell_log))
2671 return -EINVAL;
2672 if (ctl_v1 && ctl_v1->qth_min) {
2673 p = kmalloc(sizeof(*p), GFP_KERNEL);
2674 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
2675 index f2b1305e79d2f..09116be995113 100644
2676 --- a/net/sched/sch_taprio.c
2677 +++ b/net/sched/sch_taprio.c
2678 @@ -1626,7 +1626,7 @@ static void taprio_destroy(struct Qdisc *sch)
2679 taprio_disable_offload(dev, q, NULL);
2680
2681 if (q->qdiscs) {
2682 - for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
2683 + for (i = 0; i < dev->num_tx_queues; i++)
2684 qdisc_put(q->qdiscs[i]);
2685
2686 kfree(q->qdiscs);
2687 diff --git a/scripts/depmod.sh b/scripts/depmod.sh
2688 index e083bcae343f3..3643b4f896ede 100755
2689 --- a/scripts/depmod.sh
2690 +++ b/scripts/depmod.sh
2691 @@ -15,6 +15,8 @@ if ! test -r System.map ; then
2692 exit 0
2693 fi
2694
2695 +# legacy behavior: "depmod" in /sbin, no /sbin in PATH
2696 +PATH="$PATH:/sbin"
2697 if [ -z $(command -v $DEPMOD) ]; then
2698 echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
2699 echo "This is probably in the kmod package." >&2
2700 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2701 index 192e580561efd..5f515a29668c8 100644
2702 --- a/sound/pci/hda/hda_intel.c
2703 +++ b/sound/pci/hda/hda_intel.c
2704 @@ -2186,8 +2186,6 @@ static struct snd_pci_quirk power_save_blacklist[] = {
2705 SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
2706 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2707 SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
2708 - /* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
2709 - SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
2710 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2711 SND_PCI_QUIRK(0x1558, 0x6504, "Clevo W65_67SB", 0),
2712 /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
2713 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2714 index 396b5503038ad..c3a1ce129b3d9 100644
2715 --- a/sound/pci/hda/patch_conexant.c
2716 +++ b/sound/pci/hda/patch_conexant.c
2717 @@ -1075,6 +1075,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
2718 static const struct hda_device_id snd_hda_id_conexant[] = {
2719 HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
2720 HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
2721 + HDA_CODEC_ENTRY(0x14f120d0, "CX11970", patch_conexant_auto),
2722 HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
2723 HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
2724 HDA_CODEC_ENTRY(0x14f15051, "CX20561 (Hermosa)", patch_conexant_auto),
2725 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2726 index ec0938923f5de..8adbe45a54c11 100644
2727 --- a/sound/pci/hda/patch_realtek.c
2728 +++ b/sound/pci/hda/patch_realtek.c
2729 @@ -6236,6 +6236,7 @@ enum {
2730 ALC221_FIXUP_HP_FRONT_MIC,
2731 ALC292_FIXUP_TPT460,
2732 ALC298_FIXUP_SPK_VOLUME,
2733 + ALC298_FIXUP_LENOVO_SPK_VOLUME,
2734 ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,
2735 ALC269_FIXUP_ATIV_BOOK_8,
2736 ALC221_FIXUP_HP_MIC_NO_PRESENCE,
2737 @@ -7062,6 +7063,10 @@ static const struct hda_fixup alc269_fixups[] = {
2738 .chained = true,
2739 .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
2740 },
2741 + [ALC298_FIXUP_LENOVO_SPK_VOLUME] = {
2742 + .type = HDA_FIXUP_FUNC,
2743 + .v.func = alc298_fixup_speaker_volume,
2744 + },
2745 [ALC295_FIXUP_DISABLE_DAC3] = {
2746 .type = HDA_FIXUP_FUNC,
2747 .v.func = alc295_fixup_disable_dac3,
2748 @@ -7875,6 +7880,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2749 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2750 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
2751 SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
2752 + SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
2753 SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
2754 SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT),
2755 SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED),
2756 @@ -7935,6 +7941,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2757 SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
2758 SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
2759 SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
2760 + SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
2761 + SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
2762 SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
2763 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
2764 SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
2765 @@ -8040,6 +8048,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2766 SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
2767 SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
2768 SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
2769 + SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
2770 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
2771 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
2772 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
2773 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
2774 index 7ef8f3105cdb7..0ab40a8a68fb5 100644
2775 --- a/sound/pci/hda/patch_via.c
2776 +++ b/sound/pci/hda/patch_via.c
2777 @@ -1002,6 +1002,7 @@ static const struct hda_verb vt1802_init_verbs[] = {
2778 enum {
2779 VIA_FIXUP_INTMIC_BOOST,
2780 VIA_FIXUP_ASUS_G75,
2781 + VIA_FIXUP_POWER_SAVE,
2782 };
2783
2784 static void via_fixup_intmic_boost(struct hda_codec *codec,
2785 @@ -1011,6 +1012,13 @@ static void via_fixup_intmic_boost(struct hda_codec *codec,
2786 override_mic_boost(codec, 0x30, 0, 2, 40);
2787 }
2788
2789 +static void via_fixup_power_save(struct hda_codec *codec,
2790 + const struct hda_fixup *fix, int action)
2791 +{
2792 + if (action == HDA_FIXUP_ACT_PRE_PROBE)
2793 + codec->power_save_node = 0;
2794 +}
2795 +
2796 static const struct hda_fixup via_fixups[] = {
2797 [VIA_FIXUP_INTMIC_BOOST] = {
2798 .type = HDA_FIXUP_FUNC,
2799 @@ -1025,11 +1033,16 @@ static const struct hda_fixup via_fixups[] = {
2800 { }
2801 }
2802 },
2803 + [VIA_FIXUP_POWER_SAVE] = {
2804 + .type = HDA_FIXUP_FUNC,
2805 + .v.func = via_fixup_power_save,
2806 + },
2807 };
2808
2809 static const struct snd_pci_quirk vt2002p_fixups[] = {
2810 SND_PCI_QUIRK(0x1043, 0x1487, "Asus G75", VIA_FIXUP_ASUS_G75),
2811 SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
2812 + SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", VIA_FIXUP_POWER_SAVE),
2813 {}
2814 };
2815
2816 diff --git a/sound/usb/midi.c b/sound/usb/midi.c
2817 index bc9068b616bb9..1cc17c449407c 100644
2818 --- a/sound/usb/midi.c
2819 +++ b/sound/usb/midi.c
2820 @@ -1889,6 +1889,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
2821 ms_ep = find_usb_ms_endpoint_descriptor(hostep);
2822 if (!ms_ep)
2823 continue;
2824 + if (ms_ep->bNumEmbMIDIJack > 0x10)
2825 + continue;
2826 if (usb_endpoint_dir_out(ep)) {
2827 if (endpoints[epidx].out_ep) {
2828 if (++epidx >= MIDI_MAX_ENDPOINTS) {
2829 @@ -2141,6 +2143,8 @@ static int snd_usbmidi_detect_roland(struct snd_usb_midi *umidi,
2830 cs_desc[1] == USB_DT_CS_INTERFACE &&
2831 cs_desc[2] == 0xf1 &&
2832 cs_desc[3] == 0x02) {
2833 + if (cs_desc[4] > 0x10 || cs_desc[5] > 0x10)
2834 + continue;
2835 endpoint->in_cables = (1 << cs_desc[4]) - 1;
2836 endpoint->out_cables = (1 << cs_desc[5]) - 1;
2837 return snd_usbmidi_detect_endpoints(umidi, endpoint, 1);
2838 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2839 index 1dfa49d26de91..8f3b40ec02b77 100644
2840 --- a/virt/kvm/kvm_main.c
2841 +++ b/virt/kvm/kvm_main.c
2842 @@ -428,9 +428,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
2843 kvm->mmu_notifier_count++;
2844 need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
2845 range->flags);
2846 - need_tlb_flush |= kvm->tlbs_dirty;
2847 /* we've to flush the tlb before the pages can be freed */
2848 - if (need_tlb_flush)
2849 + if (need_tlb_flush || kvm->tlbs_dirty)
2850 kvm_flush_remote_tlbs(kvm);
2851
2852 spin_unlock(&kvm->mmu_lock);