Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0197-5.4.98-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 39621 byte(s)
-add missing
1 diff --git a/Makefile b/Makefile
2 index 032751f6be0c1..4f6bfcf434e80 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 97
10 +SUBLEVEL = 98
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
15 index 4906e480b5bb6..296b0d7570d06 100644
16 --- a/arch/x86/kvm/svm.c
17 +++ b/arch/x86/kvm/svm.c
18 @@ -1835,6 +1835,8 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
19 struct page **pages;
20 unsigned long first, last;
21
22 + lockdep_assert_held(&kvm->lock);
23 +
24 if (ulen == 0 || uaddr + ulen < uaddr)
25 return NULL;
26
27 @@ -7091,12 +7093,21 @@ static int svm_register_enc_region(struct kvm *kvm,
28 if (!region)
29 return -ENOMEM;
30
31 + mutex_lock(&kvm->lock);
32 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
33 if (!region->pages) {
34 ret = -ENOMEM;
35 + mutex_unlock(&kvm->lock);
36 goto e_free;
37 }
38
39 + region->uaddr = range->addr;
40 + region->size = range->size;
41 +
42 + mutex_lock(&kvm->lock);
43 + list_add_tail(&region->list, &sev->regions_list);
44 + mutex_unlock(&kvm->lock);
45 +
46 /*
47 * The guest may change the memory encryption attribute from C=0 -> C=1
48 * or vice versa for this memory range. Lets make sure caches are
49 @@ -7105,13 +7116,6 @@ static int svm_register_enc_region(struct kvm *kvm,
50 */
51 sev_clflush_pages(region->pages, region->npages);
52
53 - region->uaddr = range->addr;
54 - region->size = range->size;
55 -
56 - mutex_lock(&kvm->lock);
57 - list_add_tail(&region->list, &sev->regions_list);
58 - mutex_unlock(&kvm->lock);
59 -
60 return ret;
61
62 e_free:
63 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
64 index 3d34ac02d76ef..cb3d44d200055 100644
65 --- a/block/blk-cgroup.c
66 +++ b/block/blk-cgroup.c
67 @@ -1089,6 +1089,8 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
68 */
69 void blkcg_destroy_blkgs(struct blkcg *blkcg)
70 {
71 + might_sleep();
72 +
73 spin_lock_irq(&blkcg->lock);
74
75 while (!hlist_empty(&blkcg->blkg_list)) {
76 @@ -1096,14 +1098,20 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
77 struct blkcg_gq, blkcg_node);
78 struct request_queue *q = blkg->q;
79
80 - if (spin_trylock(&q->queue_lock)) {
81 - blkg_destroy(blkg);
82 - spin_unlock(&q->queue_lock);
83 - } else {
84 + if (need_resched() || !spin_trylock(&q->queue_lock)) {
85 + /*
86 + * Given that the system can accumulate a huge number
87 + * of blkgs in pathological cases, check to see if we
88 + * need to reschedule to avoid softlockup.
89 + */
90 spin_unlock_irq(&blkcg->lock);
91 - cpu_relax();
92 + cond_resched();
93 spin_lock_irq(&blkcg->lock);
94 + continue;
95 }
96 +
97 + blkg_destroy(blkg);
98 + spin_unlock(&q->queue_lock);
99 }
100
101 spin_unlock_irq(&blkcg->lock);
102 diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
103 index eddc6d1bdb2d1..82b76df43ae57 100644
104 --- a/drivers/crypto/chelsio/chtls/chtls_cm.c
105 +++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
106 @@ -1047,11 +1047,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
107
108 n = dst_neigh_lookup(dst, &iph->saddr);
109 if (!n || !n->dev)
110 - goto free_sk;
111 + goto free_dst;
112
113 ndev = n->dev;
114 - if (!ndev)
115 - goto free_dst;
116 if (is_vlan_dev(ndev))
117 ndev = vlan_dev_real_dev(ndev);
118
119 @@ -1117,7 +1115,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
120 free_csk:
121 chtls_sock_release(&csk->kref);
122 free_dst:
123 - neigh_release(n);
124 + if (n)
125 + neigh_release(n);
126 dst_release(dst);
127 free_sk:
128 inet_csk_prepare_forced_close(newsk);
129 diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
130 index 5a9f0d17f52c8..e1ef0122ef759 100644
131 --- a/drivers/i2c/busses/i2c-mt65xx.c
132 +++ b/drivers/i2c/busses/i2c-mt65xx.c
133 @@ -1008,7 +1008,8 @@ static int mtk_i2c_probe(struct platform_device *pdev)
134 mtk_i2c_clock_disable(i2c);
135
136 ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq,
137 - IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c);
138 + IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE,
139 + I2C_DRV_NAME, i2c);
140 if (ret < 0) {
141 dev_err(&pdev->dev,
142 "Request I2C IRQ %d fail\n", irq);
143 @@ -1035,7 +1036,16 @@ static int mtk_i2c_remove(struct platform_device *pdev)
144 }
145
146 #ifdef CONFIG_PM_SLEEP
147 -static int mtk_i2c_resume(struct device *dev)
148 +static int mtk_i2c_suspend_noirq(struct device *dev)
149 +{
150 + struct mtk_i2c *i2c = dev_get_drvdata(dev);
151 +
152 + i2c_mark_adapter_suspended(&i2c->adap);
153 +
154 + return 0;
155 +}
156 +
157 +static int mtk_i2c_resume_noirq(struct device *dev)
158 {
159 int ret;
160 struct mtk_i2c *i2c = dev_get_drvdata(dev);
161 @@ -1050,12 +1060,15 @@ static int mtk_i2c_resume(struct device *dev)
162
163 mtk_i2c_clock_disable(i2c);
164
165 + i2c_mark_adapter_resumed(&i2c->adap);
166 +
167 return 0;
168 }
169 #endif
170
171 static const struct dev_pm_ops mtk_i2c_pm = {
172 - SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume)
173 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq,
174 + mtk_i2c_resume_noirq)
175 };
176
177 static struct platform_driver mtk_i2c_driver = {
178 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
179 index f043eefabb4ec..7b1d2dac6ceb8 100644
180 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
181 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
182 @@ -514,7 +514,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
183 const size_t bufsz = sizeof(buf);
184 int pos = 0;
185
186 + mutex_lock(&mvm->mutex);
187 iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
188 + mutex_unlock(&mvm->mutex);
189 +
190 do_div(curr_os, NSEC_PER_USEC);
191 diff = curr_os - curr_gp2;
192 pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
193 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
194 index daae86cd61140..fc6430edd1107 100644
195 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
196 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
197 @@ -4169,6 +4169,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
198 iwl_mvm_binding_remove_vif(mvm, vif);
199
200 out:
201 + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) &&
202 + switching_chanctx)
203 + return;
204 mvmvif->phy_ctxt = NULL;
205 iwl_mvm_power_update_mac(mvm);
206 }
207 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
208 index b04cc6214bac8..8b0576cde797e 100644
209 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
210 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
211 @@ -838,6 +838,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
212 if (!mvm->scan_cmd)
213 goto out_free;
214
215 + /* invalidate ids to prevent accidental removal of sta_id 0 */
216 + mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA;
217 + mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA;
218 +
219 /* Set EBS as successful as long as not stated otherwise by the FW. */
220 mvm->last_ebs_successful = true;
221
222 @@ -1238,6 +1242,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
223 reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
224 if (device_reprobe(reprobe->dev))
225 dev_err(reprobe->dev, "reprobe failed!\n");
226 + put_device(reprobe->dev);
227 kfree(reprobe);
228 module_put(THIS_MODULE);
229 }
230 @@ -1288,7 +1293,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
231 module_put(THIS_MODULE);
232 return;
233 }
234 - reprobe->dev = mvm->trans->dev;
235 + reprobe->dev = get_device(mvm->trans->dev);
236 INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
237 schedule_work(&reprobe->work);
238 } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
239 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
240 index a36aa9e85e0b3..40cafcf40ccf0 100644
241 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
242 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
243 @@ -2070,6 +2070,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
244
245 lockdep_assert_held(&mvm->mutex);
246
247 + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
248 + return -EINVAL;
249 +
250 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
251 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
252 if (ret)
253 @@ -2084,6 +2087,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
254
255 lockdep_assert_held(&mvm->mutex);
256
257 + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
258 + return -EINVAL;
259 +
260 iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0);
261 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
262 if (ret)
263 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
264 index 7a5b024a6d384..eab159205e48b 100644
265 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
266 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
267 @@ -164,8 +164,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
268 /* Allocate IML */
269 iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
270 &trans_pcie->iml_dma_addr, GFP_KERNEL);
271 - if (!iml_img)
272 - return -ENOMEM;
273 + if (!iml_img) {
274 + ret = -ENOMEM;
275 + goto err_free_ctxt_info;
276 + }
277
278 memcpy(iml_img, trans->iml, trans->iml_len);
279
280 @@ -207,6 +209,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
281
282 return 0;
283
284 +err_free_ctxt_info:
285 + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
286 + trans_pcie->ctxt_info_gen3,
287 + trans_pcie->ctxt_info_dma_addr);
288 + trans_pcie->ctxt_info_gen3 = NULL;
289 err_free_prph_info:
290 dma_free_coherent(trans->dev,
291 sizeof(*prph_info),
292 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
293 index d3b58334e13ea..e7dcf8bc99b7c 100644
294 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
295 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
296 @@ -657,6 +657,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
297 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
298 struct iwl_txq *txq = trans_pcie->txq[txq_id];
299
300 + if (!txq) {
301 + IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
302 + return;
303 + }
304 +
305 spin_lock_bh(&txq->lock);
306 while (txq->write_ptr != txq->read_ptr) {
307 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
308 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
309 index c9b8613e69db2..5b9d570df85cc 100644
310 --- a/drivers/regulator/core.c
311 +++ b/drivers/regulator/core.c
312 @@ -1772,13 +1772,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
313 {
314 struct regulator_dev *r;
315 struct device *dev = rdev->dev.parent;
316 - int ret;
317 + int ret = 0;
318
319 /* No supply to resolve? */
320 if (!rdev->supply_name)
321 return 0;
322
323 - /* Supply already resolved? */
324 + /* Supply already resolved? (fast-path without locking contention) */
325 if (rdev->supply)
326 return 0;
327
328 @@ -1788,7 +1788,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
329
330 /* Did the lookup explicitly defer for us? */
331 if (ret == -EPROBE_DEFER)
332 - return ret;
333 + goto out;
334
335 if (have_full_constraints()) {
336 r = dummy_regulator_rdev;
337 @@ -1796,15 +1796,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
338 } else {
339 dev_err(dev, "Failed to resolve %s-supply for %s\n",
340 rdev->supply_name, rdev->desc->name);
341 - return -EPROBE_DEFER;
342 + ret = -EPROBE_DEFER;
343 + goto out;
344 }
345 }
346
347 if (r == rdev) {
348 dev_err(dev, "Supply for %s (%s) resolved to itself\n",
349 rdev->desc->name, rdev->supply_name);
350 - if (!have_full_constraints())
351 - return -EINVAL;
352 + if (!have_full_constraints()) {
353 + ret = -EINVAL;
354 + goto out;
355 + }
356 r = dummy_regulator_rdev;
357 get_device(&r->dev);
358 }
359 @@ -1818,7 +1821,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
360 if (r->dev.parent && r->dev.parent != rdev->dev.parent) {
361 if (!device_is_bound(r->dev.parent)) {
362 put_device(&r->dev);
363 - return -EPROBE_DEFER;
364 + ret = -EPROBE_DEFER;
365 + goto out;
366 }
367 }
368
369 @@ -1826,15 +1830,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
370 ret = regulator_resolve_supply(r);
371 if (ret < 0) {
372 put_device(&r->dev);
373 - return ret;
374 + goto out;
375 + }
376 +
377 + /*
378 + * Recheck rdev->supply with rdev->mutex lock held to avoid a race
379 + * between rdev->supply null check and setting rdev->supply in
380 + * set_supply() from concurrent tasks.
381 + */
382 + regulator_lock(rdev);
383 +
384 + /* Supply just resolved by a concurrent task? */
385 + if (rdev->supply) {
386 + regulator_unlock(rdev);
387 + put_device(&r->dev);
388 + goto out;
389 }
390
391 ret = set_supply(rdev, r);
392 if (ret < 0) {
393 + regulator_unlock(rdev);
394 put_device(&r->dev);
395 - return ret;
396 + goto out;
397 }
398
399 + regulator_unlock(rdev);
400 +
401 /*
402 * In set_machine_constraints() we may have turned this regulator on
403 * but we couldn't propagate to the supply if it hadn't been resolved
404 @@ -1845,11 +1866,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
405 if (ret < 0) {
406 _regulator_put(rdev->supply);
407 rdev->supply = NULL;
408 - return ret;
409 + goto out;
410 }
411 }
412
413 - return 0;
414 +out:
415 + return ret;
416 }
417
418 /* Internal regulator request function */
419 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
420 index ca1d98f274d12..e3a79e6958124 100644
421 --- a/fs/nfs/pnfs.c
422 +++ b/fs/nfs/pnfs.c
423 @@ -2369,7 +2369,13 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
424 * We got an entirely new state ID. Mark all segments for the
425 * inode invalid, and retry the layoutget
426 */
427 - pnfs_mark_layout_stateid_invalid(lo, &free_me);
428 + struct pnfs_layout_range range = {
429 + .iomode = IOMODE_ANY,
430 + .length = NFS4_MAX_UINT64,
431 + };
432 + pnfs_set_plh_return_info(lo, IOMODE_ANY, 0);
433 + pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
434 + &range, 0);
435 goto out_forget;
436 }
437
438 diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c
439 index ae2c87bb0fbec..eb02072d28dd6 100644
440 --- a/fs/squashfs/export.c
441 +++ b/fs/squashfs/export.c
442 @@ -41,12 +41,17 @@ static long long squashfs_inode_lookup(struct super_block *sb, int ino_num)
443 struct squashfs_sb_info *msblk = sb->s_fs_info;
444 int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
445 int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
446 - u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
447 + u64 start;
448 __le64 ino;
449 int err;
450
451 TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
452
453 + if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
454 + return -EINVAL;
455 +
456 + start = le64_to_cpu(msblk->inode_lookup_table[blk]);
457 +
458 err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
459 if (err < 0)
460 return err;
461 @@ -111,7 +116,10 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
462 u64 lookup_table_start, u64 next_table, unsigned int inodes)
463 {
464 unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
465 + unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
466 + int n;
467 __le64 *table;
468 + u64 start, end;
469
470 TRACE("In read_inode_lookup_table, length %d\n", length);
471
472 @@ -121,20 +129,37 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
473 if (inodes == 0)
474 return ERR_PTR(-EINVAL);
475
476 - /* length bytes should not extend into the next table - this check
477 - * also traps instances where lookup_table_start is incorrectly larger
478 - * than the next table start
479 + /*
480 + * The computed size of the lookup table (length bytes) should exactly
481 + * match the table start and end points
482 */
483 - if (lookup_table_start + length > next_table)
484 + if (length != (next_table - lookup_table_start))
485 return ERR_PTR(-EINVAL);
486
487 table = squashfs_read_table(sb, lookup_table_start, length);
488 + if (IS_ERR(table))
489 + return table;
490
491 /*
492 - * table[0] points to the first inode lookup table metadata block,
493 - * this should be less than lookup_table_start
494 + * table[0], table[1], ... table[indexes - 1] store the locations
495 + * of the compressed inode lookup blocks. Each entry should be
496 + * less than the next (i.e. table[0] < table[1]), and the difference
497 + * between them should be SQUASHFS_METADATA_SIZE or less.
498 + * table[indexes - 1] should be less than lookup_table_start, and
499 + * again the difference should be SQUASHFS_METADATA_SIZE or less
500 */
501 - if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
502 + for (n = 0; n < (indexes - 1); n++) {
503 + start = le64_to_cpu(table[n]);
504 + end = le64_to_cpu(table[n + 1]);
505 +
506 + if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
507 + kfree(table);
508 + return ERR_PTR(-EINVAL);
509 + }
510 + }
511 +
512 + start = le64_to_cpu(table[indexes - 1]);
513 + if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
514 kfree(table);
515 return ERR_PTR(-EINVAL);
516 }
517 diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c
518 index 6be5afe7287d6..11581bf31af41 100644
519 --- a/fs/squashfs/id.c
520 +++ b/fs/squashfs/id.c
521 @@ -35,10 +35,15 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
522 struct squashfs_sb_info *msblk = sb->s_fs_info;
523 int block = SQUASHFS_ID_BLOCK(index);
524 int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
525 - u64 start_block = le64_to_cpu(msblk->id_table[block]);
526 + u64 start_block;
527 __le32 disk_id;
528 int err;
529
530 + if (index >= msblk->ids)
531 + return -EINVAL;
532 +
533 + start_block = le64_to_cpu(msblk->id_table[block]);
534 +
535 err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
536 sizeof(disk_id));
537 if (err < 0)
538 @@ -56,7 +61,10 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
539 u64 id_table_start, u64 next_table, unsigned short no_ids)
540 {
541 unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
542 + unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
543 + int n;
544 __le64 *table;
545 + u64 start, end;
546
547 TRACE("In read_id_index_table, length %d\n", length);
548
549 @@ -67,20 +75,36 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
550 return ERR_PTR(-EINVAL);
551
552 /*
553 - * length bytes should not extend into the next table - this check
554 - * also traps instances where id_table_start is incorrectly larger
555 - * than the next table start
556 + * The computed size of the index table (length bytes) should exactly
557 + * match the table start and end points
558 */
559 - if (id_table_start + length > next_table)
560 + if (length != (next_table - id_table_start))
561 return ERR_PTR(-EINVAL);
562
563 table = squashfs_read_table(sb, id_table_start, length);
564 + if (IS_ERR(table))
565 + return table;
566
567 /*
568 - * table[0] points to the first id lookup table metadata block, this
569 - * should be less than id_table_start
570 + * table[0], table[1], ... table[indexes - 1] store the locations
571 + * of the compressed id blocks. Each entry should be less than
572 + * the next (i.e. table[0] < table[1]), and the difference between them
573 + * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
574 + * should be less than id_table_start, and again the difference
575 + * should be SQUASHFS_METADATA_SIZE or less
576 */
577 - if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
578 + for (n = 0; n < (indexes - 1); n++) {
579 + start = le64_to_cpu(table[n]);
580 + end = le64_to_cpu(table[n + 1]);
581 +
582 + if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
583 + kfree(table);
584 + return ERR_PTR(-EINVAL);
585 + }
586 + }
587 +
588 + start = le64_to_cpu(table[indexes - 1]);
589 + if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
590 kfree(table);
591 return ERR_PTR(-EINVAL);
592 }
593 diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
594 index 34c21ffb6df37..166e98806265b 100644
595 --- a/fs/squashfs/squashfs_fs_sb.h
596 +++ b/fs/squashfs/squashfs_fs_sb.h
597 @@ -64,5 +64,6 @@ struct squashfs_sb_info {
598 unsigned int inodes;
599 unsigned int fragments;
600 int xattr_ids;
601 + unsigned int ids;
602 };
603 #endif
604 diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
605 index 0cc4ceec05624..2110323b610b9 100644
606 --- a/fs/squashfs/super.c
607 +++ b/fs/squashfs/super.c
608 @@ -166,6 +166,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
609 msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
610 msblk->inodes = le32_to_cpu(sblk->inodes);
611 msblk->fragments = le32_to_cpu(sblk->fragments);
612 + msblk->ids = le16_to_cpu(sblk->no_ids);
613 flags = le16_to_cpu(sblk->flags);
614
615 TRACE("Found valid superblock on %pg\n", sb->s_bdev);
616 @@ -177,7 +178,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
617 TRACE("Block size %d\n", msblk->block_size);
618 TRACE("Number of inodes %d\n", msblk->inodes);
619 TRACE("Number of fragments %d\n", msblk->fragments);
620 - TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
621 + TRACE("Number of ids %d\n", msblk->ids);
622 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
623 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
624 TRACE("sblk->fragment_table_start %llx\n",
625 @@ -236,8 +237,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
626 allocate_id_index_table:
627 /* Allocate and read id index table */
628 msblk->id_table = squashfs_read_id_index_table(sb,
629 - le64_to_cpu(sblk->id_table_start), next_table,
630 - le16_to_cpu(sblk->no_ids));
631 + le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
632 if (IS_ERR(msblk->id_table)) {
633 errorf(fc, "unable to read id index table");
634 err = PTR_ERR(msblk->id_table);
635 diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h
636 index 184129afd4566..d8a270d3ac4cb 100644
637 --- a/fs/squashfs/xattr.h
638 +++ b/fs/squashfs/xattr.h
639 @@ -17,8 +17,16 @@ extern int squashfs_xattr_lookup(struct super_block *, unsigned int, int *,
640 static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
641 u64 start, u64 *xattr_table_start, int *xattr_ids)
642 {
643 + struct squashfs_xattr_id_table *id_table;
644 +
645 + id_table = squashfs_read_table(sb, start, sizeof(*id_table));
646 + if (IS_ERR(id_table))
647 + return (__le64 *) id_table;
648 +
649 + *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
650 + kfree(id_table);
651 +
652 ERROR("Xattrs in filesystem, these will be ignored\n");
653 - *xattr_table_start = start;
654 return ERR_PTR(-ENOTSUPP);
655 }
656
657 diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c
658 index d99e08464554f..ead66670b41a5 100644
659 --- a/fs/squashfs/xattr_id.c
660 +++ b/fs/squashfs/xattr_id.c
661 @@ -31,10 +31,15 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
662 struct squashfs_sb_info *msblk = sb->s_fs_info;
663 int block = SQUASHFS_XATTR_BLOCK(index);
664 int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
665 - u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
666 + u64 start_block;
667 struct squashfs_xattr_id id;
668 int err;
669
670 + if (index >= msblk->xattr_ids)
671 + return -EINVAL;
672 +
673 + start_block = le64_to_cpu(msblk->xattr_id_table[block]);
674 +
675 err = squashfs_read_metadata(sb, &id, &start_block, &offset,
676 sizeof(id));
677 if (err < 0)
678 @@ -50,13 +55,17 @@ int squashfs_xattr_lookup(struct super_block *sb, unsigned int index,
679 /*
680 * Read uncompressed xattr id lookup table indexes from disk into memory
681 */
682 -__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
683 +__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
684 u64 *xattr_table_start, int *xattr_ids)
685 {
686 - unsigned int len;
687 + struct squashfs_sb_info *msblk = sb->s_fs_info;
688 + unsigned int len, indexes;
689 struct squashfs_xattr_id_table *id_table;
690 + __le64 *table;
691 + u64 start, end;
692 + int n;
693
694 - id_table = squashfs_read_table(sb, start, sizeof(*id_table));
695 + id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
696 if (IS_ERR(id_table))
697 return (__le64 *) id_table;
698
699 @@ -70,13 +79,52 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
700 if (*xattr_ids == 0)
701 return ERR_PTR(-EINVAL);
702
703 - /* xattr_table should be less than start */
704 - if (*xattr_table_start >= start)
705 + len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
706 + indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
707 +
708 + /*
709 + * The computed size of the index table (len bytes) should exactly
710 + * match the table start and end points
711 + */
712 + start = table_start + sizeof(*id_table);
713 + end = msblk->bytes_used;
714 +
715 + if (len != (end - start))
716 return ERR_PTR(-EINVAL);
717
718 - len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
719 + table = squashfs_read_table(sb, start, len);
720 + if (IS_ERR(table))
721 + return table;
722 +
723 + /* table[0], table[1], ... table[indexes - 1] store the locations
724 + * of the compressed xattr id blocks. Each entry should be less than
725 + * the next (i.e. table[0] < table[1]), and the difference between them
726 + * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
727 + * should be less than table_start, and again the difference
728 + * should be SQUASHFS_METADATA_SIZE or less.
729 + *
730 + * Finally xattr_table_start should be less than table[0].
731 + */
732 + for (n = 0; n < (indexes - 1); n++) {
733 + start = le64_to_cpu(table[n]);
734 + end = le64_to_cpu(table[n + 1]);
735 +
736 + if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
737 + kfree(table);
738 + return ERR_PTR(-EINVAL);
739 + }
740 + }
741 +
742 + start = le64_to_cpu(table[indexes - 1]);
743 + if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
744 + kfree(table);
745 + return ERR_PTR(-EINVAL);
746 + }
747
748 - TRACE("In read_xattr_index_table, length %d\n", len);
749 + if (*xattr_table_start >= le64_to_cpu(table[0])) {
750 + kfree(table);
751 + return ERR_PTR(-EINVAL);
752 + }
753
754 - return squashfs_read_table(sb, start + sizeof(*id_table), len);
755 + return table;
756 }
757 diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
758 index a60488867dd06..a121fd8e7c3a0 100644
759 --- a/include/linux/kprobes.h
760 +++ b/include/linux/kprobes.h
761 @@ -232,7 +232,7 @@ extern void kprobes_inc_nmissed_count(struct kprobe *p);
762 extern bool arch_within_kprobe_blacklist(unsigned long addr);
763 extern int arch_populate_kprobe_blacklist(void);
764 extern bool arch_kprobe_on_func_entry(unsigned long offset);
765 -extern bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
766 +extern int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
767
768 extern bool within_kprobe_blacklist(unsigned long addr);
769 extern int kprobe_add_ksym_blacklist(unsigned long entry);
770 diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
771 index 9db6097c22c5d..a8d68c5a4ca61 100644
772 --- a/include/linux/sunrpc/xdr.h
773 +++ b/include/linux/sunrpc/xdr.h
774 @@ -27,8 +27,7 @@ struct rpc_rqst;
775 #define XDR_QUADLEN(l) (((l) + 3) >> 2)
776
777 /*
778 - * Generic opaque `network object.' At the kernel level, this type
779 - * is used only by lockd.
780 + * Generic opaque `network object.'
781 */
782 #define XDR_MAX_NETOBJ 1024
783 struct xdr_netobj {
784 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
785 index a67bfa803d983..2c248c4f6419c 100644
786 --- a/kernel/bpf/verifier.c
787 +++ b/kernel/bpf/verifier.c
788 @@ -9002,30 +9002,28 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
789 insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
790 insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
791 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
792 - struct bpf_insn mask_and_div[] = {
793 - BPF_MOV32_REG(insn->src_reg, insn->src_reg),
794 + bool isdiv = BPF_OP(insn->code) == BPF_DIV;
795 + struct bpf_insn *patchlet;
796 + struct bpf_insn chk_and_div[] = {
797 /* Rx div 0 -> 0 */
798 - BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
799 + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
800 + BPF_JNE | BPF_K, insn->src_reg,
801 + 0, 2, 0),
802 BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
803 BPF_JMP_IMM(BPF_JA, 0, 0, 1),
804 *insn,
805 };
806 - struct bpf_insn mask_and_mod[] = {
807 - BPF_MOV32_REG(insn->src_reg, insn->src_reg),
808 + struct bpf_insn chk_and_mod[] = {
809 /* Rx mod 0 -> Rx */
810 - BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
811 + BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
812 + BPF_JEQ | BPF_K, insn->src_reg,
813 + 0, 1, 0),
814 *insn,
815 };
816 - struct bpf_insn *patchlet;
817
818 - if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
819 - insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
820 - patchlet = mask_and_div + (is64 ? 1 : 0);
821 - cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
822 - } else {
823 - patchlet = mask_and_mod + (is64 ? 1 : 0);
824 - cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
825 - }
826 + patchlet = isdiv ? chk_and_div : chk_and_mod;
827 + cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
828 + ARRAY_SIZE(chk_and_mod);
829
830 new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
831 if (!new_prog)
832 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
833 index 26ae92c12fc22..a7812c115e487 100644
834 --- a/kernel/kprobes.c
835 +++ b/kernel/kprobes.c
836 @@ -1948,29 +1948,45 @@ bool __weak arch_kprobe_on_func_entry(unsigned long offset)
837 return !offset;
838 }
839
840 -bool kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
841 +/**
842 + * kprobe_on_func_entry() -- check whether given address is function entry
843 + * @addr: Target address
844 + * @sym: Target symbol name
845 + * @offset: The offset from the symbol or the address
846 + *
847 + * This checks whether the given @addr+@offset or @sym+@offset is on the
848 + * function entry address or not.
849 + * This returns 0 if it is the function entry, or -EINVAL if it is not.
850 + * And also it returns -ENOENT if it fails the symbol or address lookup.
851 + * Caller must pass @addr or @sym (either one must be NULL), or this
852 + * returns -EINVAL.
853 + */
854 +int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
855 {
856 kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
857
858 if (IS_ERR(kp_addr))
859 - return false;
860 + return PTR_ERR(kp_addr);
861
862 - if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset) ||
863 - !arch_kprobe_on_func_entry(offset))
864 - return false;
865 + if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
866 + return -ENOENT;
867
868 - return true;
869 + if (!arch_kprobe_on_func_entry(offset))
870 + return -EINVAL;
871 +
872 + return 0;
873 }
874
875 int register_kretprobe(struct kretprobe *rp)
876 {
877 - int ret = 0;
878 + int ret;
879 struct kretprobe_instance *inst;
880 int i;
881 void *addr;
882
883 - if (!kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset))
884 - return -EINVAL;
885 + ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
886 + if (ret)
887 + return ret;
888
889 /* If only rp->kp.addr is specified, check reregistering kprobes */
890 if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
891 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
892 index 1074a69beff3f..233322c77b76c 100644
893 --- a/kernel/trace/trace_kprobe.c
894 +++ b/kernel/trace/trace_kprobe.c
895 @@ -220,9 +220,9 @@ bool trace_kprobe_on_func_entry(struct trace_event_call *call)
896 {
897 struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
898
899 - return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
900 + return tk ? (kprobe_on_func_entry(tk->rp.kp.addr,
901 tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
902 - tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
903 + tk->rp.kp.addr ? 0 : tk->rp.kp.offset) == 0) : false;
904 }
905
906 bool trace_kprobe_error_injectable(struct trace_event_call *call)
907 @@ -811,9 +811,11 @@ static int trace_kprobe_create(int argc, const char *argv[])
908 trace_probe_log_err(0, BAD_PROBE_ADDR);
909 goto parse_error;
910 }
911 - if (kprobe_on_func_entry(NULL, symbol, offset))
912 + ret = kprobe_on_func_entry(NULL, symbol, offset);
913 + if (ret == 0)
914 flags |= TPARG_FL_FENTRY;
915 - if (offset && is_return && !(flags & TPARG_FL_FENTRY)) {
916 + /* Defer the ENOENT case until register kprobe */
917 + if (ret == -EINVAL && is_return) {
918 trace_probe_log_err(0, BAD_RETPROBE);
919 goto parse_error;
920 }
921 diff --git a/net/key/af_key.c b/net/key/af_key.c
922 index a915bc86620af..907d04a474597 100644
923 --- a/net/key/af_key.c
924 +++ b/net/key/af_key.c
925 @@ -2902,7 +2902,7 @@ static int count_ah_combs(const struct xfrm_tmpl *t)
926 break;
927 if (!aalg->pfkey_supported)
928 continue;
929 - if (aalg_tmpl_set(t, aalg) && aalg->available)
930 + if (aalg_tmpl_set(t, aalg))
931 sz += sizeof(struct sadb_comb);
932 }
933 return sz + sizeof(struct sadb_prop);
934 @@ -2920,7 +2920,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
935 if (!ealg->pfkey_supported)
936 continue;
937
938 - if (!(ealg_tmpl_set(t, ealg) && ealg->available))
939 + if (!(ealg_tmpl_set(t, ealg)))
940 continue;
941
942 for (k = 1; ; k++) {
943 @@ -2931,7 +2931,7 @@ static int count_esp_combs(const struct xfrm_tmpl *t)
944 if (!aalg->pfkey_supported)
945 continue;
946
947 - if (aalg_tmpl_set(t, aalg) && aalg->available)
948 + if (aalg_tmpl_set(t, aalg))
949 sz += sizeof(struct sadb_comb);
950 }
951 }
952 diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
953 index 5fe2b645912f6..132f8423addaa 100644
954 --- a/net/mac80211/spectmgmt.c
955 +++ b/net/mac80211/spectmgmt.c
956 @@ -132,16 +132,20 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
957 }
958
959 if (wide_bw_chansw_ie) {
960 + u8 new_seg1 = wide_bw_chansw_ie->new_center_freq_seg1;
961 struct ieee80211_vht_operation vht_oper = {
962 .chan_width =
963 wide_bw_chansw_ie->new_channel_width,
964 .center_freq_seg0_idx =
965 wide_bw_chansw_ie->new_center_freq_seg0,
966 - .center_freq_seg1_idx =
967 - wide_bw_chansw_ie->new_center_freq_seg1,
968 + .center_freq_seg1_idx = new_seg1,
969 /* .basic_mcs_set doesn't matter */
970 };
971 - struct ieee80211_ht_operation ht_oper = {};
972 + struct ieee80211_ht_operation ht_oper = {
973 + .operation_mode =
974 + cpu_to_le16(new_seg1 <<
975 + IEEE80211_HT_OP_MODE_CCFS2_SHIFT),
976 + };
977
978 /* default, for the case of IEEE80211_VHT_CHANWIDTH_USE_HT,
979 * to the previously parsed chandef
980 diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
981 index 5fc6c028f89c0..b7a71578bd986 100644
982 --- a/net/sunrpc/auth_gss/auth_gss.c
983 +++ b/net/sunrpc/auth_gss/auth_gss.c
984 @@ -29,6 +29,7 @@
985 #include <linux/uaccess.h>
986 #include <linux/hashtable.h>
987
988 +#include "auth_gss_internal.h"
989 #include "../netns.h"
990
991 #include <trace/events/rpcgss.h>
992 @@ -125,35 +126,6 @@ gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
993 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
994 }
995
996 -static const void *
997 -simple_get_bytes(const void *p, const void *end, void *res, size_t len)
998 -{
999 - const void *q = (const void *)((const char *)p + len);
1000 - if (unlikely(q > end || q < p))
1001 - return ERR_PTR(-EFAULT);
1002 - memcpy(res, p, len);
1003 - return q;
1004 -}
1005 -
1006 -static inline const void *
1007 -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
1008 -{
1009 - const void *q;
1010 - unsigned int len;
1011 -
1012 - p = simple_get_bytes(p, end, &len, sizeof(len));
1013 - if (IS_ERR(p))
1014 - return p;
1015 - q = (const void *)((const char *)p + len);
1016 - if (unlikely(q > end || q < p))
1017 - return ERR_PTR(-EFAULT);
1018 - dest->data = kmemdup(p, len, GFP_NOFS);
1019 - if (unlikely(dest->data == NULL))
1020 - return ERR_PTR(-ENOMEM);
1021 - dest->len = len;
1022 - return q;
1023 -}
1024 -
1025 static struct gss_cl_ctx *
1026 gss_cred_get_ctx(struct rpc_cred *cred)
1027 {
1028 diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h
1029 new file mode 100644
1030 index 0000000000000..f6d9631bd9d00
1031 --- /dev/null
1032 +++ b/net/sunrpc/auth_gss/auth_gss_internal.h
1033 @@ -0,0 +1,45 @@
1034 +// SPDX-License-Identifier: BSD-3-Clause
1035 +/*
1036 + * linux/net/sunrpc/auth_gss/auth_gss_internal.h
1037 + *
1038 + * Internal definitions for RPCSEC_GSS client authentication
1039 + *
1040 + * Copyright (c) 2000 The Regents of the University of Michigan.
1041 + * All rights reserved.
1042 + *
1043 + */
1044 +#include <linux/err.h>
1045 +#include <linux/string.h>
1046 +#include <linux/sunrpc/xdr.h>
1047 +
1048 +static inline const void *
1049 +simple_get_bytes(const void *p, const void *end, void *res, size_t len)
1050 +{
1051 + const void *q = (const void *)((const char *)p + len);
1052 + if (unlikely(q > end || q < p))
1053 + return ERR_PTR(-EFAULT);
1054 + memcpy(res, p, len);
1055 + return q;
1056 +}
1057 +
1058 +static inline const void *
1059 +simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
1060 +{
1061 + const void *q;
1062 + unsigned int len;
1063 +
1064 + p = simple_get_bytes(p, end, &len, sizeof(len));
1065 + if (IS_ERR(p))
1066 + return p;
1067 + q = (const void *)((const char *)p + len);
1068 + if (unlikely(q > end || q < p))
1069 + return ERR_PTR(-EFAULT);
1070 + if (len) {
1071 + dest->data = kmemdup(p, len, GFP_NOFS);
1072 + if (unlikely(dest->data == NULL))
1073 + return ERR_PTR(-ENOMEM);
1074 + } else
1075 + dest->data = NULL;
1076 + dest->len = len;
1077 + return q;
1078 +}
1079 diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
1080 index 6e5d6d2402158..b552dd4f32f80 100644
1081 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c
1082 +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
1083 @@ -21,6 +21,8 @@
1084 #include <linux/sunrpc/xdr.h>
1085 #include <linux/sunrpc/gss_krb5_enctypes.h>
1086
1087 +#include "auth_gss_internal.h"
1088 +
1089 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
1090 # define RPCDBG_FACILITY RPCDBG_AUTH
1091 #endif
1092 @@ -164,35 +166,6 @@ get_gss_krb5_enctype(int etype)
1093 return NULL;
1094 }
1095
1096 -static const void *
1097 -simple_get_bytes(const void *p, const void *end, void *res, int len)
1098 -{
1099 - const void *q = (const void *)((const char *)p + len);
1100 - if (unlikely(q > end || q < p))
1101 - return ERR_PTR(-EFAULT);
1102 - memcpy(res, p, len);
1103 - return q;
1104 -}
1105 -
1106 -static const void *
1107 -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
1108 -{
1109 - const void *q;
1110 - unsigned int len;
1111 -
1112 - p = simple_get_bytes(p, end, &len, sizeof(len));
1113 - if (IS_ERR(p))
1114 - return p;
1115 - q = (const void *)((const char *)p + len);
1116 - if (unlikely(q > end || q < p))
1117 - return ERR_PTR(-EFAULT);
1118 - res->data = kmemdup(p, len, GFP_NOFS);
1119 - if (unlikely(res->data == NULL))
1120 - return ERR_PTR(-ENOMEM);
1121 - res->len = len;
1122 - return q;
1123 -}
1124 -
1125 static inline const void *
1126 get_key(const void *p, const void *end,
1127 struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
1128 diff --git a/sound/soc/codecs/ak4458.c b/sound/soc/codecs/ak4458.c
1129 index 71562154c0b1e..217e8ce9a4ba4 100644
1130 --- a/sound/soc/codecs/ak4458.c
1131 +++ b/sound/soc/codecs/ak4458.c
1132 @@ -523,18 +523,10 @@ static struct snd_soc_dai_driver ak4497_dai = {
1133 .ops = &ak4458_dai_ops,
1134 };
1135
1136 -static void ak4458_power_off(struct ak4458_priv *ak4458)
1137 +static void ak4458_reset(struct ak4458_priv *ak4458, bool active)
1138 {
1139 if (ak4458->reset_gpiod) {
1140 - gpiod_set_value_cansleep(ak4458->reset_gpiod, 0);
1141 - usleep_range(1000, 2000);
1142 - }
1143 -}
1144 -
1145 -static void ak4458_power_on(struct ak4458_priv *ak4458)
1146 -{
1147 - if (ak4458->reset_gpiod) {
1148 - gpiod_set_value_cansleep(ak4458->reset_gpiod, 1);
1149 + gpiod_set_value_cansleep(ak4458->reset_gpiod, active);
1150 usleep_range(1000, 2000);
1151 }
1152 }
1153 @@ -548,7 +540,7 @@ static int ak4458_init(struct snd_soc_component *component)
1154 if (ak4458->mute_gpiod)
1155 gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
1156
1157 - ak4458_power_on(ak4458);
1158 + ak4458_reset(ak4458, false);
1159
1160 ret = snd_soc_component_update_bits(component, AK4458_00_CONTROL1,
1161 0x80, 0x80); /* ACKS bit = 1; 10000000 */
1162 @@ -571,7 +563,7 @@ static void ak4458_remove(struct snd_soc_component *component)
1163 {
1164 struct ak4458_priv *ak4458 = snd_soc_component_get_drvdata(component);
1165
1166 - ak4458_power_off(ak4458);
1167 + ak4458_reset(ak4458, true);
1168 }
1169
1170 #ifdef CONFIG_PM
1171 @@ -581,7 +573,7 @@ static int __maybe_unused ak4458_runtime_suspend(struct device *dev)
1172
1173 regcache_cache_only(ak4458->regmap, true);
1174
1175 - ak4458_power_off(ak4458);
1176 + ak4458_reset(ak4458, true);
1177
1178 if (ak4458->mute_gpiod)
1179 gpiod_set_value_cansleep(ak4458->mute_gpiod, 0);
1180 @@ -596,8 +588,8 @@ static int __maybe_unused ak4458_runtime_resume(struct device *dev)
1181 if (ak4458->mute_gpiod)
1182 gpiod_set_value_cansleep(ak4458->mute_gpiod, 1);
1183
1184 - ak4458_power_off(ak4458);
1185 - ak4458_power_on(ak4458);
1186 + ak4458_reset(ak4458, true);
1187 + ak4458_reset(ak4458, false);
1188
1189 regcache_cache_only(ak4458->regmap, false);
1190 regcache_mark_dirty(ak4458->regmap);
1191 diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
1192 index 2cb719893324a..1940b17f27efa 100644
1193 --- a/sound/soc/intel/skylake/skl-topology.c
1194 +++ b/sound/soc/intel/skylake/skl-topology.c
1195 @@ -3632,7 +3632,7 @@ static void skl_tplg_complete(struct snd_soc_component *component)
1196 sprintf(chan_text, "c%d", mach->mach_params.dmic_num);
1197
1198 for (i = 0; i < se->items; i++) {
1199 - struct snd_ctl_elem_value val;
1200 + struct snd_ctl_elem_value val = {};
1201
1202 if (strstr(texts[i], chan_text)) {
1203 val.value.enumerated.item[0] = i;