Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0203-5.4.104-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 24911 byte(s)
-add missing
diff --git a/Makefile b/Makefile
index c95435d78fcbb..e94dcf2d77f55 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 103
+SUBLEVEL = 104
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 30b877f8b85ef..0cfd685774891 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1844,7 +1844,7 @@ int syscall_trace_enter(struct pt_regs *regs)

if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
- if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
+ if (flags & _TIF_SYSCALL_EMU)
return -1;
}

diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 137a7ba053d78..8e3d0228b05bb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -325,22 +325,22 @@ static void rpm_put_suppliers(struct device *dev)
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
- int retval, idx;
bool use_links = dev->power.links_count > 0;
+ bool get = false;
+ int retval, idx;
+ bool put;

if (dev->power.irq_safe) {
spin_unlock(&dev->power.lock);
+ } else if (!use_links) {
+ spin_unlock_irq(&dev->power.lock);
} else {
+ get = dev->power.runtime_status == RPM_RESUMING;
+
spin_unlock_irq(&dev->power.lock);

- /*
- * Resume suppliers if necessary.
- *
- * The device's runtime PM status cannot change until this
- * routine returns, so it is safe to read the status outside of
- * the lock.
- */
- if (use_links && dev->power.runtime_status == RPM_RESUMING) {
+ /* Resume suppliers if necessary. */
+ if (get) {
idx = device_links_read_lock();

retval = rpm_get_suppliers(dev);
@@ -355,24 +355,36 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)

if (dev->power.irq_safe) {
spin_lock(&dev->power.lock);
- } else {
- /*
- * If the device is suspending and the callback has returned
- * success, drop the usage counters of the suppliers that have
- * been reference counted on its resume.
- *
- * Do that if resume fails too.
- */
- if (use_links
- && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
- || (dev->power.runtime_status == RPM_RESUMING && retval))) {
- idx = device_links_read_lock();
+ return retval;
+ }

- fail:
- rpm_put_suppliers(dev);
+ spin_lock_irq(&dev->power.lock);

- device_links_read_unlock(idx);
- }
+ if (!use_links)
+ return retval;
+
+ /*
+ * If the device is suspending and the callback has returned success,
+ * drop the usage counters of the suppliers that have been reference
+ * counted on its resume.
+ *
+ * Do that if the resume fails too.
+ */
+ put = dev->power.runtime_status == RPM_SUSPENDING && !retval;
+ if (put)
+ __update_runtime_status(dev, RPM_SUSPENDED);
+ else
+ put = get && retval;
+
+ if (put) {
+ spin_unlock_irq(&dev->power.lock);
+
+ idx = device_links_read_lock();
+
+fail:
+ rpm_put_suppliers(dev);
+
+ device_links_read_unlock(idx);

spin_lock_irq(&dev->power.lock);
}
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 10f6368117d81..804d28faa97b0 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -165,15 +165,17 @@ static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
{
struct rsxx_cardinfo *card = file_inode(fp)->i_private;
char *buf;
- ssize_t st;
+ int st;

buf = kzalloc(cnt, GFP_KERNEL);
if (!buf)
return -ENOMEM;

st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- if (!st)
- st = copy_to_user(ubuf, buf, cnt);
+ if (!st) {
+ if (copy_to_user(ubuf, buf, cnt))
+ st = -EFAULT;
+ }
kfree(buf);
if (st)
return st;
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 46d1fac247db7..7da35867b6ad3 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -618,12 +618,22 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
const char *desc = "attempting to generate an interrupt";
u32 cap2;
cap_t cap;
+ int ret;

+ /* TPM 2.0 */
if (chip->flags & TPM_CHIP_FLAG_TPM2)
return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
- else
- return tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc,
- 0);
+
+ /* TPM 1.2 */
+ ret = request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+
+ release_locality(chip, 0);
+
+ return ret;
}

/* Register the IRQ and issue a command that will cause an interrupt. If an
@@ -929,11 +939,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
init_waitqueue_head(&priv->read_queue);
init_waitqueue_head(&priv->int_queue);
if (irq != -1) {
- /* Before doing irq testing issue a command to the TPM in polling mode
+ /*
+ * Before doing irq testing issue a command to the TPM in polling mode
* to make sure it works. May as well use that command to set the
* proper timeouts for the driver.
*/
- if (tpm_get_timeouts(chip)) {
+
+ rc = request_locality(chip, 0);
+ if (rc < 0)
+ goto out_err;
+
+ rc = tpm_get_timeouts(chip);
+
+ release_locality(chip, 0);
+
+ if (rc) {
dev_err(dev, "Could not get TPM timeouts and durations\n");
rc = -ENODEV;
goto out_err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 700e26b69abca..a9a81e55777bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -240,7 +240,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;

- value = RREG32_PCIE(*pos >> 2);
+ value = RREG32_PCIE(*pos);
r = put_user(value, (uint32_t *)buf);
if (r)
return r;
@@ -283,7 +283,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r)
return r;

- WREG32_PCIE(*pos >> 2, value);
+ WREG32_PCIE(*pos, value);

result += 4;
buf += 4;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 4d6f25fdcc0ef..664e0f374ac00 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2022,8 +2022,10 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(

num_alloc_xa_entries++;
event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
- if (!event_sub)
+ if (!event_sub) {
+ err = -ENOMEM;
goto err;
+ }

list_add_tail(&event_sub->event_list, &sub_list);
if (use_eventfd) {
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig
index 71a773f607bbc..0e8f1d05dfb2a 100644
--- a/drivers/infiniband/sw/rxe/Kconfig
+++ b/drivers/infiniband/sw/rxe/Kconfig
@@ -4,6 +4,7 @@ config RDMA_RXE
depends on INET && PCI && INFINIBAND
depends on INFINIBAND_VIRT_DMA
select NET_UDP_TUNNEL
+ select CRYPTO
select CRYPTO_CRC32
select DMA_VIRT_OPS
---help---
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index a9529dc2b26e6..e8c37d9a652dc 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1438,6 +1438,10 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+ if (s >= c->start)
+ s -= c->start;
+ else
+ s = 0;
if (likely(c->sectors_per_block_bits >= 0))
s >>= c->sectors_per_block_bits;
else
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index fb41b4f23c489..66f4c6398f670 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -61,19 +61,18 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
unsigned *offset, struct dm_buffer **buf)
{
- u64 position, block;
+ u64 position, block, rem;
u8 *res;

position = (index + rsb) * v->fec->roots;
- block = position >> v->data_dev_block_bits;
- *offset = (unsigned)(position - (block << v->data_dev_block_bits));
+ block = div64_u64_rem(position, v->fec->roots << SECTOR_SHIFT, &rem);
+ *offset = (unsigned)rem;

- res = dm_bufio_read(v->fec->bufio, v->fec->start + block, buf);
+ res = dm_bufio_read(v->fec->bufio, block, buf);
if (IS_ERR(res)) {
DMERR("%s: FEC %llu: parity read failed (block %llu): %ld",
v->data_dev->name, (unsigned long long)rsb,
- (unsigned long long)(v->fec->start + block),
- PTR_ERR(res));
+ (unsigned long long)block, PTR_ERR(res));
*buf = NULL;
}

@@ -155,7 +154,7 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,

/* read the next block when we run out of parity bytes */
offset += v->fec->roots;
- if (offset >= 1 << v->data_dev_block_bits) {
+ if (offset >= v->fec->roots << SECTOR_SHIFT) {
dm_bufio_release(buf);

par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -674,7 +673,7 @@ int verity_fec_ctr(struct dm_verity *v)
{
struct dm_verity_fec *f = v->fec;
struct dm_target *ti = v->ti;
- u64 hash_blocks;
+ u64 hash_blocks, fec_blocks;
int ret;

if (!verity_fec_is_enabled(v)) {
@@ -744,15 +743,17 @@ int verity_fec_ctr(struct dm_verity *v)
}

f->bufio = dm_bufio_client_create(f->dev->bdev,
- 1 << v->data_dev_block_bits,
+ f->roots << SECTOR_SHIFT,
1, 0, NULL, NULL);
if (IS_ERR(f->bufio)) {
ti->error = "Cannot initialize FEC bufio client";
return PTR_ERR(f->bufio);
}

- if (dm_bufio_get_device_size(f->bufio) <
- ((f->start + f->rounds * f->roots) >> v->data_dev_block_bits)) {
+ dm_bufio_set_sector_offset(f->bufio, f->start << (v->data_dev_block_bits - SECTOR_SHIFT));
+
+ fec_blocks = div64_u64(f->rounds * f->roots, v->fec->roots << SECTOR_SHIFT);
+ if (dm_bufio_get_device_size(f->bufio) < fec_blocks) {
ti->error = "FEC device is too small";
return -E2BIG;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 131be1fa770cb..bd8decc54b871 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3959,6 +3959,7 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)

switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
@@ -3989,6 +3990,7 @@ static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index bef62b01824de..7dad8794ee388 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -649,7 +649,7 @@ static int btrfs_delayed_inode_reserve_metadata(
btrfs_ino(inode),
num_bytes, 1);
} else {
- btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize);
+ btrfs_qgroup_free_meta_prealloc(root, num_bytes);
}
return ret;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 4126513e2429c..f8e5c47b95e40 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -3151,8 +3151,11 @@ reserve_space:
goto out;
ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
alloc_start, bytes_to_reserve);
- if (ret)
+ if (ret) {
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, &cached_state);
goto out;
+ }
ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
alloc_end - alloc_start,
i_blocksize(inode),
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8ed71b3b25466..4bbd4b09fb96f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1907,7 +1907,10 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
if (vol_args->flags & BTRFS_SUBVOL_RDONLY)
readonly = true;
if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) {
- if (vol_args->size > PAGE_SIZE) {
+ u64 nums;
+
+ if (vol_args->size < sizeof(*inherit) ||
+ vol_args->size > PAGE_SIZE) {
ret = -EINVAL;
goto free_args;
}
@@ -1916,6 +1919,20 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = PTR_ERR(inherit);
goto free_args;
}
+
+ if (inherit->num_qgroups > PAGE_SIZE ||
+ inherit->num_ref_copies > PAGE_SIZE ||
+ inherit->num_excl_copies > PAGE_SIZE) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
+
+ nums = inherit->num_qgroups + 2 * inherit->num_ref_copies +
+ 2 * inherit->num_excl_copies;
+ if (vol_args->size != struct_size(inherit, qgroups, nums)) {
+ ret = -EINVAL;
+ goto free_inherit;
+ }
}

ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 8f47a85944eb7..7ac679ed2b6c6 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1198,22 +1198,19 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
- int p_stripe = -1;
- int q_stripe = -1;
+ bool has_qstripe;
struct bio_list bio_list;
struct bio *bio;
int ret;

bio_list_init(&bio_list);

- if (rbio->real_stripes - rbio->nr_data == 1) {
- p_stripe = rbio->real_stripes - 1;
- } else if (rbio->real_stripes - rbio->nr_data == 2) {
- p_stripe = rbio->real_stripes - 2;
- q_stripe = rbio->real_stripes - 1;
- } else {
+ if (rbio->real_stripes - rbio->nr_data == 1)
+ has_qstripe = false;
+ else if (rbio->real_stripes - rbio->nr_data == 2)
+ has_qstripe = true;
+ else
BUG();
- }

/* at this point we either have a full stripe,
* or we've read the full stripe from the drive.
@@ -1257,7 +1254,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
SetPageUptodate(p);
pointers[stripe++] = kmap(p);

- if (q_stripe != -1) {
+ if (has_qstripe) {

/*
* raid6, add the qstripe and call the
@@ -2355,8 +2352,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
int nr_data = rbio->nr_data;
int stripe;
int pagenr;
- int p_stripe = -1;
- int q_stripe = -1;
+ bool has_qstripe;
struct page *p_page = NULL;
struct page *q_page = NULL;
struct bio_list bio_list;
@@ -2366,14 +2362,12 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,

bio_list_init(&bio_list);

- if (rbio->real_stripes - rbio->nr_data == 1) {
- p_stripe = rbio->real_stripes - 1;
- } else if (rbio->real_stripes - rbio->nr_data == 2) {
- p_stripe = rbio->real_stripes - 2;
- q_stripe = rbio->real_stripes - 1;
- } else {
+ if (rbio->real_stripes - rbio->nr_data == 1)
+ has_qstripe = false;
+ else if (rbio->real_stripes - rbio->nr_data == 2)
+ has_qstripe = true;
+ else
BUG();
- }

if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
is_replace = 1;
@@ -2395,17 +2389,22 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
goto cleanup;
SetPageUptodate(p_page);

- if (q_stripe != -1) {
+ if (has_qstripe) {
+ /* RAID6, allocate and map temp space for the Q stripe */
q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!q_page) {
__free_page(p_page);
goto cleanup;
}
SetPageUptodate(q_page);
+ pointers[rbio->real_stripes - 1] = kmap(q_page);
}

atomic_set(&rbio->error, 0);

+ /* Map the parity stripe just once */
+ pointers[nr_data] = kmap(p_page);
+
for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
struct page *p;
void *parity;
@@ -2415,17 +2414,8 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
pointers[stripe] = kmap(p);
}

- /* then add the parity stripe */
- pointers[stripe++] = kmap(p_page);
-
- if (q_stripe != -1) {
-
- /*
- * raid6, add the qstripe and call the
- * library function to fill in our p/q
- */
- pointers[stripe++] = kmap(q_page);
-
+ if (has_qstripe) {
+ /* RAID6, call the library function to fill in our P/Q */
raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
pointers);
} else {
@@ -2446,12 +2436,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,

for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
- kunmap(p_page);
}

+ kunmap(p_page);
__free_page(p_page);
- if (q_page)
+ if (q_page) {
+ kunmap(q_page);
__free_page(q_page);
+ }

writeback:
/*
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 95d9aebff2c4b..48858510739b2 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -227,11 +227,33 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
+ const bool start_trans = (current->journal_info == NULL);
int ret;

- trans = btrfs_start_transaction(root, 2);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ if (start_trans) {
+ /*
+ * 1 unit for inserting/updating/deleting the xattr
+ * 1 unit for the inode item update
+ */
+ trans = btrfs_start_transaction(root, 2);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ } else {
+ /*
+ * This can happen when smack is enabled and a directory is being
+ * created. It happens through d_instantiate_new(), which calls
+ * smack_d_instantiate(), which in turn calls __vfs_setxattr() to
+ * set the transmute xattr (XATTR_NAME_SMACKTRANSMUTE) on the
+ * inode. We have already reserved space for the xattr and inode
+ * update at btrfs_mkdir(), so just use the transaction handle.
+ * We don't join or start a transaction, as that will reset the
+ * block_rsv of the handle and trigger a warning for the start
+ * case.
+ */
+ ASSERT(strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) == 0);
+ trans = current->journal_info;
+ }

ret = btrfs_setxattr(trans, inode, name, value, size, flags);
if (ret)
@@ -242,7 +264,8 @@ int btrfs_setxattr_trans(struct inode *inode, const char *name,
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
out:
- btrfs_end_transaction(trans);
+ if (start_trans)
+ btrfs_end_transaction(trans);
return ret;
}

diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d52b95b75ae4d..84e9f2380edf2 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -141,7 +141,7 @@ struct ahash_alg {

struct shash_desc {
struct crypto_shash *tfm;
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
+ void *__ctx[] __aligned(ARCH_SLAB_MINALIGN);
};

#define HASH_MAX_DIGESTSIZE 64
@@ -154,9 +154,9 @@ struct shash_desc {

#define HASH_MAX_STATESIZE 512

-#define SHASH_DESC_ON_STACK(shash, ctx) \
- char __##shash##_desc[sizeof(struct shash_desc) + \
- HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
+#define SHASH_DESC_ON_STACK(shash, ctx) \
+ char __##shash##_desc[sizeof(struct shash_desc) + HASH_MAX_DESCSIZE] \
+ __aligned(__alignof__(struct shash_desc)); \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc

/**
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 19ea3a371d7bb..0c720a2982ae4 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -130,9 +130,12 @@
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
* aligned correctly for the given architecture so that there are no alignment
- * faults for C data types. In particular, this is required on platforms such
- * as arm where pointers are 32-bit aligned but there are data types such as
- * u64 which require 64-bit alignment.
+ * faults for C data types. On architectures that support non-cache coherent
+ * DMA, such as ARM or arm64, it also takes into account the minimal alignment
+ * that is required to ensure that the context struct member does not share any
+ * cachelines with the rest of the struct. This is needed to ensure that cache
+ * maintenance for non-coherent DMA (cache invalidation in particular) does not
+ * affect data that may be accessed by the CPU concurrently.
*/
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index f657fd8fc0add..f38947b9a1b9b 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -112,6 +112,11 @@ struct nhlt_vendor_dmic_array_config {
/* TODO add vendor mic config */
} __packed;

+enum {
+ NHLT_CONFIG_TYPE_GENERIC = 0,
+ NHLT_CONFIG_TYPE_MIC_ARRAY = 1
+};
+
enum {
NHLT_MIC_ARRAY_2CH_SMALL = 0xa,
NHLT_MIC_ARRAY_2CH_BIG = 0xb,
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index b9c2ee7ab43fa..cce12e1971d85 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -438,7 +438,7 @@ static int arm_is_fake_mcount(Elf32_Rel const *rp)

static int arm64_is_fake_mcount(Elf64_Rel const *rp)
{
- return ELF64_R_TYPE(w(rp->r_info)) != R_AARCH64_CALL26;
+ return ELF64_R_TYPE(w8(rp->r_info)) != R_AARCH64_CALL26;
}

/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index daede96f28ee3..baeda6c9716a1 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -64,18 +64,44 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
struct nhlt_endpoint *epnt;
struct nhlt_dmic_array_config *cfg;
struct nhlt_vendor_dmic_array_config *cfg_vendor;
+ struct nhlt_fmt *fmt_configs;
unsigned int dmic_geo = 0;
- u8 j;
+ u16 max_ch = 0;
+ u8 i, j;

if (!nhlt)
return 0;

- epnt = (struct nhlt_endpoint *)nhlt->desc;
+ for (j = 0, epnt = nhlt->desc; j < nhlt->endpoint_count; j++,
+ epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length)) {

- for (j = 0; j < nhlt->endpoint_count; j++) {
- if (epnt->linktype == NHLT_LINK_DMIC) {
- cfg = (struct nhlt_dmic_array_config *)
- (epnt->config.caps);
+ if (epnt->linktype != NHLT_LINK_DMIC)
+ continue;
+
+ cfg = (struct nhlt_dmic_array_config *)(epnt->config.caps);
+ fmt_configs = (struct nhlt_fmt *)(epnt->config.caps + epnt->config.size);
+
+ /* find max number of channels based on format_configuration */
+ if (fmt_configs->fmt_count) {
+ dev_dbg(dev, "%s: found %d format definitions\n",
+ __func__, fmt_configs->fmt_count);
+
+ for (i = 0; i < fmt_configs->fmt_count; i++) {
+ struct wav_fmt_ext *fmt_ext;
+
+ fmt_ext = &fmt_configs->fmt_config[i].fmt_ext;
+
+ if (fmt_ext->fmt.channels > max_ch)
+ max_ch = fmt_ext->fmt.channels;
+ }
+ dev_dbg(dev, "%s: max channels found %d\n", __func__, max_ch);
+ } else {
+ dev_dbg(dev, "%s: No format information found\n", __func__);
+ }
+
+ if (cfg->device_config.config_type != NHLT_CONFIG_TYPE_MIC_ARRAY) {
+ dmic_geo = max_ch;
+ } else {
switch (cfg->array_type) {
case NHLT_MIC_ARRAY_2CH_SMALL:
case NHLT_MIC_ARRAY_2CH_BIG:
@@ -92,13 +118,23 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
dmic_geo = cfg_vendor->nb_mics;
break;
default:
- dev_warn(dev, "undefined DMIC array_type 0x%0x\n",
- cfg->array_type);
+ dev_warn(dev, "%s: undefined DMIC array_type 0x%0x\n",
+ __func__, cfg->array_type);
+ }
+
+ if (dmic_geo > 0) {
+ dev_dbg(dev, "%s: Array with %d dmics\n", __func__, dmic_geo);
+ }
+ if (max_ch > dmic_geo) {
+ dev_dbg(dev, "%s: max channels %d exceed dmic number %d\n",
+ __func__, max_ch, dmic_geo);
}
}
- epnt = (struct nhlt_endpoint *)((u8 *)epnt + epnt->length);
}

+ dev_dbg(dev, "%s: dmic number %d max_ch %d\n",
+ __func__, dmic_geo, max_ch);
+
return dmic_geo;
}
EXPORT_SYMBOL_GPL(intel_nhlt_get_dmic_geo);
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index 3cd4b7dad945d..b1cc4cdc6c41f 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -991,7 +991,7 @@ static int daio_mgr_dao_init(void *blk, unsigned int idx, unsigned int conf)

if (idx < 4) {
/* S/PDIF output */
- switch ((conf & 0x7)) {
+ switch ((conf & 0xf)) {
case 1:
set_field(&ctl->txctl[idx], ATXCTL_NUC, 0);
break;
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index d1d8ba2a4a40c..ca78aa3684762 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -23,7 +23,7 @@
#include "list.h"
#include "sysfs_utils.h"

-struct udev *udev_context;
+extern struct udev *udev_context;

static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
{