Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0300-5.4.201-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 15865 byte(s)
-sync kernel patches
diff --git a/Documentation/hwmon/hwmon-kernel-api.rst b/Documentation/hwmon/hwmon-kernel-api.rst
index 23f27fe78e379..c41eb61081036 100644
--- a/Documentation/hwmon/hwmon-kernel-api.rst
+++ b/Documentation/hwmon/hwmon-kernel-api.rst
@@ -72,7 +72,7 @@ hwmon_device_register_with_info is the most comprehensive and preferred means
 to register a hardware monitoring device. It creates the standard sysfs
 attributes in the hardware monitoring core, letting the driver focus on reading
 from and writing to the chip instead of having to bother with sysfs attributes.
-The parent device parameter as well as the chip parameter must not be NULL. Its
+The parent device parameter cannot be NULL with non-NULL chip info. Its
 parameters are described in more detail below.
 
 devm_hwmon_device_register_with_info is similar to
diff --git a/Makefile b/Makefile
index 32da9117e9d76..75be5870cc55f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 200
+SUBLEVEL = 201
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index db767b072601e..7b054c67acd81 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -228,8 +228,6 @@ ENDPIPROC(__dma_flush_area)
  *	- dir	- DMA direction
  */
 ENTRY(__dma_map_area)
-	cmp	w2, #DMA_FROM_DEVICE
-	b.eq	__dma_inv_area
 	b	__dma_clean_area
 ENDPIPROC(__dma_map_area)
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4438c00acb656..28ca07360e970 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 	ptev = pte_val(*ptep);
 	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
-		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
+		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 	pgste_set_unlock(ptep, pgste);
 	preempt_enable();
 }
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index c73b93b9bb87d..a2175394cd253 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -715,12 +715,11 @@ EXPORT_SYMBOL_GPL(hwmon_device_register_with_groups);
 
 /**
  * hwmon_device_register_with_info - register w/ hwmon
- * @dev: the parent device (mandatory)
- * @name: hwmon name attribute (mandatory)
- * @drvdata: driver data to attach to created device (optional)
- * @chip: pointer to hwmon chip information (mandatory)
+ * @dev: the parent device
+ * @name: hwmon name attribute
+ * @drvdata: driver data to attach to created device
+ * @chip: pointer to hwmon chip information
  * @extra_groups: pointer to list of additional non-standard attribute groups
- *	(optional)
  *
  * hwmon_device_unregister() must be called when the device is no
  * longer needed.
@@ -733,10 +732,13 @@ hwmon_device_register_with_info(struct device *dev, const char *name,
 				const struct hwmon_chip_info *chip,
 				const struct attribute_group **extra_groups)
 {
-	if (!dev || !name || !chip)
+	if (!name)
+		return ERR_PTR(-EINVAL);
+
+	if (chip && (!chip->ops || !chip->ops->is_visible || !chip->info))
 		return ERR_PTR(-EINVAL);
 
-	if (!chip->ops || !chip->ops->is_visible || !chip->info)
+	if (chip && !dev)
 		return ERR_PTR(-EINVAL);
 
 	return __hwmon_device_register(dev, name, drvdata, chip, extra_groups);
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 06b382304d926..81bc36a43b32d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -872,8 +872,7 @@ EXPORT_SYMBOL(dm_consume_args);
 static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_BIO_BASED ||
-		table_type == DM_TYPE_DAX_BIO_BASED ||
-		table_type == DM_TYPE_NVME_BIO_BASED);
+		table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
 static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -929,8 +928,6 @@ bool dm_table_supports_dax(struct dm_table *t,
 	return true;
 }
 
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 				  sector_t start, sector_t len, void *data)
 {
@@ -960,7 +957,6 @@ static int dm_table_determine_type(struct dm_table *t)
 			goto verify_bio_based;
 		}
 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
-		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
 		goto verify_rq_based;
 	}
 
@@ -999,15 +995,6 @@ verify_bio_based:
 		if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
 		    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
 			t->type = DM_TYPE_DAX_BIO_BASED;
-		} else {
-			/* Check if upgrading to NVMe bio-based is valid or required */
-			tgt = dm_table_get_immutable_target(t);
-			if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
-				t->type = DM_TYPE_NVME_BIO_BASED;
-				goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
-			} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
-				t->type = DM_TYPE_NVME_BIO_BASED;
-			}
 		}
 		return 0;
 	}
@@ -1024,8 +1011,7 @@ verify_rq_based:
 	 * (e.g. request completion process for partial completion.)
 	 */
 	if (t->num_targets > 1) {
-		DMERR("%s DM doesn't support multiple targets",
-		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+		DMERR("request-based DM doesn't support multiple targets");
 		return -EINVAL;
 	}
 
@@ -1714,20 +1700,6 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
 	return q && !blk_queue_add_random(q);
 }
 
-static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
-					sector_t start, sector_t len, void *data)
-{
-	char b[BDEVNAME_SIZE];
-
-	/* For now, NVMe devices are the only devices of this class */
-	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
-	return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
-}
-
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
 					 sector_t start, sector_t len, void *data)
 {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 37b8bb4d80f0f..77e28f77c59f4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1000,7 +1000,7 @@ static void clone_endio(struct bio *bio)
 	struct mapped_device *md = tio->io->md;
 	dm_endio_fn endio = tio->ti->type->end_io;
 
-	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+	if (unlikely(error == BLK_STS_TARGET)) {
 		if (bio_op(bio) == REQ_OP_DISCARD &&
 		    !bio->bi_disk->queue->limits.max_discard_sectors)
 			disable_discard(md);
@@ -1325,7 +1325,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 	sector = clone->bi_iter.bi_sector;
 
 	if (unlikely(swap_bios_limit(ti, clone))) {
-		struct mapped_device *md = io->md;
 		int latch = get_swap_bios();
 		if (unlikely(latch != md->swap_bios))
 			__set_swap_bios_limit(md, latch);
@@ -1340,24 +1339,17 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
 		/* the bio has been remapped so dispatch it */
 		trace_block_bio_remap(clone->bi_disk->queue, clone,
 				      bio_dev(io->orig_bio), sector);
-		if (md->type == DM_TYPE_NVME_BIO_BASED)
-			ret = direct_make_request(clone);
-		else
-			ret = generic_make_request(clone);
+		ret = generic_make_request(clone);
 		break;
 	case DM_MAPIO_KILL:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
+		if (unlikely(swap_bios_limit(ti, clone)))
 			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_IOERR);
 		break;
 	case DM_MAPIO_REQUEUE:
-		if (unlikely(swap_bios_limit(ti, clone))) {
-			struct mapped_device *md = io->md;
+		if (unlikely(swap_bios_limit(ti, clone)))
 			up(&md->swap_bios_semaphore);
-		}
 		free_tio(tio);
 		dec_pending(io, BLK_STS_DM_REQUEUE);
 		break;
@@ -1732,51 +1724,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
 	return ret;
 }
 
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-			      struct bio *bio, struct dm_target *ti)
-{
-	struct clone_info ci;
-	blk_qc_t ret = BLK_QC_T_NONE;
-	int error = 0;
-
-	init_clone_info(&ci, md, map, bio);
-
-	if (bio->bi_opf & REQ_PREFLUSH) {
-		struct bio flush_bio;
-
-		/*
-		 * Use an on-stack bio for this, it's safe since we don't
-		 * need to reference it after submit. It's just used as
-		 * the basis for the clone(s).
-		 */
-		bio_init(&flush_bio, NULL, 0);
-		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-		ci.bio = &flush_bio;
-		ci.sector_count = 0;
-		error = __send_empty_flush(&ci);
-		bio_uninit(ci.bio);
-		/* dec_pending submits any data associated with flush */
-	} else {
-		struct dm_target_io *tio;
-
-		ci.bio = bio;
-		ci.sector_count = bio_sectors(bio);
-		if (__process_abnormal_io(&ci, ti, &error))
-			goto out;
-
-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
-		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
-	}
-out:
-	/* drop the extra reference count */
-	dec_pending(ci.io, errno_to_blk_status(error));
-	return ret;
-}
-
 static blk_qc_t dm_process_bio(struct mapped_device *md,
			       struct dm_table *map, struct bio *bio)
 {
@@ -1807,8 +1754,6 @@ static blk_qc_t dm_process_bio(struct mapped_device *md,
 		/* regular IO is split by __split_and_process_bio */
 	}
 
-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-		return __process_bio(md, map, bio, ti);
 	return __split_and_process_bio(md, map, bio);
 }
 
@@ -2200,12 +2145,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	if (request_based)
 		dm_stop_queue(q);
 
-	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+	if (request_based) {
 		/*
-		 * Leverage the fact that request-based DM targets and
-		 * NVMe bio based targets are immutable singletons
-		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
-		 * and __process_bio.
+		 * Leverage the fact that request-based DM targets are
+		 * immutable singletons - used to optimize dm_mq_queue_rq.
		 */
 		md->immutable_target = dm_table_get_immutable_target(t);
 	}
@@ -2334,7 +2277,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_NONE:
@@ -3070,7 +3012,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
 	switch (type) {
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
-	case DM_TYPE_NVME_BIO_BASED:
 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
 		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 271bd08f4a255..3f053b11e2cee 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
 	dev->qmult = qmult;
 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
 
-	if (get_ether_addr(dev_addr, net->dev_addr))
+	if (get_ether_addr(dev_addr, net->dev_addr)) {
+		net->addr_assign_type = NET_ADDR_RANDOM;
 		dev_warn(&g->dev,
 			"using random %s ethernet address\n", "self");
+	} else {
+		net->addr_assign_type = NET_ADDR_SET;
+	}
 	if (get_ether_addr(host_addr, dev->host_mac))
 		dev_warn(&g->dev,
 			"using random %s ethernet address\n", "host");
@@ -831,6 +835,9 @@ struct net_device *gether_setup_name_default(const char *netname)
 	INIT_LIST_HEAD(&dev->tx_reqs);
 	INIT_LIST_HEAD(&dev->rx_reqs);
 
+	/* by default we always have a random MAC address */
+	net->addr_assign_type = NET_ADDR_RANDOM;
+
 	skb_queue_head_init(&dev->rx_frames);
 
 	/* network device setup */
@@ -868,7 +875,6 @@ int gether_register_netdev(struct net_device *net)
 	g = dev->gadget;
 
 	memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
-	net->addr_assign_type = NET_ADDR_RANDOM;
 
 	status = register_netdev(net);
 	if (status < 0) {
@@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
 	if (get_ether_addr(dev_addr, new_addr))
 		return -EINVAL;
 	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
+	net->addr_assign_type = NET_ADDR_SET;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gether_set_dev_addr);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a53d7d2c2d95c..60631f3abddbd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -28,7 +28,6 @@ enum dm_queue_mode {
 	DM_TYPE_BIO_BASED	 = 1,
 	DM_TYPE_REQUEST_BASED	 = 2,
 	DM_TYPE_DAX_BIO_BASED	 = 3,
-	DM_TYPE_NVME_BIO_BASED	 = 4,
 };
 
 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 959f4f0c85460..d9bee15e36a50 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -675,12 +675,14 @@ EXPORT_SYMBOL_GPL(inet_unhash);
  * Note that we use 32bit integers (vs RFC 'short integers')
  * because 2^16 is not a multiple of num_ephemeral and this
  * property might be used by clever attacker.
- * RFC claims using TABLE_LENGTH=10 buckets gives an improvement,
- * we use 256 instead to really give more isolation and
- * privacy, this only consumes 1 KB of kernel memory.
+ * RFC claims using TABLE_LENGTH=10 buckets gives an improvement, though
+ * attacks were since demonstrated, thus we use 65536 instead to really
+ * give more isolation and privacy, at the expense of 256kB of kernel
+ * memory.
 */
-#define INET_TABLE_PERTURB_SHIFT 8
-static u32 table_perturb[1 << INET_TABLE_PERTURB_SHIFT];
+#define INET_TABLE_PERTURB_SHIFT 16
+#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)
+static u32 *table_perturb;
 
 int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 		struct sock *sk, u64 port_offset,
@@ -723,10 +725,11 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
 	if (likely(remaining > 1))
 		remaining &= ~1U;
 
-	net_get_random_once(table_perturb, sizeof(table_perturb));
-	index = hash_32(port_offset, INET_TABLE_PERTURB_SHIFT);
+	net_get_random_once(table_perturb,
+			    INET_TABLE_PERTURB_SIZE * sizeof(*table_perturb));
+	index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
 
-	offset = READ_ONCE(table_perturb[index]) + port_offset;
+	offset = READ_ONCE(table_perturb[index]) + (port_offset >> 32);
 	offset %= remaining;
 
 	/* In first pass we try ports of @low parity.
@@ -782,6 +785,12 @@ next_port:
 	return -EADDRNOTAVAIL;
 
 ok:
+	/* Here we want to add a little bit of randomness to the next source
+	 * port that will be chosen. We use a max() with a random here so that
+	 * on low contention the randomness is maximal and on high contention
+	 * it may be inexistent.
+	 */
+	i = max_t(int, i, (prandom_u32() & 7) * 2);
 	WRITE_ONCE(table_perturb[index], READ_ONCE(table_perturb[index]) + i + 2);
 
 	/* Head lock still held and bh's disabled */
@@ -855,6 +864,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name,
 						    low_limit,
 						    high_limit);
 	init_hashinfo_lhash2(h);
+
+	/* this one is used for source ports of outgoing connections */
+	table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE,
+				      sizeof(*table_perturb), GFP_KERNEL);
+	if (!table_perturb)
+		panic("TCP: failed to alloc table_perturb");
 }
 
 int inet_hashinfo2_init_mod(struct inet_hashinfo *h)
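As context for the net/ipv4/inet_hashtables.c hunks above: the patched code now derives the perturbation-table bucket from the low 16 bits of the 64-bit port_offset and folds only the upper 32 bits into the starting offset. Below is a minimal, hypothetical userspace sketch of that computation; it is not part of the patch, and pick_start_offset and the constant inputs are illustrative names only. In the kernel, table_perturb is kmalloc_array()-allocated and seeded once via net_get_random_once(); here it is a static array for simplicity.

/* Userspace sketch (illustrative, not from the patch) of the patched
 * starting-offset computation in __inet_hash_connect().
 */
#include <stdint.h>
#include <stdio.h>

#define INET_TABLE_PERTURB_SHIFT 16
#define INET_TABLE_PERTURB_SIZE (1 << INET_TABLE_PERTURB_SHIFT)

/* the kernel allocates this dynamically and seeds it with random bytes */
static uint32_t table_perturb[INET_TABLE_PERTURB_SIZE];

static uint32_t pick_start_offset(uint64_t port_offset, uint32_t remaining)
{
	/* low bits select the perturbation bucket ... */
	uint32_t index = port_offset & (INET_TABLE_PERTURB_SIZE - 1);
	/* ... while only the high 32 bits feed the offset, so the bucket
	 * index no longer leaks directly into the chosen port sequence
	 */
	uint32_t offset = table_perturb[index] + (uint32_t)(port_offset >> 32);

	return offset % remaining;	/* start point in the ephemeral range */
}

int main(void)
{
	/* e.g. ip_local_port_range 32768..60999, rounded to an even count */
	uint32_t remaining = 28232 & ~1U;

	printf("start offset: %u\n",
	       pick_start_offset(0x1234567890abcdefULL, remaining));
	return 0;
}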