Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0236-5.4.137-all-fixes.patch

Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 22460 bytes
-sync kernel patches
diff --git a/Makefile b/Makefile
index 1c565572bfb24..7cd8862d854ed 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 136
+SUBLEVEL = 137
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 37bd41ff8dffa..151c0220047dd 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -195,16 +195,15 @@
 		#size-cells = <1>;
 		ranges;
 
-		vic: intc@10140000 {
+		vic: interrupt-controller@10140000 {
 			compatible = "arm,versatile-vic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
 			reg = <0x10140000 0x1000>;
-			clear-mask = <0xffffffff>;
 			valid-mask = <0xffffffff>;
 		};
 
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			compatible = "arm,versatile-sic";
 			interrupt-controller;
 			#interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 06a0fdf24026c..e7e751a858d81 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@
 
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 377157656a8b6..5d35b9656b67d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -475,8 +475,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
 		if (reinject) {
 			/*
 			 * On vmentry, vcpu->arch.exception.pending is only
@@ -7592,6 +7590,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+		vcpu->arch.exception.error_code = false;
+	kvm_x86_ops->queue_exception(vcpu);
+}
+
 static int inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -7599,7 +7604,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 	/* try to reinject previous events if any */
 
 	if (vcpu->arch.exception.injected)
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	/*
 	 * Do not inject an NMI or interrupt if there is a pending
 	 * exception. Exceptions and interrupts are recognized at
@@ -7665,7 +7670,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	kvm_x86_ops->queue_exception(vcpu);
+	kvm_inject_exception(vcpu);
 }
 
 /* Don't consider new event if we re-injected an event */
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index 7b6903bad4085..48e6e2b489241 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -54,7 +54,6 @@ enum scmi_error_codes {
 	SCMI_ERR_GENERIC = -8,	/* Generic Error */
 	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
 	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
-	SCMI_ERR_MAX
 };
 
 /* List of all SCMI devices active in system */
@@ -176,8 +175,10 @@ static const int scmi_linux_errmap[] = {
 
 static inline int scmi_to_linux_errno(int errno)
 {
-	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
-		return scmi_linux_errmap[-errno];
+	int err_idx = -errno;
+
+	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
+		return scmi_linux_errmap[err_idx];
 	return -EIO;
 }
 
@@ -693,8 +694,9 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
 	struct scmi_xfers_info *info = &sinfo->tx_minfo;
 
 	/* Pre-allocated messages, no more than what hdr.seq can support */
-	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
-		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
+	if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) {
+		dev_err(dev,
+			"Invalid maximum messages %d, not in range [1 - %lu]\n",
 			desc->max_msg, MSG_TOKEN_MAX);
 		return -EINVAL;
 	}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index bf6b4f71dc58f..defee1d208d22 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -498,8 +498,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
 	p = buf;
 	while (bytes_left >= sizeof(*p)) {
 		info->speed = le64_to_cpu(p->LinkSpeed);
-		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
-		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+		info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
+		info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
 
 		cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
 		cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index 4af318fbda774..ef9498a6e88ac 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
 
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index b63a4df7327b6..c0a73a6ffb28b 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index dcc2aab1b2c43..25ac9a8bb57a7 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE 256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index c33324686d89e..bcf820ce0e02e 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -421,14 +421,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -444,6 +442,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
diff --git a/fs/internal.h b/fs/internal.h
index 7651e8b8ef136..61aed95f83d1e 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -52,7 +52,6 @@ extern void __init chrdev_init(void);
  */
 extern const struct fs_context_operations legacy_fs_context_ops;
 extern int parse_monolithic_mount_data(struct fs_context *, void *);
-extern void fc_drop_locked(struct fs_context *);
 extern void vfs_clean_context(struct fs_context *fc);
 extern int finish_clean_context(struct fs_context *fc);
 
diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c
index c04bad4b2b43f..10c4c1e80124f 100644
--- a/fs/iomap/seek.c
+++ b/fs/iomap/seek.c
@@ -140,23 +140,20 @@ loff_t
 iomap_seek_hole(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_hole_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_hole_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
 			break;
-
 		offset += ret;
-		length -= ret;
 	}
 
 	return offset;
@@ -186,27 +183,23 @@ loff_t
 iomap_seek_data(struct inode *inode, loff_t offset, const struct iomap_ops *ops)
 {
 	loff_t size = i_size_read(inode);
-	loff_t length = size - offset;
 	loff_t ret;
 
 	/* Nothing to be found before or beyond the end of the file. */
 	if (offset < 0 || offset >= size)
 		return -ENXIO;
 
-	while (length > 0) {
-		ret = iomap_apply(inode, offset, length, IOMAP_REPORT, ops,
-				  &offset, iomap_seek_data_actor);
+	while (offset < size) {
+		ret = iomap_apply(inode, offset, size - offset, IOMAP_REPORT,
+				  ops, &offset, iomap_seek_data_actor);
 		if (ret < 0)
 			return ret;
 		if (ret == 0)
-			break;
-
+			return offset;
 		offset += ret;
-		length -= ret;
 	}
 
-	if (length <= 0)
-		return -ENXIO;
-	return offset;
+	/* We've reached the end of the file without finding data */
+	return -ENXIO;
 }
 EXPORT_SYMBOL_GPL(iomap_seek_data);
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e5c14e2c53d35..ba8a58754340d 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -134,6 +134,7 @@ extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
+extern void fc_drop_locked(struct fs_context *fc);
 
 /*
  * sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index 86e028388badc..9899b9af7f22f 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -36,7 +36,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 06e1deeef4640..8c6b04f9f6cbe 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -328,8 +328,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -337,7 +336,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) || \
 	 ipv4_is_multicast(a) || \
 	 ipv4_is_zeronet(a) || \
-	 ipv4_is_test_198(a) || \
 	 ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions. */
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 9329f725d22b6..2d0ef613ca070 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -1228,9 +1228,7 @@ int cgroup1_get_tree(struct fs_context *fc)
 	ret = cgroup_do_get_tree(fc);
 
 	if (!ret && percpu_ref_is_dying(&ctx->root->cgrp.self.refcnt)) {
-		struct super_block *sb = fc->root->d_sb;
-		dput(fc->root);
-		deactivate_locked_super(sb);
+		fc_drop_locked(fc);
 		ret = 1;
 	}
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8f41499d8257d..6aeb53b4e19f8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3660,15 +3660,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
 
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
 
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
diff --git a/net/802/garp.c b/net/802/garp.c
index 400bd857e5f57..f6012f8e59f00 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -203,6 +203,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
 	kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -609,6 +622,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 
diff --git a/net/802/mrp.c b/net/802/mrp.c
index 2cfdfbfbb2edb..5b804dbe2d08f 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -292,6 +292,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -895,6 +908,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 68f84fac63e0b..452883b28abab 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1098,7 +1098,7 @@ set_rcvbuf:
 			if (val < 0)
 				ret = -EINVAL;
 			else
-				sk->sk_ll_usec = val;
+				WRITE_ONCE(sk->sk_ll_usec, val);
 		}
 		break;
 #endif
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 33444d9856819..fc913f09606db 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -59,10 +59,38 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	unsigned int hh_len = LL_RESERVED_SPACE(dev);
+	int delta = hh_len - skb_headroom(skb);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;
 
+	/* Be paranoid, rather than too clever. */
+	if (unlikely(delta > 0) && dev->header_ops) {
+		/* pskb_expand_head() might crash, if skb is shared */
+		if (skb_shared(skb)) {
+			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+			if (likely(nskb)) {
+				if (skb->sk)
+					skb_set_owner_w(nskb, skb->sk);
+				consume_skb(skb);
+			} else {
+				kfree_skb(skb);
+			}
+			skb = nskb;
+		}
+		if (skb &&
+		    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+			kfree_skb(skb);
+			skb = NULL;
+		}
+		if (!skb) {
+			IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
+			return -ENOMEM;
+		}
+	}
+
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
 		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 7f8702abc7bfe..bb370a7948f42 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -397,7 +397,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 9f96826eb3ba0..52ee3a9bb7093 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1512,6 +1512,53 @@ out:
 	return err;
 }
 
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
+
+	/*
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs). This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored. While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd. The following lock/unlock
+	 * pair is to ensure serialization with garbage collection. It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate. If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
+	 */
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
+}
+
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
 {
 	int err = 0;
@@ -2137,7 +2184,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);
 
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2378,7 +2425,7 @@ unlock:
 		/* It is questionable, see note in unix_dgram_recvmsg.
 		 */
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 
 		sk_peek_offset_fwd(sk, chunk);
 
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 812fc97bb1a97..add0ef37ba967 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -39,8 +39,6 @@ EXTRA_WARNINGS += -Wundef
 EXTRA_WARNINGS += -Wwrite-strings
 EXTRA_WARNINGS += -Wformat
 
-CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
-
 # Makefiles suck: This macro sets a default value of $(2) for the
 # variable named by $(1), unless the variable has been set by
 # environment or command line. This is necessary for CC and AR
@@ -52,12 +50,22 @@ define allow-override
   $(eval $(1) = $(2)))
 endef
 
+ifneq ($(LLVM),)
+$(call allow-override,CC,clang)
+$(call allow-override,AR,llvm-ar)
+$(call allow-override,LD,ld.lld)
+$(call allow-override,CXX,clang++)
+$(call allow-override,STRIP,llvm-strip)
+else
 # Allow setting various cross-compile vars or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
 $(call allow-override,LD,$(CROSS_COMPILE)ld)
 $(call allow-override,CXX,$(CROSS_COMPILE)g++)
 $(call allow-override,STRIP,$(CROSS_COMPILE)strip)
+endif
+
+CC_NO_CLANG := $(shell $(CC) -dM -E -x c /dev/null | grep -Fq "__clang__"; echo $$?)
 
 ifneq ($(LLVM),)
 HOSTAR ?= llvm-ar
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 17ac167823a6d..9ba7feffe344b 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -141,7 +141,7 @@ static void anon_allocate_area(void **alloc_area)
 {
 	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
 			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-	if (*alloc_area == MAP_FAILED)
+	if (*alloc_area == MAP_FAILED) {
 		fprintf(stderr, "mmap of anonymous memory failed");
 		*alloc_area = NULL;
 	}