Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0308-5.4.209-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3635 - (show annotations) (download)
Mon Oct 24 12:34:12 2022 UTC (18 months, 1 week ago) by niro
File size: 34770 byte(s)
-sync kernel patches
1 diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
2 index 787a9c077ef1d..5cf601c94e354 100644
3 --- a/Documentation/networking/ip-sysctl.txt
4 +++ b/Documentation/networking/ip-sysctl.txt
5 @@ -2284,7 +2284,14 @@ sctp_rmem - vector of 3 INTEGERs: min, default, max
6 Default: 4K
7
8 sctp_wmem - vector of 3 INTEGERs: min, default, max
9 - Currently this tunable has no effect.
10 + Only the first value ("min") is used, "default" and "max" are
11 + ignored.
12 +
13 + min: Minimum size of send buffer that can be used by SCTP sockets.
14 + It is guaranteed to each SCTP socket (but not association) even
15 + under moderate memory pressure.
16 +
17 + Default: 4K
18
19 addr_scope_policy - INTEGER
20 Control IPv4 address scoping - draft-stewart-tsvwg-sctp-ipv4-00
21 diff --git a/Makefile b/Makefile
22 index 884a3f314baf8..7093e3b03b9f7 100644
23 --- a/Makefile
24 +++ b/Makefile
25 @@ -1,7 +1,7 @@
26 # SPDX-License-Identifier: GPL-2.0
27 VERSION = 5
28 PATCHLEVEL = 4
29 -SUBLEVEL = 208
30 +SUBLEVEL = 209
31 EXTRAVERSION =
32 NAME = Kleptomaniac Octopus
33
34 diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
35 index b99dd8e1c93f1..7ba6cf8261626 100644
36 --- a/arch/arm/lib/xor-neon.c
37 +++ b/arch/arm/lib/xor-neon.c
38 @@ -26,8 +26,9 @@ MODULE_LICENSE("GPL");
39 * While older versions of GCC do not generate incorrect code, they fail to
40 * recognize the parallel nature of these functions, and emit plain ARM code,
41 * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
42 + *
43 + * #warning This code requires at least version 4.6 of GCC
44 */
45 -#warning This code requires at least version 4.6 of GCC
46 #endif
47
48 #pragma GCC diagnostic ignored "-Wunused-variable"
49 diff --git a/arch/s390/include/asm/archrandom.h b/arch/s390/include/asm/archrandom.h
50 index 2c6e1c6ecbe78..4120c428dc378 100644
51 --- a/arch/s390/include/asm/archrandom.h
52 +++ b/arch/s390/include/asm/archrandom.h
53 @@ -2,7 +2,7 @@
54 /*
55 * Kernel interface for the s390 arch_random_* functions
56 *
57 - * Copyright IBM Corp. 2017, 2020
58 + * Copyright IBM Corp. 2017, 2022
59 *
60 * Author: Harald Freudenberger <freude@de.ibm.com>
61 *
62 @@ -14,6 +14,7 @@
63 #ifdef CONFIG_ARCH_RANDOM
64
65 #include <linux/static_key.h>
66 +#include <linux/preempt.h>
67 #include <linux/atomic.h>
68 #include <asm/cpacf.h>
69
70 @@ -32,7 +33,8 @@ static inline bool __must_check arch_get_random_int(unsigned int *v)
71
72 static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
73 {
74 - if (static_branch_likely(&s390_arch_random_available)) {
75 + if (static_branch_likely(&s390_arch_random_available) &&
76 + in_task()) {
77 cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
78 atomic64_add(sizeof(*v), &s390_arch_random_counter);
79 return true;
80 @@ -42,7 +44,8 @@ static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
81
82 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
83 {
84 - if (static_branch_likely(&s390_arch_random_available)) {
85 + if (static_branch_likely(&s390_arch_random_available) &&
86 + in_task()) {
87 cpacf_trng(NULL, 0, (u8 *)v, sizeof(*v));
88 atomic64_add(sizeof(*v), &s390_arch_random_counter);
89 return true;
90 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
91 index 0610d344fdbf0..637f6ed78b489 100644
92 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
93 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
94 @@ -1821,11 +1821,15 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
95 * non-zero req_queue_pairs says that user requested a new
96 * queue count via ethtool's set_channels, so use this
97 * value for queues distribution across traffic classes
98 + * We need at least one queue pair for the interface
99 + * to be usable as we see in else statement.
100 */
101 if (vsi->req_queue_pairs > 0)
102 vsi->num_queue_pairs = vsi->req_queue_pairs;
103 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
104 vsi->num_queue_pairs = pf->num_lan_msix;
105 + else
106 + vsi->num_queue_pairs = 1;
107 }
108
109 /* Number of queues per enabled TC */
110 diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
111 index b297a3ca22fc8..83678120573ec 100644
112 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
113 +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
114 @@ -619,7 +619,8 @@ static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
115 rx_desc = ICE_RX_DESC(rx_ring, i);
116
117 if (!(rx_desc->wb.status_error0 &
118 - cpu_to_le16(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)))
119 + (cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)) |
120 + cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
121 continue;
122
123 rx_buf = &rx_ring->rx_buf[i];
124 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
125 index 88750a96cb3f2..7d28563ab7946 100644
126 --- a/drivers/net/ethernet/intel/ice/ice_main.c
127 +++ b/drivers/net/ethernet/intel/ice/ice_main.c
128 @@ -3495,10 +3495,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
129 if (vsi->netdev) {
130 ice_set_rx_mode(vsi->netdev);
131
132 - err = ice_vsi_vlan_setup(vsi);
133 + if (vsi->type != ICE_VSI_LB) {
134 + err = ice_vsi_vlan_setup(vsi);
135
136 - if (err)
137 - return err;
138 + if (err)
139 + return err;
140 + }
141 }
142 ice_vsi_cfg_dcb_rings(vsi);
143
144 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
145 index 1fa1b71dbfa11..ed1140ecca603 100644
146 --- a/drivers/net/ethernet/sfc/ptp.c
147 +++ b/drivers/net/ethernet/sfc/ptp.c
148 @@ -1093,7 +1093,29 @@ static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
149
150 tx_queue = &ptp_data->channel->tx_queue[type];
151 if (tx_queue && tx_queue->timestamping) {
152 + /* This code invokes normal driver TX code which is always
153 + * protected from softirqs when called from generic TX code,
154 + * which in turn disables preemption. Look at __dev_queue_xmit
155 + * which uses rcu_read_lock_bh disabling preemption for RCU
156 + * plus disabling softirqs. We do not need RCU reader
157 + * protection here.
158 + *
159 + * Although it is theoretically safe for current PTP TX/RX code
160 + * running without disabling softirqs, there are three good
161 + * reasons for doing so:
162 + *
163 + * 1) The code invoked is mainly implemented for non-PTP
164 + * packets and it is always executed with softirqs
165 + * disabled.
166 + * 2) This being a single PTP packet, better to not
167 + * interrupt its processing by softirqs which can lead
168 + * to high latencies.
169 + * 3) netdev_xmit_more checks preemption is disabled and
170 + * triggers a BUG_ON if not.
171 + */
172 + local_bh_disable();
173 efx_enqueue_skb(tx_queue, skb);
174 + local_bh_enable();
175 } else {
176 WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
177 dev_kfree_skb_any(skb);
178 diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
179 index 291fa449993fb..45f295403cb55 100644
180 --- a/drivers/net/sungem_phy.c
181 +++ b/drivers/net/sungem_phy.c
182 @@ -454,6 +454,7 @@ static int bcm5421_init(struct mii_phy* phy)
183 int can_low_power = 1;
184 if (np == NULL || of_get_property(np, "no-autolowpower", NULL))
185 can_low_power = 0;
186 + of_node_put(np);
187 if (can_low_power) {
188 /* Enable automatic low-power */
189 sungem_phy_write(phy, 0x1c, 0x9002);
190 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
191 index e14842fbe3d62..579df7c5411d3 100644
192 --- a/drivers/net/virtio_net.c
193 +++ b/drivers/net/virtio_net.c
194 @@ -213,9 +213,15 @@ struct virtnet_info {
195 /* Packet virtio header size */
196 u8 hdr_len;
197
198 - /* Work struct for refilling if we run low on memory. */
199 + /* Work struct for delayed refilling if we run low on memory. */
200 struct delayed_work refill;
201
202 + /* Is delayed refill enabled? */
203 + bool refill_enabled;
204 +
205 + /* The lock to synchronize the access to refill_enabled */
206 + spinlock_t refill_lock;
207 +
208 /* Work struct for config space updates */
209 struct work_struct config_work;
210
211 @@ -319,6 +325,20 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
212 return p;
213 }
214
215 +static void enable_delayed_refill(struct virtnet_info *vi)
216 +{
217 + spin_lock_bh(&vi->refill_lock);
218 + vi->refill_enabled = true;
219 + spin_unlock_bh(&vi->refill_lock);
220 +}
221 +
222 +static void disable_delayed_refill(struct virtnet_info *vi)
223 +{
224 + spin_lock_bh(&vi->refill_lock);
225 + vi->refill_enabled = false;
226 + spin_unlock_bh(&vi->refill_lock);
227 +}
228 +
229 static void virtqueue_napi_schedule(struct napi_struct *napi,
230 struct virtqueue *vq)
231 {
232 @@ -1388,8 +1408,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
233 }
234
235 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
236 - if (!try_fill_recv(vi, rq, GFP_ATOMIC))
237 - schedule_delayed_work(&vi->refill, 0);
238 + if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
239 + spin_lock(&vi->refill_lock);
240 + if (vi->refill_enabled)
241 + schedule_delayed_work(&vi->refill, 0);
242 + spin_unlock(&vi->refill_lock);
243 + }
244 }
245
246 u64_stats_update_begin(&rq->stats.syncp);
247 @@ -1508,6 +1532,8 @@ static int virtnet_open(struct net_device *dev)
248 struct virtnet_info *vi = netdev_priv(dev);
249 int i, err;
250
251 + enable_delayed_refill(vi);
252 +
253 for (i = 0; i < vi->max_queue_pairs; i++) {
254 if (i < vi->curr_queue_pairs)
255 /* Make sure we have some buffers: if oom use wq. */
256 @@ -1878,6 +1904,8 @@ static int virtnet_close(struct net_device *dev)
257 struct virtnet_info *vi = netdev_priv(dev);
258 int i;
259
260 + /* Make sure NAPI doesn't schedule refill work */
261 + disable_delayed_refill(vi);
262 /* Make sure refill_work doesn't re-enable napi! */
263 cancel_delayed_work_sync(&vi->refill);
264
265 @@ -2417,6 +2445,8 @@ static int virtnet_restore_up(struct virtio_device *vdev)
266
267 virtio_device_ready(vdev);
268
269 + enable_delayed_refill(vi);
270 +
271 if (netif_running(vi->dev)) {
272 err = virtnet_open(vi->dev);
273 if (err)
274 @@ -3140,6 +3170,7 @@ static int virtnet_probe(struct virtio_device *vdev)
275 vdev->priv = vi;
276
277 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
278 + spin_lock_init(&vi->refill_lock);
279
280 /* If we can receive ANY GSO packets, we must allocate large ones. */
281 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
282 diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
283 index 6bcc4a13ae6c7..cc772045d526f 100644
284 --- a/drivers/net/wireless/mediatek/mt7601u/usb.c
285 +++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
286 @@ -26,6 +26,7 @@ static const struct usb_device_id mt7601u_device_table[] = {
287 { USB_DEVICE(0x2717, 0x4106) },
288 { USB_DEVICE(0x2955, 0x0001) },
289 { USB_DEVICE(0x2955, 0x1001) },
290 + { USB_DEVICE(0x2955, 0x1003) },
291 { USB_DEVICE(0x2a5f, 0x1000) },
292 { USB_DEVICE(0x7392, 0x7710) },
293 { 0, }
294 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
295 index 8e6d7ba95df14..98e363d0025b4 100644
296 --- a/drivers/scsi/scsi_lib.c
297 +++ b/drivers/scsi/scsi_lib.c
298 @@ -1719,8 +1719,7 @@ out_put_budget:
299 case BLK_STS_OK:
300 break;
301 case BLK_STS_RESOURCE:
302 - if (atomic_read(&sdev->device_busy) ||
303 - scsi_device_blocked(sdev))
304 + if (scsi_device_blocked(sdev))
305 ret = BLK_STS_DEV_RESOURCE;
306 break;
307 default:
308 diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
309 index 10eec501f6b39..bfc589f4baf53 100644
310 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c
311 +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
312 @@ -125,9 +125,20 @@ out:
313 return ret;
314 }
315
316 +static bool phandle_exists(const struct device_node *np,
317 + const char *phandle_name, int index)
318 +{
319 + struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
320 +
321 + if (parse_np)
322 + of_node_put(parse_np);
323 +
324 + return parse_np != NULL;
325 +}
326 +
327 #define MAX_PROP_SIZE 32
328 static int ufshcd_populate_vreg(struct device *dev, const char *name,
329 - struct ufs_vreg **out_vreg)
330 + struct ufs_vreg **out_vreg)
331 {
332 int ret = 0;
333 char prop_name[MAX_PROP_SIZE];
334 @@ -140,7 +151,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
335 }
336
337 snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
338 - if (!of_parse_phandle(np, prop_name, 0)) {
339 + if (!phandle_exists(np, prop_name, 0)) {
340 dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
341 __func__, prop_name);
342 goto out;
343 diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
344 index d563abc3e1364..914e991731300 100644
345 --- a/fs/ntfs/attrib.c
346 +++ b/fs/ntfs/attrib.c
347 @@ -592,8 +592,12 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
348 a = (ATTR_RECORD*)((u8*)ctx->attr +
349 le32_to_cpu(ctx->attr->length));
350 for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
351 - if ((u8*)a < (u8*)ctx->mrec || (u8*)a > (u8*)ctx->mrec +
352 - le32_to_cpu(ctx->mrec->bytes_allocated))
353 + u8 *mrec_end = (u8 *)ctx->mrec +
354 + le32_to_cpu(ctx->mrec->bytes_allocated);
355 + u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
356 + a->name_length * sizeof(ntfschar);
357 + if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
358 + name_end > mrec_end)
359 break;
360 ctx->attr = a;
361 if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
362 diff --git a/include/net/addrconf.h b/include/net/addrconf.h
363 index 8d90fb9184e8a..880e609b7352a 100644
364 --- a/include/net/addrconf.h
365 +++ b/include/net/addrconf.h
366 @@ -399,6 +399,9 @@ static inline bool ip6_ignore_linkdown(const struct net_device *dev)
367 {
368 const struct inet6_dev *idev = __in6_dev_get(dev);
369
370 + if (unlikely(!idev))
371 + return true;
372 +
373 return !!idev->cnf.ignore_routes_with_linkdown;
374 }
375
376 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
377 index 8efc2419a815f..b2046b02d11d6 100644
378 --- a/include/net/bluetooth/l2cap.h
379 +++ b/include/net/bluetooth/l2cap.h
380 @@ -802,6 +802,7 @@ enum {
381 };
382
383 void l2cap_chan_hold(struct l2cap_chan *c);
384 +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c);
385 void l2cap_chan_put(struct l2cap_chan *c);
386
387 static inline void l2cap_chan_lock(struct l2cap_chan *chan)
388 diff --git a/include/net/tcp.h b/include/net/tcp.h
389 index aaf1d5d5a13b0..8459145497b74 100644
390 --- a/include/net/tcp.h
391 +++ b/include/net/tcp.h
392 @@ -1389,7 +1389,7 @@ void tcp_select_initial_window(const struct sock *sk, int __space,
393
394 static inline int tcp_win_from_space(const struct sock *sk, int space)
395 {
396 - int tcp_adv_win_scale = sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale;
397 + int tcp_adv_win_scale = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_adv_win_scale);
398
399 return tcp_adv_win_scale <= 0 ?
400 (space>>(-tcp_adv_win_scale)) :
401 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
402 index 959a16b133033..286fca6a9ab2a 100644
403 --- a/net/bluetooth/l2cap_core.c
404 +++ b/net/bluetooth/l2cap_core.c
405 @@ -110,7 +110,8 @@ static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
406 }
407
408 /* Find channel with given SCID.
409 - * Returns locked channel. */
410 + * Returns a reference locked channel.
411 + */
412 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
413 u16 cid)
414 {
415 @@ -118,15 +119,19 @@ static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
416
417 mutex_lock(&conn->chan_lock);
418 c = __l2cap_get_chan_by_scid(conn, cid);
419 - if (c)
420 - l2cap_chan_lock(c);
421 + if (c) {
422 + /* Only lock if chan reference is not 0 */
423 + c = l2cap_chan_hold_unless_zero(c);
424 + if (c)
425 + l2cap_chan_lock(c);
426 + }
427 mutex_unlock(&conn->chan_lock);
428
429 return c;
430 }
431
432 /* Find channel with given DCID.
433 - * Returns locked channel.
434 + * Returns a reference locked channel.
435 */
436 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
437 u16 cid)
438 @@ -135,8 +140,12 @@ static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
439
440 mutex_lock(&conn->chan_lock);
441 c = __l2cap_get_chan_by_dcid(conn, cid);
442 - if (c)
443 - l2cap_chan_lock(c);
444 + if (c) {
445 + /* Only lock if chan reference is not 0 */
446 + c = l2cap_chan_hold_unless_zero(c);
447 + if (c)
448 + l2cap_chan_lock(c);
449 + }
450 mutex_unlock(&conn->chan_lock);
451
452 return c;
453 @@ -161,8 +170,12 @@ static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
454
455 mutex_lock(&conn->chan_lock);
456 c = __l2cap_get_chan_by_ident(conn, ident);
457 - if (c)
458 - l2cap_chan_lock(c);
459 + if (c) {
460 + /* Only lock if chan reference is not 0 */
461 + c = l2cap_chan_hold_unless_zero(c);
462 + if (c)
463 + l2cap_chan_lock(c);
464 + }
465 mutex_unlock(&conn->chan_lock);
466
467 return c;
468 @@ -496,6 +509,16 @@ void l2cap_chan_hold(struct l2cap_chan *c)
469 kref_get(&c->kref);
470 }
471
472 +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
473 +{
474 + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
475 +
476 + if (!kref_get_unless_zero(&c->kref))
477 + return NULL;
478 +
479 + return c;
480 +}
481 +
482 void l2cap_chan_put(struct l2cap_chan *c)
483 {
484 BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
485 @@ -1812,7 +1835,10 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
486 src_match = !bacmp(&c->src, src);
487 dst_match = !bacmp(&c->dst, dst);
488 if (src_match && dst_match) {
489 - l2cap_chan_hold(c);
490 + c = l2cap_chan_hold_unless_zero(c);
491 + if (!c)
492 + continue;
493 +
494 read_unlock(&chan_list_lock);
495 return c;
496 }
497 @@ -1827,7 +1853,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
498 }
499
500 if (c1)
501 - l2cap_chan_hold(c1);
502 + c1 = l2cap_chan_hold_unless_zero(c1);
503
504 read_unlock(&chan_list_lock);
505
506 @@ -4221,6 +4247,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
507
508 unlock:
509 l2cap_chan_unlock(chan);
510 + l2cap_chan_put(chan);
511 return err;
512 }
513
514 @@ -4334,6 +4361,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn,
515
516 done:
517 l2cap_chan_unlock(chan);
518 + l2cap_chan_put(chan);
519 return err;
520 }
521
522 @@ -5062,6 +5090,7 @@ send_move_response:
523 l2cap_send_move_chan_rsp(chan, result);
524
525 l2cap_chan_unlock(chan);
526 + l2cap_chan_put(chan);
527
528 return 0;
529 }
530 @@ -5154,6 +5183,7 @@ static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
531 }
532
533 l2cap_chan_unlock(chan);
534 + l2cap_chan_put(chan);
535 }
536
537 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
538 @@ -5183,6 +5213,7 @@ static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
539 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
540
541 l2cap_chan_unlock(chan);
542 + l2cap_chan_put(chan);
543 }
544
545 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
546 @@ -5246,6 +5277,7 @@ static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
547 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
548
549 l2cap_chan_unlock(chan);
550 + l2cap_chan_put(chan);
551
552 return 0;
553 }
554 @@ -5281,6 +5313,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
555 }
556
557 l2cap_chan_unlock(chan);
558 + l2cap_chan_put(chan);
559
560 return 0;
561 }
562 @@ -5653,12 +5686,11 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
563 if (credits > max_credits) {
564 BT_ERR("LE credits overflow");
565 l2cap_send_disconn_req(chan, ECONNRESET);
566 - l2cap_chan_unlock(chan);
567
568 /* Return 0 so that we don't trigger an unnecessary
569 * command reject packet.
570 */
571 - return 0;
572 + goto unlock;
573 }
574
575 chan->tx_credits += credits;
576 @@ -5669,7 +5701,9 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
577 if (chan->tx_credits)
578 chan->ops->resume(chan);
579
580 +unlock:
581 l2cap_chan_unlock(chan);
582 + l2cap_chan_put(chan);
583
584 return 0;
585 }
586 @@ -6983,6 +7017,7 @@ drop:
587
588 done:
589 l2cap_chan_unlock(chan);
590 + l2cap_chan_put(chan);
591 }
592
593 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
594 @@ -7386,7 +7421,7 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
595 if (src_type != c->src_type)
596 continue;
597
598 - l2cap_chan_hold(c);
599 + c = l2cap_chan_hold_unless_zero(c);
600 read_unlock(&chan_list_lock);
601 return c;
602 }
603 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
604 index 660b41040c771..1023f881091ef 100644
605 --- a/net/ipv4/igmp.c
606 +++ b/net/ipv4/igmp.c
607 @@ -829,7 +829,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
608 struct net *net = dev_net(in_dev->dev);
609 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
610 return;
611 - WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
612 + WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
613 igmp_ifc_start_timer(in_dev, 1);
614 }
615
616 @@ -1011,7 +1011,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
617 * received value was zero, use the default or statically
618 * configured value.
619 */
620 - in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
621 + in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
622 in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
623
624 /* RFC3376, 8.3. Query Response Interval:
625 @@ -1191,7 +1191,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
626 pmc->interface = im->interface;
627 in_dev_hold(in_dev);
628 pmc->multiaddr = im->multiaddr;
629 - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
630 + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
631 pmc->sfmode = im->sfmode;
632 if (pmc->sfmode == MCAST_INCLUDE) {
633 struct ip_sf_list *psf;
634 @@ -1242,9 +1242,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
635 swap(im->tomb, pmc->tomb);
636 swap(im->sources, pmc->sources);
637 for (psf = im->sources; psf; psf = psf->sf_next)
638 - psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
639 + psf->sf_crcount = in_dev->mr_qrv ?:
640 + READ_ONCE(net->ipv4.sysctl_igmp_qrv);
641 } else {
642 - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
643 + im->crcount = in_dev->mr_qrv ?:
644 + READ_ONCE(net->ipv4.sysctl_igmp_qrv);
645 }
646 in_dev_put(pmc->interface);
647 kfree_pmc(pmc);
648 @@ -1351,7 +1353,7 @@ static void igmp_group_added(struct ip_mc_list *im)
649 if (in_dev->dead)
650 return;
651
652 - im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
653 + im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
654 if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
655 spin_lock_bh(&im->lock);
656 igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
657 @@ -1365,7 +1367,7 @@ static void igmp_group_added(struct ip_mc_list *im)
658 * IN() to IN(A).
659 */
660 if (im->sfmode == MCAST_EXCLUDE)
661 - im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
662 + im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
663
664 igmp_ifc_event(in_dev);
665 #endif
666 @@ -1756,7 +1758,7 @@ static void ip_mc_reset(struct in_device *in_dev)
667
668 in_dev->mr_qi = IGMP_QUERY_INTERVAL;
669 in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
670 - in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
671 + in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
672 }
673 #else
674 static void ip_mc_reset(struct in_device *in_dev)
675 @@ -1890,7 +1892,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
676 #ifdef CONFIG_IP_MULTICAST
677 if (psf->sf_oldin &&
678 !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
679 - psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
680 + psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
681 psf->sf_next = pmc->tomb;
682 pmc->tomb = psf;
683 rv = 1;
684 @@ -1954,7 +1956,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
685 /* filter mode change */
686 pmc->sfmode = MCAST_INCLUDE;
687 #ifdef CONFIG_IP_MULTICAST
688 - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
689 + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
690 WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
691 for (psf = pmc->sources; psf; psf = psf->sf_next)
692 psf->sf_crcount = 0;
693 @@ -2133,7 +2135,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
694 #ifdef CONFIG_IP_MULTICAST
695 /* else no filters; keep old mode for reports */
696
697 - pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
698 + pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
699 WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
700 for (psf = pmc->sources; psf; psf = psf->sf_next)
701 psf->sf_crcount = 0;
702 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
703 index 4b31f6e9ec61f..0a570d5d0b38f 100644
704 --- a/net/ipv4/tcp.c
705 +++ b/net/ipv4/tcp.c
706 @@ -697,7 +697,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
707 int size_goal)
708 {
709 return skb->len < size_goal &&
710 - sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
711 + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_autocorking) &&
712 !tcp_rtx_queue_empty(sk) &&
713 refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
714 }
715 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
716 index c151c4dd4ae63..f4e00ff909da3 100644
717 --- a/net/ipv4/tcp_input.c
718 +++ b/net/ipv4/tcp_input.c
719 @@ -439,7 +439,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
720 */
721 void tcp_init_buffer_space(struct sock *sk)
722 {
723 - int tcp_app_win = sock_net(sk)->ipv4.sysctl_tcp_app_win;
724 + int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
725 struct tcp_sock *tp = tcp_sk(sk);
726 int maxwin;
727
728 @@ -2030,7 +2030,7 @@ void tcp_enter_loss(struct sock *sk)
729 * loss recovery is underway except recurring timeout(s) on
730 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
731 */
732 - tp->frto = net->ipv4.sysctl_tcp_frto &&
733 + tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
734 (new_recovery || icsk->icsk_retransmits) &&
735 !inet_csk(sk)->icsk_mtup.probe_size;
736 }
737 @@ -2914,7 +2914,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
738
739 static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
740 {
741 - u32 wlen = sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen * HZ;
742 + u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
743 struct tcp_sock *tp = tcp_sk(sk);
744
745 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
746 @@ -3436,7 +3436,8 @@ static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
747 if (*last_oow_ack_time) {
748 s32 elapsed = (s32)(tcp_jiffies32 - *last_oow_ack_time);
749
750 - if (0 <= elapsed && elapsed < net->ipv4.sysctl_tcp_invalid_ratelimit) {
751 + if (0 <= elapsed &&
752 + elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
753 NET_INC_STATS(net, mib_idx);
754 return true; /* rate-limited: don't send yet! */
755 }
756 @@ -3484,7 +3485,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
757 /* Then check host-wide RFC 5961 rate limit. */
758 now = jiffies / HZ;
759 if (now != challenge_timestamp) {
760 - u32 ack_limit = net->ipv4.sysctl_tcp_challenge_ack_limit;
761 + u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
762 u32 half = (ack_limit + 1) >> 1;
763
764 challenge_timestamp = now;
765 @@ -4260,7 +4261,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
766 {
767 struct tcp_sock *tp = tcp_sk(sk);
768
769 - if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
770 + if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
771 int mib_idx;
772
773 if (before(seq, tp->rcv_nxt))
774 @@ -4306,7 +4307,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
775 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
776 tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
777
778 - if (tcp_is_sack(tp) && sock_net(sk)->ipv4.sysctl_tcp_dsack) {
779 + if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
780 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
781
782 tcp_rcv_spurious_retrans(sk, skb);
783 @@ -5302,7 +5303,7 @@ send_now:
784 }
785
786 if (!tcp_is_sack(tp) ||
787 - tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
788 + tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
789 goto send_now;
790
791 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
792 @@ -5325,7 +5326,8 @@ send_now:
793 if (tp->srtt_us && tp->srtt_us < rtt)
794 rtt = tp->srtt_us;
795
796 - delay = min_t(unsigned long, sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns,
797 + delay = min_t(unsigned long,
798 + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
799 rtt * (NSEC_PER_USEC >> 3)/20);
800 sock_hold(sk);
801 hrtimer_start(&tp->compressed_ack_timer, ns_to_ktime(delay),
802 diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
803 index 9a7d8a5998578..0af6249a993af 100644
804 --- a/net/ipv4/tcp_metrics.c
805 +++ b/net/ipv4/tcp_metrics.c
806 @@ -329,7 +329,7 @@ void tcp_update_metrics(struct sock *sk)
807 int m;
808
809 sk_dst_confirm(sk);
810 - if (net->ipv4.sysctl_tcp_nometrics_save || !dst)
811 + if (READ_ONCE(net->ipv4.sysctl_tcp_nometrics_save) || !dst)
812 return;
813
814 rcu_read_lock();
815 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
816 index 97f29ece38000..ef749a47768a5 100644
817 --- a/net/ipv4/tcp_output.c
818 +++ b/net/ipv4/tcp_output.c
819 @@ -1761,7 +1761,7 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
820
821 min_tso = ca_ops->min_tso_segs ?
822 ca_ops->min_tso_segs(sk) :
823 - sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
824 + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
825
826 tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
827 return min_t(u32, tso_segs, sk->sk_gso_max_segs);
828 @@ -2276,7 +2276,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
829 sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
830 if (sk->sk_pacing_status == SK_PACING_NONE)
831 limit = min_t(unsigned long, limit,
832 - sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
833 + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
834 limit <<= factor;
835
836 if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
837 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
838 index 98ac32b49d8c9..051bbd0726dff 100644
839 --- a/net/ipv6/ping.c
840 +++ b/net/ipv6/ping.c
841 @@ -22,6 +22,11 @@
842 #include <linux/proc_fs.h>
843 #include <net/ping.h>
844
845 +static void ping_v6_destroy(struct sock *sk)
846 +{
847 + inet6_destroy_sock(sk);
848 +}
849 +
850 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
851 static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
852 int *addr_len)
853 @@ -165,6 +170,7 @@ struct proto pingv6_prot = {
854 .owner = THIS_MODULE,
855 .init = ping_init_sock,
856 .close = ping_close,
857 + .destroy = ping_v6_destroy,
858 .connect = ip6_datagram_connect_v6_only,
859 .disconnect = __udp_disconnect,
860 .setsockopt = ipv6_setsockopt,
861 diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
862 index 7d3ab08a5a2d0..581bd1353a447 100644
863 --- a/net/netfilter/nfnetlink_queue.c
864 +++ b/net/netfilter/nfnetlink_queue.c
865 @@ -846,11 +846,16 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
866 }
867
868 static int
869 -nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
870 +nfqnl_mangle(void *data, unsigned int data_len, struct nf_queue_entry *e, int diff)
871 {
872 struct sk_buff *nskb;
873
874 if (diff < 0) {
875 + unsigned int min_len = skb_transport_offset(e->skb);
876 +
877 + if (data_len < min_len)
878 + return -EINVAL;
879 +
880 if (pskb_trim(e->skb, data_len))
881 return -ENOMEM;
882 } else if (diff > 0) {
883 diff --git a/net/sctp/associola.c b/net/sctp/associola.c
884 index fb6f62264e874..f960b0e1e552c 100644
885 --- a/net/sctp/associola.c
886 +++ b/net/sctp/associola.c
887 @@ -224,9 +224,8 @@ static struct sctp_association *sctp_association_init(
888 if (!sctp_ulpq_init(&asoc->ulpq, asoc))
889 goto fail_init;
890
891 - if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams,
892 - 0, gfp))
893 - goto fail_init;
894 + if (sctp_stream_init(&asoc->stream, asoc->c.sinit_num_ostreams, 0, gfp))
895 + goto stream_free;
896
897 /* Initialize default path MTU. */
898 asoc->pathmtu = sp->pathmtu;
899 diff --git a/net/sctp/stream.c b/net/sctp/stream.c
900 index cd20638b61514..56762745d6e4e 100644
901 --- a/net/sctp/stream.c
902 +++ b/net/sctp/stream.c
903 @@ -137,7 +137,7 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
904
905 ret = sctp_stream_alloc_out(stream, outcnt, gfp);
906 if (ret)
907 - goto out_err;
908 + return ret;
909
910 for (i = 0; i < stream->outcnt; i++)
911 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
912 @@ -145,22 +145,9 @@ int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
913 handle_in:
914 sctp_stream_interleave_init(stream);
915 if (!incnt)
916 - goto out;
917 -
918 - ret = sctp_stream_alloc_in(stream, incnt, gfp);
919 - if (ret)
920 - goto in_err;
921 -
922 - goto out;
923 + return 0;
924
925 -in_err:
926 - sched->free(stream);
927 - genradix_free(&stream->in);
928 -out_err:
929 - genradix_free(&stream->out);
930 - stream->outcnt = 0;
931 -out:
932 - return ret;
933 + return sctp_stream_alloc_in(stream, incnt, gfp);
934 }
935
936 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
937 diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
938 index 99e5f69fbb742..a2e1d34f52c5b 100644
939 --- a/net/sctp/stream_sched.c
940 +++ b/net/sctp/stream_sched.c
941 @@ -163,7 +163,7 @@ int sctp_sched_set_sched(struct sctp_association *asoc,
942 if (!SCTP_SO(&asoc->stream, i)->ext)
943 continue;
944
945 - ret = n->init_sid(&asoc->stream, i, GFP_KERNEL);
946 + ret = n->init_sid(&asoc->stream, i, GFP_ATOMIC);
947 if (ret)
948 goto err;
949 }
950 diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
951 index 2ec0a32da5793..0b185b1090ff3 100644
952 --- a/tools/perf/util/symbol-elf.c
953 +++ b/tools/perf/util/symbol-elf.c
954 @@ -230,6 +230,33 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
955 return NULL;
956 }
957
958 +static int elf_read_program_header(Elf *elf, u64 vaddr, GElf_Phdr *phdr)
959 +{
960 + size_t i, phdrnum;
961 + u64 sz;
962 +
963 + if (elf_getphdrnum(elf, &phdrnum))
964 + return -1;
965 +
966 + for (i = 0; i < phdrnum; i++) {
967 + if (gelf_getphdr(elf, i, phdr) == NULL)
968 + return -1;
969 +
970 + if (phdr->p_type != PT_LOAD)
971 + continue;
972 +
973 + sz = max(phdr->p_memsz, phdr->p_filesz);
974 + if (!sz)
975 + continue;
976 +
977 + if (vaddr >= phdr->p_vaddr && (vaddr < phdr->p_vaddr + sz))
978 + return 0;
979 + }
980 +
981 + /* Not found any valid program header */
982 + return -1;
983 +}
984 +
985 static bool want_demangle(bool is_kernel_sym)
986 {
987 return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
988 @@ -1091,6 +1118,7 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
989 sym.st_value);
990 used_opd = true;
991 }
992 +
993 /*
994 * When loading symbols in a data mapping, ABS symbols (which
995 * has a value of SHN_ABS in its st_shndx) failed at
996 @@ -1127,11 +1155,20 @@ int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
997 goto out_elf_end;
998 } else if ((used_opd && runtime_ss->adjust_symbols) ||
999 (!used_opd && syms_ss->adjust_symbols)) {
1000 + GElf_Phdr phdr;
1001 +
1002 + if (elf_read_program_header(syms_ss->elf,
1003 + (u64)sym.st_value, &phdr)) {
1004 + pr_warning("%s: failed to find program header for "
1005 + "symbol: %s st_value: %#" PRIx64 "\n",
1006 + __func__, elf_name, (u64)sym.st_value);
1007 + continue;
1008 + }
1009 pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " "
1010 - "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__,
1011 - (u64)sym.st_value, (u64)shdr.sh_addr,
1012 - (u64)shdr.sh_offset);
1013 - sym.st_value -= shdr.sh_addr - shdr.sh_offset;
1014 + "p_vaddr: %#" PRIx64 " p_offset: %#" PRIx64 "\n",
1015 + __func__, (u64)sym.st_value, (u64)phdr.p_vaddr,
1016 + (u64)phdr.p_offset);
1017 + sym.st_value -= phdr.p_vaddr - phdr.p_offset;
1018 }
1019
1020 demangled = demangle_sym(dso, kmodule, elf_name);