Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.15/0102-4.15.3-all-fixes.patch



Revision 3086
Wed Mar 21 14:52:20 2018 UTC by niro
File size: 26231 byte(s)
-linux-4.15.3
diff --git a/Makefile b/Makefile
index 54f1bc10b531..13566ad7863a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 15
-SUBLEVEL = 2
+SUBLEVEL = 3
EXTRAVERSION =
NAME = Fearless Coyote

diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 9267cbdb14d2..3ced1ba1fd11 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -198,11 +198,13 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
}

sg_init_table(sg, np + 1);
- np--;
+ if (rem)
+ np--;
for (k = 0; k < np; k++)
sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);

- sg_set_buf(&sg[k + 1], xbuf[k], rem);
+ if (rem)
+ sg_set_buf(&sg[k + 1], xbuf[k], rem);
}

static void test_aead_speed(const char *algo, int enc, unsigned int secs,
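
The tcrypt hunk above fixes sg_init_aead() for buffer sizes that are an exact multiple of PAGE_SIZE: in that case rem == 0, yet the old code unconditionally dropped one full page from the mapping loop and appended a zero-length tail entry. A minimal user-space sketch of the corrected sizing arithmetic (PAGE_SIZE pinned to 4096 and plan_sg() a made-up name, both purely for illustration):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static void plan_sg(unsigned int buflen)
    {
        unsigned int np = (buflen + PAGE_SIZE - 1) / PAGE_SIZE; /* entries */
        unsigned int rem = buflen % PAGE_SIZE;                  /* tail bytes */
        unsigned int full_pages = np;

        if (rem)        /* the fix: only reserve a tail slot when rem != 0 */
            full_pages--;
        printf("buflen=%u -> %u full page(s)%s\n",
               buflen, full_pages, rem ? " + tail entry" : ", no tail entry");
    }

    int main(void)
    {
        plan_sg(PAGE_SIZE);      /* exact multiple: 1 full page, no tail */
        plan_sg(PAGE_SIZE + 10); /* 1 full page plus a 10-byte tail */
        return 0;
    }
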
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index 016d7427ebfa..761d8279abca 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -505,4 +505,4 @@ module_platform_driver(uniphier_gpio_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier GPIO driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
index 46768c056193..0c28d0b995cc 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_util.c
@@ -115,3 +115,6 @@ struct mtk_vcodec_ctx *mtk_vcodec_get_curr_ctx(struct mtk_vcodec_dev *dev)
return ctx;
}
EXPORT_SYMBOL(mtk_vcodec_get_curr_ctx);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Mediatek video codec driver");
diff --git a/drivers/media/platform/soc_camera/soc_scale_crop.c b/drivers/media/platform/soc_camera/soc_scale_crop.c
index 270ec613c27c..6164102e6f9f 100644
--- a/drivers/media/platform/soc_camera/soc_scale_crop.c
+++ b/drivers/media/platform/soc_camera/soc_scale_crop.c
@@ -420,3 +420,7 @@ void soc_camera_calc_client_output(struct soc_camera_device *icd,
mf->height = soc_camera_shift_scale(rect->height, shift, scale_v);
}
EXPORT_SYMBOL(soc_camera_calc_client_output);
+
+MODULE_DESCRIPTION("soc-camera scaling-cropping functions");
+MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 807c94c70049..92f93a880015 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -493,3 +493,8 @@ static struct platform_driver tegra_cec_driver = {
};

module_platform_driver(tegra_cec_driver);
+
+MODULE_DESCRIPTION("Tegra HDMI CEC driver");
+MODULE_AUTHOR("NVIDIA CORPORATION");
+MODULE_AUTHOR("Cisco Systems, Inc. and/or its affiliates");
+MODULE_LICENSE("GPL v2");
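
The four file diffs above only touch module metadata. A loadable module without MODULE_LICENSE() is treated as proprietary: it taints the kernel on load and is refused GPL-only exports, and per include/linux/module.h the string "GPL" means GPLv2 or later while "GPL v2" means v2 only, which is what the uniphier change tightens. A minimal skeleton showing where these macros sit (all names here are placeholders, not from the patch):

    #include <linux/module.h>

    static int __init demo_init(void)
    {
        return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);

    MODULE_AUTHOR("Example Author <author@example.invalid>");
    MODULE_DESCRIPTION("Example of the metadata the patches add");
    MODULE_LICENSE("GPL v2");  /* "GPL v2" = v2 only; "GPL" = v2 or later */
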
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f7080d0ab874..46b0372dd032 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -3891,7 +3891,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
struct list_head *head = &mbx->cmd_q;
struct qlcnic_cmd_args *cmd = NULL;

- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);

while (!list_empty(head)) {
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
@@ -3902,7 +3902,7 @@ static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}

- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
}

static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
@@ -3938,12 +3938,12 @@ static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
{
struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;

- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);

list_del(&cmd->list);
mbx->num_cmds--;

- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);

qlcnic_83xx_notify_cmd_completion(adapter, cmd);
}
@@ -4008,7 +4008,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
init_completion(&cmd->completion);
cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;

- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);

list_add_tail(&cmd->list, &mbx->cmd_q);
mbx->num_cmds++;
@@ -4016,7 +4016,7 @@ static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
*timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
queue_work(mbx->work_q, &mbx->work);

- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);

return 0;
}
@@ -4112,15 +4112,15 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
spin_unlock_irqrestore(&mbx->aen_lock, flags);

- spin_lock(&mbx->queue_lock);
+ spin_lock_bh(&mbx->queue_lock);

if (list_empty(head)) {
- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);
return;
}
cmd = list_entry(head->next, struct qlcnic_cmd_args, list);

- spin_unlock(&mbx->queue_lock);
+ spin_unlock_bh(&mbx->queue_lock);

mbx_ops->encode_cmd(adapter, cmd);
mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
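
Every acquisition of mbx->queue_lock in the qlcnic hunks becomes the _bh variant. The usual reason for this conversion, and presumably the deadlock being fixed here, is that the lock is also taken from bottom-half context: if a softirq preempts a process-context holder on the same CPU and then spins on the same lock, the CPU deadlocks. The rule in a kernel-style sketch (illustrative fragment, not a standalone program):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(queue_lock);
    static LIST_HEAD(cmd_q);

    /* Process-context side: BHs must stay disabled while the lock is
     * held, because a softirq on this CPU may also take queue_lock. */
    static void enqueue_cmd(struct list_head *cmd)
    {
        spin_lock_bh(&queue_lock);
        list_add_tail(cmd, &cmd_q);
        spin_unlock_bh(&queue_lock);
    }
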
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 734286ebe5ef..dd713dff8d22 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1395,7 +1395,7 @@ DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
void __iomem *ioaddr = tp->mmio_addr;

- return RTL_R8(IBISR0) & 0x02;
+ return RTL_R8(IBISR0) & 0x20;
}

static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
@@ -1403,7 +1403,7 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;

RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
- rtl_msleep_loop_wait_low(tp, &rtl_ocp_tx_cond, 50, 2000);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
}
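
Two related r8169 bugs in one place: rtl_ocp_tx_cond tested bit 0x02 of IBISR0 instead of 0x20 (the same bit rtl8168ep_stop_cmac acknowledges by writing it back afterwards), and the caller polled for the condition to go low when it should wait for the hardware to raise it. A stand-alone sketch of the corrected poll, with the register read stubbed out (read_ibisr0() and the retry numbers are illustrative, not from the driver):

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static unsigned char read_ibisr0(void)
    {
        return 0x20;    /* stub: pretend the hardware raised the bit */
    }

    static bool wait_tx_done(int tries, int delay_ms)
    {
        while (tries--) {
            if (read_ibisr0() & 0x20)   /* fixed mask, wait for "high" */
                return true;
            usleep(delay_ms * 1000);
        }
        return false;
    }

    int main(void)
    {
        printf("tx done: %s\n", wait_tx_done(40, 50) ? "yes" : "timeout");
        return 0;
    }
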
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index fc8f8bdf6579..056cb6093630 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2902,6 +2902,12 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_alloc_ordered_workqueue;
}

+ err = rocker_probe_ports(rocker);
+ if (err) {
+ dev_err(&pdev->dev, "failed to probe ports\n");
+ goto err_probe_ports;
+ }
+
/* Only FIBs pointing to our own netdevs are programmed into
* the device, so no need to pass a callback.
*/
@@ -2918,22 +2924,16 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)

rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

- err = rocker_probe_ports(rocker);
- if (err) {
- dev_err(&pdev->dev, "failed to probe ports\n");
- goto err_probe_ports;
- }
-
dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
(int)sizeof(rocker->hw.id), &rocker->hw.id);

return 0;

-err_probe_ports:
- unregister_switchdev_notifier(&rocker_switchdev_notifier);
err_register_switchdev_notifier:
unregister_fib_notifier(&rocker->fib_nb);
err_register_fib_notifier:
+ rocker_remove_ports(rocker);
+err_probe_ports:
destroy_workqueue(rocker->rocker_owq);
err_alloc_ordered_workqueue:
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
@@ -2961,9 +2961,9 @@ static void rocker_remove(struct pci_dev *pdev)
{
struct rocker *rocker = pci_get_drvdata(pdev);

- rocker_remove_ports(rocker);
unregister_switchdev_notifier(&rocker_switchdev_notifier);
unregister_fib_notifier(&rocker->fib_nb);
+ rocker_remove_ports(rocker);
rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
destroy_workqueue(rocker->rocker_owq);
free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
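
The rocker fix is purely about ordering: ports are now probed before the FIB and switchdev notifiers are registered, and both the error unwind in rocker_probe() and rocker_remove() tear down in the exact reverse of setup, so a notifier callback can never see half-initialized ports. The goto-unwind idiom it restores, as a stand-alone sketch:

    #include <stdio.h>

    static int step(const char *name, int ok)
    {
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
    }

    static int probe(int fail_at)
    {
        if (step("probe_ports", fail_at != 1))
            goto err_probe_ports;
        if (step("register_notifiers", fail_at != 2))
            goto err_register_notifiers;
        return 0;

    err_register_notifiers:
        puts("undo: remove_ports");     /* undoes only what succeeded */
    err_probe_ports:
        return -1;
    }

    int main(void)
    {
        probe(2);   /* notifier registration fails -> ports get removed */
        return 0;
    }
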
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 728819feab44..e7114c34fe4b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1245,6 +1245,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */

/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index c7bdeb655646..5636c7ca8eba 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1208,6 +1208,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
}
vhost_net_stop(n, &tx_sock, &rx_sock);
vhost_net_flush(n);
+ vhost_dev_stop(&n->dev);
vhost_dev_reset_owner(&n->dev, umem);
vhost_net_vq_reset(n);
done:
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index becf86aa4ac6..d6ec5a5a6782 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -280,7 +280,6 @@ struct tcf_block {
struct net *net;
struct Qdisc *q;
struct list_head cb_list;
- struct work_struct work;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..0a78ce57872d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,6 +5828,20 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!mem_cgroup_sockets_enabled)
return;

+ /*
+ * Socket cloning can throw us here with sk_memcg already
+ * filled. It won't however, necessarily happen from
+ * process context. So the test for root memcg given
+ * the current task's memcg won't help us in this case.
+ *
+ * Respecting the original socket's memcg is a better
+ * decision in this case.
+ */
+ if (sk->sk_memcg) {
+ css_get(&sk->sk_memcg->css);
+ return;
+ }
+
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
diff --git a/net/core/sock.c b/net/core/sock.c
index c0b5b2f17412..7571dabfc4cf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1675,16 +1675,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
newsk->sk_dst_pending_confirm = 0;
newsk->sk_wmem_queued = 0;
newsk->sk_forward_alloc = 0;
-
- /* sk->sk_memcg will be populated at accept() time */
- newsk->sk_memcg = NULL;
-
atomic_set(&newsk->sk_drops, 0);
newsk->sk_send_head = NULL;
newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
atomic_set(&newsk->sk_zckey, 0);

sock_reset_flag(newsk, SOCK_DONE);
+ mem_cgroup_sk_alloc(newsk);
cgroup_sk_alloc(&newsk->sk_cgrp_data);

rcu_read_lock();
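
The memcontrol.c and sock.c hunks (plus the inet_connection_sock.c hunk further down) move memcg accounting of a socket from accept() time to clone time. Since sk_clone_lock() need not run in the owning task's context, mem_cgroup_sk_alloc() can no longer derive the memcg from current for a clone; it now inherits the parent's sk_memcg by taking another css reference. The guard, condensed from the hunk above (kernel context):

    void mem_cgroup_sk_alloc(struct sock *sk)
    {
        if (!mem_cgroup_sockets_enabled)
            return;

        /* A cloned socket arrives with the parent's sk_memcg already
         * set; keep it and just take a reference. */
        if (sk->sk_memcg) {
            css_get(&sk->sk_memcg->css);
            return;
        }
        /* ... otherwise resolve the memcg from current under RCU ... */
    }
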
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
index 5eeb1d20cc38..676092d7bd81 100644
--- a/net/core/sock_reuseport.c
+++ b/net/core/sock_reuseport.c
@@ -94,6 +94,16 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
return more_reuse;
}

+static void reuseport_free_rcu(struct rcu_head *head)
+{
+ struct sock_reuseport *reuse;
+
+ reuse = container_of(head, struct sock_reuseport, rcu);
+ if (reuse->prog)
+ bpf_prog_destroy(reuse->prog);
+ kfree(reuse);
+}
+
/**
* reuseport_add_sock - Add a socket to the reuseport group of another.
* @sk: New socket to add to the group.
@@ -102,7 +112,7 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
*/
int reuseport_add_sock(struct sock *sk, struct sock *sk2)
{
- struct sock_reuseport *reuse;
+ struct sock_reuseport *old_reuse, *reuse;

if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
int err = reuseport_alloc(sk2);
@@ -113,10 +123,13 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)

spin_lock_bh(&reuseport_lock);
reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)),
- WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
- lockdep_is_held(&reuseport_lock)),
- "socket already in reuseport group");
+ lockdep_is_held(&reuseport_lock));
+ old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+ lockdep_is_held(&reuseport_lock));
+ if (old_reuse && old_reuse->num_socks != 1) {
+ spin_unlock_bh(&reuseport_lock);
+ return -EBUSY;
+ }

if (reuse->num_socks == reuse->max_socks) {
reuse = reuseport_grow(reuse);
@@ -134,19 +147,11 @@ int reuseport_add_sock(struct sock *sk, struct sock *sk2)

spin_unlock_bh(&reuseport_lock);

+ if (old_reuse)
+ call_rcu(&old_reuse->rcu, reuseport_free_rcu);
return 0;
}

-static void reuseport_free_rcu(struct rcu_head *head)
-{
- struct sock_reuseport *reuse;
-
- reuse = container_of(head, struct sock_reuseport, rcu);
- if (reuse->prog)
- bpf_prog_destroy(reuse->prog);
- kfree(reuse);
-}
-
void reuseport_detach_sock(struct sock *sk)
{
struct sock_reuseport *reuse;
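
reuseport_add_sock() used to only WARN when sk already carried a reuseport group. It now fails with -EBUSY if that old group has other members, and when sk was the sole member the abandoned group is freed via call_rcu() once the spinlock is dropped, which is why reuseport_free_rcu() had to move above its new caller. The shape of the fix, condensed from the hunks (kernel context):

    old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
    if (old_reuse && old_reuse->num_socks != 1) {
        spin_unlock_bh(&reuseport_lock);
        return -EBUSY;              /* already grouped with other sockets */
    }

    /* ... attach sk to sk2's group ... */
    spin_unlock_bh(&reuseport_lock);

    if (old_reuse)                  /* readers may still be traversing it */
        call_rcu(&old_reuse->rcu, reuseport_free_rcu);
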
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 2d49717a7421..f0b1fc35dde1 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -386,7 +386,11 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
pip->frag_off = htons(IP_DF);
pip->ttl = 1;
pip->daddr = fl4.daddr;
+
+ rcu_read_lock();
pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
+ rcu_read_unlock();
+
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(net, skb, NULL);
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4ca46dc08e63..3668c4182655 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -475,7 +475,6 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
}
spin_unlock_bh(&queue->fastopenq.lock);
}
- mem_cgroup_sk_alloc(newsk);
out:
release_sock(sk);
if (req)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8e053ad7cae2..c821f5d68720 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2434,6 +2434,12 @@ int tcp_disconnect(struct sock *sk, int flags)

WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);

+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ sk->sk_frag.offset = 0;
+ }
+
sk->sk_error_report(sk);
return err;
}
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 8322f26e770e..25c5a0b60cfc 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -481,7 +481,8 @@ static void bbr_advance_cycle_phase(struct sock *sk)

bbr->cycle_idx = (bbr->cycle_idx + 1) & (CYCLE_LEN - 1);
bbr->cycle_mstamp = tp->delivered_mstamp;
- bbr->pacing_gain = bbr_pacing_gain[bbr->cycle_idx];
+ bbr->pacing_gain = bbr->lt_use_bw ? BBR_UNIT :
+ bbr_pacing_gain[bbr->cycle_idx];
}

/* Gain cycling: cycle pacing gain to converge to fair share of available bw. */
@@ -490,8 +491,7 @@ static void bbr_update_cycle_phase(struct sock *sk,
{
struct bbr *bbr = inet_csk_ca(sk);

- if ((bbr->mode == BBR_PROBE_BW) && !bbr->lt_use_bw &&
- bbr_is_next_cycle_phase(sk, rs))
+ if (bbr->mode == BBR_PROBE_BW && bbr_is_next_cycle_phase(sk, rs))
bbr_advance_cycle_phase(sk);
}

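The tcp_bbr change lets the PROBE_BW gain cycle keep advancing while long-term bandwidth mode (lt_use_bw) is active, but pins pacing_gain at BBR_UNIT, i.e. 1.0 in BBR's 8-bit fixed point, for the duration. A stand-alone sketch of the selection; the gain table mirrors the driver's published values but is reproduced here as an assumption:

    #include <stdio.h>

    #define BBR_SCALE 8
    #define BBR_UNIT  (1 << BBR_SCALE)  /* 1.0 in fixed point */
    #define CYCLE_LEN 8

    static const int bbr_pacing_gain[CYCLE_LEN] = {
        BBR_UNIT * 5 / 4,   /* probe for more available bandwidth */
        BBR_UNIT * 3 / 4,   /* drain the queue created by probing */
        BBR_UNIT, BBR_UNIT, BBR_UNIT,   /* cruise at the estimated bw */
        BBR_UNIT, BBR_UNIT, BBR_UNIT,
    };

    static int pacing_gain(int lt_use_bw, int cycle_idx)
    {
        return lt_use_bw ? BBR_UNIT : bbr_pacing_gain[cycle_idx];
    }

    int main(void)
    {
        printf("lt off, idx 0: %d/256\n", pacing_gain(0, 0)); /* 320 */
        printf("lt on,  idx 0: %d/256\n", pacing_gain(1, 0)); /* 256 */
        return 0;
    }
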
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index f49bd7897e95..2547222589fe 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -186,7 +186,8 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,

static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_work(struct work_struct *w);
-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+ bool send_na);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3833,12 +3834,17 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
idev->cnf.accept_dad < 1) ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
+ bool send_na = false;
+
+ if (ifp->flags & IFA_F_TENTATIVE &&
+ !(ifp->flags & IFA_F_OPTIMISTIC))
+ send_na = true;
bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);

- addrconf_dad_completed(ifp, bump_id);
+ addrconf_dad_completed(ifp, bump_id, send_na);
return;
}

@@ -3967,16 +3973,21 @@ static void addrconf_dad_work(struct work_struct *w)
}

if (ifp->dad_probes == 0) {
+ bool send_na = false;
+
/*
* DAD was successful
*/

+ if (ifp->flags & IFA_F_TENTATIVE &&
+ !(ifp->flags & IFA_F_OPTIMISTIC))
+ send_na = true;
bump_id = ifp->flags & IFA_F_TENTATIVE;
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
write_unlock_bh(&idev->lock);

- addrconf_dad_completed(ifp, bump_id);
+ addrconf_dad_completed(ifp, bump_id, send_na);

goto out;
}
@@ -4014,7 +4025,8 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
return true;
}

-static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
+static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
+ bool send_na)
{
struct net_device *dev = ifp->idev->dev;
struct in6_addr lladdr;
@@ -4046,6 +4058,16 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id)
if (send_mld)
ipv6_mc_dad_complete(ifp->idev);

+ /* send unsolicited NA if enabled */
+ if (send_na &&
+ (ifp->idev->cnf.ndisc_notify ||
+ dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
+ ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
+ /*router=*/ !!ifp->idev->cnf.forwarding,
+ /*solicited=*/ false, /*override=*/ true,
+ /*inc_opt=*/ true);
+ }
+
if (send_rs) {
/*
* If a host as already performed a random delay
@@ -4352,9 +4374,11 @@ static void addrconf_verify_rtnl(void)
spin_lock(&ifpub->lock);
ifpub->regen_count = 0;
spin_unlock(&ifpub->lock);
+ rcu_read_unlock_bh();
ipv6_create_tempaddr(ifpub, ifp, true);
in6_ifa_put(ifpub);
in6_ifa_put(ifp);
+ rcu_read_lock_bh();
goto restart;
}
} else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index c9441ca45399..416917719a6f 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -284,6 +284,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct net *net = sock_net(sk);
__be32 v4addr = 0;
unsigned short snum;
+ bool saved_ipv6only;
int addr_type = 0;
int err = 0;

@@ -389,19 +390,21 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;

+ saved_ipv6only = sk->sk_ipv6only;
+ if (addr_type != IPV6_ADDR_ANY && addr_type != IPV6_ADDR_MAPPED)
+ sk->sk_ipv6only = 1;
+
/* Make sure we are allowed to bind here. */
if ((snum || !inet->bind_address_no_port) &&
sk->sk_prot->get_port(sk, snum)) {
+ sk->sk_ipv6only = saved_ipv6only;
inet_reset_saddr(sk);
err = -EADDRINUSE;
goto out;
}

- if (addr_type != IPV6_ADDR_ANY) {
+ if (addr_type != IPV6_ADDR_ANY)
sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
- if (addr_type != IPV6_ADDR_MAPPED)
- sk->sk_ipv6only = 1;
- }
if (snum)
sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
inet->inet_sport = htons(inet->inet_num);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index a2e1a864eb46..4fc566ec7e79 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -495,6 +495,7 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
return ERR_PTR(-ENOENT);

it->mrt = mrt;
+ it->cache = NULL;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index b3cea200c85e..f61a5b613b52 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -566,6 +566,11 @@ static void ndisc_send_unsol_na(struct net_device *dev)

read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ /* skip tentative addresses until dad completes */
+ if (ifa->flags & IFA_F_TENTATIVE &&
+ !(ifa->flags & IFA_F_OPTIMISTIC))
+ continue;
+
ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifa->addr,
/*router=*/ !!idev->cnf.forwarding,
/*solicited=*/ false, /*override=*/ true,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0458b761f3c5..a560fb1d0230 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1586,12 +1586,19 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
* EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
* expired, independently from their aging, as per RFC 8201 section 4
*/
- if (!(rt->rt6i_flags & RTF_EXPIRES) &&
- time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
- RT6_TRACE("aging clone %p\n", rt);
+ if (!(rt->rt6i_flags & RTF_EXPIRES)) {
+ if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+ RT6_TRACE("aging clone %p\n", rt);
+ rt6_remove_exception(bucket, rt6_ex);
+ return;
+ }
+ } else if (time_after(jiffies, rt->dst.expires)) {
+ RT6_TRACE("purging expired route %p\n", rt);
rt6_remove_exception(bucket, rt6_ex);
return;
- } else if (rt->rt6i_flags & RTF_GATEWAY) {
+ }
+
+ if (rt->rt6i_flags & RTF_GATEWAY) {
struct neighbour *neigh;
__u8 neigh_flags = 0;

@@ -1606,11 +1613,8 @@ static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
rt6_remove_exception(bucket, rt6_ex);
return;
}
- } else if (__rt6_check_expired(rt)) {
- RT6_TRACE("purging expired route %p\n", rt);
- rt6_remove_exception(bucket, rt6_ex);
- return;
}
+
gc_args->more++;
}

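The rt6_age_examine_exception() rewrite untangles an if/else-if chain: before, an expired RTF_EXPIRES exception was only purged in the final else branch, so it survived whenever the RTF_GATEWAY branch matched first. Expiry-based removal and the gateway-neighbour check are now independent steps. The decision logic as a stand-alone sketch (the field names are simplified stand-ins for the rt6 flags and timestamps):

    #include <stdbool.h>
    #include <stdio.h>

    struct exception {
        bool has_expires;   /* RTF_EXPIRES set             */
        bool expired;       /* past dst.expires            */
        bool idle;          /* unused longer than timeout  */
        bool gateway;       /* RTF_GATEWAY set             */
        bool neigh_dead;    /* neighbour known unreachable */
    };

    static bool should_remove(const struct exception *e)
    {
        if (!e->has_expires) {
            if (e->idle)            /* aging clone */
                return true;
        } else if (e->expired) {    /* purging expired route */
            return true;
        }
        /* gateway check now runs regardless of which branch was taken */
        return e->gateway && e->neigh_dead;
    }

    int main(void)
    {
        /* expired gateway route: the old else-if chain kept this alive */
        struct exception e = { .has_expires = true, .expired = true,
                               .gateway = true };
        printf("remove: %s\n", should_remove(&e) ? "yes" : "no");
        return 0;
    }
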
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index b9d63d2246e6..e6b853f0ee4f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -217,8 +217,12 @@ static void tcf_chain_flush(struct tcf_chain *chain)

static void tcf_chain_destroy(struct tcf_chain *chain)
{
+ struct tcf_block *block = chain->block;
+
list_del(&chain->list);
kfree(chain);
+ if (list_empty(&block->chain_list))
+ kfree(block);
}

static void tcf_chain_hold(struct tcf_chain *chain)
@@ -329,49 +333,34 @@ int tcf_block_get(struct tcf_block **p_block,
}
EXPORT_SYMBOL(tcf_block_get);

-static void tcf_block_put_final(struct work_struct *work)
-{
- struct tcf_block *block = container_of(work, struct tcf_block, work);
- struct tcf_chain *chain, *tmp;
-
- rtnl_lock();
-
- /* At this point, all the chains should have refcnt == 1. */
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
- tcf_chain_put(chain);
- rtnl_unlock();
- kfree(block);
-}
-
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
* actions should be all removed after flushing.
*/
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain;
+ struct tcf_chain *chain, *tmp;

if (!block)
return;
- /* Hold a refcnt for all chains, except 0, so that they don't disappear
+ /* Hold a refcnt for all chains, so that they don't disappear
* while we are iterating.
*/
list_for_each_entry(chain, &block->chain_list, list)
- if (chain->index)
- tcf_chain_hold(chain);
+ tcf_chain_hold(chain);

list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);

tcf_block_offload_unbind(block, q, ei);

- INIT_WORK(&block->work, tcf_block_put_final);
- /* Wait for existing RCU callbacks to cool down, make sure their works
- * have been queued before this. We can not flush pending works here
- * because we are holding the RTNL lock.
- */
- rcu_barrier();
- tcf_queue_work(&block->work);
+ /* At this point, all the chains should have refcnt >= 1. */
+ list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ tcf_chain_put(chain);
+
+ /* Finally, put chain 0 and allow block to be freed. */
+ chain = list_first_entry(&block->chain_list, struct tcf_chain, list);
+ tcf_chain_put(chain);
}
EXPORT_SYMBOL(tcf_block_put_ext);

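This pairs with the sch_generic.h hunk earlier, which drops the work_struct from struct tcf_block: rather than deferring the final free through rcu_barrier() and a work item, the block is now freed synchronously by whichever tcf_chain_destroy() empties block->chain_list, with chain 0 put last. The ownership rule in miniature (the kernel keys off list_empty(); a plain counter stands in for it in this sketch):

    #include <stdio.h>
    #include <stdlib.h>

    struct block { int nchains; };

    struct chain { struct block *block; };

    static void chain_destroy(struct chain *c)
    {
        struct block *b = c->block;

        free(c);
        if (--b->nchains == 0) {    /* the last chain frees its block */
            puts("freeing block");
            free(b);
        }
    }

    int main(void)
    {
        struct block *b = calloc(1, sizeof(*b));
        struct chain *c0 = calloc(1, sizeof(*c0));
        struct chain *c1 = calloc(1, sizeof(*c1));

        b->nchains = 2;
        c0->block = b;
        c1->block = b;
        chain_destroy(c0);
        chain_destroy(c1);  /* block is freed here */
        return 0;
    }
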
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 507859cdd1cb..33294b5b2c6a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -544,6 +544,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle)
static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
u32 flags)
{
+ struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tcf_block *block = tp->chain->block;
struct tc_cls_u32_offload cls_u32 = {};
bool skip_sw = tc_skip_sw(flags);
@@ -563,7 +564,7 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
cls_u32.knode.sel = &n->sel;
cls_u32.knode.exts = &n->exts;
if (n->ht_down)
- cls_u32.knode.link_handle = n->ht_down->handle;
+ cls_u32.knode.link_handle = ht->handle;

err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSU32, &cls_u32, skip_sw);
if (err < 0) {
@@ -840,8 +841,9 @@ static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
struct tc_u_knode *n)
{
- struct tc_u_knode *new;
+ struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
struct tc_u32_sel *s = &n->sel;
+ struct tc_u_knode *new;

new = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key),
GFP_KERNEL);
@@ -859,11 +861,11 @@ static struct tc_u_knode *u32_init_knode(struct tcf_proto *tp,
new->fshift = n->fshift;
new->res = n->res;
new->flags = n->flags;
- RCU_INIT_POINTER(new->ht_down, n->ht_down);
+ RCU_INIT_POINTER(new->ht_down, ht);

/* bump reference count as long as we hold pointer to structure */
- if (new->ht_down)
- new->ht_down->refcnt++;
+ if (ht)
+ ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
/* Statistics may be incremented by readers during update