Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0162-4.9.63-all-fixes.patch

Parent Directory | Revision Log


Revision 3608 - (hide annotations) (download)
Fri Aug 14 07:34:29 2020 UTC (3 years, 8 months ago) by niro
File size: 56453 byte(s)
-added kernel-alx-legacy pkg
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 8ab48891d088..339d4a85ffba 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 62
9     +SUBLEVEL = 63
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
14     index 40b3e31935d0..c05e7cfd0cbc 100644
15     --- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
16     +++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
17     @@ -215,16 +215,16 @@
18    
19     pinctrl_fec: fecgrp {
20     fsl,pins = <
21     - MX53_PAD_FEC_MDC__FEC_MDC 0x4
22     - MX53_PAD_FEC_MDIO__FEC_MDIO 0x1fc
23     - MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x180
24     - MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x180
25     - MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x180
26     - MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x180
27     - MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x180
28     - MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x4
29     - MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x4
30     - MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x4
31     + MX53_PAD_FEC_MDC__FEC_MDC 0x80000000
32     + MX53_PAD_FEC_MDIO__FEC_MDIO 0x80000000
33     + MX53_PAD_FEC_REF_CLK__FEC_TX_CLK 0x80000000
34     + MX53_PAD_FEC_RX_ER__FEC_RX_ER 0x80000000
35     + MX53_PAD_FEC_CRS_DV__FEC_RX_DV 0x80000000
36     + MX53_PAD_FEC_RXD1__FEC_RDATA_1 0x80000000
37     + MX53_PAD_FEC_RXD0__FEC_RDATA_0 0x80000000
38     + MX53_PAD_FEC_TX_EN__FEC_TX_EN 0x80000000
39     + MX53_PAD_FEC_TXD1__FEC_TDATA_1 0x80000000
40     + MX53_PAD_FEC_TXD0__FEC_TDATA_0 0x80000000
41     >;
42     };
43    
44     diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
45     index 8f01f21e78f1..6eda5abbd719 100644
46     --- a/arch/powerpc/Kconfig
47     +++ b/arch/powerpc/Kconfig
48     @@ -1087,11 +1087,6 @@ source "arch/powerpc/Kconfig.debug"
49    
50     source "security/Kconfig"
51    
52     -config KEYS_COMPAT
53     - bool
54     - depends on COMPAT && KEYS
55     - default y
56     -
57     source "crypto/Kconfig"
58    
59     config PPC_LIB_RHEAP
60     diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
61     index 426481d4cc86..9aa0d04c9dcc 100644
62     --- a/arch/s390/Kconfig
63     +++ b/arch/s390/Kconfig
64     @@ -359,9 +359,6 @@ config COMPAT
65     config SYSVIPC_COMPAT
66     def_bool y if COMPAT && SYSVIPC
67    
68     -config KEYS_COMPAT
69     - def_bool y if COMPAT && KEYS
70     -
71     config SMP
72     def_bool y
73     prompt "Symmetric multi-processing support"
74     diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
75     index b27e48e25841..8b4152f3a764 100644
76     --- a/arch/sparc/Kconfig
77     +++ b/arch/sparc/Kconfig
78     @@ -568,9 +568,6 @@ config SYSVIPC_COMPAT
79     depends on COMPAT && SYSVIPC
80     default y
81    
82     -config KEYS_COMPAT
83     - def_bool y if COMPAT && KEYS
84     -
85     endmenu
86    
87     source "net/Kconfig"
88     diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
89     index bada636d1065..b9c546a305a4 100644
90     --- a/arch/x86/Kconfig
91     +++ b/arch/x86/Kconfig
92     @@ -2732,10 +2732,6 @@ config COMPAT_FOR_U64_ALIGNMENT
93     config SYSVIPC_COMPAT
94     def_bool y
95     depends on SYSVIPC
96     -
97     -config KEYS_COMPAT
98     - def_bool y
99     - depends on KEYS
100     endif
101    
102     endmenu
103     diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
104     index f4e8fbec6a94..b5304e264881 100644
105     --- a/drivers/input/misc/ims-pcu.c
106     +++ b/drivers/input/misc/ims-pcu.c
107     @@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
108     return NULL;
109     }
110    
111     - while (buflen > 0) {
112     + while (buflen >= sizeof(*union_desc)) {
113     union_desc = (struct usb_cdc_union_desc *)buf;
114    
115     + if (union_desc->bLength > buflen) {
116     + dev_err(&intf->dev, "Too large descriptor\n");
117     + return NULL;
118     + }
119     +
120     if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE &&
121     union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) {
122     dev_dbg(&intf->dev, "Found union header\n");
123     - return union_desc;
124     +
125     + if (union_desc->bLength >= sizeof(*union_desc))
126     + return union_desc;
127     +
128     + dev_err(&intf->dev,
129     + "Union descriptor to short (%d vs %zd\n)",
130     + union_desc->bLength, sizeof(*union_desc));
131     + return NULL;
132     }
133    
134     buflen -= union_desc->bLength;
135     diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
136     index 6030ac5b8c63..a9fa4c0ac220 100644
137     --- a/drivers/misc/panel.c
138     +++ b/drivers/misc/panel.c
139     @@ -1423,17 +1423,25 @@ static ssize_t lcd_write(struct file *file,
140    
141     static int lcd_open(struct inode *inode, struct file *file)
142     {
143     + int ret;
144     +
145     + ret = -EBUSY;
146     if (!atomic_dec_and_test(&lcd_available))
147     - return -EBUSY; /* open only once at a time */
148     + goto fail; /* open only once at a time */
149    
150     + ret = -EPERM;
151     if (file->f_mode & FMODE_READ) /* device is write-only */
152     - return -EPERM;
153     + goto fail;
154    
155     if (lcd.must_clear) {
156     lcd_clear_display();
157     lcd.must_clear = false;
158     }
159     return nonseekable_open(inode, file);
160     +
161     + fail:
162     + atomic_inc(&lcd_available);
163     + return ret;
164     }
165    
166     static int lcd_release(struct inode *inode, struct file *file)
167     @@ -1696,14 +1704,21 @@ static ssize_t keypad_read(struct file *file,
168    
169     static int keypad_open(struct inode *inode, struct file *file)
170     {
171     + int ret;
172     +
173     + ret = -EBUSY;
174     if (!atomic_dec_and_test(&keypad_available))
175     - return -EBUSY; /* open only once at a time */
176     + goto fail; /* open only once at a time */
177    
178     + ret = -EPERM;
179     if (file->f_mode & FMODE_WRITE) /* device is read-only */
180     - return -EPERM;
181     + goto fail;
182    
183     keypad_buflen = 0; /* flush the buffer on opening */
184     return 0;
185     + fail:
186     + atomic_inc(&keypad_available);
187     + return ret;
188     }
189    
190     static int keypad_release(struct inode *inode, struct file *file)
191     diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
192     index adea6f5a4d71..9da9db17fc36 100644
193     --- a/drivers/net/macvtap.c
194     +++ b/drivers/net/macvtap.c
195     @@ -559,6 +559,10 @@ static int macvtap_open(struct inode *inode, struct file *file)
196     &macvtap_proto, 0);
197     if (!q)
198     goto err;
199     + if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL)) {
200     + sk_free(&q->sk);
201     + goto err;
202     + }
203    
204     RCU_INIT_POINTER(q->sock.wq, &q->wq);
205     init_waitqueue_head(&q->wq.wait);
206     @@ -582,22 +586,18 @@ static int macvtap_open(struct inode *inode, struct file *file)
207     if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
208     sock_set_flag(&q->sk, SOCK_ZEROCOPY);
209    
210     - err = -ENOMEM;
211     - if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
212     - goto err_array;
213     -
214     err = macvtap_set_queue(dev, file, q);
215     - if (err)
216     - goto err_queue;
217     + if (err) {
218     + /* macvtap_sock_destruct() will take care of freeing skb_array */
219     + goto err_put;
220     + }
221    
222     dev_put(dev);
223    
224     rtnl_unlock();
225     return err;
226    
227     -err_queue:
228     - skb_array_cleanup(&q->skb_array);
229     -err_array:
230     +err_put:
231     sock_put(&q->sk);
232     err:
233     if (dev)
234     @@ -1077,6 +1077,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
235     case TUNSETSNDBUF:
236     if (get_user(s, sp))
237     return -EFAULT;
238     + if (s <= 0)
239     + return -EINVAL;
240    
241     q->sk.sk_sndbuf = s;
242     return 0;
243     diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
244     index 96fa0e61d3af..440d5f42810f 100644
245     --- a/drivers/net/ppp/ppp_generic.c
246     +++ b/drivers/net/ppp/ppp_generic.c
247     @@ -1338,7 +1338,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
248    
249     static int ppp_dev_init(struct net_device *dev)
250     {
251     + struct ppp *ppp;
252     +
253     netdev_lockdep_set_classes(dev);
254     +
255     + ppp = netdev_priv(dev);
256     + /* Let the netdevice take a reference on the ppp file. This ensures
257     + * that ppp_destroy_interface() won't run before the device gets
258     + * unregistered.
259     + */
260     + atomic_inc(&ppp->file.refcnt);
261     +
262     return 0;
263     }
264    
265     @@ -1361,6 +1371,15 @@ static void ppp_dev_uninit(struct net_device *dev)
266     wake_up_interruptible(&ppp->file.rwait);
267     }
268    
269     +static void ppp_dev_priv_destructor(struct net_device *dev)
270     +{
271     + struct ppp *ppp;
272     +
273     + ppp = netdev_priv(dev);
274     + if (atomic_dec_and_test(&ppp->file.refcnt))
275     + ppp_destroy_interface(ppp);
276     +}
277     +
278     static const struct net_device_ops ppp_netdev_ops = {
279     .ndo_init = ppp_dev_init,
280     .ndo_uninit = ppp_dev_uninit,
281     @@ -1386,6 +1405,7 @@ static void ppp_setup(struct net_device *dev)
282     dev->tx_queue_len = 3;
283     dev->type = ARPHRD_PPP;
284     dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
285     + dev->destructor = ppp_dev_priv_destructor;
286     netif_keep_dst(dev);
287     }
288    
289     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
290     index ba7f9e054c4a..518cbfbc8b65 100644
291     --- a/drivers/net/tun.c
292     +++ b/drivers/net/tun.c
293     @@ -1787,6 +1787,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
294    
295     if (!dev)
296     return -ENOMEM;
297     + err = dev_get_valid_name(net, dev, name);
298     + if (err < 0)
299     + goto err_free_dev;
300    
301     dev_net_set(dev, net);
302     dev->rtnl_link_ops = &tun_link_ops;
303     @@ -2180,6 +2183,10 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
304     ret = -EFAULT;
305     break;
306     }
307     + if (sndbuf <= 0) {
308     + ret = -EINVAL;
309     + break;
310     + }
311    
312     tun->sndbuf = sndbuf;
313     tun_set_sndbuf(tun);
314     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
315     index 425a89c635d0..fc844a1f6c3f 100644
316     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
317     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
318     @@ -4754,9 +4754,6 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
319     err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
320     if (err < 0)
321     brcmf_err("setting AP mode failed %d\n", err);
322     - err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
323     - if (err < 0)
324     - brcmf_err("setting INFRA mode failed %d\n", err);
325     if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MBSS))
326     brcmf_fil_iovar_int_set(ifp, "mbss", 0);
327     brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_REGULATORY,
328     diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
329     index 6643f6fc7795..0ad8ecef1e30 100644
330     --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
331     +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
332     @@ -484,7 +484,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
333     static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
334     {
335     struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
336     - unsigned long flags;
337    
338     /*
339     * Ensure that the complete FCP WRITE payload has been received.
340     @@ -492,17 +491,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
341     */
342     cmd->cmd_in_wq = 0;
343    
344     - spin_lock_irqsave(&cmd->cmd_lock, flags);
345     - cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
346     - if (cmd->aborted) {
347     - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
348     - spin_unlock_irqrestore(&cmd->cmd_lock, flags);
349     -
350     - tcm_qla2xxx_free_cmd(cmd);
351     - return;
352     - }
353     - spin_unlock_irqrestore(&cmd->cmd_lock, flags);
354     -
355     cmd->vha->tgt_counters.qla_core_ret_ctio++;
356     if (!cmd->write_data_transferred) {
357     /*
358     @@ -682,34 +670,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
359     qlt_xmit_tm_rsp(mcmd);
360     }
361    
362     -
363     -#define DATA_WORK_NOT_FREE(_flags) \
364     - (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
365     - CMD_FLAG_DATA_WORK)
366     static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
367     {
368     struct qla_tgt_cmd *cmd = container_of(se_cmd,
369     struct qla_tgt_cmd, se_cmd);
370     - unsigned long flags;
371    
372     if (qlt_abort_cmd(cmd))
373     return;
374     -
375     - spin_lock_irqsave(&cmd->cmd_lock, flags);
376     - if ((cmd->state == QLA_TGT_STATE_NEW)||
377     - ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
378     - DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
379     -
380     - cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
381     - spin_unlock_irqrestore(&cmd->cmd_lock, flags);
382     - /* Cmd have not reached firmware.
383     - * Use this trigger to free it. */
384     - tcm_qla2xxx_free_cmd(cmd);
385     - return;
386     - }
387     - spin_unlock_irqrestore(&cmd->cmd_lock, flags);
388     - return;
389     -
390     }
391    
392     static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
393     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
394     index e49fcd5e61f7..f3c9d18e9dc5 100644
395     --- a/drivers/target/iscsi/iscsi_target.c
396     +++ b/drivers/target/iscsi/iscsi_target.c
397     @@ -1940,7 +1940,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
398     struct iscsi_tm *hdr;
399     int out_of_order_cmdsn = 0, ret;
400     bool sess_ref = false;
401     - u8 function;
402     + u8 function, tcm_function = TMR_UNKNOWN;
403    
404     hdr = (struct iscsi_tm *) buf;
405     hdr->flags &= ~ISCSI_FLAG_CMD_FINAL;
406     @@ -1986,10 +1986,6 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
407     * LIO-Target $FABRIC_MOD
408     */
409     if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
410     -
411     - u8 tcm_function;
412     - int ret;
413     -
414     transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
415     conn->sess->se_sess, 0, DMA_NONE,
416     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
417     @@ -2025,15 +2021,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
418     return iscsit_add_reject_cmd(cmd,
419     ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
420     }
421     -
422     - ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req,
423     - tcm_function, GFP_KERNEL);
424     - if (ret < 0)
425     - return iscsit_add_reject_cmd(cmd,
426     + }
427     + ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
428     + GFP_KERNEL);
429     + if (ret < 0)
430     + return iscsit_add_reject_cmd(cmd,
431     ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
432    
433     - cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
434     - }
435     + cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
436    
437     cmd->iscsi_opcode = ISCSI_OP_SCSI_TMFUNC;
438     cmd->i_state = ISTATE_SEND_TASKMGTRSP;
439     diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
440     index d94927e5623b..e31f72b3a22c 100644
441     --- a/drivers/usb/misc/usbtest.c
442     +++ b/drivers/usb/misc/usbtest.c
443     @@ -209,12 +209,13 @@ get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
444     return tmp;
445     }
446    
447     - if (in) {
448     + if (in)
449     dev->in_pipe = usb_rcvbulkpipe(udev,
450     in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
451     + if (out)
452     dev->out_pipe = usb_sndbulkpipe(udev,
453     out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
454     - }
455     +
456     if (iso_in) {
457     dev->iso_in = &iso_in->desc;
458     dev->in_iso_pipe = usb_rcvisocpipe(udev,
459     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
460     index 23db1ae37464..47c7f5b8f675 100644
461     --- a/include/linux/netdevice.h
462     +++ b/include/linux/netdevice.h
463     @@ -3742,6 +3742,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
464     unsigned char name_assign_type,
465     void (*setup)(struct net_device *),
466     unsigned int txqs, unsigned int rxqs);
467     +int dev_get_valid_name(struct net *net, struct net_device *dev,
468     + const char *name);
469     +
470     #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
471     alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
472    
473     diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
474     index 236a81034fef..0464b207d0cf 100644
475     --- a/include/net/inet_sock.h
476     +++ b/include/net/inet_sock.h
477     @@ -96,7 +96,7 @@ struct inet_request_sock {
478     kmemcheck_bitfield_end(flags);
479     u32 ir_mark;
480     union {
481     - struct ip_options_rcu *opt;
482     + struct ip_options_rcu __rcu *ireq_opt;
483     #if IS_ENABLED(CONFIG_IPV6)
484     struct {
485     struct ipv6_txoptions *ipv6_opt;
486     @@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
487     return sk->sk_bound_dev_if;
488     }
489    
490     +static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
491     +{
492     + return rcu_dereference_check(ireq->ireq_opt,
493     + atomic_read(&ireq->req.rsk_refcnt) > 0);
494     +}
495     +
496     struct inet_cork {
497     unsigned int flags;
498     __be32 addr;
499     diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
500     index d9d52c020a70..9ae819e27940 100644
501     --- a/include/net/netfilter/nf_conntrack.h
502     +++ b/include/net/netfilter/nf_conntrack.h
503     @@ -17,7 +17,6 @@
504     #include <linux/bitops.h>
505     #include <linux/compiler.h>
506     #include <linux/atomic.h>
507     -#include <linux/rhashtable.h>
508    
509     #include <linux/netfilter/nf_conntrack_tcp.h>
510     #include <linux/netfilter/nf_conntrack_dccp.h>
511     @@ -101,7 +100,7 @@ struct nf_conn {
512     possible_net_t ct_net;
513    
514     #if IS_ENABLED(CONFIG_NF_NAT)
515     - struct rhlist_head nat_bysource;
516     + struct hlist_node nat_bysource;
517     #endif
518     /* all members below initialized via memset */
519     u8 __nfct_init_offset[0];
520     diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
521     index c327a431a6f3..02515f7ed4cc 100644
522     --- a/include/net/netfilter/nf_nat.h
523     +++ b/include/net/netfilter/nf_nat.h
524     @@ -1,6 +1,5 @@
525     #ifndef _NF_NAT_H
526     #define _NF_NAT_H
527     -#include <linux/rhashtable.h>
528     #include <linux/netfilter_ipv4.h>
529     #include <linux/netfilter/nf_nat.h>
530     #include <net/netfilter/nf_conntrack_tuple.h>
531     diff --git a/include/net/tcp.h b/include/net/tcp.h
532     index 123979fe12bf..fba4fc46871d 100644
533     --- a/include/net/tcp.h
534     +++ b/include/net/tcp.h
535     @@ -1681,12 +1681,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
536     tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
537     }
538    
539     -/* Called when old skb is about to be deleted (to be combined with new skb) */
540     -static inline void tcp_highest_sack_combine(struct sock *sk,
541     +/* Called when old skb is about to be deleted and replaced by new skb */
542     +static inline void tcp_highest_sack_replace(struct sock *sk,
543     struct sk_buff *old,
544     struct sk_buff *new)
545     {
546     - if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
547     + if (old == tcp_highest_sack(sk))
548     tcp_sk(sk)->highest_sack = new;
549     }
550    
551     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
552     index 0383c601e17c..a87e8940fe57 100644
553     --- a/include/target/target_core_base.h
554     +++ b/include/target/target_core_base.h
555     @@ -197,6 +197,7 @@ enum tcm_tmreq_table {
556     TMR_LUN_RESET = 5,
557     TMR_TARGET_WARM_RESET = 6,
558     TMR_TARGET_COLD_RESET = 7,
559     + TMR_UNKNOWN = 0xff,
560     };
561    
562     /* fabric independent task management response values */
563     diff --git a/net/core/dev.c b/net/core/dev.c
564     index 7f2caad46a3d..c37891828e4e 100644
565     --- a/net/core/dev.c
566     +++ b/net/core/dev.c
567     @@ -1115,9 +1115,8 @@ static int dev_alloc_name_ns(struct net *net,
568     return ret;
569     }
570    
571     -static int dev_get_valid_name(struct net *net,
572     - struct net_device *dev,
573     - const char *name)
574     +int dev_get_valid_name(struct net *net, struct net_device *dev,
575     + const char *name)
576     {
577     BUG_ON(!net);
578    
579     @@ -1133,6 +1132,7 @@ static int dev_get_valid_name(struct net *net,
580    
581     return 0;
582     }
583     +EXPORT_SYMBOL(dev_get_valid_name);
584    
585     /**
586     * dev_change_name - change name of a device
587     diff --git a/net/core/sock.c b/net/core/sock.c
588     index 231c38d91855..e3b60460dc9c 100644
589     --- a/net/core/sock.c
590     +++ b/net/core/sock.c
591     @@ -1526,6 +1526,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
592     newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
593    
594     sock_reset_flag(newsk, SOCK_DONE);
595     + cgroup_sk_alloc(&newsk->sk_cgrp_data);
596     skb_queue_head_init(&newsk->sk_error_queue);
597    
598     filter = rcu_dereference_protected(newsk->sk_filter, 1);
599     @@ -1560,8 +1561,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
600     atomic64_set(&newsk->sk_cookie, 0);
601    
602     mem_cgroup_sk_alloc(newsk);
603     - cgroup_sk_alloc(&newsk->sk_cgrp_data);
604     -
605     /*
606     * Before updating sk_refcnt, we must commit prior changes to memory
607     * (Documentation/RCU/rculist_nulls.txt for details)
608     diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
609     index 9a1a352fd1eb..77f396b679ce 100644
610     --- a/net/core/sock_reuseport.c
611     +++ b/net/core/sock_reuseport.c
612     @@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk)
613     * soft irq of receive path or setsockopt from process context
614     */
615     spin_lock_bh(&reuseport_lock);
616     - WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
617     - lockdep_is_held(&reuseport_lock)),
618     - "multiple allocations for the same socket");
619     +
620     + /* Allocation attempts can occur concurrently via the setsockopt path
621     + * and the bind/hash path. Nothing to do when we lose the race.
622     + */
623     + if (rcu_dereference_protected(sk->sk_reuseport_cb,
624     + lockdep_is_held(&reuseport_lock)))
625     + goto out;
626     +
627     reuse = __reuseport_alloc(INIT_SOCKS);
628     if (!reuse) {
629     spin_unlock_bh(&reuseport_lock);
630     @@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk)
631     reuse->num_socks = 1;
632     rcu_assign_pointer(sk->sk_reuseport_cb, reuse);
633    
634     +out:
635     spin_unlock_bh(&reuseport_lock);
636    
637     return 0;
638     diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
639     index 8fc160098e11..8c7799cdd3cf 100644
640     --- a/net/dccp/ipv4.c
641     +++ b/net/dccp/ipv4.c
642     @@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
643     sk_daddr_set(newsk, ireq->ir_rmt_addr);
644     sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
645     newinet->inet_saddr = ireq->ir_loc_addr;
646     - newinet->inet_opt = ireq->opt;
647     - ireq->opt = NULL;
648     + RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
649     newinet->mc_index = inet_iif(skb);
650     newinet->mc_ttl = ip_hdr(skb)->ttl;
651     newinet->inet_id = jiffies;
652     @@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
653     if (__inet_inherit_port(sk, newsk) < 0)
654     goto put_and_exit;
655     *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
656     -
657     + if (*own_req)
658     + ireq->ireq_opt = NULL;
659     + else
660     + newinet->inet_opt = NULL;
661     return newsk;
662    
663     exit_overflow:
664     @@ -441,6 +443,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
665     __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
666     return NULL;
667     put_and_exit:
668     + newinet->inet_opt = NULL;
669     inet_csk_prepare_forced_close(newsk);
670     dccp_done(newsk);
671     goto exit;
672     @@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
673     ireq->ir_rmt_addr);
674     err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
675     ireq->ir_rmt_addr,
676     - ireq->opt);
677     + ireq_opt_deref(ireq));
678     err = net_xmit_eval(err);
679     }
680    
681     @@ -548,7 +551,7 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
682     static void dccp_v4_reqsk_destructor(struct request_sock *req)
683     {
684     dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg);
685     - kfree(inet_rsk(req)->opt);
686     + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
687     }
688    
689     void dccp_syn_ack_timeout(const struct request_sock *req)
690     diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
691     index ae206163c273..972353cd1778 100644
692     --- a/net/ipv4/cipso_ipv4.c
693     +++ b/net/ipv4/cipso_ipv4.c
694     @@ -1943,7 +1943,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
695     buf = NULL;
696    
697     req_inet = inet_rsk(req);
698     - opt = xchg(&req_inet->opt, opt);
699     + opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
700     if (opt)
701     kfree_rcu(opt, rcu);
702    
703     @@ -1965,11 +1965,13 @@ int cipso_v4_req_setattr(struct request_sock *req,
704     * values on failure.
705     *
706     */
707     -static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
708     +static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr)
709     {
710     + struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1);
711     int hdr_delta = 0;
712     - struct ip_options_rcu *opt = *opt_ptr;
713    
714     + if (!opt || opt->opt.cipso == 0)
715     + return 0;
716     if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
717     u8 cipso_len;
718     u8 cipso_off;
719     @@ -2031,14 +2033,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
720     */
721     void cipso_v4_sock_delattr(struct sock *sk)
722     {
723     - int hdr_delta;
724     - struct ip_options_rcu *opt;
725     struct inet_sock *sk_inet;
726     + int hdr_delta;
727    
728     sk_inet = inet_sk(sk);
729     - opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
730     - if (!opt || opt->opt.cipso == 0)
731     - return;
732    
733     hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
734     if (sk_inet->is_icsk && hdr_delta > 0) {
735     @@ -2058,15 +2056,7 @@ void cipso_v4_sock_delattr(struct sock *sk)
736     */
737     void cipso_v4_req_delattr(struct request_sock *req)
738     {
739     - struct ip_options_rcu *opt;
740     - struct inet_request_sock *req_inet;
741     -
742     - req_inet = inet_rsk(req);
743     - opt = req_inet->opt;
744     - if (!opt || opt->opt.cipso == 0)
745     - return;
746     -
747     - cipso_v4_delopt(&req_inet->opt);
748     + cipso_v4_delopt(&inet_rsk(req)->ireq_opt);
749     }
750    
751     /**
752     diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
753     index d5cac99170b1..8c72034df28e 100644
754     --- a/net/ipv4/gre_offload.c
755     +++ b/net/ipv4/gre_offload.c
756     @@ -98,7 +98,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
757     greh = (struct gre_base_hdr *)skb_transport_header(skb);
758     pcsum = (__sum16 *)(greh + 1);
759    
760     - if (gso_partial) {
761     + if (gso_partial && skb_is_gso(skb)) {
762     unsigned int partial_adj;
763    
764     /* Adjust checksum to account for the fact that
765     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
766     index cf3d5674846a..d1cab49393e2 100644
767     --- a/net/ipv4/inet_connection_sock.c
768     +++ b/net/ipv4/inet_connection_sock.c
769     @@ -407,9 +407,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
770     {
771     const struct inet_request_sock *ireq = inet_rsk(req);
772     struct net *net = read_pnet(&ireq->ireq_net);
773     - struct ip_options_rcu *opt = ireq->opt;
774     + struct ip_options_rcu *opt;
775     struct rtable *rt;
776    
777     + opt = ireq_opt_deref(ireq);
778     +
779     flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
780     RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
781     sk->sk_protocol, inet_sk_flowi_flags(sk),
782     @@ -443,10 +445,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
783     struct flowi4 *fl4;
784     struct rtable *rt;
785    
786     + opt = rcu_dereference(ireq->ireq_opt);
787     fl4 = &newinet->cork.fl.u.ip4;
788    
789     - rcu_read_lock();
790     - opt = rcu_dereference(newinet->inet_opt);
791     flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
792     RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
793     sk->sk_protocol, inet_sk_flowi_flags(sk),
794     @@ -459,13 +460,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
795     goto no_route;
796     if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
797     goto route_err;
798     - rcu_read_unlock();
799     return &rt->dst;
800    
801     route_err:
802     ip_rt_put(rt);
803     no_route:
804     - rcu_read_unlock();
805     __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
806     return NULL;
807     }
808     diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
809     index ca97835bfec4..b9bcf3db3af9 100644
810     --- a/net/ipv4/inet_hashtables.c
811     +++ b/net/ipv4/inet_hashtables.c
812     @@ -455,10 +455,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
813     return reuseport_add_sock(sk, sk2);
814     }
815    
816     - /* Initial allocation may have already happened via setsockopt */
817     - if (!rcu_access_pointer(sk->sk_reuseport_cb))
818     - return reuseport_alloc(sk);
819     - return 0;
820     + return reuseport_alloc(sk);
821     }
822    
823     int __inet_hash(struct sock *sk, struct sock *osk,
824     diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
825     index c9392589c415..56d71a004dce 100644
826     --- a/net/ipv4/ipip.c
827     +++ b/net/ipv4/ipip.c
828     @@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly;
829    
830     static int ipip_err(struct sk_buff *skb, u32 info)
831     {
832     -
833     -/* All the routers (except for Linux) return only
834     - 8 bytes of packet payload. It means, that precise relaying of
835     - ICMP in the real Internet is absolutely infeasible.
836     - */
837     + /* All the routers (except for Linux) return only
838     + * 8 bytes of packet payload. It means, that precise relaying of
839     + * ICMP in the real Internet is absolutely infeasible.
840     + */
841     struct net *net = dev_net(skb->dev);
842     struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
843     const struct iphdr *iph = (const struct iphdr *)skb->data;
844     - struct ip_tunnel *t;
845     - int err;
846     const int type = icmp_hdr(skb)->type;
847     const int code = icmp_hdr(skb)->code;
848     + struct ip_tunnel *t;
849     + int err = 0;
850     +
851     + switch (type) {
852     + case ICMP_DEST_UNREACH:
853     + switch (code) {
854     + case ICMP_SR_FAILED:
855     + /* Impossible event. */
856     + goto out;
857     + default:
858     + /* All others are translated to HOST_UNREACH.
859     + * rfc2003 contains "deep thoughts" about NET_UNREACH,
860     + * I believe they are just ether pollution. --ANK
861     + */
862     + break;
863     + }
864     + break;
865     +
866     + case ICMP_TIME_EXCEEDED:
867     + if (code != ICMP_EXC_TTL)
868     + goto out;
869     + break;
870     +
871     + case ICMP_REDIRECT:
872     + break;
873     +
874     + default:
875     + goto out;
876     + }
877    
878     - err = -ENOENT;
879     t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
880     iph->daddr, iph->saddr, 0);
881     - if (!t)
882     + if (!t) {
883     + err = -ENOENT;
884     goto out;
885     + }
886    
887     if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
888     - ipv4_update_pmtu(skb, dev_net(skb->dev), info,
889     - t->parms.link, 0, iph->protocol, 0);
890     - err = 0;
891     + ipv4_update_pmtu(skb, net, info, t->parms.link, 0,
892     + iph->protocol, 0);
893     goto out;
894     }
895    
896     if (type == ICMP_REDIRECT) {
897     - ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
898     - iph->protocol, 0);
899     - err = 0;
900     + ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0);
901     goto out;
902     }
903    
904     - if (t->parms.iph.daddr == 0)
905     + if (t->parms.iph.daddr == 0) {
906     + err = -ENOENT;
907     goto out;
908     + }
909    
910     - err = 0;
911     if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
912     goto out;
913    
914     diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
915     index b6f710d515d0..0597ad73a1fa 100644
916     --- a/net/ipv4/syncookies.c
917     +++ b/net/ipv4/syncookies.c
918     @@ -354,7 +354,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
919     /* We throwed the options of the initial SYN away, so we hope
920     * the ACK carries the same options again (see RFC1122 4.2.3.8)
921     */
922     - ireq->opt = tcp_v4_save_options(skb);
923     + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
924    
925     if (security_inet_conn_request(sk, skb, req)) {
926     reqsk_free(req);
927     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
928     index c03850771a4e..8fcd0c642742 100644
929     --- a/net/ipv4/tcp_input.c
930     +++ b/net/ipv4/tcp_input.c
931     @@ -6237,7 +6237,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
932     struct inet_request_sock *ireq = inet_rsk(req);
933    
934     kmemcheck_annotate_bitfield(ireq, flags);
935     - ireq->opt = NULL;
936     + ireq->ireq_opt = NULL;
937     #if IS_ENABLED(CONFIG_IPV6)
938     ireq->pktopts = NULL;
939     #endif
940     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
941     index 6988566dc72f..d577ec07a0d8 100644
942     --- a/net/ipv4/tcp_ipv4.c
943     +++ b/net/ipv4/tcp_ipv4.c
944     @@ -861,7 +861,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
945    
946     err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
947     ireq->ir_rmt_addr,
948     - ireq->opt);
949     + ireq_opt_deref(ireq));
950     err = net_xmit_eval(err);
951     }
952    
953     @@ -873,7 +873,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
954     */
955     static void tcp_v4_reqsk_destructor(struct request_sock *req)
956     {
957     - kfree(inet_rsk(req)->opt);
958     + kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
959     }
960    
961     #ifdef CONFIG_TCP_MD5SIG
962     @@ -1199,7 +1199,7 @@ static void tcp_v4_init_req(struct request_sock *req,
963    
964     sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
965     sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
966     - ireq->opt = tcp_v4_save_options(skb);
967     + RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(skb));
968     }
969    
970     static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
971     @@ -1295,10 +1295,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
972     sk_daddr_set(newsk, ireq->ir_rmt_addr);
973     sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
974     newsk->sk_bound_dev_if = ireq->ir_iif;
975     - newinet->inet_saddr = ireq->ir_loc_addr;
976     - inet_opt = ireq->opt;
977     - rcu_assign_pointer(newinet->inet_opt, inet_opt);
978     - ireq->opt = NULL;
979     + newinet->inet_saddr = ireq->ir_loc_addr;
980     + inet_opt = rcu_dereference(ireq->ireq_opt);
981     + RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
982     newinet->mc_index = inet_iif(skb);
983     newinet->mc_ttl = ip_hdr(skb)->ttl;
984     newinet->rcv_tos = ip_hdr(skb)->tos;
985     @@ -1346,9 +1345,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
986     if (__inet_inherit_port(sk, newsk) < 0)
987     goto put_and_exit;
988     *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
989     - if (*own_req)
990     + if (likely(*own_req)) {
991     tcp_move_syn(newtp, req);
992     -
993     + ireq->ireq_opt = NULL;
994     + } else {
995     + newinet->inet_opt = NULL;
996     + }
997     return newsk;
998    
999     exit_overflow:
1000     @@ -1359,6 +1361,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1001     tcp_listendrop(sk);
1002     return NULL;
1003     put_and_exit:
1004     + newinet->inet_opt = NULL;
1005     inet_csk_prepare_forced_close(newsk);
1006     tcp_done(newsk);
1007     goto exit;
1008     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1009     index 85920707c4d3..566b43afe378 100644
1010     --- a/net/ipv4/tcp_output.c
1011     +++ b/net/ipv4/tcp_output.c
1012     @@ -1996,6 +1996,7 @@ static int tcp_mtu_probe(struct sock *sk)
1013     nskb->ip_summed = skb->ip_summed;
1014    
1015     tcp_insert_write_queue_before(nskb, skb, sk);
1016     + tcp_highest_sack_replace(sk, skb, nskb);
1017    
1018     len = 0;
1019     tcp_for_write_queue_from_safe(skb, next, sk) {
1020     @@ -2535,7 +2536,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1021    
1022     BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
1023    
1024     - tcp_highest_sack_combine(sk, next_skb, skb);
1025     + tcp_highest_sack_replace(sk, next_skb, skb);
1026    
1027     tcp_unlink_write_queue(next_skb, sk);
1028    
1029     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1030     index 4363b1e89bdf..bef4a94ce1a0 100644
1031     --- a/net/ipv4/udp.c
1032     +++ b/net/ipv4/udp.c
1033     @@ -222,10 +222,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
1034     }
1035     }
1036    
1037     - /* Initial allocation may have already happened via setsockopt */
1038     - if (!rcu_access_pointer(sk->sk_reuseport_cb))
1039     - return reuseport_alloc(sk);
1040     - return 0;
1041     + return reuseport_alloc(sk);
1042     }
1043    
1044     /**
1045     diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
1046     index 0932c85b42af..6401574cd638 100644
1047     --- a/net/ipv4/udp_offload.c
1048     +++ b/net/ipv4/udp_offload.c
1049     @@ -122,7 +122,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
1050     * will be using a length value equal to only one MSS sized
1051     * segment instead of the entire frame.
1052     */
1053     - if (gso_partial) {
1054     + if (gso_partial && skb_is_gso(skb)) {
1055     uh->len = htons(skb_shinfo(skb)->gso_size +
1056     SKB_GSO_CB(skb)->data_offset +
1057     skb->head - (unsigned char *)uh);
1058     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1059     index cc101b1be903..a4fb90c4819f 100644
1060     --- a/net/ipv6/addrconf.c
1061     +++ b/net/ipv6/addrconf.c
1062     @@ -3299,6 +3299,7 @@ static void addrconf_permanent_addr(struct net_device *dev)
1063     if ((ifp->flags & IFA_F_PERMANENT) &&
1064     fixup_permanent_addr(idev, ifp) < 0) {
1065     write_unlock_bh(&idev->lock);
1066     + in6_ifa_hold(ifp);
1067     ipv6_del_addr(ifp);
1068     write_lock_bh(&idev->lock);
1069    
1070     diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
1071     index b912f0dbaf72..b82e439804d1 100644
1072     --- a/net/ipv6/ip6_flowlabel.c
1073     +++ b/net/ipv6/ip6_flowlabel.c
1074     @@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
1075     }
1076     opt_space->dst1opt = fopt->dst1opt;
1077     opt_space->opt_flen = fopt->opt_flen;
1078     + opt_space->tot_len = fopt->tot_len;
1079     return opt_space;
1080     }
1081     EXPORT_SYMBOL_GPL(fl6_merge_options);
1082     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
1083     index 41c10486cf7e..e9b14e3493f2 100644
1084     --- a/net/ipv6/ip6_gre.c
1085     +++ b/net/ipv6/ip6_gre.c
1086     @@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1087     case ICMPV6_DEST_UNREACH:
1088     net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
1089     t->parms.name);
1090     - break;
1091     + if (code != ICMPV6_PORT_UNREACH)
1092     + break;
1093     + return;
1094     case ICMPV6_TIME_EXCEED:
1095     if (code == ICMPV6_EXC_HOPLIMIT) {
1096     net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
1097     t->parms.name);
1098     + break;
1099     }
1100     - break;
1101     + return;
1102     case ICMPV6_PARAMPROB:
1103     teli = 0;
1104     if (code == ICMPV6_HDR_FIELD)
1105     @@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1106     net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
1107     t->parms.name);
1108     }
1109     - break;
1110     + return;
1111     case ICMPV6_PKT_TOOBIG:
1112     mtu = be32_to_cpu(info) - offset - t->tun_hlen;
1113     if (t->dev->type == ARPHRD_ETHER)
1114     @@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1115     if (mtu < IPV6_MIN_MTU)
1116     mtu = IPV6_MIN_MTU;
1117     t->dev->mtu = mtu;
1118     - break;
1119     + return;
1120     }
1121    
1122     if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO))
1123     @@ -505,8 +508,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
1124     __u32 *pmtu, __be16 proto)
1125     {
1126     struct ip6_tnl *tunnel = netdev_priv(dev);
1127     - __be16 protocol = (dev->type == ARPHRD_ETHER) ?
1128     - htons(ETH_P_TEB) : proto;
1129     + struct dst_entry *dst = skb_dst(skb);
1130     + __be16 protocol;
1131    
1132     if (dev->type == ARPHRD_ETHER)
1133     IPCB(skb)->flags = 0;
1134     @@ -520,9 +523,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
1135     tunnel->o_seqno++;
1136    
1137     /* Push GRE header. */
1138     + protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto;
1139     gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags,
1140     protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno));
1141    
1142     + /* TooBig packet may have updated dst->dev's mtu */
1143     + if (dst && dst_mtu(dst) > dst->dev->mtu)
1144     + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
1145     +
1146     return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
1147     NEXTHDR_GRE);
1148     }
1149     diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
1150     index 424fbe1f8978..649f4d87b318 100644
1151     --- a/net/ipv6/ip6_offload.c
1152     +++ b/net/ipv6/ip6_offload.c
1153     @@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
1154    
1155     for (skb = segs; skb; skb = skb->next) {
1156     ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
1157     - if (gso_partial)
1158     + if (gso_partial && skb_is_gso(skb))
1159     payload_len = skb_shinfo(skb)->gso_size +
1160     SKB_GSO_CB(skb)->data_offset +
1161     skb->head - (unsigned char *)(ipv6h + 1);
1162     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1163     index e0236e902ea7..6e01c9a8dfd3 100644
1164     --- a/net/ipv6/ip6_output.c
1165     +++ b/net/ipv6/ip6_output.c
1166     @@ -1215,11 +1215,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1167     if (WARN_ON(v6_cork->opt))
1168     return -EINVAL;
1169    
1170     - v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation);
1171     + v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation);
1172     if (unlikely(!v6_cork->opt))
1173     return -ENOBUFS;
1174    
1175     - v6_cork->opt->tot_len = opt->tot_len;
1176     + v6_cork->opt->tot_len = sizeof(*opt);
1177     v6_cork->opt->opt_flen = opt->opt_flen;
1178     v6_cork->opt->opt_nflen = opt->opt_nflen;
1179    
1180     diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
1181     index 1696f1fd5877..163f1fa53917 100644
1182     --- a/net/l2tp/l2tp_ppp.c
1183     +++ b/net/l2tp/l2tp_ppp.c
1184     @@ -993,6 +993,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session,
1185     session->name, cmd, arg);
1186    
1187     sk = ps->sock;
1188     + if (!sk)
1189     + return -EBADR;
1190     +
1191     sock_hold(sk);
1192    
1193     switch (cmd) {
1194     diff --git a/net/mac80211/key.c b/net/mac80211/key.c
1195     index edd6f2945f69..4c625a325ce2 100644
1196     --- a/net/mac80211/key.c
1197     +++ b/net/mac80211/key.c
1198     @@ -4,7 +4,7 @@
1199     * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
1200     * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
1201     * Copyright 2013-2014 Intel Mobile Communications GmbH
1202     - * Copyright 2015 Intel Deutschland GmbH
1203     + * Copyright 2015-2017 Intel Deutschland GmbH
1204     *
1205     * This program is free software; you can redistribute it and/or modify
1206     * it under the terms of the GNU General Public License version 2 as
1207     @@ -19,6 +19,7 @@
1208     #include <linux/slab.h>
1209     #include <linux/export.h>
1210     #include <net/mac80211.h>
1211     +#include <crypto/algapi.h>
1212     #include <asm/unaligned.h>
1213     #include "ieee80211_i.h"
1214     #include "driver-ops.h"
1215     @@ -608,6 +609,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key)
1216     ieee80211_key_free_common(key);
1217     }
1218    
1219     +static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata,
1220     + struct ieee80211_key *old,
1221     + struct ieee80211_key *new)
1222     +{
1223     + u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP];
1224     + u8 *tk_old, *tk_new;
1225     +
1226     + if (!old || new->conf.keylen != old->conf.keylen)
1227     + return false;
1228     +
1229     + tk_old = old->conf.key;
1230     + tk_new = new->conf.key;
1231     +
1232     + /*
1233     + * In station mode, don't compare the TX MIC key, as it's never used
1234     + * and offloaded rekeying may not care to send it to the host. This
1235     + * is the case in iwlwifi, for example.
1236     + */
1237     + if (sdata->vif.type == NL80211_IFTYPE_STATION &&
1238     + new->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
1239     + new->conf.keylen == WLAN_KEY_LEN_TKIP &&
1240     + !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1241     + memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP);
1242     + memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP);
1243     + memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
1244     + memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8);
1245     + tk_old = tkip_old;
1246     + tk_new = tkip_new;
1247     + }
1248     +
1249     + return !crypto_memneq(tk_old, tk_new, new->conf.keylen);
1250     +}
1251     +
1252     int ieee80211_key_link(struct ieee80211_key *key,
1253     struct ieee80211_sub_if_data *sdata,
1254     struct sta_info *sta)
1255     @@ -619,9 +653,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
1256    
1257     pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
1258     idx = key->conf.keyidx;
1259     - key->local = sdata->local;
1260     - key->sdata = sdata;
1261     - key->sta = sta;
1262    
1263     mutex_lock(&sdata->local->key_mtx);
1264    
1265     @@ -632,6 +663,20 @@ int ieee80211_key_link(struct ieee80211_key *key,
1266     else
1267     old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
1268    
1269     + /*
1270     + * Silently accept key re-installation without really installing the
1271     + * new version of the key to avoid nonce reuse or replay issues.
1272     + */
1273     + if (ieee80211_key_identical(sdata, old_key, key)) {
1274     + ieee80211_key_free_unused(key);
1275     + ret = 0;
1276     + goto out;
1277     + }
1278     +
1279     + key->local = sdata->local;
1280     + key->sdata = sdata;
1281     + key->sta = sta;
1282     +
1283     increment_tailroom_need_count(sdata);
1284    
1285     ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
1286     @@ -647,6 +692,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
1287     ret = 0;
1288     }
1289    
1290     + out:
1291     mutex_unlock(&sdata->local->key_mtx);
1292    
1293     return ret;
1294     diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
1295     index ed9ce7c63252..750b8bf13e60 100644
1296     --- a/net/netfilter/nf_conntrack_core.c
1297     +++ b/net/netfilter/nf_conntrack_core.c
1298     @@ -689,7 +689,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
1299    
1300     l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
1301     if (l4proto->allow_clash &&
1302     - !nfct_nat(ct) &&
1303     + ((ct->status & IPS_NAT_DONE_MASK) == 0) &&
1304     !nf_ct_is_dying(ct) &&
1305     atomic_inc_not_zero(&ct->ct_general.use)) {
1306     nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
1307     diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
1308     index 2916f4815c9c..624d6e4dcd5c 100644
1309     --- a/net/netfilter/nf_nat_core.c
1310     +++ b/net/netfilter/nf_nat_core.c
1311     @@ -30,19 +30,17 @@
1312     #include <net/netfilter/nf_conntrack_zones.h>
1313     #include <linux/netfilter/nf_nat.h>
1314    
1315     +static DEFINE_SPINLOCK(nf_nat_lock);
1316     +
1317     static DEFINE_MUTEX(nf_nat_proto_mutex);
1318     static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
1319     __read_mostly;
1320     static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
1321     __read_mostly;
1322    
1323     -struct nf_nat_conn_key {
1324     - const struct net *net;
1325     - const struct nf_conntrack_tuple *tuple;
1326     - const struct nf_conntrack_zone *zone;
1327     -};
1328     -
1329     -static struct rhltable nf_nat_bysource_table;
1330     +static struct hlist_head *nf_nat_bysource __read_mostly;
1331     +static unsigned int nf_nat_htable_size __read_mostly;
1332     +static unsigned int nf_nat_hash_rnd __read_mostly;
1333    
1334     inline const struct nf_nat_l3proto *
1335     __nf_nat_l3proto_find(u8 family)
1336     @@ -121,17 +119,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
1337     EXPORT_SYMBOL(nf_xfrm_me_harder);
1338     #endif /* CONFIG_XFRM */
1339    
1340     -static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
1341     +/* We keep an extra hash for each conntrack, for fast searching. */
1342     +static inline unsigned int
1343     +hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
1344     {
1345     - const struct nf_conntrack_tuple *t;
1346     - const struct nf_conn *ct = data;
1347     + unsigned int hash;
1348     +
1349     + get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
1350    
1351     - t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
1352     /* Original src, to ensure we map it consistently if poss. */
1353     + hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
1354     + tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
1355    
1356     - seed ^= net_hash_mix(nf_ct_net(ct));
1357     - return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
1358     - t->dst.protonum ^ seed);
1359     + return reciprocal_scale(hash, nf_nat_htable_size);
1360     }
1361    
1362     /* Is this tuple already taken? (not by us) */
1363     @@ -187,28 +187,6 @@ same_src(const struct nf_conn *ct,
1364     t->src.u.all == tuple->src.u.all);
1365     }
1366    
1367     -static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
1368     - const void *obj)
1369     -{
1370     - const struct nf_nat_conn_key *key = arg->key;
1371     - const struct nf_conn *ct = obj;
1372     -
1373     - if (!same_src(ct, key->tuple) ||
1374     - !net_eq(nf_ct_net(ct), key->net) ||
1375     - !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
1376     - return 1;
1377     -
1378     - return 0;
1379     -}
1380     -
1381     -static struct rhashtable_params nf_nat_bysource_params = {
1382     - .head_offset = offsetof(struct nf_conn, nat_bysource),
1383     - .obj_hashfn = nf_nat_bysource_hash,
1384     - .obj_cmpfn = nf_nat_bysource_cmp,
1385     - .nelem_hint = 256,
1386     - .min_size = 1024,
1387     -};
1388     -
1389     /* Only called for SRC manip */
1390     static int
1391     find_appropriate_src(struct net *net,
1392     @@ -219,26 +197,22 @@ find_appropriate_src(struct net *net,
1393     struct nf_conntrack_tuple *result,
1394     const struct nf_nat_range *range)
1395     {
1396     + unsigned int h = hash_by_src(net, tuple);
1397     const struct nf_conn *ct;
1398     - struct nf_nat_conn_key key = {
1399     - .net = net,
1400     - .tuple = tuple,
1401     - .zone = zone
1402     - };
1403     - struct rhlist_head *hl, *h;
1404     -
1405     - hl = rhltable_lookup(&nf_nat_bysource_table, &key,
1406     - nf_nat_bysource_params);
1407    
1408     - rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
1409     - nf_ct_invert_tuplepr(result,
1410     - &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
1411     - result->dst = tuple->dst;
1412     -
1413     - if (in_range(l3proto, l4proto, result, range))
1414     - return 1;
1415     + hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
1416     + if (same_src(ct, tuple) &&
1417     + net_eq(net, nf_ct_net(ct)) &&
1418     + nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
1419     + /* Copy source part from reply tuple. */
1420     + nf_ct_invert_tuplepr(result,
1421     + &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
1422     + result->dst = tuple->dst;
1423     +
1424     + if (in_range(l3proto, l4proto, result, range))
1425     + return 1;
1426     + }
1427     }
1428     -
1429     return 0;
1430     }
1431    
1432     @@ -411,6 +385,7 @@ nf_nat_setup_info(struct nf_conn *ct,
1433     const struct nf_nat_range *range,
1434     enum nf_nat_manip_type maniptype)
1435     {
1436     + struct net *net = nf_ct_net(ct);
1437     struct nf_conntrack_tuple curr_tuple, new_tuple;
1438     struct nf_conn_nat *nat;
1439    
1440     @@ -452,19 +427,16 @@ nf_nat_setup_info(struct nf_conn *ct,
1441     }
1442    
1443     if (maniptype == NF_NAT_MANIP_SRC) {
1444     - struct nf_nat_conn_key key = {
1445     - .net = nf_ct_net(ct),
1446     - .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
1447     - .zone = nf_ct_zone(ct),
1448     - };
1449     - int err;
1450     -
1451     - err = rhltable_insert_key(&nf_nat_bysource_table,
1452     - &key,
1453     - &ct->nat_bysource,
1454     - nf_nat_bysource_params);
1455     - if (err)
1456     - return NF_DROP;
1457     + unsigned int srchash;
1458     +
1459     + srchash = hash_by_src(net,
1460     + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
1461     + spin_lock_bh(&nf_nat_lock);
1462     + /* nf_conntrack_alter_reply might re-allocate extension aera */
1463     + nat = nfct_nat(ct);
1464     + hlist_add_head_rcu(&ct->nat_bysource,
1465     + &nf_nat_bysource[srchash]);
1466     + spin_unlock_bh(&nf_nat_lock);
1467     }
1468    
1469     /* It's done. */
1470     @@ -550,10 +522,6 @@ struct nf_nat_proto_clean {
1471     static int nf_nat_proto_remove(struct nf_conn *i, void *data)
1472     {
1473     const struct nf_nat_proto_clean *clean = data;
1474     - struct nf_conn_nat *nat = nfct_nat(i);
1475     -
1476     - if (!nat)
1477     - return 0;
1478    
1479     if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
1480     (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
1481     @@ -564,12 +532,10 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
1482    
1483     static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
1484     {
1485     - struct nf_conn_nat *nat = nfct_nat(ct);
1486     -
1487     if (nf_nat_proto_remove(ct, data))
1488     return 1;
1489    
1490     - if (!nat)
1491     + if ((ct->status & IPS_SRC_NAT_DONE) == 0)
1492     return 0;
1493    
1494     /* This netns is being destroyed, and conntrack has nat null binding.
1495     @@ -578,9 +544,10 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
1496     * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
1497     * will delete entry from already-freed table.
1498     */
1499     + spin_lock_bh(&nf_nat_lock);
1500     + hlist_del_rcu(&ct->nat_bysource);
1501     ct->status &= ~IPS_NAT_DONE_MASK;
1502     - rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
1503     - nf_nat_bysource_params);
1504     + spin_unlock_bh(&nf_nat_lock);
1505    
1506     /* don't delete conntrack. Although that would make things a lot
1507     * simpler, we'd end up flushing all conntracks on nat rmmod.
1508     @@ -705,13 +672,11 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
1509     /* No one using conntrack by the time this called. */
1510     static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
1511     {
1512     - struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
1513     -
1514     - if (!nat)
1515     - return;
1516     -
1517     - rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
1518     - nf_nat_bysource_params);
1519     + if (ct->status & IPS_SRC_NAT_DONE) {
1520     + spin_lock_bh(&nf_nat_lock);
1521     + hlist_del_rcu(&ct->nat_bysource);
1522     + spin_unlock_bh(&nf_nat_lock);
1523     + }
1524     }
1525    
1526     static struct nf_ct_ext_type nat_extend __read_mostly = {
1527     @@ -846,13 +811,16 @@ static int __init nf_nat_init(void)
1528     {
1529     int ret;
1530    
1531     - ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
1532     - if (ret)
1533     - return ret;
1534     + /* Leave them the same for the moment. */
1535     + nf_nat_htable_size = nf_conntrack_htable_size;
1536     +
1537     + nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
1538     + if (!nf_nat_bysource)
1539     + return -ENOMEM;
1540    
1541     ret = nf_ct_extend_register(&nat_extend);
1542     if (ret < 0) {
1543     - rhltable_destroy(&nf_nat_bysource_table);
1544     + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
1545     printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
1546     return ret;
1547     }
1548     @@ -876,7 +844,7 @@ static int __init nf_nat_init(void)
1549     return 0;
1550    
1551     cleanup_extend:
1552     - rhltable_destroy(&nf_nat_bysource_table);
1553     + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
1554     nf_ct_extend_unregister(&nat_extend);
1555     return ret;
1556     }
1557     @@ -896,8 +864,8 @@ static void __exit nf_nat_cleanup(void)
1558    
1559     for (i = 0; i < NFPROTO_NUMPROTO; i++)
1560     kfree(nf_nat_l4protos[i]);
1561     -
1562     - rhltable_destroy(&nf_nat_bysource_table);
1563     + synchronize_net();
1564     + nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
1565     }
1566    
1567     MODULE_LICENSE("GPL");
1568     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1569     index 2a5775f8a6ca..a1dca3b169a1 100644
1570     --- a/net/netlink/af_netlink.c
1571     +++ b/net/netlink/af_netlink.c
1572     @@ -2207,16 +2207,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1573     cb->min_dump_alloc = control->min_dump_alloc;
1574     cb->skb = skb;
1575    
1576     + if (cb->start) {
1577     + ret = cb->start(cb);
1578     + if (ret)
1579     + goto error_unlock;
1580     + }
1581     +
1582     nlk->cb_running = true;
1583    
1584     mutex_unlock(nlk->cb_mutex);
1585    
1586     - ret = 0;
1587     - if (cb->start)
1588     - ret = cb->start(cb);
1589     -
1590     - if (!ret)
1591     - ret = netlink_dump(sk);
1592     + ret = netlink_dump(sk);
1593    
1594     sock_put(sk);
1595    
1596     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1597     index b17f9097c6fe..e7f6657269e0 100644
1598     --- a/net/packet/af_packet.c
1599     +++ b/net/packet/af_packet.c
1600     @@ -1720,7 +1720,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1601    
1602     out:
1603     if (err && rollover) {
1604     - kfree(rollover);
1605     + kfree_rcu(rollover, rcu);
1606     po->rollover = NULL;
1607     }
1608     mutex_unlock(&fanout_mutex);
1609     @@ -1747,8 +1747,10 @@ static struct packet_fanout *fanout_release(struct sock *sk)
1610     else
1611     f = NULL;
1612    
1613     - if (po->rollover)
1614     + if (po->rollover) {
1615     kfree_rcu(po->rollover, rcu);
1616     + po->rollover = NULL;
1617     + }
1618     }
1619     mutex_unlock(&fanout_mutex);
1620    
1621     @@ -3851,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1622     void *data = &val;
1623     union tpacket_stats_u st;
1624     struct tpacket_rollover_stats rstats;
1625     + struct packet_rollover *rollover;
1626    
1627     if (level != SOL_PACKET)
1628     return -ENOPROTOOPT;
1629     @@ -3929,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1630     0);
1631     break;
1632     case PACKET_ROLLOVER_STATS:
1633     - if (!po->rollover)
1634     + rcu_read_lock();
1635     + rollover = rcu_dereference(po->rollover);
1636     + if (rollover) {
1637     + rstats.tp_all = atomic_long_read(&rollover->num);
1638     + rstats.tp_huge = atomic_long_read(&rollover->num_huge);
1639     + rstats.tp_failed = atomic_long_read(&rollover->num_failed);
1640     + data = &rstats;
1641     + lv = sizeof(rstats);
1642     + }
1643     + rcu_read_unlock();
1644     + if (!rollover)
1645     return -EINVAL;
1646     - rstats.tp_all = atomic_long_read(&po->rollover->num);
1647     - rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
1648     - rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
1649     - data = &rstats;
1650     - lv = sizeof(rstats);
1651     break;
1652     case PACKET_TX_HAS_OFF:
1653     val = po->tp_tx_has_off;
1654     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
1655     index 195a3b2d9afc..ea13df1be067 100644
1656     --- a/net/sched/sch_api.c
1657     +++ b/net/sched/sch_api.c
1658     @@ -296,6 +296,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
1659     {
1660     struct Qdisc *q;
1661    
1662     + if (!handle)
1663     + return NULL;
1664     q = qdisc_match_from_root(dev->qdisc, handle);
1665     if (q)
1666     goto out;
1667     diff --git a/net/sctp/input.c b/net/sctp/input.c
1668     index 6c79915c7dbc..68b84d3a7cac 100644
1669     --- a/net/sctp/input.c
1670     +++ b/net/sctp/input.c
1671     @@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
1672     {
1673     struct dst_entry *dst;
1674    
1675     - if (!t)
1676     + if (sock_owned_by_user(sk) || !t)
1677     return;
1678     dst = sctp_transport_dst_check(t);
1679     if (dst)
1680     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1681     index ca4a63e3eadd..f7f00d012888 100644
1682     --- a/net/sctp/ipv6.c
1683     +++ b/net/sctp/ipv6.c
1684     @@ -881,8 +881,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
1685     net = sock_net(&opt->inet.sk);
1686     rcu_read_lock();
1687     dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id);
1688     - if (!dev ||
1689     - !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) {
1690     + if (!dev || !(opt->inet.freebind ||
1691     + net->ipv6.sysctl.ip_nonlocal_bind ||
1692     + ipv6_chk_addr(net, &addr->v6.sin6_addr,
1693     + dev, 0))) {
1694     rcu_read_unlock();
1695     return 0;
1696     }
1697     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1698     index 3ef725229449..ffcc8aa78db7 100644
1699     --- a/net/sctp/socket.c
1700     +++ b/net/sctp/socket.c
1701     @@ -168,6 +168,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
1702     sk_mem_charge(sk, chunk->skb->truesize);
1703     }
1704    
1705     +static void sctp_clear_owner_w(struct sctp_chunk *chunk)
1706     +{
1707     + skb_orphan(chunk->skb);
1708     +}
1709     +
1710     +static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
1711     + void (*cb)(struct sctp_chunk *))
1712     +
1713     +{
1714     + struct sctp_outq *q = &asoc->outqueue;
1715     + struct sctp_transport *t;
1716     + struct sctp_chunk *chunk;
1717     +
1718     + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
1719     + list_for_each_entry(chunk, &t->transmitted, transmitted_list)
1720     + cb(chunk);
1721     +
1722     + list_for_each_entry(chunk, &q->retransmit, list)
1723     + cb(chunk);
1724     +
1725     + list_for_each_entry(chunk, &q->sacked, list)
1726     + cb(chunk);
1727     +
1728     + list_for_each_entry(chunk, &q->abandoned, list)
1729     + cb(chunk);
1730     +
1731     + list_for_each_entry(chunk, &q->out_chunk_list, list)
1732     + cb(chunk);
1733     +}
1734     +
1735     /* Verify that this is a valid address. */
1736     static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr,
1737     int len)
1738     @@ -7826,7 +7856,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
1739     * paths won't try to lock it and then oldsk.
1740     */
1741     lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
1742     + sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w);
1743     sctp_assoc_migrate(assoc, newsk);
1744     + sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w);
1745    
1746     /* If the association on the newsk is already closed before accept()
1747     * is called, set RCV_SHUTDOWN flag.
1748     diff --git a/net/unix/diag.c b/net/unix/diag.c
1749     index 4d9679701a6d..384c84e83462 100644
1750     --- a/net/unix/diag.c
1751     +++ b/net/unix/diag.c
1752     @@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
1753     err = -ENOENT;
1754     if (sk == NULL)
1755     goto out_nosk;
1756     + if (!net_eq(sock_net(sk), net))
1757     + goto out;
1758    
1759     err = sock_diag_check_cookie(sk, req->udiag_cookie);
1760     if (err)
1761     diff --git a/security/keys/Kconfig b/security/keys/Kconfig
1762     index e0a39781b10f..0832f6368955 100644
1763     --- a/security/keys/Kconfig
1764     +++ b/security/keys/Kconfig
1765     @@ -20,6 +20,10 @@ config KEYS
1766    
1767     If you are unsure as to whether this is required, answer N.
1768    
1769     +config KEYS_COMPAT
1770     + def_bool y
1771     + depends on COMPAT && KEYS
1772     +
1773     config PERSISTENT_KEYRINGS
1774     bool "Enable register of persistent per-UID keyrings"
1775     depends on KEYS
1776     diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
1777     index c4acf17e9f5e..e40a2cba5002 100644
1778     --- a/sound/core/seq/seq_device.c
1779     +++ b/sound/core/seq/seq_device.c
1780     @@ -148,8 +148,10 @@ void snd_seq_device_load_drivers(void)
1781     flush_work(&autoload_work);
1782     }
1783     EXPORT_SYMBOL(snd_seq_device_load_drivers);
1784     +#define cancel_autoload_drivers() cancel_work_sync(&autoload_work)
1785     #else
1786     #define queue_autoload_drivers() /* NOP */
1787     +#define cancel_autoload_drivers() /* NOP */
1788     #endif
1789    
1790     /*
1791     @@ -159,6 +161,7 @@ static int snd_seq_device_dev_free(struct snd_device *device)
1792     {
1793     struct snd_seq_device *dev = device->device_data;
1794    
1795     + cancel_autoload_drivers();
1796     put_device(&dev->dev);
1797     return 0;
1798     }