Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.10/0111-3.10.12-all-fixes.patch



Revision 2288
Wed Sep 25 06:47:47 2013 UTC by niro
File size: 45530 bytes
-linux-3.10 patches
1 niro 2288 diff --git a/Makefile b/Makefile
2     index 595076d..afe001e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 10
8     -SUBLEVEL = 11
9     +SUBLEVEL = 12
10     EXTRAVERSION =
11     NAME = TOSSUG Baby Fish
12    
13     diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
14     index 84c4bef..43a18f7 100644
15     --- a/arch/arm/boot/dts/at91sam9260.dtsi
16     +++ b/arch/arm/boot/dts/at91sam9260.dtsi
17     @@ -340,6 +340,14 @@
18     };
19     };
20    
21     + i2c_gpio0 {
22     + pinctrl_i2c_gpio0: i2c_gpio0-0 {
23     + atmel,pins =
24     + <0 23 0x0 0x3 /* PA23 gpio I2C_SDA pin */
25     + 0 24 0x0 0x3>; /* PA24 gpio I2C_SCL pin */
26     + };
27     + };
28     +
29     pioA: gpio@fffff400 {
30     compatible = "atmel,at91rm9200-gpio";
31     reg = <0xfffff400 0x200>;
32     @@ -592,6 +600,8 @@
33     i2c-gpio,delay-us = <2>; /* ~100 kHz */
34     #address-cells = <1>;
35     #size-cells = <0>;
36     + pinctrl-names = "default";
37     + pinctrl-0 = <&pinctrl_i2c_gpio0>;
38     status = "disabled";
39     };
40     };
41     diff --git a/arch/x86/include/asm/xor_avx.h b/arch/x86/include/asm/xor_avx.h
42     index 7ea79c5..492b298 100644
43     --- a/arch/x86/include/asm/xor_avx.h
44     +++ b/arch/x86/include/asm/xor_avx.h
45     @@ -167,12 +167,12 @@ static struct xor_block_template xor_block_avx = {
46    
47     #define AVX_XOR_SPEED \
48     do { \
49     - if (cpu_has_avx) \
50     + if (cpu_has_avx && cpu_has_osxsave) \
51     xor_speed(&xor_block_avx); \
52     } while (0)
53    
54     #define AVX_SELECT(FASTEST) \
55     - (cpu_has_avx ? &xor_block_avx : FASTEST)
56     + (cpu_has_avx && cpu_has_osxsave ? &xor_block_avx : FASTEST)
57    
58     #else
59    
60     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
61     index f975696..666cf3a 100644
62     --- a/drivers/net/bonding/bond_main.c
63     +++ b/drivers/net/bonding/bond_main.c
64     @@ -3770,11 +3770,17 @@ static int bond_neigh_init(struct neighbour *n)
65     * The bonding ndo_neigh_setup is called at init time beofre any
66     * slave exists. So we must declare proxy setup function which will
67     * be used at run time to resolve the actual slave neigh param setup.
68     + *
69     + * It's also called by master devices (such as vlans) to setup their
70     + * underlying devices. In that case - do nothing, we're already set up from
71     + * our init.
72     */
73     static int bond_neigh_setup(struct net_device *dev,
74     struct neigh_parms *parms)
75     {
76     - parms->neigh_setup = bond_neigh_init;
77     + /* modify only our neigh_parms */
78     + if (parms->dev == dev)
79     + parms->neigh_setup = bond_neigh_init;
80    
81     return 0;
82     }
83     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
84     index a13463e..0877a05 100644
85     --- a/drivers/net/ethernet/broadcom/tg3.c
86     +++ b/drivers/net/ethernet/broadcom/tg3.c
87     @@ -3003,6 +3003,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
88     return false;
89     }
90    
91     +static bool tg3_phy_led_bug(struct tg3 *tp)
92     +{
93     + switch (tg3_asic_rev(tp)) {
94     + case ASIC_REV_5719:
95     + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
96     + !tp->pci_fn)
97     + return true;
98     + return false;
99     + }
100     +
101     + return false;
102     +}
103     +
104     static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
105     {
106     u32 val;
107     @@ -3050,8 +3063,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
108     }
109     return;
110     } else if (do_low_power) {
111     - tg3_writephy(tp, MII_TG3_EXT_CTRL,
112     - MII_TG3_EXT_CTRL_FORCE_LED_OFF);
113     + if (!tg3_phy_led_bug(tp))
114     + tg3_writephy(tp, MII_TG3_EXT_CTRL,
115     + MII_TG3_EXT_CTRL_FORCE_LED_OFF);
116    
117     val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
118     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
119     diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
120     index 6e43426..7371626 100644
121     --- a/drivers/net/ethernet/emulex/benet/be_main.c
122     +++ b/drivers/net/ethernet/emulex/benet/be_main.c
123     @@ -2561,8 +2561,8 @@ static int be_close(struct net_device *netdev)
124     /* Wait for all pending tx completions to arrive so that
125     * all tx skbs are freed.
126     */
127     - be_tx_compl_clean(adapter);
128     netif_tx_disable(netdev);
129     + be_tx_compl_clean(adapter);
130    
131     be_rx_qs_destroy(adapter);
132    
133     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
134     index c966785..254f255 100644
135     --- a/drivers/net/ethernet/marvell/mvneta.c
136     +++ b/drivers/net/ethernet/marvell/mvneta.c
137     @@ -136,7 +136,9 @@
138     #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
139     #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
140     #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
141     +#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
142     #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
143     +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
144     #define MVNETA_MIB_COUNTERS_BASE 0x3080
145     #define MVNETA_MIB_LATE_COLLISION 0x7c
146     #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
147     @@ -911,6 +913,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
148     /* Assign port SDMA configuration */
149     mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
150    
151     + /* Disable PHY polling in hardware, since we're using the
152     + * kernel phylib to do this.
153     + */
154     + val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
155     + val &= ~MVNETA_PHY_POLLING_ENABLE;
156     + mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
157     +
158     mvneta_set_ucast_table(pp, -1);
159     mvneta_set_special_mcast_table(pp, -1);
160     mvneta_set_other_mcast_table(pp, -1);
161     @@ -2288,7 +2297,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
162     val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
163     val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
164     MVNETA_GMAC_CONFIG_GMII_SPEED |
165     - MVNETA_GMAC_CONFIG_FULL_DUPLEX);
166     + MVNETA_GMAC_CONFIG_FULL_DUPLEX |
167     + MVNETA_GMAC_AN_SPEED_EN |
168     + MVNETA_GMAC_AN_DUPLEX_EN);
169    
170     if (phydev->duplex)
171     val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
172     diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
173     index 887aebe..9095ff9 100644
174     --- a/drivers/net/ethernet/realtek/8139cp.c
175     +++ b/drivers/net/ethernet/realtek/8139cp.c
176     @@ -524,6 +524,7 @@ rx_status_loop:
177     PCI_DMA_FROMDEVICE);
178     if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
179     dev->stats.rx_dropped++;
180     + kfree_skb(new_skb);
181     goto rx_next;
182     }
183    
184     diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
185     index 2738b5f..a520465 100644
186     --- a/drivers/net/ethernet/sfc/filter.c
187     +++ b/drivers/net/ethernet/sfc/filter.c
188     @@ -675,7 +675,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
189     BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
190     BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
191     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
192     - rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
193     + rep_index = spec->type - EFX_FILTER_UC_DEF;
194     ins_index = rep_index;
195    
196     spin_lock_bh(&state->lock);
197     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
198     index 6e91931..06eba6e 100644
199     --- a/drivers/net/macvlan.c
200     +++ b/drivers/net/macvlan.c
201     @@ -727,6 +727,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
202     return -EADDRNOTAVAIL;
203     }
204    
205     + if (data && data[IFLA_MACVLAN_FLAGS] &&
206     + nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
207     + return -EINVAL;
208     +
209     if (data && data[IFLA_MACVLAN_MODE]) {
210     switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
211     case MACVLAN_MODE_PRIVATE:
212     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
213     index 2491eb2..7b54f4f 100644
214     --- a/drivers/net/tun.c
215     +++ b/drivers/net/tun.c
216     @@ -1076,8 +1076,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
217     u32 rxhash;
218    
219     if (!(tun->flags & TUN_NO_PI)) {
220     - if ((len -= sizeof(pi)) > total_len)
221     + if (len < sizeof(pi))
222     return -EINVAL;
223     + len -= sizeof(pi);
224    
225     if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
226     return -EFAULT;
227     @@ -1085,8 +1086,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
228     }
229    
230     if (tun->flags & TUN_VNET_HDR) {
231     - if ((len -= tun->vnet_hdr_sz) > total_len)
232     + if (len < tun->vnet_hdr_sz)
233     return -EINVAL;
234     + len -= tun->vnet_hdr_sz;
235    
236     if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
237     return -EFAULT;
238     diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
239     index 8728198..25ba7ec 100644
240     --- a/drivers/net/usb/cdc_mbim.c
241     +++ b/drivers/net/usb/cdc_mbim.c
242     @@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
243     { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
244     .driver_info = (unsigned long)&cdc_mbim_info_zlp,
245     },
246     + /* HP hs2434 Mobile Broadband Module needs ZLPs */
247     + { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
248     + .driver_info = (unsigned long)&cdc_mbim_info_zlp,
249     + },
250     { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
251     .driver_info = (unsigned long)&cdc_mbim_info,
252     },
253     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
254     index 57325f3..054489f 100644
255     --- a/drivers/net/vxlan.c
256     +++ b/drivers/net/vxlan.c
257     @@ -1090,7 +1090,7 @@ static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
258     iph->daddr = dst;
259     iph->saddr = fl4.saddr;
260     iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
261     - tunnel_ip_select_ident(skb, old_iph, &rt->dst);
262     + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
263    
264     nf_reset(skb);
265    
266     diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
267     index 2eb88ea..c4a2e77 100644
268     --- a/drivers/net/wireless/mwifiex/main.c
269     +++ b/drivers/net/wireless/mwifiex/main.c
270     @@ -363,20 +363,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
271     dev_err(adapter->dev, "cannot create default STA interface\n");
272     goto err_add_intf;
273     }
274     -
275     - /* Create AP interface by default */
276     - if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
277     - NL80211_IFTYPE_AP, NULL, NULL)) {
278     - dev_err(adapter->dev, "cannot create default AP interface\n");
279     - goto err_add_intf;
280     - }
281     -
282     - /* Create P2P interface by default */
283     - if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
284     - NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
285     - dev_err(adapter->dev, "cannot create default P2P interface\n");
286     - goto err_add_intf;
287     - }
288     rtnl_unlock();
289    
290     mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
291     diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
292     index 771812d..3bb9401 100644
293     --- a/drivers/rtc/rtc-max77686.c
294     +++ b/drivers/rtc/rtc-max77686.c
295     @@ -240,9 +240,9 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
296     }
297    
298     alrm->pending = 0;
299     - ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS1, &val);
300     + ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
301     if (ret < 0) {
302     - dev_err(info->dev, "%s:%d fail to read status1 reg(%d)\n",
303     + dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
304     __func__, __LINE__, ret);
305     goto out;
306     }
307     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
308     index 8ca5ac7..d6a518c 100644
309     --- a/drivers/vhost/net.c
310     +++ b/drivers/vhost/net.c
311     @@ -307,6 +307,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
312     struct vhost_virtqueue *vq = ubufs->vq;
313     int cnt = atomic_read(&ubufs->kref.refcount);
314    
315     + /* set len to mark this desc buffers done DMA */
316     + vq->heads[ubuf->desc].len = success ?
317     + VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
318     + vhost_net_ubuf_put(ubufs);
319     +
320     /*
321     * Trigger polling thread if guest stopped submitting new buffers:
322     * in this case, the refcount after decrement will eventually reach 1
323     @@ -317,10 +322,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
324     */
325     if (cnt <= 2 || !(cnt % 16))
326     vhost_poll_queue(&vq->poll);
327     - /* set len to mark this desc buffers done DMA */
328     - vq->heads[ubuf->desc].len = success ?
329     - VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
330     - vhost_net_ubuf_put(ubufs);
331     }
332    
333     /* Expects to be always run from workqueue - which acts as
334     diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
335     index 850e95b..b8b7dc7 100644
336     --- a/include/linux/ipv6.h
337     +++ b/include/linux/ipv6.h
338     @@ -101,6 +101,7 @@ struct inet6_skb_parm {
339     #define IP6SKB_FORWARDED 2
340     #define IP6SKB_REROUTED 4
341     #define IP6SKB_ROUTERALERT 8
342     +#define IP6SKB_FRAGMENTED 16
343     };
344    
345     #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
346     diff --git a/include/net/genetlink.h b/include/net/genetlink.h
347     index 93024a4..8e0b6c8 100644
348     --- a/include/net/genetlink.h
349     +++ b/include/net/genetlink.h
350     @@ -61,6 +61,7 @@ struct genl_family {
351     struct list_head ops_list; /* private */
352     struct list_head family_list; /* private */
353     struct list_head mcast_groups; /* private */
354     + struct module *module;
355     };
356    
357     /**
358     @@ -121,9 +122,24 @@ struct genl_ops {
359     struct list_head ops_list;
360     };
361    
362     -extern int genl_register_family(struct genl_family *family);
363     -extern int genl_register_family_with_ops(struct genl_family *family,
364     +extern int __genl_register_family(struct genl_family *family);
365     +
366     +static inline int genl_register_family(struct genl_family *family)
367     +{
368     + family->module = THIS_MODULE;
369     + return __genl_register_family(family);
370     +}
371     +
372     +extern int __genl_register_family_with_ops(struct genl_family *family,
373     struct genl_ops *ops, size_t n_ops);
374     +
375     +static inline int genl_register_family_with_ops(struct genl_family *family,
376     + struct genl_ops *ops, size_t n_ops)
377     +{
378     + family->module = THIS_MODULE;
379     + return __genl_register_family_with_ops(family, ops, n_ops);
380     +}
381     +
382     extern int genl_unregister_family(struct genl_family *family);
383     extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
384     extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
385     diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
386     index 09b1360..a9942e1 100644
387     --- a/include/net/ip_tunnels.h
388     +++ b/include/net/ip_tunnels.h
389     @@ -141,20 +141,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
390     return INET_ECN_encapsulate(tos, inner);
391     }
392    
393     -static inline void tunnel_ip_select_ident(struct sk_buff *skb,
394     - const struct iphdr *old_iph,
395     - struct dst_entry *dst)
396     -{
397     - struct iphdr *iph = ip_hdr(skb);
398     -
399     - /* Use inner packet iph-id if possible. */
400     - if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
401     - iph->id = old_iph->id;
402     - else
403     - __ip_select_ident(iph, dst,
404     - (skb_shinfo(skb)->gso_segs ?: 1) - 1);
405     -}
406     -
407     static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
408     {
409     int err;
410     diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
411     index e7f4e21..63ed1d1 100644
412     --- a/include/net/sch_generic.h
413     +++ b/include/net/sch_generic.h
414     @@ -682,13 +682,19 @@ struct psched_ratecfg {
415     u64 rate_bps;
416     u32 mult;
417     u16 overhead;
418     + u8 linklayer;
419     u8 shift;
420     };
421    
422     static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
423     unsigned int len)
424     {
425     - return ((u64)(len + r->overhead) * r->mult) >> r->shift;
426     + len += r->overhead;
427     +
428     + if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
429     + return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
430     +
431     + return ((u64)len * r->mult) >> r->shift;
432     }
433    
434     extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
435     @@ -699,6 +705,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
436     memset(res, 0, sizeof(*res));
437     res->rate = r->rate_bps >> 3;
438     res->overhead = r->overhead;
439     + res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
440     }
441    
442     #endif
443     diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
444     index e0133c7..590beda 100644
445     --- a/include/uapi/linux/icmpv6.h
446     +++ b/include/uapi/linux/icmpv6.h
447     @@ -115,6 +115,8 @@ struct icmp6hdr {
448     #define ICMPV6_NOT_NEIGHBOUR 2
449     #define ICMPV6_ADDR_UNREACH 3
450     #define ICMPV6_PORT_UNREACH 4
451     +#define ICMPV6_POLICY_FAIL 5
452     +#define ICMPV6_REJECT_ROUTE 6
453    
454     /*
455     * Codes for Time Exceeded
456     diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
457     index dbd71b0..09d62b92 100644
458     --- a/include/uapi/linux/pkt_sched.h
459     +++ b/include/uapi/linux/pkt_sched.h
460     @@ -73,9 +73,17 @@ struct tc_estimator {
461     #define TC_H_ROOT (0xFFFFFFFFU)
462     #define TC_H_INGRESS (0xFFFFFFF1U)
463    
464     +/* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */
465     +enum tc_link_layer {
466     + TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
467     + TC_LINKLAYER_ETHERNET,
468     + TC_LINKLAYER_ATM,
469     +};
470     +#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
471     +
472     struct tc_ratespec {
473     unsigned char cell_log;
474     - unsigned char __reserved;
475     + __u8 linklayer; /* lower 4 bits */
476     unsigned short overhead;
477     short cell_align;
478     unsigned short mpu;
479     diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
480     index ebfa444..84dd783 100644
481     --- a/net/bridge/br_fdb.c
482     +++ b/net/bridge/br_fdb.c
483     @@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
484     if (!pv)
485     return;
486    
487     - for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
488     + for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
489     f = __br_fdb_get(br, br->dev->dev_addr, vid);
490     if (f && f->is_local && !f->dst)
491     fdb_delete(br, f);
492     @@ -725,7 +725,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
493     /* VID was specified, so use it. */
494     err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
495     } else {
496     - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
497     + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
498     err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
499     goto out;
500     }
501     @@ -734,7 +734,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
502     * specify a VLAN. To be nice, add/update entry for every
503     * vlan on this port.
504     */
505     - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
506     + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
507     err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
508     if (err)
509     goto out;
510     @@ -812,7 +812,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
511    
512     err = __br_fdb_delete(p, addr, vid);
513     } else {
514     - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
515     + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
516     err = __br_fdb_delete(p, addr, 0);
517     goto out;
518     }
519     @@ -822,7 +822,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
520     * vlan on this port.
521     */
522     err = -ENOENT;
523     - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
524     + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
525     err &= __br_fdb_delete(p, addr, vid);
526     }
527     }
528     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
529     index d6448e3..d82058f 100644
530     --- a/net/bridge/br_multicast.c
531     +++ b/net/bridge/br_multicast.c
532     @@ -1185,7 +1185,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
533     max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
534     if (max_delay)
535     group = &mld->mld_mca;
536     - } else if (skb->len >= sizeof(*mld2q)) {
537     + } else {
538     if (!pskb_may_pull(skb, sizeof(*mld2q))) {
539     err = -EINVAL;
540     goto out;
541     @@ -1193,7 +1193,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
542     mld2q = (struct mld2_query *)icmp6_hdr(skb);
543     if (!mld2q->mld2q_nsrcs)
544     group = &mld2q->mld2q_mca;
545     - max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
546     +
547     + max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL);
548     }
549    
550     if (!group)
551     diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
552     index 8e3abf5..f66a034 100644
553     --- a/net/bridge/br_netlink.c
554     +++ b/net/bridge/br_netlink.c
555     @@ -128,7 +128,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
556     else
557     pv = br_get_vlan_info(br);
558    
559     - if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
560     + if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
561     goto done;
562    
563     af = nla_nest_start(skb, IFLA_AF_SPEC);
564     @@ -136,7 +136,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
565     goto nla_put_failure;
566    
567     pvid = br_get_pvid(pv);
568     - for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
569     + for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
570     vinfo.vid = vid;
571     vinfo.flags = 0;
572     if (vid == pvid)
573     diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
574     index bd58b45..9a9ffe7 100644
575     --- a/net/bridge/br_vlan.c
576     +++ b/net/bridge/br_vlan.c
577     @@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
578    
579     clear_bit(vid, v->vlan_bitmap);
580     v->num_vlans--;
581     - if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
582     + if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
583     if (v->port_idx)
584     rcu_assign_pointer(v->parent.port->vlan_info, NULL);
585     else
586     @@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
587     {
588     smp_wmb();
589     v->pvid = 0;
590     - bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
591     + bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
592     if (v->port_idx)
593     rcu_assign_pointer(v->parent.port->vlan_info, NULL);
594     else
595     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
596     index 00ee068..c99cc37 100644
597     --- a/net/core/flow_dissector.c
598     +++ b/net/core/flow_dissector.c
599     @@ -345,14 +345,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
600     if (new_index < 0)
601     new_index = skb_tx_hash(dev, skb);
602    
603     - if (queue_index != new_index && sk) {
604     - struct dst_entry *dst =
605     - rcu_dereference_check(sk->sk_dst_cache, 1);
606     -
607     - if (dst && skb_dst(skb) == dst)
608     - sk_tx_queue_set(sk, queue_index);
609     -
610     - }
611     + if (queue_index != new_index && sk &&
612     + rcu_access_pointer(sk->sk_dst_cache))
613     + sk_tx_queue_set(sk, queue_index);
614    
615     queue_index = new_index;
616     }
617     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
618     index ce90b02..0034b61 100644
619     --- a/net/core/neighbour.c
620     +++ b/net/core/neighbour.c
621     @@ -1445,16 +1445,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
622     atomic_set(&p->refcnt, 1);
623     p->reachable_time =
624     neigh_rand_reach_time(p->base_reachable_time);
625     + dev_hold(dev);
626     + p->dev = dev;
627     + write_pnet(&p->net, hold_net(net));
628     + p->sysctl_table = NULL;
629    
630     if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
631     + release_net(net);
632     + dev_put(dev);
633     kfree(p);
634     return NULL;
635     }
636    
637     - dev_hold(dev);
638     - p->dev = dev;
639     - write_pnet(&p->net, hold_net(net));
640     - p->sysctl_table = NULL;
641     write_lock_bh(&tbl->lock);
642     p->next = tbl->parms.next;
643     tbl->parms.next = p;
644     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
645     index a08bd2b..fd01eca 100644
646     --- a/net/core/rtnetlink.c
647     +++ b/net/core/rtnetlink.c
648     @@ -2142,7 +2142,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
649     /* If aging addresses are supported device will need to
650     * implement its own handler for this.
651     */
652     - if (ndm->ndm_state & NUD_PERMANENT) {
653     + if (!(ndm->ndm_state & NUD_PERMANENT)) {
654     pr_info("%s: FDB only supports static addresses\n", dev->name);
655     return -EINVAL;
656     }
657     @@ -2374,7 +2374,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
658     struct nlattr *extfilt;
659     u32 filter_mask = 0;
660    
661     - extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
662     + extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
663     IFLA_EXT_MASK);
664     if (extfilt)
665     filter_mask = nla_get_u32(extfilt);
666     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
667     index cfdb46a..2ff093b 100644
668     --- a/net/core/sysctl_net_core.c
669     +++ b/net/core/sysctl_net_core.c
670     @@ -20,7 +20,9 @@
671     #include <net/sock.h>
672     #include <net/net_ratelimit.h>
673    
674     +static int zero = 0;
675     static int one = 1;
676     +static int ushort_max = USHRT_MAX;
677    
678     #ifdef CONFIG_RPS
679     static int rps_sock_flow_sysctl(ctl_table *table, int write,
680     @@ -204,7 +206,9 @@ static struct ctl_table netns_core_table[] = {
681     .data = &init_net.core.sysctl_somaxconn,
682     .maxlen = sizeof(int),
683     .mode = 0644,
684     - .proc_handler = proc_dointvec
685     + .extra1 = &zero,
686     + .extra2 = &ushort_max,
687     + .proc_handler = proc_dointvec_minmax
688     },
689     { }
690     };
691     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
692     index dfc39d4..9e38217 100644
693     --- a/net/ipv4/devinet.c
694     +++ b/net/ipv4/devinet.c
695     @@ -771,7 +771,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
696     ci = nla_data(tb[IFA_CACHEINFO]);
697     if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
698     err = -EINVAL;
699     - goto errout;
700     + goto errout_free;
701     }
702     *pvalid_lft = ci->ifa_valid;
703     *pprefered_lft = ci->ifa_prefered;
704     @@ -779,6 +779,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
705    
706     return ifa;
707    
708     +errout_free:
709     + inet_free_ifa(ifa);
710     errout:
711     return ERR_PTR(err);
712     }
713     diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
714     index 49616fe..6e8a13d 100644
715     --- a/net/ipv4/fib_trie.c
716     +++ b/net/ipv4/fib_trie.c
717     @@ -71,7 +71,6 @@
718     #include <linux/init.h>
719     #include <linux/list.h>
720     #include <linux/slab.h>
721     -#include <linux/prefetch.h>
722     #include <linux/export.h>
723     #include <net/net_namespace.h>
724     #include <net/ip.h>
725     @@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
726     if (!c)
727     continue;
728    
729     - if (IS_LEAF(c)) {
730     - prefetch(rcu_dereference_rtnl(p->child[idx]));
731     + if (IS_LEAF(c))
732     return (struct leaf *) c;
733     - }
734    
735     /* Rescan start scanning in new node */
736     p = (struct tnode *) c;
737     diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
738     index 855004f..c52fee0 100644
739     --- a/net/ipv4/ip_gre.c
740     +++ b/net/ipv4/ip_gre.c
741     @@ -572,7 +572,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
742     if (daddr)
743     memcpy(&iph->daddr, daddr, 4);
744     if (iph->daddr)
745     - return t->hlen;
746     + return t->hlen + sizeof(*iph);
747    
748     return -(t->hlen + sizeof(*iph));
749     }
750     diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
751     index cbfc37f..b7a4c21 100644
752     --- a/net/ipv4/ip_tunnel.c
753     +++ b/net/ipv4/ip_tunnel.c
754     @@ -686,7 +686,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
755     iph->daddr = fl4.daddr;
756     iph->saddr = fl4.saddr;
757     iph->ttl = ttl;
758     - tunnel_ip_select_ident(skb, inner_iph, &rt->dst);
759     + __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
760    
761     iptunnel_xmit(skb, dev);
762     return;
763     diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
764     index dd44e0a..61e60d6 100644
765     --- a/net/ipv4/raw.c
766     +++ b/net/ipv4/raw.c
767     @@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
768     flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
769     RT_SCOPE_UNIVERSE,
770     inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
771     - inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
772     + inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
773     + (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
774     daddr, saddr, 0, 0);
775    
776     if (!inet->hdrincl) {
777     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
778     index ab450c0..2005561 100644
779     --- a/net/ipv4/tcp.c
780     +++ b/net/ipv4/tcp.c
781     @@ -1118,6 +1118,13 @@ new_segment:
782     goto wait_for_memory;
783    
784     /*
785     + * All packets are restored as if they have
786     + * already been sent.
787     + */
788     + if (tp->repair)
789     + TCP_SKB_CB(skb)->when = tcp_time_stamp;
790     +
791     + /*
792     * Check whether we can use HW checksum.
793     */
794     if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
795     diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
796     index a9077f4..b6ae92a 100644
797     --- a/net/ipv4/tcp_cubic.c
798     +++ b/net/ipv4/tcp_cubic.c
799     @@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
800     */
801     static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
802     {
803     - u64 offs;
804     - u32 delta, t, bic_target, max_cnt;
805     + u32 delta, bic_target, max_cnt;
806     + u64 offs, t;
807    
808     ca->ack_cnt++; /* count the number of ACKs */
809    
810     @@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
811     * if the cwnd < 1 million packets !!!
812     */
813    
814     + t = (s32)(tcp_time_stamp - ca->epoch_start);
815     + t += msecs_to_jiffies(ca->delay_min >> 3);
816     /* change the unit from HZ to bictcp_HZ */
817     - t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
818     - - ca->epoch_start) << BICTCP_HZ) / HZ;
819     + t <<= BICTCP_HZ;
820     + do_div(t, HZ);
821    
822     if (t < ca->bic_K) /* t - K */
823     offs = ca->bic_K - t;
824     @@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
825     return;
826    
827     /* Discard delay samples right after fast recovery */
828     - if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
829     + if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
830     return;
831    
832     delay = (rtt_us << 3) / USEC_PER_MSEC;
833     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
834     index 9c62257..4b75aad 100644
835     --- a/net/ipv4/tcp_input.c
836     +++ b/net/ipv4/tcp_input.c
837     @@ -3598,7 +3598,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
838     ++ptr;
839     tp->rx_opt.rcv_tsval = ntohl(*ptr);
840     ++ptr;
841     - tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
842     + if (*ptr)
843     + tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
844     + else
845     + tp->rx_opt.rcv_tsecr = 0;
846     return true;
847     }
848     return false;
849     @@ -3623,7 +3626,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
850     }
851    
852     tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
853     - if (tp->rx_opt.saw_tstamp)
854     + if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
855     tp->rx_opt.rcv_tsecr -= tp->tsoffset;
856    
857     return true;
858     @@ -5376,7 +5379,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
859     int saved_clamp = tp->rx_opt.mss_clamp;
860    
861     tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
862     - if (tp->rx_opt.saw_tstamp)
863     + if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
864     tp->rx_opt.rcv_tsecr -= tp->tsoffset;
865    
866     if (th->ack) {
867     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
868     index ec335fa..0145ce7 100644
869     --- a/net/ipv4/tcp_output.c
870     +++ b/net/ipv4/tcp_output.c
871     @@ -2664,7 +2664,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
872     int tcp_header_size;
873     int mss;
874    
875     - skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
876     + skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
877     if (unlikely(!skb)) {
878     dst_release(dst);
879     return NULL;
880     @@ -2808,6 +2808,8 @@ void tcp_connect_init(struct sock *sk)
881    
882     if (likely(!tp->repair))
883     tp->rcv_nxt = 0;
884     + else
885     + tp->rcv_tstamp = tcp_time_stamp;
886     tp->rcv_wup = tp->rcv_nxt;
887     tp->copied_seq = tp->rcv_nxt;
888    
889     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
890     index fb8c94c..d3057f9 100644
891     --- a/net/ipv6/addrconf.c
892     +++ b/net/ipv6/addrconf.c
893     @@ -1124,12 +1124,10 @@ retry:
894     if (ifp->flags & IFA_F_OPTIMISTIC)
895     addr_flags |= IFA_F_OPTIMISTIC;
896    
897     - ift = !max_addresses ||
898     - ipv6_count_addresses(idev) < max_addresses ?
899     - ipv6_add_addr(idev, &addr, tmp_plen,
900     - ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
901     - addr_flags) : NULL;
902     - if (IS_ERR_OR_NULL(ift)) {
903     + ift = ipv6_add_addr(idev, &addr, tmp_plen,
904     + ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK,
905     + addr_flags);
906     + if (IS_ERR(ift)) {
907     in6_ifa_put(ifp);
908     in6_dev_put(idev);
909     pr_info("%s: retry temporary address regeneration\n", __func__);
910     diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
911     index f083a58..b30ad37 100644
912     --- a/net/ipv6/addrlabel.c
913     +++ b/net/ipv6/addrlabel.c
914     @@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
915     /* add a label */
916     static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
917     {
918     + struct hlist_node *n;
919     + struct ip6addrlbl_entry *last = NULL, *p = NULL;
920     int ret = 0;
921    
922     - ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
923     - __func__,
924     - newp, replace);
925     + ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
926     + replace);
927    
928     - if (hlist_empty(&ip6addrlbl_table.head)) {
929     - hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
930     - } else {
931     - struct hlist_node *n;
932     - struct ip6addrlbl_entry *p = NULL;
933     - hlist_for_each_entry_safe(p, n,
934     - &ip6addrlbl_table.head, list) {
935     - if (p->prefixlen == newp->prefixlen &&
936     - net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
937     - p->ifindex == newp->ifindex &&
938     - ipv6_addr_equal(&p->prefix, &newp->prefix)) {
939     - if (!replace) {
940     - ret = -EEXIST;
941     - goto out;
942     - }
943     - hlist_replace_rcu(&p->list, &newp->list);
944     - ip6addrlbl_put(p);
945     - goto out;
946     - } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
947     - (p->prefixlen < newp->prefixlen)) {
948     - hlist_add_before_rcu(&newp->list, &p->list);
949     + hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
950     + if (p->prefixlen == newp->prefixlen &&
951     + net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
952     + p->ifindex == newp->ifindex &&
953     + ipv6_addr_equal(&p->prefix, &newp->prefix)) {
954     + if (!replace) {
955     + ret = -EEXIST;
956     goto out;
957     }
958     + hlist_replace_rcu(&p->list, &newp->list);
959     + ip6addrlbl_put(p);
960     + goto out;
961     + } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
962     + (p->prefixlen < newp->prefixlen)) {
963     + hlist_add_before_rcu(&newp->list, &p->list);
964     + goto out;
965     }
966     - hlist_add_after_rcu(&p->list, &newp->list);
967     + last = p;
968     }
969     + if (last)
970     + hlist_add_after_rcu(&last->list, &newp->list);
971     + else
972     + hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
973     out:
974     if (!ret)
975     ip6addrlbl_table.seq++;
976     diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
977     index b4ff0a4..70e704d 100644
978     --- a/net/ipv6/icmp.c
979     +++ b/net/ipv6/icmp.c
980     @@ -931,6 +931,14 @@ static const struct icmp6_err {
981     .err = ECONNREFUSED,
982     .fatal = 1,
983     },
984     + { /* POLICY_FAIL */
985     + .err = EACCES,
986     + .fatal = 1,
987     + },
988     + { /* REJECT_ROUTE */
989     + .err = EACCES,
990     + .fatal = 1,
991     + },
992     };
993    
994     int icmpv6_err_convert(u8 type, u8 code, int *err)
995     @@ -942,7 +950,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
996     switch (type) {
997     case ICMPV6_DEST_UNREACH:
998     fatal = 1;
999     - if (code <= ICMPV6_PORT_UNREACH) {
1000     + if (code < ARRAY_SIZE(tab_unreach)) {
1001     *err = tab_unreach[code].err;
1002     fatal = tab_unreach[code].fatal;
1003     }
1004     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
1005     index 5fc9c7a..2221ff6 100644
1006     --- a/net/ipv6/ip6_fib.c
1007     +++ b/net/ipv6/ip6_fib.c
1008     @@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
1009    
1010     if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
1011     #ifdef CONFIG_IPV6_SUBTREES
1012     - if (fn->subtree)
1013     - fn = fib6_lookup_1(fn->subtree, args + 1);
1014     + if (fn->subtree) {
1015     + struct fib6_node *sfn;
1016     + sfn = fib6_lookup_1(fn->subtree,
1017     + args + 1);
1018     + if (!sfn)
1019     + goto backtrack;
1020     + fn = sfn;
1021     + }
1022     #endif
1023     - if (!fn || fn->fn_flags & RTN_RTINFO)
1024     + if (fn->fn_flags & RTN_RTINFO)
1025     return fn;
1026     }
1027     }
1028     -
1029     +#ifdef CONFIG_IPV6_SUBTREES
1030     +backtrack:
1031     +#endif
1032     if (fn->fn_flags & RTN_ROOT)
1033     break;
1034    
1035     diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
1036     index ca4ffcc..060a044 100644
1037     --- a/net/ipv6/ndisc.c
1038     +++ b/net/ipv6/ndisc.c
1039     @@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
1040     int tlen = dev->needed_tailroom;
1041     struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
1042     struct sk_buff *skb;
1043     - int err;
1044    
1045     - skb = sock_alloc_send_skb(sk,
1046     - hlen + sizeof(struct ipv6hdr) + len + tlen,
1047     - 1, &err);
1048     + skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
1049     if (!skb) {
1050     - ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
1051     - __func__, err);
1052     + ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
1053     + __func__);
1054     return NULL;
1055     }
1056    
1057     @@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
1058     skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
1059     skb_reset_transport_header(skb);
1060    
1061     + /* Manually assign socket ownership as we avoid calling
1062     + * sock_alloc_send_pskb() to bypass wmem buffer limits
1063     + */
1064     + skb_set_owner_w(skb, sk);
1065     +
1066     return skb;
1067     }
1068    
1069     diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
1070     index 790d9f4..1aeb473 100644
1071     --- a/net/ipv6/reassembly.c
1072     +++ b/net/ipv6/reassembly.c
1073     @@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
1074     ipv6_hdr(head)->payload_len = htons(payload_len);
1075     ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
1076     IP6CB(head)->nhoff = nhoff;
1077     + IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
1078    
1079     /* Yes, and fold redundant checksum back. 8) */
1080     if (head->ip_summed == CHECKSUM_COMPLETE)
1081     @@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
1082     struct net *net = dev_net(skb_dst(skb)->dev);
1083     int evicted;
1084    
1085     + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
1086     + goto fail_hdr;
1087     +
1088     IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
1089    
1090     /* Jumbo payload inhibits frag. header */
1091     @@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
1092     ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
1093    
1094     IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
1095     + IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
1096     return 1;
1097     }
1098    
1099     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
1100     index 0a17ed9..66c7188 100644
1101     --- a/net/ipv6/tcp_ipv6.c
1102     +++ b/net/ipv6/tcp_ipv6.c
1103     @@ -1426,7 +1426,7 @@ ipv6_pktoptions:
1104     if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
1105     np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
1106     if (np->rxopt.bits.rxtclass)
1107     - np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
1108     + np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
1109     if (ipv6_opt_accepted(sk, opt_skb)) {
1110     skb_set_owner_r(opt_skb, sk);
1111     opt_skb = xchg(&np->pktoptions, opt_skb);
1112     diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
1113     index 1076fe1..393f17e 100644
1114     --- a/net/netlink/genetlink.c
1115     +++ b/net/netlink/genetlink.c
1116     @@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
1117     EXPORT_SYMBOL(genl_unregister_ops);
1118    
1119     /**
1120     - * genl_register_family - register a generic netlink family
1121     + * __genl_register_family - register a generic netlink family
1122     * @family: generic netlink family
1123     *
1124     * Registers the specified family after validating it first. Only one
1125     @@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
1126     *
1127     * Return 0 on success or a negative error code.
1128     */
1129     -int genl_register_family(struct genl_family *family)
1130     +int __genl_register_family(struct genl_family *family)
1131     {
1132     int err = -EINVAL;
1133    
1134     @@ -430,10 +430,10 @@ errout_locked:
1135     errout:
1136     return err;
1137     }
1138     -EXPORT_SYMBOL(genl_register_family);
1139     +EXPORT_SYMBOL(__genl_register_family);
1140    
1141     /**
1142     - * genl_register_family_with_ops - register a generic netlink family
1143     + * __genl_register_family_with_ops - register a generic netlink family
1144     * @family: generic netlink family
1145     * @ops: operations to be registered
1146     * @n_ops: number of elements to register
1147     @@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
1148     *
1149     * Return 0 on success or a negative error code.
1150     */
1151     -int genl_register_family_with_ops(struct genl_family *family,
1152     +int __genl_register_family_with_ops(struct genl_family *family,
1153     struct genl_ops *ops, size_t n_ops)
1154     {
1155     int err, i;
1156    
1157     - err = genl_register_family(family);
1158     + err = __genl_register_family(family);
1159     if (err)
1160     return err;
1161    
1162     @@ -476,7 +476,7 @@ err_out:
1163     genl_unregister_family(family);
1164     return err;
1165     }
1166     -EXPORT_SYMBOL(genl_register_family_with_ops);
1167     +EXPORT_SYMBOL(__genl_register_family_with_ops);
1168    
1169     /**
1170     * genl_unregister_family - unregister generic netlink family
1171     @@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
1172     }
1173     EXPORT_SYMBOL(genlmsg_put);
1174    
1175     +static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
1176     +{
1177     + struct genl_ops *ops = cb->data;
1178     + int rc;
1179     +
1180     + genl_lock();
1181     + rc = ops->dumpit(skb, cb);
1182     + genl_unlock();
1183     + return rc;
1184     +}
1185     +
1186     +static int genl_lock_done(struct netlink_callback *cb)
1187     +{
1188     + struct genl_ops *ops = cb->data;
1189     + int rc = 0;
1190     +
1191     + if (ops->done) {
1192     + genl_lock();
1193     + rc = ops->done(cb);
1194     + genl_unlock();
1195     + }
1196     + return rc;
1197     +}
1198     +
1199     static int genl_family_rcv_msg(struct genl_family *family,
1200     struct sk_buff *skb,
1201     struct nlmsghdr *nlh)
1202     @@ -572,15 +596,34 @@ static int genl_family_rcv_msg(struct genl_family *family,
1203     return -EPERM;
1204    
1205     if (nlh->nlmsg_flags & NLM_F_DUMP) {
1206     - struct netlink_dump_control c = {
1207     - .dump = ops->dumpit,
1208     - .done = ops->done,
1209     - };
1210     + int rc;
1211    
1212     if (ops->dumpit == NULL)
1213     return -EOPNOTSUPP;
1214    
1215     - return netlink_dump_start(net->genl_sock, skb, nlh, &c);
1216     + if (!family->parallel_ops) {
1217     + struct netlink_dump_control c = {
1218     + .module = family->module,
1219     + .data = ops,
1220     + .dump = genl_lock_dumpit,
1221     + .done = genl_lock_done,
1222     + };
1223     +
1224     + genl_unlock();
1225     + rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1226     + genl_lock();
1227     +
1228     + } else {
1229     + struct netlink_dump_control c = {
1230     + .module = family->module,
1231     + .dump = ops->dumpit,
1232     + .done = ops->done,
1233     + };
1234     +
1235     + rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
1236     + }
1237     +
1238     + return rc;
1239     }
1240    
1241     if (ops->doit == NULL)
1242     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1243     index 20a1bd0..a6895ab 100644
1244     --- a/net/packet/af_packet.c
1245     +++ b/net/packet/af_packet.c
1246     @@ -3259,9 +3259,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
1247    
1248     if (po->tp_version == TPACKET_V3) {
1249     lv = sizeof(struct tpacket_stats_v3);
1250     + st.stats3.tp_packets += st.stats3.tp_drops;
1251     data = &st.stats3;
1252     } else {
1253     lv = sizeof(struct tpacket_stats);
1254     + st.stats1.tp_packets += st.stats1.tp_drops;
1255     data = &st.stats1;
1256     }
1257    
1258     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
1259     index 281c1bd..51b968d 100644
1260     --- a/net/sched/sch_api.c
1261     +++ b/net/sched/sch_api.c
1262     @@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
1263     return q;
1264     }
1265    
1266     +/* The linklayer setting were not transferred from iproute2, in older
1267     + * versions, and the rate tables lookup systems have been dropped in
1268     + * the kernel. To keep backward compatible with older iproute2 tc
1269     + * utils, we detect the linklayer setting by detecting if the rate
1270     + * table were modified.
1271     + *
1272     + * For linklayer ATM table entries, the rate table will be aligned to
1273     + * 48 bytes, thus some table entries will contain the same value. The
1274     + * mpu (min packet unit) is also encoded into the old rate table, thus
1275     + * starting from the mpu, we find low and high table entries for
1276     + * mapping this cell. If these entries contain the same value, when
1277     + * the rate tables have been modified for linklayer ATM.
1278     + *
1279     + * This is done by rounding mpu to the nearest 48 bytes cell/entry,
1280     + * and then roundup to the next cell, calc the table entry one below,
1281     + * and compare.
1282     + */
1283     +static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
1284     +{
1285     + int low = roundup(r->mpu, 48);
1286     + int high = roundup(low+1, 48);
1287     + int cell_low = low >> r->cell_log;
1288     + int cell_high = (high >> r->cell_log) - 1;
1289     +
1290     + /* rtab is too inaccurate at rates > 100Mbit/s */
1291     + if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
1292     + pr_debug("TC linklayer: Giving up ATM detection\n");
1293     + return TC_LINKLAYER_ETHERNET;
1294     + }
1295     +
1296     + if ((cell_high > cell_low) && (cell_high < 256)
1297     + && (rtab[cell_low] == rtab[cell_high])) {
1298     + pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
1299     + cell_low, cell_high, rtab[cell_high]);
1300     + return TC_LINKLAYER_ATM;
1301     + }
1302     + return TC_LINKLAYER_ETHERNET;
1303     +}
1304     +
1305     static struct qdisc_rate_table *qdisc_rtab_list;
1306    
1307     struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
1308     @@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
1309     rtab->rate = *r;
1310     rtab->refcnt = 1;
1311     memcpy(rtab->data, nla_data(tab), 1024);
1312     + if (r->linklayer == TC_LINKLAYER_UNAWARE)
1313     + r->linklayer = __detect_linklayer(r, rtab->data);
1314     rtab->next = qdisc_rtab_list;
1315     qdisc_rtab_list = rtab;
1316     }
1317     diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1318     index 2022408..a7f838b 100644
1319     --- a/net/sched/sch_generic.c
1320     +++ b/net/sched/sch_generic.c
1321     @@ -908,6 +908,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
1322     memset(r, 0, sizeof(*r));
1323     r->overhead = conf->overhead;
1324     r->rate_bps = (u64)conf->rate << 3;
1325     + r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1326     r->mult = 1;
1327     /*
1328     * Calibrate mult, shift so that token counting is accurate
1329     diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
1330     index adaedd7..910667c 100644
1331     --- a/net/sched/sch_htb.c
1332     +++ b/net/sched/sch_htb.c
1333     @@ -87,7 +87,7 @@ struct htb_class {
1334     unsigned int children;
1335     struct htb_class *parent; /* parent class */
1336    
1337     - int prio; /* these two are used only by leaves... */
1338     + u32 prio; /* these two are used only by leaves... */
1339     int quantum; /* but stored for parent-to-leaf return */
1340    
1341     union {
1342     @@ -1312,6 +1312,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1343     struct htb_sched *q = qdisc_priv(sch);
1344     struct htb_class *cl = (struct htb_class *)*arg, *parent;
1345     struct nlattr *opt = tca[TCA_OPTIONS];
1346     + struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1347     struct nlattr *tb[TCA_HTB_MAX + 1];
1348     struct tc_htb_opt *hopt;
1349    
1350     @@ -1333,6 +1334,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1351     if (!hopt->rate.rate || !hopt->ceil.rate)
1352     goto failure;
1353    
1354     + /* Keeping backward compatible with rate_table based iproute2 tc */
1355     + if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
1356     + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
1357     + if (rtab)
1358     + qdisc_put_rtab(rtab);
1359     + }
1360     + if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
1361     + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
1362     + if (ctab)
1363     + qdisc_put_rtab(ctab);
1364     + }
1365     +
1366     if (!cl) { /* new class */
1367     struct Qdisc *new_q;
1368     int prio;
1369     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
1370     index 515ce38..7e26ad4 100644
1371     --- a/net/tipc/socket.c
1372     +++ b/net/tipc/socket.c
1373     @@ -1179,7 +1179,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1374     /* Accept only ACK or NACK message */
1375     if (unlikely(msg_errcode(msg))) {
1376     sock->state = SS_DISCONNECTING;
1377     - sk->sk_err = -ECONNREFUSED;
1378     + sk->sk_err = ECONNREFUSED;
1379     retval = TIPC_OK;
1380     break;
1381     }
1382     @@ -1190,7 +1190,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
1383     res = auto_connect(sock, msg);
1384     if (res) {
1385     sock->state = SS_DISCONNECTING;
1386     - sk->sk_err = res;
1387     + sk->sk_err = -res;
1388     retval = TIPC_OK;
1389     break;
1390     }