Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0137-4.4.38-all-fixes.patch



Revision 2873
Mon Mar 27 13:49:15 2017 UTC by niro
File size: 32131 bytes
linux-4.4.38
diff --git a/Makefile b/Makefile
index b57ec79b4941..6876efe0d735 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 37
+SUBLEVEL = 38
 EXTRAVERSION =
 NAME = Blurry Fish Butt

diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index c3c12efe0bc0..9c0c8fd0b292 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
 	sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];

 	/* 1. Make sure we are not getting garbage from the user */
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv_and_exit;

 	if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)

 	synchronize_user_stack();
 	sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
-	if (!invalid_frame_pointer(sf, sizeof(*sf)))
+	if (invalid_frame_pointer(sf, sizeof(*sf)))
 		goto segv;

 	if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a5331c336b2a..3d3414c14792 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -800,8 +800,10 @@ struct mdesc_mblock {
 };
 static struct mdesc_mblock *mblocks;
 static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask);

-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
 {
 	int i;

@@ -817,8 +819,11 @@ static unsigned long ra_to_pa(unsigned long addr)
 	return addr;
 }

-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
 {
+	static bool search_mdesc = true;
+	static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+	static int last_index;
 	int i;

 	addr = ra_to_pa(addr);
@@ -828,13 +833,30 @@ static int find_node(unsigned long addr)
 		if ((addr & p->mask) == p->val)
 			return i;
 	}
-	/* The following condition has been observed on LDOM guests.*/
-	WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
-		" rule. Some physical memory will be owned by node 0.");
-	return 0;
+	/* The following condition has been observed on LDOM guests because
+	 * node_masks only contains the best latency mask and value.
+	 * LDOM guest's mdesc can contain a single latency group to
+	 * cover multiple address range. Print warning message only if the
+	 * address cannot be found in node_masks nor mdesc.
+	 */
+	if ((search_mdesc) &&
+	    ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+		/* find the available node in the mdesc */
+		last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+		numadbg("find_node: latency group for address 0x%lx is %d\n",
+			addr, last_index);
+		if ((last_index < 0) || (last_index >= num_node_masks)) {
+			/* WARN_ONCE() and use default group 0 */
+			WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+			search_mdesc = false;
+			last_index = 0;
+		}
+	}
+
+	return last_index;
 }

-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
 {
 	*nid = find_node(start);
 	start += PAGE_SIZE;
@@ -1158,6 +1180,41 @@ int __node_distance(int from, int to)
 	return numa_latency[from][to];
 }

+static int find_numa_node_for_addr(unsigned long pa,
+				   struct node_mem_mask *pnode_mask)
+{
+	struct mdesc_handle *md = mdesc_grab();
+	u64 node, arc;
+	int i = 0;
+
+	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+	if (node == MDESC_NODE_NULL)
+		goto out;
+
+	mdesc_for_each_node_by_name(md, node, "group") {
+		mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+			u64 target = mdesc_arc_target(md, arc);
+			struct mdesc_mlgroup *m = find_mlgroup(target);
+
+			if (!m)
+				continue;
+			if ((pa & m->mask) == m->match) {
+				if (pnode_mask) {
+					pnode_mask->mask = m->mask;
+					pnode_mask->val = m->match;
+				}
+				mdesc_release(md);
+				return i;
+			}
+		}
+		i++;
+	}
+
+out:
+	mdesc_release(md);
+	return -1;
+}
+
 static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
 {
 	int i;
diff --git a/block/blk-map.c b/block/blk-map.c
index f565e11f465a..69953bd97e65 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -90,6 +90,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	if (!iter || !iter->count)
 		return -EINVAL;

+	if (!iter_is_iovec(iter))
+		return -EINVAL;
+
 	iov_for_each(iov, i, *iter) {
 		unsigned long uaddr = (unsigned long) iov.iov_base;

diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 6f946fedbb77..0864f05633a2 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1137,6 +1137,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
 				   struct phy_device *phydev)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
+	struct ethtool_eee *p = &priv->port_sts[port].eee;
 	u32 id_mode_dis = 0, port_mode;
 	const char *str = NULL;
 	u32 reg;
@@ -1211,6 +1212,9 @@ force_link:
 		reg |= DUPLX_MODE;

 	core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+
+	if (!phydev->is_pseudo_fixed_link)
+		p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
 }

 static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0fb3f8de88e9..91627561c58d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1168,6 +1168,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
 	unsigned int pkts_compl = 0;
@@ -1195,7 +1196,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 			pkts_compl++;
 			dev->stats.tx_packets++;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
-			dma_unmap_single(&dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(tx_cb_ptr, dma_addr),
 					 dma_unmap_len(tx_cb_ptr, dma_len),
 					 DMA_TO_DEVICE);
@@ -1203,7 +1204,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
 		} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
 			dev->stats.tx_bytes +=
 				dma_unmap_len(tx_cb_ptr, dma_len);
-			dma_unmap_page(&dev->dev,
+			dma_unmap_page(kdev,
 				       dma_unmap_addr(tx_cb_ptr, dma_addr),
 				       dma_unmap_len(tx_cb_ptr, dma_len),
 				       DMA_TO_DEVICE);
@@ -1754,6 +1755,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,

 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 {
+	struct device *kdev = &priv->pdev->dev;
 	struct enet_cb *cb;
 	int i;

@@ -1761,7 +1763,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 		cb = &priv->rx_cbs[i];

 		if (dma_unmap_addr(cb, dma_addr)) {
-			dma_unmap_single(&priv->dev->dev,
+			dma_unmap_single(kdev,
 					 dma_unmap_addr(cb, dma_addr),
 					 priv->rx_buf_len, DMA_FROM_DEVICE);
 			dma_unmap_addr_set(cb, dma_addr, 0);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 5606a043063e..4b62aa1f9ff8 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);

 static void sky2_shutdown(struct pci_dev *pdev)
 {
+	struct sky2_hw *hw = pci_get_drvdata(pdev);
+	int port;
+
+	for (port = 0; port < hw->ports; port++) {
+		struct net_device *ndev = hw->dev[port];
+
+		rtnl_lock();
+		if (netif_running(ndev)) {
+			dev_close(ndev);
+			netif_device_detach(ndev);
+		}
+		rtnl_unlock();
+	}
 	sky2_suspend(&pdev->dev);
 	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
 	pci_set_power_state(pdev, PCI_D3hot);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 36fc9427418f..480f3dae0780 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -832,7 +832,7 @@ static struct sh_eth_cpu_data r7s72100_data = {

 	.ecsr_value = ECSR_ICD,
 	.ecsipr_value = ECSIPR_ICDIP,
-	.eesipr_value = 0xff7f009f,
+	.eesipr_value = 0xe77f009f,

 	.tx_check = EESR_TC1 | EESR_FTC,
 	.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4827c6987ac3..f0961cbaf87e 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -815,7 +815,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs4 = geneve->sock4;
 	struct rtable *rt = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	int err = -EINVAL;
 	struct flowi4 fl4;
 	__u8 tos, ttl;
@@ -842,8 +841,6 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 	skb_reset_mac_header(skb);

-	iip = ip_hdr(skb);
-
 	if (info) {
 		const struct ip_tunnel_key *key = &info->key;
 		u8 *opts = NULL;
@@ -859,7 +856,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		if (unlikely(err))
 			goto err;

-		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
+		tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
 		ttl = key->ttl;
 		df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
 	} else {
@@ -869,7 +866,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		if (unlikely(err))
 			goto err;

-		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
+		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
 		ttl = geneve->ttl;
 		if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
 			ttl = 1;
@@ -903,7 +900,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	struct geneve_dev *geneve = netdev_priv(dev);
 	struct geneve_sock *gs6 = geneve->sock6;
 	struct dst_entry *dst = NULL;
-	const struct iphdr *iip; /* interior IP header */
 	int err = -EINVAL;
 	struct flowi6 fl6;
 	__u8 prio, ttl;
@@ -927,8 +923,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 	sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
 	skb_reset_mac_header(skb);

-	iip = ip_hdr(skb);
-
 	if (info) {
 		const struct ip_tunnel_key *key = &info->key;
 		u8 *opts = NULL;
@@ -945,7 +939,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		if (unlikely(err))
 			goto err;

-		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
+		prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
 		ttl = key->ttl;
 	} else {
 		udp_csum = false;
@@ -954,7 +948,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		if (unlikely(err))
 			goto err;

-		prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, iip, skb);
+		prio = ip_tunnel_ecn_encap(fl6.flowi6_tos, ip_hdr(skb), skb);
 		ttl = geneve->ttl;
 		if (!ttl && ipv6_addr_is_multicast(&fl6.daddr))
 			ttl = 1;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f94ab786088f..0e2a19e58923 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1465,6 +1465,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 		netif_napi_del(&vi->rq[i].napi);
 	}

+	/* We called napi_hash_del() before netif_napi_del(),
+	 * we need to respect an RCU grace period before freeing vi->rq
+	 */
+	synchronize_net();
+
 	kfree(vi->rq);
 	kfree(vi->sq);
 }
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 5f9c59da978b..e2225109b816 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -101,12 +101,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages);

 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags);

-static inline size_t iov_iter_count(struct iov_iter *i)
+static inline size_t iov_iter_count(const struct iov_iter *i)
 {
 	return i->count;
 }

-static inline bool iter_is_iovec(struct iov_iter *i)
+static inline bool iter_is_iovec(const struct iov_iter *i)
 {
 	return !(i->type & (ITER_BVEC | ITER_KVEC));
 }
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 2c2eb1b629b1..2e9a1c2818c7 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -217,6 +217,8 @@ int peernet2id_alloc(struct net *net, struct net *peer)
 	bool alloc;
 	int id;

+	if (atomic_read(&net->count) == 0)
+		return NETNSA_NSID_NOT_ASSIGNED;
 	spin_lock_irqsave(&net->nsid_lock, flags);
 	alloc = atomic_read(&peer->count) == 0 ? false : true;
 	id = __peernet2id_alloc(net, peer, &alloc);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 87b91ffbdec3..b94e165a4f79 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2600,7 +2600,10 @@ nla_put_failure:

 static inline size_t rtnl_fdb_nlmsg_size(void)
 {
-	return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+	return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+	       nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
+	       nla_total_size(sizeof(u16)) + /* NDA_VLAN */
+	       0;
 }

 static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type)
diff --git a/net/core/sock.c b/net/core/sock.c
index 88f017854509..f4c0917e66b5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -745,7 +745,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 		val = min_t(u32, val, sysctl_wmem_max);
set_sndbuf:
 		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-		sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
+		sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
 		/* Wake up sending tasks if we upped the value. */
 		sk->sk_write_space(sk);
 		break;
@@ -781,7 +781,7 @@ set_rcvbuf:
 		 * returning the value we actually used in getsockopt
 		 * is the most desirable behavior.
 		 */
-		sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
+		sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
 		break;

 	case SO_RCVBUFFORCE:
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 861e1fa25d5e..0759f5b9180e 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -698,6 +698,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
 {
 	const struct dccp_hdr *dh;
 	unsigned int cscov;
+	u8 dccph_doff;

 	if (skb->pkt_type != PACKET_HOST)
 		return 1;
@@ -719,18 +720,19 @@ int dccp_invalid_packet(struct sk_buff *skb)
 	/*
 	 * If P.Data Offset is too small for packet type, drop packet and return
 	 */
-	if (dh->dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
-		DCCP_WARN("P.Data Offset(%u) too small\n", dh->dccph_doff);
+	dccph_doff = dh->dccph_doff;
+	if (dccph_doff < dccp_hdr_len(skb) / sizeof(u32)) {
+		DCCP_WARN("P.Data Offset(%u) too small\n", dccph_doff);
 		return 1;
 	}
 	/*
 	 * If P.Data Offset is too too large for packet, drop packet and return
 	 */
-	if (!pskb_may_pull(skb, dh->dccph_doff * sizeof(u32))) {
-		DCCP_WARN("P.Data Offset(%u) too large\n", dh->dccph_doff);
+	if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
+		DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
 		return 1;
 	}
-
+	dh = dccp_hdr(skb);
 	/*
 	 * If P.type is not Data, Ack, or DataAck and P.X == 0 (the packet
 	 * has short sequence numbers), drop packet and return
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index d95631d09248..20fb25e3027b 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -476,7 +476,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
 		esph = (void *)skb_push(skb, 4);
 		*seqhi = esph->spi;
 		esph->spi = esph->seq_no;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 	}

diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index f2ad5216c438..2b7283303650 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -102,6 +102,9 @@ int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)

 	iph->tot_len = htons(skb->len);
 	ip_send_check(iph);
+
+	skb->protocol = htons(ETH_P_IP);
+
 	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
 		       dst_output);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index aa67e0e64b69..23160d2b3f71 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -660,6 +660,10 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
 	if (len > 0xFFFF)
 		return -EMSGSIZE;

+	/* Must have at least a full ICMP header. */
+	if (len < icmph_len)
+		return -EINVAL;
+
 	/*
 	 * Check the flags.
 	 */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 060a60b2f8a6..111ba55fd512 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -418,7 +418,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
 		esph = (void *)skb_push(skb, 4);
 		*seqhi = esph->spi;
 		esph->spi = esph->seq_no;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
+		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
 		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
 	}

diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index e8878886eba4..2994d1f1a661 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1043,6 +1043,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 	struct ipv6_tel_txoption opt;
 	struct dst_entry *dst = NULL, *ndst = NULL;
 	struct net_device *tdev;
+	bool use_cache = false;
 	int mtu;
 	unsigned int max_headroom = sizeof(struct ipv6hdr);
 	u8 proto;
@@ -1070,7 +1071,15 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,

 		memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
 		neigh_release(neigh);
-	} else if (!fl6->flowi6_mark)
+	} else if (!(t->parms.flags &
+		     (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+		/* enable the cache only only if the routing decision does
+		 * not depend on the current inner header value
+		 */
+		use_cache = true;
+	}
+
+	if (use_cache)
 		dst = ip6_tnl_dst_get(t);

 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
@@ -1134,7 +1143,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
 		skb = new_skb;
 	}

-	if (!fl6->flowi6_mark && ndst)
+	if (use_cache && ndst)
 		ip6_tnl_dst_set(t, ndst);
 	skb_dst_set(skb, dst);

diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 462f2a76b5c2..1d184322a7b1 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -148,6 +148,8 @@ int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
 	ipv6_hdr(skb)->payload_len = htons(len);
 	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

+	skb->protocol = htons(ETH_P_IPV6);
+
 	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 		       net, sk, skb, NULL, skb_dst(skb)->dev,
 		       dst_output);
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 42de4ccd159f..d0e906d39642 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	int ret;
 	int chk_addr_ret;

-	if (!sock_flag(sk, SOCK_ZAPPED))
-		return -EINVAL;
 	if (addr_len < sizeof(struct sockaddr_l2tpip))
 		return -EINVAL;
 	if (addr->l2tp_family != AF_INET)
@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	read_unlock_bh(&l2tp_ip_lock);

 	lock_sock(sk);
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		goto out;
+
 	if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
 		goto out;

diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 9ee4ddb6b397..3c4f867d3633 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -266,8 +266,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	int addr_type;
 	int err;

-	if (!sock_flag(sk, SOCK_ZAPPED))
-		return -EINVAL;
 	if (addr->l2tp_family != AF_INET6)
 		return -EINVAL;
 	if (addr_len < sizeof(*addr))
@@ -293,6 +291,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	lock_sock(sk);

 	err = -EINVAL;
+	if (!sock_flag(sk, SOCK_ZAPPED))
+		goto out_unlock;
+
 	if (sk->sk_state != TCP_CLOSE)
 		goto out_unlock;

diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 28fc283c1ec1..360700a2f46c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -931,7 +931,6 @@ static void netlink_sock_destruct(struct sock *sk)
 	if (nlk->cb_running) {
 		if (nlk->cb.done)
 			nlk->cb.done(&nlk->cb);
-
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -960,6 +959,14 @@ static void netlink_sock_destruct(struct sock *sk)
 	WARN_ON(nlk_sk(sk)->groups);
 }

+static void netlink_sock_destruct_work(struct work_struct *work)
+{
+	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
+						work);
+
+	sk_free(&nlk->sk);
+}
+
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
  * SMP. Look, when several writers sleep and reader wakes them up, all but one
  * immediately hit write lock and grab all the cpus. Exclusive sleep solves
@@ -1265,8 +1272,18 @@ out_module:
 static void deferred_put_nlk_sk(struct rcu_head *head)
 {
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+	struct sock *sk = &nlk->sk;
+
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}

-	sock_put(&nlk->sk);
+	sk_free(sk);
 }

 static int netlink_release(struct socket *sock)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index 14437d9b1965..df32cb92d9fc 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -3,6 +3,7 @@

 #include <linux/rhashtable.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <net/sock.h>

 #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -53,6 +54,7 @@ struct netlink_sock {

 	struct rhash_head node;
 	struct rcu_head rcu;
+	struct work_struct work;
 };

 static inline struct netlink_sock *nlk_sk(struct sock *sk)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 34e4fcfd240b..f223d1c80ccf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3572,19 +3572,25 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv

 		if (optlen != sizeof(val))
 			return -EINVAL;
-		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
-			return -EBUSY;
 		if (copy_from_user(&val, optval, sizeof(val)))
 			return -EFAULT;
 		switch (val) {
 		case TPACKET_V1:
 		case TPACKET_V2:
 		case TPACKET_V3:
-			po->tp_version = val;
-			return 0;
+			break;
 		default:
 			return -EINVAL;
 		}
+		lock_sock(sk);
+		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+			ret = -EBUSY;
+		} else {
+			po->tp_version = val;
+			ret = 0;
+		}
+		release_sock(sk);
+		return ret;
 	}
 	case PACKET_RESERVE:
 	{
@@ -4067,6 +4073,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 	/* Added to avoid minimal code churn */
 	struct tpacket_req *req = &req_u->req;

+	lock_sock(sk);
 	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
 	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
 		WARN(1, "Tx-ring is not supported.\n");
@@ -4148,7 +4155,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		goto out;
 	}

-	lock_sock(sk);

 	/* Detach socket from network */
 	spin_lock(&po->bind_lock);
@@ -4197,11 +4203,11 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 		if (!tx_ring)
 			prb_shutdown_retire_blk_timer(po, rb_queue);
 	}
-	release_sock(sk);

 	if (pg_vec)
 		free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:
+	release_sock(sk);
 	return err;
 }

diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index e38a7701f154..c3434e902445 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -104,6 +104,17 @@ static void tcf_pedit_cleanup(struct tc_action *a, int bind)
 	kfree(keys);
 }

+static bool offset_valid(struct sk_buff *skb, int offset)
+{
+	if (offset > 0 && offset > skb->len)
+		return false;
+
+	if (offset < 0 && -offset > skb_headroom(skb))
+		return false;
+
+	return true;
+}
+
 static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 		     struct tcf_result *res)
 {
@@ -130,6 +141,11 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 			if (tkey->offmask) {
 				char *d, _d;

+				if (!offset_valid(skb, off + tkey->at)) {
+					pr_info("tc filter pedit 'at' offset %d out of bounds\n",
+						off + tkey->at);
+					goto bad;
+				}
 				d = skb_header_pointer(skb, off + tkey->at, 1,
 						       &_d);
 				if (!d)
@@ -142,10 +158,10 @@ static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
 					" offset must be on 32 bit boundaries\n");
 				goto bad;
 			}
-			if (offset > 0 && offset > skb->len) {
-				pr_info("tc filter pedit"
-					" offset %d can't exceed pkt length %d\n",
-					offset, skb->len);
+
+			if (!offset_valid(skb, off + offset)) {
+				pr_info("tc filter pedit offset %d out of bounds\n",
+					offset);
 				goto bad;
 			}

diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 0b8c3ace671f..1bf1f4517db6 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -62,9 +62,6 @@ static unsigned long basic_get(struct tcf_proto *tp, u32 handle)
 	struct basic_head *head = rtnl_dereference(tp->root);
 	struct basic_filter *f;

-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(f, &head->flist, link) {
 		if (f->handle == handle) {
 			l = (unsigned long) f;
@@ -109,7 +106,6 @@ static bool basic_destroy(struct tcf_proto *tp, bool force)
 		tcf_unbind_filter(tp, &f->res);
 		call_rcu(&f->rcu, basic_delete_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 5faaa5425f7b..3eef0215e53f 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -199,7 +199,6 @@ static bool cls_bpf_destroy(struct tcf_proto *tp, bool force)
 		call_rcu(&prog->rcu, __cls_bpf_delete_prog);
 	}

-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
@@ -210,9 +209,6 @@ static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
 	struct cls_bpf_prog *prog;
 	unsigned long ret = 0UL;

-	if (head == NULL)
-		return 0UL;
-
 	list_for_each_entry(prog, &head->plist, link) {
 		if (prog->handle == handle) {
 			ret = (unsigned long) prog;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 4c85bd3a750c..c104c2019feb 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -130,11 +130,10 @@ static bool cls_cgroup_destroy(struct tcf_proto *tp, bool force)

 	if (!force)
 		return false;
-
-	if (head) {
-		RCU_INIT_POINTER(tp->root, NULL);
+	/* Head can still be NULL due to cls_cgroup_init(). */
+	if (head)
 		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
-	}
+
 	return true;
 }

diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index fbfec6a18839..d7ba2b4ff0f3 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -583,7 +583,6 @@ static bool flow_destroy(struct tcf_proto *tp, bool force)
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, flow_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
 	kfree_rcu(head, rcu);
 	return true;
 }
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 95b021243233..e5a58c82728a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/rhashtable.h>
+#include <linux/workqueue.h>

 #include <linux/if_ether.h>
 #include <linux/in6.h>
@@ -55,7 +56,10 @@ struct cls_fl_head {
 	bool mask_assigned;
 	struct list_head filters;
 	struct rhashtable_params ht_params;
-	struct rcu_head rcu;
+	union {
+		struct work_struct work;
+		struct rcu_head rcu;
+	};
 };

 struct cls_fl_filter {
@@ -165,6 +169,24 @@ static void fl_destroy_filter(struct rcu_head *head)
 	kfree(f);
 }

+static void fl_destroy_sleepable(struct work_struct *work)
+{
+	struct cls_fl_head *head = container_of(work, struct cls_fl_head,
+						work);
+	if (head->mask_assigned)
+		rhashtable_destroy(&head->ht);
+	kfree(head);
+	module_put(THIS_MODULE);
+}
+
+static void fl_destroy_rcu(struct rcu_head *rcu)
+{
+	struct cls_fl_head *head = container_of(rcu, struct cls_fl_head, rcu);
+
+	INIT_WORK(&head->work, fl_destroy_sleepable);
+	schedule_work(&head->work);
+}
+
 static bool fl_destroy(struct tcf_proto *tp, bool force)
 {
 	struct cls_fl_head *head = rtnl_dereference(tp->root);
@@ -177,10 +199,9 @@ static bool fl_destroy(struct tcf_proto *tp, bool force)
 		list_del_rcu(&f->list);
 		call_rcu(&f->rcu, fl_destroy_filter);
 	}
-	RCU_INIT_POINTER(tp->root, NULL);
-	if (head->mask_assigned)
-		rhashtable_destroy(&head->ht);
-	kfree_rcu(head, rcu);
+
+	__module_get(THIS_MODULE);
+	call_rcu(&head->rcu, fl_destroy_rcu);
 	return true;
 }

diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index f9c9fc075fe6..9992dfac6938 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -152,7 +152,8 @@ static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 		return -1;
 	nhptr = ip_hdr(skb);
 #endif
-
+	if (unlikely(!head))
+		return -1;
restart:

#if RSVP_DST_LEN == 4
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 944c8ff45055..403746b20263 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -503,7 +503,6 @@ static bool tcindex_destroy(struct tcf_proto *tp, bool force)
 	walker.fn = tcindex_destroy_element;
 	tcindex_walk(tp, &walker);

-	RCU_INIT_POINTER(tp->root, NULL);
 	call_rcu(&p->rcu, __tcindex_destroy);
 	return true;
 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 824cc1e160bc..73f75258ce46 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2194,7 +2194,8 @@ out:
 * Sleep until more data has arrived. But check for races..
 */
 static long unix_stream_data_wait(struct sock *sk, long timeo,
-				  struct sk_buff *last, unsigned int last_len)
+				  struct sk_buff *last, unsigned int last_len,
+				  bool freezable)
 {
 	struct sk_buff *tail;
 	DEFINE_WAIT(wait);
@@ -2215,7 +2216,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,

 		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
 		unix_state_unlock(sk);
-		timeo = freezable_schedule_timeout(timeo);
+		if (freezable)
+			timeo = freezable_schedule_timeout(timeo);
+		else
+			timeo = schedule_timeout(timeo);
 		unix_state_lock(sk);

 		if (sock_flag(sk, SOCK_DEAD))
@@ -2245,7 +2249,8 @@ struct unix_stream_read_state {
 	unsigned int splice_flags;
 };

-static int unix_stream_read_generic(struct unix_stream_read_state *state)
+static int unix_stream_read_generic(struct unix_stream_read_state *state,
+				    bool freezable)
 {
 	struct scm_cookie scm;
 	struct socket *sock = state->socket;
@@ -2324,7 +2329,7 @@ again:
 			mutex_unlock(&u->iolock);

 			timeo = unix_stream_data_wait(sk, timeo, last,
-						      last_len);
+						      last_len, freezable);

 			if (signal_pending(current)) {
 				err = sock_intr_errno(timeo);
@@ -2466,7 +2471,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
 		.flags = flags
 	};

-	return unix_stream_read_generic(&state);
+	return unix_stream_read_generic(&state, true);
 }

 static ssize_t skb_unix_socket_splice(struct sock *sk,
@@ -2512,7 +2517,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
 		flags & SPLICE_F_NONBLOCK)
 		state.flags = MSG_DONTWAIT;

-	return unix_stream_read_generic(&state);
+	return unix_stream_read_generic(&state, false);
 }

 static int unix_shutdown(struct socket *sock, int mode)
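
Note on the net/core/sock.c hunk above, which changes max_t(u32, ...) to max_t(int, ...) for SO_SNDBUF/SO_RCVBUF: below is a minimal userspace sketch of why the unsigned comparison was wrong. The max_t() and SOCK_MIN_SNDBUF definitions here are simplified stand-ins for the kernel's, and the wrapping of val * 2 assumes the -fno-strict-overflow semantics the kernel is built with (plain C would call this undefined behavior).

#include <stdio.h>

/* Simplified stand-ins for the kernel's max_t() and SOCK_MIN_SNDBUF;
 * values and definitions are illustrative only. */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define SOCK_MIN_SNDBUF 4608

int main(void)
{
	int val = 0x7fffffff;	/* an absurdly large SO_SNDBUF request */
	int sk_sndbuf;

	/* Before the fix: val * 2 wraps to -2, whose u32 bit pattern
	 * (0xfffffffe) compares greater than SOCK_MIN_SNDBUF, so the
	 * negative value ends up stored in sk_sndbuf. */
	sk_sndbuf = max_t(unsigned int, val * 2, SOCK_MIN_SNDBUF);
	printf("u32 compare: sk_sndbuf = %d\n", sk_sndbuf);	/* prints -2 */

	/* After the fix: the signed comparison clamps the wrapped
	 * value back up to SOCK_MIN_SNDBUF. */
	sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
	printf("int compare: sk_sndbuf = %d\n", sk_sndbuf);	/* prints 4608 */

	return 0;
}

A negative sk_sndbuf can confuse every later buffer-space calculation on the socket, which is why the fix compares as int and lets SOCK_MIN_SNDBUF win.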