Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.18/0107-3.18.8-all-fixes.patch

Revision 2551 - Wed Mar 18 09:21:11 2015 UTC by niro
File size: 21599 bytes
-linux-3.18.8
diff --git a/Makefile b/Makefile
index 0efae2279fbe..0b3f8a1b3715 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 7
+SUBLEVEL = 8
EXTRAVERSION =
NAME = Diseased Newt

diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index ed2c8a1ed8ca..98893a8332c7 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -42,11 +42,17 @@ static int ir_lirc_decode(struct rc_dev *dev, struct ir_raw_event ev)
return -EINVAL;

/* Packet start */
- if (ev.reset)
- return 0;
+ if (ev.reset) {
+ /* Userspace expects a long space event before the start of
+ * the signal to use as a sync. This may be done with repeat
+ * packets and normal samples. But if a reset has been sent
+ * then we assume that a long time has passed, so we send a
+ * space with the maximum time value. */
+ sample = LIRC_SPACE(LIRC_VALUE_MASK);
+ IR_dprintk(2, "delivering reset sync space to lirc_dev\n");

/* Carrier reports */
- if (ev.carrier_report) {
+ } else if (ev.carrier_report) {
sample = LIRC_FREQUENCY(ev.carrier);
IR_dprintk(2, "carrier report (freq: %d)\n", sample);

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 40beef5bca88..ec4cebabff49 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;

for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)

work_done = netxen_process_rcv_ring(sds_ring, budget);

- if ((work_done < budget) && tx_complete) {
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7d76c9523395..63d7a642733d 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -716,7 +716,7 @@ int netvsc_send(struct hv_device *device,
u64 req_id;
unsigned int section_index = NETVSC_INVALID_INDEX;
u32 msg_size = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
u16 q_idx = packet->q_idx;


@@ -743,8 +743,6 @@ int netvsc_send(struct hv_device *device,
packet);
skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
- if (skb)
- dev_kfree_skb_any(skb);
packet->page_buf_cnt = 0;
}
}
@@ -807,6 +805,13 @@ int netvsc_send(struct hv_device *device,
packet, ret);
}

+ if (ret != 0) {
+ if (section_index != NETVSC_INVALID_INDEX)
+ netvsc_free_send_slot(net_device, section_index);
+ } else if (skb) {
+ dev_kfree_skb_any(skb);
+ }
+
return ret;
}

diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625d95d5..b5edc7f96a39 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
/*
* See if we managed to reduce the size of the packet.
*/
- if (olen < isize) {
+ if (olen < isize && olen <= osize) {
state->stats.comp_bytes += olen;
state->stats.comp_packets++;
} else {
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 2c9e6864abd9..fc7391e14c2a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4849,9 +4849,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
test_opt(sb, JOURNAL_CHECKSUM)) {
ext4_msg(sb, KERN_ERR, "changing journal_checksum "
- "during remount not supported");
- err = -EINVAL;
- goto restore_opts;
+ "during remount not supported; ignoring");
+ sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
}

if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
diff --git a/include/net/ip.h b/include/net/ip.h
index 0bb620702929..09cf5aebb283 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -39,11 +39,12 @@ struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;

-#define IPSKB_FORWARDED 1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE 8
-#define IPSKB_REROUTED 16
+#define IPSKB_FORWARDED BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE BIT(3)
+#define IPSKB_REROUTED BIT(4)
+#define IPSKB_DOREDIRECT BIT(5)

u16 frag_max_size;
};
@@ -180,7 +181,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 24945cefc4fd..0ffef1a38efc 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -52,6 +52,7 @@ struct netns_ipv4 {
struct inet_peer_base *peers;
struct tcpm_hash_bucket *tcp_metrics_hash;
unsigned int tcp_metrics_hash_log;
+ struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d17ed6fb2f70..5ccfe161f359 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -79,6 +79,9 @@ struct Qdisc {
struct netdev_queue *dev_queue;

struct gnet_stats_rate_est64 rate_est;
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ struct gnet_stats_queue __percpu *cpu_qstats;
+
struct Qdisc *next_sched;
struct sk_buff *gso_skb;
/*
@@ -86,15 +89,9 @@ struct Qdisc {
*/
unsigned long state;
struct sk_buff_head q;
- union {
- struct gnet_stats_basic_packed bstats;
- struct gnet_stats_basic_cpu __percpu *cpu_bstats;
- } __packed;
+ struct gnet_stats_basic_packed bstats;
unsigned int __state;
- union {
- struct gnet_stats_queue qstats;
- struct gnet_stats_queue __percpu *cpu_qstats;
- } __packed;
+ struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
int padded;
atomic_t refcnt;
diff --git a/net/core/dev.c b/net/core/dev.c
index 84409688ff39..9704a5c1103e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6990,10 +6990,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
- /* Append NAPI poll list from offline CPU. */
- if (!list_empty(&oldsd->poll_list)) {
- list_splice_init(&oldsd->poll_list, &sd->poll_list);
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ /* Append NAPI poll list from offline CPU, with one exception :
+ * process_backlog() must be called by cpu owning percpu backlog.
+ * We properly handle process_queue & input_pkt_queue later.
+ */
+ while (!list_empty(&oldsd->poll_list)) {
+ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+ struct napi_struct,
+ poll_list);
+
+ list_del_init(&napi->poll_list);
+ if (napi->poll == process_backlog)
+ napi->state = 0;
+ else
+ ____napi_schedule(sd, napi);
}

raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7004,7 +7014,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 76321ea442c3..ca82629de0b2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2770,12 +2770,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
goto errout;
}

+ if (!skb->len)
+ goto errout;
+
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return 0;
errout:
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ if (err)
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return err;
}

diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+ !skb_sec_path(skb))
ip_rt_send_redirect(skb);

skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index bc6471d4abcd..4a2a074bfb4a 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1506,23 +1506,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
/*
* Generic function to send a packet as reply to another packet.
* Used to send some TCP resets/acks so far.
- *
- * Use a fake percpu inet socket to avoid false sharing and contention.
*/
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
- .sk = {
- .__sk_common = {
- .skc_refcnt = ATOMIC_INIT(1),
- },
- .sk_wmem_alloc = ATOMIC_INIT(1),
- .sk_allocation = GFP_ATOMIC,
- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
- },
- .pmtudisc = IP_PMTUDISC_WANT,
- .uc_ttl = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
const struct ip_options *sopt,
__be32 daddr, __be32 saddr,
const struct ip_reply_arg *arg,
@@ -1532,9 +1517,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
+ struct net *net = sock_net(sk);
struct sk_buff *nskb;
- struct sock *sk;
- struct inet_sock *inet;
int err;

if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
@@ -1565,15 +1549,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
if (IS_ERR(rt))
return;

- inet = &get_cpu_var(unicast_sock);
+ inet_sk(sk)->tos = arg->tos;

- inet->tos = arg->tos;
- sk = &inet->sk;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
- sock_net_set(sk, net);
- __skb_queue_head_init(&sk->sk_write_queue);
sk->sk_sndbuf = sysctl_wmem_default;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1589,13 +1569,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
out:
- put_cpu_var(unicast_sock);
-
ip_rt_put(rt);
}

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 9daf2177dc00..046fce012da5 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -443,15 +443,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
- struct inet_sock *inet = inet_sk(sk);
-
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}

diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 5d740cccf69e..5638b179b355 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -965,8 +965,11 @@ void ping_rcv(struct sk_buff *skb)

sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
if (sk != NULL) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
pr_debug("rcv on socket %p\n", sk);
- ping_queue_rcv_skb(sk, skb_get(skb));
+ if (skb2)
+ ping_queue_rcv_skb(sk, skb2);
sock_put(sk);
return;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..d58dd0ec3e53 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,

do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+ skb->protocol == htons(ETH_P_IP) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
- flags |= RTCF_DOREDIRECT;
- do_cache = false;
- }
+ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+ IPCB(skb)->flags |= IPSKB_DOREDIRECT;

if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+ r->rtm_flags |= RTCF_DOREDIRECT;

if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index ef7089ca86e2..944ce5edbfb7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -683,7 +683,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
arg.bound_dev_if = sk->sk_bound_dev_if;

arg.tos = ip_hdr(skb)->tos;
- ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);

@@ -767,7 +768,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_unicast_reply(net, skb, &TCP_SKB_CB(skb)->header.h4.opt,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);

@@ -2426,14 +2428,39 @@ struct proto tcp_prot = {
};
EXPORT_SYMBOL(tcp_prot);

+static void __net_exit tcp_sk_exit(struct net *net)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+ free_percpu(net->ipv4.tcp_sk);
+}
+
static int __net_init tcp_sk_init(struct net *net)
{
+ int res, cpu;
+
+ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+ if (!net->ipv4.tcp_sk)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct sock *sk;
+
+ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+ IPPROTO_TCP, net);
+ if (res)
+ goto fail;
+ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+ }
net->ipv4.sysctl_tcp_ecn = 2;
return 0;
-}

-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+ tcp_sk_exit(net);
+
+ return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
s_slot = cb->args[0];
num = s_num = cb->args[1];

- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];

+ num = 0;
+
if (hlist_nulls_empty(&hslot->head))
continue;

diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2cdc38338be3..11e3945eeac7 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -383,11 +383,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin6_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
+
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
sin->sin6_family = AF_INET6;
- sin->sin6_flowinfo = 0;
- sin->sin6_port = 0;
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -398,12 +397,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
- struct inet_sock *inet = inet_sk(sk);
-
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
- sin->sin6_scope_id = 0;
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..f1c6d5e98322 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
return 0;
}

+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+ struct net *net)
+{
+ if (atomic_read(&rt->rt6i_ref) != 1) {
+ /* This route is used as dummy address holder in some split
+ * nodes. It is not leaked, but it still holds other resources,
+ * which must be released in time. So, scan ascendant nodes
+ * and replace dummy references to this route with references
+ * to still alive ones.
+ */
+ while (fn) {
+ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+ fn->leaf = fib6_find_prefix(net, fn);
+ atomic_inc(&fn->leaf->rt6i_ref);
+ rt6_release(rt);
+ }
+ fn = fn->parent;
+ }
+ /* No more references are possible at this point. */
+ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+ }
+}
+
/*
* Insert routing information in a node.
*/
@@ -807,11 +830,12 @@ add:
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
- rt6_release(iter);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
+ fib6_purge_rt(iter, fn, info->nl_net);
+ rt6_release(iter);
}

return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fib6_repair_tree(net, fn);
}

- if (atomic_read(&rt->rt6i_ref) != 1) {
- /* This route is used as dummy address holder in some split
- * nodes. It is not leaked, but it still holds other resources,
- * which must be released in time. So, scan ascendant nodes
- * and replace dummy references to this route with references
- * to still alive ones.
- */
- while (fn) {
- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
- fn->leaf = fib6_find_prefix(net, fn);
- atomic_inc(&fn->leaf->rt6i_ref);
- rt6_release(rt);
- }
- fn = fn->parent;
- }
- /* No more references are possible at this point. */
- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
- }
+ fib6_purge_rt(rt, fn, net);

inet6_rt_notify(RTM_DELROUTE, rt, info);
rt6_release(rt);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a318dd89b6d9..d02ee019382e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1150,12 +1150,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct net *net = dev_net(dst->dev);

rt6->rt6i_flags |= RTF_MODIFIED;
- if (mtu < IPV6_MIN_MTU) {
- u32 features = dst_metric(dst, RTAX_FEATURES);
+ if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(dst, RTAX_FEATURES, features);
- }
+
dst_metric_set(dst, RTAX_MTU, mtu);
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..26d06dbcc1c8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;

- sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}

diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 9f32741abb1c..371a152d9759 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:

addr_param = param.v + sizeof(sctp_addip_param_t);

- af = sctp_get_af_specific(param_type2af(param.p->type));
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
if (af == NULL)
break;

diff --git a/net/socket.c b/net/socket.c
index fe20c319a0bb..cf9ebf10c841 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -892,9 +892,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
struct sock_iocb *siocb)
{
- if (!is_sync_kiocb(iocb))
- BUG();
-
siocb->kiocb = iocb;
iocb->private = siocb;
return siocb;