Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0122-3.10.23-all-fixes.patch



Revision 2350
Mon Dec 16 10:05:27 2013 UTC by niro
File size: 118956 bytes
-linux-3.10.23
1 diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
2 index 3994f0bbeeb6..a59ee432a98f 100644
3 --- a/Documentation/networking/ip-sysctl.txt
4 +++ b/Documentation/networking/ip-sysctl.txt
5 @@ -571,9 +571,6 @@ tcp_limit_output_bytes - INTEGER
6 typical pfifo_fast qdiscs.
7 tcp_limit_output_bytes limits the number of bytes on qdisc
8 or device to reduce artificial RTT/cwnd and reduce bufferbloat.
9 - Note: For GSO/TSO enabled flows, we try to have at least two
10 - packets in flight. Reducing tcp_limit_output_bytes might also
11 - reduce the size of individual GSO packet (64KB being the max)
12 Default: 131072
13
14 tcp_challenge_ack_limit - INTEGER
15 diff --git a/Makefile b/Makefile
16 index 001188ce8241..c6d2ec5e9580 100644
17 --- a/Makefile
18 +++ b/Makefile
19 @@ -1,6 +1,6 @@
20 VERSION = 3
21 PATCHLEVEL = 10
22 -SUBLEVEL = 22
23 +SUBLEVEL = 23
24 EXTRAVERSION =
25 NAME = TOSSUG Baby Fish
26
27 diff --git a/block/blk-core.c b/block/blk-core.c
28 index acf3bf6a44d1..2c66daba44dd 100644
29 --- a/block/blk-core.c
30 +++ b/block/blk-core.c
31 @@ -741,9 +741,17 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
32
33 q->sg_reserved_size = INT_MAX;
34
35 + /* Protect q->elevator from elevator_change */
36 + mutex_lock(&q->sysfs_lock);
37 +
38 /* init elevator */
39 - if (elevator_init(q, NULL))
40 + if (elevator_init(q, NULL)) {
41 + mutex_unlock(&q->sysfs_lock);
42 return NULL;
43 + }
44 +
45 + mutex_unlock(&q->sysfs_lock);
46 +
47 return q;
48 }
49 EXPORT_SYMBOL(blk_init_allocated_queue);
50 diff --git a/block/elevator.c b/block/elevator.c
51 index 668394d18588..6d765f7e2b2b 100644
52 --- a/block/elevator.c
53 +++ b/block/elevator.c
54 @@ -186,6 +186,12 @@ int elevator_init(struct request_queue *q, char *name)
55 struct elevator_type *e = NULL;
56 int err;
57
58 + /*
59 + * q->sysfs_lock must be held to provide mutual exclusion between
60 + * elevator_switch() and here.
61 + */
62 + lockdep_assert_held(&q->sysfs_lock);
63 +
64 if (unlikely(q->elevator))
65 return 0;
66
67 @@ -959,7 +965,7 @@ fail_init:
68 /*
69 * Switch this queue to the given IO scheduler.
70 */
71 -int elevator_change(struct request_queue *q, const char *name)
72 +static int __elevator_change(struct request_queue *q, const char *name)
73 {
74 char elevator_name[ELV_NAME_MAX];
75 struct elevator_type *e;
76 @@ -981,6 +987,18 @@ int elevator_change(struct request_queue *q, const char *name)
77
78 return elevator_switch(q, e);
79 }
80 +
81 +int elevator_change(struct request_queue *q, const char *name)
82 +{
83 + int ret;
84 +
85 + /* Protect q->elevator from elevator_init() */
86 + mutex_lock(&q->sysfs_lock);
87 + ret = __elevator_change(q, name);
88 + mutex_unlock(&q->sysfs_lock);
89 +
90 + return ret;
91 +}
92 EXPORT_SYMBOL(elevator_change);
93
94 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
95 @@ -991,7 +1009,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
96 if (!q->elevator)
97 return count;
98
99 - ret = elevator_change(q, name);
100 + ret = __elevator_change(q, name);
101 if (!ret)
102 return count;
103
104 diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
105 index 0262210cad38..850246206b12 100644
106 --- a/crypto/algif_hash.c
107 +++ b/crypto/algif_hash.c
108 @@ -114,6 +114,9 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
109 struct hash_ctx *ctx = ask->private;
110 int err;
111
112 + if (flags & MSG_SENDPAGE_NOTLAST)
113 + flags |= MSG_MORE;
114 +
115 lock_sock(sk);
116 sg_init_table(ctx->sgl.sg, 1);
117 sg_set_page(ctx->sgl.sg, page, size, offset);
118 @@ -161,8 +164,6 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock,
119 else if (len < ds)
120 msg->msg_flags |= MSG_TRUNC;
121
122 - msg->msg_namelen = 0;
123 -
124 lock_sock(sk);
125 if (ctx->more) {
126 ctx->more = 0;
127 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
128 index a1c4f0a55583..a19c027b29bd 100644
129 --- a/crypto/algif_skcipher.c
130 +++ b/crypto/algif_skcipher.c
131 @@ -378,6 +378,9 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
132 struct skcipher_sg_list *sgl;
133 int err = -EINVAL;
134
135 + if (flags & MSG_SENDPAGE_NOTLAST)
136 + flags |= MSG_MORE;
137 +
138 lock_sock(sk);
139 if (!ctx->more && ctx->used)
140 goto unlock;
141 @@ -432,7 +435,6 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
142 long copied = 0;
143
144 lock_sock(sk);
145 - msg->msg_namelen = 0;
146 for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0;
147 iovlen--, iov++) {
148 unsigned long seglen = iov->iov_len;
149 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
150 index 272f00927761..1bdf104e90bb 100644
151 --- a/drivers/atm/idt77252.c
152 +++ b/drivers/atm/idt77252.c
153 @@ -3511,7 +3511,7 @@ static int init_card(struct atm_dev *dev)
154 tmp = dev_get_by_name(&init_net, tname); /* jhs: was "tmp = dev_get(tname);" */
155 if (tmp) {
156 memcpy(card->atmdev->esi, tmp->dev_addr, 6);
157 -
158 + dev_put(tmp);
159 printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
160 }
161 /*
162 diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
163 index c73fc2b74de2..18c5b9b16645 100644
164 --- a/drivers/connector/cn_proc.c
165 +++ b/drivers/connector/cn_proc.c
166 @@ -32,11 +32,23 @@
167 #include <linux/atomic.h>
168 #include <linux/pid_namespace.h>
169
170 -#include <asm/unaligned.h>
171 -
172 #include <linux/cn_proc.h>
173
174 -#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event))
175 +/*
176 + * Size of a cn_msg followed by a proc_event structure. Since the
177 + * sizeof struct cn_msg is a multiple of 4 bytes, but not 8 bytes, we
178 + * add one 4-byte word to the size here, and then start the actual
179 + * cn_msg structure 4 bytes into the stack buffer. The result is that
180 + * the immediately following proc_event structure is aligned to 8 bytes.
181 + */
182 +#define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event) + 4)
183 +
184 +/* See comment above; we test our assumption about sizeof struct cn_msg here. */
185 +static inline struct cn_msg *buffer_to_cn_msg(__u8 *buffer)
186 +{
187 + BUILD_BUG_ON(sizeof(struct cn_msg) != 20);
188 + return (struct cn_msg *)(buffer + 4);
189 +}
190
191 static atomic_t proc_event_num_listeners = ATOMIC_INIT(0);
192 static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC };
193 @@ -56,19 +68,19 @@ void proc_fork_connector(struct task_struct *task)
194 {
195 struct cn_msg *msg;
196 struct proc_event *ev;
197 - __u8 buffer[CN_PROC_MSG_SIZE];
198 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
199 struct timespec ts;
200 struct task_struct *parent;
201
202 if (atomic_read(&proc_event_num_listeners) < 1)
203 return;
204
205 - msg = (struct cn_msg *)buffer;
206 + msg = buffer_to_cn_msg(buffer);
207 ev = (struct proc_event *)msg->data;
208 memset(&ev->event_data, 0, sizeof(ev->event_data));
209 get_seq(&msg->seq, &ev->cpu);
210 ktime_get_ts(&ts); /* get high res monotonic timestamp */
211 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
212 + ev->timestamp_ns = timespec_to_ns(&ts);
213 ev->what = PROC_EVENT_FORK;
214 rcu_read_lock();
215 parent = rcu_dereference(task->real_parent);
216 @@ -91,17 +103,17 @@ void proc_exec_connector(struct task_struct *task)
217 struct cn_msg *msg;
218 struct proc_event *ev;
219 struct timespec ts;
220 - __u8 buffer[CN_PROC_MSG_SIZE];
221 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
222
223 if (atomic_read(&proc_event_num_listeners) < 1)
224 return;
225
226 - msg = (struct cn_msg *)buffer;
227 + msg = buffer_to_cn_msg(buffer);
228 ev = (struct proc_event *)msg->data;
229 memset(&ev->event_data, 0, sizeof(ev->event_data));
230 get_seq(&msg->seq, &ev->cpu);
231 ktime_get_ts(&ts); /* get high res monotonic timestamp */
232 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
233 + ev->timestamp_ns = timespec_to_ns(&ts);
234 ev->what = PROC_EVENT_EXEC;
235 ev->event_data.exec.process_pid = task->pid;
236 ev->event_data.exec.process_tgid = task->tgid;
237 @@ -117,14 +129,14 @@ void proc_id_connector(struct task_struct *task, int which_id)
238 {
239 struct cn_msg *msg;
240 struct proc_event *ev;
241 - __u8 buffer[CN_PROC_MSG_SIZE];
242 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
243 struct timespec ts;
244 const struct cred *cred;
245
246 if (atomic_read(&proc_event_num_listeners) < 1)
247 return;
248
249 - msg = (struct cn_msg *)buffer;
250 + msg = buffer_to_cn_msg(buffer);
251 ev = (struct proc_event *)msg->data;
252 memset(&ev->event_data, 0, sizeof(ev->event_data));
253 ev->what = which_id;
254 @@ -145,7 +157,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
255 rcu_read_unlock();
256 get_seq(&msg->seq, &ev->cpu);
257 ktime_get_ts(&ts); /* get high res monotonic timestamp */
258 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
259 + ev->timestamp_ns = timespec_to_ns(&ts);
260
261 memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
262 msg->ack = 0; /* not used */
263 @@ -159,17 +171,17 @@ void proc_sid_connector(struct task_struct *task)
264 struct cn_msg *msg;
265 struct proc_event *ev;
266 struct timespec ts;
267 - __u8 buffer[CN_PROC_MSG_SIZE];
268 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
269
270 if (atomic_read(&proc_event_num_listeners) < 1)
271 return;
272
273 - msg = (struct cn_msg *)buffer;
274 + msg = buffer_to_cn_msg(buffer);
275 ev = (struct proc_event *)msg->data;
276 memset(&ev->event_data, 0, sizeof(ev->event_data));
277 get_seq(&msg->seq, &ev->cpu);
278 ktime_get_ts(&ts); /* get high res monotonic timestamp */
279 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
280 + ev->timestamp_ns = timespec_to_ns(&ts);
281 ev->what = PROC_EVENT_SID;
282 ev->event_data.sid.process_pid = task->pid;
283 ev->event_data.sid.process_tgid = task->tgid;
284 @@ -186,17 +198,17 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
285 struct cn_msg *msg;
286 struct proc_event *ev;
287 struct timespec ts;
288 - __u8 buffer[CN_PROC_MSG_SIZE];
289 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
290
291 if (atomic_read(&proc_event_num_listeners) < 1)
292 return;
293
294 - msg = (struct cn_msg *)buffer;
295 + msg = buffer_to_cn_msg(buffer);
296 ev = (struct proc_event *)msg->data;
297 memset(&ev->event_data, 0, sizeof(ev->event_data));
298 get_seq(&msg->seq, &ev->cpu);
299 ktime_get_ts(&ts); /* get high res monotonic timestamp */
300 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
301 + ev->timestamp_ns = timespec_to_ns(&ts);
302 ev->what = PROC_EVENT_PTRACE;
303 ev->event_data.ptrace.process_pid = task->pid;
304 ev->event_data.ptrace.process_tgid = task->tgid;
305 @@ -221,17 +233,17 @@ void proc_comm_connector(struct task_struct *task)
306 struct cn_msg *msg;
307 struct proc_event *ev;
308 struct timespec ts;
309 - __u8 buffer[CN_PROC_MSG_SIZE];
310 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
311
312 if (atomic_read(&proc_event_num_listeners) < 1)
313 return;
314
315 - msg = (struct cn_msg *)buffer;
316 + msg = buffer_to_cn_msg(buffer);
317 ev = (struct proc_event *)msg->data;
318 memset(&ev->event_data, 0, sizeof(ev->event_data));
319 get_seq(&msg->seq, &ev->cpu);
320 ktime_get_ts(&ts); /* get high res monotonic timestamp */
321 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
322 + ev->timestamp_ns = timespec_to_ns(&ts);
323 ev->what = PROC_EVENT_COMM;
324 ev->event_data.comm.process_pid = task->pid;
325 ev->event_data.comm.process_tgid = task->tgid;
326 @@ -248,18 +260,18 @@ void proc_coredump_connector(struct task_struct *task)
327 {
328 struct cn_msg *msg;
329 struct proc_event *ev;
330 - __u8 buffer[CN_PROC_MSG_SIZE];
331 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
332 struct timespec ts;
333
334 if (atomic_read(&proc_event_num_listeners) < 1)
335 return;
336
337 - msg = (struct cn_msg *)buffer;
338 + msg = buffer_to_cn_msg(buffer);
339 ev = (struct proc_event *)msg->data;
340 memset(&ev->event_data, 0, sizeof(ev->event_data));
341 get_seq(&msg->seq, &ev->cpu);
342 ktime_get_ts(&ts); /* get high res monotonic timestamp */
343 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
344 + ev->timestamp_ns = timespec_to_ns(&ts);
345 ev->what = PROC_EVENT_COREDUMP;
346 ev->event_data.coredump.process_pid = task->pid;
347 ev->event_data.coredump.process_tgid = task->tgid;
348 @@ -275,18 +287,18 @@ void proc_exit_connector(struct task_struct *task)
349 {
350 struct cn_msg *msg;
351 struct proc_event *ev;
352 - __u8 buffer[CN_PROC_MSG_SIZE];
353 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
354 struct timespec ts;
355
356 if (atomic_read(&proc_event_num_listeners) < 1)
357 return;
358
359 - msg = (struct cn_msg *)buffer;
360 + msg = buffer_to_cn_msg(buffer);
361 ev = (struct proc_event *)msg->data;
362 memset(&ev->event_data, 0, sizeof(ev->event_data));
363 get_seq(&msg->seq, &ev->cpu);
364 ktime_get_ts(&ts); /* get high res monotonic timestamp */
365 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
366 + ev->timestamp_ns = timespec_to_ns(&ts);
367 ev->what = PROC_EVENT_EXIT;
368 ev->event_data.exit.process_pid = task->pid;
369 ev->event_data.exit.process_tgid = task->tgid;
370 @@ -312,18 +324,18 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
371 {
372 struct cn_msg *msg;
373 struct proc_event *ev;
374 - __u8 buffer[CN_PROC_MSG_SIZE];
375 + __u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
376 struct timespec ts;
377
378 if (atomic_read(&proc_event_num_listeners) < 1)
379 return;
380
381 - msg = (struct cn_msg *)buffer;
382 + msg = buffer_to_cn_msg(buffer);
383 ev = (struct proc_event *)msg->data;
384 memset(&ev->event_data, 0, sizeof(ev->event_data));
385 msg->seq = rcvd_seq;
386 ktime_get_ts(&ts); /* get high res monotonic timestamp */
387 - put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
388 + ev->timestamp_ns = timespec_to_ns(&ts);
389 ev->cpu = -1;
390 ev->what = PROC_EVENT_NONE;
391 ev->event_data.ack.err = err;
392 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
393 index 635e8f2e84f7..01ed8ac4d156 100644
394 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
395 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
396 @@ -24,6 +24,7 @@
397 * Authors: Christian König
398 */
399 #include <linux/hdmi.h>
400 +#include <linux/gcd.h>
401 #include <drm/drmP.h>
402 #include <drm/radeon_drm.h>
403 #include "radeon.h"
404 @@ -57,35 +58,57 @@ enum r600_hdmi_iec_status_bits {
405 static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
406 /* 32kHz 44.1kHz 48kHz */
407 /* Clock N CTS N CTS N CTS */
408 - { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
409 + { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
410 { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
411 { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
412 { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
413 { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
414 { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
415 - { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
416 + { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
417 { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
418 - { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
419 + { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
420 { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
421 - { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
422 };
423
424 +
425 /*
426 - * calculate CTS value if it's not found in the table
427 + * calculate CTS and N values if they are not found in the table
428 */
429 -static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
430 +static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
431 {
432 - u64 n;
433 - u32 d;
434 -
435 - if (*CTS == 0) {
436 - n = (u64)clock * (u64)N * 1000ULL;
437 - d = 128 * freq;
438 - do_div(n, d);
439 - *CTS = n;
440 - }
441 - DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
442 - N, *CTS, freq);
443 + int n, cts;
444 + unsigned long div, mul;
445 +
446 + /* Safe, but overly large values */
447 + n = 128 * freq;
448 + cts = clock * 1000;
449 +
450 + /* Smallest valid fraction */
451 + div = gcd(n, cts);
452 +
453 + n /= div;
454 + cts /= div;
455 +
456 + /*
457 + * The optimal N is 128*freq/1000. Calculate the closest larger
458 + * value that doesn't truncate any bits.
459 + */
460 + mul = ((128*freq/1000) + (n-1))/n;
461 +
462 + n *= mul;
463 + cts *= mul;
464 +
465 + /* Check that we are in spec (not always possible) */
466 + if (n < (128*freq/1500))
467 + printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
468 + if (n > (128*freq/300))
469 + printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
470 +
471 + *N = n;
472 + *CTS = cts;
473 +
474 + DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
475 + *N, *CTS, freq);
476 }
477
478 struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
479 @@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
480 struct radeon_hdmi_acr res;
481 u8 i;
482
483 - for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
484 - r600_hdmi_predefined_acr[i].clock != 0; i++)
485 - ;
486 - res = r600_hdmi_predefined_acr[i];
487 + /* Precalculated values for common clocks */
488 + for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
489 + if (r600_hdmi_predefined_acr[i].clock == clock)
490 + return r600_hdmi_predefined_acr[i];
491 + }
492
493 - /* In case some CTS are missing */
494 - r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
495 - r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
496 - r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
497 + /* And odd clocks get manually calculated */
498 + r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
499 + r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
500 + r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
501
502 return res;
503 }
504 diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
505 index 1bfd292cac8f..06eb45fa6331 100644
506 --- a/drivers/hid/hid-lg.c
507 +++ b/drivers/hid/hid-lg.c
508 @@ -47,6 +47,7 @@
509 #define DFP_RDESC_ORIG_SIZE 97
510 #define FV_RDESC_ORIG_SIZE 130
511 #define MOMO_RDESC_ORIG_SIZE 87
512 +#define MOMO2_RDESC_ORIG_SIZE 87
513
514 /* Fixed report descriptors for Logitech Driving Force (and Pro)
515 * wheel controllers
516 @@ -284,6 +285,54 @@ static __u8 momo_rdesc_fixed[] = {
517 0xC0 /* End Collection */
518 };
519
520 +static __u8 momo2_rdesc_fixed[] = {
521 +0x05, 0x01, /* Usage Page (Desktop), */
522 +0x09, 0x04, /* Usage (Joystik), */
523 +0xA1, 0x01, /* Collection (Application), */
524 +0xA1, 0x02, /* Collection (Logical), */
525 +0x95, 0x01, /* Report Count (1), */
526 +0x75, 0x0A, /* Report Size (10), */
527 +0x15, 0x00, /* Logical Minimum (0), */
528 +0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
529 +0x35, 0x00, /* Physical Minimum (0), */
530 +0x46, 0xFF, 0x03, /* Physical Maximum (1023), */
531 +0x09, 0x30, /* Usage (X), */
532 +0x81, 0x02, /* Input (Variable), */
533 +0x95, 0x0A, /* Report Count (10), */
534 +0x75, 0x01, /* Report Size (1), */
535 +0x25, 0x01, /* Logical Maximum (1), */
536 +0x45, 0x01, /* Physical Maximum (1), */
537 +0x05, 0x09, /* Usage Page (Button), */
538 +0x19, 0x01, /* Usage Minimum (01h), */
539 +0x29, 0x0A, /* Usage Maximum (0Ah), */
540 +0x81, 0x02, /* Input (Variable), */
541 +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
542 +0x09, 0x00, /* Usage (00h), */
543 +0x95, 0x04, /* Report Count (4), */
544 +0x81, 0x02, /* Input (Variable), */
545 +0x95, 0x01, /* Report Count (1), */
546 +0x75, 0x08, /* Report Size (8), */
547 +0x26, 0xFF, 0x00, /* Logical Maximum (255), */
548 +0x46, 0xFF, 0x00, /* Physical Maximum (255), */
549 +0x09, 0x01, /* Usage (01h), */
550 +0x81, 0x02, /* Input (Variable), */
551 +0x05, 0x01, /* Usage Page (Desktop), */
552 +0x09, 0x31, /* Usage (Y), */
553 +0x81, 0x02, /* Input (Variable), */
554 +0x09, 0x32, /* Usage (Z), */
555 +0x81, 0x02, /* Input (Variable), */
556 +0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
557 +0x09, 0x00, /* Usage (00h), */
558 +0x81, 0x02, /* Input (Variable), */
559 +0xC0, /* End Collection, */
560 +0xA1, 0x02, /* Collection (Logical), */
561 +0x09, 0x02, /* Usage (02h), */
562 +0x95, 0x07, /* Report Count (7), */
563 +0x91, 0x02, /* Output (Variable), */
564 +0xC0, /* End Collection, */
565 +0xC0 /* End Collection */
566 +};
567 +
568 /*
569 * Certain Logitech keyboards send in report #3 keys which are far
570 * above the logical maximum described in descriptor. This extends
571 @@ -343,6 +392,15 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
572 }
573 break;
574
575 + case USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2:
576 + if (*rsize == MOMO2_RDESC_ORIG_SIZE) {
577 + hid_info(hdev,
578 + "fixing up Logitech Momo Racing Force (Black) report descriptor\n");
579 + rdesc = momo2_rdesc_fixed;
580 + *rsize = sizeof(momo2_rdesc_fixed);
581 + }
582 + break;
583 +
584 case USB_DEVICE_ID_LOGITECH_VIBRATION_WHEEL:
585 if (*rsize == FV_RDESC_ORIG_SIZE) {
586 hid_info(hdev,
587 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
588 index fa004b112a89..25943a683d15 100644
589 --- a/drivers/iommu/intel-iommu.c
590 +++ b/drivers/iommu/intel-iommu.c
591 @@ -782,7 +782,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
592 int offset;
593
594 BUG_ON(!domain->pgd);
595 - BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
596 +
597 + if (addr_width < BITS_PER_LONG && pfn >> addr_width)
598 + /* Address beyond IOMMU's addressing capabilities. */
599 + return NULL;
600 +
601 parent = domain->pgd;
602
603 while (level > 0) {
604 diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
605 index 5b19b2d6ec2d..45011f63ad16 100644
606 --- a/drivers/iommu/intel_irq_remapping.c
607 +++ b/drivers/iommu/intel_irq_remapping.c
608 @@ -525,12 +525,13 @@ static int __init intel_irq_remapping_supported(void)
609 if (disable_irq_remap)
610 return 0;
611 if (irq_remap_broken) {
612 - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
613 - "This system BIOS has enabled interrupt remapping\n"
614 - "on a chipset that contains an erratum making that\n"
615 - "feature unstable. To maintain system stability\n"
616 - "interrupt remapping is being disabled. Please\n"
617 - "contact your BIOS vendor for an update\n");
618 + printk(KERN_WARNING
619 + "This system BIOS has enabled interrupt remapping\n"
620 + "on a chipset that contains an erratum making that\n"
621 + "feature unstable. To maintain system stability\n"
622 + "interrupt remapping is being disabled. Please\n"
623 + "contact your BIOS vendor for an update\n");
624 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
625 disable_irq_remap = 1;
626 return 0;
627 }
628 diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c
629 index baf2686aa8eb..02125e6a9109 100644
630 --- a/drivers/isdn/isdnloop/isdnloop.c
631 +++ b/drivers/isdn/isdnloop/isdnloop.c
632 @@ -1083,8 +1083,10 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
633 spin_unlock_irqrestore(&card->isdnloop_lock, flags);
634 return -ENOMEM;
635 }
636 - for (i = 0; i < 3; i++)
637 - strcpy(card->s0num[i], sdef.num[i]);
638 + for (i = 0; i < 3; i++) {
639 + strlcpy(card->s0num[i], sdef.num[i],
640 + sizeof(card->s0num[0]));
641 + }
642 break;
643 case ISDN_PTYPE_1TR6:
644 if (isdnloop_fake(card, "DRV1.04TC-1TR6-CAPI-CNS-BASIS-29.11.95",
645 @@ -1097,7 +1099,7 @@ isdnloop_start(isdnloop_card *card, isdnloop_sdef *sdefp)
646 spin_unlock_irqrestore(&card->isdnloop_lock, flags);
647 return -ENOMEM;
648 }
649 - strcpy(card->s0num[0], sdef.num[0]);
650 + strlcpy(card->s0num[0], sdef.num[0], sizeof(card->s0num[0]));
651 card->s0num[1][0] = '\0';
652 card->s0num[2][0] = '\0';
653 break;
654 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
655 index e47dcb9d1e91..5cefb479c707 100644
656 --- a/drivers/isdn/mISDN/socket.c
657 +++ b/drivers/isdn/mISDN/socket.c
658 @@ -117,7 +117,6 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
659 {
660 struct sk_buff *skb;
661 struct sock *sk = sock->sk;
662 - struct sockaddr_mISDN *maddr;
663
664 int copied, err;
665
666 @@ -135,9 +134,9 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
667 if (!skb)
668 return err;
669
670 - if (msg->msg_namelen >= sizeof(struct sockaddr_mISDN)) {
671 - msg->msg_namelen = sizeof(struct sockaddr_mISDN);
672 - maddr = (struct sockaddr_mISDN *)msg->msg_name;
673 + if (msg->msg_name) {
674 + struct sockaddr_mISDN *maddr = msg->msg_name;
675 +
676 maddr->family = AF_ISDN;
677 maddr->dev = _pms(sk)->dev->id;
678 if ((sk->sk_protocol == ISDN_P_LAPD_TE) ||
679 @@ -150,11 +149,7 @@ mISDN_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
680 maddr->sapi = _pms(sk)->ch.addr & 0xFF;
681 maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xFF;
682 }
683 - } else {
684 - if (msg->msg_namelen)
685 - printk(KERN_WARNING "%s: too small namelen %d\n",
686 - __func__, msg->msg_namelen);
687 - msg->msg_namelen = 0;
688 + msg->msg_namelen = sizeof(*maddr);
689 }
690
691 copied = skb->len + MISDN_HEADER_LEN;
692 diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
693 index dd27b0783d52..76a3d3a752d8 100644
694 --- a/drivers/mmc/card/block.c
695 +++ b/drivers/mmc/card/block.c
696 @@ -769,7 +769,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
697 * Otherwise we don't understand what happened, so abort.
698 */
699 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
700 - struct mmc_blk_request *brq, int *ecc_err)
701 + struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
702 {
703 bool prev_cmd_status_valid = true;
704 u32 status, stop_status = 0;
705 @@ -807,6 +807,16 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
706 (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
707 *ecc_err = 1;
708
709 + /* Flag General errors */
710 + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
711 + if ((status & R1_ERROR) ||
712 + (brq->stop.resp[0] & R1_ERROR)) {
713 + pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
714 + req->rq_disk->disk_name, __func__,
715 + brq->stop.resp[0], status);
716 + *gen_err = 1;
717 + }
718 +
719 /*
720 * Check the current card state. If it is in some data transfer
721 * mode, tell it to stop (and hopefully transition back to TRAN.)
722 @@ -826,6 +836,13 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
723 return ERR_ABORT;
724 if (stop_status & R1_CARD_ECC_FAILED)
725 *ecc_err = 1;
726 + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
727 + if (stop_status & R1_ERROR) {
728 + pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
729 + req->rq_disk->disk_name, __func__,
730 + stop_status);
731 + *gen_err = 1;
732 + }
733 }
734
735 /* Check for set block count errors */
736 @@ -1069,7 +1086,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
737 mmc_active);
738 struct mmc_blk_request *brq = &mq_mrq->brq;
739 struct request *req = mq_mrq->req;
740 - int ecc_err = 0;
741 + int ecc_err = 0, gen_err = 0;
742
743 /*
744 * sbc.error indicates a problem with the set block count
745 @@ -1083,7 +1100,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
746 */
747 if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
748 brq->data.error) {
749 - switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
750 + switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
751 case ERR_RETRY:
752 return MMC_BLK_RETRY;
753 case ERR_ABORT:
754 @@ -1115,6 +1132,14 @@ static int mmc_blk_err_check(struct mmc_card *card,
755 u32 status;
756 unsigned long timeout;
757
758 + /* Check stop command response */
759 + if (brq->stop.resp[0] & R1_ERROR) {
760 + pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
761 + req->rq_disk->disk_name, __func__,
762 + brq->stop.resp[0]);
763 + gen_err = 1;
764 + }
765 +
766 timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
767 do {
768 int err = get_card_status(card, &status, 5);
769 @@ -1124,6 +1149,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
770 return MMC_BLK_CMD_ERR;
771 }
772
773 + if (status & R1_ERROR) {
774 + pr_err("%s: %s: general error sending status command, card status %#x\n",
775 + req->rq_disk->disk_name, __func__,
776 + status);
777 + gen_err = 1;
778 + }
779 +
780 /* Timeout if the device never becomes ready for data
781 * and never leaves the program state.
782 */
783 @@ -1143,6 +1175,13 @@ static int mmc_blk_err_check(struct mmc_card *card,
784 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
785 }
786
787 + /* if general error occurs, retry the write operation. */
788 + if (gen_err) {
789 + pr_warn("%s: retrying write for general error\n",
790 + req->rq_disk->disk_name);
791 + return MMC_BLK_RETRY;
792 + }
793 +
794 if (brq->data.error) {
795 pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
796 req->rq_disk->disk_name, brq->data.error,
797 diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
798 index d7434e0a610e..a88d04b3a77a 100644
799 --- a/drivers/net/bonding/bond_sysfs.c
800 +++ b/drivers/net/bonding/bond_sysfs.c
801 @@ -537,8 +537,9 @@ static ssize_t bonding_store_arp_interval(struct device *d,
802 goto out;
803 }
804 if (bond->params.mode == BOND_MODE_ALB ||
805 - bond->params.mode == BOND_MODE_TLB) {
806 - pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n",
807 + bond->params.mode == BOND_MODE_TLB ||
808 + bond->params.mode == BOND_MODE_8023AD) {
809 + pr_info("%s: ARP monitoring cannot be used with ALB/TLB/802.3ad. Only MII monitoring is supported on %s.\n",
810 bond->dev->name, bond->dev->name);
811 ret = -EINVAL;
812 goto out;
813 @@ -696,6 +697,8 @@ static ssize_t bonding_store_downdelay(struct device *d,
814 int new_value, ret = count;
815 struct bonding *bond = to_bond(d);
816
817 + if (!rtnl_trylock())
818 + return restart_syscall();
819 if (!(bond->params.miimon)) {
820 pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
821 bond->dev->name);
822 @@ -729,6 +732,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
823 }
824
825 out:
826 + rtnl_unlock();
827 return ret;
828 }
829 static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR,
830 @@ -751,6 +755,8 @@ static ssize_t bonding_store_updelay(struct device *d,
831 int new_value, ret = count;
832 struct bonding *bond = to_bond(d);
833
834 + if (!rtnl_trylock())
835 + return restart_syscall();
836 if (!(bond->params.miimon)) {
837 pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
838 bond->dev->name);
839 @@ -784,6 +790,7 @@ static ssize_t bonding_store_updelay(struct device *d,
840 }
841
842 out:
843 + rtnl_unlock();
844 return ret;
845 }
846 static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR,
847 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
848 index a5c9df07a7d0..c72e214eb47c 100644
849 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
850 +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
851 @@ -264,6 +264,10 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
852 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
853 mdev->port_cnt++;
854
855 + /* Initialize time stamp mechanism */
856 + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
857 + mlx4_en_init_timestamp(mdev);
858 +
859 mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
860 if (!dev->caps.comp_pool) {
861 mdev->profile.prof[i].rx_ring_num =
862 @@ -301,10 +305,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
863 mdev->pndev[i] = NULL;
864 }
865
866 - /* Initialize time stamp mechanism */
867 - if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
868 - mlx4_en_init_timestamp(mdev);
869 -
870 return mdev;
871
872 err_mr:
873 diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
874 index 9095ff930f29..064425d3178d 100644
875 --- a/drivers/net/ethernet/realtek/8139cp.c
876 +++ b/drivers/net/ethernet/realtek/8139cp.c
877 @@ -678,9 +678,6 @@ static void cp_tx (struct cp_private *cp)
878 le32_to_cpu(txd->opts1) & 0xffff,
879 PCI_DMA_TODEVICE);
880
881 - bytes_compl += skb->len;
882 - pkts_compl++;
883 -
884 if (status & LastFrag) {
885 if (status & (TxError | TxFIFOUnder)) {
886 netif_dbg(cp, tx_err, cp->dev,
887 @@ -702,6 +699,8 @@ static void cp_tx (struct cp_private *cp)
888 netif_dbg(cp, tx_done, cp->dev,
889 "tx done, slot %d\n", tx_tail);
890 }
891 + bytes_compl += skb->len;
892 + pkts_compl++;
893 dev_kfree_skb_irq(skb);
894 }
895
896 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
897 index 7199d2a8dcf8..e9b5d77a90db 100644
898 --- a/drivers/net/ethernet/realtek/r8169.c
899 +++ b/drivers/net/ethernet/realtek/r8169.c
900 @@ -3456,6 +3456,11 @@ static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
901 rtl_writephy(tp, 0x14, 0x9065);
902 rtl_writephy(tp, 0x14, 0x1065);
903
904 + /* Check ALDPS bit, disable it if enabled */
905 + rtl_writephy(tp, 0x1f, 0x0a43);
906 + if (rtl_readphy(tp, 0x10) & 0x0004)
907 + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004);
908 +
909 rtl_writephy(tp, 0x1f, 0x0000);
910 }
911
912 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
913 index 523d6b2426a8..c70ff7dac00e 100644
914 --- a/drivers/net/macvtap.c
915 +++ b/drivers/net/macvtap.c
916 @@ -661,6 +661,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
917 const struct iovec *iv, unsigned long total_len,
918 size_t count, int noblock)
919 {
920 + int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
921 struct sk_buff *skb;
922 struct macvlan_dev *vlan;
923 unsigned long len = total_len;
924 @@ -703,6 +704,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
925
926 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
927 copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
928 + if (copylen > good_linear)
929 + copylen = good_linear;
930 linear = copylen;
931 if (iov_pages(iv, vnet_hdr_len + copylen, count)
932 <= MAX_SKB_FRAGS)
933 @@ -711,7 +714,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
934
935 if (!zerocopy) {
936 copylen = len;
937 - linear = vnet_hdr.hdr_len;
938 + if (vnet_hdr.hdr_len > good_linear)
939 + linear = good_linear;
940 + else
941 + linear = vnet_hdr.hdr_len;
942 }
943
944 skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
945 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
946 index bb07ba94c3aa..6839fb07a4c9 100644
947 --- a/drivers/net/ppp/pppoe.c
948 +++ b/drivers/net/ppp/pppoe.c
949 @@ -979,8 +979,6 @@ static int pppoe_recvmsg(struct kiocb *iocb, struct socket *sock,
950 if (error < 0)
951 goto end;
952
953 - m->msg_namelen = 0;
954 -
955 if (skb) {
956 total_len = min_t(size_t, total_len, skb->len);
957 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len);
958 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
959 index b3051052f3ad..fe3fd77821bf 100644
960 --- a/drivers/net/team/team.c
961 +++ b/drivers/net/team/team.c
962 @@ -1217,6 +1217,8 @@ static int team_user_linkup_option_get(struct team *team,
963 return 0;
964 }
965
966 +static void __team_carrier_check(struct team *team);
967 +
968 static int team_user_linkup_option_set(struct team *team,
969 struct team_gsetter_ctx *ctx)
970 {
971 @@ -1224,6 +1226,7 @@ static int team_user_linkup_option_set(struct team *team,
972
973 port->user.linkup = ctx->data.bool_val;
974 team_refresh_port_linkup(port);
975 + __team_carrier_check(port->team);
976 return 0;
977 }
978
979 @@ -1243,6 +1246,7 @@ static int team_user_linkup_en_option_set(struct team *team,
980
981 port->user.linkup_enabled = ctx->data.bool_val;
982 team_refresh_port_linkup(port);
983 + __team_carrier_check(port->team);
984 return 0;
985 }
986
987 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
988 index b18ead556994..9ef85fea1d1e 100644
989 --- a/drivers/net/tun.c
990 +++ b/drivers/net/tun.c
991 @@ -1069,6 +1069,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
992 struct sk_buff *skb;
993 size_t len = total_len, align = NET_SKB_PAD, linear;
994 struct virtio_net_hdr gso = { 0 };
995 + int good_linear;
996 int offset = 0;
997 int copylen;
998 bool zerocopy = false;
999 @@ -1109,12 +1110,16 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1000 return -EINVAL;
1001 }
1002
1003 + good_linear = SKB_MAX_HEAD(align);
1004 +
1005 if (msg_control) {
1006 /* There are 256 bytes to be copied in skb, so there is
1007 * enough room for skb expand head in case it is used.
1008 * The rest of the buffer is mapped from userspace.
1009 */
1010 copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1011 + if (copylen > good_linear)
1012 + copylen = good_linear;
1013 linear = copylen;
1014 if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1015 zerocopy = true;
1016 @@ -1122,7 +1127,10 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1017
1018 if (!zerocopy) {
1019 copylen = len;
1020 - linear = gso.hdr_len;
1021 + if (gso.hdr_len > good_linear)
1022 + linear = good_linear;
1023 + else
1024 + linear = gso.hdr_len;
1025 }
1026
1027 skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1028 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
1029 index 06ee82f557d4..28f16ed6422d 100644
1030 --- a/drivers/net/usb/usbnet.c
1031 +++ b/drivers/net/usb/usbnet.c
1032 @@ -206,9 +206,6 @@ static void intr_complete (struct urb *urb)
1033 break;
1034 }
1035
1036 - if (!netif_running (dev->net))
1037 - return;
1038 -
1039 status = usb_submit_urb (urb, GFP_ATOMIC);
1040 if (status != 0)
1041 netif_err(dev, timer, dev->net,
1042 diff --git a/drivers/video/kyro/fbdev.c b/drivers/video/kyro/fbdev.c
1043 index 6157f74ac600..ec7fc87fa5ab 100644
1044 --- a/drivers/video/kyro/fbdev.c
1045 +++ b/drivers/video/kyro/fbdev.c
1046 @@ -625,15 +625,15 @@ static int kyrofb_ioctl(struct fb_info *info,
1047 }
1048 break;
1049 case KYRO_IOCTL_UVSTRIDE:
1050 - if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(unsigned long)))
1051 + if (copy_to_user(argp, &deviceInfo.ulOverlayUVStride, sizeof(deviceInfo.ulOverlayUVStride)))
1052 return -EFAULT;
1053 break;
1054 case KYRO_IOCTL_STRIDE:
1055 - if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(unsigned long)))
1056 + if (copy_to_user(argp, &deviceInfo.ulOverlayStride, sizeof(deviceInfo.ulOverlayStride)))
1057 return -EFAULT;
1058 break;
1059 case KYRO_IOCTL_OVERLAY_OFFSET:
1060 - if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(unsigned long)))
1061 + if (copy_to_user(argp, &deviceInfo.ulOverlayOffset, sizeof(deviceInfo.ulOverlayOffset)))
1062 return -EFAULT;
1063 break;
1064 }
1065 diff --git a/fs/aio.c b/fs/aio.c
1066 index 2bbcacf74d0c..ebd06fd0de89 100644
1067 --- a/fs/aio.c
1068 +++ b/fs/aio.c
1069 @@ -423,10 +423,12 @@ static void kill_ioctx_rcu(struct rcu_head *head)
1070 * when the processes owning a context have all exited to encourage
1071 * the rapid destruction of the kioctx.
1072 */
1073 -static void kill_ioctx(struct kioctx *ctx)
1074 +static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
1075 {
1076 if (!atomic_xchg(&ctx->dead, 1)) {
1077 + spin_lock(&mm->ioctx_lock);
1078 hlist_del_rcu(&ctx->list);
1079 + spin_unlock(&mm->ioctx_lock);
1080
1081 /*
1082 * It'd be more correct to do this in free_ioctx(), after all
1083 @@ -494,7 +496,7 @@ void exit_aio(struct mm_struct *mm)
1084 */
1085 ctx->mmap_size = 0;
1086
1087 - kill_ioctx(ctx);
1088 + kill_ioctx(mm, ctx);
1089 }
1090 }
1091
1092 @@ -852,7 +854,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1093 if (!IS_ERR(ioctx)) {
1094 ret = put_user(ioctx->user_id, ctxp);
1095 if (ret)
1096 - kill_ioctx(ioctx);
1097 + kill_ioctx(current->mm, ioctx);
1098 put_ioctx(ioctx);
1099 }
1100
1101 @@ -870,7 +872,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1102 {
1103 struct kioctx *ioctx = lookup_ioctx(ctx);
1104 if (likely(NULL != ioctx)) {
1105 - kill_ioctx(ioctx);
1106 + kill_ioctx(current->mm, ioctx);
1107 put_ioctx(ioctx);
1108 return 0;
1109 }
1110 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
1111 index 5e999680094a..ca01d830e989 100644
1112 --- a/fs/xfs/xfs_ioctl.c
1113 +++ b/fs/xfs/xfs_ioctl.c
1114 @@ -1612,6 +1612,12 @@ xfs_file_ioctl(
1115 case XFS_IOC_FREE_EOFBLOCKS: {
1116 struct xfs_eofblocks eofb;
1117
1118 + if (!capable(CAP_SYS_ADMIN))
1119 + return -EPERM;
1120 +
1121 + if (mp->m_flags & XFS_MOUNT_RDONLY)
1122 + return -XFS_ERROR(EROFS);
1123 +
1124 if (copy_from_user(&eofb, arg, sizeof(eofb)))
1125 return -XFS_ERROR(EFAULT);
1126
1127 diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
1128 index 963d71431388..ae1193bcf074 100644
1129 --- a/include/linux/clockchips.h
1130 +++ b/include/linux/clockchips.h
1131 @@ -30,6 +30,7 @@ enum clock_event_nofitiers {
1132 #include <linux/notifier.h>
1133
1134 struct clock_event_device;
1135 +struct module;
1136
1137 /* Clock event mode commands */
1138 enum clock_event_mode {
1139 @@ -83,6 +84,7 @@ enum clock_event_mode {
1140 * @irq: IRQ number (only for non CPU local devices)
1141 * @cpumask: cpumask to indicate for which CPUs this device works
1142 * @list: list head for the management code
1143 + * @owner: module reference
1144 */
1145 struct clock_event_device {
1146 void (*event_handler)(struct clock_event_device *);
1147 @@ -112,6 +114,7 @@ struct clock_event_device {
1148 int irq;
1149 const struct cpumask *cpumask;
1150 struct list_head list;
1151 + struct module *owner;
1152 } ____cacheline_aligned;
1153
1154 /*
1155 @@ -150,7 +153,6 @@ extern void clockevents_exchange_device(struct clock_event_device *old,
1156 struct clock_event_device *new);
1157 extern void clockevents_set_mode(struct clock_event_device *dev,
1158 enum clock_event_mode mode);
1159 -extern int clockevents_register_notifier(struct notifier_block *nb);
1160 extern int clockevents_program_event(struct clock_event_device *dev,
1161 ktime_t expires, bool force);
1162
1163 diff --git a/include/linux/net.h b/include/linux/net.h
1164 index 99c9f0c103c2..0c4ae5d94de9 100644
1165 --- a/include/linux/net.h
1166 +++ b/include/linux/net.h
1167 @@ -163,6 +163,14 @@ struct proto_ops {
1168 #endif
1169 int (*sendmsg) (struct kiocb *iocb, struct socket *sock,
1170 struct msghdr *m, size_t total_len);
1171 + /* Notes for implementing recvmsg:
1172 + * ===============================
1173 + * msg->msg_namelen should get updated by the recvmsg handlers
1174 + * iff msg_name != NULL. It is by default 0 to prevent
1175 + * returning uninitialized memory to user space. The recvfrom
1176 + * handlers can assume that msg.msg_name is either NULL or has
1177 + * a minimum size of sizeof(struct sockaddr_storage).
1178 + */
1179 int (*recvmsg) (struct kiocb *iocb, struct socket *sock,
1180 struct msghdr *m, size_t total_len,
1181 int flags);
1182 diff --git a/include/linux/random.h b/include/linux/random.h
1183 index 6312dd9ba449..bf9085e89fb5 100644
1184 --- a/include/linux/random.h
1185 +++ b/include/linux/random.h
1186 @@ -50,9 +50,9 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
1187 {
1188 u32 i = (seed >> 32) ^ (seed << 10) ^ seed;
1189
1190 - state->s1 = __seed(i, 1);
1191 - state->s2 = __seed(i, 7);
1192 - state->s3 = __seed(i, 15);
1193 + state->s1 = __seed(i, 2);
1194 + state->s2 = __seed(i, 8);
1195 + state->s3 = __seed(i, 16);
1196 }
1197
1198 #ifdef CONFIG_ARCH_RANDOM
1199 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1200 index eaf602781635..74db47ec09ea 100644
1201 --- a/include/linux/skbuff.h
1202 +++ b/include/linux/skbuff.h
1203 @@ -331,11 +331,6 @@ typedef unsigned int sk_buff_data_t;
1204 typedef unsigned char *sk_buff_data_t;
1205 #endif
1206
1207 -#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
1208 - defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
1209 -#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
1210 -#endif
1211 -
1212 /**
1213 * struct sk_buff - socket buffer
1214 * @next: Next buffer in list
1215 @@ -368,7 +363,6 @@ typedef unsigned char *sk_buff_data_t;
1216 * @protocol: Packet protocol from driver
1217 * @destructor: Destruct function
1218 * @nfct: Associated connection, if any
1219 - * @nfct_reasm: netfilter conntrack re-assembly pointer
1220 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
1221 * @skb_iif: ifindex of device we arrived on
1222 * @tc_index: Traffic control index
1223 @@ -455,9 +449,6 @@ struct sk_buff {
1224 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1225 struct nf_conntrack *nfct;
1226 #endif
1227 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
1228 - struct sk_buff *nfct_reasm;
1229 -#endif
1230 #ifdef CONFIG_BRIDGE_NETFILTER
1231 struct nf_bridge_info *nf_bridge;
1232 #endif
1233 @@ -2700,18 +2691,6 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
1234 atomic_inc(&nfct->use);
1235 }
1236 #endif
1237 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
1238 -static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
1239 -{
1240 - if (skb)
1241 - atomic_inc(&skb->users);
1242 -}
1243 -static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
1244 -{
1245 - if (skb)
1246 - kfree_skb(skb);
1247 -}
1248 -#endif
1249 #ifdef CONFIG_BRIDGE_NETFILTER
1250 static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
1251 {
1252 @@ -2730,10 +2709,6 @@ static inline void nf_reset(struct sk_buff *skb)
1253 nf_conntrack_put(skb->nfct);
1254 skb->nfct = NULL;
1255 #endif
1256 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
1257 - nf_conntrack_put_reasm(skb->nfct_reasm);
1258 - skb->nfct_reasm = NULL;
1259 -#endif
1260 #ifdef CONFIG_BRIDGE_NETFILTER
1261 nf_bridge_put(skb->nf_bridge);
1262 skb->nf_bridge = NULL;
1263 @@ -2755,10 +2730,6 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1264 nf_conntrack_get(src->nfct);
1265 dst->nfctinfo = src->nfctinfo;
1266 #endif
1267 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
1268 - dst->nfct_reasm = src->nfct_reasm;
1269 - nf_conntrack_get_reasm(src->nfct_reasm);
1270 -#endif
1271 #ifdef CONFIG_BRIDGE_NETFILTER
1272 dst->nf_bridge = src->nf_bridge;
1273 nf_bridge_get(src->nf_bridge);
1274 @@ -2770,9 +2741,6 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
1275 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
1276 nf_conntrack_put(dst->nfct);
1277 #endif
1278 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
1279 - nf_conntrack_put_reasm(dst->nfct_reasm);
1280 -#endif
1281 #ifdef CONFIG_BRIDGE_NETFILTER
1282 nf_bridge_put(dst->nf_bridge);
1283 #endif
1284 diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
1285 index bd6cf61142be..8c0f6cb2a603 100644
1286 --- a/include/linux/vm_event_item.h
1287 +++ b/include/linux/vm_event_item.h
1288 @@ -39,6 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
1289 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
1290 #ifdef CONFIG_NUMA_BALANCING
1291 NUMA_PTE_UPDATES,
1292 + NUMA_HUGE_PTE_UPDATES,
1293 NUMA_HINT_FAULTS,
1294 NUMA_HINT_FAULTS_LOCAL,
1295 NUMA_PAGE_MIGRATE,
1296 diff --git a/include/net/ip.h b/include/net/ip.h
1297 index edfa59174d9a..788f1d8a796f 100644
1298 --- a/include/net/ip.h
1299 +++ b/include/net/ip.h
1300 @@ -454,7 +454,7 @@ extern int compat_ip_getsockopt(struct sock *sk, int level,
1301 int optname, char __user *optval, int __user *optlen);
1302 extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
1303
1304 -extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
1305 +extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
1306 extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
1307 __be16 port, u32 info, u8 *payload);
1308 extern void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
1309 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
1310 index 4c062ccff9aa..f0c13a386bf3 100644
1311 --- a/include/net/ip_vs.h
1312 +++ b/include/net/ip_vs.h
1313 @@ -109,7 +109,6 @@ extern int ip_vs_conn_tab_size;
1314 struct ip_vs_iphdr {
1315 __u32 len; /* IPv4 simply where L4 starts
1316 IPv6 where L4 Transport Header starts */
1317 - __u32 thoff_reasm; /* Transport Header Offset in nfct_reasm skb */
1318 __u16 fragoffs; /* IPv6 fragment offset, 0 if first frag (or not frag)*/
1319 __s16 protocol;
1320 __s32 flags;
1321 @@ -117,34 +116,12 @@ struct ip_vs_iphdr {
1322 union nf_inet_addr daddr;
1323 };
1324
1325 -/* Dependency to module: nf_defrag_ipv6 */
1326 -#if defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
1327 -static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
1328 -{
1329 - return skb->nfct_reasm;
1330 -}
1331 -static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
1332 - int len, void *buffer,
1333 - const struct ip_vs_iphdr *ipvsh)
1334 -{
1335 - if (unlikely(ipvsh->fragoffs && skb_nfct_reasm(skb)))
1336 - return skb_header_pointer(skb_nfct_reasm(skb),
1337 - ipvsh->thoff_reasm, len, buffer);
1338 -
1339 - return skb_header_pointer(skb, offset, len, buffer);
1340 -}
1341 -#else
1342 -static inline struct sk_buff *skb_nfct_reasm(const struct sk_buff *skb)
1343 -{
1344 - return NULL;
1345 -}
1346 static inline void *frag_safe_skb_hp(const struct sk_buff *skb, int offset,
1347 int len, void *buffer,
1348 const struct ip_vs_iphdr *ipvsh)
1349 {
1350 return skb_header_pointer(skb, offset, len, buffer);
1351 }
1352 -#endif
1353
1354 static inline void
1355 ip_vs_fill_ip4hdr(const void *nh, struct ip_vs_iphdr *iphdr)
1356 @@ -171,19 +148,12 @@ ip_vs_fill_iph_skb(int af, const struct sk_buff *skb, struct ip_vs_iphdr *iphdr)
1357 (struct ipv6hdr *)skb_network_header(skb);
1358 iphdr->saddr.in6 = iph->saddr;
1359 iphdr->daddr.in6 = iph->daddr;
1360 - /* ipv6_find_hdr() updates len, flags, thoff_reasm */
1361 - iphdr->thoff_reasm = 0;
1362 + /* ipv6_find_hdr() updates len, flags */
1363 iphdr->len = 0;
1364 iphdr->flags = 0;
1365 iphdr->protocol = ipv6_find_hdr(skb, &iphdr->len, -1,
1366 &iphdr->fragoffs,
1367 &iphdr->flags);
1368 - /* get proto from re-assembled packet and it's offset */
1369 - if (skb_nfct_reasm(skb))
1370 - iphdr->protocol = ipv6_find_hdr(skb_nfct_reasm(skb),
1371 - &iphdr->thoff_reasm,
1372 - -1, NULL, NULL);
1373 -
1374 } else
1375 #endif
1376 {
1377 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1378 index 0810aa57c780..9e093fc33dab 100644
1379 --- a/include/net/ipv6.h
1380 +++ b/include/net/ipv6.h
1381 @@ -793,8 +793,10 @@ extern int compat_ipv6_getsockopt(struct sock *sk,
1382 extern int ip6_datagram_connect(struct sock *sk,
1383 struct sockaddr *addr, int addr_len);
1384
1385 -extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len);
1386 -extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len);
1387 +extern int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
1388 + int *addr_len);
1389 +extern int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
1390 + int *addr_len);
1391 extern void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
1392 u32 info, u8 *payload);
1393 extern void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info);
1394 diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
1395 index fd79c9a1779d..17920d847b40 100644
1396 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h
1397 +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h
1398 @@ -6,10 +6,7 @@ extern void nf_defrag_ipv6_enable(void);
1399 extern int nf_ct_frag6_init(void);
1400 extern void nf_ct_frag6_cleanup(void);
1401 extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user);
1402 -extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
1403 - struct net_device *in,
1404 - struct net_device *out,
1405 - int (*okfn)(struct sk_buff *));
1406 +extern void nf_ct_frag6_consume_orig(struct sk_buff *skb);
1407
1408 struct inet_frags_ctl;
1409
1410 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
1411 index 6a23c6c556c3..9df0e3b19f09 100644
1412 --- a/kernel/time/clockevents.c
1413 +++ b/kernel/time/clockevents.c
1414 @@ -15,7 +15,6 @@
1415 #include <linux/hrtimer.h>
1416 #include <linux/init.h>
1417 #include <linux/module.h>
1418 -#include <linux/notifier.h>
1419 #include <linux/smp.h>
1420
1421 #include "tick-internal.h"
1422 @@ -23,10 +22,6 @@
1423 /* The registered clock event devices */
1424 static LIST_HEAD(clockevent_devices);
1425 static LIST_HEAD(clockevents_released);
1426 -
1427 -/* Notification for clock events */
1428 -static RAW_NOTIFIER_HEAD(clockevents_chain);
1429 -
1430 /* Protection for the above */
1431 static DEFINE_RAW_SPINLOCK(clockevents_lock);
1432
1433 @@ -267,30 +262,6 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
1434 return (rc && force) ? clockevents_program_min_delta(dev) : rc;
1435 }
1436
1437 -/**
1438 - * clockevents_register_notifier - register a clock events change listener
1439 - */
1440 -int clockevents_register_notifier(struct notifier_block *nb)
1441 -{
1442 - unsigned long flags;
1443 - int ret;
1444 -
1445 - raw_spin_lock_irqsave(&clockevents_lock, flags);
1446 - ret = raw_notifier_chain_register(&clockevents_chain, nb);
1447 - raw_spin_unlock_irqrestore(&clockevents_lock, flags);
1448 -
1449 - return ret;
1450 -}
1451 -
1452 -/*
1453 - * Notify about a clock event change. Called with clockevents_lock
1454 - * held.
1455 - */
1456 -static void clockevents_do_notify(unsigned long reason, void *dev)
1457 -{
1458 - raw_notifier_call_chain(&clockevents_chain, reason, dev);
1459 -}
1460 -
1461 /*
1462 * Called after a notify add to make devices available which were
1463 * released from the notifier call.
1464 @@ -304,7 +275,7 @@ static void clockevents_notify_released(void)
1465 struct clock_event_device, list);
1466 list_del(&dev->list);
1467 list_add(&dev->list, &clockevent_devices);
1468 - clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
1469 + tick_check_new_device(dev);
1470 }
1471 }
1472
1473 @@ -325,7 +296,7 @@ void clockevents_register_device(struct clock_event_device *dev)
1474 raw_spin_lock_irqsave(&clockevents_lock, flags);
1475
1476 list_add(&dev->list, &clockevent_devices);
1477 - clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
1478 + tick_check_new_device(dev);
1479 clockevents_notify_released();
1480
1481 raw_spin_unlock_irqrestore(&clockevents_lock, flags);
1482 @@ -421,6 +392,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
1483 * released list and do a notify add later.
1484 */
1485 if (old) {
1486 + module_put(old->owner);
1487 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
1488 list_del(&old->list);
1489 list_add(&old->list, &clockevents_released);
1490 @@ -468,7 +440,7 @@ void clockevents_notify(unsigned long reason, void *arg)
1491 int cpu;
1492
1493 raw_spin_lock_irqsave(&clockevents_lock, flags);
1494 - clockevents_do_notify(reason, arg);
1495 + tick_notify(reason, arg);
1496
1497 switch (reason) {
1498 case CLOCK_EVT_NOTIFY_CPU_DEAD:
1499 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
1500 index bb2215174f05..af8d1d4f3d55 100644
1501 --- a/kernel/time/ntp.c
1502 +++ b/kernel/time/ntp.c
1503 @@ -475,6 +475,7 @@ static void sync_cmos_clock(struct work_struct *work)
1504 * called as close as possible to 500 ms before the new second starts.
1505 * This code is run on a timer. If the clock is set, that timer
1506 * may not expire at the correct time. Thus, we adjust...
1507 + * We want the clock to be within a couple of ticks from the target.
1508 */
1509 if (!ntp_synced()) {
1510 /*
1511 @@ -485,7 +486,7 @@ static void sync_cmos_clock(struct work_struct *work)
1512 }
1513
1514 getnstimeofday(&now);
1515 - if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
1516 + if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
1517 struct timespec adjust = now;
1518
1519 fail = -ENODEV;
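
The ntp.c hunk widens the window in which sync_cmos_clock() will actually write the RTC from half a tick to five ticks around the 500 ms mark. A quick calculation of what that window is for common HZ values, assuming tick_nsec is roughly NSEC_PER_SEC / HZ (a sketch, not kernel code):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		long hz = hz_values[i];
		long tick_nsec = NSEC_PER_SEC / hz;
		/* Old acceptance window: +/- tick_nsec / 2 around 500 ms.
		 * New acceptance window: +/- tick_nsec * 5 around 500 ms. */
		printf("HZ=%4ld  old +/- %7.3f ms  new +/- %7.3f ms\n",
		       hz, tick_nsec / 2 / 1e6, tick_nsec * 5 / 1e6);
	}
	return 0;
}
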
1520 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
1521 index 297b90b5277e..52d4827cf2d4 100644
1522 --- a/kernel/time/tick-broadcast.c
1523 +++ b/kernel/time/tick-broadcast.c
1524 @@ -19,6 +19,7 @@
1525 #include <linux/profile.h>
1526 #include <linux/sched.h>
1527 #include <linux/smp.h>
1528 +#include <linux/module.h>
1529
1530 #include "tick-internal.h"
1531
1532 @@ -65,17 +66,34 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc)
1533 /*
1534 * Check, if the device can be utilized as broadcast device:
1535 */
1536 -int tick_check_broadcast_device(struct clock_event_device *dev)
1537 +static bool tick_check_broadcast_device(struct clock_event_device *curdev,
1538 + struct clock_event_device *newdev)
1539 +{
1540 + if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
1541 + (newdev->features & CLOCK_EVT_FEAT_C3STOP))
1542 + return false;
1543 +
1544 + if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
1545 + !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
1546 + return false;
1547 +
1548 + return !curdev || newdev->rating > curdev->rating;
1549 +}
1550 +
1551 +/*
1552 + * Conditionally install/replace broadcast device
1553 + */
1554 +void tick_install_broadcast_device(struct clock_event_device *dev)
1555 {
1556 struct clock_event_device *cur = tick_broadcast_device.evtdev;
1557
1558 - if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
1559 - (tick_broadcast_device.evtdev &&
1560 - tick_broadcast_device.evtdev->rating >= dev->rating) ||
1561 - (dev->features & CLOCK_EVT_FEAT_C3STOP))
1562 - return 0;
1563 + if (!tick_check_broadcast_device(cur, dev))
1564 + return;
1565 +
1566 + if (!try_module_get(dev->owner))
1567 + return;
1568
1569 - clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
1570 + clockevents_exchange_device(cur, dev);
1571 if (cur)
1572 cur->event_handler = clockevents_handle_noop;
1573 tick_broadcast_device.evtdev = dev;
1574 @@ -91,7 +109,6 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
1575 */
1576 if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
1577 tick_clock_notify();
1578 - return 1;
1579 }
1580
1581 /*
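
tick_check_broadcast_device() is now a pure predicate over the current and the newly registered device: dummy devices and devices that stop in deep C-states are rejected, oneshot mode requires a oneshot-capable device, and otherwise the higher-rated device wins. A standalone sketch of that decision, with the feature flags reduced to plain booleans (struct and field names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	const char *name;
	int rating;
	bool dummy;	/* CLOCK_EVT_FEAT_DUMMY   */
	bool c3stop;	/* CLOCK_EVT_FEAT_C3STOP  */
	bool oneshot;	/* CLOCK_EVT_FEAT_ONESHOT */
};

static bool check_broadcast(const struct dev *cur, const struct dev *new,
			    bool oneshot_mode)
{
	/* Dummy devices and devices that stop in deep C-states cannot broadcast. */
	if (new->dummy || new->c3stop)
		return false;
	/* In oneshot mode a periodic-only device is useless. */
	if (oneshot_mode && !new->oneshot)
		return false;
	/* Otherwise take it if there is nothing yet or it is rated higher. */
	return !cur || new->rating > cur->rating;
}

int main(void)
{
	struct dev a = { "bc0",  50, false, false, true };
	struct dev b = { "bc1", 100, false, true,  true };	/* C3STOP set */

	printf("install bc0 over nothing: %d\n", check_broadcast(NULL, &a, true));
	printf("install bc1 over bc0:     %d\n", check_broadcast(&a, &b, true));
	return 0;
}
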
1582 diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
1583 index 7ce5e5a4a4c5..086216c433fa 100644
1584 --- a/kernel/time/tick-common.c
1585 +++ b/kernel/time/tick-common.c
1586 @@ -18,6 +18,7 @@
1587 #include <linux/percpu.h>
1588 #include <linux/profile.h>
1589 #include <linux/sched.h>
1590 +#include <linux/module.h>
1591
1592 #include <asm/irq_regs.h>
1593
1594 @@ -206,14 +207,50 @@ static void tick_setup_device(struct tick_device *td,
1595 tick_setup_oneshot(newdev, handler, next_event);
1596 }
1597
1598 +static bool tick_check_percpu(struct clock_event_device *curdev,
1599 + struct clock_event_device *newdev, int cpu)
1600 +{
1601 + if (!cpumask_test_cpu(cpu, newdev->cpumask))
1602 + return false;
1603 + if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
1604 + return true;
1605 + /* Check if irq affinity can be set */
1606 + if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
1607 + return false;
1608 + /* Prefer an existing cpu local device */
1609 + if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
1610 + return false;
1611 + return true;
1612 +}
1613 +
1614 +static bool tick_check_preferred(struct clock_event_device *curdev,
1615 + struct clock_event_device *newdev)
1616 +{
1617 + /* Prefer oneshot capable device */
1618 + if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
1619 + if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
1620 + return false;
1621 + if (tick_oneshot_mode_active())
1622 + return false;
1623 + }
1624 +
1625 + /*
1626 + * Use the higher rated one, but prefer a CPU local device with a lower
1627 + * rating than a non-CPU local device
1628 + */
1629 + return !curdev ||
1630 + newdev->rating > curdev->rating ||
1631 + !cpumask_equal(curdev->cpumask, newdev->cpumask);
1632 +}
1633 +
1634 /*
1635 * Check, if the new registered device should be used.
1636 */
1637 -static int tick_check_new_device(struct clock_event_device *newdev)
1638 +void tick_check_new_device(struct clock_event_device *newdev)
1639 {
1640 struct clock_event_device *curdev;
1641 struct tick_device *td;
1642 - int cpu, ret = NOTIFY_OK;
1643 + int cpu;
1644 unsigned long flags;
1645
1646 raw_spin_lock_irqsave(&tick_device_lock, flags);
1647 @@ -226,40 +263,15 @@ static int tick_check_new_device(struct clock_event_device *newdev)
1648 curdev = td->evtdev;
1649
1650 /* cpu local device ? */
1651 - if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
1652 -
1653 - /*
1654 - * If the cpu affinity of the device interrupt can not
1655 - * be set, ignore it.
1656 - */
1657 - if (!irq_can_set_affinity(newdev->irq))
1658 - goto out_bc;
1659 + if (!tick_check_percpu(curdev, newdev, cpu))
1660 + goto out_bc;
1661
1662 - /*
1663 - * If we have a cpu local device already, do not replace it
1664 - * by a non cpu local device
1665 - */
1666 - if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
1667 - goto out_bc;
1668 - }
1669 + /* Preference decision */
1670 + if (!tick_check_preferred(curdev, newdev))
1671 + goto out_bc;
1672
1673 - /*
1674 - * If we have an active device, then check the rating and the oneshot
1675 - * feature.
1676 - */
1677 - if (curdev) {
1678 - /*
1679 - * Prefer one shot capable devices !
1680 - */
1681 - if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
1682 - !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
1683 - goto out_bc;
1684 - /*
1685 - * Check the rating
1686 - */
1687 - if (curdev->rating >= newdev->rating)
1688 - goto out_bc;
1689 - }
1690 + if (!try_module_get(newdev->owner))
1691 + return;
1692
1693 /*
1694 * Replace the eventually existing device by the new
1695 @@ -276,18 +288,14 @@ static int tick_check_new_device(struct clock_event_device *newdev)
1696 tick_oneshot_notify();
1697
1698 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
1699 - return NOTIFY_STOP;
1700 + return;
1701
1702 out_bc:
1703 /*
1704 * Can the new device be used as a broadcast device ?
1705 */
1706 - if (tick_check_broadcast_device(newdev))
1707 - ret = NOTIFY_STOP;
1708 -
1709 + tick_install_broadcast_device(newdev);
1710 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
1711 -
1712 - return ret;
1713 }
1714
1715 /*
1716 @@ -361,17 +369,10 @@ static void tick_resume(void)
1717 raw_spin_unlock_irqrestore(&tick_device_lock, flags);
1718 }
1719
1720 -/*
1721 - * Notification about clock event devices
1722 - */
1723 -static int tick_notify(struct notifier_block *nb, unsigned long reason,
1724 - void *dev)
1725 +void tick_notify(unsigned long reason, void *dev)
1726 {
1727 switch (reason) {
1728
1729 - case CLOCK_EVT_NOTIFY_ADD:
1730 - return tick_check_new_device(dev);
1731 -
1732 case CLOCK_EVT_NOTIFY_BROADCAST_ON:
1733 case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
1734 case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
1735 @@ -405,21 +406,12 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
1736 default:
1737 break;
1738 }
1739 -
1740 - return NOTIFY_OK;
1741 }
1742
1743 -static struct notifier_block tick_notifier = {
1744 - .notifier_call = tick_notify,
1745 -};
1746 -
1747 /**
1748 * tick_init - initialize the tick control
1749 - *
1750 - * Register the notifier with the clockevents framework
1751 */
1752 void __init tick_init(void)
1753 {
1754 - clockevents_register_notifier(&tick_notifier);
1755 tick_broadcast_init();
1756 }
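
tick_check_new_device() is split into two helpers: tick_check_percpu() rejects a device that cannot serve this CPU (not in its cpumask, IRQ affinity not settable, or an existing strictly per-cpu device already present), and tick_check_preferred() then prefers oneshot capability and higher rating, while still letting a differently targeted per-cpu device displace a shared one. A compact model of the combined decision, with the cpumask tests collapsed into booleans (an approximation, not the kernel logic verbatim):

#include <stdbool.h>
#include <stdio.h>

struct tickdev {
	int rating;
	bool oneshot;		/* CLOCK_EVT_FEAT_ONESHOT             */
	bool serves_this_cpu;	/* cpu is in the device's cpumask     */
	bool percpu;		/* cpumask equals cpumask_of(cpu)     */
	bool irq_affinity_ok;	/* irq < 0 or affinity can be steered */
};

static bool check_percpu(const struct tickdev *cur, const struct tickdev *new)
{
	if (!new->serves_this_cpu)
		return false;
	if (new->percpu)
		return true;
	/* Shared device: usable only if its irq can be steered to this cpu
	 * and there is no existing per-cpu device to keep. */
	if (!new->irq_affinity_ok)
		return false;
	return !(cur && cur->percpu);
}

static bool check_preferred(const struct tickdev *cur, const struct tickdev *new,
			    bool oneshot_mode_active)
{
	/* Never replace a oneshot capable device by a periodic-only one. */
	if (!new->oneshot) {
		if (cur && cur->oneshot)
			return false;
		if (oneshot_mode_active)
			return false;
	}
	/* Higher rating wins, but a differently targeted (per-cpu) device
	 * may win even with a lower rating. */
	return !cur || new->rating > cur->rating || new->percpu != cur->percpu;
}

int main(void)
{
	struct tickdev shared = { 100, true, true, false, true };
	struct tickdev local  = {  80, true, true, true,  true };

	/* A lower rated but cpu-local device still replaces a shared one. */
	if (check_percpu(&shared, &local) && check_preferred(&shared, &local, true))
		puts("install cpu-local device");
	return 0;
}
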
1757 diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
1758 index f0299eae4602..60742fe6f63d 100644
1759 --- a/kernel/time/tick-internal.h
1760 +++ b/kernel/time/tick-internal.h
1761 @@ -18,6 +18,8 @@ extern int tick_do_timer_cpu __read_mostly;
1762
1763 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
1764 extern void tick_handle_periodic(struct clock_event_device *dev);
1765 +extern void tick_notify(unsigned long reason, void *dev);
1766 +extern void tick_check_new_device(struct clock_event_device *dev);
1767
1768 extern void clockevents_shutdown(struct clock_event_device *dev);
1769
1770 @@ -90,7 +92,7 @@ static inline bool tick_broadcast_oneshot_available(void) { return false; }
1771 */
1772 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
1773 extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
1774 -extern int tick_check_broadcast_device(struct clock_event_device *dev);
1775 +extern void tick_install_broadcast_device(struct clock_event_device *dev);
1776 extern int tick_is_broadcast_device(struct clock_event_device *dev);
1777 extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
1778 extern void tick_shutdown_broadcast(unsigned int *cpup);
1779 @@ -102,9 +104,8 @@ tick_set_periodic_handler(struct clock_event_device *dev, int broadcast);
1780
1781 #else /* !BROADCAST */
1782
1783 -static inline int tick_check_broadcast_device(struct clock_event_device *dev)
1784 +static inline void tick_install_broadcast_device(struct clock_event_device *dev)
1785 {
1786 - return 0;
1787 }
1788
1789 static inline int tick_is_broadcast_device(struct clock_event_device *dev)
1790 diff --git a/lib/random32.c b/lib/random32.c
1791 index 52280d5526be..01e8890d1089 100644
1792 --- a/lib/random32.c
1793 +++ b/lib/random32.c
1794 @@ -141,7 +141,7 @@ void prandom_seed(u32 entropy)
1795 */
1796 for_each_possible_cpu (i) {
1797 struct rnd_state *state = &per_cpu(net_rand_state, i);
1798 - state->s1 = __seed(state->s1 ^ entropy, 1);
1799 + state->s1 = __seed(state->s1 ^ entropy, 2);
1800 }
1801 }
1802 EXPORT_SYMBOL(prandom_seed);
1803 @@ -158,9 +158,9 @@ static int __init prandom_init(void)
1804 struct rnd_state *state = &per_cpu(net_rand_state,i);
1805
1806 #define LCG(x) ((x) * 69069) /* super-duper LCG */
1807 - state->s1 = __seed(LCG(i + jiffies), 1);
1808 - state->s2 = __seed(LCG(state->s1), 7);
1809 - state->s3 = __seed(LCG(state->s2), 15);
1810 + state->s1 = __seed(LCG(i + jiffies), 2);
1811 + state->s2 = __seed(LCG(state->s1), 8);
1812 + state->s3 = __seed(LCG(state->s2), 16);
1813
1814 /* "warm it up" */
1815 prandom_u32_state(state);
1816 @@ -187,9 +187,9 @@ static int __init prandom_reseed(void)
1817 u32 seeds[3];
1818
1819 get_random_bytes(&seeds, sizeof(seeds));
1820 - state->s1 = __seed(seeds[0], 1);
1821 - state->s2 = __seed(seeds[1], 7);
1822 - state->s3 = __seed(seeds[2], 15);
1823 + state->s1 = __seed(seeds[0], 2);
1824 + state->s2 = __seed(seeds[1], 8);
1825 + state->s3 = __seed(seeds[2], 16);
1826
1827 /* mix it in */
1828 prandom_u32_state(state);
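
The random32.c hunk raises the minimum values enforced by __seed() from 1/7/15 to 2/8/16. The reason is the taus88 update step: it masks off the low 1, 3 and 4 bits of s1, s2 and s3 respectively, so a state of 1, 7 or 15 would collapse to zero and stay there. A self-contained sketch of the generator and the seed clamp, using the published taus88 recurrence (written from the algorithm, not copied from the kernel):

#include <stdint.h>
#include <stdio.h>

struct rnd_state { uint32_t s1, s2, s3; };

/* Force the seed above the generator's per-word minimum. */
static uint32_t seed_min(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;
}

static uint32_t taus88(struct rnd_state *st)
{
	/* The masks drop the low 1, 3 and 4 bits, hence the minimums 2, 8, 16. */
	st->s1 = ((st->s1 & 0xFFFFFFFEU) << 12) ^ (((st->s1 << 13) ^ st->s1) >> 19);
	st->s2 = ((st->s2 & 0xFFFFFFF8U) << 4)  ^ (((st->s2 << 2)  ^ st->s2) >> 25);
	st->s3 = ((st->s3 & 0xFFFFFFF0U) << 17) ^ (((st->s3 << 3)  ^ st->s3) >> 11);
	return st->s1 ^ st->s2 ^ st->s3;
}

int main(void)
{
	struct rnd_state st = {
		seed_min(1, 2),   /* 1 would be zeroed by the & 0xFFFFFFFE mask  */
		seed_min(7, 8),   /* 7 would be zeroed by the & 0xFFFFFFF8 mask  */
		seed_min(15, 16), /* 15 would be zeroed by the & 0xFFFFFFF0 mask */
	};

	for (int i = 0; i < 4; i++)
		printf("%08x\n", taus88(&st));
	return 0;
}
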
1829 diff --git a/mm/mprotect.c b/mm/mprotect.c
1830 index 2bbb648ea73f..d4d5399c7aba 100644
1831 --- a/mm/mprotect.c
1832 +++ b/mm/mprotect.c
1833 @@ -135,6 +135,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1834 pmd_t *pmd;
1835 unsigned long next;
1836 unsigned long pages = 0;
1837 + unsigned long nr_huge_updates = 0;
1838 bool all_same_node;
1839
1840 pmd = pmd_offset(pud, addr);
1841 @@ -145,7 +146,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1842 split_huge_page_pmd(vma, addr, pmd);
1843 else if (change_huge_pmd(vma, pmd, addr, newprot,
1844 prot_numa)) {
1845 - pages++;
1846 + pages += HPAGE_PMD_NR;
1847 + nr_huge_updates++;
1848 continue;
1849 }
1850 /* fall through */
1851 @@ -165,6 +167,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
1852 change_pmd_protnuma(vma->vm_mm, addr, pmd);
1853 } while (pmd++, addr = next, addr != end);
1854
1855 + if (nr_huge_updates)
1856 + count_vm_numa_events(NUMA_HUGE_PTE_UPDATES, nr_huge_updates);
1857 +
1858 return pages;
1859 }
1860
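
change_pmd_range() now accounts a successful huge-PMD protection change as HPAGE_PMD_NR base pages rather than one, and counts the huge updates separately into the NUMA_HUGE_PTE_UPDATES event added to vmstat in the next hunk. On a typical x86-64 configuration with 4 KiB base pages and 2 MiB huge pages that is 512 pages per PMD, as this small check assumes (other architectures differ):

#include <stdio.h>

int main(void)
{
	/* Assumed x86-64 defaults, purely for illustration. */
	const long PAGE_SIZE = 4096;
	const long HPAGE_PMD_SIZE = 2L * 1024 * 1024;
	const long HPAGE_PMD_NR = HPAGE_PMD_SIZE / PAGE_SIZE;

	long nr_huge_updates = 3;	/* e.g. three huge PMDs changed */
	long pages = nr_huge_updates * HPAGE_PMD_NR;

	printf("HPAGE_PMD_NR=%ld, pages counted=%ld (was %ld before the fix)\n",
	       HPAGE_PMD_NR, pages, nr_huge_updates);
	return 0;
}
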
1861 diff --git a/mm/vmstat.c b/mm/vmstat.c
1862 index f42745e65780..10bbb5427a6d 100644
1863 --- a/mm/vmstat.c
1864 +++ b/mm/vmstat.c
1865 @@ -779,6 +779,7 @@ const char * const vmstat_text[] = {
1866
1867 #ifdef CONFIG_NUMA_BALANCING
1868 "numa_pte_updates",
1869 + "numa_huge_pte_updates",
1870 "numa_hint_faults",
1871 "numa_hint_faults_local",
1872 "numa_pages_migrated",
1873 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
1874 index ef12839a7cfe..0018daccdea9 100644
1875 --- a/net/appletalk/ddp.c
1876 +++ b/net/appletalk/ddp.c
1877 @@ -1735,7 +1735,6 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1878 size_t size, int flags)
1879 {
1880 struct sock *sk = sock->sk;
1881 - struct sockaddr_at *sat = (struct sockaddr_at *)msg->msg_name;
1882 struct ddpehdr *ddp;
1883 int copied = 0;
1884 int offset = 0;
1885 @@ -1764,14 +1763,13 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1886 }
1887 err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied);
1888
1889 - if (!err) {
1890 - if (sat) {
1891 - sat->sat_family = AF_APPLETALK;
1892 - sat->sat_port = ddp->deh_sport;
1893 - sat->sat_addr.s_node = ddp->deh_snode;
1894 - sat->sat_addr.s_net = ddp->deh_snet;
1895 - }
1896 - msg->msg_namelen = sizeof(*sat);
1897 + if (!err && msg->msg_name) {
1898 + struct sockaddr_at *sat = msg->msg_name;
1899 + sat->sat_family = AF_APPLETALK;
1900 + sat->sat_port = ddp->deh_sport;
1901 + sat->sat_addr.s_node = ddp->deh_snode;
1902 + sat->sat_addr.s_net = ddp->deh_snet;
1903 + msg->msg_namelen = sizeof(*sat);
1904 }
1905
1906 skb_free_datagram(sk, skb); /* Free the datagram. */
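
This appletalk hunk is the first of a long series in this patch with the same shape: recvmsg() handlers stop unconditionally writing (or zeroing) msg_namelen and instead fill the source address and set msg_namelen only when the caller actually passed msg_name, leaving the length alone otherwise. From userspace the visible contract is just recvmsg()'s msg_name/msg_namelen pair, as in this small UDP sketch (the loopback port is an arbitrary example and error checking is omitted):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int rx = socket(AF_INET, SOCK_DGRAM, 0);
	int tx = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(40000),
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	bind(rx, (struct sockaddr *)&addr, sizeof(addr));
	sendto(tx, "hi", 2, 0, (struct sockaddr *)&addr, sizeof(addr));

	char buf[16];
	struct iovec iov = { buf, sizeof(buf) };
	struct sockaddr_in src;
	struct msghdr msg = { .msg_name = &src, .msg_namelen = sizeof(src),
			      .msg_iov = &iov, .msg_iovlen = 1 };

	/* With msg_name set, the kernel fills the sender and msg_namelen. */
	recvmsg(rx, &msg, 0);
	printf("from %s:%d namelen=%u\n", inet_ntoa(src.sin_addr),
	       ntohs(src.sin_port), (unsigned)msg.msg_namelen);

	/* With msg_name == NULL, msg_namelen is simply left at zero. */
	sendto(tx, "hi", 2, 0, (struct sockaddr *)&addr, sizeof(addr));
	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	recvmsg(rx, &msg, 0);
	printf("no name requested, namelen=%u\n", (unsigned)msg.msg_namelen);

	close(rx);
	close(tx);
	return 0;
}
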
1907 diff --git a/net/atm/common.c b/net/atm/common.c
1908 index 737bef59ce89..7b491006eaf4 100644
1909 --- a/net/atm/common.c
1910 +++ b/net/atm/common.c
1911 @@ -531,8 +531,6 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
1912 struct sk_buff *skb;
1913 int copied, error = -EINVAL;
1914
1915 - msg->msg_namelen = 0;
1916 -
1917 if (sock->state != SS_CONNECTED)
1918 return -ENOTCONN;
1919
1920 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
1921 index e277e38f736b..ba6db78a02b1 100644
1922 --- a/net/ax25/af_ax25.c
1923 +++ b/net/ax25/af_ax25.c
1924 @@ -1636,11 +1636,11 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,
1925
1926 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
1927
1928 - if (msg->msg_namelen != 0) {
1929 - struct sockaddr_ax25 *sax = (struct sockaddr_ax25 *)msg->msg_name;
1930 + if (msg->msg_name) {
1931 ax25_digi digi;
1932 ax25_address src;
1933 const unsigned char *mac = skb_mac_header(skb);
1934 + struct sockaddr_ax25 *sax = msg->msg_name;
1935
1936 memset(sax, 0, sizeof(struct full_sockaddr_ax25));
1937 ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,
1938 diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
1939 index 9096137c889c..6629cdc134dc 100644
1940 --- a/net/bluetooth/af_bluetooth.c
1941 +++ b/net/bluetooth/af_bluetooth.c
1942 @@ -221,8 +221,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1943 if (flags & (MSG_OOB))
1944 return -EOPNOTSUPP;
1945
1946 - msg->msg_namelen = 0;
1947 -
1948 skb = skb_recv_datagram(sk, flags, noblock, &err);
1949 if (!skb) {
1950 if (sk->sk_shutdown & RCV_SHUTDOWN)
1951 @@ -287,8 +285,6 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1952 if (flags & MSG_OOB)
1953 return -EOPNOTSUPP;
1954
1955 - msg->msg_namelen = 0;
1956 -
1957 BT_DBG("sk %p size %zu", sk, size);
1958
1959 lock_sock(sk);
1960 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
1961 index 9bd7d959e384..fa4bf6631425 100644
1962 --- a/net/bluetooth/hci_sock.c
1963 +++ b/net/bluetooth/hci_sock.c
1964 @@ -752,8 +752,6 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1965 if (!skb)
1966 return err;
1967
1968 - msg->msg_namelen = 0;
1969 -
1970 copied = skb->len;
1971 if (len < copied) {
1972 msg->msg_flags |= MSG_TRUNC;
1973 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
1974 index 30b3721dc6d7..c1c6028e389a 100644
1975 --- a/net/bluetooth/rfcomm/sock.c
1976 +++ b/net/bluetooth/rfcomm/sock.c
1977 @@ -608,7 +608,6 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1978
1979 if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
1980 rfcomm_dlc_accept(d);
1981 - msg->msg_namelen = 0;
1982 return 0;
1983 }
1984
1985 diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
1986 index e7bd4eea575c..2bb1d3a5e76b 100644
1987 --- a/net/bluetooth/sco.c
1988 +++ b/net/bluetooth/sco.c
1989 @@ -700,7 +700,6 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
1990 test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
1991 sco_conn_defer_accept(pi->conn->hcon, 0);
1992 sk->sk_state = BT_CONFIG;
1993 - msg->msg_namelen = 0;
1994
1995 release_sock(sk);
1996 return 0;
1997 diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
1998 index 4cdba60926ff..32bd1e87f149 100644
1999 --- a/net/bridge/br_if.c
2000 +++ b/net/bridge/br_if.c
2001 @@ -172,6 +172,8 @@ void br_dev_delete(struct net_device *dev, struct list_head *head)
2002 del_nbp(p);
2003 }
2004
2005 + br_fdb_delete_by_port(br, NULL, 1);
2006 +
2007 del_timer_sync(&br->gc_timer);
2008
2009 br_sysfs_delbr(br->dev);
2010 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
2011 index 05a41c7ec304..d6be3edb7a43 100644
2012 --- a/net/caif/caif_socket.c
2013 +++ b/net/caif/caif_socket.c
2014 @@ -286,8 +286,6 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
2015 if (m->msg_flags&MSG_OOB)
2016 goto read_error;
2017
2018 - m->msg_namelen = 0;
2019 -
2020 skb = skb_recv_datagram(sk, flags, 0 , &ret);
2021 if (!skb)
2022 goto read_error;
2023 @@ -361,8 +359,6 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
2024 if (flags&MSG_OOB)
2025 goto out;
2026
2027 - msg->msg_namelen = 0;
2028 -
2029 /*
2030 * Lock the socket to prevent queue disordering
2031 * while sleeps in memcpy_tomsg
2032 diff --git a/net/compat.c b/net/compat.c
2033 index 89032580bd1d..dd32e34c1e2c 100644
2034 --- a/net/compat.c
2035 +++ b/net/compat.c
2036 @@ -72,7 +72,7 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
2037 __get_user(kmsg->msg_flags, &umsg->msg_flags))
2038 return -EFAULT;
2039 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
2040 - return -EINVAL;
2041 + kmsg->msg_namelen = sizeof(struct sockaddr_storage);
2042 kmsg->msg_name = compat_ptr(tmp1);
2043 kmsg->msg_iov = compat_ptr(tmp2);
2044 kmsg->msg_control = compat_ptr(tmp3);
2045 @@ -93,7 +93,8 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
2046 if (err < 0)
2047 return err;
2048 }
2049 - kern_msg->msg_name = kern_address;
2050 + if (kern_msg->msg_name)
2051 + kern_msg->msg_name = kern_address;
2052 } else
2053 kern_msg->msg_name = NULL;
2054
2055 diff --git a/net/core/dev.c b/net/core/dev.c
2056 index 7ddbb31b10d3..1283c8442e99 100644
2057 --- a/net/core/dev.c
2058 +++ b/net/core/dev.c
2059 @@ -4478,7 +4478,7 @@ static void dev_change_rx_flags(struct net_device *dev, int flags)
2060 {
2061 const struct net_device_ops *ops = dev->netdev_ops;
2062
2063 - if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
2064 + if (ops->ndo_change_rx_flags)
2065 ops->ndo_change_rx_flags(dev, flags);
2066 }
2067
2068 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
2069 index d5a9f8ead0d8..0e9131195eb0 100644
2070 --- a/net/core/fib_rules.c
2071 +++ b/net/core/fib_rules.c
2072 @@ -445,7 +445,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
2073 if (frh->action && (frh->action != rule->action))
2074 continue;
2075
2076 - if (frh->table && (frh_get_table(frh, tb) != rule->table))
2077 + if (frh_get_table(frh, tb) &&
2078 + (frh_get_table(frh, tb) != rule->table))
2079 continue;
2080
2081 if (tb[FRA_PRIORITY] &&
2082 diff --git a/net/core/iovec.c b/net/core/iovec.c
2083 index de178e462682..9a31515fb8e3 100644
2084 --- a/net/core/iovec.c
2085 +++ b/net/core/iovec.c
2086 @@ -48,7 +48,8 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
2087 if (err < 0)
2088 return err;
2089 }
2090 - m->msg_name = address;
2091 + if (m->msg_name)
2092 + m->msg_name = address;
2093 } else {
2094 m->msg_name = NULL;
2095 }
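
The two hunks above (net/compat.c and net/core/iovec.c) harden the message setup path: an oversized user msg_namelen is clamped to sizeof(struct sockaddr_storage) instead of failing with -EINVAL, and the kernel-side address buffer is only wired into msg_name when the user actually supplied one, so a NULL msg_name stays NULL. A condensed model of that logic follows; the struct and helper names are made up for the sketch.

#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

struct kmsg {
	void *msg_name;		/* user supplied address pointer, may be NULL */
	unsigned int msg_namelen;
};

/* Sketch of the fixed setup path: clamp the length, keep NULL as NULL. */
static void setup_msg_name(struct kmsg *m, void *kernel_addr_buf)
{
	if (m->msg_namelen > sizeof(struct sockaddr_storage))
		m->msg_namelen = sizeof(struct sockaddr_storage);

	if (m->msg_name)
		m->msg_name = kernel_addr_buf;	/* address copy happens elsewhere */
}

int main(void)
{
	struct sockaddr_storage buf;
	struct kmsg with_name = { &buf, 4096 };	/* bogus, oversized length */
	struct kmsg without   = { NULL, 64 };

	setup_msg_name(&with_name, &buf);
	setup_msg_name(&without, &buf);
	printf("clamped namelen=%u, NULL name kept NULL: %s\n",
	       with_name.msg_namelen, without.msg_name == NULL ? "yes" : "no");
	return 0;
}
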
2096 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
2097 index 11f2704c3810..ebbea5371967 100644
2098 --- a/net/core/pktgen.c
2099 +++ b/net/core/pktgen.c
2100 @@ -2515,6 +2515,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2101 if (x) {
2102 int ret;
2103 __u8 *eth;
2104 + struct iphdr *iph;
2105 +
2106 nhead = x->props.header_len - skb_headroom(skb);
2107 if (nhead > 0) {
2108 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2109 @@ -2536,6 +2538,11 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2110 eth = (__u8 *) skb_push(skb, ETH_HLEN);
2111 memcpy(eth, pkt_dev->hh, 12);
2112 *(u16 *) &eth[12] = protocol;
2113 +
2114 + /* Update IPv4 header len as well as checksum value */
2115 + iph = ip_hdr(skb);
2116 + iph->tot_len = htons(skb->len - ETH_HLEN);
2117 + ip_send_check(iph);
2118 }
2119 }
2120 return 1;
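
After applying the IPsec transform and pushing the Ethernet header, pktgen now refreshes iph->tot_len and recomputes the IPv4 header checksum, because the transform changed the packet length. The checksum itself is the usual 16-bit one's-complement sum over the header, which is what ip_send_check() produces; a standalone sketch of that computation over an example header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1071 style 16-bit one's-complement checksum over a header. */
static uint16_t ip_header_checksum(const uint8_t *hdr, size_t len)
{
	uint32_t sum = 0;

	for (size_t i = 0; i + 1 < len; i += 2) {
		uint16_t w;
		memcpy(&w, hdr + i, 2);
		sum += w;
	}
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Minimal 20-byte IPv4 header with the checksum field zeroed. */
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x54,	/* version/IHL, TOS, tot_len = 84  */
		0x00, 0x00, 0x40, 0x00,	/* id, flags/frag offset           */
		0x40, 0x11, 0x00, 0x00,	/* ttl, proto = UDP, checksum = 0  */
		0x0a, 0x00, 0x00, 0x01,	/* saddr 10.0.0.1                  */
		0x0a, 0x00, 0x00, 0x02,	/* daddr 10.0.0.2                  */
	};
	uint16_t csum = ip_header_checksum(iph, sizeof(iph));

	/* One's-complement sums are byte-order independent, store as computed. */
	memcpy(&iph[10], &csum, 2);
	printf("header checksum bytes: %02x %02x\n", iph[10], iph[11]);
	return 0;
}
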
2121 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2122 index 1c1738cc4538..d9e8736bcdc1 100644
2123 --- a/net/core/skbuff.c
2124 +++ b/net/core/skbuff.c
2125 @@ -585,9 +585,6 @@ static void skb_release_head_state(struct sk_buff *skb)
2126 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
2127 nf_conntrack_put(skb->nfct);
2128 #endif
2129 -#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
2130 - nf_conntrack_put_reasm(skb->nfct_reasm);
2131 -#endif
2132 #ifdef CONFIG_BRIDGE_NETFILTER
2133 nf_bridge_put(skb->nf_bridge);
2134 #endif
2135 diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
2136 index 55e1fd5b3e56..31b127e8086b 100644
2137 --- a/net/ieee802154/6lowpan.c
2138 +++ b/net/ieee802154/6lowpan.c
2139 @@ -862,7 +862,7 @@ lowpan_process_data(struct sk_buff *skb)
2140 * Traffic class carried in-line
2141 * ECN + DSCP (1 byte), Flow Label is elided
2142 */
2143 - case 1: /* 10b */
2144 + case 2: /* 10b */
2145 if (lowpan_fetch_skb_u8(skb, &tmp))
2146 goto drop;
2147
2148 @@ -875,7 +875,7 @@ lowpan_process_data(struct sk_buff *skb)
2149 * Flow Label carried in-line
2150 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
2151 */
2152 - case 2: /* 01b */
2153 + case 1: /* 01b */
2154 if (lowpan_fetch_skb_u8(skb, &tmp))
2155 goto drop;
2156
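
The 6lowpan hunk swaps two case labels: in the IPHC traffic-class/flow-label (TF) field, pattern 01 means ECN plus flow label inline with DSCP elided, while 10 means ECN plus DSCP inline with the flow label elided, so the numeric case values (01b is 1, 10b is 2) did not match the handlers attached to them. A minimal decoder of the 2-bit TF value for reference, a sketch following RFC 6282 rather than the kernel parser:

#include <stdint.h>
#include <stdio.h>

/* How many bytes of inline traffic class / flow label follow the IPHC
 * base header for each 2-bit TF pattern (RFC 6282). */
static int tf_inline_bytes(uint8_t tf)
{
	switch (tf & 0x3) {
	case 0: return 4;	/* 00: ECN + DSCP + pad + flow label        */
	case 1: return 3;	/* 01: ECN + pad + flow label, DSCP elided  */
	case 2: return 1;	/* 10: ECN + DSCP, flow label elided        */
	default: return 0;	/* 11: everything elided                    */
	}
}

int main(void)
{
	for (uint8_t tf = 0; tf < 4; tf++)
		printf("TF=%u%u -> %d inline byte(s)\n",
		       (tf >> 1) & 1, tf & 1, tf_inline_bytes(tf));
	return 0;
}
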
2157 diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
2158 index 581a59504bd5..1865fdf5a5a5 100644
2159 --- a/net/ieee802154/dgram.c
2160 +++ b/net/ieee802154/dgram.c
2161 @@ -315,9 +315,8 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
2162 if (saddr) {
2163 saddr->family = AF_IEEE802154;
2164 saddr->addr = mac_cb(skb)->sa;
2165 - }
2166 - if (addr_len)
2167 *addr_len = sizeof(*saddr);
2168 + }
2169
2170 if (flags & MSG_TRUNC)
2171 copied = skb->len;
2172 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
2173 index b28e863fe0a7..19e36376d2a0 100644
2174 --- a/net/ipv4/datagram.c
2175 +++ b/net/ipv4/datagram.c
2176 @@ -57,7 +57,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2177 if (IS_ERR(rt)) {
2178 err = PTR_ERR(rt);
2179 if (err == -ENETUNREACH)
2180 - IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
2181 + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
2182 goto out;
2183 }
2184
2185 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
2186 index d9c4f113d709..23e6ab0a2dc0 100644
2187 --- a/net/ipv4/ip_sockglue.c
2188 +++ b/net/ipv4/ip_sockglue.c
2189 @@ -368,7 +368,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 inf
2190 /*
2191 * Handle MSG_ERRQUEUE
2192 */
2193 -int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
2194 +int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
2195 {
2196 struct sock_exterr_skb *serr;
2197 struct sk_buff *skb, *skb2;
2198 @@ -405,6 +405,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
2199 serr->addr_offset);
2200 sin->sin_port = serr->port;
2201 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2202 + *addr_len = sizeof(*sin);
2203 }
2204
2205 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
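
ip_recv_error() (and its IPv6 counterpart further down) now takes an addr_len pointer and sets it only when an offending address was actually written into msg_name, instead of the callers reporting a fixed length up front for error-queue reads. From userspace this path is exercised with IP_RECVERR plus recvmsg(MSG_ERRQUEUE); the sketch below is only illustrative, the destination is a TEST-NET example address and whether an error is queued depends on the local network:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1;
	setsockopt(fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));

	/* Send somewhere unreachable so an ICMP error may land on the queue. */
	struct sockaddr_in dst = { .sin_family = AF_INET, .sin_port = htons(33434) };
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
	sleep(1);

	char data[64], ctrl[512];
	struct sockaddr_in from;
	struct iovec iov = { data, sizeof(data) };
	struct msghdr msg = { .msg_name = &from, .msg_namelen = sizeof(from),
			      .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0) {
		perror("no queued error (possible on many setups)");
	} else {
		/* With the fix, msg_namelen reflects whether 'from' was filled. */
		printf("errqueue read, msg_namelen=%u\n", (unsigned)msg.msg_namelen);
	}
	close(fd);
	return 0;
}
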
2206 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
2207 index 065604127418..feb19db62359 100644
2208 --- a/net/ipv4/ip_vti.c
2209 +++ b/net/ipv4/ip_vti.c
2210 @@ -350,6 +350,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
2211 if (!rt->dst.xfrm ||
2212 rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
2213 dev->stats.tx_carrier_errors++;
2214 + ip_rt_put(rt);
2215 goto tx_error_icmp;
2216 }
2217 tdev = rt->dst.dev;
2218 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2219 index 7d93d62cd5fd..8cae28f5c3cf 100644
2220 --- a/net/ipv4/ping.c
2221 +++ b/net/ipv4/ping.c
2222 @@ -570,7 +570,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2223 err = PTR_ERR(rt);
2224 rt = NULL;
2225 if (err == -ENETUNREACH)
2226 - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
2227 + IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
2228 goto out;
2229 }
2230
2231 @@ -626,7 +626,6 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2232 size_t len, int noblock, int flags, int *addr_len)
2233 {
2234 struct inet_sock *isk = inet_sk(sk);
2235 - struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
2236 struct sk_buff *skb;
2237 int copied, err;
2238
2239 @@ -636,11 +635,8 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2240 if (flags & MSG_OOB)
2241 goto out;
2242
2243 - if (addr_len)
2244 - *addr_len = sizeof(*sin);
2245 -
2246 if (flags & MSG_ERRQUEUE)
2247 - return ip_recv_error(sk, msg, len);
2248 + return ip_recv_error(sk, msg, len, addr_len);
2249
2250 skb = skb_recv_datagram(sk, flags, noblock, &err);
2251 if (!skb)
2252 @@ -660,11 +656,14 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2253 sock_recv_timestamp(msg, sk, skb);
2254
2255 /* Copy the address. */
2256 - if (sin) {
2257 + if (msg->msg_name) {
2258 + struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
2259 +
2260 sin->sin_family = AF_INET;
2261 sin->sin_port = 0 /* skb->h.uh->source */;
2262 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
2263 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2264 + *addr_len = sizeof(*sin);
2265 }
2266 if (isk->cmsg_flags)
2267 ip_cmsg_recv(msg, skb);
2268 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2269 index 6fb233772f79..402870fdfa0e 100644
2270 --- a/net/ipv4/raw.c
2271 +++ b/net/ipv4/raw.c
2272 @@ -692,11 +692,8 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2273 if (flags & MSG_OOB)
2274 goto out;
2275
2276 - if (addr_len)
2277 - *addr_len = sizeof(*sin);
2278 -
2279 if (flags & MSG_ERRQUEUE) {
2280 - err = ip_recv_error(sk, msg, len);
2281 + err = ip_recv_error(sk, msg, len, addr_len);
2282 goto out;
2283 }
2284
2285 @@ -722,6 +719,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2286 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
2287 sin->sin_port = 0;
2288 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2289 + *addr_len = sizeof(*sin);
2290 }
2291 if (inet->cmsg_flags)
2292 ip_cmsg_recv(msg, skb);
2293 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2294 index d11e73ce9365..f6c6ab14da41 100644
2295 --- a/net/ipv4/route.c
2296 +++ b/net/ipv4/route.c
2297 @@ -1720,8 +1720,12 @@ local_input:
2298 rth->dst.error= -err;
2299 rth->rt_flags &= ~RTCF_LOCAL;
2300 }
2301 - if (do_cache)
2302 - rt_cache_route(&FIB_RES_NH(res), rth);
2303 + if (do_cache) {
2304 + if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
2305 + rth->dst.flags |= DST_NOCACHE;
2306 + rt_add_uncached_list(rth);
2307 + }
2308 + }
2309 skb_dst_set(skb, &rth->dst);
2310 err = 0;
2311 goto out;
2312 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2313 index c888abf5a728..1a2e249cef49 100644
2314 --- a/net/ipv4/tcp.c
2315 +++ b/net/ipv4/tcp.c
2316 @@ -807,12 +807,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
2317 xmit_size_goal = min_t(u32, gso_size,
2318 sk->sk_gso_max_size - 1 - hlen);
2319
2320 - /* TSQ : try to have at least two segments in flight
2321 - * (one in NIC TX ring, another in Qdisc)
2322 - */
2323 - xmit_size_goal = min_t(u32, xmit_size_goal,
2324 - sysctl_tcp_limit_output_bytes >> 1);
2325 -
2326 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
2327
2328 /* We try hard to avoid divides here */
2329 @@ -2905,6 +2899,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2330 netdev_features_t features)
2331 {
2332 struct sk_buff *segs = ERR_PTR(-EINVAL);
2333 + unsigned int sum_truesize = 0;
2334 struct tcphdr *th;
2335 unsigned int thlen;
2336 unsigned int seq;
2337 @@ -2988,13 +2983,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2338 if (copy_destructor) {
2339 skb->destructor = gso_skb->destructor;
2340 skb->sk = gso_skb->sk;
2341 - /* {tcp|sock}_wfree() use exact truesize accounting :
2342 - * sum(skb->truesize) MUST be exactly be gso_skb->truesize
2343 - * So we account mss bytes of 'true size' for each segment.
2344 - * The last segment will contain the remaining.
2345 - */
2346 - skb->truesize = mss;
2347 - gso_skb->truesize -= mss;
2348 + sum_truesize += skb->truesize;
2349 }
2350 skb = skb->next;
2351 th = tcp_hdr(skb);
2352 @@ -3011,7 +3000,9 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2353 if (copy_destructor) {
2354 swap(gso_skb->sk, skb->sk);
2355 swap(gso_skb->destructor, skb->destructor);
2356 - swap(gso_skb->truesize, skb->truesize);
2357 + sum_truesize += skb->truesize;
2358 + atomic_add(sum_truesize - gso_skb->truesize,
2359 + &skb->sk->sk_wmem_alloc);
2360 }
2361
2362 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
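
The tcp_tso_segment() change stops slicing gso_skb->truesize by mss per segment and instead accumulates the real truesize of every produced segment, then corrects the socket's write-memory accounting once with a single atomic add of the difference. A toy model of that bookkeeping, with plain integers and made-up sizes standing in for skb truesize and sk_wmem_alloc:

#include <stdio.h>

int main(void)
{
	/* Hypothetical numbers: one GSO skb charged to the socket ... */
	long gso_truesize = 65536 + 320;
	long sk_wmem_alloc = gso_truesize;

	/* ... split into segments whose truesize does not sum to the same
	 * value, since each segment carries its own overhead. */
	long seg_truesize[] = { 2048, 2048, 2048, 2048 };
	long sum_truesize = 0;

	for (unsigned i = 0; i < sizeof(seg_truesize) / sizeof(seg_truesize[0]); i++)
		sum_truesize += seg_truesize[i];

	/* Single adjustment, done via atomic_add() in the patch: the socket
	 * now accounts for the segments, not the original skb. */
	sk_wmem_alloc += sum_truesize - gso_truesize;

	printf("segments total %ld, wmem_alloc now %ld\n", sum_truesize, sk_wmem_alloc);
	return 0;
}
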
2363 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2364 index 7999fc55c83b..5d87806d3ade 100644
2365 --- a/net/ipv4/tcp_ipv4.c
2366 +++ b/net/ipv4/tcp_ipv4.c
2367 @@ -176,7 +176,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2368 if (IS_ERR(rt)) {
2369 err = PTR_ERR(rt);
2370 if (err == -ENETUNREACH)
2371 - IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
2372 + IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
2373 return err;
2374 }
2375
2376 diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
2377 index f6a005c485a9..306dbd9a9441 100644
2378 --- a/net/ipv4/tcp_metrics.c
2379 +++ b/net/ipv4/tcp_metrics.c
2380 @@ -665,10 +665,13 @@ void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
2381 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
2382 struct tcp_fastopen_cookie *cookie, bool syn_lost)
2383 {
2384 + struct dst_entry *dst = __sk_dst_get(sk);
2385 struct tcp_metrics_block *tm;
2386
2387 + if (!dst)
2388 + return;
2389 rcu_read_lock();
2390 - tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
2391 + tm = tcp_get_metrics(sk, dst, true);
2392 if (tm) {
2393 struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
2394
2395 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
2396 index cd16eb06bebf..5560abfe6d30 100644
2397 --- a/net/ipv4/tcp_output.c
2398 +++ b/net/ipv4/tcp_output.c
2399 @@ -1866,8 +1866,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2400 * - better RTT estimation and ACK scheduling
2401 * - faster recovery
2402 * - high rates
2403 + * Alas, some drivers / subsystems require a fair amount
2404 + * of queued bytes to ensure line rate.
2405 + * One example is wifi aggregation (802.11 AMPDU)
2406 */
2407 - limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
2408 + limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes,
2409 + sk->sk_pacing_rate >> 10);
2410
2411 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
2412 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
2413 @@ -3098,7 +3102,6 @@ void tcp_send_window_probe(struct sock *sk)
2414 {
2415 if (sk->sk_state == TCP_ESTABLISHED) {
2416 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
2417 - tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
2418 tcp_xmit_probe_skb(sk, 0);
2419 }
2420 }
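
tcp_write_xmit() now caps the bytes queued on the qdisc/device at the larger of tcp_limit_output_bytes and roughly one millisecond of data at the socket's pacing rate: sk_pacing_rate is in bytes per second, so shifting right by 10 divides by 1024, about a millisecond's worth. A quick calculation of the resulting limit for a few example pacing rates (a sketch with illustrative numbers, using the documented sysctl default):

#include <stdio.h>

int main(void)
{
	const unsigned long tcp_limit_output_bytes = 131072;	/* sysctl default */
	/* Example pacing rates in bytes/s: ~100 Mbit, ~1 Gbit, ~10 Gbit. */
	unsigned long rates[] = { 12500000UL, 125000000UL, 1250000000UL };

	for (unsigned i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned long per_ms = rates[i] >> 10;	/* ~1 ms of data */
		unsigned long limit = per_ms > tcp_limit_output_bytes ?
				      per_ms : tcp_limit_output_bytes;
		printf("pacing %10lu B/s -> limit %lu bytes\n", rates[i], limit);
	}
	return 0;
}
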
2421 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
2422 index 93b731d53221..c3075b552248 100644
2423 --- a/net/ipv4/udp.c
2424 +++ b/net/ipv4/udp.c
2425 @@ -971,7 +971,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2426 err = PTR_ERR(rt);
2427 rt = NULL;
2428 if (err == -ENETUNREACH)
2429 - IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
2430 + IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
2431 goto out;
2432 }
2433
2434 @@ -1070,6 +1070,9 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
2435 struct udp_sock *up = udp_sk(sk);
2436 int ret;
2437
2438 + if (flags & MSG_SENDPAGE_NOTLAST)
2439 + flags |= MSG_MORE;
2440 +
2441 if (!up->pending) {
2442 struct msghdr msg = { .msg_flags = flags|MSG_MORE };
2443
2444 @@ -1207,14 +1210,8 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
2445 int is_udplite = IS_UDPLITE(sk);
2446 bool slow;
2447
2448 - /*
2449 - * Check any passed addresses
2450 - */
2451 - if (addr_len)
2452 - *addr_len = sizeof(*sin);
2453 -
2454 if (flags & MSG_ERRQUEUE)
2455 - return ip_recv_error(sk, msg, len);
2456 + return ip_recv_error(sk, msg, len, addr_len);
2457
2458 try_again:
2459 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
2460 @@ -1274,6 +1271,7 @@ try_again:
2461 sin->sin_port = udp_hdr(skb)->source;
2462 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
2463 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
2464 + *addr_len = sizeof(*sin);
2465 }
2466 if (inet->cmsg_flags)
2467 ip_cmsg_recv(msg, skb);
2468 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
2469 index 4b56cbbc7890..8997340e3742 100644
2470 --- a/net/ipv6/datagram.c
2471 +++ b/net/ipv6/datagram.c
2472 @@ -318,7 +318,7 @@ void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
2473 /*
2474 * Handle MSG_ERRQUEUE
2475 */
2476 -int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
2477 +int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
2478 {
2479 struct ipv6_pinfo *np = inet6_sk(sk);
2480 struct sock_exterr_skb *serr;
2481 @@ -369,6 +369,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
2482 &sin->sin6_addr);
2483 sin->sin6_scope_id = 0;
2484 }
2485 + *addr_len = sizeof(*sin);
2486 }
2487
2488 memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
2489 @@ -377,6 +378,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
2490 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
2491 sin->sin6_family = AF_INET6;
2492 sin->sin6_flowinfo = 0;
2493 + sin->sin6_port = 0;
2494 if (skb->protocol == htons(ETH_P_IPV6)) {
2495 sin->sin6_addr = ipv6_hdr(skb)->saddr;
2496 if (np->rxopt.all)
2497 @@ -423,7 +425,8 @@ EXPORT_SYMBOL_GPL(ipv6_recv_error);
2498 /*
2499 * Handle IPV6_RECVPATHMTU
2500 */
2501 -int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
2502 +int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len,
2503 + int *addr_len)
2504 {
2505 struct ipv6_pinfo *np = inet6_sk(sk);
2506 struct sk_buff *skb;
2507 @@ -457,6 +460,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len)
2508 sin->sin6_port = 0;
2509 sin->sin6_scope_id = mtu_info.ip6m_addr.sin6_scope_id;
2510 sin->sin6_addr = mtu_info.ip6m_addr.sin6_addr;
2511 + *addr_len = sizeof(*sin);
2512 }
2513
2514 put_cmsg(msg, SOL_IPV6, IPV6_PATHMTU, sizeof(mtu_info), &mtu_info);
2515 diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
2516 index 46e88433ec7d..f0ccdb787100 100644
2517 --- a/net/ipv6/ip6_flowlabel.c
2518 +++ b/net/ipv6/ip6_flowlabel.c
2519 @@ -453,8 +453,10 @@ static int mem_check(struct sock *sk)
2520 if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
2521 return 0;
2522
2523 + rcu_read_lock_bh();
2524 for_each_sk_fl_rcu(np, sfl)
2525 count++;
2526 + rcu_read_unlock_bh();
2527
2528 if (room <= 0 ||
2529 ((count >= FL_MAX_PER_SOCK ||
2530 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2531 index 878f8027ebf6..b98b8e06739e 100644
2532 --- a/net/ipv6/ip6_output.c
2533 +++ b/net/ipv6/ip6_output.c
2534 @@ -141,8 +141,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
2535 }
2536 rcu_read_unlock_bh();
2537
2538 - IP6_INC_STATS_BH(dev_net(dst->dev),
2539 - ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
2540 + IP6_INC_STATS(dev_net(dst->dev),
2541 + ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
2542 kfree_skb(skb);
2543 return -EINVAL;
2544 }
2545 @@ -150,7 +150,8 @@ static int ip6_finish_output2(struct sk_buff *skb)
2546 static int ip6_finish_output(struct sk_buff *skb)
2547 {
2548 if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
2549 - dst_allfrag(skb_dst(skb)))
2550 + dst_allfrag(skb_dst(skb)) ||
2551 + (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
2552 return ip6_fragment(skb, ip6_finish_output2);
2553 else
2554 return ip6_finish_output2(skb);
2555 diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
2556 index c9b6a6e6a1e8..97cd7507c1a4 100644
2557 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
2558 +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
2559 @@ -172,63 +172,13 @@ out:
2560 return nf_conntrack_confirm(skb);
2561 }
2562
2563 -static unsigned int __ipv6_conntrack_in(struct net *net,
2564 - unsigned int hooknum,
2565 - struct sk_buff *skb,
2566 - const struct net_device *in,
2567 - const struct net_device *out,
2568 - int (*okfn)(struct sk_buff *))
2569 -{
2570 - struct sk_buff *reasm = skb->nfct_reasm;
2571 - const struct nf_conn_help *help;
2572 - struct nf_conn *ct;
2573 - enum ip_conntrack_info ctinfo;
2574 -
2575 - /* This packet is fragmented and has reassembled packet. */
2576 - if (reasm) {
2577 - /* Reassembled packet isn't parsed yet ? */
2578 - if (!reasm->nfct) {
2579 - unsigned int ret;
2580 -
2581 - ret = nf_conntrack_in(net, PF_INET6, hooknum, reasm);
2582 - if (ret != NF_ACCEPT)
2583 - return ret;
2584 - }
2585 -
2586 - /* Conntrack helpers need the entire reassembled packet in the
2587 - * POST_ROUTING hook. In case of unconfirmed connections NAT
2588 - * might reassign a helper, so the entire packet is also
2589 - * required.
2590 - */
2591 - ct = nf_ct_get(reasm, &ctinfo);
2592 - if (ct != NULL && !nf_ct_is_untracked(ct)) {
2593 - help = nfct_help(ct);
2594 - if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
2595 - nf_conntrack_get_reasm(reasm);
2596 - NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
2597 - (struct net_device *)in,
2598 - (struct net_device *)out,
2599 - okfn, NF_IP6_PRI_CONNTRACK + 1);
2600 - return NF_DROP_ERR(-ECANCELED);
2601 - }
2602 - }
2603 -
2604 - nf_conntrack_get(reasm->nfct);
2605 - skb->nfct = reasm->nfct;
2606 - skb->nfctinfo = reasm->nfctinfo;
2607 - return NF_ACCEPT;
2608 - }
2609 -
2610 - return nf_conntrack_in(net, PF_INET6, hooknum, skb);
2611 -}
2612 -
2613 static unsigned int ipv6_conntrack_in(unsigned int hooknum,
2614 struct sk_buff *skb,
2615 const struct net_device *in,
2616 const struct net_device *out,
2617 int (*okfn)(struct sk_buff *))
2618 {
2619 - return __ipv6_conntrack_in(dev_net(in), hooknum, skb, in, out, okfn);
2620 + return nf_conntrack_in(dev_net(in), PF_INET6, hooknum, skb);
2621 }
2622
2623 static unsigned int ipv6_conntrack_local(unsigned int hooknum,
2624 @@ -242,7 +192,7 @@ static unsigned int ipv6_conntrack_local(unsigned int hooknum,
2625 net_notice_ratelimited("ipv6_conntrack_local: packet too short\n");
2626 return NF_ACCEPT;
2627 }
2628 - return __ipv6_conntrack_in(dev_net(out), hooknum, skb, in, out, okfn);
2629 + return nf_conntrack_in(dev_net(out), PF_INET6, hooknum, skb);
2630 }
2631
2632 static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
2633 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
2634 index dffdc1a389c5..253566a8d55b 100644
2635 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
2636 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
2637 @@ -621,31 +621,16 @@ ret_orig:
2638 return skb;
2639 }
2640
2641 -void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
2642 - struct net_device *in, struct net_device *out,
2643 - int (*okfn)(struct sk_buff *))
2644 +void nf_ct_frag6_consume_orig(struct sk_buff *skb)
2645 {
2646 struct sk_buff *s, *s2;
2647 - unsigned int ret = 0;
2648
2649 for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
2650 - nf_conntrack_put_reasm(s->nfct_reasm);
2651 - nf_conntrack_get_reasm(skb);
2652 - s->nfct_reasm = skb;
2653 -
2654 s2 = s->next;
2655 s->next = NULL;
2656 -
2657 - if (ret != -ECANCELED)
2658 - ret = NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s,
2659 - in, out, okfn,
2660 - NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
2661 - else
2662 - kfree_skb(s);
2663 -
2664 + consume_skb(s);
2665 s = s2;
2666 }
2667 - nf_conntrack_put_reasm(skb);
2668 }
2669
2670 static int nf_ct_net_init(struct net *net)
2671 diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2672 index aacd121fe8c5..581dd9ede0de 100644
2673 --- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2674 +++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
2675 @@ -75,8 +75,11 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
2676 if (reasm == skb)
2677 return NF_ACCEPT;
2678
2679 - nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
2680 - (struct net_device *)out, okfn);
2681 + nf_ct_frag6_consume_orig(reasm);
2682 +
2683 + NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
2684 + (struct net_device *) in, (struct net_device *) out,
2685 + okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
2686
2687 return NF_STOLEN;
2688 }
2689 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
2690 index eedff8ccded5..464b1c9c08e4 100644
2691 --- a/net/ipv6/raw.c
2692 +++ b/net/ipv6/raw.c
2693 @@ -459,14 +459,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
2694 if (flags & MSG_OOB)
2695 return -EOPNOTSUPP;
2696
2697 - if (addr_len)
2698 - *addr_len=sizeof(*sin6);
2699 -
2700 if (flags & MSG_ERRQUEUE)
2701 - return ipv6_recv_error(sk, msg, len);
2702 + return ipv6_recv_error(sk, msg, len, addr_len);
2703
2704 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
2705 - return ipv6_recv_rxpmtu(sk, msg, len);
2706 + return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
2707
2708 skb = skb_recv_datagram(sk, flags, noblock, &err);
2709 if (!skb)
2710 @@ -500,6 +497,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
2711 sin6->sin6_flowinfo = 0;
2712 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
2713 IP6CB(skb)->iif);
2714 + *addr_len = sizeof(*sin6);
2715 }
2716
2717 sock_recv_ts_and_drops(msg, sk, skb);
2718 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2719 index 548a1f7c1a29..5a8bf536026c 100644
2720 --- a/net/ipv6/route.c
2721 +++ b/net/ipv6/route.c
2722 @@ -728,8 +728,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
2723 prefix = &prefix_buf;
2724 }
2725
2726 - rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr,
2727 - dev->ifindex);
2728 + if (rinfo->prefix_len == 0)
2729 + rt = rt6_get_dflt_router(gwaddr, dev);
2730 + else
2731 + rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
2732 + gwaddr, dev->ifindex);
2733
2734 if (rt && !lifetime) {
2735 ip6_del_rt(rt);
2736 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
2737 index e7b28f9bb02b..6b298dc614e3 100644
2738 --- a/net/ipv6/udp.c
2739 +++ b/net/ipv6/udp.c
2740 @@ -373,14 +373,11 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
2741 int is_udp4;
2742 bool slow;
2743
2744 - if (addr_len)
2745 - *addr_len = sizeof(struct sockaddr_in6);
2746 -
2747 if (flags & MSG_ERRQUEUE)
2748 - return ipv6_recv_error(sk, msg, len);
2749 + return ipv6_recv_error(sk, msg, len, addr_len);
2750
2751 if (np->rxpmtu && np->rxopt.bits.rxpmtu)
2752 - return ipv6_recv_rxpmtu(sk, msg, len);
2753 + return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
2754
2755 try_again:
2756 skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
2757 @@ -461,7 +458,7 @@ try_again:
2758 ipv6_iface_scope_id(&sin6->sin6_addr,
2759 IP6CB(skb)->iif);
2760 }
2761 -
2762 + *addr_len = sizeof(*sin6);
2763 }
2764 if (is_udp4) {
2765 if (inet->cmsg_flags)
2766 diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
2767 index d3cfaf9c7a08..76f165ef8d49 100644
2768 --- a/net/ipv6/udp_offload.c
2769 +++ b/net/ipv6/udp_offload.c
2770 @@ -85,7 +85,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
2771
2772 /* Check if there is enough headroom to insert fragment header. */
2773 tnl_hlen = skb_tnl_header_len(skb);
2774 - if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
2775 + if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
2776 if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
2777 goto out;
2778 }
2779 diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
2780 index f547a47d381c..e0897377b3b4 100644
2781 --- a/net/ipx/af_ipx.c
2782 +++ b/net/ipx/af_ipx.c
2783 @@ -1823,8 +1823,6 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
2784 if (skb->tstamp.tv64)
2785 sk->sk_stamp = skb->tstamp;
2786
2787 - msg->msg_namelen = sizeof(*sipx);
2788 -
2789 if (sipx) {
2790 sipx->sipx_family = AF_IPX;
2791 sipx->sipx_port = ipx->ipx_source.sock;
2792 @@ -1832,6 +1830,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
2793 sipx->sipx_network = IPX_SKB_CB(skb)->ipx_source_net;
2794 sipx->sipx_type = ipx->ipx_type;
2795 sipx->sipx_zero = 0;
2796 + msg->msg_namelen = sizeof(*sipx);
2797 }
2798 rc = copied;
2799
2800 diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
2801 index 0578d4fa00a9..a5e62ef57155 100644
2802 --- a/net/irda/af_irda.c
2803 +++ b/net/irda/af_irda.c
2804 @@ -1385,8 +1385,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,
2805
2806 IRDA_DEBUG(4, "%s()\n", __func__);
2807
2808 - msg->msg_namelen = 0;
2809 -
2810 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
2811 flags & MSG_DONTWAIT, &err);
2812 if (!skb)
2813 @@ -1451,8 +1449,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
2814 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
2815 timeo = sock_rcvtimeo(sk, noblock);
2816
2817 - msg->msg_namelen = 0;
2818 -
2819 do {
2820 int chunk;
2821 struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
2822 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
2823 index ae691651b721..276aa86f366b 100644
2824 --- a/net/iucv/af_iucv.c
2825 +++ b/net/iucv/af_iucv.c
2826 @@ -1324,8 +1324,6 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
2827 int err = 0;
2828 u32 offset;
2829
2830 - msg->msg_namelen = 0;
2831 -
2832 if ((sk->sk_state == IUCV_DISCONN) &&
2833 skb_queue_empty(&iucv->backlog_skb_q) &&
2834 skb_queue_empty(&sk->sk_receive_queue) &&
2835 diff --git a/net/key/af_key.c b/net/key/af_key.c
2836 index ab8bd2cabfa0..66f51c5a8a3a 100644
2837 --- a/net/key/af_key.c
2838 +++ b/net/key/af_key.c
2839 @@ -3623,7 +3623,6 @@ static int pfkey_recvmsg(struct kiocb *kiocb,
2840 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
2841 goto out;
2842
2843 - msg->msg_namelen = 0;
2844 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
2845 if (skb == NULL)
2846 goto out;
2847 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2848 index 571db8dd2292..da1a1cee1a08 100644
2849 --- a/net/l2tp/l2tp_ip.c
2850 +++ b/net/l2tp/l2tp_ip.c
2851 @@ -518,9 +518,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
2852 if (flags & MSG_OOB)
2853 goto out;
2854
2855 - if (addr_len)
2856 - *addr_len = sizeof(*sin);
2857 -
2858 skb = skb_recv_datagram(sk, flags, noblock, &err);
2859 if (!skb)
2860 goto out;
2861 @@ -543,6 +540,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
2862 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
2863 sin->sin_port = 0;
2864 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2865 + *addr_len = sizeof(*sin);
2866 }
2867 if (inet->cmsg_flags)
2868 ip_cmsg_recv(msg, skb);
2869 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
2870 index b8a6039314e8..e6e8408c9e36 100644
2871 --- a/net/l2tp/l2tp_ip6.c
2872 +++ b/net/l2tp/l2tp_ip6.c
2873 @@ -665,7 +665,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
2874 *addr_len = sizeof(*lsa);
2875
2876 if (flags & MSG_ERRQUEUE)
2877 - return ipv6_recv_error(sk, msg, len);
2878 + return ipv6_recv_error(sk, msg, len, addr_len);
2879
2880 skb = skb_recv_datagram(sk, flags, noblock, &err);
2881 if (!skb)
2882 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2883 index 8c46b271064a..44441c0c5037 100644
2884 --- a/net/l2tp/l2tp_ppp.c
2885 +++ b/net/l2tp/l2tp_ppp.c
2886 @@ -197,8 +197,6 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
2887 if (sk->sk_state & PPPOX_BOUND)
2888 goto end;
2889
2890 - msg->msg_namelen = 0;
2891 -
2892 err = 0;
2893 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
2894 flags & MSG_DONTWAIT, &err);
2895 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
2896 index 48aaa89253e0..88709882c464 100644
2897 --- a/net/llc/af_llc.c
2898 +++ b/net/llc/af_llc.c
2899 @@ -720,8 +720,6 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
2900 int target; /* Read at least this many bytes */
2901 long timeo;
2902
2903 - msg->msg_namelen = 0;
2904 -
2905 lock_sock(sk);
2906 copied = -ENOTCONN;
2907 if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN))
2908 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
2909 index 23b8eb53a569..21a3a475d7cd 100644
2910 --- a/net/netfilter/ipvs/ip_vs_core.c
2911 +++ b/net/netfilter/ipvs/ip_vs_core.c
2912 @@ -1131,12 +1131,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
2913 ip_vs_fill_iph_skb(af, skb, &iph);
2914 #ifdef CONFIG_IP_VS_IPV6
2915 if (af == AF_INET6) {
2916 - if (!iph.fragoffs && skb_nfct_reasm(skb)) {
2917 - struct sk_buff *reasm = skb_nfct_reasm(skb);
2918 - /* Save fw mark for coming frags */
2919 - reasm->ipvs_property = 1;
2920 - reasm->mark = skb->mark;
2921 - }
2922 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
2923 int related;
2924 int verdict = ip_vs_out_icmp_v6(skb, &related,
2925 @@ -1606,12 +1600,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
2926
2927 #ifdef CONFIG_IP_VS_IPV6
2928 if (af == AF_INET6) {
2929 - if (!iph.fragoffs && skb_nfct_reasm(skb)) {
2930 - struct sk_buff *reasm = skb_nfct_reasm(skb);
2931 - /* Save fw mark for coming frags. */
2932 - reasm->ipvs_property = 1;
2933 - reasm->mark = skb->mark;
2934 - }
2935 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
2936 int related;
2937 int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum,
2938 @@ -1663,9 +1651,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
2939 /* sorry, all this trouble for a no-hit :) */
2940 IP_VS_DBG_PKT(12, af, pp, skb, 0,
2941 "ip_vs_in: packet continues traversal as normal");
2942 - if (iph.fragoffs && !skb_nfct_reasm(skb)) {
2943 + if (iph.fragoffs) {
2944 /* Fragment that couldn't be mapped to a conn entry
2945 - * and don't have any pointer to a reasm skb
2946 * is missing module nf_defrag_ipv6
2947 */
2948 IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n");
2949 @@ -1748,38 +1735,6 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
2950 #ifdef CONFIG_IP_VS_IPV6
2951
2952 /*
2953 - * AF_INET6 fragment handling
2954 - * Copy info from first fragment, to the rest of them.
2955 - */
2956 -static unsigned int
2957 -ip_vs_preroute_frag6(unsigned int hooknum, struct sk_buff *skb,
2958 - const struct net_device *in,
2959 - const struct net_device *out,
2960 - int (*okfn)(struct sk_buff *))
2961 -{
2962 - struct sk_buff *reasm = skb_nfct_reasm(skb);
2963 - struct net *net;
2964 -
2965 - /* Skip if not a "replay" from nf_ct_frag6_output or first fragment.
2966 - * ipvs_property is set when checking first fragment
2967 - * in ip_vs_in() and ip_vs_out().
2968 - */
2969 - if (reasm)
2970 - IP_VS_DBG(2, "Fragment recv prop:%d\n", reasm->ipvs_property);
2971 - if (!reasm || !reasm->ipvs_property)
2972 - return NF_ACCEPT;
2973 -
2974 - net = skb_net(skb);
2975 - if (!net_ipvs(net)->enable)
2976 - return NF_ACCEPT;
2977 -
2978 - /* Copy stored fw mark, saved in ip_vs_{in,out} */
2979 - skb->mark = reasm->mark;
2980 -
2981 - return NF_ACCEPT;
2982 -}
2983 -
2984 -/*
2985 * AF_INET6 handler in NF_INET_LOCAL_IN chain
2986 * Schedule and forward packets from remote clients
2987 */
2988 @@ -1916,14 +1871,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
2989 .priority = 100,
2990 },
2991 #ifdef CONFIG_IP_VS_IPV6
2992 - /* After mangle & nat fetch 2:nd fragment and following */
2993 - {
2994 - .hook = ip_vs_preroute_frag6,
2995 - .owner = THIS_MODULE,
2996 - .pf = NFPROTO_IPV6,
2997 - .hooknum = NF_INET_PRE_ROUTING,
2998 - .priority = NF_IP6_PRI_NAT_DST + 1,
2999 - },
3000 /* After packet filtering, change source only for VS/NAT */
3001 {
3002 .hook = ip_vs_reply6,
3003 diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
3004 index 9ef22bdce9f1..bed5f7042529 100644
3005 --- a/net/netfilter/ipvs/ip_vs_pe_sip.c
3006 +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
3007 @@ -65,7 +65,6 @@ static int get_callid(const char *dptr, unsigned int dataoff,
3008 static int
3009 ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
3010 {
3011 - struct sk_buff *reasm = skb_nfct_reasm(skb);
3012 struct ip_vs_iphdr iph;
3013 unsigned int dataoff, datalen, matchoff, matchlen;
3014 const char *dptr;
3015 @@ -79,15 +78,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
3016 /* todo: IPv6 fragments:
3017 * I think this only should be done for the first fragment. /HS
3018 */
3019 - if (reasm) {
3020 - skb = reasm;
3021 - dataoff = iph.thoff_reasm + sizeof(struct udphdr);
3022 - } else
3023 - dataoff = iph.len + sizeof(struct udphdr);
3024 + dataoff = iph.len + sizeof(struct udphdr);
3025
3026 if (dataoff >= skb->len)
3027 return -EINVAL;
3028 - /* todo: Check if this will mess-up the reasm skb !!! /HS */
3029 retc = skb_linearize(skb);
3030 if (retc < 0)
3031 return retc;
3032 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3033 index 57ee84d21470..c9c2a8441d32 100644
3034 --- a/net/netlink/af_netlink.c
3035 +++ b/net/netlink/af_netlink.c
3036 @@ -2168,8 +2168,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
3037 }
3038 #endif
3039
3040 - msg->msg_namelen = 0;
3041 -
3042 copied = data_skb->len;
3043 if (len < copied) {
3044 msg->msg_flags |= MSG_TRUNC;
3045 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
3046 index ec0c80fde69f..13b92982a506 100644
3047 --- a/net/netrom/af_netrom.c
3048 +++ b/net/netrom/af_netrom.c
3049 @@ -1179,10 +1179,9 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
3050 sax->sax25_family = AF_NETROM;
3051 skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,
3052 AX25_ADDR_LEN);
3053 + msg->msg_namelen = sizeof(*sax);
3054 }
3055
3056 - msg->msg_namelen = sizeof(*sax);
3057 -
3058 skb_free_datagram(sk, skb);
3059
3060 release_sock(sk);
3061 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
3062 index 7522c3708723..86470cf54cee 100644
3063 --- a/net/nfc/llcp_sock.c
3064 +++ b/net/nfc/llcp_sock.c
3065 @@ -800,8 +800,6 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
3066
3067 pr_debug("%p %zu\n", sk, len);
3068
3069 - msg->msg_namelen = 0;
3070 -
3071 lock_sock(sk);
3072
3073 if (sk->sk_state == LLCP_CLOSED &&
3074 diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
3075 index 313bf1bc848a..5d11f4ac3ecb 100644
3076 --- a/net/nfc/rawsock.c
3077 +++ b/net/nfc/rawsock.c
3078 @@ -241,8 +241,6 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
3079 if (!skb)
3080 return rc;
3081
3082 - msg->msg_namelen = 0;
3083 -
3084 copied = skb->len;
3085 if (len < copied) {
3086 msg->msg_flags |= MSG_TRUNC;
3087 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3088 index a6895ab597c2..c503ad6f610f 100644
3089 --- a/net/packet/af_packet.c
3090 +++ b/net/packet/af_packet.c
3091 @@ -244,11 +244,15 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
3092 static void register_prot_hook(struct sock *sk)
3093 {
3094 struct packet_sock *po = pkt_sk(sk);
3095 +
3096 if (!po->running) {
3097 - if (po->fanout)
3098 + if (po->fanout) {
3099 __fanout_link(sk, po);
3100 - else
3101 + } else {
3102 dev_add_pack(&po->prot_hook);
3103 + rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
3104 + }
3105 +
3106 sock_hold(sk);
3107 po->running = 1;
3108 }
3109 @@ -266,10 +270,13 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
3110 struct packet_sock *po = pkt_sk(sk);
3111
3112 po->running = 0;
3113 - if (po->fanout)
3114 + if (po->fanout) {
3115 __fanout_unlink(sk, po);
3116 - else
3117 + } else {
3118 __dev_remove_pack(&po->prot_hook);
3119 + RCU_INIT_POINTER(po->cached_dev, NULL);
3120 + }
3121 +
3122 __sock_put(sk);
3123
3124 if (sync) {
3125 @@ -432,9 +439,9 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
3126
3127 pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
3128
3129 - spin_lock(&rb_queue->lock);
3130 + spin_lock_bh(&rb_queue->lock);
3131 pkc->delete_blk_timer = 1;
3132 - spin_unlock(&rb_queue->lock);
3133 + spin_unlock_bh(&rb_queue->lock);
3134
3135 prb_del_retire_blk_timer(pkc);
3136 }
3137 @@ -2041,12 +2048,24 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
3138 return tp_len;
3139 }
3140
3141 +static struct net_device *packet_cached_dev_get(struct packet_sock *po)
3142 +{
3143 + struct net_device *dev;
3144 +
3145 + rcu_read_lock();
3146 + dev = rcu_dereference(po->cached_dev);
3147 + if (dev)
3148 + dev_hold(dev);
3149 + rcu_read_unlock();
3150 +
3151 + return dev;
3152 +}
3153 +
3154 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3155 {
3156 struct sk_buff *skb;
3157 struct net_device *dev;
3158 __be16 proto;
3159 - bool need_rls_dev = false;
3160 int err, reserve = 0;
3161 void *ph;
3162 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
3163 @@ -2059,7 +2078,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3164 mutex_lock(&po->pg_vec_lock);
3165
3166 if (saddr == NULL) {
3167 - dev = po->prot_hook.dev;
3168 + dev = packet_cached_dev_get(po);
3169 proto = po->num;
3170 addr = NULL;
3171 } else {
3172 @@ -2073,19 +2092,17 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
3173 proto = saddr->sll_protocol;
3174 addr = saddr->sll_addr;
3175 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
3176 - need_rls_dev = true;
3177 }
3178
3179 err = -ENXIO;
3180 if (unlikely(dev == NULL))
3181 goto out;
3182 -
3183 - reserve = dev->hard_header_len;
3184 -
3185 err = -ENETDOWN;
3186 if (unlikely(!(dev->flags & IFF_UP)))
3187 goto out_put;
3188
3189 + reserve = dev->hard_header_len;
3190 +
3191 size_max = po->tx_ring.frame_size
3192 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
3193
3194 @@ -2162,8 +2179,7 @@ out_status:
3195 __packet_set_status(po, ph, status);
3196 kfree_skb(skb);
3197 out_put:
3198 - if (need_rls_dev)
3199 - dev_put(dev);
3200 + dev_put(dev);
3201 out:
3202 mutex_unlock(&po->pg_vec_lock);
3203 return err;
3204 @@ -2201,7 +2217,6 @@ static int packet_snd(struct socket *sock,
3205 struct sk_buff *skb;
3206 struct net_device *dev;
3207 __be16 proto;
3208 - bool need_rls_dev = false;
3209 unsigned char *addr;
3210 int err, reserve = 0;
3211 struct virtio_net_hdr vnet_hdr = { 0 };
3212 @@ -2217,7 +2232,7 @@ static int packet_snd(struct socket *sock,
3213 */
3214
3215 if (saddr == NULL) {
3216 - dev = po->prot_hook.dev;
3217 + dev = packet_cached_dev_get(po);
3218 proto = po->num;
3219 addr = NULL;
3220 } else {
3221 @@ -2229,19 +2244,17 @@ static int packet_snd(struct socket *sock,
3222 proto = saddr->sll_protocol;
3223 addr = saddr->sll_addr;
3224 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3225 - need_rls_dev = true;
3226 }
3227
3228 err = -ENXIO;
3229 - if (dev == NULL)
3230 + if (unlikely(dev == NULL))
3231 goto out_unlock;
3232 - if (sock->type == SOCK_RAW)
3233 - reserve = dev->hard_header_len;
3234 -
3235 err = -ENETDOWN;
3236 - if (!(dev->flags & IFF_UP))
3237 + if (unlikely(!(dev->flags & IFF_UP)))
3238 goto out_unlock;
3239
3240 + if (sock->type == SOCK_RAW)
3241 + reserve = dev->hard_header_len;
3242 if (po->has_vnet_hdr) {
3243 vnet_hdr_len = sizeof(vnet_hdr);
3244
3245 @@ -2375,15 +2388,14 @@ static int packet_snd(struct socket *sock,
3246 if (err > 0 && (err = net_xmit_errno(err)) != 0)
3247 goto out_unlock;
3248
3249 - if (need_rls_dev)
3250 - dev_put(dev);
3251 + dev_put(dev);
3252
3253 return len;
3254
3255 out_free:
3256 kfree_skb(skb);
3257 out_unlock:
3258 - if (dev && need_rls_dev)
3259 + if (dev)
3260 dev_put(dev);
3261 out:
3262 return err;
3263 @@ -2603,6 +2615,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
3264 po = pkt_sk(sk);
3265 sk->sk_family = PF_PACKET;
3266 po->num = proto;
3267 + RCU_INIT_POINTER(po->cached_dev, NULL);
3268
3269 sk->sk_destruct = packet_sock_destruct;
3270 sk_refcnt_debug_inc(sk);
3271 @@ -2694,7 +2707,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
3272 struct sock *sk = sock->sk;
3273 struct sk_buff *skb;
3274 int copied, err;
3275 - struct sockaddr_ll *sll;
3276 int vnet_hdr_len = 0;
3277
3278 err = -EINVAL;
3279 @@ -2777,22 +2789,10 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
3280 goto out_free;
3281 }
3282
3283 - /*
3284 - * If the address length field is there to be filled in, we fill
3285 - * it in now.
3286 + /* You lose any data beyond the buffer you gave. If it worries
3287 + * a user program they can ask the device for its MTU
3288 + * anyway.
3289 */
3290 -
3291 - sll = &PACKET_SKB_CB(skb)->sa.ll;
3292 - if (sock->type == SOCK_PACKET)
3293 - msg->msg_namelen = sizeof(struct sockaddr_pkt);
3294 - else
3295 - msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);
3296 -
3297 - /*
3298 - * You lose any data beyond the buffer you gave. If it worries a
3299 - * user program they can ask the device for its MTU anyway.
3300 - */
3301 -
3302 copied = skb->len;
3303 if (copied > len) {
3304 copied = len;
3305 @@ -2805,9 +2805,20 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
3306
3307 sock_recv_ts_and_drops(msg, sk, skb);
3308
3309 - if (msg->msg_name)
3310 + if (msg->msg_name) {
3311 + /* If the address length field is there to be filled
3312 + * in, we fill it in now.
3313 + */
3314 + if (sock->type == SOCK_PACKET) {
3315 + msg->msg_namelen = sizeof(struct sockaddr_pkt);
3316 + } else {
3317 + struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3318 + msg->msg_namelen = sll->sll_halen +
3319 + offsetof(struct sockaddr_ll, sll_addr);
3320 + }
3321 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3322 msg->msg_namelen);
3323 + }
3324
3325 if (pkt_sk(sk)->auxdata) {
3326 struct tpacket_auxdata aux;
3327 diff --git a/net/packet/internal.h b/net/packet/internal.h
3328 index c4e4b4561207..1035fa2d909c 100644
3329 --- a/net/packet/internal.h
3330 +++ b/net/packet/internal.h
3331 @@ -113,6 +113,7 @@ struct packet_sock {
3332 unsigned int tp_loss:1;
3333 unsigned int tp_tx_has_off:1;
3334 unsigned int tp_tstamp;
3335 + struct net_device __rcu *cached_dev;
3336 struct packet_type prot_hook ____cacheline_aligned_in_smp;
3337 };
3338
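The af_packet.c and internal.h hunks above replace the ad-hoc need_rls_dev bookkeeping with an RCU-cached device pointer: register_prot_hook() publishes prot_hook.dev, __unregister_prot_hook() clears it, and the send paths take their own reference through packet_cached_dev_get(), so every exit path can unconditionally dev_put(). A generic sketch of that publish/read pattern, with invented names rather than the patch's own:

#include <linux/rcupdate.h>
#include <linux/netdevice.h>

/* Illustrative only: cache a device pointer that readers may use
 * without taking the writer's lock.  Writers publish or clear the
 * pointer while holding whatever lock serialises bind/unbind;
 * readers grab their own reference before leaving the RCU section.
 */
struct example_sock {
	struct net_device __rcu *cached_dev;
};

static void example_publish_dev(struct example_sock *es,
				struct net_device *dev)
{
	/* clear with RCU_INIT_POINTER(es->cached_dev, NULL) on unbind */
	rcu_assign_pointer(es->cached_dev, dev);
}

static struct net_device *example_get_dev(struct example_sock *es)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(es->cached_dev);
	if (dev)
		dev_hold(dev);		/* reference survives rcu_read_unlock() */
	rcu_read_unlock();

	return dev;			/* caller must dev_put() when done */
}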
3339 diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
3340 index 12c30f3e643e..38946b26e471 100644
3341 --- a/net/phonet/datagram.c
3342 +++ b/net/phonet/datagram.c
3343 @@ -139,9 +139,6 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
3344 MSG_CMSG_COMPAT))
3345 goto out_nofree;
3346
3347 - if (addr_len)
3348 - *addr_len = sizeof(sa);
3349 -
3350 skb = skb_recv_datagram(sk, flags, noblock, &rval);
3351 if (skb == NULL)
3352 goto out_nofree;
3353 @@ -162,8 +159,10 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
3354
3355 rval = (flags & MSG_TRUNC) ? skb->len : copylen;
3356
3357 - if (msg->msg_name != NULL)
3358 - memcpy(msg->msg_name, &sa, sizeof(struct sockaddr_pn));
3359 + if (msg->msg_name != NULL) {
3360 + memcpy(msg->msg_name, &sa, sizeof(sa));
3361 + *addr_len = sizeof(sa);
3362 + }
3363
3364 out:
3365 skb_free_datagram(sk, skb);
3366 diff --git a/net/rds/recv.c b/net/rds/recv.c
3367 index 9f0f17cf6bf9..de339b24ca14 100644
3368 --- a/net/rds/recv.c
3369 +++ b/net/rds/recv.c
3370 @@ -410,8 +410,6 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
3371
3372 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
3373
3374 - msg->msg_namelen = 0;
3375 -
3376 if (msg_flags & MSG_OOB)
3377 goto out;
3378
3379 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
3380 index 9c8347451597..abf0ad6311d0 100644
3381 --- a/net/rose/af_rose.c
3382 +++ b/net/rose/af_rose.c
3383 @@ -1216,7 +1216,6 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
3384 {
3385 struct sock *sk = sock->sk;
3386 struct rose_sock *rose = rose_sk(sk);
3387 - struct sockaddr_rose *srose = (struct sockaddr_rose *)msg->msg_name;
3388 size_t copied;
3389 unsigned char *asmptr;
3390 struct sk_buff *skb;
3391 @@ -1252,8 +1251,11 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
3392
3393 skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
3394
3395 - if (srose != NULL) {
3396 - memset(srose, 0, msg->msg_namelen);
3397 + if (msg->msg_name) {
3398 + struct sockaddr_rose *srose;
3399 +
3400 + memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
3401 + srose = msg->msg_name;
3402 srose->srose_family = AF_ROSE;
3403 srose->srose_addr = rose->dest_addr;
3404 srose->srose_call = rose->dest_call;
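With msg_namelen now zero on entry, the old memset(srose, 0, msg->msg_namelen) in rose_recvmsg would have cleared nothing, so the hunk above zeroes the largest address form explicitly before filling it. A hedged sketch of the same idea; the helper name is invented for illustration:

#include <linux/string.h>
#include <linux/socket.h>
#include <net/rose.h>

/* Illustrative only: when the reported address has a variable-size
 * "full" form, clear the whole largest form first so no uninitialised
 * kernel bytes can reach user space, then report the size that was
 * actually written.
 */
static void example_fill_rose_name(struct msghdr *msg,
				   const struct rose_sock *rose)
{
	struct full_sockaddr_rose *srose = msg->msg_name;

	if (!srose)
		return;

	memset(srose, 0, sizeof(*srose));
	srose->srose_family = AF_ROSE;
	srose->srose_addr   = rose->dest_addr;
	srose->srose_call   = rose->dest_call;
	msg->msg_namelen    = sizeof(*srose);
}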
3405 diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c
3406 index 4b48687c3890..898492a8d61b 100644
3407 --- a/net/rxrpc/ar-recvmsg.c
3408 +++ b/net/rxrpc/ar-recvmsg.c
3409 @@ -143,10 +143,13 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock,
3410
3411 /* copy the peer address and timestamp */
3412 if (!continue_call) {
3413 - if (msg->msg_name && msg->msg_namelen > 0)
3414 + if (msg->msg_name) {
3415 + size_t len =
3416 + sizeof(call->conn->trans->peer->srx);
3417 memcpy(msg->msg_name,
3418 - &call->conn->trans->peer->srx,
3419 - sizeof(call->conn->trans->peer->srx));
3420 + &call->conn->trans->peer->srx, len);
3421 + msg->msg_namelen = len;
3422 + }
3423 sock_recv_ts_and_drops(msg, &rx->sk, skb);
3424 }
3425
3426 diff --git a/net/socket.c b/net/socket.c
3427 index 9c467b2afc84..ac72efc3d965 100644
3428 --- a/net/socket.c
3429 +++ b/net/socket.c
3430 @@ -215,12 +215,13 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
3431 int err;
3432 int len;
3433
3434 + BUG_ON(klen > sizeof(struct sockaddr_storage));
3435 err = get_user(len, ulen);
3436 if (err)
3437 return err;
3438 if (len > klen)
3439 len = klen;
3440 - if (len < 0 || len > sizeof(struct sockaddr_storage))
3441 + if (len < 0)
3442 return -EINVAL;
3443 if (len) {
3444 if (audit_sockaddr(klen, kaddr))
3445 @@ -1832,8 +1833,10 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
3446 msg.msg_iov = &iov;
3447 iov.iov_len = size;
3448 iov.iov_base = ubuf;
3449 - msg.msg_name = (struct sockaddr *)&address;
3450 - msg.msg_namelen = sizeof(address);
3451 + /* Save some cycles and don't copy the address if not needed */
3452 + msg.msg_name = addr ? (struct sockaddr *)&address : NULL;
3453 + /* We assume all kernel code knows the size of sockaddr_storage */
3454 + msg.msg_namelen = 0;
3455 if (sock->file->f_flags & O_NONBLOCK)
3456 flags |= MSG_DONTWAIT;
3457 err = sock_recvmsg(sock, &msg, size, flags);
3458 @@ -1962,7 +1965,7 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
3459 if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
3460 return -EFAULT;
3461 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
3462 - return -EINVAL;
3463 + kmsg->msg_namelen = sizeof(struct sockaddr_storage);
3464 return 0;
3465 }
3466
3467 @@ -2213,16 +2216,14 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
3468 goto out;
3469 }
3470
3471 - /*
3472 - * Save the user-mode address (verify_iovec will change the
3473 - * kernel msghdr to use the kernel address space)
3474 + /* Save the user-mode address (verify_iovec will change the
3475 + * kernel msghdr to use the kernel address space)
3476 */
3477 -
3478 uaddr = (__force void __user *)msg_sys->msg_name;
3479 uaddr_len = COMPAT_NAMELEN(msg);
3480 - if (MSG_CMSG_COMPAT & flags) {
3481 + if (MSG_CMSG_COMPAT & flags)
3482 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
3483 - } else
3484 + else
3485 err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
3486 if (err < 0)
3487 goto out_freeiov;
3488 @@ -2231,6 +2232,9 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
3489 cmsg_ptr = (unsigned long)msg_sys->msg_control;
3490 msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
3491
3492 + /* We assume all kernel code knows the size of sockaddr_storage */
3493 + msg_sys->msg_namelen = 0;
3494 +
3495 if (sock->file->f_flags & O_NONBLOCK)
3496 flags |= MSG_DONTWAIT;
3497 err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
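The net/socket.c hunks are the generic-layer half of the change: recvfrom() only passes an address buffer when the caller asked for one, ___sys_recvmsg() clears msg_namelen before calling the protocol handler, move_addr_to_user() treats an over-long kernel length as a bug rather than silently accepting it, and copy_msghdr_from_user() clamps an oversized user-supplied msg_namelen instead of failing. From user space the visible effect is that an unset source address is reported as length 0; a small illustrative fragment, not taken from the patch:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>

/* Illustrative user-space view: after this patch a protocol that does
 * not fill in a source address makes recvfrom() report an address
 * length of 0 instead of possibly leaving stale or uninitialised
 * bytes in 'peer'.
 */
static ssize_t example_recv(int fd, void *buf, size_t len)
{
	struct sockaddr_storage peer;
	socklen_t peerlen = sizeof(peer);
	ssize_t n;

	memset(&peer, 0, sizeof(peer));
	n = recvfrom(fd, buf, len, 0, (struct sockaddr *)&peer, &peerlen);
	if (n >= 0 && peerlen == 0)
		fprintf(stderr, "no source address reported\n");

	return n;
}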
3498 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
3499 index 7e26ad416af1..2b1d7c2d677d 100644
3500 --- a/net/tipc/socket.c
3501 +++ b/net/tipc/socket.c
3502 @@ -905,9 +905,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
3503 goto exit;
3504 }
3505
3506 - /* will be updated in set_orig_addr() if needed */
3507 - m->msg_namelen = 0;
3508 -
3509 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
3510 restart:
3511
3512 @@ -1017,9 +1014,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
3513 goto exit;
3514 }
3515
3516 - /* will be updated in set_orig_addr() if needed */
3517 - m->msg_namelen = 0;
3518 -
3519 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
3520 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
3521
3522 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3523 index 0258072a518f..8664ad0d5797 100644
3524 --- a/net/unix/af_unix.c
3525 +++ b/net/unix/af_unix.c
3526 @@ -1761,7 +1761,6 @@ static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
3527 {
3528 struct unix_sock *u = unix_sk(sk);
3529
3530 - msg->msg_namelen = 0;
3531 if (u->addr) {
3532 msg->msg_namelen = u->addr->len;
3533 memcpy(msg->msg_name, u->addr->name, u->addr->len);
3534 @@ -1785,8 +1784,6 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
3535 if (flags&MSG_OOB)
3536 goto out;
3537
3538 - msg->msg_namelen = 0;
3539 -
3540 err = mutex_lock_interruptible(&u->readlock);
3541 if (err) {
3542 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
3543 @@ -1926,8 +1923,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3544 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
3545 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
3546
3547 - msg->msg_namelen = 0;
3548 -
3549 /* Lock the socket to prevent queue disordering
3550 * while sleeps in memcpy_tomsg
3551 */
3552 diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
3553 index 3f77f42a3b58..9b88693bcc99 100644
3554 --- a/net/vmw_vsock/af_vsock.c
3555 +++ b/net/vmw_vsock/af_vsock.c
3556 @@ -1670,8 +1670,6 @@ vsock_stream_recvmsg(struct kiocb *kiocb,
3557 vsk = vsock_sk(sk);
3558 err = 0;
3559
3560 - msg->msg_namelen = 0;
3561 -
3562 lock_sock(sk);
3563
3564 if (sk->sk_state != SS_CONNECTED) {
3565 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
3566 index daff75200e25..62bbf7d73980 100644
3567 --- a/net/vmw_vsock/vmci_transport.c
3568 +++ b/net/vmw_vsock/vmci_transport.c
3569 @@ -1746,8 +1746,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,
3570 if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
3571 return -EOPNOTSUPP;
3572
3573 - msg->msg_namelen = 0;
3574 -
3575 /* Retrieve the head sk_buff from the socket's receive queue. */
3576 err = 0;
3577 skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
3578 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
3579 index 22c88d2e6846..f96af3b96322 100644
3580 --- a/net/x25/af_x25.c
3581 +++ b/net/x25/af_x25.c
3582 @@ -1340,10 +1340,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock,
3583 if (sx25) {
3584 sx25->sx25_family = AF_X25;
3585 sx25->sx25_addr = x25->dest_addr;
3586 + msg->msg_namelen = sizeof(*sx25);
3587 }
3588
3589 - msg->msg_namelen = sizeof(struct sockaddr_x25);
3590 -
3591 x25_check_rbuf(sk);
3592 rc = copied;
3593 out_free_dgram: