Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.11/0106-3.11.7-all-fixes.patch



Revision 2317
Mon Nov 18 11:55:25 2013 UTC by niro
File size: 79622 bytes
-linux-3.11.7
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 10742902146f..b522883a5a84 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -478,6 +478,15 @@ tcp_syn_retries - INTEGER
 tcp_timestamps - BOOLEAN
	Enable timestamps as defined in RFC1323.

+tcp_min_tso_segs - INTEGER
+	Minimal number of segments per TSO frame.
+	Since linux-3.12, TCP does an automatic sizing of TSO frames,
+	depending on flow rate, instead of filling 64Kbytes packets.
+	For specific usages, it's possible to force TCP to build big
+	TSO frames. Note that TCP stack might split too big TSO packets
+	if available window is too small.
+	Default: 2
+
 tcp_tso_win_divisor - INTEGER
	This allows control over what percentage of the congestion window
	can be consumed by a single TSO frame.
diff --git a/Makefile b/Makefile
index e87ba831bd2d..686adf7f2035 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 11
-SUBLEVEL = 6
+SUBLEVEL = 7
 EXTRAVERSION =
 NAME = Linux for Workgroups

diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts
index ff1aea0ee043..72693a69f830 100644
--- a/arch/arm/boot/dts/integratorcp.dts
+++ b/arch/arm/boot/dts/integratorcp.dts
@@ -9,11 +9,6 @@
	model = "ARM Integrator/CP";
	compatible = "arm,integrator-cp";

-	aliases {
-		arm,timer-primary = &timer2;
-		arm,timer-secondary = &timer1;
-	};
-
	chosen {
		bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk";
	};
@@ -24,14 +19,18 @@
	};

	timer0: timer@13000000 {
+		/* TIMER0 runs @ 25MHz */
		compatible = "arm,integrator-cp-timer";
+		status = "disabled";
	};

	timer1: timer@13000100 {
+		/* TIMER1 runs @ 1MHz */
		compatible = "arm,integrator-cp-timer";
	};

	timer2: timer@13000200 {
+		/* TIMER2 runs @ 1MHz */
		compatible = "arm,integrator-cp-timer";
	};

diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e8092..73ddd7239b33 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
 {
+	if (n == 0)
+		return;
+
	if (i + n > SYSCALL_MAX_ARGS) {
		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
					 unsigned int i, unsigned int n,
					 const unsigned long *args)
 {
+	if (n == 0)
+		return;
+
	if (i + n > SYSCALL_MAX_ARGS) {
		pr_warning("%s called with max args %d, handling only %d\n",
			   __func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 08ae128cce9b..c73fc2b74de2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	/* If cn_netlink_send() failed, the data is not sent */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }
@@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	ev->what = which_id;
	ev->event_data.id.process_pid = task->pid;
	ev->event_data.id.process_tgid = task->tgid;
@@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

@@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
+	memset(&ev->event_data, 0, sizeof(ev->event_data));
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
+	msg->flags = 0; /* not used */
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 6ecfa758942c..0daa11e418b1 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb)
 static void cn_rx_skb(struct sk_buff *__skb)
 {
	struct nlmsghdr *nlh;
-	int err;
	struct sk_buff *skb;
+	int len, err;

	skb = skb_get(__skb);

	if (skb->len >= NLMSG_HDRLEN) {
		nlh = nlmsg_hdr(skb);
+		len = nlmsg_len(nlh);

-		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
+		if (len < (int)sizeof(struct cn_msg) ||
		    skb->len < nlh->nlmsg_len ||
-		    nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) {
+		    len > CONNECTOR_MAX_MSG_SIZE) {
			kfree_skb(skb);
			return;
		}
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 761c4705dfbb..baf301f36b42 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
	struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	u32 base, pin, mask;
-	unsigned long reg, pending;
+	unsigned long reg, ena, pending;
	unsigned virq;

	/* check from GPIO controller which pin triggered the interrupt */
	for (base = 0; base < lg->chip.ngpio; base += 32) {
		reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
+		ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);

-		while ((pending = inl(reg))) {
+		while ((pending = (inl(reg) & inl(ena)))) {
			pin = __ffs(pending);
			mask = BIT(pin);
			/* Clear before handling so we don't lose an edge */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index cd82eb44e4c4..7c9f053556f2 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,

	desc = &priv->hw[priv->head];

+	/* Initialize the DMA buffer */
+	memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer));
+
	/* Initialize the descriptor */
	memset(desc, 0, sizeof(struct ismt_desc));
	desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write);
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 4caa8e6d59d7..2d2b1b7588d7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
 }

+static void skip_metadata(struct pstore *ps)
+{
+	uint32_t stride = ps->exceptions_per_area + 1;
+	chunk_t next_free = ps->next_free;
+	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
+		ps->next_free++;
+}
+
 /*
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
@@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps,

	ps->current_area--;

+	skip_metadata(ps);
+
	return 0;
 }

@@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
 {
	struct pstore *ps = get_info(store);
-	uint32_t stride;
-	chunk_t next_free;
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room ? */
@@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
-	stride = (ps->exceptions_per_area + 1);
-	next_free = ++ps->next_free;
-	if (sector_div(next_free, stride) == 1)
-		ps->next_free++;
+	ps->next_free++;
+	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f9cba4123c66..1870c4731a57 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev)
	size_t size;

	size = nla_total_size(sizeof(u32));   /* IFLA_CAN_STATE */
-	size += sizeof(struct can_ctrlmode);  /* IFLA_CAN_CTRLMODE */
+	size += nla_total_size(sizeof(struct can_ctrlmode));  /* IFLA_CAN_CTRLMODE */
	size += nla_total_size(sizeof(u32));  /* IFLA_CAN_RESTART_MS */
-	size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
-	size += sizeof(struct can_clock);     /* IFLA_CAN_CLOCK */
+	size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */
+	size += nla_total_size(sizeof(struct can_clock));     /* IFLA_CAN_CLOCK */
	if (priv->do_get_berr_counter)        /* IFLA_CAN_BERR_COUNTER */
-		size += sizeof(struct can_berr_counter);
+		size += nla_total_size(sizeof(struct can_berr_counter));
	if (priv->bittiming_const)            /* IFLA_CAN_BITTIMING_CONST */
-		size += sizeof(struct can_bittiming_const);
+		size += nla_total_size(sizeof(struct can_bittiming_const));

	return size;
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0cc26110868d..4b0877e68653 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -676,6 +676,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		}
	}
 #endif
+	skb_record_rx_queue(skb, fp->rx_queue);
	napi_gro_receive(&fp->napi, skb);
 }

diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 8ec5d74ad44d..13ac104bbf4b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1150,7 +1150,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
-		req->if_id = cpu_to_le16(adapter->if_handle);
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
@@ -1158,6 +1157,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
			req->hdr.version = 2;
	}

+	if (req->hdr.version > 0)
+		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958f..39334d428891 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1131,15 +1131,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
	p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
	p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
	spin_unlock_bh(&mp->mib_counters_lock);
-
-	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }

 static void mib_counters_timer_wrapper(unsigned long _mp)
 {
	struct mv643xx_eth_private *mp = (void *)_mp;
-
	mib_counters_update(mp);
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
 }


@@ -2237,6 +2235,7 @@ static int mv643xx_eth_open(struct net_device *dev)
		mp->int_mask |= INT_TX_END_0 << i;
	}

+	add_timer(&mp->mib_counters_timer);
	port_start(mp);

	wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX);
@@ -2916,7 +2915,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
	mp->mib_counters_timer.data = (unsigned long)mp;
	mp->mib_counters_timer.function = mib_counters_timer_wrapper;
	mp->mib_counters_timer.expires = jiffies + 30 * HZ;
-	add_timer(&mp->mib_counters_timer);

	spin_lock_init(&mp->mib_counters_lock);

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index dec455c8f627..afe2efa69c86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -70,14 +70,15 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
		put_page(page);
		return -ENOMEM;
	}
-	page_alloc->size = PAGE_SIZE << order;
+	page_alloc->page_size = PAGE_SIZE << order;
	page_alloc->page = page;
	page_alloc->dma = dma;
-	page_alloc->offset = frag_info->frag_align;
+	page_alloc->page_offset = frag_info->frag_align;
	/* Not doing get_page() for each frag is a big win
	 * on asymetric workloads.
	 */
-	atomic_set(&page->_count, page_alloc->size / frag_info->frag_stride);
+	atomic_set(&page->_count,
+		   page_alloc->page_size / frag_info->frag_stride);
	return 0;
 }

@@ -96,16 +97,19 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
	for (i = 0; i < priv->num_frags; i++) {
		frag_info = &priv->frag_info[i];
		page_alloc[i] = ring_alloc[i];
-		page_alloc[i].offset += frag_info->frag_stride;
-		if (page_alloc[i].offset + frag_info->frag_stride <= ring_alloc[i].size)
+		page_alloc[i].page_offset += frag_info->frag_stride;
+
+		if (page_alloc[i].page_offset + frag_info->frag_stride <=
+		    ring_alloc[i].page_size)
			continue;
+
		if (mlx4_alloc_pages(priv, &page_alloc[i], frag_info, gfp))
			goto out;
	}

	for (i = 0; i < priv->num_frags; i++) {
		frags[i] = ring_alloc[i];
-		dma = ring_alloc[i].dma + ring_alloc[i].offset;
+		dma = ring_alloc[i].dma + ring_alloc[i].page_offset;
		ring_alloc[i] = page_alloc[i];
		rx_desc->data[i].addr = cpu_to_be64(dma);
	}
@@ -117,7 +121,7 @@ out:
		frag_info = &priv->frag_info[i];
		if (page_alloc[i].page != ring_alloc[i].page) {
			dma_unmap_page(priv->ddev, page_alloc[i].dma,
-				page_alloc[i].size, PCI_DMA_FROMDEVICE);
+				page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
			page = page_alloc[i].page;
			atomic_set(&page->_count, 1);
			put_page(page);
@@ -131,10 +135,12 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
			      int i)
 {
	const struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+	u32 next_frag_end = frags[i].page_offset + 2 * frag_info->frag_stride;
+

-	if (frags[i].offset + frag_info->frag_stride > frags[i].size)
-		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].size,
-			       PCI_DMA_FROMDEVICE);
+	if (next_frag_end > frags[i].page_size)
+		dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
+			       PCI_DMA_FROMDEVICE);

	if (frags[i].page)
		put_page(frags[i].page);
@@ -161,7 +167,7 @@ out:

		page_alloc = &ring->page_alloc[i];
		dma_unmap_page(priv->ddev, page_alloc->dma,
-			       page_alloc->size, PCI_DMA_FROMDEVICE);
+			       page_alloc->page_size, PCI_DMA_FROMDEVICE);
		page = page_alloc->page;
		atomic_set(&page->_count, 1);
		put_page(page);
@@ -184,10 +190,11 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
		       i, page_count(page_alloc->page));

		dma_unmap_page(priv->ddev, page_alloc->dma,
-				page_alloc->size, PCI_DMA_FROMDEVICE);
-		while (page_alloc->offset + frag_info->frag_stride < page_alloc->size) {
+				page_alloc->page_size, PCI_DMA_FROMDEVICE);
+		while (page_alloc->page_offset + frag_info->frag_stride <
+		       page_alloc->page_size) {
			put_page(page_alloc->page);
-			page_alloc->offset += frag_info->frag_stride;
+			page_alloc->page_offset += frag_info->frag_stride;
		}
		page_alloc->page = NULL;
	}
@@ -478,7 +485,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
		/* Save page reference in skb */
		__skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
		skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
-		skb_frags_rx[nr].page_offset = frags[nr].offset;
+		skb_frags_rx[nr].page_offset = frags[nr].page_offset;
		skb->truesize += frag_info->frag_stride;
		frags[nr].page = NULL;
	}
@@ -517,7 +524,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,

	/* Get pointer to first fragment so we could copy the headers into the
	 * (linear part of the) skb */
-	va = page_address(frags[0].page) + frags[0].offset;
+	va = page_address(frags[0].page) + frags[0].page_offset;

	if (length <= SMALL_PACKET_SIZE) {
		/* We are copying all relevant data to the skb - temporarily
@@ -645,7 +652,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);
			ethh = (struct ethhdr *)(page_address(frags[0].page) +
-						 frags[0].offset);
+						 frags[0].page_offset);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 5e0aa569306a..bf06e3610d27 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -237,8 +237,8 @@ struct mlx4_en_tx_desc {
 struct mlx4_en_rx_alloc {
	struct page *page;
	dma_addr_t dma;
-	u32 offset;
-	u32 size;
+	u32 page_offset;
+	u32 page_size;
 };

 struct mlx4_en_tx_ring {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 1a222bce4bd7..45c167fc96c5 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
		    netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
			emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
-		}
-		if (!netdev_mc_empty(ndev)) {
+		} else if (!netdev_mc_empty(ndev)) {
			struct netdev_hw_addr *ha;

			mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d2a90a62649..1d01534c2020 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -916,7 +916,9 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
		return -EINVAL;
	} else {
		vi->curr_queue_pairs = queue_pairs;
-		schedule_delayed_work(&vi->refill, 0);
+		/* virtnet_open() will refill when device is going to up. */
+		if (dev->flags & IFF_UP)
+			schedule_delayed_work(&vi->refill, 0);
	}

	return 0;
@@ -1094,6 +1096,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
 {
	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);

+	mutex_lock(&vi->config_lock);
+
+	if (!vi->config_enable)
+		goto done;
+
	switch(action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
@@ -1106,6 +1113,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb,
	default:
		break;
	}
+
+done:
+	mutex_unlock(&vi->config_lock);
	return NOTIFY_OK;
 }

@@ -1706,7 +1716,9 @@ static int virtnet_restore(struct virtio_device *vdev)
	vi->config_enable = true;
	mutex_unlock(&vi->config_lock);

+	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
+	rtnl_unlock();

	return 0;
 }
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 3f0c4f268751..bcfff0d62de4 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
	}

	i = port->index;
+	memset(&sync, 0, sizeof(sync));
	sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
	/* Lucky card and linux use same encoding here */
	sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 6a24a5a70cc7..4c0a69779b89 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
+		memset(&line, 0, sizeof(line));
		line.clock_type = get_status(port)->clocking;
		line.clock_rate = 0;
		line.loopback = 0;
diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
index 899cad34ccd3..755a0c8edfe1 100644
--- a/drivers/net/wireless/cw1200/cw1200_spi.c
+++ b/drivers/net/wireless/cw1200/cw1200_spi.c
@@ -237,7 +237,9 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
	struct hwbus_priv *self = dev_id;

	if (self->core) {
+		cw1200_spi_lock(self);
		cw1200_irq_handler(self->core);
+		cw1200_spi_unlock(self);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77ee9c5b..4d9a5e70c992 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -120,6 +120,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
		   unsigned long rx_ring_ref, unsigned int tx_evtchn,
		   unsigned int rx_evtchn);
 void xenvif_disconnect(struct xenvif *vif);
+void xenvif_free(struct xenvif *vif);

 void xenvif_get(struct xenvif *vif);
 void xenvif_put(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db0389d..73336c144d92 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -326,6 +326,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
	}

	netdev_dbg(dev, "Successfully created xenvif\n");
+
+	__module_get(THIS_MODULE);
+
	return vif;
 }

@@ -413,12 +416,6 @@ void xenvif_carrier_off(struct xenvif *vif)

 void xenvif_disconnect(struct xenvif *vif)
 {
-	/* Disconnect funtion might get called by generic framework
-	 * even before vif connects, so we need to check if we really
-	 * need to do a module_put.
-	 */
-	int need_module_put = 0;
-
	if (netif_carrier_ok(vif->dev))
		xenvif_carrier_off(vif);

@@ -432,18 +429,16 @@ void xenvif_disconnect(struct xenvif *vif)
			unbind_from_irqhandler(vif->tx_irq, vif);
			unbind_from_irqhandler(vif->rx_irq, vif);
		}
-		/* vif->irq is valid, we had a module_get in
-		 * xenvif_connect.
-		 */
-		need_module_put = 1;
	}

-	unregister_netdev(vif->dev);
-
	xen_netbk_unmap_frontend_rings(vif);
+}
+
+void xenvif_free(struct xenvif *vif)
+{
+	unregister_netdev(vif->dev);

	free_netdev(vif->dev);

-	if (need_module_put)
-		module_put(THIS_MODULE);
+	module_put(THIS_MODULE);
 }
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1fe48fe364ed..a53782ef1540 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -42,7 +42,7 @@ static int netback_remove(struct xenbus_device *dev)
	if (be->vif) {
		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
-		xenvif_disconnect(be->vif);
+		xenvif_free(be->vif);
		be->vif = NULL;
	}
	kfree(be);
@@ -213,9 +213,18 @@ static void disconnect_backend(struct xenbus_device *dev)
 {
	struct backend_info *be = dev_get_drvdata(&dev->dev);

+	if (be->vif)
+		xenvif_disconnect(be->vif);
+}
+
+static void destroy_backend(struct xenbus_device *dev)
+{
+	struct backend_info *be = dev_get_drvdata(&dev->dev);
+
	if (be->vif) {
+		kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
-		xenvif_disconnect(be->vif);
+		xenvif_free(be->vif);
		be->vif = NULL;
	}
 }
@@ -246,14 +255,11 @@ static void frontend_changed(struct xenbus_device *dev,
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;
-		backend_create_xenvif(be);
		if (be->vif)
			connect(be);
		break;

	case XenbusStateClosing:
-		if (be->vif)
-			kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
		disconnect_backend(dev);
		xenbus_switch_state(dev, XenbusStateClosing);
		break;
@@ -262,6 +268,7 @@ static void frontend_changed(struct xenbus_device *dev,
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
+		destroy_backend(dev);
		/* fall through if not online */
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 48af43de3467..007c2c1ee8c0 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -559,12 +559,13 @@ static int vt8500_serial_probe(struct platform_device *pdev)
	if (!mmres || !irqres)
		return -ENODEV;

-	if (np)
+	if (np) {
		port = of_alias_get_id(np, "serial");
		if (port >= VT8500_MAX_PORTS)
			port = -1;
-	else
+	} else {
		port = -1;
+	}

	if (port < 0) {
		/* calculate the port id */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 80a7104d5ddb..f1507c052a2e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -451,6 +451,10 @@ static void option_instat_callback(struct urb *urb);
 #define CHANGHONG_VENDOR_ID			0x2077
 #define CHANGHONG_PRODUCT_CH690			0x7001

+/* Inovia */
+#define INOVIA_VENDOR_ID			0x20a6
+#define INOVIA_SEW858				0x1105
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
		OPTION_BLACKLIST_NONE = 0,
@@ -1257,7 +1261,9 @@ static const struct usb_device_id option_ids[] = {

	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
-	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
+	{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
+		.driver_info = (kernel_ulong_t)&net_intf6_blacklist
+	},
	{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
	{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -1345,6 +1351,7 @@ static const struct usb_device_id option_ids[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+	{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 5c9f9b1d7736..4ced88ebcecb 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -203,6 +203,7 @@ static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1]
	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
	{ }
 };
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 0459df843c58..15a6ddf7f3ca 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -680,7 +680,10 @@ static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
	atomic_set(&sl->refcnt, 0);
	init_completion(&sl->released);

+	/* slave modules need to be loaded in a context with unlocked mutex */
+	mutex_unlock(&dev->mutex);
	request_module("w1-family-0x%0x", rn->family);
+	mutex_lock(&dev->mutex);

	spin_lock(&w1_flock);
	f = w1_family_registered(rn->family);
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 1194b1f0f839..f8cde46de9cd 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1783,7 +1783,7 @@ retry:
		d_tmpfile(dentry, inode);
		err = ext3_orphan_add(handle, inode);
		if (err)
-			goto err_drop_inode;
+			goto err_unlock_inode;
		mark_inode_dirty(inode);
		unlock_new_inode(inode);
	}
@@ -1791,10 +1791,9 @@ retry:
	if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
-err_drop_inode:
+err_unlock_inode:
	ext3_journal_stop(handle);
	unlock_new_inode(inode);
-	iput(inode);
	return err;
 }

diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 35f55a0dbc4b..b53cbc6966a2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2319,7 +2319,7 @@ retry:
		d_tmpfile(dentry, inode);
		err = ext4_orphan_add(handle, inode);
		if (err)
-			goto err_drop_inode;
+			goto err_unlock_inode;
		mark_inode_dirty(inode);
		unlock_new_inode(inode);
	}
@@ -2328,10 +2328,9 @@ retry:
	if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
		goto retry;
	return err;
-err_drop_inode:
+err_unlock_inode:
	ext4_journal_stop(handle);
	unlock_new_inode(inode);
-	iput(inode);
	return err;
 }

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3b71a4e83642..6bd165be52e3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1316,6 +1316,11 @@ static inline int skb_pagelen(const struct sk_buff *skb)
	return len + skb_headlen(skb);
 }

+static inline bool skb_has_frags(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->nr_frags;
+}
+
 /**
  * __skb_fill_page_desc - initialise a paged fragment in an skb
  * @skb: buffer containing fragment to be initialised
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index a7a683e30b64..a8c2ef6d3b93 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
	unsigned char err_offset = 0;
	u8 opt_len = opt[1];
	u8 opt_iter;
+	u8 tag_len;

	if (opt_len < 8) {
		err_offset = 1;
@@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
	}

	for (opt_iter = 6; opt_iter < opt_len;) {
-		if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
+		tag_len = opt[opt_iter + 1];
+		if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) {
			err_offset = opt_iter + 1;
			goto out;
		}
-		opt_iter += opt[opt_iter + 1];
+		opt_iter += tag_len;
	}

 out:
diff --git a/include/net/dst.h b/include/net/dst.h
index 1f8fd109e225..e0c97f5a57cf 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -477,10 +477,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
 {
	return dst_orig;
 }
+
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+	return NULL;
+}
+
 #else
 extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
				     const struct flowi *fl, struct sock *sk,
				     int flags);
+
+/* skb attached with this dst needs transformation if dst->xfrm is valid */
+static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst)
+{
+	return dst->xfrm;
+}
 #endif

 #endif /* _NET_DST_H */
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f667248202b6..c7b8860f29fd 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -196,11 +196,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
 }

-static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest)
+static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
 {
-	if (rt->rt6i_flags & RTF_GATEWAY)
-		return &rt->rt6i_gateway;
-	return dest;
+	return &rt->rt6i_gateway;
 }

 #endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 31d5cfbb51ec..04e148f45277 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -232,6 +232,7 @@ struct cg_proto;
  *	@sk_napi_id: id of the last napi context to receive data for sk
  *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
+ *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -361,6 +362,7 @@ struct sock {
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
+	u32			sk_pacing_rate; /* bytes per second */
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d1980054ec75..46cb8a406b8f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -284,6 +284,7 @@ extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
+extern int sysctl_tcp_min_tso_segs;

 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f2820fbf67c9..70861a1fdd64 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2709,6 +2709,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
+again:
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	spin_lock(&mm->page_table_lock);
	if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2731,7 +2732,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
	split_huge_page(page);

	put_page(page);
-	BUG_ON(pmd_trans_huge(*pmd));
+
+	/*
+	 * We don't always have down_write of mmap_sem here: a racing
+	 * do_huge_pmd_wp_page() might have copied-on-write to another
+	 * huge page before our split_huge_page() got the anon_vma lock.
+	 */
+	if (unlikely(pmd_trans_huge(*pmd)))
+		goto again;
 }

 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
diff --git a/mm/memory.c b/mm/memory.c
index af84bc0ec17c..440986e57218 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -861,6 +861,8 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
				 */
				make_migration_entry_read(&entry);
				pte = swp_entry_to_pte(entry);
+				if (pte_swp_soft_dirty(*src_pte))
+					pte = pte_swp_mksoft_dirty(pte);
				set_pte_at(src_mm, addr, src_pte, pte);
			}
		}
diff --git a/mm/migrate.c b/mm/migrate.c
index 25ca7caf9092..81af4e678101 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -157,6 +157,8 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
+	if (pte_swp_soft_dirty(*ptep))
+		pte = pte_mksoft_dirty(pte);
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
 #ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 94722a4d6b43..a3af058f68e4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -94,13 +94,16 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
+				pte_t newpte;
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
-				set_pte_at(mm, addr, pte,
-					swp_entry_to_pte(entry));
+				newpte = swp_entry_to_pte(entry);
+				if (pte_swp_soft_dirty(oldpte))
+					newpte = pte_swp_mksoft_dirty(newpte);
+				set_pte_at(mm, addr, pte, newpte);
			}
			pages++;
		}
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3f0c895c71fe..241a746f864c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1104,11 +1104,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty,
	return 1;
 }

-static long bdi_max_pause(struct backing_dev_info *bdi,
-			  unsigned long bdi_dirty)
+static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
+				   unsigned long bdi_dirty)
 {
-	long bw = bdi->avg_write_bandwidth;
-	long t;
+	unsigned long bw = bdi->avg_write_bandwidth;
+	unsigned long t;

	/*
	 * Limit pause time for small memory systems. If sleeping for too long
@@ -1120,7 +1120,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi,
	t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
	t++;

-	return min_t(long, t, MAX_PAUSE);
+	return min_t(unsigned long, t, MAX_PAUSE);
 }

 static long bdi_min_pause(struct backing_dev_info *bdi,
diff --git a/mm/zswap.c b/mm/zswap.c
index deda2b671e12..cbd9578c1e88 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -816,6 +816,10 @@ static void zswap_frontswap_invalidate_area(unsigned type)
	}
	tree->rbroot = RB_ROOT;
	spin_unlock(&tree->lock);
+
+	zbud_destroy_pool(tree->pool);
+	kfree(tree);
+	zswap_trees[type] = NULL;
 }

 static struct zbud_ops zswap_zbud_ops = {
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 309129732285..c7e634af8516 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev)

	return nla_total_size(2) +	/* IFLA_VLAN_PROTOCOL */
	       nla_total_size(2) +	/* IFLA_VLAN_ID */
-	       sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */
+	       nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */
	       vlan_qos_map_size(vlan->nr_ingress_mappings) +
	       vlan_qos_map_size(vlan->nr_egress_mappings);
 }
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 08125f3f6064..c8e0671422a3 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -61,6 +61,7 @@ static int __init batadv_init(void)
	batadv_recv_handler_init();

	batadv_iv_init();
+	batadv_nc_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");

@@ -138,7 +139,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
	if (ret < 0)
		goto err;

-	ret = batadv_nc_init(bat_priv);
+	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

@@ -163,7 +164,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
	batadv_vis_quit(bat_priv);

	batadv_gw_node_purge(bat_priv);
-	batadv_nc_free(bat_priv);
+	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index a487d46e0aec..4ecc0b6bf8ab 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -35,6 +35,20 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
				       struct batadv_hard_iface *recv_if);

 /**
+ * batadv_nc_init - one-time initialization for network coding
+ */
+int __init batadv_nc_init(void)
+{
+	int ret;
+
+	/* Register our packet type */
+	ret = batadv_recv_handler_register(BATADV_CODED,
+					   batadv_nc_recv_coded_packet);
+
+	return ret;
+}
+
+/**
  * batadv_nc_start_timer - initialise the nc periodic worker
  * @bat_priv: the bat priv with all the soft interface information
  */
@@ -45,10 +59,10 @@ static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
 }

 /**
- * batadv_nc_init - initialise coding hash table and start house keeping
+ * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
  */
-int batadv_nc_init(struct batadv_priv *bat_priv)
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
	bat_priv->nc.timestamp_fwd_flush = jiffies;
	bat_priv->nc.timestamp_sniffed_purge = jiffies;
@@ -70,11 +84,6 @@ int batadv_nc_init(struct batadv_priv *bat_priv)
	batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
				   &batadv_nc_decoding_hash_lock_class_key);

-	/* Register our packet type */
-	if (batadv_recv_handler_register(BATADV_CODED,
-					 batadv_nc_recv_coded_packet) < 0)
-		goto err;
-
	INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
	batadv_nc_start_timer(bat_priv);

@@ -1721,12 +1730,11 @@ free_nc_packet:
 }

 /**
- * batadv_nc_free - clean up network coding memory
+ * batadv_nc_mesh_free - clean up network coding memory
  * @bat_priv: the bat priv with all the soft interface information
  */
-void batadv_nc_free(struct batadv_priv *bat_priv)
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
-	batadv_recv_handler_unregister(BATADV_CODED);
	cancel_delayed_work_sync(&bat_priv->nc.work);

	batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index 85a4ec81ad50..ddfa618e80bf 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -22,8 +22,9 @@

 #ifdef CONFIG_BATMAN_ADV_NC

-int batadv_nc_init(struct batadv_priv *bat_priv);
-void batadv_nc_free(struct batadv_priv *bat_priv);
+int batadv_nc_init(void);
+int batadv_nc_mesh_init(struct batadv_priv *bat_priv);
+void batadv_nc_mesh_free(struct batadv_priv *bat_priv);
 void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
			      struct batadv_orig_node *orig_node,
			      struct batadv_orig_node *orig_neigh_node,
@@ -46,12 +47,17 @@ int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);

 #else /* ifdef CONFIG_BATMAN_ADV_NC */

-static inline int batadv_nc_init(struct batadv_priv *bat_priv)
+static inline int batadv_nc_init(void)
 {
	return 0;
 }

-static inline void batadv_nc_free(struct batadv_priv *bat_priv)
+static inline int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
 {
	return;
 }
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 6319c4333c39..de3a0e76ab75 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -451,7 +451,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

-		if (!mp->ports && !mp->mglist && mp->timer_armed &&
+		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index bbcb43582496..fbad619c70b7 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -271,7 +271,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);

-		if (!mp->ports && !mp->mglist && mp->timer_armed &&
+		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

@@ -619,7 +619,6 @@ rehash:

	mp->br = br;
	mp->addr = *group;
-
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);

@@ -659,6 +658,7 @@ static int br_multicast_add_group(struct net_bridge *br,
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
+	unsigned long now = jiffies;
	int err;

	spin_lock(&br->multicast_lock);
@@ -673,6 +673,7 @@ static int br_multicast_add_group(struct net_bridge *br,

	if (!port) {
		mp->mglist = true;
+		mod_timer(&mp->timer, now + br->multicast_membership_interval);
		goto out;
	}

@@ -680,7 +681,7 @@ static int br_multicast_add_group(struct net_bridge *br,
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
-			goto out;
+			goto found;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}
@@ -691,6 +692,8 @@ static int br_multicast_add_group(struct net_bridge *br,
	rcu_assign_pointer(*pp, p);
	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);

+found:
+	mod_timer(&p->timer, now + br->multicast_membership_interval);
 out:
	err = 0;

@@ -1190,9 +1193,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
	if (!mp)
		goto out;

-	mod_timer(&mp->timer, now + br->multicast_membership_interval);
-	mp->timer_armed = true;
-
	max_delay *= br->multicast_last_member_count;

	if (mp->mglist &&
@@ -1269,9 +1269,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
	if (!mp)
		goto out;

-	mod_timer(&mp->timer, now + br->multicast_membership_interval);
-	mp->timer_armed = true;
-
	max_delay *= br->multicast_last_member_count;
	if (mp->mglist &&
	    (timer_pending(&mp->timer) ?
@@ -1357,7 +1354,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		br_mdb_notify(br->dev, port, group, RTM_DELMDB);

-		if (!mp->ports && !mp->mglist && mp->timer_armed &&
+		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
	}
@@ -1369,12 +1366,30 @@ static void br_multicast_leave_group(struct net_bridge *br,
		     br->multicast_last_member_interval;

	if (!port) {
-		if (mp->mglist && mp->timer_armed &&
+		if (mp->mglist &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}
+
+		goto out;
+	}
+
+	for (p = mlock_dereference(mp->ports, br);
+	     p != NULL;
+	     p = mlock_dereference(p->next, br)) {
+		if (p->port != port)
+			continue;
+
+		if (!hlist_unhashed(&p->mglist) &&
+		    (timer_pending(&p->timer) ?
+		     time_after(p->timer.expires, time) :
+		     try_to_del_timer_sync(&p->timer) >= 0)) {
+			mod_timer(&p->timer, time);
+		}
+
+		break;
	}
 out:
	spin_unlock(&br->multicast_lock);
@@ -1791,7 +1806,6 @@ void br_multicast_stop(struct net_bridge *br)
		hlist_for_each_entry_safe(mp, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
-			mp->timer_armed = false;
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index cde1eb14d9a2..aa05bd80e9bb 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -126,7 +126,6 @@ struct net_bridge_mdb_entry
	struct timer_list		timer;
	struct br_ip			addr;
	bool				mglist;
-	bool				timer_armed;
 };

 struct net_bridge_mdb_htable
1467 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
1468 index 108084a04671..656a6f3e40de 100644
1469 --- a/net/bridge/br_stp_if.c
1470 +++ b/net/bridge/br_stp_if.c
1471 @@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br)
1472
1473 if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
1474 __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
1475 - else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
1476 + else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY)
1477 __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
1478
1479 if (r == 0) {
1480 diff --git a/net/compat.c b/net/compat.c
1481 index f0a1ba6c8086..89032580bd1d 100644
1482 --- a/net/compat.c
1483 +++ b/net/compat.c
1484 @@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
1485 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
1486 __get_user(kmsg->msg_flags, &umsg->msg_flags))
1487 return -EFAULT;
1488 + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
1489 + return -EINVAL;
1490 kmsg->msg_name = compat_ptr(tmp1);
1491 kmsg->msg_iov = compat_ptr(tmp2);
1492 kmsg->msg_control = compat_ptr(tmp3);
1493 diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
1494 index 3f1ec1586ae1..8d9d05edd2eb 100644
1495 --- a/net/core/secure_seq.c
1496 +++ b/net/core/secure_seq.c
1497 @@ -10,6 +10,7 @@
1498
1499 #include <net/secure_seq.h>
1500
1501 +#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET)
1502 #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4)
1503
1504 static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned;
1505 @@ -29,6 +30,7 @@ static void net_secret_init(void)
1506 cmpxchg(&net_secret[--i], 0, tmp);
1507 }
1508 }
1509 +#endif
1510
1511 #ifdef CONFIG_INET
1512 static u32 seq_scale(u32 seq)
1513 diff --git a/net/core/sock.c b/net/core/sock.c
1514 index 2c097c5a35dd..8729d9135790 100644
1515 --- a/net/core/sock.c
1516 +++ b/net/core/sock.c
1517 @@ -2297,6 +2297,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1518 sk->sk_ll_usec = sysctl_net_busy_read;
1519 #endif
1520
1521 + sk->sk_pacing_rate = ~0U;
1522 /*
1523 * Before updating sk_refcnt, we must commit prior changes to memory
1524 * (Documentation/RCU/rculist_nulls.txt for details)
1525 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
1526 index 7bd8983dbfcf..96da9c77deca 100644
1527 --- a/net/ipv4/inet_hashtables.c
1528 +++ b/net/ipv4/inet_hashtables.c
1529 @@ -287,7 +287,7 @@ begintw:
1530 if (unlikely(!INET_TW_MATCH(sk, net, acookie,
1531 saddr, daddr, ports,
1532 dif))) {
1533 - sock_put(sk);
1534 + inet_twsk_put(inet_twsk(sk));
1535 goto begintw;
1536 }
1537 goto out;
1538 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1539 index a04d872c54f9..7f4ab5d31c16 100644
1540 --- a/net/ipv4/ip_output.c
1541 +++ b/net/ipv4/ip_output.c
1542 @@ -836,7 +836,7 @@ static int __ip_append_data(struct sock *sk,
1543 csummode = CHECKSUM_PARTIAL;
1544
1545 cork->length += length;
1546 - if (((length > mtu) || (skb && skb_is_gso(skb))) &&
1547 + if (((length > mtu) || (skb && skb_has_frags(skb))) &&
1548 (sk->sk_protocol == IPPROTO_UDP) &&
1549 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
1550 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
1551 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
1552 index 17cc0ffa8c0d..065604127418 100644
1553 --- a/net/ipv4/ip_vti.c
1554 +++ b/net/ipv4/ip_vti.c
1555 @@ -285,8 +285,17 @@ static int vti_rcv(struct sk_buff *skb)
1556 tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
1557 if (tunnel != NULL) {
1558 struct pcpu_tstats *tstats;
1559 + u32 oldmark = skb->mark;
1560 + int ret;
1561
1562 - if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1563 +
1564 + /* temporarily mark the skb with the tunnel o_key, to
1565 + * only match policies with this mark.
1566 + */
1567 + skb->mark = be32_to_cpu(tunnel->parms.o_key);
1568 + ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
1569 + skb->mark = oldmark;
1570 + if (!ret)
1571 return -1;
1572
1573 tstats = this_cpu_ptr(tunnel->dev->tstats);
1574 @@ -295,7 +304,6 @@ static int vti_rcv(struct sk_buff *skb)
1575 tstats->rx_bytes += skb->len;
1576 u64_stats_update_end(&tstats->syncp);
1577
1578 - skb->mark = 0;
1579 secpath_reset(skb);
1580 skb->dev = tunnel->dev;
1581 return 1;
1582 @@ -327,7 +335,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
1583
1584 memset(&fl4, 0, sizeof(fl4));
1585 flowi4_init_output(&fl4, tunnel->parms.link,
1586 - be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos),
1587 + be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
1588 RT_SCOPE_UNIVERSE,
1589 IPPROTO_IPIP, 0,
1590 dst, tiph->saddr, 0, 0);
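
Both vti_rcv() changes follow one idea: the xfrm policy lookup keys on skb->mark, so the receive path temporarily substitutes the tunnel's o_key as the mark, runs the check, then restores the caller's mark rather than zeroing it afterwards. The save/override/restore shape, reduced to a hedged user-space sketch with stand-in structs:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Minimal stand-ins for the kernel structures (illustration only). */
    struct pkt    { uint32_t mark; };
    struct tunnel { uint32_t o_key; };

    /* Hypothetical policy check that matches on the packet mark. */
    static bool policy_check(const struct pkt *p, uint32_t want_mark)
    {
        return p->mark == want_mark;
    }

    static int rcv(struct pkt *p, const struct tunnel *t, uint32_t policy_mark)
    {
        uint32_t oldmark = p->mark;  /* save the caller's mark */
        bool ok;

        p->mark = t->o_key;          /* match only this tunnel's policies */
        ok = policy_check(p, policy_mark);
        p->mark = oldmark;           /* restore, never clobber to zero */

        return ok ? 1 : -1;
    }

    int main(void)
    {
        struct pkt p = { .mark = 7 };
        struct tunnel t = { .o_key = 42 };

        printf("%d\n", rcv(&p, &t, 42)); /* 1: policy keyed on o_key matches */
        printf("%u\n", p.mark);          /* 7: original mark preserved */
        return 0;
    }
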
1591 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1592 index a9a54a236832..2de16d940528 100644
1593 --- a/net/ipv4/route.c
1594 +++ b/net/ipv4/route.c
1595 @@ -2074,7 +2074,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
1596 RT_SCOPE_LINK);
1597 goto make_route;
1598 }
1599 - if (fl4->saddr) {
1600 + if (!fl4->saddr) {
1601 if (ipv4_is_multicast(fl4->daddr))
1602 fl4->saddr = inet_select_addr(dev_out, 0,
1603 fl4->flowi4_scope);
1604 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
1605 index 610e324348d1..6900b8b97d93 100644
1606 --- a/net/ipv4/sysctl_net_ipv4.c
1607 +++ b/net/ipv4/sysctl_net_ipv4.c
1608 @@ -29,6 +29,7 @@
1609 static int zero;
1610 static int one = 1;
1611 static int four = 4;
1612 +static int gso_max_segs = GSO_MAX_SEGS;
1613 static int tcp_retr1_max = 255;
1614 static int ip_local_port_range_min[] = { 1, 1 };
1615 static int ip_local_port_range_max[] = { 65535, 65535 };
1616 @@ -754,6 +755,15 @@ static struct ctl_table ipv4_table[] = {
1617 .extra2 = &four,
1618 },
1619 {
1620 + .procname = "tcp_min_tso_segs",
1621 + .data = &sysctl_tcp_min_tso_segs,
1622 + .maxlen = sizeof(int),
1623 + .mode = 0644,
1624 + .proc_handler = proc_dointvec_minmax,
1625 + .extra1 = &zero,
1626 + .extra2 = &gso_max_segs,
1627 + },
1628 + {
1629 .procname = "udp_mem",
1630 .data = &sysctl_udp_mem,
1631 .maxlen = sizeof(sysctl_udp_mem),
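
The new table entry exposes the knob as /proc/sys/net/ipv4/tcp_min_tso_segs, with proc_dointvec_minmax clamping writes to [0, GSO_MAX_SEGS]. On a kernel carrying this patch it reads like any other sysctl; a minimal reader, assuming only that the file exists:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_min_tso_segs", "r");
        int val;

        if (!f) {
            perror("tcp_min_tso_segs"); /* absent on unpatched kernels */
            return 1;
        }
        if (fscanf(f, "%d", &val) == 1)
            printf("tcp_min_tso_segs = %d\n", val); /* default: 2 */
        fclose(f);
        return 0;
    }
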
1632 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1633 index 95544e4028c0..ec586e553361 100644
1634 --- a/net/ipv4/tcp.c
1635 +++ b/net/ipv4/tcp.c
1636 @@ -283,6 +283,8 @@
1637
1638 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
1639
1640 +int sysctl_tcp_min_tso_segs __read_mostly = 2;
1641 +
1642 struct percpu_counter tcp_orphan_count;
1643 EXPORT_SYMBOL_GPL(tcp_orphan_count);
1644
1645 @@ -789,12 +791,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
1646 xmit_size_goal = mss_now;
1647
1648 if (large_allowed && sk_can_gso(sk)) {
1649 - xmit_size_goal = ((sk->sk_gso_max_size - 1) -
1650 - inet_csk(sk)->icsk_af_ops->net_header_len -
1651 - inet_csk(sk)->icsk_ext_hdr_len -
1652 - tp->tcp_header_len);
1653 + u32 gso_size, hlen;
1654 +
1655 + /* Maybe we should/could use sk->sk_prot->max_header here ? */
1656 + hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
1657 + inet_csk(sk)->icsk_ext_hdr_len +
1658 + tp->tcp_header_len;
1659 +
1660 + /* Goal is to send at least one packet per ms,
1661 + * not one big TSO packet every 100 ms.
1662 + * This preserves ACK clocking and is consistent
1663 + * with tcp_tso_should_defer() heuristic.
1664 + */
1665 + gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
1666 + gso_size = max_t(u32, gso_size,
1667 + sysctl_tcp_min_tso_segs * mss_now);
1668 +
1669 + xmit_size_goal = min_t(u32, gso_size,
1670 + sk->sk_gso_max_size - 1 - hlen);
1671
1672 - /* TSQ : try to have two TSO segments in flight */
1673 + /* TSQ : try to have at least two segments in flight
1674 + * (one in NIC TX ring, another in Qdisc)
1675 + */
1676 xmit_size_goal = min_t(u32, xmit_size_goal,
1677 sysctl_tcp_limit_output_bytes >> 1);
1678
1679 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1680 index 3ca2139a130b..723951aec07e 100644
1681 --- a/net/ipv4/tcp_input.c
1682 +++ b/net/ipv4/tcp_input.c
1683 @@ -688,6 +688,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
1684 }
1685 }
1686
1687 +/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
1688 + * Note: TCP stack does not yet implement pacing.
1689 + * FQ packet scheduler can be used to implement cheap but effective
1690 + * TCP pacing, to smooth the burst on large writes when packets
1691 + * in flight is significantly lower than cwnd (or rwin)
1692 + */
1693 +static void tcp_update_pacing_rate(struct sock *sk)
1694 +{
1695 + const struct tcp_sock *tp = tcp_sk(sk);
1696 + u64 rate;
1697 +
1698 + /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
1699 + rate = (u64)tp->mss_cache * 2 * (HZ << 3);
1700 +
1701 + rate *= max(tp->snd_cwnd, tp->packets_out);
1702 +
1703 + /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
1704 + * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
1705 + * We probably need usec resolution in the future.
1706 + * Note: This also takes care of possible srtt=0 case,
1707 + * when tcp_rtt_estimator() was not yet called.
1708 + */
1709 + if (tp->srtt > 8 + 2)
1710 + do_div(rate, tp->srtt);
1711 +
1712 + sk->sk_pacing_rate = min_t(u64, rate, ~0U);
1713 +}
1714 +
1715 /* Calculate rto without backoff. This is the second half of Van Jacobson's
1716 * routine referred to above.
1717 */
1718 @@ -1251,7 +1279,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1719 tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1720 }
1721
1722 - TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
1723 + TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1724 + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1725 + TCP_SKB_CB(prev)->end_seq++;
1726 +
1727 if (skb == tcp_highest_sack(sk))
1728 tcp_advance_highest_sack(sk, skb);
1729
1730 @@ -3253,7 +3284,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
1731 tcp_init_cwnd_reduction(sk, true);
1732 tcp_set_ca_state(sk, TCP_CA_CWR);
1733 tcp_end_cwnd_reduction(sk);
1734 - tcp_set_ca_state(sk, TCP_CA_Open);
1735 + tcp_try_keep_open(sk);
1736 NET_INC_STATS_BH(sock_net(sk),
1737 LINUX_MIB_TCPLOSSPROBERECOVERY);
1738 }
1739 @@ -3269,7 +3300,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
1740 u32 ack_seq = TCP_SKB_CB(skb)->seq;
1741 u32 ack = TCP_SKB_CB(skb)->ack_seq;
1742 bool is_dupack = false;
1743 - u32 prior_in_flight;
1744 + u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
1745 u32 prior_fackets;
1746 int prior_packets = tp->packets_out;
1747 const int prior_unsacked = tp->packets_out - tp->sacked_out;
1748 @@ -3375,6 +3406,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
1749
1750 if (icsk->icsk_pending == ICSK_TIME_RETRANS)
1751 tcp_schedule_loss_probe(sk);
1752 + if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
1753 + tcp_update_pacing_rate(sk);
1754 return 1;
1755
1756 no_queue:
1757 @@ -5671,6 +5704,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
1758 } else
1759 tcp_init_metrics(sk);
1760
1761 + tcp_update_pacing_rate(sk);
1762 +
1763 /* Prevent spurious tcp_cwnd_restart() on first data packet */
1764 tp->lsndtime = tcp_time_stamp;
1765
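
tcp_update_pacing_rate() encodes rate = 2 * mss * cwnd / srtt in the kernel's fixed-point units: srtt is stored in jiffies left-shifted by 3, and the (HZ << 3) factor converts that back to bytes per second. Worked numbers, assuming HZ=1000 (a common but not universal config), mss 1448, cwnd 10 and a 100 ms smoothed RTT:

    #include <stdint.h>
    #include <stdio.h>

    #define HZ 1000 /* assumed build-time value for this example */

    int main(void)
    {
        uint64_t mss  = 1448;
        uint64_t cwnd = 10;
        uint64_t srtt = 100 << 3; /* 100 jiffies = 100 ms, stored <<3 */

        uint64_t rate = mss * 2 * (HZ << 3); /* 200% of the measured rate */
        rate *= cwnd;
        if (srtt > 8 + 2)                    /* guard tiny/unset srtt */
            rate /= srtt;

        /* 2 * 1448 * 10 / 0.1 s = 289600 bytes/sec (~2.3 Mb/s) */
        printf("sk_pacing_rate = %llu bytes/sec\n",
               (unsigned long long)rate);
        return 0;
    }
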
1766 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1767 index 170737a9d56d..28c0d6a00d96 100644
1768 --- a/net/ipv4/tcp_output.c
1769 +++ b/net/ipv4/tcp_output.c
1770 @@ -892,8 +892,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1771
1772 skb_orphan(skb);
1773 skb->sk = sk;
1774 - skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
1775 - tcp_wfree : sock_wfree;
1776 + skb->destructor = tcp_wfree;
1777 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
1778
1779 /* Build TCP header and checksum it. */
1780 @@ -982,6 +981,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1781 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
1782 unsigned int mss_now)
1783 {
1784 + /* Make sure we own this skb before messing gso_size/gso_segs */
1785 + WARN_ON_ONCE(skb_cloned(skb));
1786 +
1787 if (skb->len <= mss_now || !sk_can_gso(sk) ||
1788 skb->ip_summed == CHECKSUM_NONE) {
1789 /* Avoid the costly divide in the normal
1790 @@ -1063,9 +1065,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1791 if (nsize < 0)
1792 nsize = 0;
1793
1794 - if (skb_cloned(skb) &&
1795 - skb_is_nonlinear(skb) &&
1796 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1797 + if (skb_unclone(skb, GFP_ATOMIC))
1798 return -ENOMEM;
1799
1800 /* Get a new skb... force flag on. */
1801 @@ -1628,7 +1628,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1802
1803 /* If a full-sized TSO skb can be sent, do it. */
1804 if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
1805 - sk->sk_gso_max_segs * tp->mss_cache))
1806 + tp->xmit_size_goal_segs * tp->mss_cache))
1807 goto send_now;
1808
1809 /* Middle in queue won't get any more data, full sendable already? */
1810 @@ -1837,7 +1837,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1811 while ((skb = tcp_send_head(sk))) {
1812 unsigned int limit;
1813
1814 -
1815 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1816 BUG_ON(!tso_segs);
1817
1818 @@ -1866,13 +1865,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1819 break;
1820 }
1821
1822 - /* TSQ : sk_wmem_alloc accounts skb truesize,
1823 - * including skb overhead. But thats OK.
1824 + /* TCP Small Queues :
1825 + * Control number of packets in qdisc/devices to two packets / or ~1 ms.
1826 + * This allows for :
1827 + * - better RTT estimation and ACK scheduling
1828 + * - faster recovery
1829 + * - high rates
1830 */
1831 - if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
1832 + limit = max(skb->truesize, sk->sk_pacing_rate >> 10);
1833 +
1834 + if (atomic_read(&sk->sk_wmem_alloc) > limit) {
1835 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
1836 break;
1837 }
1838 +
1839 limit = mss_now;
1840 if (tso_segs > 1 && !tcp_urg_mode(tp))
1841 limit = tcp_mss_split_point(sk, skb, mss_now,
1842 @@ -2334,6 +2340,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
1843 int oldpcount = tcp_skb_pcount(skb);
1844
1845 if (unlikely(oldpcount > 1)) {
1846 + if (skb_unclone(skb, GFP_ATOMIC))
1847 + return -ENOMEM;
1848 tcp_init_tso_segs(sk, skb, cur_mss);
1849 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
1850 }
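
The reworked TSQ check in tcp_write_xmit() ties the queue budget to the pacing rate: sk_pacing_rate >> 10 divides by 1024, which is close to one millisecond's worth of bytes, with skb->truesize as a floor so at least one packet can always be queued. Continuing with the 289600 B/s rate from the previous sketch:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t max_u32(uint32_t a, uint32_t b) { return a > b ? a : b; }

    int main(void)
    {
        uint32_t pacing_rate = 289600; /* bytes/sec, assumed */
        uint32_t truesize    = 2304;   /* assumed skb truesize */

        /* >>10 is ~ /1000: the bytes sent in about one millisecond. */
        uint32_t limit = max_u32(truesize, pacing_rate >> 10);

        printf("TSQ limit = %u bytes\n", limit); /* 2304: the floor wins */
        return 0;
    }
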
1851 diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
1852 index 32b4a1675d82..066640e0ba8e 100644
1853 --- a/net/ipv6/inet6_hashtables.c
1854 +++ b/net/ipv6/inet6_hashtables.c
1855 @@ -116,7 +116,7 @@ begintw:
1856 }
1857 if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
1858 ports, dif))) {
1859 - sock_put(sk);
1860 + inet_twsk_put(inet_twsk(sk));
1861 goto begintw;
1862 }
1863 goto out;
1864 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1865 index 44df1c92a0d6..5b25f850faf9 100644
1866 --- a/net/ipv6/ip6_output.c
1867 +++ b/net/ipv6/ip6_output.c
1868 @@ -130,7 +130,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
1869 }
1870
1871 rcu_read_lock_bh();
1872 - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
1873 + nexthop = rt6_nexthop((struct rt6_info *)dst);
1874 neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
1875 if (unlikely(!neigh))
1876 neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
1877 @@ -899,7 +899,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
1878 */
1879 rt = (struct rt6_info *) *dst;
1880 rcu_read_lock_bh();
1881 - n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr));
1882 + n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
1883 err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
1884 rcu_read_unlock_bh();
1885
1886 @@ -1252,7 +1252,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1887 skb = skb_peek_tail(&sk->sk_write_queue);
1888 cork->length += length;
1889 if (((length > mtu) ||
1890 - (skb && skb_is_gso(skb))) &&
1891 + (skb && skb_has_frags(skb))) &&
1892 (sk->sk_protocol == IPPROTO_UDP) &&
1893 (rt->dst.dev->features & NETIF_F_UFO)) {
1894 err = ip6_ufo_append_data(sk, getfrag, from, length,
1895 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1896 index 8d9a93ed9c59..1e32d5c0b615 100644
1897 --- a/net/ipv6/route.c
1898 +++ b/net/ipv6/route.c
1899 @@ -477,6 +477,24 @@ out:
1900 }
1901
1902 #ifdef CONFIG_IPV6_ROUTER_PREF
1903 +struct __rt6_probe_work {
1904 + struct work_struct work;
1905 + struct in6_addr target;
1906 + struct net_device *dev;
1907 +};
1908 +
1909 +static void rt6_probe_deferred(struct work_struct *w)
1910 +{
1911 + struct in6_addr mcaddr;
1912 + struct __rt6_probe_work *work =
1913 + container_of(w, struct __rt6_probe_work, work);
1914 +
1915 + addrconf_addr_solict_mult(&work->target, &mcaddr);
1916 + ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL);
1917 + dev_put(work->dev);
1918 + kfree(w);
1919 +}
1920 +
1921 static void rt6_probe(struct rt6_info *rt)
1922 {
1923 struct neighbour *neigh;
1924 @@ -500,17 +518,23 @@ static void rt6_probe(struct rt6_info *rt)
1925
1926 if (!neigh ||
1927 time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) {
1928 - struct in6_addr mcaddr;
1929 - struct in6_addr *target;
1930 + struct __rt6_probe_work *work;
1931
1932 - if (neigh) {
1933 + work = kmalloc(sizeof(*work), GFP_ATOMIC);
1934 +
1935 + if (neigh && work)
1936 neigh->updated = jiffies;
1937 +
1938 + if (neigh)
1939 write_unlock(&neigh->lock);
1940 - }
1941
1942 - target = (struct in6_addr *)&rt->rt6i_gateway;
1943 - addrconf_addr_solict_mult(target, &mcaddr);
1944 - ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL);
1945 + if (work) {
1946 + INIT_WORK(&work->work, rt6_probe_deferred);
1947 + work->target = rt->rt6i_gateway;
1948 + dev_hold(rt->dst.dev);
1949 + work->dev = rt->dst.dev;
1950 + schedule_work(&work->work);
1951 + }
1952 } else {
1953 out:
1954 write_unlock(&neigh->lock);
1955 @@ -852,7 +876,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
1956 if (ort->rt6i_dst.plen != 128 &&
1957 ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
1958 rt->rt6i_flags |= RTF_ANYCAST;
1959 - rt->rt6i_gateway = *daddr;
1960 }
1961
1962 rt->rt6i_flags |= RTF_CACHE;
1963 @@ -1270,6 +1293,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1964 rt->dst.flags |= DST_HOST;
1965 rt->dst.output = ip6_output;
1966 atomic_set(&rt->dst.__refcnt, 1);
1967 + rt->rt6i_gateway = fl6->daddr;
1968 rt->rt6i_dst.addr = fl6->daddr;
1969 rt->rt6i_dst.plen = 128;
1970 rt->rt6i_idev = idev;
1971 @@ -1824,7 +1848,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
1972 in6_dev_hold(rt->rt6i_idev);
1973 rt->dst.lastuse = jiffies;
1974
1975 - rt->rt6i_gateway = ort->rt6i_gateway;
1976 + if (ort->rt6i_flags & RTF_GATEWAY)
1977 + rt->rt6i_gateway = ort->rt6i_gateway;
1978 + else
1979 + rt->rt6i_gateway = *dest;
1980 rt->rt6i_flags = ort->rt6i_flags;
1981 if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
1982 (RTF_DEFAULT | RTF_ADDRCONF))
1983 @@ -2111,6 +2138,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1984 else
1985 rt->rt6i_flags |= RTF_LOCAL;
1986
1987 + rt->rt6i_gateway = *addr;
1988 rt->rt6i_dst.addr = *addr;
1989 rt->rt6i_dst.plen = 128;
1990 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
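
The rt6_probe() rework above moves the neighbour-solicitation send out of the write-locked section and into process context: allocate a small work item, take a reference on the device so it cannot vanish, schedule the work, and let the handler send and clean up. The same allocate/INIT_WORK/schedule_work shape, as a hedged minimal kernel-module sketch (not the IPv6 code itself; the payload is a placeholder):

    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct deferred_probe {
        struct work_struct work;
        int target; /* stand-in for the real payload (an in6_addr) */
    };

    static void probe_deferred(struct work_struct *w)
    {
        struct deferred_probe *p =
            container_of(w, struct deferred_probe, work);

        pr_info("probing target %d in process context\n", p->target);
        kfree(p); /* the handler owns and frees the work item */
    }

    static int __init demo_init(void)
    {
        struct deferred_probe *p = kmalloc(sizeof(*p), GFP_ATOMIC);

        if (!p)
            return 0; /* probing is best-effort; skip on ENOMEM */
        p->target = 42;
        INIT_WORK(&p->work, probe_deferred);
        schedule_work(&p->work);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        flush_scheduled_work(); /* ensure the handler has finished */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
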
1991 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1992 index 86f639ba5407..a51ad079ee58 100644
1993 --- a/net/ipv6/sit.c
1994 +++ b/net/ipv6/sit.c
1995 @@ -1708,7 +1708,6 @@ static void __net_exit sit_exit_net(struct net *net)
1996
1997 rtnl_lock();
1998 sit_destroy_tunnels(sitn, &list);
1999 - unregister_netdevice_queue(sitn->fb_tunnel_dev, &list);
2000 unregister_netdevice_many(&list);
2001 rtnl_unlock();
2002 }
2003 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
2004 index feae495a0a30..b076e8309bc2 100644
2005 --- a/net/l2tp/l2tp_core.c
2006 +++ b/net/l2tp/l2tp_core.c
2007 @@ -115,6 +115,11 @@ struct l2tp_net {
2008 static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
2009 static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
2010
2011 +static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
2012 +{
2013 + return sk->sk_user_data;
2014 +}
2015 +
2016 static inline struct l2tp_net *l2tp_pernet(struct net *net)
2017 {
2018 BUG_ON(!net);
2019 @@ -504,7 +509,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
2020 return 0;
2021
2022 #if IS_ENABLED(CONFIG_IPV6)
2023 - if (sk->sk_family == PF_INET6) {
2024 + if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
2025 if (!uh->check) {
2026 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
2027 return 1;
2028 @@ -1128,7 +1133,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
2029 /* Queue the packet to IP for output */
2030 skb->local_df = 1;
2031 #if IS_ENABLED(CONFIG_IPV6)
2032 - if (skb->sk->sk_family == PF_INET6)
2033 + if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped)
2034 error = inet6_csk_xmit(skb, NULL);
2035 else
2036 #endif
2037 @@ -1255,7 +1260,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
2038
2039 /* Calculate UDP checksum if configured to do so */
2040 #if IS_ENABLED(CONFIG_IPV6)
2041 - if (sk->sk_family == PF_INET6)
2042 + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
2043 l2tp_xmit_ipv6_csum(sk, skb, udp_len);
2044 else
2045 #endif
2046 @@ -1304,10 +1309,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
2047 */
2048 static void l2tp_tunnel_destruct(struct sock *sk)
2049 {
2050 - struct l2tp_tunnel *tunnel;
2051 + struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
2052 struct l2tp_net *pn;
2053
2054 - tunnel = sk->sk_user_data;
2055 if (tunnel == NULL)
2056 goto end;
2057
2058 @@ -1675,7 +1679,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
2059 }
2060
2061 /* Check if this socket has already been prepped */
2062 - tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
2063 + tunnel = l2tp_tunnel(sk);
2064 if (tunnel != NULL) {
2065 /* This socket has already been prepped */
2066 err = -EBUSY;
2067 @@ -1704,6 +1708,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
2068 if (cfg != NULL)
2069 tunnel->debug = cfg->debug;
2070
2071 +#if IS_ENABLED(CONFIG_IPV6)
2072 + if (sk->sk_family == PF_INET6) {
2073 + struct ipv6_pinfo *np = inet6_sk(sk);
2074 +
2075 + if (ipv6_addr_v4mapped(&np->saddr) &&
2076 + ipv6_addr_v4mapped(&np->daddr)) {
2077 + struct inet_sock *inet = inet_sk(sk);
2078 +
2079 + tunnel->v4mapped = true;
2080 + inet->inet_saddr = np->saddr.s6_addr32[3];
2081 + inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3];
2082 + inet->inet_daddr = np->daddr.s6_addr32[3];
2083 + } else {
2084 + tunnel->v4mapped = false;
2085 + }
2086 + }
2087 +#endif
2088 +
2089 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
2090 tunnel->encap = encap;
2091 if (encap == L2TP_ENCAPTYPE_UDP) {
2092 @@ -1712,7 +1734,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
2093 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
2094 udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
2095 #if IS_ENABLED(CONFIG_IPV6)
2096 - if (sk->sk_family == PF_INET6)
2097 + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
2098 udpv6_encap_enable();
2099 else
2100 #endif
2101 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
2102 index 66a559b104b6..6f251cbc2ed7 100644
2103 --- a/net/l2tp/l2tp_core.h
2104 +++ b/net/l2tp/l2tp_core.h
2105 @@ -194,6 +194,9 @@ struct l2tp_tunnel {
2106 struct sock *sock; /* Parent socket */
2107 int fd; /* Parent fd, if tunnel socket
2108 * was created by userspace */
2109 +#if IS_ENABLED(CONFIG_IPV6)
2110 + bool v4mapped;
2111 +#endif
2112
2113 struct work_struct del_work;
2114
2115 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2116 index 5ebee2ded9e9..8c46b271064a 100644
2117 --- a/net/l2tp/l2tp_ppp.c
2118 +++ b/net/l2tp/l2tp_ppp.c
2119 @@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
2120 goto error_put_sess_tun;
2121 }
2122
2123 + local_bh_disable();
2124 l2tp_xmit_skb(session, skb, session->hdr_len);
2125 + local_bh_enable();
2126
2127 sock_put(ps->tunnel_sock);
2128 sock_put(sk);
2129 @@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
2130 skb->data[0] = ppph[0];
2131 skb->data[1] = ppph[1];
2132
2133 + local_bh_disable();
2134 l2tp_xmit_skb(session, skb, session->hdr_len);
2135 + local_bh_enable();
2136
2137 sock_put(sk_tun);
2138 sock_put(sk);
2139 diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
2140 index bdebd03bc8cd..70866d192efc 100644
2141 --- a/net/netfilter/nf_conntrack_h323_main.c
2142 +++ b/net/netfilter/nf_conntrack_h323_main.c
2143 @@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src,
2144 flowi6_to_flowi(&fl1), false)) {
2145 if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
2146 flowi6_to_flowi(&fl2), false)) {
2147 - if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
2148 - sizeof(rt1->rt6i_gateway)) &&
2149 + if (ipv6_addr_equal(rt6_nexthop(rt1),
2150 + rt6_nexthop(rt2)) &&
2151 rt1->dst.dev == rt2->dst.dev)
2152 ret = 1;
2153 dst_release(&rt2->dst);
2154 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
2155 index 82f6016d89ab..3626010e8f0b 100644
2156 --- a/net/sched/sch_netem.c
2157 +++ b/net/sched/sch_netem.c
2158 @@ -358,6 +358,21 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
2159 return PSCHED_NS2TICKS(ticks);
2160 }
2161
2162 +static void tfifo_reset(struct Qdisc *sch)
2163 +{
2164 + struct netem_sched_data *q = qdisc_priv(sch);
2165 + struct rb_node *p;
2166 +
2167 + while ((p = rb_first(&q->t_root))) {
2168 + struct sk_buff *skb = netem_rb_to_skb(p);
2169 +
2170 + rb_erase(p, &q->t_root);
2171 + skb->next = NULL;
2172 + skb->prev = NULL;
2173 + kfree_skb(skb);
2174 + }
2175 +}
2176 +
2177 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
2178 {
2179 struct netem_sched_data *q = qdisc_priv(sch);
2180 @@ -523,6 +538,7 @@ static unsigned int netem_drop(struct Qdisc *sch)
2181 skb->next = NULL;
2182 skb->prev = NULL;
2183 len = qdisc_pkt_len(skb);
2184 + sch->qstats.backlog -= len;
2185 kfree_skb(skb);
2186 }
2187 }
2188 @@ -612,6 +628,7 @@ static void netem_reset(struct Qdisc *sch)
2189 struct netem_sched_data *q = qdisc_priv(sch);
2190
2191 qdisc_reset_queue(sch);
2192 + tfifo_reset(sch);
2193 if (q->qdisc)
2194 qdisc_reset(q->qdisc);
2195 qdisc_watchdog_cancel(&q->watchdog);
2196 diff --git a/net/sctp/output.c b/net/sctp/output.c
2197 index a46d1eb41762..013a07d9c454 100644
2198 --- a/net/sctp/output.c
2199 +++ b/net/sctp/output.c
2200 @@ -542,7 +542,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
2201 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
2202 */
2203 if (!sctp_checksum_disable) {
2204 - if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
2205 + if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
2206 + (dst_xfrm(dst) != NULL) || packet->ipfragok) {
2207 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
2208
2209 /* 3) Put the resultant value into the checksum field in the
2210 diff --git a/net/socket.c b/net/socket.c
2211 index b2d7c629eeb9..4b946438d560 100644
2212 --- a/net/socket.c
2213 +++ b/net/socket.c
2214 @@ -1973,6 +1973,16 @@ struct used_address {
2215 unsigned int name_len;
2216 };
2217
2218 +static int copy_msghdr_from_user(struct msghdr *kmsg,
2219 + struct msghdr __user *umsg)
2220 +{
2221 + if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
2222 + return -EFAULT;
2223 + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
2224 + return -EINVAL;
2225 + return 0;
2226 +}
2227 +
2228 static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
2229 struct msghdr *msg_sys, unsigned int flags,
2230 struct used_address *used_address)
2231 @@ -1991,8 +2001,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
2232 if (MSG_CMSG_COMPAT & flags) {
2233 if (get_compat_msghdr(msg_sys, msg_compat))
2234 return -EFAULT;
2235 - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
2236 - return -EFAULT;
2237 + } else {
2238 + err = copy_msghdr_from_user(msg_sys, msg);
2239 + if (err)
2240 + return err;
2241 + }
2242
2243 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2244 err = -EMSGSIZE;
2245 @@ -2200,8 +2213,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2246 if (MSG_CMSG_COMPAT & flags) {
2247 if (get_compat_msghdr(msg_sys, msg_compat))
2248 return -EFAULT;
2249 - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
2250 - return -EFAULT;
2251 + } else {
2252 + err = copy_msghdr_from_user(msg_sys, msg);
2253 + if (err)
2254 + return err;
2255 + }
2256
2257 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2258 err = -EMSGSIZE;
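
copy_msghdr_from_user() above, like the matching get_compat_msghdr() check earlier in this patch, rejects any msg_namelen larger than sockaddr_storage before the length is ever used as a copy bound further down the call chain, closing a kernel buffer overflow. The same validate-before-copy discipline in self-contained user-space C:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    /* Validate a caller-supplied address length before using it as a
     * copy bound for a struct sockaddr_storage sized buffer. */
    static int checked_copy_addr(struct sockaddr_storage *dst,
                                 const void *src, size_t namelen)
    {
        if (namelen > sizeof(struct sockaddr_storage))
            return -EINVAL; /* the check the patch adds */
        memcpy(dst, src, namelen);
        return 0;
    }

    int main(void)
    {
        struct sockaddr_storage ss;
        char bogus[4096] = { 0 };

        /* An attacker-sized length is refused instead of overflowing ss. */
        printf("%d\n", checked_copy_addr(&ss, bogus, sizeof(bogus))); /* -22 */
        printf("%d\n", checked_copy_addr(&ss, bogus, 16));            /* 0 */
        return 0;
    }
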
2259 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2260 index c4ce243824bb..e64bbcf5fb2c 100644
2261 --- a/net/unix/af_unix.c
2262 +++ b/net/unix/af_unix.c
2263 @@ -1246,6 +1246,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
2264 return 0;
2265 }
2266
2267 +static void unix_sock_inherit_flags(const struct socket *old,
2268 + struct socket *new)
2269 +{
2270 + if (test_bit(SOCK_PASSCRED, &old->flags))
2271 + set_bit(SOCK_PASSCRED, &new->flags);
2272 + if (test_bit(SOCK_PASSSEC, &old->flags))
2273 + set_bit(SOCK_PASSSEC, &new->flags);
2274 +}
2275 +
2276 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
2277 {
2278 struct sock *sk = sock->sk;
2279 @@ -1280,6 +1289,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
2280 /* attach accepted sock to socket */
2281 unix_state_lock(tsk);
2282 newsock->state = SS_CONNECTED;
2283 + unix_sock_inherit_flags(sock, newsock);
2284 sock_graft(tsk, newsock);
2285 unix_state_unlock(tsk);
2286 return 0;
2287 diff --git a/net/unix/diag.c b/net/unix/diag.c
2288 index d591091603bf..86fa0f3b2caf 100644
2289 --- a/net/unix/diag.c
2290 +++ b/net/unix/diag.c
2291 @@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
2292 rep->udiag_family = AF_UNIX;
2293 rep->udiag_type = sk->sk_type;
2294 rep->udiag_state = sk->sk_state;
2295 + rep->pad = 0;
2296 rep->udiag_ino = sk_ino;
2297 sock_diag_save_cookie(sk, rep->udiag_cookie);
2298
2299 diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
2300 index 7d604c06c3dc..a271c27fac77 100644
2301 --- a/net/wireless/radiotap.c
2302 +++ b/net/wireless/radiotap.c
2303 @@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init(
2304 struct ieee80211_radiotap_header *radiotap_header,
2305 int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns)
2306 {
2307 + /* check the radiotap header can actually be present */
2308 + if (max_length < sizeof(struct ieee80211_radiotap_header))
2309 + return -EINVAL;
2310 +
2311 /* Linux only supports version 0 radiotap format */
2312 if (radiotap_header->it_version)
2313 return -EINVAL;
2314 @@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init(
2315 */
2316
2317 if ((unsigned long)iterator->_arg -
2318 - (unsigned long)iterator->_rtheader >
2319 + (unsigned long)iterator->_rtheader +
2320 + sizeof(uint32_t) >
2321 (unsigned long)iterator->_max_length)
2322 return -EINVAL;
2323 }
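
The radiotap changes are both bounds checks: the fixed header must fit in the buffer at all, and each extended it_present word must leave room for its full four bytes, hence the added sizeof(uint32_t) in the comparison. The arithmetic as a standalone predicate (names are simplified stand-ins for the iterator's fields):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Is there room for one more 4-byte it_present word at 'arg'?
     * Mirrors the patched comparison: offset + width must not
     * exceed the buffer length. */
    static bool bitmap_word_fits(const uint8_t *start, const uint8_t *arg,
                                 size_t max_length)
    {
        return (size_t)(arg - start) + sizeof(uint32_t) <= max_length;
    }

    int main(void)
    {
        uint8_t buf[12];

        printf("%d\n", bitmap_word_fits(buf, buf + 8, sizeof(buf))); /* 1 */
        printf("%d\n", bitmap_word_fits(buf, buf + 9, sizeof(buf))); /* 0 */
        return 0;
    }
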
2324 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2325 index e3c7ba8d7582..adabdeb7b15d 100644
2326 --- a/sound/pci/hda/hda_generic.c
2327 +++ b/sound/pci/hda/hda_generic.c
2328 @@ -3505,7 +3505,7 @@ static int create_capture_mixers(struct hda_codec *codec)
2329 if (!multi)
2330 err = create_single_cap_vol_ctl(codec, n, vol, sw,
2331 inv_dmic);
2332 - else if (!multi_cap_vol)
2333 + else if (!multi_cap_vol && !inv_dmic)
2334 err = create_bind_cap_vol_ctl(codec, n, vol, sw);
2335 else
2336 err = create_multi_cap_vol_ctl(codec);
2337 diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
2338 index d0323a693ba2..999550bbad40 100644
2339 --- a/sound/usb/usx2y/us122l.c
2340 +++ b/sound/usb/usx2y/us122l.c
2341 @@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw,
2342 }
2343
2344 area->vm_ops = &usb_stream_hwdep_vm_ops;
2345 - area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2346 + area->vm_flags |= VM_DONTDUMP;
2347 + if (!read)
2348 + area->vm_flags |= VM_DONTEXPAND;
2349 area->vm_private_data = us122l;
2350 atomic_inc(&us122l->mmap_count);
2351 out: