Contents of /trunk/kernel-alx/patches-3.10/0117-3.10.18-all-fixes.patch
Parent Directory | Revision Log
Revision 2345 -
(show annotations)
(download)
Mon Dec 16 10:04:46 2013 UTC (10 years, 9 months ago) by niro
File size: 59864 byte(s)
-linux-3.10.18
1 | diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt |
2 | index 3458d6343e01..3994f0bbeeb6 100644 |
3 | --- a/Documentation/networking/ip-sysctl.txt |
4 | +++ b/Documentation/networking/ip-sysctl.txt |
5 | @@ -478,6 +478,15 @@ tcp_syn_retries - INTEGER |
6 | tcp_timestamps - BOOLEAN |
7 | Enable timestamps as defined in RFC1323. |
8 | |
9 | +tcp_min_tso_segs - INTEGER |
10 | + Minimal number of segments per TSO frame. |
11 | + Since linux-3.12, TCP does an automatic sizing of TSO frames, |
12 | + depending on flow rate, instead of filling 64Kbytes packets. |
13 | + For specific usages, it's possible to force TCP to build big |
14 | + TSO frames. Note that TCP stack might split too big TSO packets |
15 | + if available window is too small. |
16 | + Default: 2 |
17 | + |
18 | tcp_tso_win_divisor - INTEGER |
19 | This allows control over what percentage of the congestion window |
20 | can be consumed by a single TSO frame. |
21 | diff --git a/Makefile b/Makefile |
22 | index 5c7d3d63d376..5fb14e503fe3 100644 |
23 | --- a/Makefile |
24 | +++ b/Makefile |
25 | @@ -1,6 +1,6 @@ |
26 | VERSION = 3 |
27 | PATCHLEVEL = 10 |
28 | -SUBLEVEL = 17 |
29 | +SUBLEVEL = 18 |
30 | EXTRAVERSION = |
31 | NAME = TOSSUG Baby Fish |
32 | |
33 | diff --git a/arch/arm/boot/dts/integratorcp.dts b/arch/arm/boot/dts/integratorcp.dts |
34 | index ff1aea0ee043..72693a69f830 100644 |
35 | --- a/arch/arm/boot/dts/integratorcp.dts |
36 | +++ b/arch/arm/boot/dts/integratorcp.dts |
37 | @@ -9,11 +9,6 @@ |
38 | model = "ARM Integrator/CP"; |
39 | compatible = "arm,integrator-cp"; |
40 | |
41 | - aliases { |
42 | - arm,timer-primary = &timer2; |
43 | - arm,timer-secondary = &timer1; |
44 | - }; |
45 | - |
46 | chosen { |
47 | bootargs = "root=/dev/ram0 console=ttyAMA0,38400n8 earlyprintk"; |
48 | }; |
49 | @@ -24,14 +19,18 @@ |
50 | }; |
51 | |
52 | timer0: timer@13000000 { |
53 | + /* TIMER0 runs @ 25MHz */ |
54 | compatible = "arm,integrator-cp-timer"; |
55 | + status = "disabled"; |
56 | }; |
57 | |
58 | timer1: timer@13000100 { |
59 | + /* TIMER1 runs @ 1MHz */ |
60 | compatible = "arm,integrator-cp-timer"; |
61 | }; |
62 | |
63 | timer2: timer@13000200 { |
64 | + /* TIMER2 runs @ 1MHz */ |
65 | compatible = "arm,integrator-cp-timer"; |
66 | }; |
67 | |
68 | diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h |
69 | index f1d96d4e8092..73ddd7239b33 100644 |
70 | --- a/arch/arm/include/asm/syscall.h |
71 | +++ b/arch/arm/include/asm/syscall.h |
72 | @@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task, |
73 | unsigned int i, unsigned int n, |
74 | unsigned long *args) |
75 | { |
76 | + if (n == 0) |
77 | + return; |
78 | + |
79 | if (i + n > SYSCALL_MAX_ARGS) { |
80 | unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i; |
81 | unsigned int n_bad = n + i - SYSCALL_MAX_ARGS; |
82 | @@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task, |
83 | unsigned int i, unsigned int n, |
84 | const unsigned long *args) |
85 | { |
86 | + if (n == 0) |
87 | + return; |
88 | + |
89 | if (i + n > SYSCALL_MAX_ARGS) { |
90 | pr_warning("%s called with max args %d, handling only %d\n", |
91 | __func__, i + n, SYSCALL_MAX_ARGS); |
92 | diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c |
93 | index 08ae128cce9b..c73fc2b74de2 100644 |
94 | --- a/drivers/connector/cn_proc.c |
95 | +++ b/drivers/connector/cn_proc.c |
96 | @@ -65,6 +65,7 @@ void proc_fork_connector(struct task_struct *task) |
97 | |
98 | msg = (struct cn_msg *)buffer; |
99 | ev = (struct proc_event *)msg->data; |
100 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
101 | get_seq(&msg->seq, &ev->cpu); |
102 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
103 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
104 | @@ -80,6 +81,7 @@ void proc_fork_connector(struct task_struct *task) |
105 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
106 | msg->ack = 0; /* not used */ |
107 | msg->len = sizeof(*ev); |
108 | + msg->flags = 0; /* not used */ |
109 | /* If cn_netlink_send() failed, the data is not sent */ |
110 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
111 | } |
112 | @@ -96,6 +98,7 @@ void proc_exec_connector(struct task_struct *task) |
113 | |
114 | msg = (struct cn_msg *)buffer; |
115 | ev = (struct proc_event *)msg->data; |
116 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
117 | get_seq(&msg->seq, &ev->cpu); |
118 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
119 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
120 | @@ -106,6 +109,7 @@ void proc_exec_connector(struct task_struct *task) |
121 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
122 | msg->ack = 0; /* not used */ |
123 | msg->len = sizeof(*ev); |
124 | + msg->flags = 0; /* not used */ |
125 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
126 | } |
127 | |
128 | @@ -122,6 +126,7 @@ void proc_id_connector(struct task_struct *task, int which_id) |
129 | |
130 | msg = (struct cn_msg *)buffer; |
131 | ev = (struct proc_event *)msg->data; |
132 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
133 | ev->what = which_id; |
134 | ev->event_data.id.process_pid = task->pid; |
135 | ev->event_data.id.process_tgid = task->tgid; |
136 | @@ -145,6 +150,7 @@ void proc_id_connector(struct task_struct *task, int which_id) |
137 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
138 | msg->ack = 0; /* not used */ |
139 | msg->len = sizeof(*ev); |
140 | + msg->flags = 0; /* not used */ |
141 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
142 | } |
143 | |
144 | @@ -160,6 +166,7 @@ void proc_sid_connector(struct task_struct *task) |
145 | |
146 | msg = (struct cn_msg *)buffer; |
147 | ev = (struct proc_event *)msg->data; |
148 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
149 | get_seq(&msg->seq, &ev->cpu); |
150 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
151 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
152 | @@ -170,6 +177,7 @@ void proc_sid_connector(struct task_struct *task) |
153 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
154 | msg->ack = 0; /* not used */ |
155 | msg->len = sizeof(*ev); |
156 | + msg->flags = 0; /* not used */ |
157 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
158 | } |
159 | |
160 | @@ -185,6 +193,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) |
161 | |
162 | msg = (struct cn_msg *)buffer; |
163 | ev = (struct proc_event *)msg->data; |
164 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
165 | get_seq(&msg->seq, &ev->cpu); |
166 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
167 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
168 | @@ -203,6 +212,7 @@ void proc_ptrace_connector(struct task_struct *task, int ptrace_id) |
169 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
170 | msg->ack = 0; /* not used */ |
171 | msg->len = sizeof(*ev); |
172 | + msg->flags = 0; /* not used */ |
173 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
174 | } |
175 | |
176 | @@ -218,6 +228,7 @@ void proc_comm_connector(struct task_struct *task) |
177 | |
178 | msg = (struct cn_msg *)buffer; |
179 | ev = (struct proc_event *)msg->data; |
180 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
181 | get_seq(&msg->seq, &ev->cpu); |
182 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
183 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
184 | @@ -229,6 +240,7 @@ void proc_comm_connector(struct task_struct *task) |
185 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
186 | msg->ack = 0; /* not used */ |
187 | msg->len = sizeof(*ev); |
188 | + msg->flags = 0; /* not used */ |
189 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
190 | } |
191 | |
192 | @@ -244,6 +256,7 @@ void proc_coredump_connector(struct task_struct *task) |
193 | |
194 | msg = (struct cn_msg *)buffer; |
195 | ev = (struct proc_event *)msg->data; |
196 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
197 | get_seq(&msg->seq, &ev->cpu); |
198 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
199 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
200 | @@ -254,6 +267,7 @@ void proc_coredump_connector(struct task_struct *task) |
201 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
202 | msg->ack = 0; /* not used */ |
203 | msg->len = sizeof(*ev); |
204 | + msg->flags = 0; /* not used */ |
205 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
206 | } |
207 | |
208 | @@ -269,6 +283,7 @@ void proc_exit_connector(struct task_struct *task) |
209 | |
210 | msg = (struct cn_msg *)buffer; |
211 | ev = (struct proc_event *)msg->data; |
212 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
213 | get_seq(&msg->seq, &ev->cpu); |
214 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
215 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
216 | @@ -281,6 +296,7 @@ void proc_exit_connector(struct task_struct *task) |
217 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
218 | msg->ack = 0; /* not used */ |
219 | msg->len = sizeof(*ev); |
220 | + msg->flags = 0; /* not used */ |
221 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
222 | } |
223 | |
224 | @@ -304,6 +320,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) |
225 | |
226 | msg = (struct cn_msg *)buffer; |
227 | ev = (struct proc_event *)msg->data; |
228 | + memset(&ev->event_data, 0, sizeof(ev->event_data)); |
229 | msg->seq = rcvd_seq; |
230 | ktime_get_ts(&ts); /* get high res monotonic timestamp */ |
231 | put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); |
232 | @@ -313,6 +330,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) |
233 | memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); |
234 | msg->ack = rcvd_ack + 1; |
235 | msg->len = sizeof(*ev); |
236 | + msg->flags = 0; /* not used */ |
237 | cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); |
238 | } |
239 | |
240 | diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c |
241 | index 6ecfa758942c..0daa11e418b1 100644 |
242 | --- a/drivers/connector/connector.c |
243 | +++ b/drivers/connector/connector.c |
244 | @@ -157,17 +157,18 @@ static int cn_call_callback(struct sk_buff *skb) |
245 | static void cn_rx_skb(struct sk_buff *__skb) |
246 | { |
247 | struct nlmsghdr *nlh; |
248 | - int err; |
249 | struct sk_buff *skb; |
250 | + int len, err; |
251 | |
252 | skb = skb_get(__skb); |
253 | |
254 | if (skb->len >= NLMSG_HDRLEN) { |
255 | nlh = nlmsg_hdr(skb); |
256 | + len = nlmsg_len(nlh); |
257 | |
258 | - if (nlh->nlmsg_len < sizeof(struct cn_msg) || |
259 | + if (len < (int)sizeof(struct cn_msg) || |
260 | skb->len < nlh->nlmsg_len || |
261 | - nlh->nlmsg_len > CONNECTOR_MAX_MSG_SIZE) { |
262 | + len > CONNECTOR_MAX_MSG_SIZE) { |
263 | kfree_skb(skb); |
264 | return; |
265 | } |
266 | diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c |
267 | index 86c17de87692..71d86143aec5 100644 |
268 | --- a/drivers/gpio/gpio-lynxpoint.c |
269 | +++ b/drivers/gpio/gpio-lynxpoint.c |
270 | @@ -248,14 +248,15 @@ static void lp_gpio_irq_handler(unsigned irq, struct irq_desc *desc) |
271 | struct lp_gpio *lg = irq_data_get_irq_handler_data(data); |
272 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
273 | u32 base, pin, mask; |
274 | - unsigned long reg, pending; |
275 | + unsigned long reg, ena, pending; |
276 | unsigned virq; |
277 | |
278 | /* check from GPIO controller which pin triggered the interrupt */ |
279 | for (base = 0; base < lg->chip.ngpio; base += 32) { |
280 | reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT); |
281 | + ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE); |
282 | |
283 | - while ((pending = inl(reg))) { |
284 | + while ((pending = (inl(reg) & inl(ena)))) { |
285 | pin = __ffs(pending); |
286 | mask = BIT(pin); |
287 | /* Clear before handling so we don't lose an edge */ |
288 | diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c |
289 | index cd82eb44e4c4..7c9f053556f2 100644 |
290 | --- a/drivers/i2c/busses/i2c-ismt.c |
291 | +++ b/drivers/i2c/busses/i2c-ismt.c |
292 | @@ -393,6 +393,9 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr, |
293 | |
294 | desc = &priv->hw[priv->head]; |
295 | |
296 | + /* Initialize the DMA buffer */ |
297 | + memset(priv->dma_buffer, 0, sizeof(priv->dma_buffer)); |
298 | + |
299 | /* Initialize the descriptor */ |
300 | memset(desc, 0, sizeof(struct ismt_desc)); |
301 | desc->tgtaddr_rw = ISMT_DESC_ADDR_RW(addr, read_write); |
302 | diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c |
303 | index 4caa8e6d59d7..2d2b1b7588d7 100644 |
304 | --- a/drivers/md/dm-snap-persistent.c |
305 | +++ b/drivers/md/dm-snap-persistent.c |
306 | @@ -269,6 +269,14 @@ static chunk_t area_location(struct pstore *ps, chunk_t area) |
307 | return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area); |
308 | } |
309 | |
310 | +static void skip_metadata(struct pstore *ps) |
311 | +{ |
312 | + uint32_t stride = ps->exceptions_per_area + 1; |
313 | + chunk_t next_free = ps->next_free; |
314 | + if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS) |
315 | + ps->next_free++; |
316 | +} |
317 | + |
318 | /* |
319 | * Read or write a metadata area. Remembering to skip the first |
320 | * chunk which holds the header. |
321 | @@ -502,6 +510,8 @@ static int read_exceptions(struct pstore *ps, |
322 | |
323 | ps->current_area--; |
324 | |
325 | + skip_metadata(ps); |
326 | + |
327 | return 0; |
328 | } |
329 | |
330 | @@ -616,8 +626,6 @@ static int persistent_prepare_exception(struct dm_exception_store *store, |
331 | struct dm_exception *e) |
332 | { |
333 | struct pstore *ps = get_info(store); |
334 | - uint32_t stride; |
335 | - chunk_t next_free; |
336 | sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev); |
337 | |
338 | /* Is there enough room ? */ |
339 | @@ -630,10 +638,8 @@ static int persistent_prepare_exception(struct dm_exception_store *store, |
340 | * Move onto the next free pending, making sure to take |
341 | * into account the location of the metadata chunks. |
342 | */ |
343 | - stride = (ps->exceptions_per_area + 1); |
344 | - next_free = ++ps->next_free; |
345 | - if (sector_div(next_free, stride) == 1) |
346 | - ps->next_free++; |
347 | + ps->next_free++; |
348 | + skip_metadata(ps); |
349 | |
350 | atomic_inc(&ps->pending_count); |
351 | return 0; |
352 | diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c |
353 | index f9cba4123c66..1870c4731a57 100644 |
354 | --- a/drivers/net/can/dev.c |
355 | +++ b/drivers/net/can/dev.c |
356 | @@ -705,14 +705,14 @@ static size_t can_get_size(const struct net_device *dev) |
357 | size_t size; |
358 | |
359 | size = nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */ |
360 | - size += sizeof(struct can_ctrlmode); /* IFLA_CAN_CTRLMODE */ |
361 | + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */ |
362 | size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */ |
363 | - size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */ |
364 | - size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */ |
365 | + size += nla_total_size(sizeof(struct can_bittiming)); /* IFLA_CAN_BITTIMING */ |
366 | + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */ |
367 | if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */ |
368 | - size += sizeof(struct can_berr_counter); |
369 | + size += nla_total_size(sizeof(struct can_berr_counter)); |
370 | if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */ |
371 | - size += sizeof(struct can_bittiming_const); |
372 | + size += nla_total_size(sizeof(struct can_bittiming_const)); |
373 | |
374 | return size; |
375 | } |
376 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
377 | index 638e55435b04..8c4babc0efbd 100644 |
378 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
379 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
380 | @@ -670,6 +670,7 @@ static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
381 | } |
382 | } |
383 | #endif |
384 | + skb_record_rx_queue(skb, fp->rx_queue); |
385 | napi_gro_receive(&fp->napi, skb); |
386 | } |
387 | |
388 | diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c |
389 | index 1db2df61b8af..696674edbdc4 100644 |
390 | --- a/drivers/net/ethernet/emulex/benet/be_cmds.c |
391 | +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c |
392 | @@ -1150,7 +1150,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) |
393 | |
394 | if (lancer_chip(adapter)) { |
395 | req->hdr.version = 1; |
396 | - req->if_id = cpu_to_le16(adapter->if_handle); |
397 | } else if (BEx_chip(adapter)) { |
398 | if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) |
399 | req->hdr.version = 2; |
400 | @@ -1158,6 +1157,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) |
401 | req->hdr.version = 2; |
402 | } |
403 | |
404 | + if (req->hdr.version > 0) |
405 | + req->if_id = cpu_to_le16(adapter->if_handle); |
406 | req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); |
407 | req->ulp_num = BE_ULP1_NUM; |
408 | req->type = BE_ETH_TX_RING_TYPE_STANDARD; |
409 | diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c |
410 | index d1cbfb12c1ca..4be11ff516a0 100644 |
411 | --- a/drivers/net/ethernet/marvell/mv643xx_eth.c |
412 | +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c |
413 | @@ -1125,15 +1125,13 @@ static void mib_counters_update(struct mv643xx_eth_private *mp) |
414 | p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT); |
415 | p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT); |
416 | spin_unlock_bh(&mp->mib_counters_lock); |
417 | - |
418 | - mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); |
419 | } |
420 | |
421 | static void mib_counters_timer_wrapper(unsigned long _mp) |
422 | { |
423 | struct mv643xx_eth_private *mp = (void *)_mp; |
424 | - |
425 | mib_counters_update(mp); |
426 | + mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ); |
427 | } |
428 | |
429 | |
430 | @@ -2231,6 +2229,7 @@ static int mv643xx_eth_open(struct net_device *dev) |
431 | mp->int_mask |= INT_TX_END_0 << i; |
432 | } |
433 | |
434 | + add_timer(&mp->mib_counters_timer); |
435 | port_start(mp); |
436 | |
437 | wrlp(mp, INT_MASK_EXT, INT_EXT_LINK_PHY | INT_EXT_TX); |
438 | @@ -2739,7 +2738,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev) |
439 | mp->mib_counters_timer.data = (unsigned long)mp; |
440 | mp->mib_counters_timer.function = mib_counters_timer_wrapper; |
441 | mp->mib_counters_timer.expires = jiffies + 30 * HZ; |
442 | - add_timer(&mp->mib_counters_timer); |
443 | |
444 | spin_lock_init(&mp->mib_counters_lock); |
445 | |
446 | diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c |
447 | index 860e15ddfbcb..7233610164cd 100644 |
448 | --- a/drivers/net/ethernet/ti/davinci_emac.c |
449 | +++ b/drivers/net/ethernet/ti/davinci_emac.c |
450 | @@ -876,8 +876,7 @@ static void emac_dev_mcast_set(struct net_device *ndev) |
451 | netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { |
452 | mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); |
453 | emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); |
454 | - } |
455 | - if (!netdev_mc_empty(ndev)) { |
456 | + } else if (!netdev_mc_empty(ndev)) { |
457 | struct netdev_hw_addr *ha; |
458 | |
459 | mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); |
460 | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
461 | index 42d670a468f8..1d01534c2020 100644 |
462 | --- a/drivers/net/virtio_net.c |
463 | +++ b/drivers/net/virtio_net.c |
464 | @@ -902,7 +902,6 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
465 | struct scatterlist sg; |
466 | struct virtio_net_ctrl_mq s; |
467 | struct net_device *dev = vi->dev; |
468 | - int i; |
469 | |
470 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
471 | return 0; |
472 | @@ -916,10 +915,10 @@ static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
473 | queue_pairs); |
474 | return -EINVAL; |
475 | } else { |
476 | - for (i = vi->curr_queue_pairs; i < queue_pairs; i++) |
477 | - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) |
478 | - schedule_delayed_work(&vi->refill, 0); |
479 | vi->curr_queue_pairs = queue_pairs; |
480 | + /* virtnet_open() will refill when device is going to up. */ |
481 | + if (dev->flags & IFF_UP) |
482 | + schedule_delayed_work(&vi->refill, 0); |
483 | } |
484 | |
485 | return 0; |
486 | @@ -1097,6 +1096,11 @@ static int virtnet_cpu_callback(struct notifier_block *nfb, |
487 | { |
488 | struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); |
489 | |
490 | + mutex_lock(&vi->config_lock); |
491 | + |
492 | + if (!vi->config_enable) |
493 | + goto done; |
494 | + |
495 | switch(action & ~CPU_TASKS_FROZEN) { |
496 | case CPU_ONLINE: |
497 | case CPU_DOWN_FAILED: |
498 | @@ -1109,6 +1113,9 @@ static int virtnet_cpu_callback(struct notifier_block *nfb, |
499 | default: |
500 | break; |
501 | } |
502 | + |
503 | +done: |
504 | + mutex_unlock(&vi->config_lock); |
505 | return NOTIFY_OK; |
506 | } |
507 | |
508 | @@ -1709,7 +1716,9 @@ static int virtnet_restore(struct virtio_device *vdev) |
509 | vi->config_enable = true; |
510 | mutex_unlock(&vi->config_lock); |
511 | |
512 | + rtnl_lock(); |
513 | virtnet_set_queues(vi, vi->curr_queue_pairs); |
514 | + rtnl_unlock(); |
515 | |
516 | return 0; |
517 | } |
518 | diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c |
519 | index 3f0c4f268751..bcfff0d62de4 100644 |
520 | --- a/drivers/net/wan/farsync.c |
521 | +++ b/drivers/net/wan/farsync.c |
522 | @@ -1972,6 +1972,7 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port, |
523 | } |
524 | |
525 | i = port->index; |
526 | + memset(&sync, 0, sizeof(sync)); |
527 | sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); |
528 | /* Lucky card and linux use same encoding here */ |
529 | sync.clock_type = FST_RDB(card, portConfig[i].internalClock) == |
530 | diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c |
531 | index 6a24a5a70cc7..4c0a69779b89 100644 |
532 | --- a/drivers/net/wan/wanxl.c |
533 | +++ b/drivers/net/wan/wanxl.c |
534 | @@ -355,6 +355,7 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
535 | ifr->ifr_settings.size = size; /* data size wanted */ |
536 | return -ENOBUFS; |
537 | } |
538 | + memset(&line, 0, sizeof(line)); |
539 | line.clock_type = get_status(port)->clocking; |
540 | line.clock_rate = 0; |
541 | line.loopback = 0; |
542 | diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h |
543 | index 9d7f1723dd8f..1a285083d24a 100644 |
544 | --- a/drivers/net/xen-netback/common.h |
545 | +++ b/drivers/net/xen-netback/common.h |
546 | @@ -115,6 +115,7 @@ struct xenvif *xenvif_alloc(struct device *parent, |
547 | int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, |
548 | unsigned long rx_ring_ref, unsigned int evtchn); |
549 | void xenvif_disconnect(struct xenvif *vif); |
550 | +void xenvif_free(struct xenvif *vif); |
551 | |
552 | void xenvif_get(struct xenvif *vif); |
553 | void xenvif_put(struct xenvif *vif); |
554 | diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c |
555 | index d98414168485..3a294c2528d5 100644 |
556 | --- a/drivers/net/xen-netback/interface.c |
557 | +++ b/drivers/net/xen-netback/interface.c |
558 | @@ -304,6 +304,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, |
559 | } |
560 | |
561 | netdev_dbg(dev, "Successfully created xenvif\n"); |
562 | + |
563 | + __module_get(THIS_MODULE); |
564 | + |
565 | return vif; |
566 | } |
567 | |
568 | @@ -369,9 +372,14 @@ void xenvif_disconnect(struct xenvif *vif) |
569 | if (vif->irq) |
570 | unbind_from_irqhandler(vif->irq, vif); |
571 | |
572 | - unregister_netdev(vif->dev); |
573 | - |
574 | xen_netbk_unmap_frontend_rings(vif); |
575 | +} |
576 | + |
577 | +void xenvif_free(struct xenvif *vif) |
578 | +{ |
579 | + unregister_netdev(vif->dev); |
580 | |
581 | free_netdev(vif->dev); |
582 | + |
583 | + module_put(THIS_MODULE); |
584 | } |
585 | diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c |
586 | index 410018c4c528..abe24ff000f0 100644 |
587 | --- a/drivers/net/xen-netback/xenbus.c |
588 | +++ b/drivers/net/xen-netback/xenbus.c |
589 | @@ -42,7 +42,7 @@ static int netback_remove(struct xenbus_device *dev) |
590 | if (be->vif) { |
591 | kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); |
592 | xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); |
593 | - xenvif_disconnect(be->vif); |
594 | + xenvif_free(be->vif); |
595 | be->vif = NULL; |
596 | } |
597 | kfree(be); |
598 | @@ -203,9 +203,18 @@ static void disconnect_backend(struct xenbus_device *dev) |
599 | { |
600 | struct backend_info *be = dev_get_drvdata(&dev->dev); |
601 | |
602 | + if (be->vif) |
603 | + xenvif_disconnect(be->vif); |
604 | +} |
605 | + |
606 | +static void destroy_backend(struct xenbus_device *dev) |
607 | +{ |
608 | + struct backend_info *be = dev_get_drvdata(&dev->dev); |
609 | + |
610 | if (be->vif) { |
611 | + kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); |
612 | xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status"); |
613 | - xenvif_disconnect(be->vif); |
614 | + xenvif_free(be->vif); |
615 | be->vif = NULL; |
616 | } |
617 | } |
618 | @@ -237,14 +246,11 @@ static void frontend_changed(struct xenbus_device *dev, |
619 | case XenbusStateConnected: |
620 | if (dev->state == XenbusStateConnected) |
621 | break; |
622 | - backend_create_xenvif(be); |
623 | if (be->vif) |
624 | connect(be); |
625 | break; |
626 | |
627 | case XenbusStateClosing: |
628 | - if (be->vif) |
629 | - kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE); |
630 | disconnect_backend(dev); |
631 | xenbus_switch_state(dev, XenbusStateClosing); |
632 | break; |
633 | @@ -253,6 +259,7 @@ static void frontend_changed(struct xenbus_device *dev, |
634 | xenbus_switch_state(dev, XenbusStateClosed); |
635 | if (xenbus_dev_is_online(dev)) |
636 | break; |
637 | + destroy_backend(dev); |
638 | /* fall through if not online */ |
639 | case XenbusStateUnknown: |
640 | device_unregister(&dev->dev); |
641 | diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c |
642 | index 1a8bc2275ea4..f72b43fbbef9 100644 |
643 | --- a/drivers/tty/serial/vt8500_serial.c |
644 | +++ b/drivers/tty/serial/vt8500_serial.c |
645 | @@ -559,12 +559,13 @@ static int vt8500_serial_probe(struct platform_device *pdev) |
646 | if (!mmres || !irqres) |
647 | return -ENODEV; |
648 | |
649 | - if (np) |
650 | + if (np) { |
651 | port = of_alias_get_id(np, "serial"); |
652 | if (port >= VT8500_MAX_PORTS) |
653 | port = -1; |
654 | - else |
655 | + } else { |
656 | port = -1; |
657 | + } |
658 | |
659 | if (port < 0) { |
660 | /* calculate the port id */ |
661 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
662 | index 80a7104d5ddb..f1507c052a2e 100644 |
663 | --- a/drivers/usb/serial/option.c |
664 | +++ b/drivers/usb/serial/option.c |
665 | @@ -451,6 +451,10 @@ static void option_instat_callback(struct urb *urb); |
666 | #define CHANGHONG_VENDOR_ID 0x2077 |
667 | #define CHANGHONG_PRODUCT_CH690 0x7001 |
668 | |
669 | +/* Inovia */ |
670 | +#define INOVIA_VENDOR_ID 0x20a6 |
671 | +#define INOVIA_SEW858 0x1105 |
672 | + |
673 | /* some devices interfaces need special handling due to a number of reasons */ |
674 | enum option_blacklist_reason { |
675 | OPTION_BLACKLIST_NONE = 0, |
676 | @@ -1257,7 +1261,9 @@ static const struct usb_device_id option_ids[] = { |
677 | |
678 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
679 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, |
680 | - { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, |
681 | + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), |
682 | + .driver_info = (kernel_ulong_t)&net_intf6_blacklist |
683 | + }, |
684 | { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
685 | { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ |
686 | { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, |
687 | @@ -1345,6 +1351,7 @@ static const struct usb_device_id option_ids[] = { |
688 | { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, |
689 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ |
690 | { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ |
691 | + { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, |
692 | { } /* Terminating entry */ |
693 | }; |
694 | MODULE_DEVICE_TABLE(usb, option_ids); |
695 | diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c |
696 | index 32bdd5eac59b..4cc84c0c990d 100644 |
697 | --- a/drivers/usb/serial/ti_usb_3410_5052.c |
698 | +++ b/drivers/usb/serial/ti_usb_3410_5052.c |
699 | @@ -203,6 +203,7 @@ static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] |
700 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, |
701 | { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, |
702 | { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, |
703 | + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, |
704 | { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, |
705 | { } |
706 | }; |
707 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
708 | index dec1748cd002..eaf602781635 100644 |
709 | --- a/include/linux/skbuff.h |
710 | +++ b/include/linux/skbuff.h |
711 | @@ -1308,6 +1308,11 @@ static inline int skb_pagelen(const struct sk_buff *skb) |
712 | return len + skb_headlen(skb); |
713 | } |
714 | |
715 | +static inline bool skb_has_frags(const struct sk_buff *skb) |
716 | +{ |
717 | + return skb_shinfo(skb)->nr_frags; |
718 | +} |
719 | + |
720 | /** |
721 | * __skb_fill_page_desc - initialise a paged fragment in an skb |
722 | * @skb: buffer containing fragment to be initialised |
723 | diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h |
724 | index a7a683e30b64..a8c2ef6d3b93 100644 |
725 | --- a/include/net/cipso_ipv4.h |
726 | +++ b/include/net/cipso_ipv4.h |
727 | @@ -290,6 +290,7 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, |
728 | unsigned char err_offset = 0; |
729 | u8 opt_len = opt[1]; |
730 | u8 opt_iter; |
731 | + u8 tag_len; |
732 | |
733 | if (opt_len < 8) { |
734 | err_offset = 1; |
735 | @@ -302,11 +303,12 @@ static inline int cipso_v4_validate(const struct sk_buff *skb, |
736 | } |
737 | |
738 | for (opt_iter = 6; opt_iter < opt_len;) { |
739 | - if (opt[opt_iter + 1] > (opt_len - opt_iter)) { |
740 | + tag_len = opt[opt_iter + 1]; |
741 | + if ((tag_len == 0) || (opt[opt_iter + 1] > (opt_len - opt_iter))) { |
742 | err_offset = opt_iter + 1; |
743 | goto out; |
744 | } |
745 | - opt_iter += opt[opt_iter + 1]; |
746 | + opt_iter += tag_len; |
747 | } |
748 | |
749 | out: |
750 | diff --git a/include/net/dst.h b/include/net/dst.h |
751 | index 1f8fd109e225..e0c97f5a57cf 100644 |
752 | --- a/include/net/dst.h |
753 | +++ b/include/net/dst.h |
754 | @@ -477,10 +477,22 @@ static inline struct dst_entry *xfrm_lookup(struct net *net, |
755 | { |
756 | return dst_orig; |
757 | } |
758 | + |
759 | +static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
760 | +{ |
761 | + return NULL; |
762 | +} |
763 | + |
764 | #else |
765 | extern struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, |
766 | const struct flowi *fl, struct sock *sk, |
767 | int flags); |
768 | + |
769 | +/* skb attached with this dst needs transformation if dst->xfrm is valid */ |
770 | +static inline struct xfrm_state *dst_xfrm(const struct dst_entry *dst) |
771 | +{ |
772 | + return dst->xfrm; |
773 | +} |
774 | #endif |
775 | |
776 | #endif /* _NET_DST_H */ |
777 | diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h |
778 | index 260f83f16bcf..b906f4a131a4 100644 |
779 | --- a/include/net/ip6_route.h |
780 | +++ b/include/net/ip6_route.h |
781 | @@ -194,11 +194,9 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb) |
782 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
783 | } |
784 | |
785 | -static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt, struct in6_addr *dest) |
786 | +static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt) |
787 | { |
788 | - if (rt->rt6i_flags & RTF_GATEWAY) |
789 | - return &rt->rt6i_gateway; |
790 | - return dest; |
791 | + return &rt->rt6i_gateway; |
792 | } |
793 | |
794 | #endif |
795 | diff --git a/include/net/sock.h b/include/net/sock.h |
796 | index 66772cf8c3c5..cec4c723db9a 100644 |
797 | --- a/include/net/sock.h |
798 | +++ b/include/net/sock.h |
799 | @@ -230,6 +230,7 @@ struct cg_proto; |
800 | * @sk_wmem_queued: persistent queue size |
801 | * @sk_forward_alloc: space allocated forward |
802 | * @sk_allocation: allocation mode |
803 | + * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler) |
804 | * @sk_sndbuf: size of send buffer in bytes |
805 | * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE, |
806 | * %SO_OOBINLINE settings, %SO_TIMESTAMPING settings |
807 | @@ -355,6 +356,7 @@ struct sock { |
808 | kmemcheck_bitfield_end(flags); |
809 | int sk_wmem_queued; |
810 | gfp_t sk_allocation; |
811 | + u32 sk_pacing_rate; /* bytes per second */ |
812 | netdev_features_t sk_route_caps; |
813 | netdev_features_t sk_route_nocaps; |
814 | int sk_gso_type; |
815 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
816 | index 5bba80fbd1d9..3fc77e90624a 100644 |
817 | --- a/include/net/tcp.h |
818 | +++ b/include/net/tcp.h |
819 | @@ -287,6 +287,7 @@ extern int sysctl_tcp_thin_dupack; |
820 | extern int sysctl_tcp_early_retrans; |
821 | extern int sysctl_tcp_limit_output_bytes; |
822 | extern int sysctl_tcp_challenge_ack_limit; |
823 | +extern int sysctl_tcp_min_tso_segs; |
824 | |
825 | extern atomic_long_t tcp_memory_allocated; |
826 | extern struct percpu_counter tcp_sockets_allocated; |
827 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
828 | index b92d0ce428b1..0164b09c1e99 100644 |
829 | --- a/mm/huge_memory.c |
830 | +++ b/mm/huge_memory.c |
831 | @@ -2699,6 +2699,7 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, |
832 | |
833 | mmun_start = haddr; |
834 | mmun_end = haddr + HPAGE_PMD_SIZE; |
835 | +again: |
836 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
837 | spin_lock(&mm->page_table_lock); |
838 | if (unlikely(!pmd_trans_huge(*pmd))) { |
839 | @@ -2721,7 +2722,14 @@ void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address, |
840 | split_huge_page(page); |
841 | |
842 | put_page(page); |
843 | - BUG_ON(pmd_trans_huge(*pmd)); |
844 | + |
845 | + /* |
846 | + * We don't always have down_write of mmap_sem here: a racing |
847 | + * do_huge_pmd_wp_page() might have copied-on-write to another |
848 | + * huge page before our split_huge_page() got the anon_vma lock. |
849 | + */ |
850 | + if (unlikely(pmd_trans_huge(*pmd))) |
851 | + goto again; |
852 | } |
853 | |
854 | void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address, |
855 | diff --git a/mm/page-writeback.c b/mm/page-writeback.c |
856 | index 4514ad7415c3..aca4364275b5 100644 |
857 | --- a/mm/page-writeback.c |
858 | +++ b/mm/page-writeback.c |
859 | @@ -1104,11 +1104,11 @@ static unsigned long dirty_poll_interval(unsigned long dirty, |
860 | return 1; |
861 | } |
862 | |
863 | -static long bdi_max_pause(struct backing_dev_info *bdi, |
864 | - unsigned long bdi_dirty) |
865 | +static unsigned long bdi_max_pause(struct backing_dev_info *bdi, |
866 | + unsigned long bdi_dirty) |
867 | { |
868 | - long bw = bdi->avg_write_bandwidth; |
869 | - long t; |
870 | + unsigned long bw = bdi->avg_write_bandwidth; |
871 | + unsigned long t; |
872 | |
873 | /* |
874 | * Limit pause time for small memory systems. If sleeping for too long |
875 | @@ -1120,7 +1120,7 @@ static long bdi_max_pause(struct backing_dev_info *bdi, |
876 | t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8)); |
877 | t++; |
878 | |
879 | - return min_t(long, t, MAX_PAUSE); |
880 | + return min_t(unsigned long, t, MAX_PAUSE); |
881 | } |
882 | |
883 | static long bdi_min_pause(struct backing_dev_info *bdi, |
884 | diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c |
885 | index 309129732285..c7e634af8516 100644 |
886 | --- a/net/8021q/vlan_netlink.c |
887 | +++ b/net/8021q/vlan_netlink.c |
888 | @@ -171,7 +171,7 @@ static size_t vlan_get_size(const struct net_device *dev) |
889 | |
890 | return nla_total_size(2) + /* IFLA_VLAN_PROTOCOL */ |
891 | nla_total_size(2) + /* IFLA_VLAN_ID */ |
892 | - sizeof(struct ifla_vlan_flags) + /* IFLA_VLAN_FLAGS */ |
893 | + nla_total_size(sizeof(struct ifla_vlan_flags)) + /* IFLA_VLAN_FLAGS */ |
894 | vlan_qos_map_size(vlan->nr_ingress_mappings) + |
895 | vlan_qos_map_size(vlan->nr_egress_mappings); |
896 | } |
897 | diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c |
898 | index 108084a04671..656a6f3e40de 100644 |
899 | --- a/net/bridge/br_stp_if.c |
900 | +++ b/net/bridge/br_stp_if.c |
901 | @@ -134,7 +134,7 @@ static void br_stp_start(struct net_bridge *br) |
902 | |
903 | if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) |
904 | __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); |
905 | - else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) |
906 | + else if (br->bridge_forward_delay > BR_MAX_FORWARD_DELAY) |
907 | __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); |
908 | |
909 | if (r == 0) { |
910 | diff --git a/net/compat.c b/net/compat.c |
911 | index f0a1ba6c8086..89032580bd1d 100644 |
912 | --- a/net/compat.c |
913 | +++ b/net/compat.c |
914 | @@ -71,6 +71,8 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) |
915 | __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || |
916 | __get_user(kmsg->msg_flags, &umsg->msg_flags)) |
917 | return -EFAULT; |
918 | + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
919 | + return -EINVAL; |
920 | kmsg->msg_name = compat_ptr(tmp1); |
921 | kmsg->msg_iov = compat_ptr(tmp2); |
922 | kmsg->msg_control = compat_ptr(tmp3); |
923 | diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c |
924 | index 3f1ec1586ae1..8d9d05edd2eb 100644 |
925 | --- a/net/core/secure_seq.c |
926 | +++ b/net/core/secure_seq.c |
927 | @@ -10,6 +10,7 @@ |
928 | |
929 | #include <net/secure_seq.h> |
930 | |
931 | +#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_INET) |
932 | #define NET_SECRET_SIZE (MD5_MESSAGE_BYTES / 4) |
933 | |
934 | static u32 net_secret[NET_SECRET_SIZE] ____cacheline_aligned; |
935 | @@ -29,6 +30,7 @@ static void net_secret_init(void) |
936 | cmpxchg(&net_secret[--i], 0, tmp); |
937 | } |
938 | } |
939 | +#endif |
940 | |
941 | #ifdef CONFIG_INET |
942 | static u32 seq_scale(u32 seq) |
943 | diff --git a/net/core/sock.c b/net/core/sock.c |
944 | index d6d024cfaaaf..6565431b0e6d 100644 |
945 | --- a/net/core/sock.c |
946 | +++ b/net/core/sock.c |
947 | @@ -2271,6 +2271,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) |
948 | |
949 | sk->sk_stamp = ktime_set(-1L, 0); |
950 | |
951 | + sk->sk_pacing_rate = ~0U; |
952 | /* |
953 | * Before updating sk_refcnt, we must commit prior changes to memory |
954 | * (Documentation/RCU/rculist_nulls.txt for details) |
955 | diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c |
956 | index 6af375afeeef..c95848d00039 100644 |
957 | --- a/net/ipv4/inet_hashtables.c |
958 | +++ b/net/ipv4/inet_hashtables.c |
959 | @@ -287,7 +287,7 @@ begintw: |
960 | if (unlikely(!INET_TW_MATCH(sk, net, acookie, |
961 | saddr, daddr, ports, |
962 | dif))) { |
963 | - sock_put(sk); |
964 | + inet_twsk_put(inet_twsk(sk)); |
965 | goto begintw; |
966 | } |
967 | goto out; |
968 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
969 | index ec2d430a6a55..6ca5873d6175 100644 |
970 | --- a/net/ipv4/ip_output.c |
971 | +++ b/net/ipv4/ip_output.c |
972 | @@ -844,7 +844,7 @@ static int __ip_append_data(struct sock *sk, |
973 | csummode = CHECKSUM_PARTIAL; |
974 | |
975 | cork->length += length; |
976 | - if (((length > mtu) || (skb && skb_is_gso(skb))) && |
977 | + if (((length > mtu) || (skb && skb_has_frags(skb))) && |
978 | (sk->sk_protocol == IPPROTO_UDP) && |
979 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { |
980 | err = ip_ufo_append_data(sk, queue, getfrag, from, length, |
981 | diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c |
982 | index 17cc0ffa8c0d..065604127418 100644 |
983 | --- a/net/ipv4/ip_vti.c |
984 | +++ b/net/ipv4/ip_vti.c |
985 | @@ -285,8 +285,17 @@ static int vti_rcv(struct sk_buff *skb) |
986 | tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr); |
987 | if (tunnel != NULL) { |
988 | struct pcpu_tstats *tstats; |
989 | + u32 oldmark = skb->mark; |
990 | + int ret; |
991 | |
992 | - if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
993 | + |
994 | + /* temporarily mark the skb with the tunnel o_key, to |
995 | + * only match policies with this mark. |
996 | + */ |
997 | + skb->mark = be32_to_cpu(tunnel->parms.o_key); |
998 | + ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb); |
999 | + skb->mark = oldmark; |
1000 | + if (!ret) |
1001 | return -1; |
1002 | |
1003 | tstats = this_cpu_ptr(tunnel->dev->tstats); |
1004 | @@ -295,7 +304,6 @@ static int vti_rcv(struct sk_buff *skb) |
1005 | tstats->rx_bytes += skb->len; |
1006 | u64_stats_update_end(&tstats->syncp); |
1007 | |
1008 | - skb->mark = 0; |
1009 | secpath_reset(skb); |
1010 | skb->dev = tunnel->dev; |
1011 | return 1; |
1012 | @@ -327,7 +335,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) |
1013 | |
1014 | memset(&fl4, 0, sizeof(fl4)); |
1015 | flowi4_init_output(&fl4, tunnel->parms.link, |
1016 | - be32_to_cpu(tunnel->parms.i_key), RT_TOS(tos), |
1017 | + be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos), |
1018 | RT_SCOPE_UNIVERSE, |
1019 | IPPROTO_IPIP, 0, |
1020 | dst, tiph->saddr, 0, 0); |
1021 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
1022 | index d35bbf0cf404..d11e73ce9365 100644 |
1023 | --- a/net/ipv4/route.c |
1024 | +++ b/net/ipv4/route.c |
1025 | @@ -2020,7 +2020,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) |
1026 | RT_SCOPE_LINK); |
1027 | goto make_route; |
1028 | } |
1029 | - if (fl4->saddr) { |
1030 | + if (!fl4->saddr) { |
1031 | if (ipv4_is_multicast(fl4->daddr)) |
1032 | fl4->saddr = inet_select_addr(dev_out, 0, |
1033 | fl4->flowi4_scope); |
1034 | diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c |
1035 | index 3f25e75ae692..90b26beb84d4 100644 |
1036 | --- a/net/ipv4/sysctl_net_ipv4.c |
1037 | +++ b/net/ipv4/sysctl_net_ipv4.c |
1038 | @@ -29,6 +29,7 @@ |
1039 | static int zero; |
1040 | static int one = 1; |
1041 | static int four = 4; |
1042 | +static int gso_max_segs = GSO_MAX_SEGS; |
1043 | static int tcp_retr1_max = 255; |
1044 | static int ip_local_port_range_min[] = { 1, 1 }; |
1045 | static int ip_local_port_range_max[] = { 65535, 65535 }; |
1046 | @@ -753,6 +754,15 @@ static struct ctl_table ipv4_table[] = { |
1047 | .extra2 = &four, |
1048 | }, |
1049 | { |
1050 | + .procname = "tcp_min_tso_segs", |
1051 | + .data = &sysctl_tcp_min_tso_segs, |
1052 | + .maxlen = sizeof(int), |
1053 | + .mode = 0644, |
1054 | + .proc_handler = proc_dointvec_minmax, |
1055 | + .extra1 = &zero, |
1056 | + .extra2 = &gso_max_segs, |
1057 | + }, |
1058 | + { |
1059 | .procname = "udp_mem", |
1060 | .data = &sysctl_udp_mem, |
1061 | .maxlen = sizeof(sysctl_udp_mem), |
1062 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
1063 | index 2b1b57f213b2..c888abf5a728 100644 |
1064 | --- a/net/ipv4/tcp.c |
1065 | +++ b/net/ipv4/tcp.c |
1066 | @@ -282,6 +282,8 @@ |
1067 | |
1068 | int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; |
1069 | |
1070 | +int sysctl_tcp_min_tso_segs __read_mostly = 2; |
1071 | + |
1072 | struct percpu_counter tcp_orphan_count; |
1073 | EXPORT_SYMBOL_GPL(tcp_orphan_count); |
1074 | |
1075 | @@ -786,12 +788,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, |
1076 | xmit_size_goal = mss_now; |
1077 | |
1078 | if (large_allowed && sk_can_gso(sk)) { |
1079 | - xmit_size_goal = ((sk->sk_gso_max_size - 1) - |
1080 | - inet_csk(sk)->icsk_af_ops->net_header_len - |
1081 | - inet_csk(sk)->icsk_ext_hdr_len - |
1082 | - tp->tcp_header_len); |
1083 | + u32 gso_size, hlen; |
1084 | + |
1085 | + /* Maybe we should/could use sk->sk_prot->max_header here ? */ |
1086 | + hlen = inet_csk(sk)->icsk_af_ops->net_header_len + |
1087 | + inet_csk(sk)->icsk_ext_hdr_len + |
1088 | + tp->tcp_header_len; |
1089 | + |
1090 | + /* Goal is to send at least one packet per ms, |
1091 | + * not one big TSO packet every 100 ms. |
1092 | + * This preserves ACK clocking and is consistent |
1093 | + * with tcp_tso_should_defer() heuristic. |
1094 | + */ |
1095 | + gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC); |
1096 | + gso_size = max_t(u32, gso_size, |
1097 | + sysctl_tcp_min_tso_segs * mss_now); |
1098 | + |
1099 | + xmit_size_goal = min_t(u32, gso_size, |
1100 | + sk->sk_gso_max_size - 1 - hlen); |
1101 | |
1102 | - /* TSQ : try to have two TSO segments in flight */ |
1103 | + /* TSQ : try to have at least two segments in flight |
1104 | + * (one in NIC TX ring, another in Qdisc) |
1105 | + */ |
1106 | xmit_size_goal = min_t(u32, xmit_size_goal, |
1107 | sysctl_tcp_limit_output_bytes >> 1); |
1108 | |
1109 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1110 | index 4b75aad14b04..e15d330919af 100644 |
1111 | --- a/net/ipv4/tcp_input.c |
1112 | +++ b/net/ipv4/tcp_input.c |
1113 | @@ -699,6 +699,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt) |
1114 | } |
1115 | } |
1116 | |
1117 | +/* Set the sk_pacing_rate to allow proper sizing of TSO packets. |
1118 | + * Note: TCP stack does not yet implement pacing. |
1119 | + * FQ packet scheduler can be used to implement cheap but effective |
1120 | + * TCP pacing, to smooth the burst on large writes when packets |
1121 | + * in flight is significantly lower than cwnd (or rwin) |
1122 | + */ |
1123 | +static void tcp_update_pacing_rate(struct sock *sk) |
1124 | +{ |
1125 | + const struct tcp_sock *tp = tcp_sk(sk); |
1126 | + u64 rate; |
1127 | + |
1128 | + /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ |
1129 | + rate = (u64)tp->mss_cache * 2 * (HZ << 3); |
1130 | + |
1131 | + rate *= max(tp->snd_cwnd, tp->packets_out); |
1132 | + |
1133 | + /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3), |
1134 | + * be conservative and assume srtt = 1 (125 us instead of 1.25 ms) |
1135 | + * We probably need usec resolution in the future. |
1136 | + * Note: This also takes care of possible srtt=0 case, |
1137 | + * when tcp_rtt_estimator() was not yet called. |
1138 | + */ |
1139 | + if (tp->srtt > 8 + 2) |
1140 | + do_div(rate, tp->srtt); |
1141 | + |
1142 | + sk->sk_pacing_rate = min_t(u64, rate, ~0U); |
1143 | +} |
1144 | + |
1145 | /* Calculate rto without backoff. This is the second half of Van Jacobson's |
1146 | * routine referred to above. |
1147 | */ |
1148 | @@ -1264,7 +1292,10 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, |
1149 | tp->lost_cnt_hint -= tcp_skb_pcount(prev); |
1150 | } |
1151 | |
1152 | - TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags; |
1153 | + TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; |
1154 | + if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) |
1155 | + TCP_SKB_CB(prev)->end_seq++; |
1156 | + |
1157 | if (skb == tcp_highest_sack(sk)) |
1158 | tcp_advance_highest_sack(sk, skb); |
1159 | |
1160 | @@ -3314,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) |
1161 | tcp_init_cwnd_reduction(sk, true); |
1162 | tcp_set_ca_state(sk, TCP_CA_CWR); |
1163 | tcp_end_cwnd_reduction(sk); |
1164 | - tcp_set_ca_state(sk, TCP_CA_Open); |
1165 | + tcp_try_keep_open(sk); |
1166 | NET_INC_STATS_BH(sock_net(sk), |
1167 | LINUX_MIB_TCPLOSSPROBERECOVERY); |
1168 | } |
1169 | @@ -3330,7 +3361,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
1170 | u32 ack_seq = TCP_SKB_CB(skb)->seq; |
1171 | u32 ack = TCP_SKB_CB(skb)->ack_seq; |
1172 | bool is_dupack = false; |
1173 | - u32 prior_in_flight; |
1174 | + u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt; |
1175 | u32 prior_fackets; |
1176 | int prior_packets = tp->packets_out; |
1177 | int prior_sacked = tp->sacked_out; |
1178 | @@ -3438,6 +3469,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
1179 | |
1180 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) |
1181 | tcp_schedule_loss_probe(sk); |
1182 | + if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd) |
1183 | + tcp_update_pacing_rate(sk); |
1184 | return 1; |
1185 | |
1186 | no_queue: |
1187 | @@ -5736,6 +5769,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, |
1188 | } else |
1189 | tcp_init_metrics(sk); |
1190 | |
1191 | + tcp_update_pacing_rate(sk); |
1192 | + |
1193 | /* Prevent spurious tcp_cwnd_restart() on |
1194 | * first data packet. |
1195 | */ |
1196 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
1197 | index 0145ce7e6098..cd16eb06bebf 100644 |
1198 | --- a/net/ipv4/tcp_output.c |
1199 | +++ b/net/ipv4/tcp_output.c |
1200 | @@ -887,8 +887,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
1201 | |
1202 | skb_orphan(skb); |
1203 | skb->sk = sk; |
1204 | - skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? |
1205 | - tcp_wfree : sock_wfree; |
1206 | + skb->destructor = tcp_wfree; |
1207 | atomic_add(skb->truesize, &sk->sk_wmem_alloc); |
1208 | |
1209 | /* Build TCP header and checksum it. */ |
1210 | @@ -977,6 +976,9 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) |
1211 | static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, |
1212 | unsigned int mss_now) |
1213 | { |
1214 | + /* Make sure we own this skb before messing gso_size/gso_segs */ |
1215 | + WARN_ON_ONCE(skb_cloned(skb)); |
1216 | + |
1217 | if (skb->len <= mss_now || !sk_can_gso(sk) || |
1218 | skb->ip_summed == CHECKSUM_NONE) { |
1219 | /* Avoid the costly divide in the normal |
1220 | @@ -1058,9 +1060,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, |
1221 | if (nsize < 0) |
1222 | nsize = 0; |
1223 | |
1224 | - if (skb_cloned(skb) && |
1225 | - skb_is_nonlinear(skb) && |
1226 | - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
1227 | + if (skb_unclone(skb, GFP_ATOMIC)) |
1228 | return -ENOMEM; |
1229 | |
1230 | /* Get a new skb... force flag on. */ |
1231 | @@ -1623,7 +1623,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) |
1232 | |
1233 | /* If a full-sized TSO skb can be sent, do it. */ |
1234 | if (limit >= min_t(unsigned int, sk->sk_gso_max_size, |
1235 | - sk->sk_gso_max_segs * tp->mss_cache)) |
1236 | + tp->xmit_size_goal_segs * tp->mss_cache)) |
1237 | goto send_now; |
1238 | |
1239 | /* Middle in queue won't get any more data, full sendable already? */ |
1240 | @@ -1832,7 +1832,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
1241 | while ((skb = tcp_send_head(sk))) { |
1242 | unsigned int limit; |
1243 | |
1244 | - |
1245 | tso_segs = tcp_init_tso_segs(sk, skb, mss_now); |
1246 | BUG_ON(!tso_segs); |
1247 | |
1248 | @@ -1861,13 +1860,20 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, |
1249 | break; |
1250 | } |
1251 | |
1252 | - /* TSQ : sk_wmem_alloc accounts skb truesize, |
1253 | - * including skb overhead. But thats OK. |
1254 | + /* TCP Small Queues : |
1255 | + * Control number of packets in qdisc/devices to two packets / or ~1 ms. |
1256 | + * This allows for : |
1257 | + * - better RTT estimation and ACK scheduling |
1258 | + * - faster recovery |
1259 | + * - high rates |
1260 | */ |
1261 | - if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) { |
1262 | + limit = max(skb->truesize, sk->sk_pacing_rate >> 10); |
1263 | + |
1264 | + if (atomic_read(&sk->sk_wmem_alloc) > limit) { |
1265 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); |
1266 | break; |
1267 | } |
1268 | + |
1269 | limit = mss_now; |
1270 | if (tso_segs > 1 && !tcp_urg_mode(tp)) |
1271 | limit = tcp_mss_split_point(sk, skb, mss_now, |
1272 | @@ -2329,6 +2335,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
1273 | int oldpcount = tcp_skb_pcount(skb); |
1274 | |
1275 | if (unlikely(oldpcount > 1)) { |
1276 | + if (skb_unclone(skb, GFP_ATOMIC)) |
1277 | + return -ENOMEM; |
1278 | tcp_init_tso_segs(sk, skb, cur_mss); |
1279 | tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); |
1280 | } |
1281 | diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c |
1282 | index 32b4a1675d82..066640e0ba8e 100644 |
1283 | --- a/net/ipv6/inet6_hashtables.c |
1284 | +++ b/net/ipv6/inet6_hashtables.c |
1285 | @@ -116,7 +116,7 @@ begintw: |
1286 | } |
1287 | if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr, |
1288 | ports, dif))) { |
1289 | - sock_put(sk); |
1290 | + inet_twsk_put(inet_twsk(sk)); |
1291 | goto begintw; |
1292 | } |
1293 | goto out; |
1294 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
1295 | index 44ffdb99a62e..878f8027ebf6 100644 |
1296 | --- a/net/ipv6/ip6_output.c |
1297 | +++ b/net/ipv6/ip6_output.c |
1298 | @@ -130,7 +130,7 @@ static int ip6_finish_output2(struct sk_buff *skb) |
1299 | } |
1300 | |
1301 | rcu_read_lock_bh(); |
1302 | - nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr); |
1303 | + nexthop = rt6_nexthop((struct rt6_info *)dst); |
1304 | neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop); |
1305 | if (unlikely(!neigh)) |
1306 | neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false); |
1307 | @@ -898,7 +898,7 @@ static int ip6_dst_lookup_tail(struct sock *sk, |
1308 | */ |
1309 | rt = (struct rt6_info *) *dst; |
1310 | rcu_read_lock_bh(); |
1311 | - n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt, &fl6->daddr)); |
1312 | + n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt)); |
1313 | err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0; |
1314 | rcu_read_unlock_bh(); |
1315 | |
1316 | @@ -1250,7 +1250,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, |
1317 | skb = skb_peek_tail(&sk->sk_write_queue); |
1318 | cork->length += length; |
1319 | if (((length > mtu) || |
1320 | - (skb && skb_is_gso(skb))) && |
1321 | + (skb && skb_has_frags(skb))) && |
1322 | (sk->sk_protocol == IPPROTO_UDP) && |
1323 | (rt->dst.dev->features & NETIF_F_UFO)) { |
1324 | err = ip6_ufo_append_data(sk, getfrag, from, length, |
1325 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
1326 | index bacce6c08644..3c1f493ccc63 100644 |
1327 | --- a/net/ipv6/route.c |
1328 | +++ b/net/ipv6/route.c |
1329 | @@ -473,6 +473,24 @@ out: |
1330 | } |
1331 | |
1332 | #ifdef CONFIG_IPV6_ROUTER_PREF |
1333 | +struct __rt6_probe_work { |
1334 | + struct work_struct work; |
1335 | + struct in6_addr target; |
1336 | + struct net_device *dev; |
1337 | +}; |
1338 | + |
1339 | +static void rt6_probe_deferred(struct work_struct *w) |
1340 | +{ |
1341 | + struct in6_addr mcaddr; |
1342 | + struct __rt6_probe_work *work = |
1343 | + container_of(w, struct __rt6_probe_work, work); |
1344 | + |
1345 | + addrconf_addr_solict_mult(&work->target, &mcaddr); |
1346 | + ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL); |
1347 | + dev_put(work->dev); |
1348 | + kfree(w); |
1349 | +} |
1350 | + |
1351 | static void rt6_probe(struct rt6_info *rt) |
1352 | { |
1353 | struct neighbour *neigh; |
1354 | @@ -496,17 +514,23 @@ static void rt6_probe(struct rt6_info *rt) |
1355 | |
1356 | if (!neigh || |
1357 | time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { |
1358 | - struct in6_addr mcaddr; |
1359 | - struct in6_addr *target; |
1360 | + struct __rt6_probe_work *work; |
1361 | |
1362 | - if (neigh) { |
1363 | + work = kmalloc(sizeof(*work), GFP_ATOMIC); |
1364 | + |
1365 | + if (neigh && work) |
1366 | neigh->updated = jiffies; |
1367 | + |
1368 | + if (neigh) |
1369 | write_unlock(&neigh->lock); |
1370 | - } |
1371 | |
1372 | - target = (struct in6_addr *)&rt->rt6i_gateway; |
1373 | - addrconf_addr_solict_mult(target, &mcaddr); |
1374 | - ndisc_send_ns(rt->dst.dev, NULL, target, &mcaddr, NULL); |
1375 | + if (work) { |
1376 | + INIT_WORK(&work->work, rt6_probe_deferred); |
1377 | + work->target = rt->rt6i_gateway; |
1378 | + dev_hold(rt->dst.dev); |
1379 | + work->dev = rt->dst.dev; |
1380 | + schedule_work(&work->work); |
1381 | + } |
1382 | } else { |
1383 | out: |
1384 | write_unlock(&neigh->lock); |
1385 | @@ -848,7 +872,6 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, |
1386 | if (ort->rt6i_dst.plen != 128 && |
1387 | ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) |
1388 | rt->rt6i_flags |= RTF_ANYCAST; |
1389 | - rt->rt6i_gateway = *daddr; |
1390 | } |
1391 | |
1392 | rt->rt6i_flags |= RTF_CACHE; |
1393 | @@ -1245,6 +1268,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev, |
1394 | rt->dst.flags |= DST_HOST; |
1395 | rt->dst.output = ip6_output; |
1396 | atomic_set(&rt->dst.__refcnt, 1); |
1397 | + rt->rt6i_gateway = fl6->daddr; |
1398 | rt->rt6i_dst.addr = fl6->daddr; |
1399 | rt->rt6i_dst.plen = 128; |
1400 | rt->rt6i_idev = idev; |
1401 | @@ -1801,7 +1825,10 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, |
1402 | in6_dev_hold(rt->rt6i_idev); |
1403 | rt->dst.lastuse = jiffies; |
1404 | |
1405 | - rt->rt6i_gateway = ort->rt6i_gateway; |
1406 | + if (ort->rt6i_flags & RTF_GATEWAY) |
1407 | + rt->rt6i_gateway = ort->rt6i_gateway; |
1408 | + else |
1409 | + rt->rt6i_gateway = *dest; |
1410 | rt->rt6i_flags = ort->rt6i_flags; |
1411 | if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == |
1412 | (RTF_DEFAULT | RTF_ADDRCONF)) |
1413 | @@ -2088,6 +2115,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, |
1414 | else |
1415 | rt->rt6i_flags |= RTF_LOCAL; |
1416 | |
1417 | + rt->rt6i_gateway = *addr; |
1418 | rt->rt6i_dst.addr = *addr; |
1419 | rt->rt6i_dst.plen = 128; |
1420 | rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); |
1421 | diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c |
1422 | index 6984c3a353cd..8c27de2b4d5a 100644 |
1423 | --- a/net/l2tp/l2tp_core.c |
1424 | +++ b/net/l2tp/l2tp_core.c |
1425 | @@ -115,6 +115,11 @@ struct l2tp_net { |
1426 | static void l2tp_session_set_header_len(struct l2tp_session *session, int version); |
1427 | static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
1428 | |
1429 | +static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) |
1430 | +{ |
1431 | + return sk->sk_user_data; |
1432 | +} |
1433 | + |
1434 | static inline struct l2tp_net *l2tp_pernet(struct net *net) |
1435 | { |
1436 | BUG_ON(!net); |
1437 | @@ -507,7 +512,7 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk, |
1438 | return 0; |
1439 | |
1440 | #if IS_ENABLED(CONFIG_IPV6) |
1441 | - if (sk->sk_family == PF_INET6) { |
1442 | + if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) { |
1443 | if (!uh->check) { |
1444 | LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n"); |
1445 | return 1; |
1446 | @@ -1071,7 +1076,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, |
1447 | /* Queue the packet to IP for output */ |
1448 | skb->local_df = 1; |
1449 | #if IS_ENABLED(CONFIG_IPV6) |
1450 | - if (skb->sk->sk_family == PF_INET6) |
1451 | + if (skb->sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
1452 | error = inet6_csk_xmit(skb, NULL); |
1453 | else |
1454 | #endif |
1455 | @@ -1198,7 +1203,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len |
1456 | |
1457 | /* Calculate UDP checksum if configured to do so */ |
1458 | #if IS_ENABLED(CONFIG_IPV6) |
1459 | - if (sk->sk_family == PF_INET6) |
1460 | + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
1461 | l2tp_xmit_ipv6_csum(sk, skb, udp_len); |
1462 | else |
1463 | #endif |
1464 | @@ -1247,10 +1252,9 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb); |
1465 | */ |
1466 | static void l2tp_tunnel_destruct(struct sock *sk) |
1467 | { |
1468 | - struct l2tp_tunnel *tunnel; |
1469 | + struct l2tp_tunnel *tunnel = l2tp_tunnel(sk); |
1470 | struct l2tp_net *pn; |
1471 | |
1472 | - tunnel = sk->sk_user_data; |
1473 | if (tunnel == NULL) |
1474 | goto end; |
1475 | |
1476 | @@ -1618,7 +1622,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 |
1477 | } |
1478 | |
1479 | /* Check if this socket has already been prepped */ |
1480 | - tunnel = (struct l2tp_tunnel *)sk->sk_user_data; |
1481 | + tunnel = l2tp_tunnel(sk); |
1482 | if (tunnel != NULL) { |
1483 | /* This socket has already been prepped */ |
1484 | err = -EBUSY; |
1485 | @@ -1647,6 +1651,24 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 |
1486 | if (cfg != NULL) |
1487 | tunnel->debug = cfg->debug; |
1488 | |
1489 | +#if IS_ENABLED(CONFIG_IPV6) |
1490 | + if (sk->sk_family == PF_INET6) { |
1491 | + struct ipv6_pinfo *np = inet6_sk(sk); |
1492 | + |
1493 | + if (ipv6_addr_v4mapped(&np->saddr) && |
1494 | + ipv6_addr_v4mapped(&np->daddr)) { |
1495 | + struct inet_sock *inet = inet_sk(sk); |
1496 | + |
1497 | + tunnel->v4mapped = true; |
1498 | + inet->inet_saddr = np->saddr.s6_addr32[3]; |
1499 | + inet->inet_rcv_saddr = np->rcv_saddr.s6_addr32[3]; |
1500 | + inet->inet_daddr = np->daddr.s6_addr32[3]; |
1501 | + } else { |
1502 | + tunnel->v4mapped = false; |
1503 | + } |
1504 | + } |
1505 | +#endif |
1506 | + |
1507 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
1508 | tunnel->encap = encap; |
1509 | if (encap == L2TP_ENCAPTYPE_UDP) { |
1510 | @@ -1655,7 +1677,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 |
1511 | udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; |
1512 | udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy; |
1513 | #if IS_ENABLED(CONFIG_IPV6) |
1514 | - if (sk->sk_family == PF_INET6) |
1515 | + if (sk->sk_family == PF_INET6 && !tunnel->v4mapped) |
1516 | udpv6_encap_enable(); |
1517 | else |
1518 | #endif |
1519 | diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h |
1520 | index 485a490fd990..2f89d43877d7 100644 |
1521 | --- a/net/l2tp/l2tp_core.h |
1522 | +++ b/net/l2tp/l2tp_core.h |
1523 | @@ -189,6 +189,9 @@ struct l2tp_tunnel { |
1524 | struct sock *sock; /* Parent socket */ |
1525 | int fd; /* Parent fd, if tunnel socket |
1526 | * was created by userspace */ |
1527 | +#if IS_ENABLED(CONFIG_IPV6) |
1528 | + bool v4mapped; |
1529 | +#endif |
1530 | |
1531 | struct work_struct del_work; |
1532 | |
1533 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
1534 | index 5ebee2ded9e9..8c46b271064a 100644 |
1535 | --- a/net/l2tp/l2tp_ppp.c |
1536 | +++ b/net/l2tp/l2tp_ppp.c |
1537 | @@ -353,7 +353,9 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh |
1538 | goto error_put_sess_tun; |
1539 | } |
1540 | |
1541 | + local_bh_disable(); |
1542 | l2tp_xmit_skb(session, skb, session->hdr_len); |
1543 | + local_bh_enable(); |
1544 | |
1545 | sock_put(ps->tunnel_sock); |
1546 | sock_put(sk); |
1547 | @@ -422,7 +424,9 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb) |
1548 | skb->data[0] = ppph[0]; |
1549 | skb->data[1] = ppph[1]; |
1550 | |
1551 | + local_bh_disable(); |
1552 | l2tp_xmit_skb(session, skb, session->hdr_len); |
1553 | + local_bh_enable(); |
1554 | |
1555 | sock_put(sk_tun); |
1556 | sock_put(sk); |
1557 | diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c |
1558 | index bdebd03bc8cd..70866d192efc 100644 |
1559 | --- a/net/netfilter/nf_conntrack_h323_main.c |
1560 | +++ b/net/netfilter/nf_conntrack_h323_main.c |
1561 | @@ -778,8 +778,8 @@ static int callforward_do_filter(const union nf_inet_addr *src, |
1562 | flowi6_to_flowi(&fl1), false)) { |
1563 | if (!afinfo->route(&init_net, (struct dst_entry **)&rt2, |
1564 | flowi6_to_flowi(&fl2), false)) { |
1565 | - if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, |
1566 | - sizeof(rt1->rt6i_gateway)) && |
1567 | + if (ipv6_addr_equal(rt6_nexthop(rt1), |
1568 | + rt6_nexthop(rt2)) && |
1569 | rt1->dst.dev == rt2->dst.dev) |
1570 | ret = 1; |
1571 | dst_release(&rt2->dst); |
1572 | diff --git a/net/sctp/output.c b/net/sctp/output.c |
1573 | index bbef4a7a9b56..0beb2f9c8a7c 100644 |
1574 | --- a/net/sctp/output.c |
1575 | +++ b/net/sctp/output.c |
1576 | @@ -547,7 +547,8 @@ int sctp_packet_transmit(struct sctp_packet *packet) |
1577 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. |
1578 | */ |
1579 | if (!sctp_checksum_disable) { |
1580 | - if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) { |
1581 | + if (!(dst->dev->features & NETIF_F_SCTP_CSUM) || |
1582 | + (dst_xfrm(dst) != NULL) || packet->ipfragok) { |
1583 | __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); |
1584 | |
1585 | /* 3) Put the resultant value into the checksum field in the |
1586 | diff --git a/net/socket.c b/net/socket.c |
1587 | index 4ca1526db756..9c467b2afc84 100644 |
1588 | --- a/net/socket.c |
1589 | +++ b/net/socket.c |
1590 | @@ -1956,6 +1956,16 @@ struct used_address { |
1591 | unsigned int name_len; |
1592 | }; |
1593 | |
1594 | +static int copy_msghdr_from_user(struct msghdr *kmsg, |
1595 | + struct msghdr __user *umsg) |
1596 | +{ |
1597 | + if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) |
1598 | + return -EFAULT; |
1599 | + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) |
1600 | + return -EINVAL; |
1601 | + return 0; |
1602 | +} |
1603 | + |
1604 | static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, |
1605 | struct msghdr *msg_sys, unsigned int flags, |
1606 | struct used_address *used_address) |
1607 | @@ -1974,8 +1984,11 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, |
1608 | if (MSG_CMSG_COMPAT & flags) { |
1609 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1610 | return -EFAULT; |
1611 | - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1612 | - return -EFAULT; |
1613 | + } else { |
1614 | + err = copy_msghdr_from_user(msg_sys, msg); |
1615 | + if (err) |
1616 | + return err; |
1617 | + } |
1618 | |
1619 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
1620 | err = -EMSGSIZE; |
1621 | @@ -2183,8 +2196,11 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, |
1622 | if (MSG_CMSG_COMPAT & flags) { |
1623 | if (get_compat_msghdr(msg_sys, msg_compat)) |
1624 | return -EFAULT; |
1625 | - } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) |
1626 | - return -EFAULT; |
1627 | + } else { |
1628 | + err = copy_msghdr_from_user(msg_sys, msg); |
1629 | + if (err) |
1630 | + return err; |
1631 | + } |
1632 | |
1633 | if (msg_sys->msg_iovlen > UIO_FASTIOV) { |
1634 | err = -EMSGSIZE; |
1635 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
1636 | index 826e09938bff..0258072a518f 100644 |
1637 | --- a/net/unix/af_unix.c |
1638 | +++ b/net/unix/af_unix.c |
1639 | @@ -1245,6 +1245,15 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb) |
1640 | return 0; |
1641 | } |
1642 | |
1643 | +static void unix_sock_inherit_flags(const struct socket *old, |
1644 | + struct socket *new) |
1645 | +{ |
1646 | + if (test_bit(SOCK_PASSCRED, &old->flags)) |
1647 | + set_bit(SOCK_PASSCRED, &new->flags); |
1648 | + if (test_bit(SOCK_PASSSEC, &old->flags)) |
1649 | + set_bit(SOCK_PASSSEC, &new->flags); |
1650 | +} |
1651 | + |
1652 | static int unix_accept(struct socket *sock, struct socket *newsock, int flags) |
1653 | { |
1654 | struct sock *sk = sock->sk; |
1655 | @@ -1279,6 +1288,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags) |
1656 | /* attach accepted sock to socket */ |
1657 | unix_state_lock(tsk); |
1658 | newsock->state = SS_CONNECTED; |
1659 | + unix_sock_inherit_flags(sock, newsock); |
1660 | sock_graft(tsk, newsock); |
1661 | unix_state_unlock(tsk); |
1662 | return 0; |
1663 | diff --git a/net/unix/diag.c b/net/unix/diag.c |
1664 | index d591091603bf..86fa0f3b2caf 100644 |
1665 | --- a/net/unix/diag.c |
1666 | +++ b/net/unix/diag.c |
1667 | @@ -124,6 +124,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r |
1668 | rep->udiag_family = AF_UNIX; |
1669 | rep->udiag_type = sk->sk_type; |
1670 | rep->udiag_state = sk->sk_state; |
1671 | + rep->pad = 0; |
1672 | rep->udiag_ino = sk_ino; |
1673 | sock_diag_save_cookie(sk, rep->udiag_cookie); |
1674 | |
1675 | diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c |
1676 | index 7d604c06c3dc..a271c27fac77 100644 |
1677 | --- a/net/wireless/radiotap.c |
1678 | +++ b/net/wireless/radiotap.c |
1679 | @@ -97,6 +97,10 @@ int ieee80211_radiotap_iterator_init( |
1680 | struct ieee80211_radiotap_header *radiotap_header, |
1681 | int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) |
1682 | { |
1683 | + /* check the radiotap header can actually be present */ |
1684 | + if (max_length < sizeof(struct ieee80211_radiotap_header)) |
1685 | + return -EINVAL; |
1686 | + |
1687 | /* Linux only supports version 0 radiotap format */ |
1688 | if (radiotap_header->it_version) |
1689 | return -EINVAL; |
1690 | @@ -131,7 +135,8 @@ int ieee80211_radiotap_iterator_init( |
1691 | */ |
1692 | |
1693 | if ((unsigned long)iterator->_arg - |
1694 | - (unsigned long)iterator->_rtheader > |
1695 | + (unsigned long)iterator->_rtheader + |
1696 | + sizeof(uint32_t) > |
1697 | (unsigned long)iterator->_max_length) |
1698 | return -EINVAL; |
1699 | } |
1700 | diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c |
1701 | index ad22decad02b..2519f9d03c0f 100644 |
1702 | --- a/sound/pci/hda/hda_generic.c |
1703 | +++ b/sound/pci/hda/hda_generic.c |
1704 | @@ -3474,7 +3474,7 @@ static int create_capture_mixers(struct hda_codec *codec) |
1705 | if (!multi) |
1706 | err = create_single_cap_vol_ctl(codec, n, vol, sw, |
1707 | inv_dmic); |
1708 | - else if (!multi_cap_vol) |
1709 | + else if (!multi_cap_vol && !inv_dmic) |
1710 | err = create_bind_cap_vol_ctl(codec, n, vol, sw); |
1711 | else |
1712 | err = create_multi_cap_vol_ctl(codec); |
1713 | diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c |
1714 | index d0323a693ba2..999550bbad40 100644 |
1715 | --- a/sound/usb/usx2y/us122l.c |
1716 | +++ b/sound/usb/usx2y/us122l.c |
1717 | @@ -262,7 +262,9 @@ static int usb_stream_hwdep_mmap(struct snd_hwdep *hw, |
1718 | } |
1719 | |
1720 | area->vm_ops = &usb_stream_hwdep_vm_ops; |
1721 | - area->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; |
1722 | + area->vm_flags |= VM_DONTDUMP; |
1723 | + if (!read) |
1724 | + area->vm_flags |= VM_DONTEXPAND; |
1725 | area->vm_private_data = us122l; |
1726 | atomic_inc(&us122l->mmap_count); |
1727 | out: |