Contents of /trunk/kernel-alx/patches-4.9/0215-4.9.116-all-fixes.patch
Revision 3192
Wed Aug 8 14:17:41 2018 UTC by niro
File size: 45098 byte(s)
-linux-4.9.116
diff --git a/Makefile b/Makefile
index 889c58e39928..a6b011778960 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 115
+SUBLEVEL = 116
EXTRAVERSION =
NAME = Roaring Lionus

@@ -635,6 +635,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)

ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS += $(call cc-option,-ffunction-sections,)
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index d071a3a0f876..fc97a11c41c5 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);

void ath79_ddr_wb_flush(u32 reg)
{
- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);

/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index f6325fa657fb..64ae8c094a54 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -55,7 +55,7 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);

*start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
+ *end = rsrc->start + size - 1;
}

int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index d76cd97a98b6..ee25a69630c3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -363,14 +363,6 @@ re_probe:
goto probe_failed;
}

- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c71a03593595..e680bab27dd7 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)

/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sended on the queue
* @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
*/
struct xcan_priv {
struct can_priv can;
+ spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};

+#define XCAN_CAP_WATERMARK 0x0001
+struct xcan_devtype_data {
+ unsigned int caps;
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}

+ /* reset clears FIFOs */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
return 0;
}

@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
+ unsigned long flags;

if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));

can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
priv->tx_head++;

/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stats->tx_bytes += cf->can_dlc;
}

+ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+ if (priv->tx_max > 1)
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);

+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
return NETDEV_TX_OK;
}

@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
return 1;
}

+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+ return CAN_STATE_ERROR_PASSIVE;
+ else if (status & XCAN_SR_ERRWRN_MASK)
+ return CAN_STATE_ERROR_WARNING;
+ else
+ return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+ enum can_state new_state,
+ struct can_frame *cf)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+ priv->can.state = new_state;
+
+ if (cf) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (cf)
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (cf)
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (cf)
+ cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+ break;
+ default:
+ /* non-ERROR states are handled elsewhere */
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ enum can_state old_state = priv->can.state;
+ enum can_state new_state;
+
+ /* changing error state due to successful frame RX/TX can only
+ * occur from these states
+ */
+ if (old_state != CAN_STATE_ERROR_WARNING &&
+ old_state != CAN_STATE_ERROR_PASSIVE)
+ return;
+
+ new_state = xcan_current_error_state(ndev);
+
+ if (new_state != old_state) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+ if (skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+ }
+}
+
/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 err_status, status, txerr = 0, rxerr = 0;
+ u32 err_status;

skb = alloc_can_err_skb(ndev, &cf);

err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);

if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ } else {
+ enum can_state new_state = xcan_current_error_state(ndev);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}

/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)

isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- if (isr & XCAN_IXR_RXOK_MASK) {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXOK_MASK);
- work_done += xcan_rx(ndev);
- } else {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXNEMP_MASK);
- break;
- }
+ work_done += xcan_rx(ndev);
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}

- if (work_done)
+ if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
+ xcan_update_error_state_after_rxtx(ndev);
+ }

if (work_done < quota) {
napi_complete(napi);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ ier |= XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ unsigned int frames_in_fifo;
+ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+ unsigned long flags;
+ int retries = 0;
+
+ /* Synchronize with xmit as we need to know the exact number
+ * of frames in the FIFO to stay in sync due to the TXFEMP
+ * handling.
+ * This also prevents a race between netif_wake_queue() and
+ * netif_stop_queue().
+ */
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+ /* clear TXOK anyway to avoid getting back here */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return;
+ }
+
+ /* Check if 2 frames were sent (TXOK only means that at least 1
+ * frame was sent).
+ */
+ if (frames_in_fifo > 1) {
+ WARN_ON(frames_in_fifo > priv->tx_max);
+
+ /* Synchronize TXOK and isr so that after the loop:
+ * (1) isr variable is up-to-date at least up to TXOK clear
+ * time. This avoids us clearing a TXOK of a second frame
+ * but not noticing that the FIFO is now empty and thus
+ * marking only a single frame as sent.
+ * (2) No TXOK is left. Having one could mean leaving a
+ * stray TXOK as we might process the associated frame
+ * via TXFEMP handling as we read TXFEMP *after* TXOK
+ * clear to satisfy (1).
+ */
+ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }

- while ((priv->tx_head - priv->tx_tail > 0) &&
- (isr & XCAN_IXR_TXOK_MASK)) {
+ if (isr & XCAN_IXR_TXFEMP_MASK) {
+ /* nothing in FIFO anymore */
+ frames_sent = frames_in_fifo;
+ }
+ } else {
+ /* single frame in fifo, just clear TXOK */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ }
+
+ while (frames_sent--) {
can_get_echo_skb(ndev, priv->tx_tail %
priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- can_led_event(ndev, CAN_LED_EVENT_TX);
+
netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ xcan_update_error_state_after_rxtx(ndev);
}

/**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
+ u32 isr_errors;

/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);

/* Check for the type of error interrupt and Processing it */
- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
- XCAN_IXR_ARBLST_MASK));
+ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+ if (isr_errors) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}

/* Check for the type of receive interrupt and Processing it */
- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ if (isr & XCAN_IXR_RXNEMP_MASK) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ ier &= ~XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 ier;

/* Disable interrupts and leave the can in configuration mode */
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~XCAN_INTR_ALL;
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ set_reset_mode(ndev);
priv->can.state = CAN_STATE_STOPPED;
}

@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
*/
static int __maybe_unused xcan_suspend(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_suspend(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);

- return 0;
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ xcan_chip_stop(ndev);
+ }
+
+ return pm_runtime_force_suspend(dev);
}

/**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
*/
static int __maybe_unused xcan_resume(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_resume(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret;

- return 0;
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = xcan_chip_start(ndev);
+ if (ret) {
+ dev_err(dev, "xcan_chip_start failed on resume\n");
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }

+ return 0;
}

/**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);

- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
-
- priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
- priv->can.state = CAN_STATE_SLEEPING;
-
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);

@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- u32 isr, status;

ret = clk_prepare_enable(priv->bus_clk);
if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
return ret;
}

- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
- if (netif_running(ndev)) {
- if (isr & XCAN_IXR_BSOFF_MASK) {
- priv->can.state = CAN_STATE_BUS_OFF;
- priv->write_reg(priv, XCAN_SRR_OFFSET,
- XCAN_SRR_RESET_MASK);
- } else if ((status & XCAN_SR_ESTAT_MASK) ==
- XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- } else {
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
-
return 0;
}

@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};

+static const struct xcan_devtype_data xcan_zynq_data = {
+ .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+ { .compatible = "xlnx,axi-can-1.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
+ const struct of_device_id *of_id;
+ int caps = 0;
void __iomem *addr;
- int ret, rx_max, tx_max;
+ int ret, rx_max, tx_max, tx_fifo_depth;

/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}

- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+ &tx_fifo_depth);
if (ret < 0)
goto err;

@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0)
goto err;

+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id) {
+ const struct xcan_devtype_data *devtype_data = of_id->data;
+
+ if (devtype_data)
+ caps = devtype_data->caps;
+ }
+
+ /* There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If watermark programming
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ */
+ if (caps & XCAN_CAP_WATERMARK)
+ tx_max = min(tx_fifo_depth, 2);
+ else
+ tx_max = 1;
+
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ spin_lock_init(&priv->tx_lock);

/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)

pm_runtime_put(&pdev->dev);

- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
- priv->reg_base, ndev->irq, priv->can.clock.freq,
- priv->tx_max);
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
+ priv->reg_base, ndev->irq, priv->can.clock.freq,
+ tx_fifo_depth, priv->tx_max);

return 0;

@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}

-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
- { .compatible = "xlnx,zynq-can-1.0", },
- { .compatible = "xlnx,axi-can-1.00.a", },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
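
The TX completion accounting is the subtle part of this xilinx_can series, so here is a minimal userspace model of it (illustration only; frames_completed is an invented name, not driver API): TXOK only guarantees that at least one frame left the FIFO, and because the driver now queues at most two frames, the TXFEMP ("TX FIFO empty") bit, sampled after TXOK has been cleared, is enough to tell one completion from two.

#include <stdio.h>

/* Toy model of the patched xcan_tx_interrupt() decision: at most two
 * frames are ever in flight, TXOK means "at least one frame was sent",
 * and TXFEMP read after clearing TXOK disambiguates one from two. */
static int frames_completed(unsigned int frames_in_fifo, int txfemp_set)
{
        int frames_sent = 1;    /* TXOK => at least one frame sent */

        if (frames_in_fifo > 1 && txfemp_set)
                frames_sent = frames_in_fifo;   /* FIFO drained: both sent */
        return frames_sent;
}

int main(void)
{
        printf("%d\n", frames_completed(2, 0)); /* 1: one frame still queued */
        printf("%d\n", frames_completed(2, 1)); /* 2: FIFO empty, both sent */
        return 0;
}
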
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d6b06bef1b69..9d1a7d5ae835 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2916,7 +2916,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;

err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index a8cb38789774..4a51fc6908ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -383,14 +383,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
- if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
- break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -715,6 +715,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;

+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 1612ec0d9103..f8b99d0b54d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -233,6 +233,7 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
+ u64 overflow_cycles;
u64 ns;
u64 frac = 0;
u32 dev_freq;
@@ -257,10 +258,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)

/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
+ * The period is calculated as the minimum between max HW cycles count
+ * (The clock source mask) and max amount of cycles that can be
+ * multiplied by clock multiplier where the result doesn't exceed
+ * 64bits.
*/
- ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
+ overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult);
+ overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1);
+
+ ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles,
frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ do_div(ns, NSEC_PER_SEC / HZ);
tstamp->overflow_period = ns;

INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
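
The overflow-period comment in the en_clock.c hunk packs two bounds into one line, so here is a worked userspace version of the arithmetic (the mask, mult and shift values below are invented for illustration, not real device parameters): the watchdog interval is capped both by half the hardware counter range and by the largest cycle count whose product with the clock multiplier still fits the 64-bit math in cyclecounter_cyc2ns().

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask = (1ULL << 41) - 1;       /* example: 41-bit HW counter */
        uint32_t mult = 0x624DD2F2;             /* example clock multiplier */
        uint32_t shift = 29;                    /* example clock shift */

        /* largest cycle count whose (cycles * mult) stays within 63 bits */
        uint64_t overflow_cycles = (~0ULL >> 1) / mult;
        if (overflow_cycles > mask >> 1)        /* and at most half the wrap */
                overflow_cycles = mask >> 1;

        /* cyclecounter_cyc2ns() is essentially (cycles * mult) >> shift */
        uint64_t ns = (overflow_cycles * mult) >> shift;
        printf("counter must be checked at least every %llu ns\n",
               (unsigned long long)ns);
        return 0;
}
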
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 4d217649c8b1..5fde8e335f13 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -598,7 +598,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
- if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+ if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 08bef18372ea..a9c950d29ce2 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1785,6 +1785,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },

{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 8bf0090218dd..bdb19db542a4 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1139,10 +1139,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)

if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
- * check for a new connection
+ * check for a new connection or over current condition.
+ * Based on USB2.0 Spec Section 11.12.5,
+ * C_PORT_OVER_CURRENT could be set while
+ * PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
- (portstatus & USB_PORT_STAT_OVERCURRENT))
+ (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+ (portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);

} else if (portstatus & USB_PORT_STAT_ENABLE) {
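
The cited USB 2.0 behaviour is easy to model standalone (the bit values below match the kernel's ch11.h definitions; port_flagged is a made-up helper name): the latched change bit C_PORT_OVER_CURRENT can remain set after the live PORT_OVER_CURRENT status bit has already cleared, so a transient over-current event is only caught if both words are tested.

#include <stdio.h>

#define USB_PORT_STAT_OVERCURRENT       0x0008  /* wPortStatus bit */
#define USB_PORT_STAT_C_OVERCURRENT     0x0008  /* wPortChange bit */

/* Mirrors the widened condition in hub_activate(). */
static int port_flagged(unsigned int portstatus, unsigned int portchange)
{
        return (portstatus & USB_PORT_STAT_OVERCURRENT) ||
               (portchange & USB_PORT_STAT_C_OVERCURRENT);
}

int main(void)
{
        /* transient over-current: live bit already clear, change bit latched */
        printf("%d\n", port_flagged(0x0000, USB_PORT_STAT_C_OVERCURRENT));
        return 0;
}
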
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index af72224f8ba2..04eb64381d92 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3243,7 +3243,7 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);

- return USB_GADGET_DELAYED_STATUS;
+ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}

static bool ffs_func_req_match(struct usb_function *f,
diff --git a/fs/exec.c b/fs/exec.c
index b8c43be24751..fcd8642ef2d2 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1228,15 +1228,14 @@ killed:
return -EAGAIN;
}

-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
- /* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
- strncpy(buf, tsk->comm, sizeof(tsk->comm));
+ strncpy(buf, tsk->comm, buf_size);
task_unlock(tsk);
return buf;
}
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);

/*
* These functions flushes out all traces of the currently running executable
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5ebef8c86c26..1cc5723a7821 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2999,7 +2999,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({ \
+ BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
+ __get_task_comm(buf, sizeof(buf), tsk); \
+})

#ifdef CONFIG_SMP
void scheduler_ipi(void);
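
To see what the sched.h macro buys, here is a small standalone sketch (GNU C statement expressions; _Static_assert stands in for BUILD_BUG_ON and a plain strncpy for __get_task_comm(), with the task locking omitted): sizeof(buf) is taken at the call site, so an array of TASK_COMM_LEN passes while a pointer argument now breaks the build instead of silently copying the wrong number of bytes.

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN 16

/* Userspace stand-in for the kernel macro: the static assertion plays
 * the role of BUILD_BUG_ON, the strncpy the role of __get_task_comm(). */
#define get_task_comm(buf, comm) ({                                     \
        _Static_assert(sizeof(buf) == TASK_COMM_LEN,                    \
                       "pass the comm array itself, not a pointer");    \
        strncpy(buf, comm, sizeof(buf));                                \
})

int main(void)
{
        char buf[TASK_COMM_LEN];

        get_task_comm(buf, "kworker/0:1");
        /* char *p = buf; get_task_comm(p, "x");  -- rejected at compile time */
        printf("%s\n", buf);
        return 0;
}
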
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b048d3d3b327..1f207dd22757 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2982,6 +2982,8 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len)
return __skb_grow(skb, len);
}

+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 18f029bcb8c7..5d440bb0e409 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -363,6 +363,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);

+void tcp_enter_quickack_mode(struct sock *sk);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -553,6 +554,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f3a0ad14b454..194e844e1021 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2339,9 +2339,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
return err;
}

- dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
- __dev_notify_flags(dev, old_flags, ~0U);
+ if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+ __dev_notify_flags(dev, old_flags, 0U);
+ } else {
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+ __dev_notify_flags(dev, old_flags, ~0U);
+ }
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 8cae7aa4a4ec..84c731aef0d8 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3253,6 +3253,7 @@ normal:
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
+ err = -EINVAL;
goto err;
}

@@ -3289,11 +3290,10 @@ skip_fraglist:

perform_csum_check:
if (!csum) {
- if (skb_has_shared_frag(nskb)) {
- err = __skb_linearize(nskb);
- if (err)
- goto err;
- }
+ if (skb_has_shared_frag(nskb) &&
+ __skb_linearize(nskb))
+ goto err;
+
if (!nskb->remcsum_offload)
nskb->ip_summed = CHECKSUM_NONE;
SKB_GSO_CB(nskb)->csum =
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 7f5fe07d0b13..f2e6e874e4ec 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1193,8 +1193,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
if (pmc) {
im->interface = pmc->interface;
im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- im->sfmode = pmc->sfmode;
- if (pmc->sfmode == MCAST_INCLUDE) {
+ if (im->sfmode == MCAST_INCLUDE) {
im->tomb = pmc->tomb;
im->sources = pmc->sources;
for (psf = im->sources; psf; psf = psf->sf_next)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 3b1f3bc8becb..100c86f1f547 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -497,6 +497,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;

+ skb_copy_hash(to, from);
+
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index dd80276a8205..b21e435f428c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -135,15 +135,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
- __be16 *ports = (__be16 *)skb_transport_header(skb);
+ __be16 *ports;
+ int end;

- if (skb_transport_offset(skb) + 4 > (int)skb->len)
+ end = skb_transport_offset(skb) + 4;
+ if (end > 0 && !pskb_may_pull(skb, end))
return;

/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
+ ports = (__be16 *)skb_transport_header(skb);

sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
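
Why the ip_sockglue.c check moves to pskb_may_pull() can be modelled without kernel code (struct fake_skb and may_read_linear() are invented for the illustration): skb->len counts linear plus paged bytes, but reading the ports through skb_transport_header() dereferences the linear head, and making those bytes linear (pulling fragment data in if needed) is exactly what pskb_may_pull() guarantees on success.

#include <stdio.h>

/* Toy type: len counts linear + paged data, while only head_len bytes
 * can be dereferenced directly through the linear head. */
struct fake_skb {
        unsigned int len;       /* total bytes, linear + fragments */
        unsigned int head_len;  /* bytes actually in the linear head */
};

/* Models the post-condition of a successful pskb_may_pull(skb, end). */
static int may_read_linear(const struct fake_skb *skb, unsigned int end)
{
        return end <= skb->head_len;
}

int main(void)
{
        struct fake_skb skb = { .len = 1500, .head_len = 54 };
        unsigned int end = 60; /* transport offset + 4, as in the patch */

        printf("old check (end <= skb->len) passes: %d\n", end <= skb.len);
        printf("actually safe to dereference: %d\n",
               may_read_linear(&skb, end));
        return 0;
}
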
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index ab37c6775630..dd52ccb812ea 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);

- /* State has changed from CE=0 to CE=1 and delayed
- * ACK has not sent yet.
- */
- if (!ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=0. */
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (!ca->ce_state) {
+ /* State has changed from CE=0 to CE=1, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk);
}

ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);

- /* State has changed from CE=1 to CE=0 and delayed
- * ACK has not sent yet.
- */
- if (ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=1. */
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (ca->ce_state) {
+ /* State has changed from CE=1 to CE=0, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk);
}

ca->prior_rcv_nxt = tp->rcv_nxt;
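
A compact state model of the fixed DCTCP transition (invented struct and names; the real code defers to __tcp_send_ack() and tcp_enter_quickack_mode()): if the delayed-ACK timer is armed when the CE bit flips, an ACK is flushed at the pre-flip rcv_nxt so it still reflects the old CE state, and the connection then quick-acks so the sender learns the new state without waiting out the delayed-ACK timer.

#include <stdio.h>

/* Toy connection state; models dctcp_ce_state_0_to_1()/_1_to_0(). */
struct conn {
        unsigned int rcv_nxt;           /* next sequence we expect */
        unsigned int prior_rcv_nxt;     /* rcv_nxt at the previous sample */
        int ce_state;                   /* last CE bit seen */
        int delayed_ack_pending;        /* ICSK_ACK_TIMER armed? */
};

static void ce_sample(struct conn *c, int new_ce)
{
        if (c->ce_state != new_ce) {
                if (c->delayed_ack_pending) {
                        /* flush the old ACK at the pre-flip sequence so
                         * it still carries the previous CE state */
                        printf("ack seq=%u, old CE=%d\n",
                               c->prior_rcv_nxt, c->ce_state);
                        c->delayed_ack_pending = 0;
                }
                printf("enter quickack mode for new CE=%d\n", new_ce);
        }
        c->prior_rcv_nxt = c->rcv_nxt;
        c->ce_state = new_ce;
}

int main(void)
{
        struct conn c = { .rcv_nxt = 2000, .prior_rcv_nxt = 1000,
                          .ce_state = 0, .delayed_ack_pending = 1 };

        ce_sample(&c, 1);       /* CE 0 -> 1 with a delayed ACK pending */
        return 0;
}
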
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index be453aa8fce8..44d136fd2af5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -209,13 +209,14 @@ static void tcp_incr_quickack(struct sock *sk)
icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}

-static void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk);
icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
+EXPORT_SYMBOL(tcp_enter_quickack_mode);

/* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive.
@@ -4516,7 +4517,7 @@ coalesce_done:
/* All the bits are present. Drop. */
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
@@ -4535,7 +4536,7 @@ coalesce_done:
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb1);
+ tcp_drop(sk, skb1);
goto merge_right;
}
} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
@@ -4917,6 +4918,7 @@ end:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ u32 range_truesize, sum_tiny = 0;
struct sk_buff *skb, *head;
struct rb_node *p;
u32 start, end;
@@ -4935,6 +4937,7 @@ new_range:
}
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
+ range_truesize = skb->truesize;

for (head = skb;;) {
skb = tcp_skb_next(skb, NULL);

if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, NULL, &tp->out_of_order_queue,
- head, skb, start, end);
+ /* Do not attempt collapsing tiny skbs */
+ if (range_truesize != head->truesize ||
+ end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+ head, skb, start, end);
+ } else {
+ sum_tiny += range_truesize;
+ if (sum_tiny > sk->sk_rcvbuf >> 3)
+ return;
+ }
goto new_range;
}

+ range_truesize += skb->truesize;
if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4964,6 +4976,7 @@ new_range:
* 2) not add too big latencies if thousands of packets sit there.
* (But if application shrinks SO_RCVBUF, we could still end up
* freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
*
* Return true if queue has shrunk.
*/
@@ -4971,20 +4984,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct rb_node *node, *prev;
+ int goal;

if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
return false;

NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ goal = sk->sk_rcvbuf >> 3;
node = &tp->ooo_last_skb->rbnode;
do {
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
+ goal -= rb_to_skb(node)->truesize;
tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
- sk_mem_reclaim(sk);
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
- !tcp_under_memory_pressure(sk))
- break;
+ if (!prev || goal <= 0) {
+ sk_mem_reclaim(sk);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !tcp_under_memory_pressure(sk))
+ break;
+ goal = sk->sk_rcvbuf >> 3;
+ }
node = prev;
} while (node);
tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
@@ -5019,6 +5038,9 @@ static int tcp_prune_queue(struct sock *sk)
else if (tcp_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);

+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ return 0;
+
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
tcp_collapse(sk, &sk->sk_receive_queue, NULL,
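
The new pruning goal in tcp_prune_ofo_queue() is plain arithmetic, so a tiny worked example with an illustrative buffer size (not a real sysctl default) shows what comment 3 above means by dropping at least 12.5 % of sk_rcvbuf per pass:

#include <stdio.h>

int main(void)
{
        int sk_rcvbuf = 6291456;        /* example: 6 MiB receive buffer */
        int goal = sk_rcvbuf >> 3;      /* the per-pass pruning goal */

        printf("free at least %d bytes (%.1f%% of sk_rcvbuf) per pass\n",
               goal, 100.0 * goal / sk_rcvbuf);
        return 0;
}
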
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f07a0a1c98ff..5f916953b28e 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -174,8 +174,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}

/* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ u32 rcv_nxt)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (unlikely(rcv_nxt != tp->rcv_nxt))
+ return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -905,8 +910,8 @@ out:
* We are working here with either a clone of the original
* SKB, or a fresh unique copy made by the retransmit engine.
*/
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
- gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet;
@@ -969,7 +974,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->source = inet->inet_sport;
th->dest = inet->inet_dport;
th->seq = htonl(tcb->seq);
- th->ack_seq = htonl(tp->rcv_nxt);
+ th->ack_seq = htonl(rcv_nxt);
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->tcp_flags);

@@ -1010,7 +1015,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);

if (likely(tcb->tcp_flags & TCPHDR_ACK))
- tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+ tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);

if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
@@ -1046,6 +1051,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
return err;
}

+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ gfp_t gfp_mask)
+{
+ return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+ tcp_sk(sk)->rcv_nxt);
+}
+
/* This routine just queues the buffer for sending.
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3482,7 +3494,7 @@ void tcp_send_delayed_ack(struct sock *sk)
}

/* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
{
struct sk_buff *buff;

@@ -3520,9 +3532,14 @@ void tcp_send_ack(struct sock *sk)

/* Send it off, this clears delayed acks for us. */
skb_mstamp_get(&buff->skb_mstamp);
- tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+ __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+ __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
}
-EXPORT_SYMBOL_GPL(tcp_send_ack);

/* This routine sends a packet with an out of date sequence
* number. It assumes the other end will try to ack it.
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 38062f403ceb..2d3c8fe27583 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -694,13 +694,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports = (__be16 *) skb_transport_header(skb);
+ __be16 *ports;
+ int end;

- if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+ end = skb_transport_offset(skb) + 4;
+ if (end <= 0 || pskb_may_pull(skb, end)) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
+ ports = (__be16 *)skb_transport_header(skb);

sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index eb9046eae581..ea14466cdca8 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -576,6 +576,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;

+ skb_copy_hash(to, from);
+
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ca8fac6e5a09..918c161e5b55 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
if (pmc) {
im->idev = pmc->idev;
im->mca_crcount = idev->mc_qrv;
- im->mca_sfmode = pmc->mca_sfmode;
- if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ if (im->mca_sfmode == MCAST_INCLUDE) {
im->mca_tomb = pmc->mca_tomb;
im->mca_sources = pmc->mca_sources;
for (psf = im->mca_sources; psf; psf = psf->sf_next)