Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0114-4.4.15-all-fixes.patch

Revision 2823
Tue Sep 13 07:17:58 2016 UTC by niro
File size: 40581 bytes
-linux-4.4.15
1 diff --git a/Makefile b/Makefile
2 index fadbb9d73c6d..979088079338 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 4
8 -SUBLEVEL = 14
9 +SUBLEVEL = 15
10 EXTRAVERSION =
11 NAME = Blurry Fish Butt
12
13 diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
14 index 43fe85f20d57..7097a3395b25 100644
15 --- a/crypto/crypto_user.c
16 +++ b/crypto/crypto_user.c
17 @@ -455,6 +455,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
18 [CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
19 [CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
20 [CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
21 + [CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
22 [CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
23 };
24
25 diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
26 index 66b1c3313e2e..cd4398498495 100644
27 --- a/drivers/crypto/ux500/hash/hash_core.c
28 +++ b/drivers/crypto/ux500/hash/hash_core.c
29 @@ -797,7 +797,7 @@ static int hash_process_data(struct hash_device_data *device_data,
30 &device_data->state);
31 memmove(req_ctx->state.buffer,
32 device_data->state.buffer,
33 - HASH_BLOCK_SIZE / sizeof(u32));
34 + HASH_BLOCK_SIZE);
35 if (ret) {
36 dev_err(device_data->dev,
37 "%s: hash_resume_state() failed!\n",
38 @@ -848,7 +848,7 @@ static int hash_process_data(struct hash_device_data *device_data,
39
40 memmove(device_data->state.buffer,
41 req_ctx->state.buffer,
42 - HASH_BLOCK_SIZE / sizeof(u32));
43 + HASH_BLOCK_SIZE);
44 if (ret) {
45 dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
46 __func__);
47 diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
48 index 0b8fe2ec5315..f3801b983f42 100644
49 --- a/drivers/crypto/vmx/aes_cbc.c
50 +++ b/drivers/crypto/vmx/aes_cbc.c
51 @@ -182,7 +182,7 @@ struct crypto_alg p8_aes_cbc_alg = {
52 .cra_name = "cbc(aes)",
53 .cra_driver_name = "p8_aes_cbc",
54 .cra_module = THIS_MODULE,
55 - .cra_priority = 1000,
56 + .cra_priority = 2000,
57 .cra_type = &crypto_blkcipher_type,
58 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
59 .cra_alignmask = 0,
60 diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
61 index ee1306cd8f59..404a1b69a3ab 100644
62 --- a/drivers/crypto/vmx/aes_ctr.c
63 +++ b/drivers/crypto/vmx/aes_ctr.c
64 @@ -166,7 +166,7 @@ struct crypto_alg p8_aes_ctr_alg = {
65 .cra_name = "ctr(aes)",
66 .cra_driver_name = "p8_aes_ctr",
67 .cra_module = THIS_MODULE,
68 - .cra_priority = 1000,
69 + .cra_priority = 2000,
70 .cra_type = &crypto_blkcipher_type,
71 .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
72 .cra_alignmask = 0,
73 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
74 index bd377a6b067d..df54475d163b 100644
75 --- a/drivers/net/ethernet/atheros/alx/main.c
76 +++ b/drivers/net/ethernet/atheros/alx/main.c
77 @@ -86,9 +86,14 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
78 while (!cur_buf->skb && next != rxq->read_idx) {
79 struct alx_rfd *rfd = &rxq->rfd[cur];
80
81 - skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
82 + skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
83 if (!skb)
84 break;
85 +
86 + /* Workround for the HW RX DMA overflow issue */
87 + if (((unsigned long)skb->data & 0xfff) == 0xfc0)
88 + skb_reserve(skb, 64);
89 +
90 dma = dma_map_single(&alx->hw.pdev->dev,
91 skb->data, alx->rxbuf_size,
92 DMA_FROM_DEVICE);
93 diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
94 index 169059c92f80..8d54e7b41bbf 100644
95 --- a/drivers/net/ethernet/cadence/macb.c
96 +++ b/drivers/net/ethernet/cadence/macb.c
97 @@ -2405,9 +2405,9 @@ static int macb_init(struct platform_device *pdev)
98 if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
99 val = GEM_BIT(RGMII);
100 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
101 - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
102 + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
103 val = MACB_BIT(RMII);
104 - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII))
105 + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
106 val = MACB_BIT(MII);
107
108 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
109 @@ -2738,7 +2738,7 @@ static int at91ether_init(struct platform_device *pdev)
110 }
111
112 static const struct macb_config at91sam9260_config = {
113 - .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII,
114 + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
115 .clk_init = macb_clk_init,
116 .init = macb_init,
117 };
118 @@ -2751,21 +2751,22 @@ static const struct macb_config pc302gem_config = {
119 };
120
121 static const struct macb_config sama5d2_config = {
122 - .caps = 0,
123 + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
124 .dma_burst_length = 16,
125 .clk_init = macb_clk_init,
126 .init = macb_init,
127 };
128
129 static const struct macb_config sama5d3_config = {
130 - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
131 + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
132 + | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
133 .dma_burst_length = 16,
134 .clk_init = macb_clk_init,
135 .init = macb_init,
136 };
137
138 static const struct macb_config sama5d4_config = {
139 - .caps = 0,
140 + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
141 .dma_burst_length = 4,
142 .clk_init = macb_clk_init,
143 .init = macb_init,
144 diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
145 index d83b0db77821..3f385ab94988 100644
146 --- a/drivers/net/ethernet/cadence/macb.h
147 +++ b/drivers/net/ethernet/cadence/macb.h
148 @@ -398,7 +398,7 @@
149 /* Capability mask bits */
150 #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
151 #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
152 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
153 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
154 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
155 #define MACB_CAPS_FIFO_MODE 0x10000000
156 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
157 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
158 index 6dc810bce295..944a6dca0fcb 100644
159 --- a/drivers/usb/core/quirks.c
160 +++ b/drivers/usb/core/quirks.c
161 @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
162 /* Creative SB Audigy 2 NX */
163 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
164
165 + /* USB3503 */
166 + { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
167 +
168 /* Microsoft Wireless Laser Mouse 6000 Receiver */
169 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
170
171 @@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
172 /* MAYA44USB sound device */
173 { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
174
175 + /* ASUS Base Station(T100) */
176 + { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
177 + USB_QUIRK_IGNORE_REMOTE_WAKEUP },
178 +
179 /* Action Semiconductor flash disk */
180 { USB_DEVICE(0x10d6, 0x2200), .driver_info =
181 USB_QUIRK_STRING_FETCH_255 },
182 @@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
183 { USB_DEVICE(0x1908, 0x1315), .driver_info =
184 USB_QUIRK_HONOR_BNUMINTERFACES },
185
186 - /* INTEL VALUE SSD */
187 - { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
188 -
189 - /* USB3503 */
190 - { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
191 -
192 - /* ASUS Base Station(T100) */
193 - { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
194 - USB_QUIRK_IGNORE_REMOTE_WAKEUP },
195 -
196 /* Protocol and OTG Electrical Test Device */
197 { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
198 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
199
200 + /* Acer C120 LED Projector */
201 + { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
202 +
203 /* Blackmagic Design Intensity Shuttle */
204 { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
205
206 /* Blackmagic Design UltraStudio SDI */
207 { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
208
209 + /* INTEL VALUE SSD */
210 + { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
211 +
212 { } /* terminating entry must be last */
213 };
214
215 diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
216 index dd5cb5577dca..2f1fb7e7aa54 100644
217 --- a/drivers/usb/dwc3/dwc3-exynos.c
218 +++ b/drivers/usb/dwc3/dwc3-exynos.c
219 @@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
220
221 platform_set_drvdata(pdev, exynos);
222
223 - ret = dwc3_exynos_register_phys(exynos);
224 - if (ret) {
225 - dev_err(dev, "couldn't register PHYs\n");
226 - return ret;
227 - }
228 -
229 exynos->dev = dev;
230
231 exynos->clk = devm_clk_get(dev, "usbdrd30");
232 @@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
233 goto err3;
234 }
235
236 + ret = dwc3_exynos_register_phys(exynos);
237 + if (ret) {
238 + dev_err(dev, "couldn't register PHYs\n");
239 + goto err4;
240 + }
241 +
242 if (node) {
243 ret = of_platform_populate(node, NULL, NULL, dev);
244 if (ret) {
245 dev_err(dev, "failed to add dwc3 core\n");
246 - goto err4;
247 + goto err5;
248 }
249 } else {
250 dev_err(dev, "no device node, failed to add dwc3 core\n");
251 ret = -ENODEV;
252 - goto err4;
253 + goto err5;
254 }
255
256 return 0;
257
258 +err5:
259 + platform_device_unregister(exynos->usb2_phy);
260 + platform_device_unregister(exynos->usb3_phy);
261 err4:
262 regulator_disable(exynos->vdd10);
263 err3:
264 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
265 index f454c7af489c..55386619a0f1 100644
266 --- a/drivers/usb/gadget/legacy/inode.c
267 +++ b/drivers/usb/gadget/legacy/inode.c
268 @@ -937,8 +937,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
269 struct usb_ep *ep = dev->gadget->ep0;
270 struct usb_request *req = dev->req;
271
272 - if ((retval = setup_req (ep, req, 0)) == 0)
273 - retval = usb_ep_queue (ep, req, GFP_ATOMIC);
274 + if ((retval = setup_req (ep, req, 0)) == 0) {
275 + spin_unlock_irq (&dev->lock);
276 + retval = usb_ep_queue (ep, req, GFP_KERNEL);
277 + spin_lock_irq (&dev->lock);
278 + }
279 dev->state = STATE_DEV_CONNECTED;
280
281 /* assume that was SET_CONFIGURATION */
282 @@ -1456,8 +1459,11 @@ delegate:
283 w_length);
284 if (value < 0)
285 break;
286 +
287 + spin_unlock (&dev->lock);
288 value = usb_ep_queue (gadget->ep0, dev->req,
289 - GFP_ATOMIC);
290 + GFP_KERNEL);
291 + spin_lock (&dev->lock);
292 if (value < 0) {
293 clean_req (gadget->ep0, dev->req);
294 break;
295 @@ -1480,11 +1486,14 @@ delegate:
296 if (value >= 0 && dev->state != STATE_DEV_SETUP) {
297 req->length = value;
298 req->zero = value < w_length;
299 - value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
300 +
301 + spin_unlock (&dev->lock);
302 + value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
303 if (value < 0) {
304 DBG (dev, "ep_queue --> %d\n", value);
305 req->status = 0;
306 }
307 + return value;
308 }
309
310 /* device stalls when value < 0 */
311 diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
312 index 4031b372008e..c1c1024a054c 100644
313 --- a/drivers/usb/host/ehci-tegra.c
314 +++ b/drivers/usb/host/ehci-tegra.c
315 @@ -89,7 +89,7 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
316 if (!usb1_reset_attempted) {
317 struct reset_control *usb1_reset;
318
319 - usb1_reset = of_reset_control_get(phy_np, "usb");
320 + usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
321 if (IS_ERR(usb1_reset)) {
322 dev_warn(&pdev->dev,
323 "can't get utmi-pads reset from the PHY\n");
324 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
325 index ea4fb4b0cd44..de644e56aa3b 100644
326 --- a/drivers/usb/host/xhci-pci.c
327 +++ b/drivers/usb/host/xhci-pci.c
328 @@ -37,6 +37,7 @@
329 /* Device for a quirk */
330 #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
331 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
332 +#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009
333 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400
334
335 #define PCI_VENDOR_ID_ETRON 0x1b6f
336 @@ -115,6 +116,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
337 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
338 }
339
340 + if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
341 + pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
342 + xhci->quirks |= XHCI_BROKEN_STREAMS;
343 +
344 if (pdev->vendor == PCI_VENDOR_ID_NEC)
345 xhci->quirks |= XHCI_NEC_HOST;
346
347 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
348 index 05647e6753cd..4fe7c9b56bc0 100644
349 --- a/drivers/usb/host/xhci-plat.c
350 +++ b/drivers/usb/host/xhci-plat.c
351 @@ -132,6 +132,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
352 ret = clk_prepare_enable(clk);
353 if (ret)
354 goto put_hcd;
355 + } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
356 + ret = -EPROBE_DEFER;
357 + goto put_hcd;
358 }
359
360 if (of_device_is_compatible(pdev->dev.of_node,
361 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
362 index 2b63969c2bbf..34cd23724bed 100644
363 --- a/drivers/usb/host/xhci-ring.c
364 +++ b/drivers/usb/host/xhci-ring.c
365 @@ -289,6 +289,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
366
367 temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
368 xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
369 +
370 + /*
371 + * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
372 + * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
373 + * but the completion event in never sent. Use the cmd timeout timer to
374 + * handle those cases. Use twice the time to cover the bit polling retry
375 + */
376 + mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
377 xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
378 &xhci->op_regs->cmd_ring);
379
380 @@ -313,6 +321,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
381
382 xhci_err(xhci, "Stopped the command ring failed, "
383 "maybe the host is dead\n");
384 + del_timer(&xhci->cmd_timer);
385 xhci->xhc_state |= XHCI_STATE_DYING;
386 xhci_quiesce(xhci);
387 xhci_halt(xhci);
388 @@ -1252,22 +1261,21 @@ void xhci_handle_command_timeout(unsigned long data)
389 int ret;
390 unsigned long flags;
391 u64 hw_ring_state;
392 - struct xhci_command *cur_cmd = NULL;
393 + bool second_timeout = false;
394 xhci = (struct xhci_hcd *) data;
395
396 /* mark this command to be cancelled */
397 spin_lock_irqsave(&xhci->lock, flags);
398 if (xhci->current_cmd) {
399 - cur_cmd = xhci->current_cmd;
400 - cur_cmd->status = COMP_CMD_ABORT;
401 + if (xhci->current_cmd->status == COMP_CMD_ABORT)
402 + second_timeout = true;
403 + xhci->current_cmd->status = COMP_CMD_ABORT;
404 }
405
406 -
407 /* Make sure command ring is running before aborting it */
408 hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
409 if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
410 (hw_ring_state & CMD_RING_RUNNING)) {
411 -
412 spin_unlock_irqrestore(&xhci->lock, flags);
413 xhci_dbg(xhci, "Command timeout\n");
414 ret = xhci_abort_cmd_ring(xhci);
415 @@ -1279,6 +1287,15 @@ void xhci_handle_command_timeout(unsigned long data)
416 }
417 return;
418 }
419 +
420 + /* command ring failed to restart, or host removed. Bail out */
421 + if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
422 + spin_unlock_irqrestore(&xhci->lock, flags);
423 + xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
424 + xhci_cleanup_command_queue(xhci);
425 + return;
426 + }
427 +
428 /* command timeout on stopped ring, ring can't be aborted */
429 xhci_dbg(xhci, "Command timeout on stopped ring\n");
430 xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
431 @@ -2727,7 +2744,8 @@ hw_died:
432 writel(irq_pending, &xhci->ir_set->irq_pending);
433 }
434
435 - if (xhci->xhc_state & XHCI_STATE_DYING) {
436 + if (xhci->xhc_state & XHCI_STATE_DYING ||
437 + xhci->xhc_state & XHCI_STATE_HALTED) {
438 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
439 "Shouldn't IRQs be disabled?\n");
440 /* Clear the event handler busy flag (RW1C);
441 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
442 index ec9e758d5fcd..6fe0174da226 100644
443 --- a/drivers/usb/host/xhci.c
444 +++ b/drivers/usb/host/xhci.c
445 @@ -680,20 +680,23 @@ void xhci_stop(struct usb_hcd *hcd)
446 u32 temp;
447 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
448
449 - if (xhci->xhc_state & XHCI_STATE_HALTED)
450 - return;
451 -
452 mutex_lock(&xhci->mutex);
453 - spin_lock_irq(&xhci->lock);
454 - xhci->xhc_state |= XHCI_STATE_HALTED;
455 - xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
456
457 - /* Make sure the xHC is halted for a USB3 roothub
458 - * (xhci_stop() could be called as part of failed init).
459 - */
460 - xhci_halt(xhci);
461 - xhci_reset(xhci);
462 - spin_unlock_irq(&xhci->lock);
463 + if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
464 + spin_lock_irq(&xhci->lock);
465 +
466 + xhci->xhc_state |= XHCI_STATE_HALTED;
467 + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
468 + xhci_halt(xhci);
469 + xhci_reset(xhci);
470 +
471 + spin_unlock_irq(&xhci->lock);
472 + }
473 +
474 + if (!usb_hcd_is_primary_hcd(hcd)) {
475 + mutex_unlock(&xhci->mutex);
476 + return;
477 + }
478
479 xhci_cleanup_msix(xhci);
480
481 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
482 index ee9ff7028b92..00eed5d66fda 100644
483 --- a/drivers/usb/musb/musb_core.c
484 +++ b/drivers/usb/musb/musb_core.c
485 @@ -2401,7 +2401,8 @@ static void musb_restore_context(struct musb *musb)
486 musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
487 musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
488 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
489 - musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
490 + if (musb->context.devctl & MUSB_DEVCTL_SESSION)
491 + musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
492
493 for (i = 0; i < musb->config->num_eps; ++i) {
494 struct musb_hw_ep *hw_ep;
495 diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
496 index 795a45b1b25b..59a63a0b7985 100644
497 --- a/drivers/usb/musb/musb_host.c
498 +++ b/drivers/usb/musb/musb_host.c
499 @@ -594,14 +594,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
500 musb_writew(ep->regs, MUSB_TXCSR, 0);
501
502 /* scrub all previous state, clearing toggle */
503 - } else {
504 - csr = musb_readw(ep->regs, MUSB_RXCSR);
505 - if (csr & MUSB_RXCSR_RXPKTRDY)
506 - WARNING("rx%d, packet/%d ready?\n", ep->epnum,
507 - musb_readw(ep->regs, MUSB_RXCOUNT));
508 -
509 - musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
510 }
511 + csr = musb_readw(ep->regs, MUSB_RXCSR);
512 + if (csr & MUSB_RXCSR_RXPKTRDY)
513 + WARNING("rx%d, packet/%d ready?\n", ep->epnum,
514 + musb_readw(ep->regs, MUSB_RXCOUNT));
515 +
516 + musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
517
518 /* target addr and (for multipoint) hub addr/port */
519 if (musb->is_multipoint) {
520 @@ -995,9 +994,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
521 if (is_in) {
522 dma = is_dma_capable() ? ep->rx_channel : NULL;
523
524 - /* clear nak timeout bit */
525 + /*
526 + * Need to stop the transaction by clearing REQPKT first
527 + * then the NAK Timeout bit ref MUSBMHDRC USB 2.0 HIGH-SPEED
528 + * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2
529 + */
530 rx_csr = musb_readw(epio, MUSB_RXCSR);
531 rx_csr |= MUSB_RXCSR_H_WZC_BITS;
532 + rx_csr &= ~MUSB_RXCSR_H_REQPKT;
533 + musb_writew(epio, MUSB_RXCSR, rx_csr);
534 rx_csr &= ~MUSB_RXCSR_DATAERROR;
535 musb_writew(epio, MUSB_RXCSR, rx_csr);
536
537 @@ -1551,7 +1556,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
538 struct urb *urb,
539 size_t len)
540 {
541 - struct dma_channel *channel = hw_ep->tx_channel;
542 + struct dma_channel *channel = hw_ep->rx_channel;
543 void __iomem *epio = hw_ep->regs;
544 dma_addr_t *buf;
545 u32 length, res;
546 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
547 index 78b4f64c6b00..06c7dbc1c802 100644
548 --- a/drivers/usb/serial/mos7720.c
549 +++ b/drivers/usb/serial/mos7720.c
550 @@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
551 urblist_entry)
552 usb_unlink_urb(urbtrack->urb);
553 spin_unlock_irqrestore(&mos_parport->listlock, flags);
554 + parport_del_port(mos_parport->pp);
555
556 kref_put(&mos_parport->ref_count, destroy_mos_parport);
557 }
558 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
559 index 9baf081174ce..e26e32169a36 100644
560 --- a/drivers/usb/storage/uas.c
561 +++ b/drivers/usb/storage/uas.c
562 @@ -811,6 +811,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
563 if (devinfo->flags & US_FL_BROKEN_FUA)
564 sdev->broken_fua = 1;
565
566 + scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
567 return 0;
568 }
569
570 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
571 index 67bc2da5d233..4f6d29c8e3d8 100644
572 --- a/include/linux/bpf.h
573 +++ b/include/linux/bpf.h
574 @@ -198,6 +198,10 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
575 static inline void bpf_prog_put(struct bpf_prog *prog)
576 {
577 }
578 +
579 +static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
580 +{
581 +}
582 #endif /* CONFIG_BPF_SYSCALL */
583
584 /* verifier prototypes for helper functions called from eBPF programs */
585 diff --git a/include/linux/net.h b/include/linux/net.h
586 index 25ef630f1bd6..c00b8d182226 100644
587 --- a/include/linux/net.h
588 +++ b/include/linux/net.h
589 @@ -251,7 +251,8 @@ do { \
590 DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
591 if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
592 net_ratelimit()) \
593 - __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
594 + __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
595 + ##__VA_ARGS__); \
596 } while (0)
597 #elif defined(DEBUG)
598 #define net_dbg_ratelimited(fmt, ...) \
599 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
600 index 4fde61804191..1716f9395010 100644
601 --- a/include/linux/skbuff.h
602 +++ b/include/linux/skbuff.h
603 @@ -2564,6 +2564,13 @@ static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len
604 skb_headroom(skb) + len <= skb->hdr_len;
605 }
606
607 +static inline int skb_try_make_writable(struct sk_buff *skb,
608 + unsigned int write_len)
609 +{
610 + return skb_cloned(skb) && !skb_clone_writable(skb, write_len) &&
611 + pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
612 +}
613 +
614 static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
615 int cloned)
616 {
617 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
618 index fddebc617469..8ff34ed1ae8a 100644
619 --- a/include/linux/sock_diag.h
620 +++ b/include/linux/sock_diag.h
621 @@ -35,6 +35,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
622 {
623 switch (sk->sk_family) {
624 case AF_INET:
625 + if (sk->sk_type == SOCK_RAW)
626 + return SKNLGRP_NONE;
627 +
628 switch (sk->sk_protocol) {
629 case IPPROTO_TCP:
630 return SKNLGRP_INET_TCP_DESTROY;
631 @@ -44,6 +47,9 @@ enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
632 return SKNLGRP_NONE;
633 }
634 case AF_INET6:
635 + if (sk->sk_type == SOCK_RAW)
636 + return SKNLGRP_NONE;
637 +
638 switch (sk->sk_protocol) {
639 case IPPROTO_TCP:
640 return SKNLGRP_INET6_TCP_DESTROY;
641 diff --git a/kernel/events/core.c b/kernel/events/core.c
642 index 95e47d2f2c67..12ecd4f0329f 100644
643 --- a/kernel/events/core.c
644 +++ b/kernel/events/core.c
645 @@ -7101,7 +7101,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
646 prog = event->tp_event->prog;
647 if (prog) {
648 event->tp_event->prog = NULL;
649 - bpf_prog_put(prog);
650 + bpf_prog_put_rcu(prog);
651 }
652 }
653
654 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
655 index fbd0acf80b13..2fdebabbfacd 100644
656 --- a/net/ax25/af_ax25.c
657 +++ b/net/ax25/af_ax25.c
658 @@ -976,7 +976,8 @@ static int ax25_release(struct socket *sock)
659 release_sock(sk);
660 ax25_disconnect(ax25, 0);
661 lock_sock(sk);
662 - ax25_destroy_socket(ax25);
663 + if (!sock_flag(ax25->sk, SOCK_DESTROY))
664 + ax25_destroy_socket(ax25);
665 break;
666
667 case AX25_STATE_3:
668 diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
669 index 951cd57bb07d..5237dff6941d 100644
670 --- a/net/ax25/ax25_ds_timer.c
671 +++ b/net/ax25/ax25_ds_timer.c
672 @@ -102,6 +102,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
673 switch (ax25->state) {
674
675 case AX25_STATE_0:
676 + case AX25_STATE_2:
677 /* Magic here: If we listen() and a new link dies before it
678 is accepted() it isn't 'dead' so doesn't get removed. */
679 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
680 @@ -111,6 +112,7 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
681 sock_hold(sk);
682 ax25_destroy_socket(ax25);
683 bh_unlock_sock(sk);
684 + /* Ungrab socket and destroy it */
685 sock_put(sk);
686 } else
687 ax25_destroy_socket(ax25);
688 @@ -213,7 +215,8 @@ void ax25_ds_t1_timeout(ax25_cb *ax25)
689 case AX25_STATE_2:
690 if (ax25->n2count == ax25->n2) {
691 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
692 - ax25_disconnect(ax25, ETIMEDOUT);
693 + if (!sock_flag(ax25->sk, SOCK_DESTROY))
694 + ax25_disconnect(ax25, ETIMEDOUT);
695 return;
696 } else {
697 ax25->n2count++;
698 diff --git a/net/ax25/ax25_std_timer.c b/net/ax25/ax25_std_timer.c
699 index 004467c9e6e1..2c0d6ef66f9d 100644
700 --- a/net/ax25/ax25_std_timer.c
701 +++ b/net/ax25/ax25_std_timer.c
702 @@ -38,6 +38,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
703
704 switch (ax25->state) {
705 case AX25_STATE_0:
706 + case AX25_STATE_2:
707 /* Magic here: If we listen() and a new link dies before it
708 is accepted() it isn't 'dead' so doesn't get removed. */
709 if (!sk || sock_flag(sk, SOCK_DESTROY) ||
710 @@ -47,6 +48,7 @@ void ax25_std_heartbeat_expiry(ax25_cb *ax25)
711 sock_hold(sk);
712 ax25_destroy_socket(ax25);
713 bh_unlock_sock(sk);
714 + /* Ungrab socket and destroy it */
715 sock_put(sk);
716 } else
717 ax25_destroy_socket(ax25);
718 @@ -144,7 +146,8 @@ void ax25_std_t1timer_expiry(ax25_cb *ax25)
719 case AX25_STATE_2:
720 if (ax25->n2count == ax25->n2) {
721 ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND);
722 - ax25_disconnect(ax25, ETIMEDOUT);
723 + if (!sock_flag(ax25->sk, SOCK_DESTROY))
724 + ax25_disconnect(ax25, ETIMEDOUT);
725 return;
726 } else {
727 ax25->n2count++;
728 diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
729 index 3b78e8473a01..655a7d4c96e1 100644
730 --- a/net/ax25/ax25_subr.c
731 +++ b/net/ax25/ax25_subr.c
732 @@ -264,7 +264,8 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
733 {
734 ax25_clear_queues(ax25);
735
736 - ax25_stop_heartbeat(ax25);
737 + if (!sock_flag(ax25->sk, SOCK_DESTROY))
738 + ax25_stop_heartbeat(ax25);
739 ax25_stop_t1timer(ax25);
740 ax25_stop_t2timer(ax25);
741 ax25_stop_t3timer(ax25);
742 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
743 index ea9893743a0f..7173a685309a 100644
744 --- a/net/bridge/br_multicast.c
745 +++ b/net/bridge/br_multicast.c
746 @@ -464,8 +464,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
747 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
748 &ip6h->saddr)) {
749 kfree_skb(skb);
750 + br->has_ipv6_addr = 0;
751 return NULL;
752 }
753 +
754 + br->has_ipv6_addr = 1;
755 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
756
757 hopopt = (u8 *)(ip6h + 1);
758 @@ -1736,6 +1739,7 @@ void br_multicast_init(struct net_bridge *br)
759 br->ip6_other_query.delay_time = 0;
760 br->ip6_querier.port = NULL;
761 #endif
762 + br->has_ipv6_addr = 1;
763
764 spin_lock_init(&br->multicast_lock);
765 setup_timer(&br->multicast_router_timer,
766 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
767 index 216018c76018..1001a1b7df9b 100644
768 --- a/net/bridge/br_private.h
769 +++ b/net/bridge/br_private.h
770 @@ -301,6 +301,7 @@ struct net_bridge
771 u8 multicast_disabled:1;
772 u8 multicast_querier:1;
773 u8 multicast_query_use_ifaddr:1;
774 + u8 has_ipv6_addr:1;
775
776 u32 hash_elasticity;
777 u32 hash_max;
778 @@ -574,10 +575,22 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
779
780 static inline bool
781 __br_multicast_querier_exists(struct net_bridge *br,
782 - struct bridge_mcast_other_query *querier)
783 + struct bridge_mcast_other_query *querier,
784 + const bool is_ipv6)
785 {
786 + bool own_querier_enabled;
787 +
788 + if (br->multicast_querier) {
789 + if (is_ipv6 && !br->has_ipv6_addr)
790 + own_querier_enabled = false;
791 + else
792 + own_querier_enabled = true;
793 + } else {
794 + own_querier_enabled = false;
795 + }
796 +
797 return time_is_before_jiffies(querier->delay_time) &&
798 - (br->multicast_querier || timer_pending(&querier->timer));
799 + (own_querier_enabled || timer_pending(&querier->timer));
800 }
801
802 static inline bool br_multicast_querier_exists(struct net_bridge *br,
803 @@ -585,10 +598,12 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
804 {
805 switch (eth->h_proto) {
806 case (htons(ETH_P_IP)):
807 - return __br_multicast_querier_exists(br, &br->ip4_other_query);
808 + return __br_multicast_querier_exists(br,
809 + &br->ip4_other_query, false);
810 #if IS_ENABLED(CONFIG_IPV6)
811 case (htons(ETH_P_IPV6)):
812 - return __br_multicast_querier_exists(br, &br->ip6_other_query);
813 + return __br_multicast_querier_exists(br,
814 + &br->ip6_other_query, true);
815 #endif
816 default:
817 return false;
818 diff --git a/net/core/filter.c b/net/core/filter.c
819 index f393a22b9d50..75e9b2b2336d 100644
820 --- a/net/core/filter.c
821 +++ b/net/core/filter.c
822 @@ -1275,9 +1275,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
823 */
824 if (unlikely((u32) offset > 0xffff || len > sizeof(buf)))
825 return -EFAULT;
826 -
827 - if (unlikely(skb_cloned(skb) &&
828 - !skb_clone_writable(skb, offset + len)))
829 + if (unlikely(skb_try_make_writable(skb, offset + len)))
830 return -EFAULT;
831
832 ptr = skb_header_pointer(skb, offset, len, buf);
833 @@ -1321,8 +1319,7 @@ static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
834 if (unlikely((u32) offset > 0xffff))
835 return -EFAULT;
836
837 - if (unlikely(skb_cloned(skb) &&
838 - !skb_clone_writable(skb, offset + sizeof(sum))))
839 + if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
840 return -EFAULT;
841
842 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
843 @@ -1367,9 +1364,7 @@ static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags)
844
845 if (unlikely((u32) offset > 0xffff))
846 return -EFAULT;
847 -
848 - if (unlikely(skb_cloned(skb) &&
849 - !skb_clone_writable(skb, offset + sizeof(sum))))
850 + if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum))))
851 return -EFAULT;
852
853 ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum);
854 @@ -1554,6 +1549,13 @@ bool bpf_helper_changes_skb_data(void *func)
855 return true;
856 if (func == bpf_skb_vlan_pop)
857 return true;
858 + if (func == bpf_skb_store_bytes)
859 + return true;
860 + if (func == bpf_l3_csum_replace)
861 + return true;
862 + if (func == bpf_l4_csum_replace)
863 + return true;
864 +
865 return false;
866 }
867
868 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
869 index f18ae91b652e..769cece9b00b 100644
870 --- a/net/core/neighbour.c
871 +++ b/net/core/neighbour.c
872 @@ -2467,13 +2467,17 @@ int neigh_xmit(int index, struct net_device *dev,
873 tbl = neigh_tables[index];
874 if (!tbl)
875 goto out;
876 + rcu_read_lock_bh();
877 neigh = __neigh_lookup_noref(tbl, addr, dev);
878 if (!neigh)
879 neigh = __neigh_create(tbl, addr, dev, false);
880 err = PTR_ERR(neigh);
881 - if (IS_ERR(neigh))
882 + if (IS_ERR(neigh)) {
883 + rcu_read_unlock_bh();
884 goto out_kfree_skb;
885 + }
886 err = neigh->output(neigh, skb);
887 + rcu_read_unlock_bh();
888 }
889 else if (index == NEIGH_LINK_TABLE) {
890 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
891 diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
892 index 477937465a20..d95631d09248 100644
893 --- a/net/ipv4/esp4.c
894 +++ b/net/ipv4/esp4.c
895 @@ -23,6 +23,11 @@ struct esp_skb_cb {
896 void *tmp;
897 };
898
899 +struct esp_output_extra {
900 + __be32 seqhi;
901 + u32 esphoff;
902 +};
903 +
904 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
905
906 static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
907 @@ -35,11 +40,11 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
908 *
909 * TODO: Use spare space in skb for this where possible.
910 */
911 -static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
912 +static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
913 {
914 unsigned int len;
915
916 - len = seqhilen;
917 + len = extralen;
918
919 len += crypto_aead_ivsize(aead);
920
921 @@ -57,15 +62,16 @@ static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
922 return kmalloc(len, GFP_ATOMIC);
923 }
924
925 -static inline __be32 *esp_tmp_seqhi(void *tmp)
926 +static inline void *esp_tmp_extra(void *tmp)
927 {
928 - return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
929 + return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
930 }
931 -static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
932 +
933 +static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
934 {
935 return crypto_aead_ivsize(aead) ?
936 - PTR_ALIGN((u8 *)tmp + seqhilen,
937 - crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
938 + PTR_ALIGN((u8 *)tmp + extralen,
939 + crypto_aead_alignmask(aead) + 1) : tmp + extralen;
940 }
941
942 static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
943 @@ -99,7 +105,7 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
944 {
945 struct ip_esp_hdr *esph = (void *)(skb->data + offset);
946 void *tmp = ESP_SKB_CB(skb)->tmp;
947 - __be32 *seqhi = esp_tmp_seqhi(tmp);
948 + __be32 *seqhi = esp_tmp_extra(tmp);
949
950 esph->seq_no = esph->spi;
951 esph->spi = *seqhi;
952 @@ -107,7 +113,11 @@ static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
953
954 static void esp_output_restore_header(struct sk_buff *skb)
955 {
956 - esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
957 + void *tmp = ESP_SKB_CB(skb)->tmp;
958 + struct esp_output_extra *extra = esp_tmp_extra(tmp);
959 +
960 + esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
961 + sizeof(__be32));
962 }
963
964 static void esp_output_done_esn(struct crypto_async_request *base, int err)
965 @@ -121,6 +131,7 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
966 static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
967 {
968 int err;
969 + struct esp_output_extra *extra;
970 struct ip_esp_hdr *esph;
971 struct crypto_aead *aead;
972 struct aead_request *req;
973 @@ -137,8 +148,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
974 int tfclen;
975 int nfrags;
976 int assoclen;
977 - int seqhilen;
978 - __be32 *seqhi;
979 + int extralen;
980 __be64 seqno;
981
982 /* skb is pure payload to encrypt */
983 @@ -166,21 +176,21 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
984 nfrags = err;
985
986 assoclen = sizeof(*esph);
987 - seqhilen = 0;
988 + extralen = 0;
989
990 if (x->props.flags & XFRM_STATE_ESN) {
991 - seqhilen += sizeof(__be32);
992 - assoclen += seqhilen;
993 + extralen += sizeof(*extra);
994 + assoclen += sizeof(__be32);
995 }
996
997 - tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
998 + tmp = esp_alloc_tmp(aead, nfrags, extralen);
999 if (!tmp) {
1000 err = -ENOMEM;
1001 goto error;
1002 }
1003
1004 - seqhi = esp_tmp_seqhi(tmp);
1005 - iv = esp_tmp_iv(aead, tmp, seqhilen);
1006 + extra = esp_tmp_extra(tmp);
1007 + iv = esp_tmp_iv(aead, tmp, extralen);
1008 req = esp_tmp_req(aead, iv);
1009 sg = esp_req_sg(aead, req);
1010
1011 @@ -247,8 +257,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
1012 * encryption.
1013 */
1014 if ((x->props.flags & XFRM_STATE_ESN)) {
1015 - esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
1016 - *seqhi = esph->spi;
1017 + extra->esphoff = (unsigned char *)esph -
1018 + skb_transport_header(skb);
1019 + esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
1020 + extra->seqhi = esph->spi;
1021 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
1022 aead_request_set_callback(req, 0, esp_output_done_esn, skb);
1023 }
1024 @@ -445,7 +457,7 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
1025 goto out;
1026
1027 ESP_SKB_CB(skb)->tmp = tmp;
1028 - seqhi = esp_tmp_seqhi(tmp);
1029 + seqhi = esp_tmp_extra(tmp);
1030 iv = esp_tmp_iv(aead, tmp, seqhilen);
1031 req = esp_tmp_req(aead, iv);
1032 sg = esp_req_sg(aead, req);
1033 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
1034 index c3a38353f5dc..9d1e555496e3 100644
1035 --- a/net/ipv4/ipmr.c
1036 +++ b/net/ipv4/ipmr.c
1037 @@ -882,8 +882,10 @@ static struct mfc_cache *ipmr_cache_alloc(void)
1038 {
1039 struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1040
1041 - if (c)
1042 + if (c) {
1043 + c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1044 c->mfc_un.res.minvif = MAXVIFS;
1045 + }
1046 return c;
1047 }
1048
1049 diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1050 index a10e77103c88..e207cb2468da 100644
1051 --- a/net/ipv6/ip6mr.c
1052 +++ b/net/ipv6/ip6mr.c
1053 @@ -1074,6 +1074,7 @@ static struct mfc6_cache *ip6mr_cache_alloc(void)
1054 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
1055 if (!c)
1056 return NULL;
1057 + c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
1058 c->mfc_un.res.minvif = MAXMIFS;
1059 return c;
1060 }
1061 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1062 index dcccae86190f..ba3d2f3d66d2 100644
1063 --- a/net/ipv6/sit.c
1064 +++ b/net/ipv6/sit.c
1065 @@ -560,13 +560,13 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
1066
1067 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
1068 ipv4_update_pmtu(skb, dev_net(skb->dev), info,
1069 - t->parms.link, 0, IPPROTO_IPV6, 0);
1070 + t->parms.link, 0, iph->protocol, 0);
1071 err = 0;
1072 goto out;
1073 }
1074 if (type == ICMP_REDIRECT) {
1075 ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
1076 - IPPROTO_IPV6, 0);
1077 + iph->protocol, 0);
1078 err = 0;
1079 goto out;
1080 }
1081 diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
1082 index b07c535ba8e7..eeb3eb3ea9eb 100644
1083 --- a/net/sched/act_csum.c
1084 +++ b/net/sched/act_csum.c
1085 @@ -105,9 +105,7 @@ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
1086 int hl = ihl + jhl;
1087
1088 if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
1089 - (skb_cloned(skb) &&
1090 - !skb_clone_writable(skb, hl + ntkoff) &&
1091 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1092 + skb_try_make_writable(skb, hl + ntkoff))
1093 return NULL;
1094 else
1095 return (void *)(skb_network_header(skb) + ihl);
1096 @@ -365,9 +363,7 @@ static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
1097 }
1098
1099 if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
1100 - if (skb_cloned(skb) &&
1101 - !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
1102 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1103 + if (skb_try_make_writable(skb, sizeof(*iph) + ntkoff))
1104 goto fail;
1105
1106 ip_send_check(ip_hdr(skb));
1107 diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
1108 index b7c4ead8b5a8..27607b863aba 100644
1109 --- a/net/sched/act_nat.c
1110 +++ b/net/sched/act_nat.c
1111 @@ -126,9 +126,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
1112 addr = iph->daddr;
1113
1114 if (!((old_addr ^ addr) & mask)) {
1115 - if (skb_cloned(skb) &&
1116 - !skb_clone_writable(skb, sizeof(*iph) + noff) &&
1117 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1118 + if (skb_try_make_writable(skb, sizeof(*iph) + noff))
1119 goto drop;
1120
1121 new_addr &= mask;
1122 @@ -156,9 +154,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
1123 struct tcphdr *tcph;
1124
1125 if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
1126 - (skb_cloned(skb) &&
1127 - !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
1128 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1129 + skb_try_make_writable(skb, ihl + sizeof(*tcph) + noff))
1130 goto drop;
1131
1132 tcph = (void *)(skb_network_header(skb) + ihl);
1133 @@ -171,9 +167,7 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
1134 struct udphdr *udph;
1135
1136 if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
1137 - (skb_cloned(skb) &&
1138 - !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
1139 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1140 + skb_try_make_writable(skb, ihl + sizeof(*udph) + noff))
1141 goto drop;
1142
1143 udph = (void *)(skb_network_header(skb) + ihl);
1144 @@ -213,10 +207,8 @@ static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
1145 if ((old_addr ^ addr) & mask)
1146 break;
1147
1148 - if (skb_cloned(skb) &&
1149 - !skb_clone_writable(skb, ihl + sizeof(*icmph) +
1150 - sizeof(*iph) + noff) &&
1151 - pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1152 + if (skb_try_make_writable(skb, ihl + sizeof(*icmph) +
1153 + sizeof(*iph) + noff))
1154 goto drop;
1155
1156 icmph = (void *)(skb_network_header(skb) + ihl);
1157 diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
1158 index 2177eac0a61e..2e4bd2c0a50c 100644
1159 --- a/net/sched/sch_fifo.c
1160 +++ b/net/sched/sch_fifo.c
1161 @@ -37,14 +37,18 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1162
1163 static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1164 {
1165 + unsigned int prev_backlog;
1166 +
1167 if (likely(skb_queue_len(&sch->q) < sch->limit))
1168 return qdisc_enqueue_tail(skb, sch);
1169
1170 + prev_backlog = sch->qstats.backlog;
1171 /* queue full, remove one skb to fulfill the limit */
1172 __qdisc_queue_drop_head(sch, &sch->q);
1173 qdisc_qstats_drop(sch);
1174 qdisc_enqueue_tail(skb, sch);
1175
1176 + qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
1177 return NET_XMIT_CN;
1178 }
1179
1180 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
1181 index 4befe97a9034..b7c29d5b6f04 100644
1182 --- a/net/sched/sch_netem.c
1183 +++ b/net/sched/sch_netem.c
1184 @@ -650,14 +650,14 @@ deliver:
1185 #endif
1186
1187 if (q->qdisc) {
1188 + unsigned int pkt_len = qdisc_pkt_len(skb);
1189 int err = qdisc_enqueue(skb, q->qdisc);
1190
1191 - if (unlikely(err != NET_XMIT_SUCCESS)) {
1192 - if (net_xmit_drop_count(err)) {
1193 - qdisc_qstats_drop(sch);
1194 - qdisc_tree_reduce_backlog(sch, 1,
1195 - qdisc_pkt_len(skb));
1196 - }
1197 + if (err != NET_XMIT_SUCCESS &&
1198 + net_xmit_drop_count(err)) {
1199 + qdisc_qstats_drop(sch);
1200 + qdisc_tree_reduce_backlog(sch, 1,
1201 + pkt_len);
1202 }
1203 goto tfifo_dequeue;
1204 }