Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.7/0107-3.7.8-all-fixes.patch



Revision 2076
Mon Feb 18 12:04:15 2013 UTC by niro
File size: 73887 bytes
-linux-3.7.8
diff --git a/MAINTAINERS b/MAINTAINERS
index 9386a63..4eb1deb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2898,7 +2898,7 @@ S: Maintained
 F: drivers/net/ethernet/i825xx/eexpress.*

 ETHERNET BRIDGE
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
 L: bridge@lists.linux-foundation.org
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -4739,7 +4739,7 @@ S: Maintained

 MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
 M: Mirko Lindner <mlindner@marvell.com>
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
 L: netdev@vger.kernel.org
 S: Maintained
 F: drivers/net/ethernet/marvell/sk*
@@ -4993,7 +4993,7 @@ S: Supported
 F: drivers/infiniband/hw/nes/

 NETEM NETWORK EMULATOR
-M: Stephen Hemminger <shemminger@vyatta.com>
+M: Stephen Hemminger <stephen@networkplumber.org>
 L: netem@lists.linux-foundation.org
 S: Maintained
 F: net/sched/sch_netem.c
diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h
index 6a0955e..53ecac5 100644
--- a/drivers/atm/iphase.h
+++ b/drivers/atm/iphase.h
@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE

-typedef volatile u_int freg_t;
+typedef volatile u_int ffreg_t;
 typedef u_int rreg_t;

 typedef struct _ffredn_t {
- freg_t idlehead_high; /* Idle cell header (high) */
- freg_t idlehead_low; /* Idle cell header (low) */
- freg_t maxrate; /* Maximum rate */
- freg_t stparms; /* Traffic Management Parameters */
- freg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
- freg_t rm_type; /* */
- u_int filler5[0x17 - 0x06];
- freg_t cmd_reg; /* Command register */
- u_int filler18[0x20 - 0x18];
- freg_t cbr_base; /* CBR Pointer Base */
- freg_t vbr_base; /* VBR Pointer Base */
- freg_t abr_base; /* ABR Pointer Base */
- freg_t ubr_base; /* UBR Pointer Base */
- u_int filler24;
- freg_t vbrwq_base; /* VBR Wait Queue Base */
- freg_t abrwq_base; /* ABR Wait Queue Base */
- freg_t ubrwq_base; /* UBR Wait Queue Base */
- freg_t vct_base; /* Main VC Table Base */
- freg_t vcte_base; /* Extended Main VC Table Base */
- u_int filler2a[0x2C - 0x2A];
- freg_t cbr_tab_beg; /* CBR Table Begin */
- freg_t cbr_tab_end; /* CBR Table End */
- freg_t cbr_pointer; /* CBR Pointer */
- u_int filler2f[0x30 - 0x2F];
- freg_t prq_st_adr; /* Packet Ready Queue Start Address */
- freg_t prq_ed_adr; /* Packet Ready Queue End Address */
- freg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
- freg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
- freg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
- freg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
- freg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
- freg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
- u_int filler38[0x40 - 0x38];
- freg_t queue_base; /* Base address for PRQ and TCQ */
- freg_t desc_base; /* Base address of descriptor table */
- u_int filler42[0x45 - 0x42];
- freg_t mode_reg_0; /* Mode register 0 */
- freg_t mode_reg_1; /* Mode register 1 */
- freg_t intr_status_reg;/* Interrupt Status register */
- freg_t mask_reg; /* Mask Register */
- freg_t cell_ctr_high1; /* Total cell transfer count (high) */
- freg_t cell_ctr_lo1; /* Total cell transfer count (low) */
- freg_t state_reg; /* Status register */
- u_int filler4c[0x58 - 0x4c];
- freg_t curr_desc_num; /* Contains the current descriptor num */
- freg_t next_desc; /* Next descriptor */
- freg_t next_vc; /* Next VC */
- u_int filler5b[0x5d - 0x5b];
- freg_t present_slot_cnt;/* Present slot count */
- u_int filler5e[0x6a - 0x5e];
- freg_t new_desc_num; /* New descriptor number */
- freg_t new_vc; /* New VC */
- freg_t sched_tbl_ptr; /* Schedule table pointer */
- freg_t vbrwq_wptr; /* VBR wait queue write pointer */
- freg_t vbrwq_rptr; /* VBR wait queue read pointer */
- freg_t abrwq_wptr; /* ABR wait queue write pointer */
- freg_t abrwq_rptr; /* ABR wait queue read pointer */
- freg_t ubrwq_wptr; /* UBR wait queue write pointer */
- freg_t ubrwq_rptr; /* UBR wait queue read pointer */
- freg_t cbr_vc; /* CBR VC */
- freg_t vbr_sb_vc; /* VBR SB VC */
- freg_t abr_sb_vc; /* ABR SB VC */
- freg_t ubr_sb_vc; /* UBR SB VC */
- freg_t vbr_next_link; /* VBR next link */
- freg_t abr_next_link; /* ABR next link */
- freg_t ubr_next_link; /* UBR next link */
- u_int filler7a[0x7c-0x7a];
- freg_t out_rate_head; /* Out of rate head */
- u_int filler7d[0xca-0x7d]; /* pad out to full address space */
- freg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
- freg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
- u_int fillercc[0x100-0xcc]; /* pad out to full address space */
+ ffreg_t idlehead_high; /* Idle cell header (high) */
+ ffreg_t idlehead_low; /* Idle cell header (low) */
+ ffreg_t maxrate; /* Maximum rate */
+ ffreg_t stparms; /* Traffic Management Parameters */
+ ffreg_t abrubr_abr; /* ABRUBR Priority Byte 1, TCR Byte 0 */
+ ffreg_t rm_type; /* */
+ u_int filler5[0x17 - 0x06];
+ ffreg_t cmd_reg; /* Command register */
+ u_int filler18[0x20 - 0x18];
+ ffreg_t cbr_base; /* CBR Pointer Base */
+ ffreg_t vbr_base; /* VBR Pointer Base */
+ ffreg_t abr_base; /* ABR Pointer Base */
+ ffreg_t ubr_base; /* UBR Pointer Base */
+ u_int filler24;
+ ffreg_t vbrwq_base; /* VBR Wait Queue Base */
+ ffreg_t abrwq_base; /* ABR Wait Queue Base */
+ ffreg_t ubrwq_base; /* UBR Wait Queue Base */
+ ffreg_t vct_base; /* Main VC Table Base */
+ ffreg_t vcte_base; /* Extended Main VC Table Base */
+ u_int filler2a[0x2C - 0x2A];
+ ffreg_t cbr_tab_beg; /* CBR Table Begin */
+ ffreg_t cbr_tab_end; /* CBR Table End */
+ ffreg_t cbr_pointer; /* CBR Pointer */
+ u_int filler2f[0x30 - 0x2F];
+ ffreg_t prq_st_adr; /* Packet Ready Queue Start Address */
+ ffreg_t prq_ed_adr; /* Packet Ready Queue End Address */
+ ffreg_t prq_rd_ptr; /* Packet Ready Queue read pointer */
+ ffreg_t prq_wr_ptr; /* Packet Ready Queue write pointer */
+ ffreg_t tcq_st_adr; /* Transmit Complete Queue Start Address*/
+ ffreg_t tcq_ed_adr; /* Transmit Complete Queue End Address */
+ ffreg_t tcq_rd_ptr; /* Transmit Complete Queue read pointer */
+ ffreg_t tcq_wr_ptr; /* Transmit Complete Queue write pointer*/
+ u_int filler38[0x40 - 0x38];
+ ffreg_t queue_base; /* Base address for PRQ and TCQ */
+ ffreg_t desc_base; /* Base address of descriptor table */
+ u_int filler42[0x45 - 0x42];
+ ffreg_t mode_reg_0; /* Mode register 0 */
+ ffreg_t mode_reg_1; /* Mode register 1 */
+ ffreg_t intr_status_reg;/* Interrupt Status register */
+ ffreg_t mask_reg; /* Mask Register */
+ ffreg_t cell_ctr_high1; /* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1; /* Total cell transfer count (low) */
+ ffreg_t state_reg; /* Status register */
+ u_int filler4c[0x58 - 0x4c];
+ ffreg_t curr_desc_num; /* Contains the current descriptor num */
+ ffreg_t next_desc; /* Next descriptor */
+ ffreg_t next_vc; /* Next VC */
+ u_int filler5b[0x5d - 0x5b];
+ ffreg_t present_slot_cnt;/* Present slot count */
+ u_int filler5e[0x6a - 0x5e];
+ ffreg_t new_desc_num; /* New descriptor number */
+ ffreg_t new_vc; /* New VC */
+ ffreg_t sched_tbl_ptr; /* Schedule table pointer */
+ ffreg_t vbrwq_wptr; /* VBR wait queue write pointer */
+ ffreg_t vbrwq_rptr; /* VBR wait queue read pointer */
+ ffreg_t abrwq_wptr; /* ABR wait queue write pointer */
+ ffreg_t abrwq_rptr; /* ABR wait queue read pointer */
+ ffreg_t ubrwq_wptr; /* UBR wait queue write pointer */
+ ffreg_t ubrwq_rptr; /* UBR wait queue read pointer */
+ ffreg_t cbr_vc; /* CBR VC */
+ ffreg_t vbr_sb_vc; /* VBR SB VC */
+ ffreg_t abr_sb_vc; /* ABR SB VC */
+ ffreg_t ubr_sb_vc; /* UBR SB VC */
+ ffreg_t vbr_next_link; /* VBR next link */
+ ffreg_t abr_next_link; /* ABR next link */
+ ffreg_t ubr_next_link; /* UBR next link */
+ u_int filler7a[0x7c-0x7a];
+ ffreg_t out_rate_head; /* Out of rate head */
+ u_int filler7d[0xca-0x7d]; /* pad out to full address space */
+ ffreg_t cell_ctr_high1_nc;/* Total cell transfer count (high) */
+ ffreg_t cell_ctr_lo1_nc;/* Total cell transfer count (low) */
+ u_int fillercc[0x100-0xcc]; /* pad out to full address space */
 } ffredn_t;

 typedef struct _rfredn_t {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8ab9c3d..088c8fd 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1966,7 +1966,8 @@ static void virtcons_remove(struct virtio_device *vdev)
 /* Disable interrupts for vqs */
 vdev->config->reset(vdev);
 /* Finish up work that's lined up */
- cancel_work_sync(&portdev->control_work);
+ if (use_multiport(portdev))
+ cancel_work_sync(&portdev->control_work);

 list_for_each_entry_safe(port, port2, &portdev->ports, list)
 unplug_port(port);
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index f74c30a..48f0637 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -99,7 +99,7 @@ nouveau_subdev_create_(struct nouveau_object *parent,
 if (ret)
 return ret;

- mutex_init(&subdev->mutex);
+ __mutex_init(&subdev->mutex, subname, &oclass->lock_class_key);
 subdev->name = subname;

 if (parent) {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9..727ba4f 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -50,10 +50,13 @@ int nouveau_object_fini(struct nouveau_object *, bool suspend);

 extern struct nouveau_ofuncs nouveau_object_ofuncs;

+/* Don't allocate dynamically, because lockdep needs lock_class_keys to be in
+ * ".data". */
 struct nouveau_oclass {
 u32 handle;
- struct nouveau_ofuncs *ofuncs;
- struct nouveau_omthds *omthds;
+ struct nouveau_ofuncs * ofuncs;
+ struct nouveau_omthds * const omthds;
+ struct lock_class_key lock_class_key;
 };

 #define nv_oclass(o) nv_object(o)->oclass
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2e..572c216 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -241,6 +241,8 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
 return 0;
 }

+static struct lock_class_key drm_client_lock_class_key;
+
 static int
 nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 {
@@ -252,6 +254,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm);
 if (ret)
 return ret;
+ lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key);

 dev->dev_private = drm;
 drm->dev = dev;
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 4850d03..3527509 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 struct qib_qp __rcu **qpp;

 qpp = &dev->qp_table[n];
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- for (; q; qpp = &q->next) {
+ for (; (q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock))) != NULL;
+ qpp = &q->next)
 if (q == qp) {
 atomic_dec(&qp->refcount);
 *qpp = qp->next;
 rcu_assign_pointer(qp->next, NULL);
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
 break;
 }
- q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qpt_lock));
- }
 }

 spin_unlock_irqrestore(&dev->qpt_lock, flags);
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 68452b7..03a0a01 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -248,6 +248,8 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
 CAPIMSG_CONTROL(data));
 l -= 12;
+ if (l <= 0)
+ return;
 dbgline = kmalloc(3 * l, GFP_ATOMIC);
 if (!dbgline)
 return;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 41c9e81..3fc12b9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2766,19 +2766,9 @@ static int thin_iterate_devices(struct dm_target *ti,
 return 0;
 }

-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
- struct thin_c *tc = ti->private;
-
- *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
 static struct target_type thin_target = {
 .name = "thin",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
 .module = THIS_MODULE,
 .ctr = thin_ctr,
 .dtr = thin_dtr,
@@ -2787,7 +2777,6 @@ static struct target_type thin_target = {
 .postsuspend = thin_postsuspend,
 .status = thin_status,
 .iterate_devices = thin_iterate_devices,
- .io_hints = thin_io_hints,
 };

 /*----------------------------------------------------------------*/
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 42e36ba..4d12397 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1000,7 +1000,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
 pdev->vb_queue.buf_struct_size = sizeof(struct pwc_frame_buf);
 pdev->vb_queue.ops = &pwc_vb_queue_ops;
 pdev->vb_queue.mem_ops = &vb2_vmalloc_memops;
- vb2_queue_init(&pdev->vb_queue);
+ rc = vb2_queue_init(&pdev->vb_queue);
+ if (rc < 0) {
+ PWC_ERROR("Oops, could not initialize vb2 queue.\n");
+ goto err_free_mem;
+ }

 /* Init video_device structure */
 memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 00b8b0f..296e3c4 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2478,7 +2478,7 @@ static bool read_mailbox_0(void)

 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
 if (ev & prcmu_irq_bit[n])
- generic_handle_irq(IRQ_PRCMU_BASE + n);
+ generic_handle_irq(irq_find_mapping(db8500_irq_domain, n));
 }
 r = true;
 break;
@@ -2706,6 +2706,10 @@ static int db8500_irq_init(struct device_node *np)
 return -ENOSYS;
 }

+ /* All wakeups will be used, so create mappings for all */
+ for (i = 0; i < NUM_PRCMU_WAKEUPS; i++)
+ irq_create_mapping(db8500_irq_domain, i);
+
 return 0;
 }

diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 3391bdd..d301f8a 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -482,8 +482,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,

 priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
 IFX_WRITE_LOW_16BIT(mask));
+
+ /* According to C_CAN documentation, the reserved bit
+ * in IFx_MASK2 register is fixed 1
+ */
 priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
- IFX_WRITE_HIGH_16BIT(mask));
+ IFX_WRITE_HIGH_16BIT(mask) | BIT(13));

 priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
 IFX_WRITE_LOW_16BIT(id));
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 16814b3..e29c1b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -546,6 +546,10 @@ static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
 return -1;
 }

+ /* All frames should fit into a single buffer */
+ if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
+ return -1;
+
 /* Check if packet has checksum already */
 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
 !(ext_status & RXDESC_IP_PAYLOAD_MASK))
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 76edbc1..cccbbe8 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -233,6 +233,7 @@
 #define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -391,6 +392,12 @@

 #define E1000_PBS_16K E1000_PBA_16K

+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8
+#define E1000_PBECCSTS_ECC_ENABLE 0x00010000
+
 #define IFS_MAX 80
 #define IFS_MIN 40
 #define IFS_RATIO 4
@@ -410,6 +417,7 @@
 #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
 #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
@@ -446,6 +454,7 @@
 #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
 #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
 #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
+#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 04668b4..5faa1a8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -314,6 +314,8 @@ struct e1000_adapter {

 struct napi_struct napi;

+ unsigned int uncorr_errors; /* uncorrectable ECC errors */
+ unsigned int corr_errors; /* correctable ECC errors */
 unsigned int restart_queue;
 u32 txd_cmd;

diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c11ac27..da048f3 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -108,6 +108,8 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
 E1000_STAT("dropped_smbus", stats.mgpdc),
 E1000_STAT("rx_dma_failed", rx_dma_failed),
 E1000_STAT("tx_dma_failed", tx_dma_failed),
+ E1000_STAT("uncorr_ecc_errors", uncorr_errors),
+ E1000_STAT("corr_ecc_errors", corr_errors),
 };

 #define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index d37bfd9..a64f22e 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -77,6 +77,7 @@ enum e1e_registers {
 #define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
 E1000_PBS = 0x01008, /* Packet Buffer Size */
+ E1000_PBECCSTS = 0x0100C, /* Packet Buffer ECC Status - RW */
 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
 E1000_EEWR = 0x0102C, /* EEPROM Write Register - RW */
 E1000_FLOP = 0x0103C, /* FLASH Opcode Register */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e3a7b07..abc9770 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -3690,6 +3690,17 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
 if (hw->mac.type == e1000_ich8lan)
 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
 ew32(RFCTL, reg);
+
+ /* Enable ECC on Lynxpoint */
+ if (hw->mac.type == e1000_pch_lpt) {
+ reg = er32(PBECCSTS);
+ reg |= E1000_PBECCSTS_ECC_ENABLE;
+ ew32(PBECCSTS, reg);
+
+ reg = er32(CTRL);
+ reg |= E1000_CTRL_MEHE;
+ ew32(CTRL, reg);
+ }
 }

 /**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0..0fb7947 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1687,6 +1687,23 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 mod_timer(&adapter->watchdog_timer, jiffies + 1);
 }

+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
 if (napi_schedule_prep(&adapter->napi)) {
 adapter->total_tx_bytes = 0;
 adapter->total_tx_packets = 0;
@@ -1754,6 +1771,23 @@ static irqreturn_t e1000_intr(int irq, void *data)
 mod_timer(&adapter->watchdog_timer, jiffies + 1);
 }

+ /* Reset on uncorrectable ECC error */
+ if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
+ u32 pbeccsts = er32(PBECCSTS);
+
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+
+ /* return immediately since reset is imminent */
+ return IRQ_HANDLED;
+ }
+
 if (napi_schedule_prep(&adapter->napi)) {
 adapter->total_tx_bytes = 0;
 adapter->total_tx_packets = 0;
@@ -2117,6 +2151,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 if (adapter->msix_entries) {
 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+ } else if (hw->mac.type == e1000_pch_lpt) {
+ ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
 } else {
 ew32(IMS, IMS_ENABLE_MASK);
 }
@@ -4297,6 +4333,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
 adapter->stats.mgptc += er32(MGTPTC);
 adapter->stats.mgprc += er32(MGTPRC);
 adapter->stats.mgpdc += er32(MGTPDC);
+
+ /* Correctable ECC Errors */
+ if (hw->mac.type == e1000_pch_lpt) {
+ u32 pbeccsts = er32(PBECCSTS);
+ adapter->corr_errors +=
+ pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
+ adapter->uncorr_errors +=
+ (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
+ E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
+ }
 }

 /**
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index b35094c..fc1ac65 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,10 +629,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 ring->tx_csum++;
 }

- /* Copy dst mac address to wqe */
- ethh = (struct ethhdr *)skb->data;
- tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
- tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ if (mlx4_is_mfunc(mdev->dev) || priv->validate_loopback) {
+ /* Copy dst mac address to wqe. This allows loopback in eSwitch,
+ * so that VFs and PF can communicate with each other
+ */
+ ethh = (struct ethhdr *)skb->data;
+ tx_desc->ctrl.srcrb_flags16[0] = get_unaligned((__be16 *)ethh->h_dest);
+ tx_desc->ctrl.imm = get_unaligned((__be32 *)(ethh->h_dest + 2));
+ }
+
 /* Handle LSO (TSO) packets */
 if (lso_header_size) {
 /* Mark opcode as LSO */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2aa80af..d4b3935 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1702,15 +1702,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 int i;

 if (msi_x) {
- /* In multifunction mode each function gets 2 msi-X vectors
- * one for data path completions anf the other for asynch events
- * or command completions */
- if (mlx4_is_mfunc(dev)) {
- nreq = 2;
- } else {
- nreq = min_t(int, dev->caps.num_eqs -
- dev->caps.reserved_eqs, nreq);
- }
+ nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+ nreq);

 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 if (!entries)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index bc165f4..695667d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -144,7 +144,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 buffrag->length, PCI_DMA_TODEVICE);
 buffrag->dma = 0ULL;
 }
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
 buffrag++;
 if (buffrag->dma) {
 pci_unmap_page(adapter->pdev, buffrag->dma,
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index df45061..1b55ca1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1963,10 +1963,12 @@ unwind:
 while (--i >= 0) {
 nf = &pbuf->frag_array[i+1];
 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;
 }

 nf = &pbuf->frag_array[0];
 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ nf->dma = 0ULL;

 out_err:
 return -ENOMEM;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 927aa33..6afe74e 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6064,13 +6064,6 @@ process_pkt:
 tp->rx_stats.bytes += pkt_size;
 u64_stats_update_end(&tp->rx_stats.syncp);
 }
-
- /* Work around for AMD plateform. */
- if ((desc->opts2 & cpu_to_le32(0xfffe000)) &&
- (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
- desc->opts2 = 0;
- cur_rx++;
- }
 }

 count = cur_rx - tp->cur_rx;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 0459c09..046526e0 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1802,7 +1802,7 @@ static void rhine_tx(struct net_device *dev)
 rp->tx_skbuff[entry]->len,
 PCI_DMA_TODEVICE);
 }
- dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+ dev_kfree_skb(rp->tx_skbuff[entry]);
 rp->tx_skbuff[entry] = NULL;
 entry = (++rp->dirty_tx) % TX_RING_SIZE;
 }
@@ -2011,11 +2011,7 @@ static void rhine_slow_event_task(struct work_struct *work)
 if (intr_status & IntrPCIErr)
 netif_warn(rp, hw, dev, "PCI error\n");

- napi_disable(&rp->napi);
- rhine_irq_disable(rp);
- /* Slow and safe. Consider __napi_schedule as a replacement ? */
- napi_enable(&rp->napi);
- napi_schedule(&rp->napi);
+ iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);

 out_unlock:
 mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 81f8f9e..fcbf680 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -77,6 +77,11 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,

 skb_orphan(skb);

+ /* Before queueing this packet to netif_rx(),
+ * make sure dst is refcounted.
+ */
+ skb_dst_force(skb);
+
 skb->protocol = eth_type_trans(skb, dev);

 /* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 68a43fe..d3fb97d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -822,7 +822,10 @@ static int macvlan_changelink(struct net_device *dev,

 static size_t macvlan_get_size(const struct net_device *dev)
 {
- return nla_total_size(4);
+ return (0
+ + nla_total_size(4) /* IFLA_MACVLAN_MODE */
+ + nla_total_size(2) /* IFLA_MACVLAN_FLAGS */
+ );
 }

 static int macvlan_fill_info(struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 84b558d..678854a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -903,6 +903,8 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 AR_PHY_CL_TAB_1,
 AR_PHY_CL_TAB_2 };

+ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
+
 if (rtt) {
 if (!ar9003_hw_rtt_restore(ah, chan))
 run_rtt_cal = true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 8a38ff2..6784986 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -586,7 +586,7 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
 ath9k_hw_synth_delay(ah, chan, synthDelay);
 }

-static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
 {
 if (ah->caps.tx_chainmask == 5 || ah->caps.rx_chainmask == 5)
 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbc1b7a..b68aaf5 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1063,6 +1063,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
 int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
 int ar9003_paprd_init_table(struct ath_hw *ah);
 bool ar9003_paprd_is_done(struct ath_hw *ah);
+void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);

 /* Hardware family op attach helpers */
 void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index aecf1ce..b09adab 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1154,6 +1154,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 next_reclaimed = ssn;
 }

+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+ next_reclaimed);
+ }
+
 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

 iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1204,28 +1211,11 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 if (!is_agg)
 iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

- /*
- * W/A for FW bug - the seq_ctl isn't updated when the
- * queues are flushed. Fetch it from the packet itself
- */
- if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
- next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
- next_reclaimed =
- SEQ_TO_SN(next_reclaimed + 0x10);
- }
-
 is_offchannel_skb =
 (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
 freed++;
 }

- if (tid != IWL_TID_NON_QOS) {
- priv->tid_data[sta_id][tid].next_reclaimed =
- next_reclaimed;
- IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
- next_reclaimed);
- }
-
 WARN_ON(!is_agg && freed != 1);

 /*
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 9171aae..bd84069 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1555,7 +1555,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
 scan_rsp->number_of_sets);
 ret = -1;
- goto done;
+ goto check_next_scan;
 }

 bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1626,7 +1626,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 if (!beacon_size || beacon_size > bytes_left) {
 bss_info += bytes_left;
 bytes_left = 0;
- return -1;
+ ret = -1;
+ goto check_next_scan;
 }

 /* Initialize the current working beacon pointer for this BSS
@@ -1682,7 +1683,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 dev_err(priv->adapter->dev,
 "%s: bytes left < IE length\n",
 __func__);
- goto done;
+ goto check_next_scan;
 }
 if (element_id == WLAN_EID_DS_PARAMS) {
 channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1745,6 +1746,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 }
 }

+check_next_scan:
 spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
 if (list_empty(&adapter->scan_pending_q)) {
 spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1792,7 +1794,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 }
 }

-done:
 return ret;
 }

diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 59381fe..9f32b09 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -980,7 +980,8 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 is_tx ? "Tx" : "Rx");

 if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->
+ works.lps_leave_work);
 ppsc->last_delaylps_stamp_jiffies =
 jiffies;
 }
@@ -990,7 +991,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 }
 } else if (ETH_P_ARP == ether_type) {
 if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
 ppsc->last_delaylps_stamp_jiffies = jiffies;
 }

@@ -1000,7 +1001,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
 "802.1X %s EAPOL pkt!!\n", is_tx ? "Tx" : "Rx");

 if (is_tx) {
- rtl_lps_leave(hw);
+ schedule_work(&rtlpriv->works.lps_leave_work);
 ppsc->last_delaylps_stamp_jiffies = jiffies;
 }

diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 46ccbf7..d1c3a7e 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
 WARN_ON(skb_queue_empty(&rx_queue));
 while (!skb_queue_empty(&rx_queue)) {
 _skb = skb_dequeue(&rx_queue);
- _rtl_usb_rx_process_agg(hw, skb);
- ieee80211_rx_irqsafe(hw, skb);
+ _rtl_usb_rx_process_agg(hw, _skb);
+ ieee80211_rx_irqsafe(hw, _skb);
 }
 }

diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 94b79c3..9d7f172 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -151,6 +151,9 @@ void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
 /* Notify xenvif that ring now has space to send an skb to the frontend */
 void xenvif_notify_tx_completion(struct xenvif *vif);

+/* Prevent the device from generating any further traffic. */
+void xenvif_carrier_off(struct xenvif *vif);
+
 /* Returns number of ring slots required to send an skb to the frontend */
 unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);

diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b7d41f8..b8c5193 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -343,17 +343,22 @@ err:
 return err;
 }

-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_carrier_off(struct xenvif *vif)
 {
 struct net_device *dev = vif->dev;
- if (netif_carrier_ok(dev)) {
- rtnl_lock();
- netif_carrier_off(dev); /* discard queued packets */
- if (netif_running(dev))
- xenvif_down(vif);
- rtnl_unlock();
- xenvif_put(vif);
- }
+
+ rtnl_lock();
+ netif_carrier_off(dev); /* discard queued packets */
+ if (netif_running(dev))
+ xenvif_down(vif);
+ rtnl_unlock();
+ xenvif_put(vif);
+}
+
+void xenvif_disconnect(struct xenvif *vif)
+{
+ if (netif_carrier_ok(vif->dev))
+ xenvif_carrier_off(vif);

 atomic_dec(&vif->refcnt);
 wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f2d6b78..2b9520c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -147,7 +147,8 @@ void xen_netbk_remove_xenvif(struct xenvif *vif)
 atomic_dec(&netbk->netfront_count);
 }

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx);
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status);
 static void make_tx_response(struct xenvif *vif,
 struct xen_netif_tx_request *txp,
 s8 st);
@@ -879,7 +880,7 @@ static void netbk_tx_err(struct xenvif *vif,

 do {
 make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- if (cons >= end)
+ if (cons == end)
 break;
 txp = RING_GET_REQUEST(&vif->tx, cons++);
 } while (1);
@@ -888,6 +889,13 @@ static void netbk_tx_err(struct xenvif *vif,
 xenvif_put(vif);
 }

+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+ netdev_err(vif->dev, "fatal error; disabling device\n");
+ xenvif_carrier_off(vif);
+ xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 struct xen_netif_tx_request *first,
 struct xen_netif_tx_request *txp,
@@ -901,19 +909,22 @@ static int netbk_count_requests(struct xenvif *vif,

 do {
 if (frags >= work_to_do) {
- netdev_dbg(vif->dev, "Need more frags\n");
+ netdev_err(vif->dev, "Need more frags\n");
+ netbk_fatal_tx_err(vif);
 return -frags;
 }

 if (unlikely(frags >= MAX_SKB_FRAGS)) {
- netdev_dbg(vif->dev, "Too many frags\n");
+ netdev_err(vif->dev, "Too many frags\n");
+ netbk_fatal_tx_err(vif);
 return -frags;
 }

 memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 sizeof(*txp));
 if (txp->size > first->size) {
- netdev_dbg(vif->dev, "Frags galore\n");
+ netdev_err(vif->dev, "Frag is bigger than frame.\n");
+ netbk_fatal_tx_err(vif);
 return -frags;
 }

@@ -921,8 +932,9 @@ static int netbk_count_requests(struct xenvif *vif,
 frags++;

 if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+ netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 txp->offset, txp->size);
+ netbk_fatal_tx_err(vif);
 return -frags;
 }
 } while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -966,7 +978,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 pending_idx = netbk->pending_ring[index];
 page = xen_netbk_alloc_page(netbk, skb, pending_idx);
 if (!page)
- return NULL;
+ goto err;

 gop->source.u.ref = txp->gref;
 gop->source.domid = vif->domid;
@@ -988,6 +1000,17 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 }

 return gop;
+err:
+ /* Unwind, freeing all pages and sending error responses. */
+ while (i-- > start) {
+ xen_netbk_idx_release(netbk, frag_get_pending_idx(&frags[i]),
+ XEN_NETIF_RSP_ERROR);
+ }
+ /* The head too, if necessary. */
+ if (start)
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+
+ return NULL;
 }

 static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
@@ -996,30 +1019,20 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 {
 struct gnttab_copy *gop = *gopp;
 u16 pending_idx = *((u16 *)skb->data);
- struct pending_tx_info *pending_tx_info = netbk->pending_tx_info;
- struct xenvif *vif = pending_tx_info[pending_idx].vif;
- struct xen_netif_tx_request *txp;
 struct skb_shared_info *shinfo = skb_shinfo(skb);
 int nr_frags = shinfo->nr_frags;
 int i, err, start;

 /* Check status of header. */
 err = gop->status;
- if (unlikely(err)) {
- pending_ring_idx_t index;
- index = pending_index(netbk->pending_prod++);
- txp = &pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
- }
+ if (unlikely(err))
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

 /* Skip first skb fragment if it is on same page as header fragment. */
 start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

 for (i = start; i < nr_frags; i++) {
 int j, newerr;
- pending_ring_idx_t index;

 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

@@ -1028,16 +1041,12 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
 if (likely(!newerr)) {
 /* Had a previous error? Invalidate this fragment. */
 if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 continue;
 }

 /* Error on this fragment: respond to client with an error. */
- txp = &netbk->pending_tx_info[pending_idx].req;
- make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = pending_idx;
- xenvif_put(vif);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

 /* Not the first error? Preceding frags already invalidated. */
 if (err)
@@ -1045,10 +1054,10 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,

 /* First error: invalidate header and preceding fragments. */
 pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 for (j = start; j < i; j++) {
 pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 }

 /* Remember the error: invalidate all subsequent fragments. */
@@ -1082,7 +1091,7 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)

 /* Take an extra reference to offset xen_netbk_idx_release */
 get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 }

@@ -1095,7 +1104,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,

 do {
 if (unlikely(work_to_do-- <= 0)) {
- netdev_dbg(vif->dev, "Missing extra info\n");
+ netdev_err(vif->dev, "Missing extra info\n");
+ netbk_fatal_tx_err(vif);
 return -EBADR;
 }

@@ -1104,8 +1114,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 if (unlikely(!extra.type ||
 extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 vif->tx.req_cons = ++cons;
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
 "Invalid extra type: %d\n", extra.type);
+ netbk_fatal_tx_err(vif);
 return -EINVAL;
 }

@@ -1121,13 +1132,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 struct xen_netif_extra_info *gso)
 {
 if (!gso->u.gso.size) {
- netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+ netdev_err(vif->dev, "GSO size must not be zero.\n");
+ netbk_fatal_tx_err(vif);
 return -EINVAL;
 }

 /* Currently only TCPv4 S.O. is supported. */
 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
- netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+ netbk_fatal_tx_err(vif);
 return -EINVAL;
 }

@@ -1264,9 +1277,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

 /* Get a netif from the list with work to do. */
 vif = poll_net_schedule_list(netbk);
+ /* This can sometimes happen because the test of
+ * list_empty(net_schedule_list) at the top of the
+ * loop is unlocked. Just go back and have another
+ * look.
+ */
 if (!vif)
 continue;

+ if (vif->tx.sring->req_prod - vif->tx.req_cons >
+ XEN_NETIF_TX_RING_SIZE) {
+ netdev_err(vif->dev,
+ "Impossible number of requests. "
+ "req_prod %d, req_cons %d, size %ld\n",
+ vif->tx.sring->req_prod, vif->tx.req_cons,
+ XEN_NETIF_TX_RING_SIZE);
+ netbk_fatal_tx_err(vif);
+ continue;
+ }
+
 RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 if (!work_to_do) {
 xenvif_put(vif);
@@ -1294,17 +1323,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 work_to_do = xen_netbk_get_extras(vif, extras,
 work_to_do);
 idx = vif->tx.req_cons;
- if (unlikely(work_to_do < 0)) {
- netbk_tx_err(vif, &txreq, idx);
+ if (unlikely(work_to_do < 0))
 continue;
- }
 }

 ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
- if (unlikely(ret < 0)) {
- netbk_tx_err(vif, &txreq, idx - ret);
+ if (unlikely(ret < 0))
 continue;
- }
+
 idx += ret;

 if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1342,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)

 /* No crossing a page as the payload mustn't fragment. */
 if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
- netdev_dbg(vif->dev,
+ netdev_err(vif->dev,
 "txreq.offset: %x, size: %u, end: %lu\n",
 txreq.offset, txreq.size,
 (txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_tx_err(vif, &txreq, idx);
+ netbk_fatal_tx_err(vif);
 continue;
 }

@@ -1348,8 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

 if (netbk_set_skb_gso(vif, skb, gso)) {
+ /* Failure in netbk_set_skb_gso is fatal. */
 kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
 continue;
 }
 }
@@ -1448,7 +1474,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 txp->size -= data_len;
 } else {
 /* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx);
+ xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
 }

 if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1500,7 +1526,8 @@ static void xen_netbk_tx_action(struct xen_netbk *netbk)
 xen_netbk_tx_submit(netbk);
 }

-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
+static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
+ u8 status)
 {
 struct xenvif *vif;
 struct pending_tx_info *pending_tx_info;
@@ -1514,7 +1541,7 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)

 vif = pending_tx_info->vif;

- make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
+ make_tx_response(vif, &pending_tx_info->req, status);

 index = pending_index(netbk->pending_prod++);
 netbk->pending_ring[index] = pending_idx;
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 6a20019..6b431a7 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -65,7 +65,7 @@ static const struct voltage_map_desc ldo9_voltage_map_desc = {
 .min = 2800000, .step = 100000, .max = 3100000,
 };
 static const struct voltage_map_desc ldo10_voltage_map_desc = {
- .min = 95000, .step = 50000, .max = 1300000,
+ .min = 950000, .step = 50000, .max = 1300000,
 };
 static const struct voltage_map_desc ldo1213_voltage_map_desc = {
 .min = 800000, .step = 100000, .max = 3300000,
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 6f68491..66ca769 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -120,6 +120,12 @@ int of_regulator_match(struct device *dev, struct device_node *node,
 if (!dev || !node)
 return -EINVAL;

+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ match->init_data = NULL;
+ match->of_node = NULL;
+ }
+
 for_each_child_of_node(node, child) {
 name = of_get_property(child,
 "regulator-compatible", NULL);
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 3fd1b88..d6e4e1c 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -174,9 +174,9 @@ static struct regulator_ops s2mps11_buck_ops = {
 .min_uV = S2MPS11_BUCK_MIN2, \
 .uV_step = S2MPS11_BUCK_STEP2, \
 .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
- .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_reg = S2MPS11_REG_B10CTRL2, \
 .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
- .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_reg = S2MPS11_REG_B10CTRL1, \
 .enable_mask = S2MPS11_ENABLE_MASK \
 }

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a848ffc..825fb7e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,6 +60,9 @@ struct wireless_dev;
 #define SET_ETHTOOL_OPS(netdev,ops) \
 ( (netdev)->ethtool_ops = (ops) )

+extern void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops);
+
 /* hardware address assignment types */
 #define NET_ADDR_PERM 0 /* address is permanent (default) */
 #define NET_ADDR_RANDOM 1 /* address is generated randomly */
diff --git a/include/net/ip.h b/include/net/ip.h
index 0707fb9..a68f838 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -143,6 +143,8 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
 extern int ip4_datagram_connect(struct sock *sk,
 struct sockaddr *uaddr, int addr_len);

+extern void ip4_datagram_release_cb(struct sock *sk);
+
 struct ip_reply_arg {
 struct kvec iov[1];
 int flags;
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index a592337..cf31bad 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -859,6 +859,19 @@ int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)

 skb_pull(skb, sizeof(code));

+ /*
+ * The SMP context must be initialized for all other PDUs except
+ * pairing and security requests. If we get any other PDU when
+ * not initialized simply disconnect (done if this function
+ * returns an error).
+ */
+ if (code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ &&
+ !conn->smp_chan) {
+ BT_ERR("Unexpected SMP command 0x%02x. Disconnecting.", code);
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
 switch (code) {
 case SMP_CMD_PAIRING_REQ:
 reason = smp_cmd_pairing_req(conn, skb);
diff --git a/net/core/dev.c b/net/core/dev.c
index e5942bf..3470794 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6012,6 +6012,14 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)

 static const struct ethtool_ops default_ethtool_ops;

+void netdev_set_default_ethtool_ops(struct net_device *dev,
+ const struct ethtool_ops *ops)
+{
+ if (dev->ethtool_ops == &default_ethtool_ops)
+ dev->ethtool_ops = ops;
+}
+EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
+
 /**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index d1dc14c..21350db 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1795,10 +1795,13 @@ static ssize_t pktgen_thread_write(struct file *file,
 return -EFAULT;
 i += len;
 mutex_lock(&pktgen_thread_lock);
- pktgen_add_device(t, f);
+ ret = pktgen_add_device(t, f);
 mutex_unlock(&pktgen_thread_lock);
- ret = count;
- sprintf(pg_result, "OK: add_device=%s", f);
+ if (!ret) {
+ ret = count;
+ sprintf(pg_result, "OK: add_device=%s", f);
+ } else
+ sprintf(pg_result, "ERROR: can not add device %s", f);
 goto out;
 }

diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index c31d9e8..4425148 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -186,8 +186,6 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 struct fastopen_queue *fastopenq =
 inet_csk(lsk)->icsk_accept_queue.fastopenq;

- BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));
-
 tcp_sk(sk)->fastopen_rsk = NULL;
 spin_lock_bh(&fastopenq->lock);
 fastopenq->qlen--;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3f0636c..1899d83 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1620,7 +1620,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)

 static struct page *linear_to_page(struct page *page, unsigned int *len,
 unsigned int *offset,
- struct sk_buff *skb, struct sock *sk)
+ struct sock *sk)
 {
 struct page_frag *pfrag = sk_page_frag(sk);

@@ -1653,14 +1653,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
 static bool spd_fill_page(struct splice_pipe_desc *spd,
 struct pipe_inode_info *pipe, struct page *page,
 unsigned int *len, unsigned int offset,
- struct sk_buff *skb, bool linear,
+ bool linear,
 struct sock *sk)
 {
 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
 return true;

 if (linear) {
- page = linear_to_page(page, len, &offset, skb, sk);
+ page = linear_to_page(page, len, &offset, sk);
 if (!page)
 return true;
 }
@@ -1677,23 +1677,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
 return false;
 }

-static inline void __segment_seek(struct page **page, unsigned int *poff,
- unsigned int *plen, unsigned int off)
-{
- unsigned long n;
-
- *poff += off;
- n = *poff / PAGE_SIZE;
- if (n)
- *page = nth_page(*page, n);
-
- *poff = *poff % PAGE_SIZE;
- *plen -= off;
-}
-
 static bool __splice_segment(struct page *page, unsigned int poff,
 unsigned int plen, unsigned int *off,
- unsigned int *len, struct sk_buff *skb,
+ unsigned int *len,
 struct splice_pipe_desc *spd, bool linear,
 struct sock *sk,
 struct pipe_inode_info *pipe)
@@ -1708,23 +1694,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
 }

 /* ignore any bits we already processed */
- if (*off) {
- __segment_seek(&page, &poff, &plen, *off);
- *off = 0;
- }
+ poff += *off;
+ plen -= *off;
+ *off = 0;

 do {
 unsigned int flen = min(*len, plen);

- /* the linear region may spread across several pages */
- flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
-
- if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
+ if (spd_fill_page(spd, pipe, page, &flen, poff,
+ linear, sk))
 return true;
-
- __segment_seek(&page, &poff, &plen, flen);
+ poff += flen;
+ plen -= flen;
 *len -= flen;
-
 } while (*len && plen);

 return false;
@@ -1748,7 +1730,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
 if (__splice_segment(virt_to_page(skb->data),
 (unsigned long) skb->data & (PAGE_SIZE - 1),
 skb_headlen(skb),
- offset, len, skb, spd,
+ offset, len, spd,
 skb_head_is_locked(skb),
 sk, pipe))
 return true;
@@ -1761,7 +1743,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,

 if (__splice_segment(skb_frag_page(f),
 f->page_offset, skb_frag_size(f),
- offset, len, skb, spd, false, sk, pipe))
+ offset, len, spd, false, sk, pipe))
 return true;
 }

1536 diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
1537 index 424fafb..b28e863 100644
1538 --- a/net/ipv4/datagram.c
1539 +++ b/net/ipv4/datagram.c
1540 @@ -85,3 +85,28 @@ out:
1541 return err;
1542 }
1543 EXPORT_SYMBOL(ip4_datagram_connect);
1544 +
1545 +void ip4_datagram_release_cb(struct sock *sk)
1546 +{
1547 + const struct inet_sock *inet = inet_sk(sk);
1548 + const struct ip_options_rcu *inet_opt;
1549 + __be32 daddr = inet->inet_daddr;
1550 + struct flowi4 fl4;
1551 + struct rtable *rt;
1552 +
1553 +	if (!__sk_dst_get(sk) || __sk_dst_check(sk, 0))
1554 + return;
1555 +
1556 + rcu_read_lock();
1557 + inet_opt = rcu_dereference(inet->inet_opt);
1558 + if (inet_opt && inet_opt->opt.srr)
1559 + daddr = inet_opt->opt.faddr;
1560 + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, daddr,
1561 + inet->inet_saddr, inet->inet_dport,
1562 + inet->inet_sport, sk->sk_protocol,
1563 + RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
1564 + if (!IS_ERR(rt))
1565 + __sk_dst_set(sk, &rt->dst);
1566 + rcu_read_unlock();
1567 +}
1568 +EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
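The new callback only works because the socket core invokes it when the
socket lock is released: a datagram socket whose cached route was
invalidated by a redirect or PMTU event while user context owned the sock
gets a chance to re-resolve it. A simplified sketch of the call site in
release_sock() (net/core/sock.c of this era; shown for context, not part
of this patch):

    void release_sock(struct sock *sk)
    {
            spin_lock_bh(&sk->sk_lock.slock);
            if (sk->sk_backlog.tail)
                    __release_sock(sk);

            /* The hook wired up by this patch: ip4_datagram_release_cb()
             * for UDP, raw and ping sockets. */
            if (sk->sk_prot->release_cb)
                    sk->sk_prot->release_cb(sk);

            sk->sk_lock.owned = 0;
            spin_unlock_bh(&sk->sk_lock.slock);
    }
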
1569 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
1570 index 7240f8e..07538a7 100644
1571 --- a/net/ipv4/ip_gre.c
1572 +++ b/net/ipv4/ip_gre.c
1573 @@ -972,8 +972,12 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
1574 ptr--;
1575 }
1576 if (tunnel->parms.o_flags&GRE_CSUM) {
1577 + int offset = skb_transport_offset(skb);
1578 +
1579 *ptr = 0;
1580 - *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
1581 + *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
1582 + skb->len - offset,
1583 + 0));
1584 }
1585 }
1586
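The old GRE checksum read the payload through a direct pointer, which is
only valid for data sitting in the linear skb head; skb_checksum() walks
the linear head, page frags and the frag list, so the folded result is
correct for non-linear skbs too. The two forms side by side (a fragment,
kernel context assumed):

    /* Before: only sees bytes in the linear head. */
    csum = ip_compute_csum((void *)(iph + 1),
                           skb->len - sizeof(struct iphdr));

    /* After: walks all skb geometry from the transport header on. */
    offset = skb_transport_offset(skb);
    csum = csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
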
1587 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1588 index 14bbfcf..e95d72b 100644
1589 --- a/net/ipv4/ip_sockglue.c
1590 +++ b/net/ipv4/ip_sockglue.c
1591 @@ -590,7 +590,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
1592 case IP_TTL:
1593 if (optlen < 1)
1594 goto e_inval;
1595 - if (val != -1 && (val < 0 || val > 255))
1596 + if (val != -1 && (val < 1 || val > 255))
1597 goto e_inval;
1598 inet->uc_ttl = val;
1599 break;
1600 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
1601 index 8f3d054..6f9c072 100644
1602 --- a/net/ipv4/ping.c
1603 +++ b/net/ipv4/ping.c
1604 @@ -738,6 +738,7 @@ struct proto ping_prot = {
1605 .recvmsg = ping_recvmsg,
1606 .bind = ping_bind,
1607 .backlog_rcv = ping_queue_rcv_skb,
1608 + .release_cb = ip4_datagram_release_cb,
1609 .hash = ping_v4_hash,
1610 .unhash = ping_v4_unhash,
1611 .get_port = ping_v4_get_port,
1612 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
1613 index 73d1e4d..6f08991 100644
1614 --- a/net/ipv4/raw.c
1615 +++ b/net/ipv4/raw.c
1616 @@ -894,6 +894,7 @@ struct proto raw_prot = {
1617 .recvmsg = raw_recvmsg,
1618 .bind = raw_bind,
1619 .backlog_rcv = raw_rcv_skb,
1620 + .release_cb = ip4_datagram_release_cb,
1621 .hash = raw_hash_sk,
1622 .unhash = raw_unhash_sk,
1623 .obj_size = sizeof(struct raw_sock),
1624 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1625 index df25142..0fdfe4c 100644
1626 --- a/net/ipv4/route.c
1627 +++ b/net/ipv4/route.c
1628 @@ -912,6 +912,9 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1629 struct dst_entry *dst = &rt->dst;
1630 struct fib_result res;
1631
1632 + if (dst_metric_locked(dst, RTAX_MTU))
1633 + return;
1634 +
1635 if (dst->dev->mtu < mtu)
1636 return;
1637
1638 @@ -962,7 +965,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1639 }
1640 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1641
1642 -void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1643 +static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1644 {
1645 const struct iphdr *iph = (const struct iphdr *) skb->data;
1646 struct flowi4 fl4;
1647 @@ -975,6 +978,53 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1648 ip_rt_put(rt);
1649 }
1650 }
1651 +
1652 +void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1653 +{
1654 + const struct iphdr *iph = (const struct iphdr *) skb->data;
1655 + struct flowi4 fl4;
1656 + struct rtable *rt;
1657 + struct dst_entry *dst;
1658 + bool new = false;
1659 +
1660 + bh_lock_sock(sk);
1661 + rt = (struct rtable *) __sk_dst_get(sk);
1662 +
1663 + if (sock_owned_by_user(sk) || !rt) {
1664 + __ipv4_sk_update_pmtu(skb, sk, mtu);
1665 + goto out;
1666 + }
1667 +
1668 + __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1669 +
1670 + if (!__sk_dst_check(sk, 0)) {
1671 + rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1672 + if (IS_ERR(rt))
1673 + goto out;
1674 +
1675 + new = true;
1676 + }
1677 +
1678 + __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1679 +
1680 + dst = dst_check(&rt->dst, 0);
1681 + if (!dst) {
1682 + if (new)
1683 + dst_release(&rt->dst);
1684 +
1685 + rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1686 + if (IS_ERR(rt))
1687 + goto out;
1688 +
1689 + new = true;
1690 + }
1691 +
1692 + if (new)
1693 + __sk_dst_set(sk, &rt->dst);
1694 +
1695 +out:
1696 + bh_unlock_sock(sk);
1697 +}
1698 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1699
1700 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1701 @@ -1120,7 +1170,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
1702 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1703 mtu = dst_metric_raw(dst, RTAX_MTU);
1704
1705 - if (mtu && rt_is_output_route(rt))
1706 + if (mtu)
1707 return mtu;
1708
1709 mtu = dst->dev->mtu;
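Two related PMTU fixes meet here: dst_metric_locked() makes an
administratively pinned MTU ("ip route ... lock mtu") win over
ICMP-driven learning, and the new __ipv4_sk_update_pmtu() wrapper updates
a socket's cached route under bh_lock_sock() instead of only the generic
cache. The lock test is a one-line bitmask check (include/net/dst.h,
shown for reference):

    static inline int dst_metric_locked(const struct dst_entry *dst,
                                        int metric)
    {
            /* RTAX_LOCK holds a bitmask; bit N pins metric N. */
            return dst_metric(dst, RTAX_LOCK) & (1 << metric);
    }
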
1710 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1711 index e457c7a..667e8a0 100644
1712 --- a/net/ipv4/tcp.c
1713 +++ b/net/ipv4/tcp.c
1714 @@ -1427,12 +1427,12 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)
1715 }
1716 #endif
1717
1718 -static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1719 +static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1720 {
1721 struct sk_buff *skb;
1722 u32 offset;
1723
1724 - skb_queue_walk(&sk->sk_receive_queue, skb) {
1725 + while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1726 offset = seq - TCP_SKB_CB(skb)->seq;
1727 if (tcp_hdr(skb)->syn)
1728 offset--;
1729 @@ -1440,6 +1440,11 @@ static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1730 *off = offset;
1731 return skb;
1732 }
1733 + /* This looks weird, but this can happen if TCP collapsing
1734 +		 * split a fat GRO packet while we released the socket lock
1735 + * in skb_splice_bits()
1736 + */
1737 + sk_eat_skb(sk, skb, false);
1738 }
1739 return NULL;
1740 }
1741 @@ -1481,7 +1486,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1742 break;
1743 }
1744 used = recv_actor(desc, skb, offset, len);
1745 - if (used < 0) {
1746 + if (used <= 0) {
1747 if (!copied)
1748 copied = used;
1749 break;
1750 @@ -1490,15 +1495,19 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1751 copied += used;
1752 offset += used;
1753 }
1754 - /*
1755 - * If recv_actor drops the lock (e.g. TCP splice
1756 + /* If recv_actor drops the lock (e.g. TCP splice
1757 * receive) the skb pointer might be invalid when
1758 * getting here: tcp_collapse might have deleted it
1759 * while aggregating skbs from the socket queue.
1760 */
1761 - skb = tcp_recv_skb(sk, seq-1, &offset);
1762 - if (!skb || (offset+1 != skb->len))
1763 + skb = tcp_recv_skb(sk, seq - 1, &offset);
1764 + if (!skb)
1765 break;
1766 + /* TCP coalescing might have appended data to the skb.
1767 + * Try to splice more frags
1768 + */
1769 + if (offset + 1 != skb->len)
1770 + continue;
1771 }
1772 if (tcp_hdr(skb)->fin) {
1773 sk_eat_skb(sk, skb, false);
1774 @@ -1515,8 +1524,10 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1775 tcp_rcv_space_adjust(sk);
1776
1777 /* Clean up data we have read: This will do ACK frames. */
1778 - if (copied > 0)
1779 + if (copied > 0) {
1780 + tcp_recv_skb(sk, seq, &offset);
1781 tcp_cleanup_rbuf(sk, copied);
1782 + }
1783 return copied;
1784 }
1785 EXPORT_SYMBOL(tcp_read_sock);
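Both tcp.c hunks deal with skbs mutating while tcp_read_sock() has dropped
the socket lock inside recv_actor (the splice path): collapsing can delete
the skb, coalescing can grow it. The loop therefore re-looks the current
skb up by sequence number after every actor call and decides whether to
continue, restart or stop. The pattern, schematically (illustrative helper
names, not kernel API):

    /* After recv_actor() returns, "skb" may be stale: */
    skb = lookup_by_seq(sk, seq - 1, &offset);
    if (!skb)
            break;              /* collapsed away: bail out        */
    if (offset + 1 != skb->len)
            continue;           /* coalescing grew it: splice more */
    /* else fall through and finish this skb as usual */
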
1786 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
1787 index 1432cdb..fc582a7 100644
1788 --- a/net/ipv4/tcp_cong.c
1789 +++ b/net/ipv4/tcp_cong.c
1790 @@ -309,6 +309,12 @@ void tcp_slow_start(struct tcp_sock *tp)
1791 {
1792 int cnt; /* increase in packets */
1793 unsigned int delta = 0;
1794 + u32 snd_cwnd = tp->snd_cwnd;
1795 +
1796 + if (unlikely(!snd_cwnd)) {
1797 +		pr_err_once("snd_cwnd is zero, please report this bug.\n");
1798 + snd_cwnd = 1U;
1799 + }
1800
1801 /* RFC3465: ABC Slow start
1802 * Increase only after a full MSS of bytes is acked
1803 @@ -323,7 +329,7 @@ void tcp_slow_start(struct tcp_sock *tp)
1804 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
1805 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
1806 else
1807 - cnt = tp->snd_cwnd; /* exponential increase */
1808 + cnt = snd_cwnd; /* exponential increase */
1809
1810 /* RFC3465: ABC
1811 * We MAY increase by 2 if discovered delayed ack
1812 @@ -333,11 +339,11 @@ void tcp_slow_start(struct tcp_sock *tp)
1813 tp->bytes_acked = 0;
1814
1815 tp->snd_cwnd_cnt += cnt;
1816 - while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
1817 - tp->snd_cwnd_cnt -= tp->snd_cwnd;
1818 + while (tp->snd_cwnd_cnt >= snd_cwnd) {
1819 + tp->snd_cwnd_cnt -= snd_cwnd;
1820 delta++;
1821 }
1822 - tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
1823 + tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
1824 }
1825 EXPORT_SYMBOL_GPL(tcp_slow_start);
1826
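The snapshot matters because a (buggy) tp->snd_cwnd of zero would make the
loop above spin forever: subtracting zero never makes progress. A
standalone demonstration of the clamp in plain C (not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int snd_cwnd = 0;      /* the pathological input */
            unsigned int snd_cwnd_cnt = 5, delta = 0;

            if (snd_cwnd == 0)              /* the patch's clamp */
                    snd_cwnd = 1;
            while (snd_cwnd_cnt >= snd_cwnd) {
                    snd_cwnd_cnt -= snd_cwnd;
                    delta++;
            }
            printf("delta=%u\n", delta);    /* terminates: delta=5 */
            return 0;
    }
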
1827 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
1828 index 181fc82..beabc80 100644
1829 --- a/net/ipv4/tcp_input.c
1830 +++ b/net/ipv4/tcp_input.c
1831 @@ -3504,6 +3504,11 @@ static bool tcp_process_frto(struct sock *sk, int flag)
1832 }
1833 } else {
1834 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
1835 + if (!tcp_packets_in_flight(tp)) {
1836 + tcp_enter_frto_loss(sk, 2, flag);
1837 + return true;
1838 + }
1839 +
1840 /* Prevent sending of new data. */
1841 tp->snd_cwnd = min(tp->snd_cwnd,
1842 tcp_packets_in_flight(tp));
1843 @@ -5639,8 +5644,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
1844 * the remote receives only the retransmitted (regular) SYNs: either
1845 * the original SYN-data or the corresponding SYN-ACK is lost.
1846 */
1847 - syn_drop = (cookie->len <= 0 && data &&
1848 - inet_csk(sk)->icsk_retransmits);
1849 + syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
1850
1851 tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
1852
1853 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1854 index bc3cb46..e637770 100644
1855 --- a/net/ipv4/tcp_ipv4.c
1856 +++ b/net/ipv4/tcp_ipv4.c
1857 @@ -380,11 +380,10 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
1858 * We do take care of PMTU discovery (RFC1191) special case :
1859 * we can receive locally generated ICMP messages while socket is held.
1860 */
1861 - if (sock_owned_by_user(sk) &&
1862 - type != ICMP_DEST_UNREACH &&
1863 - code != ICMP_FRAG_NEEDED)
1864 - NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1865 -
1866 + if (sock_owned_by_user(sk)) {
1867 + if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
1868 + NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
1869 + }
1870 if (sk->sk_state == TCP_CLOSE)
1871 goto out;
1872
1873 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1874 index 79c8dbe..1f4d405 100644
1875 --- a/net/ipv4/udp.c
1876 +++ b/net/ipv4/udp.c
1877 @@ -1952,6 +1952,7 @@ struct proto udp_prot = {
1878 .recvmsg = udp_recvmsg,
1879 .sendpage = udp_sendpage,
1880 .backlog_rcv = __udp_queue_rcv_skb,
1881 + .release_cb = ip4_datagram_release_cb,
1882 .hash = udp_lib_hash,
1883 .unhash = udp_lib_unhash,
1884 .rehash = udp_v4_rehash,
1885 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1886 index 0424e4e..a468a36 100644
1887 --- a/net/ipv6/addrconf.c
1888 +++ b/net/ipv6/addrconf.c
1889 @@ -1723,7 +1723,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
1890 continue;
1891 if ((rt->rt6i_flags & flags) != flags)
1892 continue;
1893 - if ((noflags != 0) && ((rt->rt6i_flags & flags) != 0))
1894 + if ((rt->rt6i_flags & noflags) != 0)
1895 continue;
1896 dst_hold(&rt->dst);
1897 break;
1898 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
1899 index 24d69db..4d844d7 100644
1900 --- a/net/ipv6/icmp.c
1901 +++ b/net/ipv6/icmp.c
1902 @@ -81,10 +81,22 @@ static inline struct sock *icmpv6_sk(struct net *net)
1903 return net->ipv6.icmp_sk[smp_processor_id()];
1904 }
1905
1906 +static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
1907 + u8 type, u8 code, int offset, __be32 info)
1908 +{
1909 + struct net *net = dev_net(skb->dev);
1910 +
1911 + if (type == ICMPV6_PKT_TOOBIG)
1912 + ip6_update_pmtu(skb, net, info, 0, 0);
1913 + else if (type == NDISC_REDIRECT)
1914 + ip6_redirect(skb, net, 0, 0);
1915 +}
1916 +
1917 static int icmpv6_rcv(struct sk_buff *skb);
1918
1919 static const struct inet6_protocol icmpv6_protocol = {
1920 .handler = icmpv6_rcv,
1921 + .err_handler = icmpv6_err,
1922 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1923 };
1924
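Registering an err_handler on the ICMPv6 protocol means ICMPv6 errors that
quote a locally generated ICMPv6 packet (notably Packet Too Big in reply
to a ping) now feed the routing layer instead of being ignored. Dispatch
follows the usual inet6_protocol contract; simplified from the
icmpv6_notify() walk in the same file (shown for context):

    ipprot = rcu_dereference(inet6_protos[nexthdr]);
    if (ipprot && ipprot->err_handler)
            ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
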
1925 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
1926 index d5cb3c4..a23350c 100644
1927 --- a/net/ipv6/ip6_gre.c
1928 +++ b/net/ipv6/ip6_gre.c
1929 @@ -976,7 +976,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
1930 int ret;
1931
1932 if (!ip6_tnl_xmit_ctl(t))
1933 - return -1;
1934 + goto tx_err;
1935
1936 switch (skb->protocol) {
1937 case htons(ETH_P_IP):
1938 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1939 index aece3e7..8dea314 100644
1940 --- a/net/ipv6/ip6_output.c
1941 +++ b/net/ipv6/ip6_output.c
1942 @@ -1279,10 +1279,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1943 if (dst_allfrag(rt->dst.path))
1944 cork->flags |= IPCORK_ALLFRAG;
1945 cork->length = 0;
1946 - exthdrlen = (opt ? opt->opt_flen : 0) - rt->rt6i_nfheader_len;
1947 + exthdrlen = (opt ? opt->opt_flen : 0);
1948 length += exthdrlen;
1949 transhdrlen += exthdrlen;
1950 - dst_exthdrlen = rt->dst.header_len;
1951 + dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
1952 } else {
1953 rt = (struct rt6_info *)cork->dst;
1954 fl6 = &inet->cork.fl.u.ip6;
1955 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1956 index b1e6cf0..b140ef2 100644
1957 --- a/net/ipv6/route.c
1958 +++ b/net/ipv6/route.c
1959 @@ -872,7 +872,7 @@ restart:
1960 dst_hold(&rt->dst);
1961 read_unlock_bh(&table->tb6_lock);
1962
1963 - if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
1964 + if (!rt->n && !(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_LOCAL)))
1965 nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
1966 else if (!(rt->dst.flags & DST_HOST))
1967 nrt = rt6_alloc_clone(rt, &fl6->daddr);
1968 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1969 index 94060ed..5db6316 100644
1970 --- a/net/packet/af_packet.c
1971 +++ b/net/packet/af_packet.c
1972 @@ -2335,13 +2335,15 @@ static int packet_release(struct socket *sock)
1973
1974 packet_flush_mclist(sk);
1975
1976 - memset(&req_u, 0, sizeof(req_u));
1977 -
1978 - if (po->rx_ring.pg_vec)
1979 + if (po->rx_ring.pg_vec) {
1980 + memset(&req_u, 0, sizeof(req_u));
1981 packet_set_ring(sk, &req_u, 1, 0);
1982 + }
1983
1984 - if (po->tx_ring.pg_vec)
1985 + if (po->tx_ring.pg_vec) {
1986 + memset(&req_u, 0, sizeof(req_u));
1987 packet_set_ring(sk, &req_u, 1, 1);
1988 + }
1989
1990 fanout_release(sk);
1991
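packet_set_ring() treats a zeroed request as "close this ring" and may
write through the union it is handed, so reusing one union for both
teardown calls risks feeding stale bytes from the rx pass into the tx
pass. Zeroing per call is the defensive pattern; a standalone
illustration with made-up names:

    #include <string.h>

    union ring_req { unsigned int block_nr; unsigned char raw[16]; };

    /* A callee that may scribble in the request it receives. */
    static void close_ring(union ring_req *req) { req->block_nr = 7; }

    static void release_rings(void)
    {
            union ring_req req;

            memset(&req, 0, sizeof(req));
            close_ring(&req);               /* rx */

            memset(&req, 0, sizeof(req));   /* re-zero before tx */
            close_ring(&req);
    }
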
1992 diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
1993 index 1859e2b..80a7264 100644
1994 --- a/net/sctp/endpointola.c
1995 +++ b/net/sctp/endpointola.c
1996 @@ -249,6 +249,8 @@ void sctp_endpoint_free(struct sctp_endpoint *ep)
1997 /* Final destructor for endpoint. */
1998 static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
1999 {
2000 + int i;
2001 +
2002 SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return);
2003
2004 /* Free up the HMAC transform. */
2005 @@ -271,6 +273,9 @@ static void sctp_endpoint_destroy(struct sctp_endpoint *ep)
2006 sctp_inq_free(&ep->base.inqueue);
2007 sctp_bind_addr_free(&ep->base.bind_addr);
2008
2009 + for (i = 0; i < SCTP_HOW_MANY_SECRETS; ++i)
2010 + memset(&ep->secret_key[i], 0, SCTP_SECRET_SIZE);
2011 +
2012 /* Remove and free the port */
2013 if (sctp_sk(ep->base.sk)->bind_hash)
2014 sctp_put_port(ep->base.sk);
2015 diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
2016 index 1b4a7f8..bcaa4c8 100644
2017 --- a/net/sctp/outqueue.c
2018 +++ b/net/sctp/outqueue.c
2019 @@ -224,7 +224,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
2020
2021 /* Free the outqueue structure and any related pending chunks.
2022 */
2023 -void sctp_outq_teardown(struct sctp_outq *q)
2024 +static void __sctp_outq_teardown(struct sctp_outq *q)
2025 {
2026 struct sctp_transport *transport;
2027 struct list_head *lchunk, *temp;
2028 @@ -277,8 +277,6 @@ void sctp_outq_teardown(struct sctp_outq *q)
2029 sctp_chunk_free(chunk);
2030 }
2031
2032 - q->error = 0;
2033 -
2034 /* Throw away any leftover control chunks. */
2035 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
2036 list_del_init(&chunk->list);
2037 @@ -286,11 +284,17 @@ void sctp_outq_teardown(struct sctp_outq *q)
2038 }
2039 }
2040
2041 +void sctp_outq_teardown(struct sctp_outq *q)
2042 +{
2043 + __sctp_outq_teardown(q);
2044 + sctp_outq_init(q->asoc, q);
2045 +}
2046 +
2047 /* Free the outqueue structure and any related pending chunks. */
2048 void sctp_outq_free(struct sctp_outq *q)
2049 {
2050 /* Throw away leftover chunks. */
2051 - sctp_outq_teardown(q);
2052 + __sctp_outq_teardown(q);
2053
2054 /* If we were kmalloc()'d, free the memory. */
2055 if (q->malloced)
2056 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2057 index 406d957..9261d9a 100644
2058 --- a/net/sctp/socket.c
2059 +++ b/net/sctp/socket.c
2060 @@ -3388,7 +3388,7 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
2061
2062 ret = sctp_auth_set_key(sctp_sk(sk)->ep, asoc, authkey);
2063 out:
2064 - kfree(authkey);
2065 + kzfree(authkey);
2066 return ret;
2067 }
2068
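kzfree() pairs with the endpointola.c hunk above: both scrub key material
rather than merely releasing it, so authentication keys do not linger in
freed memory. kzfree() itself is a thin wrapper (mm/util.c of this era,
shown for reference):

    void kzfree(const void *p)
    {
            size_t ks;

            if (unlikely(ZERO_OR_NULL_PTR(p)))
                    return;
            ks = ksize(p);
            memset((void *)p, 0, ks);
            kfree(p);
    }
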
2069 diff --git a/net/wireless/core.c b/net/wireless/core.c
2070 index 3f72530..d1531e5 100644
2071 --- a/net/wireless/core.c
2072 +++ b/net/wireless/core.c
2073 @@ -856,8 +856,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
2074 /* allow mac80211 to determine the timeout */
2075 wdev->ps_timeout = -1;
2076
2077 - if (!dev->ethtool_ops)
2078 - dev->ethtool_ops = &cfg80211_ethtool_ops;
2079 + netdev_set_default_ethtool_ops(dev, &cfg80211_ethtool_ops);
2080
2081 if ((wdev->iftype == NL80211_IFTYPE_STATION ||
2082 wdev->iftype == NL80211_IFTYPE_P2P_CLIENT ||