Magellan Linux

Contents of /trunk/kernel-lts/patches-3.4/0165-3.4.66-all-fixes.patch



Revision 2307
Mon Oct 14 07:15:39 2013 UTC by niro
File size: 53018 bytes
-linux-3.4.66
1 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
2 index 8f5813b..056cfbe 100644
3 --- a/arch/arm/mm/init.c
4 +++ b/arch/arm/mm/init.c
5 @@ -98,6 +98,9 @@ void show_mem(unsigned int filter)
6 printk("Mem-info:\n");
7 show_free_areas(filter);
8
9 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
10 + return;
11 +
12 for_each_bank (i, mi) {
13 struct membank *bank = &mi->bank[i];
14 unsigned int pfn1, pfn2;
15 diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
16 index 1516d1d..f2652fc 100644
17 --- a/arch/ia64/mm/contig.c
18 +++ b/arch/ia64/mm/contig.c
19 @@ -47,6 +47,8 @@ void show_mem(unsigned int filter)
20 printk(KERN_INFO "Mem-info:\n");
21 show_free_areas(filter);
22 printk(KERN_INFO "Node memory in pages:\n");
23 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
24 + return;
25 for_each_online_pgdat(pgdat) {
26 unsigned long present;
27 unsigned long flags;
28 diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
29 index c641333..2230817 100644
30 --- a/arch/ia64/mm/discontig.c
31 +++ b/arch/ia64/mm/discontig.c
32 @@ -623,6 +623,8 @@ void show_mem(unsigned int filter)
33
34 printk(KERN_INFO "Mem-info:\n");
35 show_free_areas(filter);
36 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
37 + return;
38 printk(KERN_INFO "Node memory in pages:\n");
39 for_each_online_pgdat(pgdat) {
40 unsigned long present;
41 diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
42 index 82f364e..0b62162 100644
43 --- a/arch/parisc/mm/init.c
44 +++ b/arch/parisc/mm/init.c
45 @@ -685,6 +685,8 @@ void show_mem(unsigned int filter)
46
47 printk(KERN_INFO "Mem-info:\n");
48 show_free_areas(filter);
49 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
50 + return;
51 #ifndef CONFIG_DISCONTIGMEM
52 i = max_mapnr;
53 while (i-- > 0) {
54 diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
55 index 359f078..b2f4a8ed 100644
56 --- a/arch/powerpc/kernel/iommu.c
57 +++ b/arch/powerpc/kernel/iommu.c
58 @@ -501,7 +501,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
59 /* number of bytes needed for the bitmap */
60 sz = (tbl->it_size + 7) >> 3;
61
62 - page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
63 + page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
64 if (!page)
65 panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
66 tbl->it_map = page_address(page);
67 diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
68 index a3a9990..cfe0069 100644
69 --- a/arch/powerpc/kernel/vio.c
70 +++ b/arch/powerpc/kernel/vio.c
71 @@ -1341,11 +1341,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
72 const char *cp;
73
74 dn = dev->of_node;
75 - if (!dn)
76 - return -ENODEV;
77 + if (!dn) {
78 + strcat(buf, "\n");
79 + return strlen(buf);
80 + }
81 cp = of_get_property(dn, "compatible", NULL);
82 - if (!cp)
83 - return -ENODEV;
84 + if (!cp) {
85 + strcat(buf, "\n");
86 + return strlen(buf);
87 + }
88
89 return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
90 }
91 diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
92 index 18245af..afa2eba 100644
93 --- a/arch/powerpc/lib/checksum_64.S
94 +++ b/arch/powerpc/lib/checksum_64.S
95 @@ -272,8 +272,8 @@ _GLOBAL(csum_partial_copy_generic)
96 rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */
97 beq .Lcopy_aligned
98
99 - li r7,4
100 - sub r6,r7,r6
101 + li r9,4
102 + sub r6,r9,r6
103 mtctr r6
104
105 1:
106 diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
107 index f445e98..cfabc3d 100644
108 --- a/arch/sparc/kernel/entry.S
109 +++ b/arch/sparc/kernel/entry.S
110 @@ -1177,7 +1177,7 @@ sys_sigreturn:
111 nop
112
113 call syscall_trace
114 - nop
115 + mov 1, %o1
116
117 1:
118 /* We don't want to muck with user registers like a
119 diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
120 index 79f3103..7c00735 100644
121 --- a/arch/sparc/kernel/ktlb.S
122 +++ b/arch/sparc/kernel/ktlb.S
123 @@ -25,11 +25,10 @@ kvmap_itlb:
124 */
125 kvmap_itlb_4v:
126
127 -kvmap_itlb_nonlinear:
128 /* Catch kernel NULL pointer calls. */
129 sethi %hi(PAGE_SIZE), %g5
130 cmp %g4, %g5
131 - bleu,pn %xcc, kvmap_dtlb_longpath
132 + blu,pn %xcc, kvmap_itlb_longpath
133 nop
134
135 KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
136 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
137 index 7f5f65d..817187d 100644
138 --- a/arch/sparc/kernel/syscalls.S
139 +++ b/arch/sparc/kernel/syscalls.S
140 @@ -147,7 +147,7 @@ linux_syscall_trace32:
141 srl %i4, 0, %o4
142 srl %i1, 0, %o1
143 srl %i2, 0, %o2
144 - ba,pt %xcc, 2f
145 + ba,pt %xcc, 5f
146 srl %i3, 0, %o3
147
148 linux_syscall_trace:
149 @@ -177,13 +177,13 @@ linux_sparc_syscall32:
150 srl %i1, 0, %o1 ! IEU0 Group
151 ldx [%g6 + TI_FLAGS], %l0 ! Load
152
153 - srl %i5, 0, %o5 ! IEU1
154 + srl %i3, 0, %o3 ! IEU0
155 srl %i2, 0, %o2 ! IEU0 Group
156 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
157 bne,pn %icc, linux_syscall_trace32 ! CTI
158 mov %i0, %l5 ! IEU1
159 - call %l7 ! CTI Group brk forced
160 - srl %i3, 0, %o3 ! IEU0
161 +5: call %l7 ! CTI Group brk forced
162 + srl %i5, 0, %o5 ! IEU1
163 ba,a,pt %xcc, 3f
164
165 /* Linux native system calls enter here... */
166 diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
167 index da1b781..8fa84a3 100644
168 --- a/arch/sparc/kernel/trampoline_64.S
169 +++ b/arch/sparc/kernel/trampoline_64.S
170 @@ -131,7 +131,6 @@ startup_continue:
171 clr %l5
172 sethi %hi(num_kernel_image_mappings), %l6
173 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
174 - add %l6, 1, %l6
175
176 mov 15, %l7
177 BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
178 @@ -224,7 +223,6 @@ niagara_lock_tlb:
179 clr %l5
180 sethi %hi(num_kernel_image_mappings), %l6
181 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
182 - add %l6, 1, %l6
183
184 1:
185 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
186 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
187 index f73c224..0b59bd3 100644
188 --- a/arch/sparc/lib/ksyms.c
189 +++ b/arch/sparc/lib/ksyms.c
190 @@ -125,15 +125,6 @@ EXPORT_SYMBOL(___copy_from_user);
191 EXPORT_SYMBOL(___copy_in_user);
192 EXPORT_SYMBOL(__clear_user);
193
194 -/* RW semaphores */
195 -EXPORT_SYMBOL(__down_read);
196 -EXPORT_SYMBOL(__down_read_trylock);
197 -EXPORT_SYMBOL(__down_write);
198 -EXPORT_SYMBOL(__down_write_trylock);
199 -EXPORT_SYMBOL(__up_read);
200 -EXPORT_SYMBOL(__up_write);
201 -EXPORT_SYMBOL(__downgrade_write);
202 -
203 /* Atomic counter implementation. */
204 EXPORT_SYMBOL(atomic_add);
205 EXPORT_SYMBOL(atomic_add_ret);
206 diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
207 index 63294f5..4f7ae39 100644
208 --- a/arch/tile/include/asm/percpu.h
209 +++ b/arch/tile/include/asm/percpu.h
210 @@ -15,9 +15,37 @@
211 #ifndef _ASM_TILE_PERCPU_H
212 #define _ASM_TILE_PERCPU_H
213
214 -register unsigned long __my_cpu_offset __asm__("tp");
215 -#define __my_cpu_offset __my_cpu_offset
216 -#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
217 +register unsigned long my_cpu_offset_reg asm("tp");
218 +
219 +#ifdef CONFIG_PREEMPT
220 +/*
221 + * For full preemption, we can't just use the register variable
222 + * directly, since we need barrier() to hazard against it, causing the
223 + * compiler to reload anything computed from a previous "tp" value.
224 + * But we also don't want to use volatile asm, since we'd like the
225 + * compiler to be able to cache the value across multiple percpu reads.
226 + * So we use a fake stack read as a hazard against barrier().
227 + * The 'U' constraint is like 'm' but disallows postincrement.
228 + */
229 +static inline unsigned long __my_cpu_offset(void)
230 +{
231 + unsigned long tp;
232 + register unsigned long *sp asm("sp");
233 + asm("move %0, tp" : "=r" (tp) : "U" (*sp));
234 + return tp;
235 +}
236 +#define __my_cpu_offset __my_cpu_offset()
237 +#else
238 +/*
239 + * We don't need to hazard against barrier() since "tp" doesn't ever
240 + * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
241 + * changes at function call points, at which we are already re-reading
242 + * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
243 + */
244 +#define __my_cpu_offset my_cpu_offset_reg
245 +#endif
246 +
247 +#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
248
249 #include <asm-generic/percpu.h>
250
251 diff --git a/arch/unicore32/mm/init.c b/arch/unicore32/mm/init.c
252 index de186bd..6444828 100644
253 --- a/arch/unicore32/mm/init.c
254 +++ b/arch/unicore32/mm/init.c
255 @@ -66,6 +66,9 @@ void show_mem(unsigned int filter)
256 printk(KERN_DEFAULT "Mem-info:\n");
257 show_free_areas(filter);
258
259 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
260 + return;
261 +
262 for_each_bank(i, mi) {
263 struct membank *bank = &mi->bank[i];
264 unsigned int pfn1, pfn2;
265 diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c
266 index f40acef..a6977e1 100644
267 --- a/drivers/acpi/acpi_ipmi.c
268 +++ b/drivers/acpi/acpi_ipmi.c
269 @@ -39,6 +39,7 @@
270 #include <linux/ipmi.h>
271 #include <linux/device.h>
272 #include <linux/pnp.h>
273 +#include <linux/spinlock.h>
274
275 MODULE_AUTHOR("Zhao Yakui");
276 MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
277 @@ -57,7 +58,7 @@ struct acpi_ipmi_device {
278 struct list_head head;
279 /* the IPMI request message list */
280 struct list_head tx_msg_list;
281 - struct mutex tx_msg_lock;
282 + spinlock_t tx_msg_lock;
283 acpi_handle handle;
284 struct pnp_dev *pnp_dev;
285 ipmi_user_t user_interface;
286 @@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
287 struct kernel_ipmi_msg *msg;
288 struct acpi_ipmi_buffer *buffer;
289 struct acpi_ipmi_device *device;
290 + unsigned long flags;
291
292 msg = &tx_msg->tx_message;
293 /*
294 @@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg,
295
296 /* Get the msgid */
297 device = tx_msg->device;
298 - mutex_lock(&device->tx_msg_lock);
299 + spin_lock_irqsave(&device->tx_msg_lock, flags);
300 device->curr_msgid++;
301 tx_msg->tx_msgid = device->curr_msgid;
302 - mutex_unlock(&device->tx_msg_lock);
303 + spin_unlock_irqrestore(&device->tx_msg_lock, flags);
304 }
305
306 static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
307 @@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
308 int msg_found = 0;
309 struct acpi_ipmi_msg *tx_msg;
310 struct pnp_dev *pnp_dev = ipmi_device->pnp_dev;
311 + unsigned long flags;
312
313 if (msg->user != ipmi_device->user_interface) {
314 dev_warn(&pnp_dev->dev, "Unexpected response is returned. "
315 @@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
316 ipmi_free_recv_msg(msg);
317 return;
318 }
319 - mutex_lock(&ipmi_device->tx_msg_lock);
320 + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
321 list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) {
322 if (msg->msgid == tx_msg->tx_msgid) {
323 msg_found = 1;
324 @@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
325 }
326 }
327
328 - mutex_unlock(&ipmi_device->tx_msg_lock);
329 + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
330 if (!msg_found) {
331 dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is "
332 "returned.\n", msg->msgid);
333 @@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
334 struct acpi_ipmi_device *ipmi_device = handler_context;
335 int err, rem_time;
336 acpi_status status;
337 + unsigned long flags;
338 /*
339 * IPMI opregion message.
340 * IPMI message is firstly written to the BMC and system software
341 @@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
342 return AE_NO_MEMORY;
343
344 acpi_format_ipmi_msg(tx_msg, address, value);
345 - mutex_lock(&ipmi_device->tx_msg_lock);
346 + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
347 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
348 - mutex_unlock(&ipmi_device->tx_msg_lock);
349 + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
350 err = ipmi_request_settime(ipmi_device->user_interface,
351 &tx_msg->addr,
352 tx_msg->tx_msgid,
353 @@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
354 status = AE_OK;
355
356 end_label:
357 - mutex_lock(&ipmi_device->tx_msg_lock);
358 + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
359 list_del(&tx_msg->head);
360 - mutex_unlock(&ipmi_device->tx_msg_lock);
361 + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
362 kfree(tx_msg);
363 return status;
364 }
365 @@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device)
366
367 INIT_LIST_HEAD(&ipmi_device->head);
368
369 - mutex_init(&ipmi_device->tx_msg_lock);
370 + spin_lock_init(&ipmi_device->tx_msg_lock);
371 INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
372 ipmi_install_space_handler(ipmi_device);
373
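
The hunks above swap tx_msg_lock from a mutex to a spinlock because the message list is now touched from the IPMI response handler, which may run in a context that must not sleep. A minimal kernel-style sketch of the resulting irqsave pattern, using illustrative structure and function names rather than the driver's own:

#include <linux/spinlock.h>
#include <linux/list.h>

struct msg_queue {
        struct list_head msgs;          /* pending transmit messages */
        spinlock_t lock;                /* protects msgs; usable in atomic context */
};

struct queued_msg {
        struct list_head head;
        unsigned long msgid;
};

static void msg_queue_init(struct msg_queue *q)
{
        INIT_LIST_HEAD(&q->msgs);
        spin_lock_init(&q->lock);                       /* was mutex_init() before the fix */
}

static void msg_queue_add(struct msg_queue *q, struct queued_msg *m)
{
        unsigned long flags;

        spin_lock_irqsave(&q->lock, flags);             /* safe from handler paths that cannot sleep */
        list_add_tail(&m->head, &q->msgs);
        spin_unlock_irqrestore(&q->lock, flags);
}
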
374 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
375 index d3446f6..d7ad865 100644
376 --- a/drivers/block/cciss.c
377 +++ b/drivers/block/cciss.c
378 @@ -1186,6 +1186,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
379 int err;
380 u32 cp;
381
382 + memset(&arg64, 0, sizeof(arg64));
383 err = 0;
384 err |=
385 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
386 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
387 index 9125bbe..504bc16 100644
388 --- a/drivers/block/cpqarray.c
389 +++ b/drivers/block/cpqarray.c
390 @@ -1195,6 +1195,7 @@ out_passthru:
391 ida_pci_info_struct pciinfo;
392
393 if (!arg) return -EINVAL;
394 + memset(&pciinfo, 0, sizeof(pciinfo));
395 pciinfo.bus = host->pci_dev->bus->number;
396 pciinfo.dev_fn = host->pci_dev->devfn;
397 pciinfo.board_id = host->board_id;
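
The two memset() additions above (cciss and cpqarray) close the same class of bug: a struct on the kernel stack is only partially filled before being copied to user space, so padding and untouched fields leak stack contents. A sketch of the pattern, with a made-up reply structure:

#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct pci_info_reply {                 /* illustrative ioctl reply layout */
        unsigned char bus;
        unsigned int dev_fn;
        unsigned int board_id;          /* compiler padding would otherwise leak stack data */
};

static int fill_pci_info(void __user *arg, unsigned char bus,
                         unsigned int dev_fn, unsigned int board_id)
{
        struct pci_info_reply info;

        memset(&info, 0, sizeof(info)); /* the fix: zero every byte first */
        info.bus = bus;
        info.dev_fn = dev_fn;
        info.board_id = board_id;

        return copy_to_user(arg, &info, sizeof(info)) ? -EFAULT : 0;
}
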
398 diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
399 index 180f523..d5ce453 100644
400 --- a/drivers/dma/imx-dma.c
401 +++ b/drivers/dma/imx-dma.c
402 @@ -373,17 +373,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
403 struct imxdma_engine *imxdma = imxdmac->imxdma;
404 int chno = imxdmac->channel;
405 struct imxdma_desc *desc;
406 + unsigned long flags;
407
408 - spin_lock(&imxdma->lock);
409 + spin_lock_irqsave(&imxdma->lock, flags);
410 if (list_empty(&imxdmac->ld_active)) {
411 - spin_unlock(&imxdma->lock);
412 + spin_unlock_irqrestore(&imxdma->lock, flags);
413 goto out;
414 }
415
416 desc = list_first_entry(&imxdmac->ld_active,
417 struct imxdma_desc,
418 node);
419 - spin_unlock(&imxdma->lock);
420 + spin_unlock_irqrestore(&imxdma->lock, flags);
421
422 if (desc->sg) {
423 u32 tmp;
424 @@ -455,7 +456,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
425 {
426 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
427 struct imxdma_engine *imxdma = imxdmac->imxdma;
428 - unsigned long flags;
429 int slot = -1;
430 int i;
431
432 @@ -463,7 +463,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
433 switch (d->type) {
434 case IMXDMA_DESC_INTERLEAVED:
435 /* Try to get a free 2D slot */
436 - spin_lock_irqsave(&imxdma->lock, flags);
437 for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
438 if ((imxdma->slots_2d[i].count > 0) &&
439 ((imxdma->slots_2d[i].xsr != d->x) ||
440 @@ -473,10 +472,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
441 slot = i;
442 break;
443 }
444 - if (slot < 0) {
445 - spin_unlock_irqrestore(&imxdma->lock, flags);
446 + if (slot < 0)
447 return -EBUSY;
448 - }
449
450 imxdma->slots_2d[slot].xsr = d->x;
451 imxdma->slots_2d[slot].ysr = d->y;
452 @@ -485,7 +482,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
453
454 imxdmac->slot_2d = slot;
455 imxdmac->enabled_2d = true;
456 - spin_unlock_irqrestore(&imxdma->lock, flags);
457
458 if (slot == IMX_DMA_2D_SLOT_A) {
459 d->config_mem &= ~CCR_MSEL_B;
460 @@ -561,18 +557,17 @@ static void imxdma_tasklet(unsigned long data)
461 struct imxdma_channel *imxdmac = (void *)data;
462 struct imxdma_engine *imxdma = imxdmac->imxdma;
463 struct imxdma_desc *desc;
464 + unsigned long flags;
465
466 - spin_lock(&imxdma->lock);
467 + spin_lock_irqsave(&imxdma->lock, flags);
468
469 if (list_empty(&imxdmac->ld_active)) {
470 /* Someone might have called terminate all */
471 - goto out;
472 + spin_unlock_irqrestore(&imxdma->lock, flags);
473 + return;
474 }
475 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
476
477 - if (desc->desc.callback)
478 - desc->desc.callback(desc->desc.callback_param);
479 -
480 /* If we are dealing with a cyclic descriptor keep it on ld_active
481 * and dont mark the descripor as complete.
482 * Only in non-cyclic cases it would be marked as complete
483 @@ -599,7 +594,11 @@ static void imxdma_tasklet(unsigned long data)
484 __func__, imxdmac->channel);
485 }
486 out:
487 - spin_unlock(&imxdma->lock);
488 + spin_unlock_irqrestore(&imxdma->lock, flags);
489 +
490 + if (desc->desc.callback)
491 + desc->desc.callback(desc->desc.callback_param);
492 +
493 }
494
495 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
496 @@ -823,7 +822,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
497 kfree(imxdmac->sg_list);
498
499 imxdmac->sg_list = kcalloc(periods + 1,
500 - sizeof(struct scatterlist), GFP_KERNEL);
501 + sizeof(struct scatterlist), GFP_ATOMIC);
502 if (!imxdmac->sg_list)
503 return NULL;
504
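
The imx-dma changes above do two related things: the IRQ handler and tasklet now use the irqsave lock variants, and the descriptor completion callback is invoked only after the lock has been dropped, so a callback that immediately issues another transfer cannot deadlock on the same lock. A reduced sketch with illustrative types:

#include <linux/spinlock.h>
#include <linux/list.h>

struct example_desc {
        struct list_head node;
        void (*callback)(void *param);
        void *callback_param;
};

struct example_chan {
        spinlock_t lock;
        struct list_head active;        /* descriptors in flight */
};

static void example_tasklet(unsigned long data)
{
        struct example_chan *c = (struct example_chan *)data;
        struct example_desc *desc;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (list_empty(&c->active)) {
                /* terminate_all may have raced with us and emptied the list */
                spin_unlock_irqrestore(&c->lock, flags);
                return;
        }
        desc = list_first_entry(&c->active, struct example_desc, node);
        /* ... list bookkeeping under the lock ... */
        spin_unlock_irqrestore(&c->lock, flags);

        if (desc->callback)             /* run the callback lock-free, as in the patch */
                desc->callback(desc->callback_param);
}
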
505 diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
506 index daf21b8..2309f2e 100644
507 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
508 +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
509 @@ -1610,7 +1610,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
510 int resp_data_len;
511 int resp_len;
512
513 - resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
514 + resp_data_len = 4;
515 resp_len = sizeof(*srp_rsp) + resp_data_len;
516
517 srp_rsp = ioctx->ioctx.buf;
518 @@ -1622,11 +1622,9 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
519 + atomic_xchg(&ch->req_lim_delta, 0));
520 srp_rsp->tag = tag;
521
522 - if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
523 - srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
524 - srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
525 - srp_rsp->data[3] = rsp_code;
526 - }
527 + srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
528 + srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
529 + srp_rsp->data[3] = rsp_code;
530
531 return resp_len;
532 }
533 @@ -2373,6 +2371,8 @@ static void srpt_release_channel_work(struct work_struct *w)
534 transport_deregister_session(ch->sess);
535 ch->sess = NULL;
536
537 + ib_destroy_cm_id(ch->cm_id);
538 +
539 srpt_destroy_ch_ib(ch);
540
541 srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
542 @@ -2383,8 +2383,6 @@ static void srpt_release_channel_work(struct work_struct *w)
543 list_del(&ch->list);
544 spin_unlock_irq(&sdev->spinlock);
545
546 - ib_destroy_cm_id(ch->cm_id);
547 -
548 if (ch->release_done)
549 complete(ch->release_done);
550
551 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
552 index 80103bb..098581a 100644
553 --- a/drivers/net/bonding/bond_main.c
554 +++ b/drivers/net/bonding/bond_main.c
555 @@ -1933,6 +1933,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
556 struct bonding *bond = netdev_priv(bond_dev);
557 struct slave *slave, *oldcurrent;
558 struct sockaddr addr;
559 + int old_flags = bond_dev->flags;
560 netdev_features_t old_features = bond_dev->features;
561
562 /* slave is not a slave or master is not master of this slave */
563 @@ -2066,12 +2067,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
564 * already taken care of above when we detached the slave
565 */
566 if (!USES_PRIMARY(bond->params.mode)) {
567 - /* unset promiscuity level from slave */
568 - if (bond_dev->flags & IFF_PROMISC)
569 + /* unset promiscuity level from slave
570 + * NOTE: The NETDEV_CHANGEADDR call above may change the value
571 + * of the IFF_PROMISC flag in the bond_dev, but we need the
572 + * value of that flag before that change, as that was the value
573 + * when this slave was attached, so we cache at the start of the
574 + * function and use it here. Same goes for ALLMULTI below
575 + */
576 + if (old_flags & IFF_PROMISC)
577 dev_set_promiscuity(slave_dev, -1);
578
579 /* unset allmulti level from slave */
580 - if (bond_dev->flags & IFF_ALLMULTI)
581 + if (old_flags & IFF_ALLMULTI)
582 dev_set_allmulti(slave_dev, -1);
583
584 /* flush master's mc_list from slave */
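
The bonding hunk explains its own reasoning in the new comment: notifiers invoked earlier in bond_release() can change IFF_PROMISC/IFF_ALLMULTI on the bond device, so the decision must be based on a snapshot of the flags taken at function entry. A minimal sketch of that snapshot pattern (an illustrative function, not the real bond_release()):

#include <linux/netdevice.h>
#include <linux/if.h>

static void example_detach(struct net_device *master, struct net_device *slave)
{
        int old_flags = master->flags;  /* snapshot before anything can change it */

        /* ... address changes / notifier calls that may modify master->flags ... */

        if (old_flags & IFF_PROMISC)    /* decide on the snapshot, not the current flags */
                dev_set_promiscuity(slave, -1);
        if (old_flags & IFF_ALLMULTI)
                dev_set_allmulti(slave, -1);
}
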
585 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
586 index 0819a74..d5de9a4 100644
587 --- a/drivers/net/ethernet/freescale/gianfar.c
588 +++ b/drivers/net/ethernet/freescale/gianfar.c
589 @@ -394,7 +394,13 @@ static void gfar_init_mac(struct net_device *ndev)
590 if (ndev->features & NETIF_F_IP_CSUM)
591 tctrl |= TCTRL_INIT_CSUM;
592
593 - tctrl |= TCTRL_TXSCHED_PRIO;
594 + if (priv->prio_sched_en)
595 + tctrl |= TCTRL_TXSCHED_PRIO;
596 + else {
597 + tctrl |= TCTRL_TXSCHED_WRRS;
598 + gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
599 + gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
600 + }
601
602 gfar_write(&regs->tctrl, tctrl);
603
604 @@ -1153,6 +1159,9 @@ static int gfar_probe(struct platform_device *ofdev)
605 priv->rx_filer_enable = 1;
606 /* Enable most messages by default */
607 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
608 + /* use pritority h/w tx queue scheduling for single queue devices */
609 + if (priv->num_tx_queues == 1)
610 + priv->prio_sched_en = 1;
611
612 /* Carrier starts down, phylib will bring it up */
613 netif_carrier_off(dev);
614 diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
615 index 4c9f8d4..348b6dd 100644
616 --- a/drivers/net/ethernet/freescale/gianfar.h
617 +++ b/drivers/net/ethernet/freescale/gianfar.h
618 @@ -301,8 +301,16 @@ extern const char gfar_driver_version[];
619 #define TCTRL_TFCPAUSE 0x00000008
620 #define TCTRL_TXSCHED_MASK 0x00000006
621 #define TCTRL_TXSCHED_INIT 0x00000000
622 +/* priority scheduling */
623 #define TCTRL_TXSCHED_PRIO 0x00000002
624 +/* weighted round-robin scheduling (WRRS) */
625 #define TCTRL_TXSCHED_WRRS 0x00000004
626 +/* default WRRS weight and policy setting,
627 + * tailored to the tr03wt and tr47wt registers:
628 + * equal weight for all Tx Qs, measured in 64byte units
629 + */
630 +#define DEFAULT_WRRS_WEIGHT 0x18181818
631 +
632 #define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
633
634 #define IEVENT_INIT_CLEAR 0xffffffff
635 @@ -1098,7 +1106,8 @@ struct gfar_private {
636 extended_hash:1,
637 bd_stash_en:1,
638 rx_filer_enable:1,
639 - wol_en:1; /* Wake-on-LAN enabled */
640 + wol_en:1, /* Wake-on-LAN enabled */
641 + prio_sched_en:1; /* Enable priorty based Tx scheduling in Hw */
642 unsigned short padding;
643
644 /* PHY stuff */
645 diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
646 index 4c76db4..5fa0880 100644
647 --- a/drivers/net/ethernet/via/via-rhine.c
648 +++ b/drivers/net/ethernet/via/via-rhine.c
649 @@ -32,7 +32,7 @@
650 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
651
652 #define DRV_NAME "via-rhine"
653 -#define DRV_VERSION "1.5.0"
654 +#define DRV_VERSION "1.5.1"
655 #define DRV_RELDATE "2010-10-09"
656
657 #include <linux/types.h>
658 @@ -1684,7 +1684,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
659 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
660
661 if (unlikely(vlan_tx_tag_present(skb))) {
662 - rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
663 + u16 vid_pcp = vlan_tx_tag_get(skb);
664 +
665 + /* drop CFI/DEI bit, register needs VID and PCP */
666 + vid_pcp = (vid_pcp & VLAN_VID_MASK) |
667 + ((vid_pcp & VLAN_PRIO_MASK) >> 1);
668 + rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
669 /* request tagging */
670 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
671 }
672 diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
673 index d21591a..5e5b791 100644
674 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c
675 +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
676 @@ -308,6 +308,12 @@ static int temac_dma_bd_init(struct net_device *ndev)
677 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
678 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
679
680 + /* Init descriptor indexes */
681 + lp->tx_bd_ci = 0;
682 + lp->tx_bd_next = 0;
683 + lp->tx_bd_tail = 0;
684 + lp->rx_bd_ci = 0;
685 +
686 return 0;
687
688 out:
689 diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
690 index f617566..8327cc5 100644
691 --- a/drivers/net/ppp/pptp.c
692 +++ b/drivers/net/ppp/pptp.c
693 @@ -281,7 +281,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
694 nf_reset(skb);
695
696 skb->ip_summed = CHECKSUM_NONE;
697 - ip_select_ident(iph, &rt->dst, NULL);
698 + ip_select_ident(skb, &rt->dst, NULL);
699 ip_send_check(iph);
700
701 ip_local_out(skb);
702 diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
703 index b972263..905b16f 100644
704 --- a/drivers/net/usb/dm9601.c
705 +++ b/drivers/net/usb/dm9601.c
706 @@ -384,7 +384,7 @@ static void dm9601_set_multicast(struct net_device *net)
707 rx_ctl |= 0x02;
708 } else if (net->flags & IFF_ALLMULTI ||
709 netdev_mc_count(net) > DM_MAX_MCAST) {
710 - rx_ctl |= 0x04;
711 + rx_ctl |= 0x08;
712 } else if (!netdev_mc_empty(net)) {
713 struct netdev_hw_addr *ha;
714
715 diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
716 index af30777..c31ac08 100644
717 --- a/drivers/net/wireless/p54/p54usb.c
718 +++ b/drivers/net/wireless/p54/p54usb.c
719 @@ -83,6 +83,7 @@ static struct usb_device_id p54u_table[] = {
720 {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */
721 {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
722 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
723 + {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */
724 {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */
725 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
726 {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */
727 diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
728 index 717d3ba..38c51ea 100644
729 --- a/drivers/net/wireless/rtlwifi/wifi.h
730 +++ b/drivers/net/wireless/rtlwifi/wifi.h
731 @@ -1638,7 +1638,7 @@ struct rtl_priv {
732 that it points to the data allocated
733 beyond this structure like:
734 rtl_pci_priv or rtl_usb_priv */
735 - u8 priv[0];
736 + u8 priv[0] __aligned(sizeof(void *));
737 };
738
739 #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
740 diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
741 index 394ed9e..4aa30d8 100644
742 --- a/drivers/scsi/esp_scsi.c
743 +++ b/drivers/scsi/esp_scsi.c
744 @@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
745 static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
746 struct esp_lun_data *lp)
747 {
748 - if (!ent->tag[0]) {
749 + if (!ent->orig_tag[0]) {
750 /* Non-tagged, slot already taken? */
751 if (lp->non_tagged_cmd)
752 return -EBUSY;
753 @@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
754 return -EBUSY;
755 }
756
757 - BUG_ON(lp->tagged_cmds[ent->tag[1]]);
758 + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
759
760 - lp->tagged_cmds[ent->tag[1]] = ent;
761 + lp->tagged_cmds[ent->orig_tag[1]] = ent;
762 lp->num_tagged++;
763
764 return 0;
765 @@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
766 static void esp_free_lun_tag(struct esp_cmd_entry *ent,
767 struct esp_lun_data *lp)
768 {
769 - if (ent->tag[0]) {
770 - BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
771 - lp->tagged_cmds[ent->tag[1]] = NULL;
772 + if (ent->orig_tag[0]) {
773 + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
774 + lp->tagged_cmds[ent->orig_tag[1]] = NULL;
775 lp->num_tagged--;
776 } else {
777 BUG_ON(lp->non_tagged_cmd != ent);
778 @@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
779 ent->tag[0] = 0;
780 ent->tag[1] = 0;
781 }
782 + ent->orig_tag[0] = ent->tag[0];
783 + ent->orig_tag[1] = ent->tag[1];
784
785 if (esp_alloc_lun_tag(ent, lp) < 0)
786 continue;
787 diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
788 index 28e22ac..cd68805 100644
789 --- a/drivers/scsi/esp_scsi.h
790 +++ b/drivers/scsi/esp_scsi.h
791 @@ -271,6 +271,7 @@ struct esp_cmd_entry {
792 #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
793
794 u8 tag[2];
795 + u8 orig_tag[2];
796
797 u8 status;
798 u8 message;
799 diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
800 index 403fc09..8b564ad 100644
801 --- a/drivers/staging/comedi/drivers/ni_65xx.c
802 +++ b/drivers/staging/comedi/drivers/ni_65xx.c
803 @@ -411,29 +411,25 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
804 struct comedi_subdevice *s,
805 struct comedi_insn *insn, unsigned int *data)
806 {
807 - unsigned base_bitfield_channel;
808 - const unsigned max_ports_per_bitfield = 5;
809 + int base_bitfield_channel;
810 unsigned read_bits = 0;
811 - unsigned j;
812 + int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1);
813 + int port_offset;
814 +
815 if (insn->n != 2)
816 return -EINVAL;
817 base_bitfield_channel = CR_CHAN(insn->chanspec);
818 - for (j = 0; j < max_ports_per_bitfield; ++j) {
819 - const unsigned port_offset =
820 - ni_65xx_port_by_channel(base_bitfield_channel) + j;
821 - const unsigned port =
822 - sprivate(s)->base_port + port_offset;
823 - unsigned base_port_channel;
824 + for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel);
825 + port_offset <= last_port_offset; port_offset++) {
826 + unsigned port = sprivate(s)->base_port + port_offset;
827 + int base_port_channel = port_offset * ni_65xx_channels_per_port;
828 unsigned port_mask, port_data, port_read_bits;
829 - int bitshift;
830 - if (port >= ni_65xx_total_num_ports(board(dev)))
831 + int bitshift = base_port_channel - base_bitfield_channel;
832 +
833 + if (bitshift >= 32)
834 break;
835 - base_port_channel = port_offset * ni_65xx_channels_per_port;
836 port_mask = data[0];
837 port_data = data[1];
838 - bitshift = base_port_channel - base_bitfield_channel;
839 - if (bitshift >= 32 || bitshift <= -32)
840 - break;
841 if (bitshift > 0) {
842 port_mask >>= bitshift;
843 port_data >>= bitshift;
844 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
845 index a37f14c..9def72f 100644
846 --- a/drivers/usb/serial/option.c
847 +++ b/drivers/usb/serial/option.c
848 @@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb);
849
850 #define HUAWEI_VENDOR_ID 0x12D1
851 #define HUAWEI_PRODUCT_E173 0x140C
852 +#define HUAWEI_PRODUCT_E1750 0x1406
853 #define HUAWEI_PRODUCT_K4505 0x1464
854 #define HUAWEI_PRODUCT_K3765 0x1465
855 #define HUAWEI_PRODUCT_K4605 0x14C6
856 @@ -581,6 +582,8 @@ static const struct usb_device_id option_ids[] = {
857 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
858 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
859 .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
860 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
861 + .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
862 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
863 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
864 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
865 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
866 index 646ee21..92841a7 100644
867 --- a/fs/btrfs/relocation.c
868 +++ b/fs/btrfs/relocation.c
869 @@ -684,6 +684,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
870 int cowonly;
871 int ret;
872 int err = 0;
873 + bool need_check = true;
874
875 path1 = btrfs_alloc_path();
876 path2 = btrfs_alloc_path();
877 @@ -906,6 +907,7 @@ again:
878 cur->bytenr);
879
880 lower = cur;
881 + need_check = true;
882 for (; level < BTRFS_MAX_LEVEL; level++) {
883 if (!path2->nodes[level]) {
884 BUG_ON(btrfs_root_bytenr(&root->root_item) !=
885 @@ -949,14 +951,12 @@ again:
886
887 /*
888 * add the block to pending list if we
889 - * need check its backrefs. only block
890 - * at 'cur->level + 1' is added to the
891 - * tail of pending list. this guarantees
892 - * we check backrefs from lower level
893 - * blocks to upper level blocks.
894 + * need check its backrefs, we only do this once
895 + * while walking up a tree as we will catch
896 + * anything else later on.
897 */
898 - if (!upper->checked &&
899 - level == cur->level + 1) {
900 + if (!upper->checked && need_check) {
901 + need_check = false;
902 list_add_tail(&edge->list[UPPER],
903 &list);
904 } else
905 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
906 index 9fb3fae..54ad9a5 100644
907 --- a/fs/ext4/namei.c
908 +++ b/fs/ext4/namei.c
909 @@ -2054,7 +2054,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
910 int err = 0;
911
912 /* ext4_handle_valid() assumes a valid handle_t pointer */
913 - if (handle && !ext4_handle_valid(handle))
914 + if (handle && !ext4_handle_valid(handle) &&
915 + !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS))
916 return 0;
917
918 mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock);
919 diff --git a/include/linux/mm.h b/include/linux/mm.h
920 index ece5ff4..dbca4b2 100644
921 --- a/include/linux/mm.h
922 +++ b/include/linux/mm.h
923 @@ -866,7 +866,8 @@ extern void pagefault_out_of_memory(void);
924 * Flags passed to show_mem() and show_free_areas() to suppress output in
925 * various contexts.
926 */
927 -#define SHOW_MEM_FILTER_NODES (0x0001u) /* filter disallowed nodes */
928 +#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
929 +#define SHOW_MEM_FILTER_PAGE_COUNT (0x0002u) /* page type count */
930
931 extern void show_free_areas(unsigned int flags);
932 extern bool skip_free_areas_node(unsigned int flags, int nid);
933 diff --git a/include/net/ip.h b/include/net/ip.h
934 index b53d65f..0750bf7 100644
935 --- a/include/net/ip.h
936 +++ b/include/net/ip.h
937 @@ -266,9 +266,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
938
939 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
940
941 -static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
942 +static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk)
943 {
944 - if (iph->frag_off & htons(IP_DF)) {
945 + struct iphdr *iph = ip_hdr(skb);
946 +
947 + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
948 /* This is only to work around buggy Windows95/2000
949 * VJ compression implementations. If the ID field
950 * does not change, they drop every other packet in
951 @@ -280,9 +282,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str
952 __ip_select_ident(iph, dst, 0);
953 }
954
955 -static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
956 +static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more)
957 {
958 - if (iph->frag_off & htons(IP_DF)) {
959 + struct iphdr *iph = ip_hdr(skb);
960 +
961 + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) {
962 if (sk && inet_sk(sk)->inet_daddr) {
963 iph->id = htons(inet_sk(sk)->inet_id);
964 inet_sk(sk)->inet_id += 1 + more;
965 diff --git a/include/net/ipip.h b/include/net/ipip.h
966 index a32654d..4dccfe3 100644
967 --- a/include/net/ipip.h
968 +++ b/include/net/ipip.h
969 @@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry {
970 int pkt_len = skb->len - skb_transport_offset(skb); \
971 \
972 skb->ip_summed = CHECKSUM_NONE; \
973 - ip_select_ident(iph, &rt->dst, NULL); \
974 + ip_select_ident(skb, &rt->dst, NULL); \
975 \
976 err = ip_local_out(skb); \
977 if (likely(net_xmit_eval(err) == 0)) { \
978 diff --git a/lib/show_mem.c b/lib/show_mem.c
979 index 4407f8c..b7c7231 100644
980 --- a/lib/show_mem.c
981 +++ b/lib/show_mem.c
982 @@ -18,6 +18,9 @@ void show_mem(unsigned int filter)
983 printk("Mem-Info:\n");
984 show_free_areas(filter);
985
986 + if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
987 + return;
988 +
989 for_each_online_pgdat(pgdat) {
990 unsigned long i, flags;
991
992 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
993 index 8090542..508822e 100644
994 --- a/mm/page_alloc.c
995 +++ b/mm/page_alloc.c
996 @@ -1885,6 +1885,13 @@ void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
997 return;
998
999 /*
1000 + * Walking all memory to count page types is very expensive and should
1001 + * be inhibited in non-blockable contexts.
1002 + */
1003 + if (!(gfp_mask & __GFP_WAIT))
1004 + filter |= SHOW_MEM_FILTER_PAGE_COUNT;
1005 +
1006 + /*
1007 * This documents exceptions given to allocations in certain
1008 * contexts that are allowed to allocate outside current's set
1009 * of allowed nodes.
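
The mm.h, lib/show_mem.c, arch show_mem() and page_alloc.c hunks are two halves of one fix: warn_alloc_failed() sets the new SHOW_MEM_FILTER_PAGE_COUNT bit whenever the failed allocation could not block, and every show_mem() implementation bails out before its expensive full-memory walk when that bit is set. Roughly, each implementation takes this shape after the patch (sketch only, renamed to avoid clashing with the real symbol):

#include <linux/mm.h>
#include <linux/printk.h>

void example_show_mem(unsigned int filter)
{
        printk("Mem-Info:\n");
        show_free_areas(filter);        /* cheap per-zone summary, always printed */

        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
                return;                 /* skip the costly page-type walk in atomic contexts */

        /* ... walk every online node/page and count page types ... */
}
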
1010 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1011 index ef45e10..1e6347c 100644
1012 --- a/net/bluetooth/hci_event.c
1013 +++ b/net/bluetooth/hci_event.c
1014 @@ -3375,7 +3375,11 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
1015 cp.handle = cpu_to_le16(conn->handle);
1016
1017 if (ltk->authenticated)
1018 - conn->sec_level = BT_SECURITY_HIGH;
1019 + conn->pending_sec_level = BT_SECURITY_HIGH;
1020 + else
1021 + conn->pending_sec_level = BT_SECURITY_MEDIUM;
1022 +
1023 + conn->enc_key_size = ltk->enc_size;
1024
1025 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
1026
1027 diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
1028 index 51e8826..1237006 100644
1029 --- a/net/bridge/br_private.h
1030 +++ b/net/bridge/br_private.h
1031 @@ -501,6 +501,7 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
1032 extern void br_init_port(struct net_bridge_port *p);
1033 extern void br_become_designated_port(struct net_bridge_port *p);
1034
1035 +extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t);
1036 extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
1037 extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
1038 extern int br_set_max_age(struct net_bridge *br, unsigned long x);
1039 diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
1040 index 8c836d9..eca1c94 100644
1041 --- a/net/bridge/br_stp.c
1042 +++ b/net/bridge/br_stp.c
1043 @@ -189,7 +189,7 @@ static void br_record_config_information(struct net_bridge_port *p,
1044 p->designated_age = jiffies - bpdu->message_age;
1045
1046 mod_timer(&p->message_age_timer, jiffies
1047 - + (p->br->max_age - bpdu->message_age));
1048 + + (bpdu->max_age - bpdu->message_age));
1049 }
1050
1051 /* called under bridge lock */
1052 @@ -517,18 +517,27 @@ int br_set_max_age(struct net_bridge *br, unsigned long val)
1053
1054 }
1055
1056 +void __br_set_forward_delay(struct net_bridge *br, unsigned long t)
1057 +{
1058 + br->bridge_forward_delay = t;
1059 + if (br_is_root_bridge(br))
1060 + br->forward_delay = br->bridge_forward_delay;
1061 +}
1062 +
1063 int br_set_forward_delay(struct net_bridge *br, unsigned long val)
1064 {
1065 unsigned long t = clock_t_to_jiffies(val);
1066 + int err = -ERANGE;
1067
1068 + spin_lock_bh(&br->lock);
1069 if (br->stp_enabled != BR_NO_STP &&
1070 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
1071 - return -ERANGE;
1072 + goto unlock;
1073
1074 - spin_lock_bh(&br->lock);
1075 - br->bridge_forward_delay = t;
1076 - if (br_is_root_bridge(br))
1077 - br->forward_delay = br->bridge_forward_delay;
1078 + __br_set_forward_delay(br, t);
1079 + err = 0;
1080 +
1081 +unlock:
1082 spin_unlock_bh(&br->lock);
1083 - return 0;
1084 + return err;
1085 }
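
The bridge change above factors the unlocked setter into __br_set_forward_delay() so br_stp_start() can reuse it while already holding br->lock, and it moves the range check under the lock with a single unlock path. A generic sketch of that validate-then-set-under-lock shape, with illustrative names and limits:

#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define EXAMPLE_MIN_DELAY       (2 * HZ)
#define EXAMPLE_MAX_DELAY       (30 * HZ)

struct example_bridge {
        spinlock_t lock;
        unsigned long forward_delay;
};

/* caller must hold br->lock */
static void __example_set_delay(struct example_bridge *br, unsigned long t)
{
        br->forward_delay = t;
}

static int example_set_delay(struct example_bridge *br, unsigned long val)
{
        unsigned long t = clock_t_to_jiffies(val);
        int err = -ERANGE;

        spin_lock_bh(&br->lock);
        if (t < EXAMPLE_MIN_DELAY || t > EXAMPLE_MAX_DELAY)
                goto unlock;

        __example_set_delay(br, t);     /* shared with callers already holding the lock */
        err = 0;
unlock:
        spin_unlock_bh(&br->lock);
        return err;
}
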
1086 diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
1087 index f494496..f774796 100644
1088 --- a/net/bridge/br_stp_if.c
1089 +++ b/net/bridge/br_stp_if.c
1090 @@ -129,6 +129,14 @@ static void br_stp_start(struct net_bridge *br)
1091 char *envp[] = { NULL };
1092
1093 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
1094 +
1095 + spin_lock_bh(&br->lock);
1096 +
1097 + if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY)
1098 + __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY);
1099 + else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY)
1100 + __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY);
1101 +
1102 if (r == 0) {
1103 br->stp_enabled = BR_USER_STP;
1104 br_debug(br, "userspace STP started\n");
1105 @@ -137,10 +145,10 @@ static void br_stp_start(struct net_bridge *br)
1106 br_debug(br, "using kernel STP\n");
1107
1108 /* To start timers on any ports left in blocking */
1109 - spin_lock_bh(&br->lock);
1110 br_port_state_selection(br);
1111 - spin_unlock_bh(&br->lock);
1112 }
1113 +
1114 + spin_unlock_bh(&br->lock);
1115 }
1116
1117 static void br_stp_stop(struct net_bridge *br)
1118 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
1119 index 5cf5222..84efbe4 100644
1120 --- a/net/caif/cfctrl.c
1121 +++ b/net/caif/cfctrl.c
1122 @@ -288,9 +288,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
1123
1124 count = cfctrl_cancel_req(&cfctrl->serv.layer,
1125 user_layer);
1126 - if (count != 1)
1127 + if (count != 1) {
1128 pr_err("Could not remove request (%d)", count);
1129 return -ENODEV;
1130 + }
1131 }
1132 return 0;
1133 }
1134 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1135 index a225089..2774788 100644
1136 --- a/net/core/flow_dissector.c
1137 +++ b/net/core/flow_dissector.c
1138 @@ -133,8 +133,8 @@ ipv6:
1139 if (poff >= 0) {
1140 __be32 *ports, _ports;
1141
1142 - nhoff += poff;
1143 - ports = skb_header_pointer(skb, nhoff, sizeof(_ports), &_ports);
1144 + ports = skb_header_pointer(skb, nhoff + poff,
1145 + sizeof(_ports), &_ports);
1146 if (ports)
1147 flow->ports = *ports;
1148 }
1149 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
1150 index f9f40b9..5e81c49 100644
1151 --- a/net/core/netpoll.c
1152 +++ b/net/core/netpoll.c
1153 @@ -920,15 +920,14 @@ EXPORT_SYMBOL_GPL(__netpoll_cleanup);
1154
1155 void netpoll_cleanup(struct netpoll *np)
1156 {
1157 - if (!np->dev)
1158 - return;
1159 -
1160 rtnl_lock();
1161 + if (!np->dev)
1162 + goto out;
1163 __netpoll_cleanup(np);
1164 - rtnl_unlock();
1165 -
1166 dev_put(np->dev);
1167 np->dev = NULL;
1168 +out:
1169 + rtnl_unlock();
1170 }
1171 EXPORT_SYMBOL(netpoll_cleanup);
1172
1173 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1174 index 5dfecfd..c8e2699 100644
1175 --- a/net/ipv4/igmp.c
1176 +++ b/net/ipv4/igmp.c
1177 @@ -343,7 +343,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
1178 pip->saddr = fl4.saddr;
1179 pip->protocol = IPPROTO_IGMP;
1180 pip->tot_len = 0; /* filled in later */
1181 - ip_select_ident(pip, &rt->dst, NULL);
1182 + ip_select_ident(skb, &rt->dst, NULL);
1183 ((u8*)&pip[1])[0] = IPOPT_RA;
1184 ((u8*)&pip[1])[1] = 4;
1185 ((u8*)&pip[1])[2] = 0;
1186 @@ -687,7 +687,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
1187 iph->daddr = dst;
1188 iph->saddr = fl4.saddr;
1189 iph->protocol = IPPROTO_IGMP;
1190 - ip_select_ident(iph, &rt->dst, NULL);
1191 + ip_select_ident(skb, &rt->dst, NULL);
1192 ((u8*)&iph[1])[0] = IPOPT_RA;
1193 ((u8*)&iph[1])[1] = 4;
1194 ((u8*)&iph[1])[2] = 0;
1195 @@ -709,7 +709,7 @@ static void igmp_gq_timer_expire(unsigned long data)
1196
1197 in_dev->mr_gq_running = 0;
1198 igmpv3_send_report(in_dev, NULL);
1199 - __in_dev_put(in_dev);
1200 + in_dev_put(in_dev);
1201 }
1202
1203 static void igmp_ifc_timer_expire(unsigned long data)
1204 @@ -721,7 +721,7 @@ static void igmp_ifc_timer_expire(unsigned long data)
1205 in_dev->mr_ifc_count--;
1206 igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
1207 }
1208 - __in_dev_put(in_dev);
1209 + in_dev_put(in_dev);
1210 }
1211
1212 static void igmp_ifc_event(struct in_device *in_dev)
1213 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
1214 index dfba343..d63a926 100644
1215 --- a/net/ipv4/inetpeer.c
1216 +++ b/net/ipv4/inetpeer.c
1217 @@ -32,8 +32,8 @@
1218 * At the moment of writing this notes identifier of IP packets is generated
1219 * to be unpredictable using this code only for packets subjected
1220 * (actually or potentially) to defragmentation. I.e. DF packets less than
1221 - * PMTU in size uses a constant ID and do not use this code (see
1222 - * ip_select_ident() in include/net/ip.h).
1223 + * PMTU in size when local fragmentation is disabled use a constant ID and do
1224 + * not use this code (see ip_select_ident() in include/net/ip.h).
1225 *
1226 * Route cache entries hold references to our nodes.
1227 * New cache entries get references via lookup by destination IP address in
1228 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1229 index 4910176..3bc4c97 100644
1230 --- a/net/ipv4/ip_output.c
1231 +++ b/net/ipv4/ip_output.c
1232 @@ -161,7 +161,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
1233 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
1234 iph->saddr = saddr;
1235 iph->protocol = sk->sk_protocol;
1236 - ip_select_ident(iph, &rt->dst, sk);
1237 + ip_select_ident(skb, &rt->dst, sk);
1238
1239 if (opt && opt->opt.optlen) {
1240 iph->ihl += opt->opt.optlen>>2;
1241 @@ -403,7 +403,7 @@ packet_routed:
1242 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
1243 }
1244
1245 - ip_select_ident_more(iph, &rt->dst, sk,
1246 + ip_select_ident_more(skb, &rt->dst, sk,
1247 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
1248
1249 skb->priority = sk->sk_priority;
1250 @@ -1342,12 +1342,12 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
1251 else
1252 ttl = ip_select_ttl(inet, &rt->dst);
1253
1254 - iph = (struct iphdr *)skb->data;
1255 + iph = ip_hdr(skb);
1256 iph->version = 4;
1257 iph->ihl = 5;
1258 iph->tos = inet->tos;
1259 iph->frag_off = df;
1260 - ip_select_ident(iph, &rt->dst, sk);
1261 + ip_select_ident(skb, &rt->dst, sk);
1262 iph->ttl = ttl;
1263 iph->protocol = sk->sk_protocol;
1264 ip_copy_addrs(iph, fl4);
1265 diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
1266 index 8626b64..a0b7166 100644
1267 --- a/net/ipv4/ipmr.c
1268 +++ b/net/ipv4/ipmr.c
1269 @@ -1573,7 +1573,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1270 iph->protocol = IPPROTO_IPIP;
1271 iph->ihl = 5;
1272 iph->tot_len = htons(skb->len);
1273 - ip_select_ident(iph, skb_dst(skb), NULL);
1274 + ip_select_ident(skb, skb_dst(skb), NULL);
1275 ip_send_check(iph);
1276
1277 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1278 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
1279 index 2fe0dc2..c6b9ca6 100644
1280 --- a/net/ipv4/raw.c
1281 +++ b/net/ipv4/raw.c
1282 @@ -384,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
1283 iph->check = 0;
1284 iph->tot_len = htons(length);
1285 if (!iph->id)
1286 - ip_select_ident(iph, &rt->dst, NULL);
1287 + ip_select_ident(skb, &rt->dst, NULL);
1288
1289 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1290 }
1291 diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
1292 index ed4bf11..938553e 100644
1293 --- a/net/ipv4/xfrm4_mode_tunnel.c
1294 +++ b/net/ipv4/xfrm4_mode_tunnel.c
1295 @@ -54,7 +54,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
1296
1297 top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
1298 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
1299 - ip_select_ident(top_iph, dst->child, NULL);
1300 + ip_select_ident(skb, dst->child, NULL);
1301
1302 top_iph->ttl = ip4_dst_hoplimit(dst->child);
1303
1304 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1305 index d6b9d56..1acfb19 100644
1306 --- a/net/ipv6/ip6_output.c
1307 +++ b/net/ipv6/ip6_output.c
1308 @@ -1128,6 +1128,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1309 * udp datagram
1310 */
1311 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
1312 + struct frag_hdr fhdr;
1313 +
1314 skb = sock_alloc_send_skb(sk,
1315 hh_len + fragheaderlen + transhdrlen + 20,
1316 (flags & MSG_DONTWAIT), &err);
1317 @@ -1148,12 +1150,6 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1318
1319 skb->ip_summed = CHECKSUM_PARTIAL;
1320 skb->csum = 0;
1321 - }
1322 -
1323 - err = skb_append_datato_frags(sk,skb, getfrag, from,
1324 - (length - transhdrlen));
1325 - if (!err) {
1326 - struct frag_hdr fhdr;
1327
1328 /* Specify the length of each IPv6 datagram fragment.
1329 * It has to be a multiple of 8.
1330 @@ -1164,15 +1160,10 @@ static inline int ip6_ufo_append_data(struct sock *sk,
1331 ipv6_select_ident(&fhdr, rt);
1332 skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
1333 __skb_queue_tail(&sk->sk_write_queue, skb);
1334 -
1335 - return 0;
1336 }
1337 - /* There is not enough support do UPD LSO,
1338 - * so follow normal path
1339 - */
1340 - kfree_skb(skb);
1341
1342 - return err;
1343 + return skb_append_datato_frags(sk, skb, getfrag, from,
1344 + (length - transhdrlen));
1345 }
1346
1347 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1348 @@ -1345,27 +1336,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1349 * --yoshfuji
1350 */
1351
1352 - cork->length += length;
1353 - if (length > mtu) {
1354 - int proto = sk->sk_protocol;
1355 - if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
1356 - ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1357 - return -EMSGSIZE;
1358 - }
1359 -
1360 - if (proto == IPPROTO_UDP &&
1361 - (rt->dst.dev->features & NETIF_F_UFO)) {
1362 + if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
1363 + sk->sk_protocol == IPPROTO_RAW)) {
1364 + ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
1365 + return -EMSGSIZE;
1366 + }
1367
1368 - err = ip6_ufo_append_data(sk, getfrag, from, length,
1369 - hh_len, fragheaderlen,
1370 - transhdrlen, mtu, flags, rt);
1371 - if (err)
1372 - goto error;
1373 - return 0;
1374 - }
1375 + skb = skb_peek_tail(&sk->sk_write_queue);
1376 + cork->length += length;
1377 + if (((length > mtu) ||
1378 + (skb && skb_is_gso(skb))) &&
1379 + (sk->sk_protocol == IPPROTO_UDP) &&
1380 + (rt->dst.dev->features & NETIF_F_UFO)) {
1381 + err = ip6_ufo_append_data(sk, getfrag, from, length,
1382 + hh_len, fragheaderlen,
1383 + transhdrlen, mtu, flags, rt);
1384 + if (err)
1385 + goto error;
1386 + return 0;
1387 }
1388
1389 - if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1390 + if (!skb)
1391 goto alloc_new_skb;
1392
1393 while (length > 0) {
1394 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1395 index 6a4ab24..9728df5 100644
1396 --- a/net/ipv6/mcast.c
1397 +++ b/net/ipv6/mcast.c
1398 @@ -2149,7 +2149,7 @@ static void mld_gq_timer_expire(unsigned long data)
1399
1400 idev->mc_gq_running = 0;
1401 mld_send_report(idev, NULL);
1402 - __in6_dev_put(idev);
1403 + in6_dev_put(idev);
1404 }
1405
1406 static void mld_ifc_timer_expire(unsigned long data)
1407 @@ -2162,7 +2162,7 @@ static void mld_ifc_timer_expire(unsigned long data)
1408 if (idev->mc_ifc_count)
1409 mld_ifc_start_timer(idev, idev->mc_maxdelay);
1410 }
1411 - __in6_dev_put(idev);
1412 + in6_dev_put(idev);
1413 }
1414
1415 static void mld_ifc_event(struct inet6_dev *idev)
1416 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
1417 index 7fd66de..ec78ab6 100644
1418 --- a/net/netfilter/ipvs/ip_vs_xmit.c
1419 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
1420 @@ -853,7 +853,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1421 iph->daddr = cp->daddr.ip;
1422 iph->saddr = saddr;
1423 iph->ttl = old_iph->ttl;
1424 - ip_select_ident(iph, &rt->dst, NULL);
1425 + ip_select_ident(skb, &rt->dst, NULL);
1426
1427 /* Another hack: avoid icmp_send in ip_fragment */
1428 skb->local_df = 1;
1429 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1430 index 91f4791..53a7f03 100644
1431 --- a/net/sctp/ipv6.c
1432 +++ b/net/sctp/ipv6.c
1433 @@ -205,45 +205,24 @@ out:
1434 in6_dev_put(idev);
1435 }
1436
1437 -/* Based on tcp_v6_xmit() in tcp_ipv6.c. */
1438 static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
1439 {
1440 struct sock *sk = skb->sk;
1441 struct ipv6_pinfo *np = inet6_sk(sk);
1442 - struct flowi6 fl6;
1443 -
1444 - memset(&fl6, 0, sizeof(fl6));
1445 -
1446 - fl6.flowi6_proto = sk->sk_protocol;
1447 -
1448 - /* Fill in the dest address from the route entry passed with the skb
1449 - * and the source address from the transport.
1450 - */
1451 - fl6.daddr = transport->ipaddr.v6.sin6_addr;
1452 - fl6.saddr = transport->saddr.v6.sin6_addr;
1453 -
1454 - fl6.flowlabel = np->flow_label;
1455 - IP6_ECN_flow_xmit(sk, fl6.flowlabel);
1456 - if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL)
1457 - fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id;
1458 - else
1459 - fl6.flowi6_oif = sk->sk_bound_dev_if;
1460 -
1461 - if (np->opt && np->opt->srcrt) {
1462 - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1463 - fl6.daddr = *rt0->addr;
1464 - }
1465 + struct flowi6 *fl6 = &transport->fl.u.ip6;
1466
1467 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n",
1468 __func__, skb, skb->len,
1469 - &fl6.saddr, &fl6.daddr);
1470 + &fl6->saddr, &fl6->daddr);
1471
1472 - SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
1473 + IP6_ECN_flow_xmit(sk, fl6->flowlabel);
1474
1475 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
1476 skb->local_df = 1;
1477
1478 - return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
1479 + SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
1480 +
1481 + return ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
1482 }
1483
1484 /* Returns the dst cache entry for the given source and destination ip
1485 @@ -256,10 +235,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
1486 struct dst_entry *dst = NULL;
1487 struct flowi6 *fl6 = &fl->u.ip6;
1488 struct sctp_bind_addr *bp;
1489 + struct ipv6_pinfo *np = inet6_sk(sk);
1490 struct sctp_sockaddr_entry *laddr;
1491 union sctp_addr *baddr = NULL;
1492 union sctp_addr *daddr = &t->ipaddr;
1493 union sctp_addr dst_saddr;
1494 + struct in6_addr *final_p, final;
1495 __u8 matchlen = 0;
1496 __u8 bmatchlen;
1497 sctp_scope_t scope;
1498 @@ -282,7 +263,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
1499 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
1500 }
1501
1502 - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
1503 + final_p = fl6_update_dst(fl6, np->opt, &final);
1504 + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
1505 if (!asoc || saddr)
1506 goto out;
1507
1508 @@ -333,10 +315,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
1509 }
1510 }
1511 rcu_read_unlock();
1512 +
1513 if (baddr) {
1514 fl6->saddr = baddr->v6.sin6_addr;
1515 fl6->fl6_sport = baddr->v6.sin6_port;
1516 - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
1517 + final_p = fl6_update_dst(fl6, np->opt, &final);
1518 + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
1519 }
1520
1521 out:
1522 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
1523 index 4bc6e0b..e9e50ca 100644
1524 --- a/net/sctp/socket.c
1525 +++ b/net/sctp/socket.c
1526 @@ -814,6 +814,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
1527 goto skip_mkasconf;
1528 }
1529
1530 + if (laddr == NULL)
1531 + return -EINVAL;
1532 +
1533 /* We do not need RCU protection throughout this loop
1534 * because this is done under a socket lock from the
1535 * setsockopt call.
1536 diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
1537 index 9fd3b68..3ff2b94 100644
1538 --- a/sound/soc/codecs/88pm860x-codec.c
1539 +++ b/sound/soc/codecs/88pm860x-codec.c
1540 @@ -351,6 +351,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol,
1541 val = ucontrol->value.integer.value[0];
1542 val2 = ucontrol->value.integer.value[1];
1543
1544 + if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table))
1545 + return -EINVAL;
1546 +
1547 err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m);
1548 if (err < 0)
1549 return err;
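
The 88pm860x change above (and the max98095 change that follows, which makes the index unsigned so a negative value cannot slip past the comparison) guards a table lookup against out-of-range values supplied through the ALSA control interface. A compact sketch of the check, with an illustrative table:

#include <linux/kernel.h>       /* ARRAY_SIZE */
#include <linux/errno.h>

static const unsigned char st_gain_table[] = { 0x00, 0x08, 0x10, 0x18 };

static int example_put_gain(unsigned long val)  /* unsigned on purpose */
{
        if (val >= ARRAY_SIZE(st_gain_table))   /* reject untrusted out-of-range index */
                return -EINVAL;

        return st_gain_table[val];              /* safe table access */
}
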
1550 diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
1551 index 0bb511a..196dfa3 100644
1552 --- a/sound/soc/codecs/max98095.c
1553 +++ b/sound/soc/codecs/max98095.c
1554 @@ -1860,7 +1860,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
1555 struct max98095_pdata *pdata = max98095->pdata;
1556 int channel = max98095_get_eq_channel(kcontrol->id.name);
1557 struct max98095_cdata *cdata;
1558 - int sel = ucontrol->value.integer.value[0];
1559 + unsigned int sel = ucontrol->value.integer.value[0];
1560 struct max98095_eq_cfg *coef_set;
1561 int fs, best, best_val, i;
1562 int regmask, regsave;
1563 @@ -2013,7 +2013,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
1564 struct max98095_pdata *pdata = max98095->pdata;
1565 int channel = max98095_get_bq_channel(codec, kcontrol->id.name);
1566 struct max98095_cdata *cdata;
1567 - int sel = ucontrol->value.integer.value[0];
1568 + unsigned int sel = ucontrol->value.integer.value[0];
1569 struct max98095_biquad_cfg *coef_set;
1570 int fs, best, best_val, i;
1571 int regmask, regsave;