Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.0/0105-4.0.6-all-fixes.patch

Revision 2594
Tue Jun 23 12:01:08 2015 UTC by niro
File size: 125254 bytes
-linux-4.0.6
1 diff --git a/Makefile b/Makefile
2 index 1880cf77059b..af6da040b952 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 0
8 -SUBLEVEL = 5
9 +SUBLEVEL = 6
10 EXTRAVERSION =
11 NAME = Hurr durr I'ma sheep
12
13 diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
14 index c3255e0c90aa..dbb3f4d2bf84 100644
15 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi
16 +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
17 @@ -223,6 +223,25 @@
18 /include/ "tps65217.dtsi"
19
20 &tps {
21 + /*
22 + * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
23 + * mode") at poweroff. Most BeagleBone versions do not support RTC-only
24 + * mode and risk hardware damage if this mode is entered.
25 + *
26 + * For details, see linux-omap mailing list May 2015 thread
27 + * [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
28 + * In particular, messages:
29 + * http://www.spinics.net/lists/linux-omap/msg118585.html
30 + * http://www.spinics.net/lists/linux-omap/msg118615.html
31 + *
32 + * You can override this later with
33 + * &tps { /delete-property/ ti,pmic-shutdown-controller; }
34 + * if you want to use RTC-only mode and made sure you are not affected
35 + * by the hardware problems. (Tip: double-check by performing a current
36 + * measurement after shutdown: it should be less than 1 mA.)
37 + */
38 + ti,pmic-shutdown-controller;
39 +
40 regulators {
41 dcdc1_reg: regulator@0 {
42 regulator-name = "vdds_dpr";
43 diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
44 index 43d54017b779..d0ab012fa379 100644
45 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
46 +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
47 @@ -16,7 +16,8 @@
48 #include "mt8173.dtsi"
49
50 / {
51 - model = "mediatek,mt8173-evb";
52 + model = "MediaTek MT8173 evaluation board";
53 + compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
54
55 aliases {
56 serial0 = &uart0;
57 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
58 index d2bfbc2e8995..be15e52a47a0 100644
59 --- a/arch/mips/kernel/irq.c
60 +++ b/arch/mips/kernel/irq.c
61 @@ -109,7 +109,7 @@ void __init init_IRQ(void)
62 #endif
63 }
64
65 -#ifdef DEBUG_STACKOVERFLOW
66 +#ifdef CONFIG_DEBUG_STACKOVERFLOW
67 static inline void check_stack_overflow(void)
68 {
69 unsigned long sp;
70 diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
71 index 838d3a6a5b7d..cea02968a908 100644
72 --- a/arch/mips/kvm/emulate.c
73 +++ b/arch/mips/kvm/emulate.c
74 @@ -2101,7 +2101,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
75 if (vcpu->mmio_needed == 2)
76 *gpr = *(int16_t *) run->mmio.data;
77 else
78 - *gpr = *(int16_t *) run->mmio.data;
79 + *gpr = *(uint16_t *)run->mmio.data;
80
81 break;
82 case 1:
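
Note: the one-character cast change above is the whole fix. Both branches previously used the sign-extending int16_t load, so the unsigned (lhu) completion path corrupted the upper register bits; per the patch, mmio_needed == 2 marks the signed (lh) case. A standalone user-space C sketch of the two casts (not patch code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t raw = 0xffff;      /* stand-in for the 16-bit MMIO payload */
        int64_t gpr;

        gpr = *(int16_t *)&raw;     /* lh path: sign-extends */
        printf("signed load:   0x%016llx\n", (unsigned long long)gpr);

        gpr = *(uint16_t *)&raw;    /* lhu path: zero-extends */
        printf("unsigned load: 0x%016llx\n", (unsigned long long)gpr);
        return 0;
    }
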
83 diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
84 index e20b02e3ae28..e10d10b9e82a 100644
85 --- a/arch/mips/ralink/ill_acc.c
86 +++ b/arch/mips/ralink/ill_acc.c
87 @@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
88 addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
89 type & ILL_ACC_LEN_M);
90
91 - rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
92 + rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
93
94 return IRQ_HANDLED;
95 }
96 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
97 index db257a58571f..e657b7ba3292 100644
98 --- a/arch/x86/include/asm/segment.h
99 +++ b/arch/x86/include/asm/segment.h
100 @@ -200,10 +200,21 @@
101 #define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
102
103 #ifdef __KERNEL__
104 +
105 +/*
106 + * early_idt_handler_array is an array of entry points referenced in the
107 + * early IDT. For simplicity, it's a real array with one entry point
108 + * every nine bytes. That leaves room for an optional 'push $0' if the
109 + * vector has no error code (two bytes), a 'push $vector_number' (two
110 + * bytes), and a jump to the common entry code (up to five bytes).
111 + */
112 +#define EARLY_IDT_HANDLER_SIZE 9
113 +
114 #ifndef __ASSEMBLY__
115 -extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
116 +
117 +extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
118 #ifdef CONFIG_TRACING
119 -#define trace_early_idt_handlers early_idt_handlers
120 +# define trace_early_idt_handler_array early_idt_handler_array
121 #endif
122
123 /*
124 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
125 index c4f8d4659070..b111ab5c4509 100644
126 --- a/arch/x86/kernel/head64.c
127 +++ b/arch/x86/kernel/head64.c
128 @@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
129 clear_bss();
130
131 for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
132 - set_intr_gate(i, early_idt_handlers[i]);
133 + set_intr_gate(i, early_idt_handler_array[i]);
134 load_idt((const struct desc_ptr *)&idt_descr);
135
136 copy_bootdata(__va(real_mode_data));
137 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
138 index f36bd42d6f0c..30a2aa3782fa 100644
139 --- a/arch/x86/kernel/head_32.S
140 +++ b/arch/x86/kernel/head_32.S
141 @@ -477,21 +477,22 @@ is486:
142 __INIT
143 setup_once:
144 /*
145 - * Set up a idt with 256 entries pointing to ignore_int,
146 - * interrupt gates. It doesn't actually load idt - that needs
147 - * to be done on each CPU. Interrupts are enabled elsewhere,
148 - * when we can be relatively sure everything is ok.
149 + * Set up a idt with 256 interrupt gates that push zero if there
150 + * is no error code and then jump to early_idt_handler_common.
151 + * It doesn't actually load the idt - that needs to be done on
152 + * each CPU. Interrupts are enabled elsewhere, when we can be
153 + * relatively sure everything is ok.
154 */
155
156 movl $idt_table,%edi
157 - movl $early_idt_handlers,%eax
158 + movl $early_idt_handler_array,%eax
159 movl $NUM_EXCEPTION_VECTORS,%ecx
160 1:
161 movl %eax,(%edi)
162 movl %eax,4(%edi)
163 /* interrupt gate, dpl=0, present */
164 movl $(0x8E000000 + __KERNEL_CS),2(%edi)
165 - addl $9,%eax
166 + addl $EARLY_IDT_HANDLER_SIZE,%eax
167 addl $8,%edi
168 loop 1b
169
170 @@ -523,26 +524,28 @@ setup_once:
171 andl $0,setup_once_ref /* Once is enough, thanks */
172 ret
173
174 -ENTRY(early_idt_handlers)
175 +ENTRY(early_idt_handler_array)
176 # 36(%esp) %eflags
177 # 32(%esp) %cs
178 # 28(%esp) %eip
179 # 24(%rsp) error code
180 i = 0
181 .rept NUM_EXCEPTION_VECTORS
182 - .if (EXCEPTION_ERRCODE_MASK >> i) & 1
183 - ASM_NOP2
184 - .else
185 + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
186 pushl $0 # Dummy error code, to make stack frame uniform
187 .endif
188 pushl $i # 20(%esp) Vector number
189 - jmp early_idt_handler
190 + jmp early_idt_handler_common
191 i = i + 1
192 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
193 .endr
194 -ENDPROC(early_idt_handlers)
195 +ENDPROC(early_idt_handler_array)
196
197 - /* This is global to keep gas from relaxing the jumps */
198 -ENTRY(early_idt_handler)
199 +early_idt_handler_common:
200 + /*
201 + * The stack is the hardware frame, an error code or zero, and the
202 + * vector number.
203 + */
204 cld
205
206 cmpl $2,(%esp) # X86_TRAP_NMI
207 @@ -602,7 +605,7 @@ ex_entry:
208 is_nmi:
209 addl $8,%esp /* drop vector number and error code */
210 iret
211 -ENDPROC(early_idt_handler)
212 +ENDPROC(early_idt_handler_common)
213
214 /* This is the default interrupt "handler" :-) */
215 ALIGN
216 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
217 index 6fd514d9f69a..f8a8406033c3 100644
218 --- a/arch/x86/kernel/head_64.S
219 +++ b/arch/x86/kernel/head_64.S
220 @@ -321,26 +321,28 @@ bad_address:
221 jmp bad_address
222
223 __INIT
224 - .globl early_idt_handlers
225 -early_idt_handlers:
226 +ENTRY(early_idt_handler_array)
227 # 104(%rsp) %rflags
228 # 96(%rsp) %cs
229 # 88(%rsp) %rip
230 # 80(%rsp) error code
231 i = 0
232 .rept NUM_EXCEPTION_VECTORS
233 - .if (EXCEPTION_ERRCODE_MASK >> i) & 1
234 - ASM_NOP2
235 - .else
236 + .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
237 pushq $0 # Dummy error code, to make stack frame uniform
238 .endif
239 pushq $i # 72(%rsp) Vector number
240 - jmp early_idt_handler
241 + jmp early_idt_handler_common
242 i = i + 1
243 + .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
244 .endr
245 +ENDPROC(early_idt_handler_array)
246
247 -/* This is global to keep gas from relaxing the jumps */
248 -ENTRY(early_idt_handler)
249 +early_idt_handler_common:
250 + /*
251 + * The stack is the hardware frame, an error code or zero, and the
252 + * vector number.
253 + */
254 cld
255
256 cmpl $2,(%rsp) # X86_TRAP_NMI
257 @@ -412,7 +414,7 @@ ENTRY(early_idt_handler)
258 is_nmi:
259 addq $16,%rsp # drop vector number and error code
260 INTERRUPT_RETURN
261 -ENDPROC(early_idt_handler)
262 +ENDPROC(early_idt_handler_common)
263
264 __INITDATA
265
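
Note: the new .fill directive pads every stub with 0xcc (int3) to the fixed EARLY_IDT_HANDLER_SIZE slot, replacing the old ASM_NOP2 trick and removing the need to keep the common handler global so gas would not relax the jumps. A small C sketch of the byte budget, assuming the near jump assembles to its maximal 5-byte rel32 form:

    #include <stdio.h>

    #define EARLY_IDT_HANDLER_SIZE 9    /* from the segment.h hunk above */

    int main(void)
    {
        for (int errcode = 0; errcode <= 1; errcode++) {
            /* optional 'push $0' (2) + 'push $vec' (2) + 'jmp rel32' (5) */
            int used = (errcode ? 0 : 2) + 2 + 5;
            printf("error code: %d -> %d bytes used, %d byte(s) of 0xcc pad\n",
                   errcode, used, EARLY_IDT_HANDLER_SIZE - used);
        }
        return 0;
    }
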
266 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
267 index 987514396c1e..ddeff4844a10 100644
268 --- a/arch/x86/net/bpf_jit_comp.c
269 +++ b/arch/x86/net/bpf_jit_comp.c
270 @@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
271 if (is_ereg(dst_reg))
272 EMIT1(0x41);
273 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
274 +
275 + /* emit 'movzwl eax, ax' */
276 + if (is_ereg(dst_reg))
277 + EMIT3(0x45, 0x0F, 0xB7);
278 + else
279 + EMIT2(0x0F, 0xB7);
280 + EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
281 break;
282 case 32:
283 /* emit 'bswap eax' to swap lower 4 bytes */
284 @@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
285 break;
286
287 case BPF_ALU | BPF_END | BPF_FROM_LE:
288 + switch (imm32) {
289 + case 16:
290 + /* emit 'movzwl eax, ax' to zero extend 16-bit
291 + * into 64 bit
292 + */
293 + if (is_ereg(dst_reg))
294 + EMIT3(0x45, 0x0F, 0xB7);
295 + else
296 + EMIT2(0x0F, 0xB7);
297 + EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
298 + break;
299 + case 32:
300 + /* emit 'mov eax, eax' to clear upper 32-bits */
301 + if (is_ereg(dst_reg))
302 + EMIT1(0x45);
303 + EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
304 + break;
305 + case 64:
306 + /* nop */
307 + break;
308 + }
309 break;
310
311 /* ST: *(u8*)(dst_reg + off) = imm */
312 @@ -938,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
313 }
314 ctx.cleanup_addr = proglen;
315
316 - for (pass = 0; pass < 10; pass++) {
317 + /* JITed image shrinks with every pass and the loop iterates
318 + * until the image stops shrinking. Very large bpf programs
319 + * may converge on the last pass. In such case do one more
320 + * pass to emit the final image
321 + */
322 + for (pass = 0; pass < 10 || image; pass++) {
323 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
324 if (proglen <= 0) {
325 image = NULL;
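
Note: the hunks above make the 16- and 32-bit endian conversions also zero-extend the 64-bit BPF register (BPF_FROM_BE 16 gains a movzwl after the rotate; BPF_FROM_LE gains explicit truncation). A C model of the three BPF_FROM_LE cases as a sketch, with a hypothetical input value: on x86 the byte order is already little-endian, but the result must still be narrowed to the requested width.

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t le16(uint64_t r) { return (uint16_t)r; }  /* movzwl      */
    static uint64_t le32(uint64_t r) { return (uint32_t)r; }  /* mov eax,eax */
    static uint64_t le64(uint64_t r) { return r; }            /* nop         */

    int main(void)
    {
        uint64_t r = 0xdeadbeefcafef00dULL;

        printf("16: 0x%016llx\n", (unsigned long long)le16(r));
        printf("32: 0x%016llx\n", (unsigned long long)le32(r));
        printf("64: 0x%016llx\n", (unsigned long long)le64(r));
        return 0;
    }
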
326 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
327 index 7b9be9822724..8533c96bab13 100644
328 --- a/arch/x86/vdso/Makefile
329 +++ b/arch/x86/vdso/Makefile
330 @@ -51,7 +51,7 @@ VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
331 $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
332 $(call if_changed,vdso)
333
334 -HOST_EXTRACFLAGS += -I$(srctree)/tools/include
335 +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
336 hostprogs-y += vdso2c
337
338 quiet_cmd_vdso2c = VDSO2C $@
339 diff --git a/block/blk-mq.c b/block/blk-mq.c
340 index 5c39703e644f..b2e73e1ef8a4 100644
341 --- a/block/blk-mq.c
342 +++ b/block/blk-mq.c
343 @@ -1589,6 +1589,7 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
344 return NOTIFY_OK;
345 }
346
347 +/* hctx->ctxs will be freed in queue's release handler */
348 static void blk_mq_exit_hctx(struct request_queue *q,
349 struct blk_mq_tag_set *set,
350 struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
351 @@ -1607,7 +1608,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
352
353 blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
354 blk_free_flush_queue(hctx->fq);
355 - kfree(hctx->ctxs);
356 blk_mq_free_bitmap(&hctx->ctx_map);
357 }
358
359 @@ -1873,8 +1873,12 @@ void blk_mq_release(struct request_queue *q)
360 unsigned int i;
361
362 /* hctx kobj stays in hctx */
363 - queue_for_each_hw_ctx(q, hctx, i)
364 + queue_for_each_hw_ctx(q, hctx, i) {
365 + if (!hctx)
366 + continue;
367 + kfree(hctx->ctxs);
368 kfree(hctx);
369 + }
370
371 kfree(q->queue_hw_ctx);
372
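
Note: the fix moves kfree(hctx->ctxs) from the exit path to the queue's release handler, so the array lives exactly as long as the hctx that owns it. A user-space sketch of the pattern with stand-in types (not the driver's real structures):

    #include <stdio.h>
    #include <stdlib.h>

    struct hctx {
        int *ctxs;
    };

    static void hctx_exit(struct hctx *h)
    {
        (void)h;    /* ctxs deliberately no longer freed here */
    }

    static void queue_release(struct hctx *h)
    {
        if (!h)
            return;
        free(h->ctxs);  /* the kfree() the patch moves here */
        free(h);
    }

    int main(void)
    {
        struct hctx *h = malloc(sizeof(*h));

        h->ctxs = calloc(4, sizeof(int));
        hctx_exit(h);       /* object may still be referenced */
        queue_release(h);   /* last reference gone: free everything */
        puts("ctxs freed in release, not exit");
        return 0;
    }
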
373 diff --git a/block/genhd.c b/block/genhd.c
374 index 0a536dc05f3b..ea982eadaf63 100644
375 --- a/block/genhd.c
376 +++ b/block/genhd.c
377 @@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
378 /* allocate ext devt */
379 idr_preload(GFP_KERNEL);
380
381 - spin_lock(&ext_devt_lock);
382 + spin_lock_bh(&ext_devt_lock);
383 idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
384 - spin_unlock(&ext_devt_lock);
385 + spin_unlock_bh(&ext_devt_lock);
386
387 idr_preload_end();
388 if (idx < 0)
389 @@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
390 return;
391
392 if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
393 - spin_lock(&ext_devt_lock);
394 + spin_lock_bh(&ext_devt_lock);
395 idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
396 - spin_unlock(&ext_devt_lock);
397 + spin_unlock_bh(&ext_devt_lock);
398 }
399 }
400
401 @@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
402 disk->flags &= ~GENHD_FL_UP;
403
404 sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
405 - bdi_unregister(&disk->queue->backing_dev_info);
406 blk_unregister_queue(disk);
407 blk_unregister_region(disk_devt(disk), disk->minors);
408
409 @@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
410 } else {
411 struct hd_struct *part;
412
413 - spin_lock(&ext_devt_lock);
414 + spin_lock_bh(&ext_devt_lock);
415 part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
416 if (part && get_disk(part_to_disk(part))) {
417 *partno = part->partno;
418 disk = part_to_disk(part);
419 }
420 - spin_unlock(&ext_devt_lock);
421 + spin_unlock_bh(&ext_devt_lock);
422 }
423
424 return disk;
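
Note: ext_devt_lock is switched to the _bh variants because the lock can also be taken from bottom-half context; a process-context holder that leaves bottom halves enabled can deadlock against a softirq on the same CPU. A toy single-CPU model of that hazard (flags stand in for the real locking primitives):

    #include <stdio.h>

    static int lock_held, bh_disabled;

    static void softirq_free_devt(void)
    {
        if (lock_held)
            puts("softirq spins on ext_devt_lock held by this CPU: deadlock");
        else
            puts("softirq takes and releases the lock normally");
    }

    static void timer_tick(void)
    {
        if (!bh_disabled)
            softirq_free_devt();
    }

    int main(void)
    {
        /* plain spin_lock(): a softirq can interrupt the critical section */
        lock_held = 1;
        timer_tick();
        lock_held = 0;

        /* spin_lock_bh(): bottom halves are masked until unlock */
        bh_disabled = 1;
        lock_held = 1;
        timer_tick();   /* softirq deferred */
        lock_held = 0;
        bh_disabled = 0;
        timer_tick();
        return 0;
    }
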
425 diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
426 index 23716dd8a7ec..5928d0746a27 100644
427 --- a/drivers/ata/ahci_mvebu.c
428 +++ b/drivers/ata/ahci_mvebu.c
429 @@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
430 writel((cs->mbus_attr << 8) |
431 (dram->mbus_dram_target_id << 4) | 1,
432 hpriv->mmio + AHCI_WINDOW_CTRL(i));
433 - writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
434 + writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
435 writel(((cs->size - 1) & 0xffff0000),
436 hpriv->mmio + AHCI_WINDOW_SIZE(i));
437 }
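
Note: the fix implies the BASE register holds bits [31:16] of the physical window base, at the same 64 KiB granularity as the SIZE register's 0xffff0000 mask, so writing the raw address programmed a wildly wrong window. A sketch of the encoding with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t base = 0x40000000, size = 0x08000000;  /* hypothetical window */

        printf("AHCI_WINDOW_BASE <- 0x%08x\n", base >> 16);
        printf("AHCI_WINDOW_SIZE <- 0x%08x\n", (size - 1) & 0xffff0000);
        return 0;
    }
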
438 diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
439 index 80a80548ad0a..27245957eee3 100644
440 --- a/drivers/ata/pata_octeon_cf.c
441 +++ b/drivers/ata/pata_octeon_cf.c
442 @@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
443 },
444 {},
445 };
446 -MODULE_DEVICE_TABLE(of, octeon_i2c_match);
447 +MODULE_DEVICE_TABLE(of, octeon_cf_match);
448
449 static struct platform_driver octeon_cf_driver = {
450 .probe = octeon_cf_probe,
451 diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
452 index 9c2ba1c97c42..df0c66cb7ad3 100644
453 --- a/drivers/base/cacheinfo.c
454 +++ b/drivers/base/cacheinfo.c
455 @@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
456 {
457 int ret;
458
459 - if (init_cache_level(cpu))
460 + if (init_cache_level(cpu) || !cache_leaves(cpu))
461 return -ENOENT;
462
463 per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
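
Note: the added guard bails out when there are no cache leaves, instead of calling kcalloc(0, ...) and leaving later users to walk an empty array. A minimal user-space sketch of the control flow, with plain integers standing in for the kernel error codes:

    #include <stdio.h>
    #include <stdlib.h>

    static int detect_cache_attributes(unsigned int leaves)
    {
        int *info;

        if (leaves == 0)
            return -2;      /* stands in for -ENOENT */
        info = calloc(leaves, sizeof(*info));
        if (!info)
            return -12;     /* stands in for -ENOMEM */
        free(info);
        return 0;
    }

    int main(void)
    {
        printf("0 leaves -> %d, 4 leaves -> %d\n",
               detect_cache_attributes(0), detect_cache_attributes(4));
        return 0;
    }
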
464 diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
465 index fb9ec6221730..6f047dcb94c2 100644
466 --- a/drivers/bus/mvebu-mbus.c
467 +++ b/drivers/bus/mvebu-mbus.c
468 @@ -58,7 +58,6 @@
469 #include <linux/debugfs.h>
470 #include <linux/log2.h>
471 #include <linux/syscore_ops.h>
472 -#include <linux/memblock.h>
473
474 /*
475 * DDR target is the same on all platforms.
476 @@ -70,6 +69,7 @@
477 */
478 #define WIN_CTRL_OFF 0x0000
479 #define WIN_CTRL_ENABLE BIT(0)
480 +/* Only on HW I/O coherency capable platforms */
481 #define WIN_CTRL_SYNCBARRIER BIT(1)
482 #define WIN_CTRL_TGT_MASK 0xf0
483 #define WIN_CTRL_TGT_SHIFT 4
484 @@ -102,9 +102,7 @@
485
486 /* Relative to mbusbridge_base */
487 #define MBUS_BRIDGE_CTRL_OFF 0x0
488 -#define MBUS_BRIDGE_SIZE_MASK 0xffff0000
489 #define MBUS_BRIDGE_BASE_OFF 0x4
490 -#define MBUS_BRIDGE_BASE_MASK 0xffff0000
491
492 /* Maximum number of windows, for all known platforms */
493 #define MBUS_WINS_MAX 20
494 @@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
495 ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
496 (attr << WIN_CTRL_ATTR_SHIFT) |
497 (target << WIN_CTRL_TGT_SHIFT) |
498 - WIN_CTRL_SYNCBARRIER |
499 WIN_CTRL_ENABLE;
500 + if (mbus->hw_io_coherency)
501 + ctrl |= WIN_CTRL_SYNCBARRIER;
502
503 writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
504 writel(ctrl, addr + WIN_CTRL_OFF);
505 @@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
506 return MVEBU_MBUS_NO_REMAP;
507 }
508
509 -/*
510 - * Use the memblock information to find the MBus bridge hole in the
511 - * physical address space.
512 - */
513 -static void __init
514 -mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
515 -{
516 - struct memblock_region *r;
517 - uint64_t s = 0;
518 -
519 - for_each_memblock(memory, r) {
520 - /*
521 - * This part of the memory is above 4 GB, so we don't
522 - * care for the MBus bridge hole.
523 - */
524 - if (r->base >= 0x100000000)
525 - continue;
526 -
527 - /*
528 - * The MBus bridge hole is at the end of the RAM under
529 - * the 4 GB limit.
530 - */
531 - if (r->base + r->size > s)
532 - s = r->base + r->size;
533 - }
534 -
535 - *start = s;
536 - *end = 0x100000000;
537 -}
538 -
539 static void __init
540 mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
541 {
542 int i;
543 int cs;
544 - uint64_t mbus_bridge_base, mbus_bridge_end;
545
546 mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
547
548 - mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
549 -
550 for (i = 0, cs = 0; i < 4; i++) {
551 - u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
552 - u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
553 - u64 end;
554 - struct mbus_dram_window *w;
555 -
556 - /* Ignore entries that are not enabled */
557 - if (!(size & DDR_SIZE_ENABLED))
558 - continue;
559 -
560 - /*
561 - * Ignore entries whose base address is above 2^32,
562 - * since devices cannot DMA to such high addresses
563 - */
564 - if (base & DDR_BASE_CS_HIGH_MASK)
565 - continue;
566 -
567 - base = base & DDR_BASE_CS_LOW_MASK;
568 - size = (size | ~DDR_SIZE_MASK) + 1;
569 - end = base + size;
570 -
571 - /*
572 - * Adjust base/size of the current CS to make sure it
573 - * doesn't overlap with the MBus bridge hole. This is
574 - * particularly important for devices that do DMA from
575 - * DRAM to a SRAM mapped in a MBus window, such as the
576 - * CESA cryptographic engine.
577 - */
578 + u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
579 + u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
580
581 /*
582 - * The CS is fully enclosed inside the MBus bridge
583 - * area, so ignore it.
584 + * We only take care of entries for which the chip
585 + * select is enabled, and that don't have high base
586 + * address bits set (devices can only access the first
587 + * 32 bits of the memory).
588 */
589 - if (base >= mbus_bridge_base && end <= mbus_bridge_end)
590 - continue;
591 + if ((size & DDR_SIZE_ENABLED) &&
592 + !(base & DDR_BASE_CS_HIGH_MASK)) {
593 + struct mbus_dram_window *w;
594
595 - /*
596 - * Beginning of CS overlaps with end of MBus, raise CS
597 - * base address, and shrink its size.
598 - */
599 - if (base >= mbus_bridge_base && end > mbus_bridge_end) {
600 - size -= mbus_bridge_end - base;
601 - base = mbus_bridge_end;
602 + w = &mvebu_mbus_dram_info.cs[cs++];
603 + w->cs_index = i;
604 + w->mbus_attr = 0xf & ~(1 << i);
605 + if (mbus->hw_io_coherency)
606 + w->mbus_attr |= ATTR_HW_COHERENCY;
607 + w->base = base & DDR_BASE_CS_LOW_MASK;
608 + w->size = (size | ~DDR_SIZE_MASK) + 1;
609 }
610 -
611 - /*
612 - * End of CS overlaps with beginning of MBus, shrink
613 - * CS size.
614 - */
615 - if (base < mbus_bridge_base && end > mbus_bridge_base)
616 - size -= end - mbus_bridge_base;
617 -
618 - w = &mvebu_mbus_dram_info.cs[cs++];
619 - w->cs_index = i;
620 - w->mbus_attr = 0xf & ~(1 << i);
621 - if (mbus->hw_io_coherency)
622 - w->mbus_attr |= ATTR_HW_COHERENCY;
623 - w->base = base;
624 - w->size = size;
625 }
626 mvebu_mbus_dram_info.num_cs = cs;
627 }
628 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
629 index d9891d3461f6..7992164ea9ec 100644
630 --- a/drivers/dma/at_xdmac.c
631 +++ b/drivers/dma/at_xdmac.c
632 @@ -174,6 +174,8 @@
633 #define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
634
635 #define AT_XDMAC_MAX_CHAN 0x20
636 +#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
637 +#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
638
639 #define AT_XDMAC_DMA_BUSWIDTHS\
640 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
641 @@ -192,20 +194,17 @@ struct at_xdmac_chan {
642 struct dma_chan chan;
643 void __iomem *ch_regs;
644 u32 mask; /* Channel Mask */
645 - u32 cfg[2]; /* Channel Configuration Register */
646 - #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */
647 - #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */
648 + u32 cfg; /* Channel Configuration Register */
649 u8 perid; /* Peripheral ID */
650 u8 perif; /* Peripheral Interface */
651 u8 memif; /* Memory Interface */
652 - u32 per_src_addr;
653 - u32 per_dst_addr;
654 u32 save_cc;
655 u32 save_cim;
656 u32 save_cnda;
657 u32 save_cndc;
658 unsigned long status;
659 struct tasklet_struct tasklet;
660 + struct dma_slave_config sconfig;
661
662 spinlock_t lock;
663
664 @@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
665 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
666 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
667 dma_cookie_t cookie;
668 + unsigned long irqflags;
669
670 - spin_lock_bh(&atchan->lock);
671 + spin_lock_irqsave(&atchan->lock, irqflags);
672 cookie = dma_cookie_assign(tx);
673
674 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
675 @@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
676 if (list_is_singular(&atchan->xfers_list))
677 at_xdmac_start_xfer(atchan, desc);
678
679 - spin_unlock_bh(&atchan->lock);
680 + spin_unlock_irqrestore(&atchan->lock, irqflags);
681 return cookie;
682 }
683
684 @@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
685 return chan;
686 }
687
688 +static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
689 + enum dma_transfer_direction direction)
690 +{
691 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
692 + int csize, dwidth;
693 +
694 + if (direction == DMA_DEV_TO_MEM) {
695 + atchan->cfg =
696 + AT91_XDMAC_DT_PERID(atchan->perid)
697 + | AT_XDMAC_CC_DAM_INCREMENTED_AM
698 + | AT_XDMAC_CC_SAM_FIXED_AM
699 + | AT_XDMAC_CC_DIF(atchan->memif)
700 + | AT_XDMAC_CC_SIF(atchan->perif)
701 + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
702 + | AT_XDMAC_CC_DSYNC_PER2MEM
703 + | AT_XDMAC_CC_MBSIZE_SIXTEEN
704 + | AT_XDMAC_CC_TYPE_PER_TRAN;
705 + csize = ffs(atchan->sconfig.src_maxburst) - 1;
706 + if (csize < 0) {
707 + dev_err(chan2dev(chan), "invalid src maxburst value\n");
708 + return -EINVAL;
709 + }
710 + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
711 + dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
712 + if (dwidth < 0) {
713 + dev_err(chan2dev(chan), "invalid src addr width value\n");
714 + return -EINVAL;
715 + }
716 + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
717 + } else if (direction == DMA_MEM_TO_DEV) {
718 + atchan->cfg =
719 + AT91_XDMAC_DT_PERID(atchan->perid)
720 + | AT_XDMAC_CC_DAM_FIXED_AM
721 + | AT_XDMAC_CC_SAM_INCREMENTED_AM
722 + | AT_XDMAC_CC_DIF(atchan->perif)
723 + | AT_XDMAC_CC_SIF(atchan->memif)
724 + | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
725 + | AT_XDMAC_CC_DSYNC_MEM2PER
726 + | AT_XDMAC_CC_MBSIZE_SIXTEEN
727 + | AT_XDMAC_CC_TYPE_PER_TRAN;
728 + csize = ffs(atchan->sconfig.dst_maxburst) - 1;
729 + if (csize < 0) {
730 + dev_err(chan2dev(chan), "invalid src maxburst value\n");
731 + return -EINVAL;
732 + }
733 + atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
734 + dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
735 + if (dwidth < 0) {
736 + dev_err(chan2dev(chan), "invalid dst addr width value\n");
737 + return -EINVAL;
738 + }
739 + atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
740 + }
741 +
742 + dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
743 +
744 + return 0;
745 +}
746 +
747 +/*
748 + * Only check that maxburst and addr width values are supported by the
749 + * the controller but not that the configuration is good to perform the
750 + * transfer since we don't know the direction at this stage.
751 + */
752 +static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
753 +{
754 + if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
755 + || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
756 + return -EINVAL;
757 +
758 + if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
759 + || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
760 + return -EINVAL;
761 +
762 + return 0;
763 +}
764 +
765 static int at_xdmac_set_slave_config(struct dma_chan *chan,
766 struct dma_slave_config *sconfig)
767 {
768 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
769 - u8 dwidth;
770 - int csize;
771
772 - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
773 - AT91_XDMAC_DT_PERID(atchan->perid)
774 - | AT_XDMAC_CC_DAM_INCREMENTED_AM
775 - | AT_XDMAC_CC_SAM_FIXED_AM
776 - | AT_XDMAC_CC_DIF(atchan->memif)
777 - | AT_XDMAC_CC_SIF(atchan->perif)
778 - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
779 - | AT_XDMAC_CC_DSYNC_PER2MEM
780 - | AT_XDMAC_CC_MBSIZE_SIXTEEN
781 - | AT_XDMAC_CC_TYPE_PER_TRAN;
782 - csize = at_xdmac_csize(sconfig->src_maxburst);
783 - if (csize < 0) {
784 - dev_err(chan2dev(chan), "invalid src maxburst value\n");
785 + if (at_xdmac_check_slave_config(sconfig)) {
786 + dev_err(chan2dev(chan), "invalid slave configuration\n");
787 return -EINVAL;
788 }
789 - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
790 - dwidth = ffs(sconfig->src_addr_width) - 1;
791 - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
792 -
793 -
794 - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
795 - AT91_XDMAC_DT_PERID(atchan->perid)
796 - | AT_XDMAC_CC_DAM_FIXED_AM
797 - | AT_XDMAC_CC_SAM_INCREMENTED_AM
798 - | AT_XDMAC_CC_DIF(atchan->perif)
799 - | AT_XDMAC_CC_SIF(atchan->memif)
800 - | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
801 - | AT_XDMAC_CC_DSYNC_MEM2PER
802 - | AT_XDMAC_CC_MBSIZE_SIXTEEN
803 - | AT_XDMAC_CC_TYPE_PER_TRAN;
804 - csize = at_xdmac_csize(sconfig->dst_maxburst);
805 - if (csize < 0) {
806 - dev_err(chan2dev(chan), "invalid src maxburst value\n");
807 - return -EINVAL;
808 - }
809 - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
810 - dwidth = ffs(sconfig->dst_addr_width) - 1;
811 - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
812 -
813 - /* Src and dst addr are needed to configure the link list descriptor. */
814 - atchan->per_src_addr = sconfig->src_addr;
815 - atchan->per_dst_addr = sconfig->dst_addr;
816
817 - dev_dbg(chan2dev(chan),
818 - "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
819 - __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
820 - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
821 - atchan->per_src_addr, atchan->per_dst_addr);
822 + memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
823
824 return 0;
825 }
826 @@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
827 struct scatterlist *sg;
828 int i;
829 unsigned int xfer_size = 0;
830 + unsigned long irqflags;
831 + struct dma_async_tx_descriptor *ret = NULL;
832
833 if (!sgl)
834 return NULL;
835 @@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
836 flags);
837
838 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
839 - spin_lock_bh(&atchan->lock);
840 + spin_lock_irqsave(&atchan->lock, irqflags);
841 +
842 + if (at_xdmac_compute_chan_conf(chan, direction))
843 + goto spin_unlock;
844
845 /* Prepare descriptors. */
846 for_each_sg(sgl, sg, sg_len, i) {
847 @@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
848 mem = sg_dma_address(sg);
849 if (unlikely(!len)) {
850 dev_err(chan2dev(chan), "sg data length is zero\n");
851 - spin_unlock_bh(&atchan->lock);
852 - return NULL;
853 + goto spin_unlock;
854 }
855 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
856 __func__, i, len, mem);
857 @@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
858 dev_err(chan2dev(chan), "can't get descriptor\n");
859 if (first)
860 list_splice_init(&first->descs_list, &atchan->free_descs_list);
861 - spin_unlock_bh(&atchan->lock);
862 - return NULL;
863 + goto spin_unlock;
864 }
865
866 /* Linked list descriptor setup. */
867 if (direction == DMA_DEV_TO_MEM) {
868 - desc->lld.mbr_sa = atchan->per_src_addr;
869 + desc->lld.mbr_sa = atchan->sconfig.src_addr;
870 desc->lld.mbr_da = mem;
871 - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
872 } else {
873 desc->lld.mbr_sa = mem;
874 - desc->lld.mbr_da = atchan->per_dst_addr;
875 - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
876 + desc->lld.mbr_da = atchan->sconfig.dst_addr;
877 }
878 + desc->lld.mbr_cfg = atchan->cfg;
879 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
880 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
881 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
882 @@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
883 xfer_size += len;
884 }
885
886 - spin_unlock_bh(&atchan->lock);
887
888 first->tx_dma_desc.flags = flags;
889 first->xfer_size = xfer_size;
890 first->direction = direction;
891 + ret = &first->tx_dma_desc;
892
893 - return &first->tx_dma_desc;
894 +spin_unlock:
895 + spin_unlock_irqrestore(&atchan->lock, irqflags);
896 + return ret;
897 }
898
899 static struct dma_async_tx_descriptor *
900 @@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
901 struct at_xdmac_desc *first = NULL, *prev = NULL;
902 unsigned int periods = buf_len / period_len;
903 int i;
904 + unsigned long irqflags;
905
906 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
907 __func__, &buf_addr, buf_len, period_len,
908 @@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
909 return NULL;
910 }
911
912 + if (at_xdmac_compute_chan_conf(chan, direction))
913 + return NULL;
914 +
915 for (i = 0; i < periods; i++) {
916 struct at_xdmac_desc *desc = NULL;
917
918 - spin_lock_bh(&atchan->lock);
919 + spin_lock_irqsave(&atchan->lock, irqflags);
920 desc = at_xdmac_get_desc(atchan);
921 if (!desc) {
922 dev_err(chan2dev(chan), "can't get descriptor\n");
923 if (first)
924 list_splice_init(&first->descs_list, &atchan->free_descs_list);
925 - spin_unlock_bh(&atchan->lock);
926 + spin_unlock_irqrestore(&atchan->lock, irqflags);
927 return NULL;
928 }
929 - spin_unlock_bh(&atchan->lock);
930 + spin_unlock_irqrestore(&atchan->lock, irqflags);
931 dev_dbg(chan2dev(chan),
932 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
933 __func__, desc, &desc->tx_dma_desc.phys);
934
935 if (direction == DMA_DEV_TO_MEM) {
936 - desc->lld.mbr_sa = atchan->per_src_addr;
937 + desc->lld.mbr_sa = atchan->sconfig.src_addr;
938 desc->lld.mbr_da = buf_addr + i * period_len;
939 - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
940 } else {
941 desc->lld.mbr_sa = buf_addr + i * period_len;
942 - desc->lld.mbr_da = atchan->per_dst_addr;
943 - desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
944 + desc->lld.mbr_da = atchan->sconfig.dst_addr;
945 }
946 + desc->lld.mbr_cfg = atchan->cfg;
947 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
948 | AT_XDMAC_MBR_UBC_NDEN
949 | AT_XDMAC_MBR_UBC_NSEN
950 @@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
951 | AT_XDMAC_CC_SIF(0)
952 | AT_XDMAC_CC_MBSIZE_SIXTEEN
953 | AT_XDMAC_CC_TYPE_MEM_TRAN;
954 + unsigned long irqflags;
955
956 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
957 __func__, &src, &dest, len, flags);
958 @@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
959
960 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
961
962 - spin_lock_bh(&atchan->lock);
963 + spin_lock_irqsave(&atchan->lock, irqflags);
964 desc = at_xdmac_get_desc(atchan);
965 - spin_unlock_bh(&atchan->lock);
966 + spin_unlock_irqrestore(&atchan->lock, irqflags);
967 if (!desc) {
968 dev_err(chan2dev(chan), "can't get descriptor\n");
969 if (first)
970 @@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
971 int residue;
972 u32 cur_nda, mask, value;
973 u8 dwidth = 0;
974 + unsigned long flags;
975
976 ret = dma_cookie_status(chan, cookie, txstate);
977 if (ret == DMA_COMPLETE)
978 @@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
979 if (!txstate)
980 return ret;
981
982 - spin_lock_bh(&atchan->lock);
983 + spin_lock_irqsave(&atchan->lock, flags);
984
985 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
986
987 @@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
988 */
989 if (!desc->active_xfer) {
990 dma_set_residue(txstate, desc->xfer_size);
991 - spin_unlock_bh(&atchan->lock);
992 - return ret;
993 + goto spin_unlock;
994 }
995
996 residue = desc->xfer_size;
997 @@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
998 }
999 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
1000
1001 - spin_unlock_bh(&atchan->lock);
1002 -
1003 dma_set_residue(txstate, residue);
1004
1005 dev_dbg(chan2dev(chan),
1006 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1007 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1008
1009 +spin_unlock:
1010 + spin_unlock_irqrestore(&atchan->lock, flags);
1011 return ret;
1012 }
1013
1014 @@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
1015 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1016 {
1017 struct at_xdmac_desc *desc;
1018 + unsigned long flags;
1019
1020 - spin_lock_bh(&atchan->lock);
1021 + spin_lock_irqsave(&atchan->lock, flags);
1022
1023 /*
1024 * If channel is enabled, do nothing, advance_work will be triggered
1025 @@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1026 at_xdmac_start_xfer(atchan, desc);
1027 }
1028
1029 - spin_unlock_bh(&atchan->lock);
1030 + spin_unlock_irqrestore(&atchan->lock, flags);
1031 }
1032
1033 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1034 @@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
1035 {
1036 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1037 int ret;
1038 + unsigned long flags;
1039
1040 dev_dbg(chan2dev(chan), "%s\n", __func__);
1041
1042 - spin_lock_bh(&atchan->lock);
1043 + spin_lock_irqsave(&atchan->lock, flags);
1044 ret = at_xdmac_set_slave_config(chan, config);
1045 - spin_unlock_bh(&atchan->lock);
1046 + spin_unlock_irqrestore(&atchan->lock, flags);
1047
1048 return ret;
1049 }
1050 @@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
1051 {
1052 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1053 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1054 + unsigned long flags;
1055
1056 dev_dbg(chan2dev(chan), "%s\n", __func__);
1057
1058 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1059 return 0;
1060
1061 - spin_lock_bh(&atchan->lock);
1062 + spin_lock_irqsave(&atchan->lock, flags);
1063 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1064 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
1065 & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1066 cpu_relax();
1067 - spin_unlock_bh(&atchan->lock);
1068 + spin_unlock_irqrestore(&atchan->lock, flags);
1069
1070 return 0;
1071 }
1072 @@ -1150,16 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
1073 {
1074 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1075 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1076 + unsigned long flags;
1077
1078 dev_dbg(chan2dev(chan), "%s\n", __func__);
1079
1080 - spin_lock_bh(&atchan->lock);
1081 - if (!at_xdmac_chan_is_paused(atchan))
1082 + spin_lock_irqsave(&atchan->lock, flags);
1083 + if (!at_xdmac_chan_is_paused(atchan)) {
1084 + spin_unlock_irqrestore(&atchan->lock, flags);
1085 return 0;
1086 + }
1087
1088 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1089 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1090 - spin_unlock_bh(&atchan->lock);
1091 + spin_unlock_irqrestore(&atchan->lock, flags);
1092
1093 return 0;
1094 }
1095 @@ -1169,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1096 struct at_xdmac_desc *desc, *_desc;
1097 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1098 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1099 + unsigned long flags;
1100
1101 dev_dbg(chan2dev(chan), "%s\n", __func__);
1102
1103 - spin_lock_bh(&atchan->lock);
1104 + spin_lock_irqsave(&atchan->lock, flags);
1105 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1106 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1107 cpu_relax();
1108 @@ -1182,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1109 at_xdmac_remove_xfer(atchan, desc);
1110
1111 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1112 - spin_unlock_bh(&atchan->lock);
1113 + spin_unlock_irqrestore(&atchan->lock, flags);
1114
1115 return 0;
1116 }
1117 @@ -1192,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1118 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1119 struct at_xdmac_desc *desc;
1120 int i;
1121 + unsigned long flags;
1122
1123 - spin_lock_bh(&atchan->lock);
1124 + spin_lock_irqsave(&atchan->lock, flags);
1125
1126 if (at_xdmac_chan_is_enabled(atchan)) {
1127 dev_err(chan2dev(chan),
1128 @@ -1224,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1129 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1130
1131 spin_unlock:
1132 - spin_unlock_bh(&atchan->lock);
1133 + spin_unlock_irqrestore(&atchan->lock, flags);
1134 return i;
1135 }
1136
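
Note: at_xdmac_compute_chan_conf above encodes maxburst and address width as their log2 via ffs(x) - 1, which is why a value of 0 yields -1 and is rejected, and why at_xdmac_check_slave_config caps the inputs at AT_XDMAC_MAX_CSIZE/AT_XDMAC_MAX_DWIDTH. A standalone sketch of the encoding:

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        int maxburst[] = { 0, 1, 2, 4, 8, 16 };
        unsigned int i;

        for (i = 0; i < sizeof(maxburst) / sizeof(*maxburst); i++)
            printf("maxburst %2d -> csize %d\n",
                   maxburst[i], ffs(maxburst[i]) - 1);
        return 0;
    }
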
1137 diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
1138 index ac336a961dea..8e70e580c98a 100644
1139 --- a/drivers/dma/dmaengine.c
1140 +++ b/drivers/dma/dmaengine.c
1141 @@ -505,7 +505,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
1142 caps->directions = device->directions;
1143 caps->residue_granularity = device->residue_granularity;
1144
1145 - caps->cmd_pause = !!device->device_pause;
1146 + /*
1147 + * Some devices implement only pause (e.g. to get residuum) but no
1148 + * resume. However cmd_pause is advertised as pause AND resume.
1149 + */
1150 + caps->cmd_pause = !!(device->device_pause && device->device_resume);
1151 caps->cmd_terminate = !!device->device_terminate_all;
1152
1153 return 0;
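
Note: as the new comment says, cmd_pause promises pause AND resume, so it is now advertised only when both callbacks exist. A sketch of the check with a stand-in for struct dma_device:

    #include <stdbool.h>
    #include <stdio.h>

    struct dma_dev {
        int (*device_pause)(void);
        int (*device_resume)(void);
    };

    static int pause_only(void) { return 0; }

    static bool cmd_pause(const struct dma_dev *d)
    {
        return d->device_pause && d->device_resume;
    }

    int main(void)
    {
        struct dma_dev d = { .device_pause = pause_only, .device_resume = NULL };

        printf("pause-only device advertises cmd_pause: %d\n", cmd_pause(&d));
        return 0;
    }
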
1154 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1155 index 0e1f56772855..a2771a8d4377 100644
1156 --- a/drivers/dma/pl330.c
1157 +++ b/drivers/dma/pl330.c
1158 @@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
1159 struct pl330_dmac *pl330 = pch->dmac;
1160 LIST_HEAD(list);
1161
1162 + pm_runtime_get_sync(pl330->ddma.dev);
1163 spin_lock_irqsave(&pch->lock, flags);
1164 spin_lock(&pl330->lock);
1165 _stop(pch->thread);
1166 @@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
1167 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
1168 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
1169 spin_unlock_irqrestore(&pch->lock, flags);
1170 + pm_runtime_mark_last_busy(pl330->ddma.dev);
1171 + pm_runtime_put_autosuspend(pl330->ddma.dev);
1172
1173 return 0;
1174 }
1175 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1176 index 406624a0b201..340e21918f33 100644
1177 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1178 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1179 @@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
1180 dev->node_props.cpu_core_id_base);
1181 sysfs_show_32bit_prop(buffer, "simd_id_base",
1182 dev->node_props.simd_id_base);
1183 - sysfs_show_32bit_prop(buffer, "capability",
1184 - dev->node_props.capability);
1185 sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
1186 dev->node_props.max_waves_per_simd);
1187 sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
1188 @@ -735,6 +733,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
1189 kfd2kgd->get_fw_version(
1190 dev->gpu->kgd,
1191 KGD_ENGINE_MEC1));
1192 + sysfs_show_32bit_prop(buffer, "capability",
1193 + dev->node_props.capability);
1194 }
1195
1196 return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
1197 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
1198 index 27ea6bdebce7..7a628e4cb27a 100644
1199 --- a/drivers/gpu/drm/i915/i915_gem.c
1200 +++ b/drivers/gpu/drm/i915/i915_gem.c
1201 @@ -2732,9 +2732,6 @@ void i915_gem_reset(struct drm_device *dev)
1202 void
1203 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
1204 {
1205 - if (list_empty(&ring->request_list))
1206 - return;
1207 -
1208 WARN_ON(i915_verify_lists(ring->dev));
1209
1210 /* Retire requests first as we use it above for the early return.
1211 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1212 index 88b36a9173c9..336e8b63ca08 100644
1213 --- a/drivers/gpu/drm/i915/intel_dp.c
1214 +++ b/drivers/gpu/drm/i915/intel_dp.c
1215 @@ -881,10 +881,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
1216 DP_AUX_CH_CTL_RECEIVE_ERROR))
1217 continue;
1218 if (status & DP_AUX_CH_CTL_DONE)
1219 - break;
1220 + goto done;
1221 }
1222 - if (status & DP_AUX_CH_CTL_DONE)
1223 - break;
1224 }
1225
1226 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1227 @@ -893,6 +891,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
1228 goto out;
1229 }
1230
1231 +done:
1232 /* Check for timeout or receive error.
1233 * Timeouts occur when the sink is not connected
1234 */
1235 diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
1236 index 56e437e31580..ae628001fd97 100644
1237 --- a/drivers/gpu/drm/i915/intel_i2c.c
1238 +++ b/drivers/gpu/drm/i915/intel_i2c.c
1239 @@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
1240 struct intel_gmbus,
1241 adapter);
1242 struct drm_i915_private *dev_priv = bus->dev_priv;
1243 - int i, reg_offset;
1244 + int i = 0, inc, try = 0, reg_offset;
1245 int ret = 0;
1246
1247 intel_aux_display_runtime_get(dev_priv);
1248 @@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
1249
1250 reg_offset = dev_priv->gpio_mmio_base;
1251
1252 +retry:
1253 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
1254
1255 - for (i = 0; i < num; i++) {
1256 + for (; i < num; i += inc) {
1257 + inc = 1;
1258 if (gmbus_is_index_read(msgs, i, num)) {
1259 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
1260 - i += 1; /* set i to the index of the read xfer */
1261 + inc = 2; /* an index read is two msgs */
1262 } else if (msgs[i].flags & I2C_M_RD) {
1263 ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
1264 } else {
1265 @@ -525,6 +527,18 @@ clear_err:
1266 adapter->name, msgs[i].addr,
1267 (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
1268
1269 + /*
1270 + * Passive adapters sometimes NAK the first probe. Retry the first
1271 + * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
1272 + * has retries internally. See also the retry loop in
1273 + * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
1274 + */
1275 + if (ret == -ENXIO && i == 0 && try++ == 0) {
1276 + DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
1277 + adapter->name);
1278 + goto retry;
1279 + }
1280 +
1281 goto out;
1282
1283 timeout:
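
Note: the retry added to gmbus_xfer restarts the transfer exactly once when the very first message NAKs with -ENXIO. The shape of that control flow as a standalone sketch, with fake_xfer standing in for the hardware transfer loop:

    #include <errno.h>
    #include <stdio.h>

    static int fake_xfer(int attempt)
    {
        return attempt == 0 ? -ENXIO : 0;   /* passive adapter NAKs the probe */
    }

    int main(void)
    {
        int i = 0, try = 0, ret;

    retry:
        ret = fake_xfer(try);
        if (ret == -ENXIO && i == 0 && try++ == 0) {
            puts("GMBUS NAK on first message, retrying");
            goto retry;
        }
        printf("final ret = %d\n", ret);
        return 0;
    }
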
1284 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1285 index 965a45619f6b..9bd56116fd5a 100644
1286 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1287 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1288 @@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1289 else
1290 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
1291
1292 - /* if there is no audio, set MINM_OVER_MAXP */
1293 - if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
1294 - radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
1295 if (rdev->family < CHIP_RV770)
1296 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
1297 /* use frac fb div on APUs */
1298 @@ -1789,9 +1786,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
1299 if ((crtc->mode.clock == test_crtc->mode.clock) &&
1300 (adjusted_clock == test_adjusted_clock) &&
1301 (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
1302 - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
1303 - (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
1304 - drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
1305 + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
1306 return test_radeon_crtc->pll_id;
1307 }
1308 }
1309 diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1310 index f04205170b8a..cfa3a84a2af0 100644
1311 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
1312 +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
1313 @@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
1314 struct drm_device *dev = encoder->dev;
1315 struct radeon_device *rdev = dev->dev_private;
1316
1317 - WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
1318 + WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
1319 HDMI0_ACR_SOURCE | /* select SW CTS value */
1320 HDMI0_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
1321
1322 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1323 index bd7519fdd3f4..aa232fd25992 100644
1324 --- a/drivers/gpu/drm/radeon/radeon_device.c
1325 +++ b/drivers/gpu/drm/radeon/radeon_device.c
1326 @@ -1458,6 +1458,21 @@ int radeon_device_init(struct radeon_device *rdev,
1327 if (r)
1328 DRM_ERROR("ib ring test failed (%d).\n", r);
1329
1330 + /*
1331 + * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
1332 + * after the CP ring have chew one packet at least. Hence here we stop
1333 + * and restart DPM after the radeon_ib_ring_tests().
1334 + */
1335 + if (rdev->pm.dpm_enabled &&
1336 + (rdev->pm.pm_method == PM_METHOD_DPM) &&
1337 + (rdev->family == CHIP_TURKS) &&
1338 + (rdev->flags & RADEON_IS_MOBILITY)) {
1339 + mutex_lock(&rdev->pm.mutex);
1340 + radeon_dpm_disable(rdev);
1341 + radeon_dpm_enable(rdev);
1342 + mutex_unlock(&rdev->pm.mutex);
1343 + }
1344 +
1345 if ((radeon_testing & 1)) {
1346 if (rdev->accel_working)
1347 radeon_test_moves(rdev);
1348 diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
1349 index de42fc4a22b8..9c3377ca17b7 100644
1350 --- a/drivers/gpu/drm/radeon/radeon_vm.c
1351 +++ b/drivers/gpu/drm/radeon/radeon_vm.c
1352 @@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1353 /* make sure object fit at this offset */
1354 eoffset = soffset + size;
1355 if (soffset >= eoffset) {
1356 - return -EINVAL;
1357 + r = -EINVAL;
1358 + goto error_unreserve;
1359 }
1360
1361 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
1362 if (last_pfn > rdev->vm_manager.max_pfn) {
1363 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
1364 last_pfn, rdev->vm_manager.max_pfn);
1365 - return -EINVAL;
1366 + r = -EINVAL;
1367 + goto error_unreserve;
1368 }
1369
1370 } else {
1371 @@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1372 "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
1373 soffset, tmp->bo, tmp->it.start, tmp->it.last);
1374 mutex_unlock(&vm->mutex);
1375 - return -EINVAL;
1376 + r = -EINVAL;
1377 + goto error_unreserve;
1378 }
1379 }
1380
1381 @@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1382 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
1383 if (!tmp) {
1384 mutex_unlock(&vm->mutex);
1385 - return -ENOMEM;
1386 + r = -ENOMEM;
1387 + goto error_unreserve;
1388 }
1389 tmp->it.start = bo_va->it.start;
1390 tmp->it.last = bo_va->it.last;
1391 @@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1392 r = radeon_vm_clear_bo(rdev, pt);
1393 if (r) {
1394 radeon_bo_unref(&pt);
1395 - radeon_bo_reserve(bo_va->bo, false);
1396 return r;
1397 }
1398
1399 @@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
1400
1401 mutex_unlock(&vm->mutex);
1402 return 0;
1403 +
1404 +error_unreserve:
1405 + radeon_bo_unreserve(bo_va->bo);
1406 + return r;
1407 }
1408
1409 /**
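
Note: the fix funnels every early error return taken while the buffer object is reserved through one error_unreserve label, so the reservation count stays balanced. A minimal sketch of that unwind idiom, with reserve()/unreserve() as stand-ins for the radeon_bo calls:

    #include <stdio.h>

    static int refcount;
    static void reserve(void)   { refcount++; }
    static void unreserve(void) { refcount--; }

    static int bo_set_addr(unsigned int soffset, unsigned int size)
    {
        int r;

        reserve();
        if (soffset >= soffset + size) {    /* empty or wrapping range */
            r = -22;                        /* stands in for -EINVAL */
            goto error_unreserve;
        }
        unreserve();
        return 0;

    error_unreserve:
        unreserve();
        return r;
    }

    int main(void)
    {
        printf("ret %d, reservation balance %d\n",
               bo_set_addr(~0u, 1), refcount);
        return 0;
    }
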
1410 diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
1411 index 8fe78d08e01c..7c6966434ee7 100644
1412 --- a/drivers/i2c/busses/i2c-hix5hd2.c
1413 +++ b/drivers/i2c/busses/i2c-hix5hd2.c
1414 @@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
1415 MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
1416 MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
1417 MODULE_LICENSE("GPL");
1418 -MODULE_ALIAS("platform:i2c-hix5hd2");
1419 +MODULE_ALIAS("platform:hix5hd2-i2c");
1420 diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1421 index 958c8db4ec30..297e9c9ac943 100644
1422 --- a/drivers/i2c/busses/i2c-s3c2410.c
1423 +++ b/drivers/i2c/busses/i2c-s3c2410.c
1424 @@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1425 return -ENOMEM;
1426
1427 i2c->quirks = s3c24xx_get_device_quirks(pdev);
1428 + i2c->sysreg = ERR_PTR(-ENOENT);
1429 if (pdata)
1430 memcpy(i2c->pdata, pdata, sizeof(*pdata));
1431 else
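
Note: initializing i2c->sysreg to ERR_PTR(-ENOENT) makes later IS_ERR() checks treat "never looked up" the same as "lookup failed", instead of acting on an uninitialized pointer. A sketch of the idiom with a minimal re-implementation of the kernel's err.h helpers:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static inline long IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *sysreg = ERR_PTR(-2);     /* -ENOENT: not looked up yet */

        if (IS_ERR(sysreg))
            puts("sysreg absent, skipping sysreg accesses");
        return 0;
    }
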
1432 diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
1433 index 89d8aa1d2818..df12c57e6ce0 100644
1434 --- a/drivers/iio/adc/twl6030-gpadc.c
1435 +++ b/drivers/iio/adc/twl6030-gpadc.c
1436 @@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
1437
1438 module_platform_driver(twl6030_gpadc_driver);
1439
1440 -MODULE_ALIAS("platform: " DRIVER_NAME);
1441 +MODULE_ALIAS("platform:" DRIVER_NAME);
1442 MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
1443 MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
1444 MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
1445 diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
1446 index 0916bf6b6c31..73b189c1c0fb 100644
1447 --- a/drivers/iio/imu/adis16400.h
1448 +++ b/drivers/iio/imu/adis16400.h
1449 @@ -139,6 +139,7 @@
1450 #define ADIS16400_NO_BURST BIT(1)
1451 #define ADIS16400_HAS_SLOW_MODE BIT(2)
1452 #define ADIS16400_HAS_SERIAL_NUMBER BIT(3)
1453 +#define ADIS16400_BURST_DIAG_STAT BIT(4)
1454
1455 struct adis16400_state;
1456
1457 @@ -165,6 +166,7 @@ struct adis16400_state {
1458 int filt_int;
1459
1460 struct adis adis;
1461 + unsigned long avail_scan_mask[2];
1462 };
1463
1464 /* At the moment triggers are only used for ring buffer
1465 diff --git a/drivers/iio/imu/adis16400_buffer.c b/drivers/iio/imu/adis16400_buffer.c
1466 index 6e727ffe5262..90c24a23c679 100644
1467 --- a/drivers/iio/imu/adis16400_buffer.c
1468 +++ b/drivers/iio/imu/adis16400_buffer.c
1469 @@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
1470 {
1471 struct adis16400_state *st = iio_priv(indio_dev);
1472 struct adis *adis = &st->adis;
1473 - uint16_t *tx;
1474 + unsigned int burst_length;
1475 + u8 *tx;
1476
1477 if (st->variant->flags & ADIS16400_NO_BURST)
1478 return adis_update_scan_mode(indio_dev, scan_mask);
1479 @@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
1480 kfree(adis->xfer);
1481 kfree(adis->buffer);
1482
1483 + /* All but the timestamp channel */
1484 + burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
1485 + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
1486 + burst_length += sizeof(u16);
1487 +
1488 adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
1489 if (!adis->xfer)
1490 return -ENOMEM;
1491
1492 - adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
1493 - GFP_KERNEL);
1494 + adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
1495 if (!adis->buffer)
1496 return -ENOMEM;
1497
1498 - tx = adis->buffer + indio_dev->scan_bytes;
1499 -
1500 + tx = adis->buffer + burst_length;
1501 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
1502 tx[1] = 0;
1503
1504 adis->xfer[0].tx_buf = tx;
1505 adis->xfer[0].bits_per_word = 8;
1506 adis->xfer[0].len = 2;
1507 - adis->xfer[1].tx_buf = tx;
1508 + adis->xfer[1].rx_buf = adis->buffer;
1509 adis->xfer[1].bits_per_word = 8;
1510 - adis->xfer[1].len = indio_dev->scan_bytes;
1511 + adis->xfer[1].len = burst_length;
1512
1513 spi_message_init(&adis->msg);
1514 spi_message_add_tail(&adis->xfer[0], &adis->msg);
1515 @@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
1516 struct adis16400_state *st = iio_priv(indio_dev);
1517 struct adis *adis = &st->adis;
1518 u32 old_speed_hz = st->adis.spi->max_speed_hz;
1519 + void *buffer;
1520 int ret;
1521
1522 if (!adis->buffer)
1523 @@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
1524 spi_setup(st->adis.spi);
1525 }
1526
1527 - iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
1528 + if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
1529 + buffer = adis->buffer + sizeof(u16);
1530 + else
1531 + buffer = adis->buffer;
1532 +
1533 + iio_push_to_buffers_with_timestamp(indio_dev, buffer,
1534 pf->timestamp);
1535
1536 iio_trigger_notify_done(indio_dev->trig);
1537 diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
1538 index fa795dcd5f75..2fd68f2219a7 100644
1539 --- a/drivers/iio/imu/adis16400_core.c
1540 +++ b/drivers/iio/imu/adis16400_core.c
1541 @@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1542 *val = st->variant->temp_scale_nano / 1000000;
1543 *val2 = (st->variant->temp_scale_nano % 1000000);
1544 return IIO_VAL_INT_PLUS_MICRO;
1545 + case IIO_PRESSURE:
1546 + /* 20 uBar = 0.002kPascal */
1547 + *val = 0;
1548 + *val2 = 2000;
1549 + return IIO_VAL_INT_PLUS_MICRO;
1550 default:
1551 return -EINVAL;
1552 }
1553 @@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1554 }
1555 }
1556
1557 -#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
1558 +#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
1559 .type = IIO_VOLTAGE, \
1560 .indexed = 1, \
1561 - .channel = 0, \
1562 + .channel = chn, \
1563 .extend_name = name, \
1564 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
1565 BIT(IIO_CHAN_INFO_SCALE), \
1566 @@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
1567 }
1568
1569 #define ADIS16400_SUPPLY_CHAN(addr, bits) \
1570 - ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
1571 + ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
1572
1573 #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
1574 - ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
1575 + ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
1576
1577 #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
1578 .type = IIO_ANGL_VEL, \
1579 @@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
1580 .channels = adis16448_channels,
1581 .num_channels = ARRAY_SIZE(adis16448_channels),
1582 .flags = ADIS16400_HAS_PROD_ID |
1583 - ADIS16400_HAS_SERIAL_NUMBER,
1584 + ADIS16400_HAS_SERIAL_NUMBER |
1585 + ADIS16400_BURST_DIAG_STAT,
1586 .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
1587 .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
1588 .temp_scale_nano = 73860000, /* 0.07386 C */
1589 @@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
1590 .debugfs_reg_access = adis_debugfs_reg_access,
1591 };
1592
1593 -static const unsigned long adis16400_burst_scan_mask[] = {
1594 - ~0UL,
1595 - 0,
1596 -};
1597 -
1598 static const char * const adis16400_status_error_msgs[] = {
1599 [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
1600 [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
1601 @@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
1602 BIT(ADIS16400_DIAG_STAT_POWER_LOW),
1603 };
1604
1605 +static void adis16400_setup_chan_mask(struct adis16400_state *st)
1606 +{
1607 + const struct adis16400_chip_info *chip_info = st->variant;
1608 + unsigned i;
1609 +
1610 + for (i = 0; i < chip_info->num_channels; i++) {
1611 + const struct iio_chan_spec *ch = &chip_info->channels[i];
1612 +
1613 + if (ch->scan_index >= 0 &&
1614 + ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
1615 + st->avail_scan_mask[0] |= BIT(ch->scan_index);
1616 + }
1617 +}
1618 +
1619 static int adis16400_probe(struct spi_device *spi)
1620 {
1621 struct adis16400_state *st;
1622 @@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
1623 indio_dev->info = &adis16400_info;
1624 indio_dev->modes = INDIO_DIRECT_MODE;
1625
1626 - if (!(st->variant->flags & ADIS16400_NO_BURST))
1627 - indio_dev->available_scan_masks = adis16400_burst_scan_mask;
1628 + if (!(st->variant->flags & ADIS16400_NO_BURST)) {
1629 + adis16400_setup_chan_mask(st);
1630 + indio_dev->available_scan_masks = st->avail_scan_mask;
1631 + }
1632
1633 ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
1634 if (ret)
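
The core hunk replaces the old catch-all burst scan mask (~0UL) with one assembled from each channel's scan index, so the advertised mask matches exactly what a burst transfer delivers. A small standalone sketch of the same mask-building loop (the channel list and timestamp index are hypothetical):

    #include <stdio.h>

    struct chan { int scan_index; };

    int main(void)
    {
        /* Hypothetical channel list; -1 marks channels with no scan index. */
        struct chan channels[] = { {0}, {1}, {3}, {-1}, {7} };
        int timestamp_index = 7;   /* excluded, like ADIS16400_SCAN_TIMESTAMP */
        unsigned long mask = 0;

        for (unsigned i = 0; i < sizeof(channels) / sizeof(channels[0]); i++) {
            int si = channels[i].scan_index;
            if (si >= 0 && si != timestamp_index)
                mask |= 1UL << si;
        }
        printf("scan mask = 0x%lx\n", mask);   /* 0xb: bits 0, 1, 3 */
        return 0;
    }
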
1635 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
1636 index ea6cb64dfb28..d5335e664240 100644
1637 --- a/drivers/input/mouse/alps.c
1638 +++ b/drivers/input/mouse/alps.c
1639 @@ -1042,9 +1042,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
1640 right = (packet[1] & 0x02) >> 1;
1641 middle = (packet[1] & 0x04) >> 2;
1642
1643 - /* Divide 2 since trackpoint's speed is too fast */
1644 - input_report_rel(dev2, REL_X, (char)x / 2);
1645 - input_report_rel(dev2, REL_Y, -((char)y / 2));
1646 + input_report_rel(dev2, REL_X, (char)x);
1647 + input_report_rel(dev2, REL_Y, -((char)y));
1648
1649 input_report_key(dev2, BTN_LEFT, left);
1650 input_report_key(dev2, BTN_RIGHT, right);
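
The alps hunk drops the divide-by-two but keeps reinterpreting the raw byte as a signed 8-bit delta; the driver's (char) cast relies on char being signed on that platform, which int8_t makes explicit. A tiny illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t raw = 0xFB;               /* byte as it arrives off the wire */
        int dx = (int8_t)raw;             /* two's-complement view: -5 */
        int dy = -(int8_t)raw;            /* axis inverted, as for REL_Y: 5 */

        printf("dx=%d dy=%d\n", dx, dy);  /* dx=-5 dy=5 */
        return 0;
    }
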
1651 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
1652 index 79363b687195..ce3d40004458 100644
1653 --- a/drivers/input/mouse/elantech.c
1654 +++ b/drivers/input/mouse/elantech.c
1655 @@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
1656 return true;
1657
1658 /*
1659 - * Some models have a revision higher then 20. Meaning param[2] may
1660 - * be 10 or 20, skip the rates check for these.
1661 + * Some hw_version >= 4 models have a revision higher than 20, meaning
1662 + * that param[2] may be 10 or 20; skip the rates check for these.
1663 */
1664 - if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
1665 + if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
1666 + param[2] < 40)
1667 return true;
1668
1669 for (i = 0; i < ARRAY_SIZE(rates); i++)
1670 @@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
1671 case 9:
1672 case 10:
1673 case 13:
1674 + case 14:
1675 etd->hw_version = 4;
1676 break;
1677 default:
1678 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1679 index 3b06c8a360b6..907ac9bdd763 100644
1680 --- a/drivers/input/mouse/synaptics.c
1681 +++ b/drivers/input/mouse/synaptics.c
1682 @@ -148,6 +148,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1683 1024, 5112, 2024, 4832
1684 },
1685 {
1686 + (const char * const []){"LEN2000", NULL},
1687 + {ANY_BOARD_ID, ANY_BOARD_ID},
1688 + 1024, 5113, 2021, 4832
1689 + },
1690 + {
1691 (const char * const []){"LEN2001", NULL},
1692 {ANY_BOARD_ID, ANY_BOARD_ID},
1693 1024, 5022, 2508, 4832
1694 @@ -188,7 +193,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
1695 "LEN0045",
1696 "LEN0047",
1697 "LEN0049",
1698 - "LEN2000",
1699 + "LEN2000", /* S540 */
1700 "LEN2001", /* Edge E431 */
1701 "LEN2002", /* Edge E531 */
1702 "LEN2003",
1703 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1704 index 2d1e05bdbb53..272149d66f5b 100644
1705 --- a/drivers/iommu/intel-iommu.c
1706 +++ b/drivers/iommu/intel-iommu.c
1707 @@ -50,6 +50,7 @@
1708 #define CONTEXT_SIZE VTD_PAGE_SIZE
1709
1710 #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
1711 +#define IS_USB_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
1712 #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
1713 #define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
1714
1715 @@ -672,6 +673,11 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
1716 domain->iommu_superpage = domain_update_iommu_superpage(NULL);
1717 }
1718
1719 +static int iommu_dummy(struct device *dev)
1720 +{
1721 + return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
1722 +}
1723 +
1724 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
1725 {
1726 struct dmar_drhd_unit *drhd = NULL;
1727 @@ -681,6 +687,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
1728 u16 segment = 0;
1729 int i;
1730
1731 + if (iommu_dummy(dev))
1732 + return NULL;
1733 +
1734 if (dev_is_pci(dev)) {
1735 pdev = to_pci_dev(dev);
1736 segment = pci_domain_nr(pdev->bus);
1737 @@ -2554,6 +2563,10 @@ static bool device_has_rmrr(struct device *dev)
1738 * In both cases we assume that PCI USB devices with RMRRs have them largely
1739 * for historical reasons and that the RMRR space is not actively used post
1740 * boot. This exclusion may change if vendors begin to abuse it.
1741 + *
1742 + * The same exception is made for graphics devices, with the requirement that
1743 + * any use of the RMRR regions will be torn down before assigning the device
1744 + * to a guest.
1745 */
1746 static bool device_is_rmrr_locked(struct device *dev)
1747 {
1748 @@ -2563,7 +2576,7 @@ static bool device_is_rmrr_locked(struct device *dev)
1749 if (dev_is_pci(dev)) {
1750 struct pci_dev *pdev = to_pci_dev(dev);
1751
1752 - if ((pdev->class >> 8) == PCI_CLASS_SERIAL_USB)
1753 + if (IS_USB_DEVICE(pdev) || IS_GFX_DEVICE(pdev))
1754 return false;
1755 }
1756
1757 @@ -2969,11 +2982,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
1758 return __get_valid_domain_for_dev(dev);
1759 }
1760
1761 -static int iommu_dummy(struct device *dev)
1762 -{
1763 - return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
1764 -}
1765 -
1766 /* Check if the dev needs to go through non-identity map and unmap process.*/
1767 static int iommu_no_mapping(struct device *dev)
1768 {
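
The RMRR exemption above keys off the PCI class code: the 24-bit register holds base class, sub-class, and prog-if from high byte to low, so class >> 16 isolates the base class and class >> 8 the base/sub-class pair. A standalone sketch with an example class value:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_BASE_CLASS_DISPLAY 0x03
    #define PCI_CLASS_SERIAL_USB   0x0c03  /* base 0x0c, sub 0x03 */

    int main(void)
    {
        uint32_t class_code = 0x0c0330;    /* example: USB xHCI controller */

        int is_display = (class_code >> 16) == PCI_BASE_CLASS_DISPLAY;
        int is_usb     = (class_code >> 8)  == PCI_CLASS_SERIAL_USB;

        printf("display=%d usb=%d\n", is_display, is_usb); /* display=0 usb=1 */
        return 0;
    }
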
1769 diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
1770 index 4a9ce5b50c5b..6b2b582433bd 100644
1771 --- a/drivers/irqchip/irq-sunxi-nmi.c
1772 +++ b/drivers/irqchip/irq-sunxi-nmi.c
1773 @@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
1774 irqd_set_trigger_type(data, flow_type);
1775 irq_setup_alt_chip(data, flow_type);
1776
1777 - for (i = 0; i <= gc->num_ct; i++, ct++)
1778 + for (i = 0; i < gc->num_ct; i++, ct++)
1779 if (ct->type & flow_type)
1780 ctrl_off = ct->regs.type;
1781
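
The sunxi-nmi change is a textbook off-by-one: with num_ct entries, valid indices run 0..num_ct-1, and the old <= bound walked one chip type past the end of the array. Illustrated:

    #include <stdio.h>

    int main(void)
    {
        int regs[3] = { 10, 20, 30 };
        int num = 3;

        /* "i < num" visits exactly num elements; "i <= num" would read
         * regs[3], one past the end -- undefined behavior. */
        for (int i = 0; i < num; i++)
            printf("regs[%d] = %d\n", i, regs[i]);
        return 0;
    }
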
1782 diff --git a/drivers/md/md.c b/drivers/md/md.c
1783 index 907534b7f40d..b7bf8ee857fa 100644
1784 --- a/drivers/md/md.c
1785 +++ b/drivers/md/md.c
1786 @@ -3765,7 +3765,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
1787 err = -EBUSY;
1788 }
1789 spin_unlock(&mddev->lock);
1790 - return err;
1791 + return err ?: len;
1792 }
1793 err = mddev_lock(mddev);
1794 if (err)
1795 @@ -4144,13 +4144,14 @@ action_store(struct mddev *mddev, const char *page, size_t len)
1796 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1797 else
1798 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1799 - flush_workqueue(md_misc_wq);
1800 - if (mddev->sync_thread) {
1801 - set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1802 - if (mddev_lock(mddev) == 0) {
1803 + if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
1804 + mddev_lock(mddev) == 0) {
1805 + flush_workqueue(md_misc_wq);
1806 + if (mddev->sync_thread) {
1807 + set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1808 md_reap_sync_thread(mddev);
1809 - mddev_unlock(mddev);
1810 }
1811 + mddev_unlock(mddev);
1812 }
1813 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1814 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
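
The first md hunk returns err ?: len, GCC's conditional with an omitted middle operand: it yields err when err is nonzero and len otherwise, evaluating err only once, so a successful sysfs store now reports the consumed length instead of 0. A small sketch (needs GCC or Clang, since ?: without a middle operand is an extension):

    #include <stdio.h>

    /* GNU C extension: "a ?: b" means "a ? a : b" with a evaluated once. */
    static long report(int err, long len)
    {
        return err ?: len;
    }

    int main(void)
    {
        printf("%ld\n", report(0, 42));    /* 42: success returns the length */
        printf("%ld\n", report(-16, 42));  /* -16: errors pass through */
        return 0;
    }
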
1815 diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1816 index 4df28943d222..e8d3c1d35453 100644
1817 --- a/drivers/net/bonding/bond_options.c
1818 +++ b/drivers/net/bonding/bond_options.c
1819 @@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
1820 out:
1821 if (ret)
1822 bond_opt_error_interpret(bond, opt, ret, val);
1823 - else
1824 + else if (bond->dev->reg_state == NETREG_REGISTERED)
1825 call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
1826
1827 return ret;
1828 diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
1829 index 7f05f309e935..da36bcf32404 100644
1830 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c
1831 +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
1832 @@ -1773,9 +1773,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1833 total_size = buf_len;
1834
1835 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1836 - get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1837 - get_fat_cmd.size,
1838 - &get_fat_cmd.dma);
1839 + get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1840 + get_fat_cmd.size,
1841 + &get_fat_cmd.dma, GFP_ATOMIC);
1842 if (!get_fat_cmd.va) {
1843 dev_err(&adapter->pdev->dev,
1844 "Memory allocation failure while reading FAT data\n");
1845 @@ -1820,8 +1820,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1846 log_offset += buf_size;
1847 }
1848 err:
1849 - pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1850 - get_fat_cmd.va, get_fat_cmd.dma);
1851 + dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1852 + get_fat_cmd.va, get_fat_cmd.dma);
1853 spin_unlock_bh(&adapter->mcc_lock);
1854 return status;
1855 }
1856 @@ -2272,12 +2272,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
1857 return -EINVAL;
1858
1859 cmd.size = sizeof(struct be_cmd_resp_port_type);
1860 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1861 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1862 + GFP_ATOMIC);
1863 if (!cmd.va) {
1864 dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
1865 return -ENOMEM;
1866 }
1867 - memset(cmd.va, 0, cmd.size);
1868
1869 spin_lock_bh(&adapter->mcc_lock);
1870
1871 @@ -2302,7 +2302,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
1872 }
1873 err:
1874 spin_unlock_bh(&adapter->mcc_lock);
1875 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1876 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
1877 return status;
1878 }
1879
1880 @@ -2777,7 +2777,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
1881 goto err;
1882 }
1883 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
1884 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1885 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1886 + GFP_ATOMIC);
1887 if (!cmd.va) {
1888 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
1889 status = -ENOMEM;
1890 @@ -2811,7 +2812,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
1891 BE_SUPPORTED_SPEED_1GBPS;
1892 }
1893 }
1894 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1895 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
1896 err:
1897 spin_unlock_bh(&adapter->mcc_lock);
1898 return status;
1899 @@ -2862,8 +2863,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1900
1901 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
1902 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
1903 - attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
1904 - &attribs_cmd.dma);
1905 + attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1906 + attribs_cmd.size,
1907 + &attribs_cmd.dma, GFP_ATOMIC);
1908 if (!attribs_cmd.va) {
1909 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
1910 status = -ENOMEM;
1911 @@ -2890,8 +2892,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
1912 err:
1913 mutex_unlock(&adapter->mbox_lock);
1914 if (attribs_cmd.va)
1915 - pci_free_consistent(adapter->pdev, attribs_cmd.size,
1916 - attribs_cmd.va, attribs_cmd.dma);
1917 + dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
1918 + attribs_cmd.va, attribs_cmd.dma);
1919 return status;
1920 }
1921
1922 @@ -3029,9 +3031,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1923
1924 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
1925 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
1926 - get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
1927 - get_mac_list_cmd.size,
1928 - &get_mac_list_cmd.dma);
1929 + get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1930 + get_mac_list_cmd.size,
1931 + &get_mac_list_cmd.dma,
1932 + GFP_ATOMIC);
1933
1934 if (!get_mac_list_cmd.va) {
1935 dev_err(&adapter->pdev->dev,
1936 @@ -3104,8 +3107,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
1937
1938 out:
1939 spin_unlock_bh(&adapter->mcc_lock);
1940 - pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
1941 - get_mac_list_cmd.va, get_mac_list_cmd.dma);
1942 + dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
1943 + get_mac_list_cmd.va, get_mac_list_cmd.dma);
1944 return status;
1945 }
1946
1947 @@ -3158,8 +3161,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
1948
1949 memset(&cmd, 0, sizeof(struct be_dma_mem));
1950 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
1951 - cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
1952 - &cmd.dma, GFP_KERNEL);
1953 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1954 + GFP_KERNEL);
1955 if (!cmd.va)
1956 return -ENOMEM;
1957
1958 @@ -3348,7 +3351,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
1959
1960 memset(&cmd, 0, sizeof(struct be_dma_mem));
1961 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
1962 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
1963 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
1964 + GFP_ATOMIC);
1965 if (!cmd.va) {
1966 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
1967 status = -ENOMEM;
1968 @@ -3383,7 +3387,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
1969 err:
1970 mutex_unlock(&adapter->mbox_lock);
1971 if (cmd.va)
1972 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
1973 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
1974 + cmd.dma);
1975 return status;
1976
1977 }
1978 @@ -3397,8 +3402,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
1979
1980 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
1981 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
1982 - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
1983 - &extfat_cmd.dma);
1984 + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1985 + extfat_cmd.size, &extfat_cmd.dma,
1986 + GFP_ATOMIC);
1987 if (!extfat_cmd.va)
1988 return -ENOMEM;
1989
1990 @@ -3420,8 +3426,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
1991
1992 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
1993 err:
1994 - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
1995 - extfat_cmd.dma);
1996 + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
1997 + extfat_cmd.dma);
1998 return status;
1999 }
2000
2001 @@ -3434,8 +3440,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
2002
2003 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
2004 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
2005 - extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
2006 - &extfat_cmd.dma);
2007 + extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2008 + extfat_cmd.size, &extfat_cmd.dma,
2009 + GFP_ATOMIC);
2010
2011 if (!extfat_cmd.va) {
2012 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
2013 @@ -3453,8 +3460,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
2014 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
2015 }
2016 }
2017 - pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
2018 - extfat_cmd.dma);
2019 + dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
2020 + extfat_cmd.dma);
2021 err:
2022 return level;
2023 }
2024 @@ -3652,7 +3659,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
2025
2026 memset(&cmd, 0, sizeof(struct be_dma_mem));
2027 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2028 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2029 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2030 + GFP_ATOMIC);
2031 if (!cmd.va) {
2032 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2033 status = -ENOMEM;
2034 @@ -3692,7 +3700,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
2035 err:
2036 mutex_unlock(&adapter->mbox_lock);
2037 if (cmd.va)
2038 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2039 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2040 + cmd.dma);
2041 return status;
2042 }
2043
2044 @@ -3713,7 +3722,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
2045
2046 memset(&cmd, 0, sizeof(struct be_dma_mem));
2047 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
2048 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2049 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2050 + GFP_ATOMIC);
2051 if (!cmd.va)
2052 return -ENOMEM;
2053
2054 @@ -3752,7 +3762,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
2055 res->vf_if_cap_flags = vf_res->cap_flags;
2056 err:
2057 if (cmd.va)
2058 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2059 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2060 + cmd.dma);
2061 return status;
2062 }
2063
2064 @@ -3767,7 +3778,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
2065
2066 memset(&cmd, 0, sizeof(struct be_dma_mem));
2067 cmd.size = sizeof(struct be_cmd_req_set_profile_config);
2068 - cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2069 + cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2070 + GFP_ATOMIC);
2071 if (!cmd.va)
2072 return -ENOMEM;
2073
2074 @@ -3783,7 +3795,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
2075 status = be_cmd_notify_wait(adapter, &wrb);
2076
2077 if (cmd.va)
2078 - pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2079 + dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2080 + cmd.dma);
2081 return status;
2082 }
2083
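
The be_cmds hunks above swap pci_alloc_consistent for dma_zalloc_coherent throughout: the buffer comes back already zeroed (note the dropped memset after the transceiver-data allocation) and the explicit GFP_ATOMIC suits callers that hold the mcc_lock spinlock. By rough userspace analogy, a zeroing allocator collapses an allocate-then-clear pair into a single call:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t size = 64;

        /* Before: allocate, then zero by hand. */
        unsigned char *a = malloc(size);
        if (!a)
            return 1;
        memset(a, 0, size);

        /* After: one call that hands back zeroed memory. */
        unsigned char *b = calloc(1, size);
        if (!b)
            return 1;

        printf("a[0]=%u b[0]=%u\n", a[0], b[0]);  /* both 0 */
        free(a);
        free(b);
        return 0;
    }
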
2084 diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
2085 index 4d2de4700769..22ffcd81a6b5 100644
2086 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
2087 +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
2088 @@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
2089 int status = 0;
2090
2091 read_cmd.size = LANCER_READ_FILE_CHUNK;
2092 - read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
2093 - &read_cmd.dma);
2094 + read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
2095 + &read_cmd.dma, GFP_ATOMIC);
2096
2097 if (!read_cmd.va) {
2098 dev_err(&adapter->pdev->dev,
2099 @@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
2100 break;
2101 }
2102 }
2103 - pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
2104 - read_cmd.dma);
2105 + dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
2106 + read_cmd.dma);
2107
2108 return status;
2109 }
2110 @@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
2111 };
2112
2113 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
2114 - ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
2115 - &ddrdma_cmd.dma, GFP_KERNEL);
2116 + ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2117 + ddrdma_cmd.size, &ddrdma_cmd.dma,
2118 + GFP_KERNEL);
2119 if (!ddrdma_cmd.va)
2120 return -ENOMEM;
2121
2122 @@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
2123
2124 memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
2125 eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
2126 - eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
2127 - &eeprom_cmd.dma, GFP_KERNEL);
2128 + eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2129 + eeprom_cmd.size, &eeprom_cmd.dma,
2130 + GFP_KERNEL);
2131
2132 if (!eeprom_cmd.va)
2133 return -ENOMEM;
2134 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
2135 index e6b790f0d9dc..893753f18098 100644
2136 --- a/drivers/net/ethernet/emulex/benet/be_main.c
2137 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
2138 @@ -4392,8 +4392,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
2139
2140 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2141 + LANCER_FW_DOWNLOAD_CHUNK;
2142 - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
2143 - &flash_cmd.dma, GFP_KERNEL);
2144 + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
2145 + &flash_cmd.dma, GFP_KERNEL);
2146 if (!flash_cmd.va)
2147 return -ENOMEM;
2148
2149 @@ -4526,8 +4526,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
2150 }
2151
2152 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
2153 - flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
2154 - GFP_KERNEL);
2155 + flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
2156 + GFP_KERNEL);
2157 if (!flash_cmd.va)
2158 return -ENOMEM;
2159
2160 @@ -4941,10 +4941,10 @@ static int be_ctrl_init(struct be_adapter *adapter)
2161 goto done;
2162
2163 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2164 - mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
2165 - mbox_mem_alloc->size,
2166 - &mbox_mem_alloc->dma,
2167 - GFP_KERNEL);
2168 + mbox_mem_alloc->va = dma_zalloc_coherent(&adapter->pdev->dev,
2169 + mbox_mem_alloc->size,
2170 + &mbox_mem_alloc->dma,
2171 + GFP_KERNEL);
2172 if (!mbox_mem_alloc->va) {
2173 status = -ENOMEM;
2174 goto unmap_pci_bars;
2175 diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
2176 index e22e602beef3..c5789cdf7778 100644
2177 --- a/drivers/net/phy/dp83640.c
2178 +++ b/drivers/net/phy/dp83640.c
2179 @@ -47,7 +47,7 @@
2180 #define PSF_TX 0x1000
2181 #define EXT_EVENT 1
2182 #define CAL_EVENT 7
2183 -#define CAL_TRIGGER 7
2184 +#define CAL_TRIGGER 1
2185 #define DP83640_N_PINS 12
2186
2187 #define MII_DP83640_MICR 0x11
2188 @@ -495,7 +495,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
2189 else
2190 evnt |= EVNT_RISE;
2191 }
2192 + mutex_lock(&clock->extreg_lock);
2193 ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
2194 + mutex_unlock(&clock->extreg_lock);
2195 return 0;
2196
2197 case PTP_CLK_REQ_PEROUT:
2198 @@ -531,6 +533,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
2199
2200 static void enable_status_frames(struct phy_device *phydev, bool on)
2201 {
2202 + struct dp83640_private *dp83640 = phydev->priv;
2203 + struct dp83640_clock *clock = dp83640->clock;
2204 u16 cfg0 = 0, ver;
2205
2206 if (on)
2207 @@ -538,9 +542,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
2208
2209 ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
2210
2211 + mutex_lock(&clock->extreg_lock);
2212 +
2213 ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
2214 ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
2215
2216 + mutex_unlock(&clock->extreg_lock);
2217 +
2218 if (!phydev->attached_dev) {
2219 pr_warn("expected to find an attached netdevice\n");
2220 return;
2221 @@ -837,7 +845,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
2222 list_del_init(&rxts->list);
2223 phy2rxts(phy_rxts, rxts);
2224
2225 - spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
2226 + spin_lock(&dp83640->rx_queue.lock);
2227 skb_queue_walk(&dp83640->rx_queue, skb) {
2228 struct dp83640_skb_info *skb_info;
2229
2230 @@ -852,7 +860,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
2231 break;
2232 }
2233 }
2234 - spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
2235 + spin_unlock(&dp83640->rx_queue.lock);
2236
2237 if (!shhwtstamps)
2238 list_add_tail(&rxts->list, &dp83640->rxts);
2239 @@ -1172,11 +1180,18 @@ static int dp83640_config_init(struct phy_device *phydev)
2240
2241 if (clock->chosen && !list_empty(&clock->phylist))
2242 recalibrate(clock);
2243 - else
2244 + else {
2245 + mutex_lock(&clock->extreg_lock);
2246 enable_broadcast(phydev, clock->page, 1);
2247 + mutex_unlock(&clock->extreg_lock);
2248 + }
2249
2250 enable_status_frames(phydev, true);
2251 +
2252 + mutex_lock(&clock->extreg_lock);
2253 ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
2254 + mutex_unlock(&clock->extreg_lock);
2255 +
2256 return 0;
2257 }
2258
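
The dp83640 hunks take clock->extreg_lock around every ext_write sequence because the extended registers sit behind a shared page-select register; two writers interleaving their page selections would corrupt each other's accesses. A minimal pthread sketch of the same pattern (the paged "register file" is simulated; compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int page;            /* shared page-select "register" */
    static int regs[8][8];      /* paged register file */

    /* Both steps must happen atomically, or a concurrent caller can
     * flip the page between the select and the write. */
    static void paged_write(int pg, int reg, int val)
    {
        pthread_mutex_lock(&lock);
        page = pg;              /* step 1: select the page */
        regs[page][reg] = val;  /* step 2: write through it */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        paged_write(5, 2, 0x1234);
        printf("regs[5][2] = 0x%x\n", regs[5][2]);
        return 0;
    }
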
2259 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2260 index 52cd8db2c57d..757f28a4284c 100644
2261 --- a/drivers/net/phy/phy.c
2262 +++ b/drivers/net/phy/phy.c
2263 @@ -1053,13 +1053,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
2264 {
2265 /* According to 802.3az,the EEE is supported only in full duplex-mode.
2266 * Also EEE feature is active when core is operating with MII, GMII
2267 - * or RGMII. Internal PHYs are also allowed to proceed and should
2268 - * return an error if they do not support EEE.
2269 + * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
2270 + * should return an error if they do not support EEE.
2271 */
2272 if ((phydev->duplex == DUPLEX_FULL) &&
2273 ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
2274 (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
2275 - (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
2276 + (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
2277 + phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
2278 phy_is_internal(phydev))) {
2279 int eee_lp, eee_cap, eee_adv;
2280 u32 lp, cap, adv;
2281 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2282 index c3e4da9e79ca..8067b8fbb0ee 100644
2283 --- a/drivers/net/usb/cdc_ncm.c
2284 +++ b/drivers/net/usb/cdc_ncm.c
2285 @@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
2286 * payload data instead.
2287 */
2288 usbnet_set_skb_tx_stats(skb_out, n,
2289 - ctx->tx_curr_frame_payload - skb_out->len);
2290 + (long)ctx->tx_curr_frame_payload - skb_out->len);
2291
2292 return skb_out;
2293
2294 diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
2295 index 794204e34fba..152131a10047 100644
2296 --- a/drivers/net/xen-netback/xenbus.c
2297 +++ b/drivers/net/xen-netback/xenbus.c
2298 @@ -34,6 +34,8 @@ struct backend_info {
2299 enum xenbus_state frontend_state;
2300 struct xenbus_watch hotplug_status_watch;
2301 u8 have_hotplug_status_watch:1;
2302 +
2303 + const char *hotplug_script;
2304 };
2305
2306 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
2307 @@ -236,6 +238,7 @@ static int netback_remove(struct xenbus_device *dev)
2308 xenvif_free(be->vif);
2309 be->vif = NULL;
2310 }
2311 + kfree(be->hotplug_script);
2312 kfree(be);
2313 dev_set_drvdata(&dev->dev, NULL);
2314 return 0;
2315 @@ -253,6 +256,7 @@ static int netback_probe(struct xenbus_device *dev,
2316 struct xenbus_transaction xbt;
2317 int err;
2318 int sg;
2319 + const char *script;
2320 struct backend_info *be = kzalloc(sizeof(struct backend_info),
2321 GFP_KERNEL);
2322 if (!be) {
2323 @@ -345,6 +349,15 @@ static int netback_probe(struct xenbus_device *dev,
2324 if (err)
2325 pr_debug("Error writing multi-queue-max-queues\n");
2326
2327 + script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
2328 + if (IS_ERR(script)) {
2329 + err = PTR_ERR(script);
2330 + xenbus_dev_fatal(dev, err, "reading script");
2331 + goto fail;
2332 + }
2333 +
2334 + be->hotplug_script = script;
2335 +
2336 err = xenbus_switch_state(dev, XenbusStateInitWait);
2337 if (err)
2338 goto fail;
2339 @@ -377,22 +390,14 @@ static int netback_uevent(struct xenbus_device *xdev,
2340 struct kobj_uevent_env *env)
2341 {
2342 struct backend_info *be = dev_get_drvdata(&xdev->dev);
2343 - char *val;
2344
2345 - val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
2346 - if (IS_ERR(val)) {
2347 - int err = PTR_ERR(val);
2348 - xenbus_dev_fatal(xdev, err, "reading script");
2349 - return err;
2350 - } else {
2351 - if (add_uevent_var(env, "script=%s", val)) {
2352 - kfree(val);
2353 - return -ENOMEM;
2354 - }
2355 - kfree(val);
2356 - }
2357 + if (!be)
2358 + return 0;
2359 +
2360 + if (add_uevent_var(env, "script=%s", be->hotplug_script))
2361 + return -ENOMEM;
2362
2363 - if (!be || !be->vif)
2364 + if (!be->vif)
2365 return 0;
2366
2367 return add_uevent_var(env, "vif=%s", be->vif->dev->name);
2368 @@ -736,6 +741,7 @@ static void connect(struct backend_info *be)
2369 goto err;
2370 }
2371
2372 + queue->credit_bytes = credit_bytes;
2373 queue->remaining_credit = credit_bytes;
2374 queue->credit_usec = credit_usec;
2375
2376 diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
2377 index 3351ef408125..53826b84e0ec 100644
2378 --- a/drivers/of/dynamic.c
2379 +++ b/drivers/of/dynamic.c
2380 @@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
2381 phandle = __of_get_property(np, "phandle", &sz);
2382 if (!phandle)
2383 phandle = __of_get_property(np, "linux,phandle", &sz);
2384 - if (IS_ENABLED(PPC_PSERIES) && !phandle)
2385 + if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
2386 phandle = __of_get_property(np, "ibm,phandle", &sz);
2387 np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
2388
2389 diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
2390 index 8543bb29a138..9737a979b8db 100644
2391 --- a/drivers/staging/ozwpan/ozhcd.c
2392 +++ b/drivers/staging/ozwpan/ozhcd.c
2393 @@ -743,8 +743,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
2394 /*
2395 * Context: softirq
2396 */
2397 -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
2398 - int length, int offset, int total_size)
2399 +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
2400 + u8 length, u16 offset, u16 total_size)
2401 {
2402 struct oz_port *port = hport;
2403 struct urb *urb;
2404 @@ -756,8 +756,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
2405 if (!urb)
2406 return;
2407 if (status == 0) {
2408 - int copy_len;
2409 - int required_size = urb->transfer_buffer_length;
2410 + unsigned int copy_len;
2411 + unsigned int required_size = urb->transfer_buffer_length;
2412
2413 if (required_size > total_size)
2414 required_size = total_size;
2415 diff --git a/drivers/staging/ozwpan/ozusbif.h b/drivers/staging/ozwpan/ozusbif.h
2416 index 4249fa374012..d2a6085345be 100644
2417 --- a/drivers/staging/ozwpan/ozusbif.h
2418 +++ b/drivers/staging/ozwpan/ozusbif.h
2419 @@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
2420
2421 /* Confirmation functions.
2422 */
2423 -void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
2424 - const u8 *desc, int length, int offset, int total_size);
2425 +void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
2426 + const u8 *desc, u8 length, u16 offset, u16 total_size);
2427 void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
2428 const u8 *data, int data_len);
2429
2430 diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
2431 index d434d8c6fff6..f660bb198c65 100644
2432 --- a/drivers/staging/ozwpan/ozusbsvc1.c
2433 +++ b/drivers/staging/ozwpan/ozusbsvc1.c
2434 @@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
2435 struct oz_multiple_fixed *body =
2436 (struct oz_multiple_fixed *)data_hdr;
2437 u8 *data = body->data;
2438 - int n = (len - sizeof(struct oz_multiple_fixed)+1)
2439 + unsigned int n;
2440 + if (!body->unit_size ||
2441 + len < sizeof(struct oz_multiple_fixed) - 1)
2442 + break;
2443 + n = (len - (sizeof(struct oz_multiple_fixed) - 1))
2444 / body->unit_size;
2445 while (n--) {
2446 oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
2447 @@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
2448 case OZ_GET_DESC_RSP: {
2449 struct oz_get_desc_rsp *body =
2450 (struct oz_get_desc_rsp *)usb_hdr;
2451 - int data_len = elt->length -
2452 - sizeof(struct oz_get_desc_rsp) + 1;
2453 - u16 offs = le16_to_cpu(get_unaligned(&body->offset));
2454 - u16 total_size =
2455 + u16 offs, total_size;
2456 + u8 data_len;
2457 +
2458 + if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
2459 + break;
2460 + data_len = elt->length -
2461 + (sizeof(struct oz_get_desc_rsp) - 1);
2462 + offs = le16_to_cpu(get_unaligned(&body->offset));
2463 + total_size =
2464 le16_to_cpu(get_unaligned(&body->total_size));
2465 oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
2466 oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
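
The ozwpan hunks validate element lengths before subtracting the header size; with unsigned arithmetic, a short frame would otherwise wrap the subtraction around to an enormous count. A standalone sketch (HDR_SIZE is illustrative; the "- 1" mirrors the struct's one-byte flexible data tail being counted in its sizeof):

    #include <stdio.h>

    #define HDR_SIZE 4  /* hypothetical header, including a 1-byte data tail */

    static int payload_len(unsigned int len)
    {
        /* Reject short frames before subtracting; otherwise the
         * unsigned subtraction wraps around to a huge count. */
        if (len < HDR_SIZE - 1)
            return -1;
        return (int)(len - (HDR_SIZE - 1));
    }

    int main(void)
    {
        printf("%d\n", payload_len(10)); /* 7 */
        printf("%d\n", payload_len(2));  /* -1: rejected, no wraparound */
        return 0;
    }
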
2467 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
2468 index cc57a3a6b02b..eee40b5cb025 100644
2469 --- a/drivers/tty/n_tty.c
2470 +++ b/drivers/tty/n_tty.c
2471 @@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
2472 return put_user(x, ptr);
2473 }
2474
2475 +static inline int tty_copy_to_user(struct tty_struct *tty,
2476 + void __user *to,
2477 + const void *from,
2478 + unsigned long n)
2479 +{
2480 + struct n_tty_data *ldata = tty->disc_data;
2481 +
2482 + tty_audit_add_data(tty, to, n, ldata->icanon);
2483 + return copy_to_user(to, from, n);
2484 +}
2485 +
2486 /**
2487 * n_tty_kick_worker - start input worker (if required)
2488 * @tty: terminal
2489 @@ -2084,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
2490 __func__, eol, found, n, c, size, more);
2491
2492 if (n > size) {
2493 - ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
2494 + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
2495 if (ret)
2496 return -EFAULT;
2497 - ret = copy_to_user(*b + size, ldata->read_buf, n - size);
2498 + ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
2499 } else
2500 - ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
2501 + ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
2502
2503 if (ret)
2504 return -EFAULT;
2505 diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
2506 index 23061918b0e4..f74f400fcb57 100644
2507 --- a/drivers/tty/serial/imx.c
2508 +++ b/drivers/tty/serial/imx.c
2509 @@ -959,6 +959,14 @@ static void dma_rx_callback(void *data)
2510
2511 status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
2512 count = RX_BUF_SIZE - state.residue;
2513 +
2514 + if (readl(sport->port.membase + USR2) & USR2_IDLE) {
2515 + /* In condition [3] the SDMA counted up too early */
2516 + count--;
2517 +
2518 + writel(USR2_IDLE, sport->port.membase + USR2);
2519 + }
2520 +
2521 dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
2522
2523 if (count) {
2524 diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
2525 index d201910b892f..f176941a92dd 100644
2526 --- a/drivers/usb/dwc3/core.h
2527 +++ b/drivers/usb/dwc3/core.h
2528 @@ -339,7 +339,7 @@
2529 #define DWC3_DGCMD_SET_ENDPOINT_NRDY 0x0c
2530 #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK 0x10
2531
2532 -#define DWC3_DGCMD_STATUS(n) (((n) >> 15) & 1)
2533 +#define DWC3_DGCMD_STATUS(n) (((n) >> 12) & 0x0F)
2534 #define DWC3_DGCMD_CMDACT (1 << 10)
2535 #define DWC3_DGCMD_CMDIOC (1 << 8)
2536
2537 @@ -355,7 +355,7 @@
2538 #define DWC3_DEPCMD_PARAM_SHIFT 16
2539 #define DWC3_DEPCMD_PARAM(x) ((x) << DWC3_DEPCMD_PARAM_SHIFT)
2540 #define DWC3_DEPCMD_GET_RSC_IDX(x) (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
2541 -#define DWC3_DEPCMD_STATUS(x) (((x) >> 15) & 1)
2542 +#define DWC3_DEPCMD_STATUS(x) (((x) >> 12) & 0x0F)
2543 #define DWC3_DEPCMD_HIPRI_FORCERM (1 << 11)
2544 #define DWC3_DEPCMD_CMDACT (1 << 10)
2545 #define DWC3_DEPCMD_CMDIOC (1 << 8)
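
The dwc3 change widens the command-status accessor from a single bit at position 15 to the full 4-bit field at bits 15:12, so failure codes that do not set bit 15 are no longer misread as success. The extraction idiom, standalone:

    #include <stdio.h>
    #include <stdint.h>

    /* Extract a 4-bit status field from bits 15:12 of a register value. */
    #define CMD_STATUS(n)  (((n) >> 12) & 0x0F)

    int main(void)
    {
        uint32_t reg = 0x00002400;                 /* bits 15:12 hold 0x2 */
        printf("status = %u\n", CMD_STATUS(reg));  /* 2 */
        return 0;
    }
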
2546 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2547 index ec8ac1674854..36bf089b708f 100644
2548 --- a/drivers/usb/host/xhci.c
2549 +++ b/drivers/usb/host/xhci.c
2550 @@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2551 {
2552 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
2553 unsigned long flags;
2554 - int ret;
2555 + int ret, slot_id;
2556 struct xhci_command *command;
2557
2558 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
2559 if (!command)
2560 return 0;
2561
2562 + /* xhci->slot_id and xhci->addr_dev are not thread-safe */
2563 + mutex_lock(&xhci->mutex);
2564 spin_lock_irqsave(&xhci->lock, flags);
2565 command->completion = &xhci->addr_dev;
2566 ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
2567 if (ret) {
2568 spin_unlock_irqrestore(&xhci->lock, flags);
2569 + mutex_unlock(&xhci->mutex);
2570 xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
2571 kfree(command);
2572 return 0;
2573 @@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2574 spin_unlock_irqrestore(&xhci->lock, flags);
2575
2576 wait_for_completion(command->completion);
2577 + slot_id = xhci->slot_id;
2578 + mutex_unlock(&xhci->mutex);
2579
2580 - if (!xhci->slot_id || command->status != COMP_SUCCESS) {
2581 + if (!slot_id || command->status != COMP_SUCCESS) {
2582 xhci_err(xhci, "Error while assigning device slot ID\n");
2583 xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
2584 HCS_MAX_SLOTS(
2585 @@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
2586 * xhci_discover_or_reset_device(), which may be called as part of
2587 * mass storage driver error handling.
2588 */
2589 - if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
2590 + if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
2591 xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
2592 goto disable_slot;
2593 }
2594 - udev->slot_id = xhci->slot_id;
2595 + udev->slot_id = slot_id;
2596
2597 #ifndef CONFIG_USB_DEFAULT_PERSIST
2598 /*
2599 @@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2600 struct xhci_slot_ctx *slot_ctx;
2601 struct xhci_input_control_ctx *ctrl_ctx;
2602 u64 temp_64;
2603 - struct xhci_command *command;
2604 + struct xhci_command *command = NULL;
2605 +
2606 + mutex_lock(&xhci->mutex);
2607
2608 if (!udev->slot_id) {
2609 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2610 "Bad Slot ID %d", udev->slot_id);
2611 - return -EINVAL;
2612 + ret = -EINVAL;
2613 + goto out;
2614 }
2615
2616 virt_dev = xhci->devs[udev->slot_id];
2617 @@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2618 */
2619 xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
2620 udev->slot_id);
2621 - return -EINVAL;
2622 + ret = -EINVAL;
2623 + goto out;
2624 }
2625
2626 if (setup == SETUP_CONTEXT_ONLY) {
2627 @@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2628 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
2629 SLOT_STATE_DEFAULT) {
2630 xhci_dbg(xhci, "Slot already in default state\n");
2631 - return 0;
2632 + goto out;
2633 }
2634 }
2635
2636 command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
2637 - if (!command)
2638 - return -ENOMEM;
2639 + if (!command) {
2640 + ret = -ENOMEM;
2641 + goto out;
2642 + }
2643
2644 command->in_ctx = virt_dev->in_ctx;
2645 command->completion = &xhci->addr_dev;
2646 @@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2647 if (!ctrl_ctx) {
2648 xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
2649 __func__);
2650 - kfree(command);
2651 - return -EINVAL;
2652 + ret = -EINVAL;
2653 + goto out;
2654 }
2655 /*
2656 * If this is the first Set Address since device plug-in or
2657 @@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2658 spin_unlock_irqrestore(&xhci->lock, flags);
2659 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2660 "FIXME: allocate a command ring segment");
2661 - kfree(command);
2662 - return ret;
2663 + goto out;
2664 }
2665 xhci_ring_cmd_db(xhci);
2666 spin_unlock_irqrestore(&xhci->lock, flags);
2667 @@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2668 ret = -EINVAL;
2669 break;
2670 }
2671 - if (ret) {
2672 - kfree(command);
2673 - return ret;
2674 - }
2675 + if (ret)
2676 + goto out;
2677 temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
2678 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2679 "Op regs DCBAA ptr = %#016llx", temp_64);
2680 @@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
2681 xhci_dbg_trace(xhci, trace_xhci_dbg_address,
2682 "Internal device address = %d",
2683 le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
2684 +out:
2685 + mutex_unlock(&xhci->mutex);
2686 kfree(command);
2687 - return 0;
2688 + return ret;
2689 }
2690
2691 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
2692 @@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
2693 return 0;
2694 }
2695
2696 + mutex_init(&xhci->mutex);
2697 xhci->cap_regs = hcd->regs;
2698 xhci->op_regs = hcd->regs +
2699 HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
2700 @@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
2701 BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
2702 return 0;
2703 }
2704 +
2705 +/*
2706 + * If an init function is provided, an exit function must also be provided
2707 + * to allow module unload.
2708 + */
2709 +static void __exit xhci_hcd_fini(void) { }
2710 +
2711 module_init(xhci_hcd_init);
2712 +module_exit(xhci_hcd_fini);
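
The xhci_setup_device hunks convert each early return into goto out so a single exit path unlocks the new xhci->mutex and frees the command; kfree(NULL) being a no-op is what lets one exit serve every path. The same single-exit cleanup shape in a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(void)
    {
        int ret = 0;
        char *buf = NULL;

        buf = malloc(64);
        if (!buf) {
            ret = -1;
            goto out;           /* every failure path funnels here */
        }
        /* ... work that may also fail and "goto out" ... */

    out:
        free(buf);              /* free(NULL) is a safe no-op */
        printf("ret = %d\n", ret);
        return ret;
    }

    int main(void)
    {
        return do_work() ? 1 : 0;
    }
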
2713 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2714 index ea75e8ccd3c1..6977f8491fa7 100644
2715 --- a/drivers/usb/host/xhci.h
2716 +++ b/drivers/usb/host/xhci.h
2717 @@ -1497,6 +1497,8 @@ struct xhci_hcd {
2718 struct list_head lpm_failed_devs;
2719
2720 /* slot enabling and address device helpers */
2721 + /* these are not thread safe so use mutex */
2722 + struct mutex mutex;
2723 struct completion addr_dev;
2724 int slot_id;
2725 /* For USB 3.0 LPM enable/disable. */
2726 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2727 index 9031750e7404..ffd739e31bfc 100644
2728 --- a/drivers/usb/serial/cp210x.c
2729 +++ b/drivers/usb/serial/cp210x.c
2730 @@ -128,6 +128,7 @@ static const struct usb_device_id id_table[] = {
2731 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
2732 { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
2733 { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
2734 + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
2735 { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
2736 { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
2737 { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
2738 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2739 index 8eb68a31cab6..4c8b3b82103d 100644
2740 --- a/drivers/usb/serial/ftdi_sio.c
2741 +++ b/drivers/usb/serial/ftdi_sio.c
2742 @@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
2743 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
2744 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
2745 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
2746 + { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
2747 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
2748 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
2749 { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
2750 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2751 index 4e4f46f3c89c..792e054126de 100644
2752 --- a/drivers/usb/serial/ftdi_sio_ids.h
2753 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2754 @@ -155,6 +155,7 @@
2755 #define XSENS_AWINDA_STATION_PID 0x0101
2756 #define XSENS_AWINDA_DONGLE_PID 0x0102
2757 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
2758 +#define XSENS_MTDEVBOARD_PID 0x0300 /* Motion Tracker Development Board */
2759 #define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
2760
2761 /* Xsens devices using FTDI VID */
2762 diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
2763 index e894eb278d83..eba1b7ac7294 100644
2764 --- a/drivers/virtio/virtio_pci_common.c
2765 +++ b/drivers/virtio/virtio_pci_common.c
2766 @@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
2767 if (cpu == -1)
2768 irq_set_affinity_hint(irq, NULL);
2769 else {
2770 + cpumask_clear(mask);
2771 cpumask_set_cpu(cpu, mask);
2772 irq_set_affinity_hint(irq, mask);
2773 }
2774 diff --git a/fs/aio.c b/fs/aio.c
2775 index a793f7023755..a1736e98c278 100644
2776 --- a/fs/aio.c
2777 +++ b/fs/aio.c
2778 @@ -77,6 +77,11 @@ struct kioctx_cpu {
2779 unsigned reqs_available;
2780 };
2781
2782 +struct ctx_rq_wait {
2783 + struct completion comp;
2784 + atomic_t count;
2785 +};
2786 +
2787 struct kioctx {
2788 struct percpu_ref users;
2789 atomic_t dead;
2790 @@ -115,7 +120,7 @@ struct kioctx {
2791 /*
2792 * signals when all in-flight requests are done
2793 */
2794 - struct completion *requests_done;
2795 + struct ctx_rq_wait *rq_wait;
2796
2797 struct {
2798 /*
2799 @@ -539,8 +544,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
2800 struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
2801
2802 /* At this point we know that there are no any in-flight requests */
2803 - if (ctx->requests_done)
2804 - complete(ctx->requests_done);
2805 + if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
2806 + complete(&ctx->rq_wait->comp);
2807
2808 INIT_WORK(&ctx->free_work, free_ioctx);
2809 schedule_work(&ctx->free_work);
2810 @@ -751,7 +756,7 @@ err:
2811 * the rapid destruction of the kioctx.
2812 */
2813 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
2814 - struct completion *requests_done)
2815 + struct ctx_rq_wait *wait)
2816 {
2817 struct kioctx_table *table;
2818
2819 @@ -781,7 +786,7 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
2820 if (ctx->mmap_size)
2821 vm_munmap(ctx->mmap_base, ctx->mmap_size);
2822
2823 - ctx->requests_done = requests_done;
2824 + ctx->rq_wait = wait;
2825 percpu_ref_kill(&ctx->users);
2826 return 0;
2827 }
2828 @@ -813,18 +818,24 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
2829 void exit_aio(struct mm_struct *mm)
2830 {
2831 struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
2832 - int i;
2833 + struct ctx_rq_wait wait;
2834 + int i, skipped;
2835
2836 if (!table)
2837 return;
2838
2839 + atomic_set(&wait.count, table->nr);
2840 + init_completion(&wait.comp);
2841 +
2842 + skipped = 0;
2843 for (i = 0; i < table->nr; ++i) {
2844 struct kioctx *ctx = table->table[i];
2845 - struct completion requests_done =
2846 - COMPLETION_INITIALIZER_ONSTACK(requests_done);
2847
2848 - if (!ctx)
2849 + if (!ctx) {
2850 + skipped++;
2851 continue;
2852 + }
2853 +
2854 /*
2855 * We don't need to bother with munmap() here - exit_mmap(mm)
2856 * is coming and it'll unmap everything. And we simply can't,
2857 @@ -833,10 +844,12 @@ void exit_aio(struct mm_struct *mm)
2858 * that it needs to unmap the area, just set it to 0.
2859 */
2860 ctx->mmap_size = 0;
2861 - kill_ioctx(mm, ctx, &requests_done);
2862 + kill_ioctx(mm, ctx, &wait);
2863 + }
2864
2865 + if (!atomic_sub_and_test(skipped, &wait.count)) {
2866 /* Wait until all IO for the context are done. */
2867 - wait_for_completion(&requests_done);
2868 + wait_for_completion(&wait.comp);
2869 }
2870
2871 RCU_INIT_POINTER(mm->ioctx_table, NULL);
2872 @@ -1321,15 +1334,17 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
2873 {
2874 struct kioctx *ioctx = lookup_ioctx(ctx);
2875 if (likely(NULL != ioctx)) {
2876 - struct completion requests_done =
2877 - COMPLETION_INITIALIZER_ONSTACK(requests_done);
2878 + struct ctx_rq_wait wait;
2879 int ret;
2880
2881 + init_completion(&wait.comp);
2882 + atomic_set(&wait.count, 1);
2883 +
2884 /* Pass requests_done to kill_ioctx() where it can be set
2885 * in a thread-safe way. If we try to set it here then we have
2886 * a race condition if two io_destroy() called simultaneously.
2887 */
2888 - ret = kill_ioctx(current->mm, ioctx, &requests_done);
2889 + ret = kill_ioctx(current->mm, ioctx, &wait);
2890 percpu_ref_put(&ioctx->users);
2891
2892 /* Wait until all IO for the context are done. Otherwise kernel
2893 @@ -1337,7 +1352,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
2894 * is destroyed.
2895 */
2896 if (!ret)
2897 - wait_for_completion(&requests_done);
2898 + wait_for_completion(&wait.comp);
2899
2900 return ret;
2901 }
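
The aio rework replaces one on-stack completion per context with a shared completion plus an atomic count: every context that drains decrements the count, the last one fires the completion, and exit_aio subtracts the slots it skipped so the count can still reach zero. The kernel uses atomic_dec_and_test plus a completion; the same "last one out signals" pattern expressed with a mutex and condition variable (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
    static int count;                  /* contexts still draining */

    static void *drain(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        if (--count == 0)              /* last one out signals the waiter */
            pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t[3];
        count = 3;

        for (int i = 0; i < 3; i++)
            pthread_create(&t[i], NULL, drain, NULL);

        pthread_mutex_lock(&lock);
        while (count)                  /* wait once for all of them */
            pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 3; i++)
            pthread_join(t[i], NULL);
        puts("all contexts drained");
        return 0;
    }
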
2902 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2903 index 8b33da6ec3dd..63be2a96ed6a 100644
2904 --- a/fs/btrfs/extent-tree.c
2905 +++ b/fs/btrfs/extent-tree.c
2906 @@ -8535,6 +8535,24 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
2907 trans = btrfs_join_transaction(root);
2908 if (IS_ERR(trans))
2909 return PTR_ERR(trans);
2910 + /*
2911 + * if we are changing raid levels, try to allocate a corresponding
2912 + * block group with the new raid level.
2913 + */
2914 + alloc_flags = update_block_group_flags(root, cache->flags);
2915 + if (alloc_flags != cache->flags) {
2916 + ret = do_chunk_alloc(trans, root, alloc_flags,
2917 + CHUNK_ALLOC_FORCE);
2918 + /*
2919 + * ENOSPC is allowed here, we may have enough space
2920 + * already allocated at the new raid level to
2921 + * carry on
2922 + */
2923 + if (ret == -ENOSPC)
2924 + ret = 0;
2925 + if (ret < 0)
2926 + goto out;
2927 + }
2928
2929 ret = set_block_group_ro(cache, 0);
2930 if (!ret)
2931 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
2932 index d688cfe5d496..782f3bc4651d 100644
2933 --- a/fs/btrfs/extent_io.c
2934 +++ b/fs/btrfs/extent_io.c
2935 @@ -4514,8 +4514,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2936 }
2937 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
2938 em_len, flags);
2939 - if (ret)
2940 + if (ret) {
2941 + if (ret == 1)
2942 + ret = 0;
2943 goto out_free;
2944 + }
2945 }
2946 out_free:
2947 free_extent_map(em);
2948 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
2949 index 2b4c5423672d..64e8fb639f72 100644
2950 --- a/fs/btrfs/ioctl.c
2951 +++ b/fs/btrfs/ioctl.c
2952 @@ -3206,6 +3206,8 @@ static int btrfs_clone(struct inode *src, struct inode *inode,
2953 key.offset = off;
2954
2955 while (1) {
2956 + u64 next_key_min_offset = key.offset + 1;
2957 +
2958 /*
2959 * note the key will change type as we walk through the
2960 * tree.
2961 @@ -3286,7 +3288,7 @@ process_slot:
2962 } else if (key.offset >= off + len) {
2963 break;
2964 }
2965 -
2966 + next_key_min_offset = key.offset + datal;
2967 size = btrfs_item_size_nr(leaf, slot);
2968 read_extent_buffer(leaf, buf,
2969 btrfs_item_ptr_offset(leaf, slot),
2970 @@ -3501,7 +3503,7 @@ process_slot:
2971 break;
2972 }
2973 btrfs_release_path(path);
2974 - key.offset++;
2975 + key.offset = next_key_min_offset;
2976 }
2977 ret = 0;
2978
2979 diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
2980 index d6033f540cc7..571de5a08fe7 100644
2981 --- a/fs/btrfs/send.c
2982 +++ b/fs/btrfs/send.c
2983 @@ -5852,19 +5852,20 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
2984 ret = PTR_ERR(clone_root);
2985 goto out;
2986 }
2987 - clone_sources_to_rollback = i + 1;
2988 spin_lock(&clone_root->root_item_lock);
2989 - clone_root->send_in_progress++;
2990 - if (!btrfs_root_readonly(clone_root)) {
2991 + if (!btrfs_root_readonly(clone_root) ||
2992 + btrfs_root_dead(clone_root)) {
2993 spin_unlock(&clone_root->root_item_lock);
2994 srcu_read_unlock(&fs_info->subvol_srcu, index);
2995 ret = -EPERM;
2996 goto out;
2997 }
2998 + clone_root->send_in_progress++;
2999 spin_unlock(&clone_root->root_item_lock);
3000 srcu_read_unlock(&fs_info->subvol_srcu, index);
3001
3002 sctx->clone_roots[i].root = clone_root;
3003 + clone_sources_to_rollback = i + 1;
3004 }
3005 vfree(clone_sources_tmp);
3006 clone_sources_tmp = NULL;
3007 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3008 index 05fef198ff94..e477ed67a49a 100644
3009 --- a/fs/btrfs/super.c
3010 +++ b/fs/btrfs/super.c
3011 @@ -901,6 +901,15 @@ find_root:
3012 if (IS_ERR(new_root))
3013 return ERR_CAST(new_root);
3014
3015 + if (!(sb->s_flags & MS_RDONLY)) {
3016 + int ret;
3017 + down_read(&fs_info->cleanup_work_sem);
3018 + ret = btrfs_orphan_cleanup(new_root);
3019 + up_read(&fs_info->cleanup_work_sem);
3020 + if (ret)
3021 + return ERR_PTR(ret);
3022 + }
3023 +
3024 dir_id = btrfs_root_dirid(&new_root->root_item);
3025 setup_root:
3026 location.objectid = dir_id;
3027 diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
3028 index aff923ae8c4b..d87d8eced064 100644
3029 --- a/include/linux/backing-dev.h
3030 +++ b/include/linux/backing-dev.h
3031 @@ -116,7 +116,6 @@ __printf(3, 4)
3032 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
3033 const char *fmt, ...);
3034 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
3035 -void bdi_unregister(struct backing_dev_info *bdi);
3036 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
3037 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
3038 enum wb_reason reason);
3039 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
3040 index 5976bdecf58b..9fe865ccc3f3 100644
3041 --- a/include/net/inet_connection_sock.h
3042 +++ b/include/net/inet_connection_sock.h
3043 @@ -98,7 +98,8 @@ struct inet_connection_sock {
3044 const struct tcp_congestion_ops *icsk_ca_ops;
3045 const struct inet_connection_sock_af_ops *icsk_af_ops;
3046 unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
3047 - __u8 icsk_ca_state:7,
3048 + __u8 icsk_ca_state:6,
3049 + icsk_ca_setsockopt:1,
3050 icsk_ca_dst_locked:1;
3051 __u8 icsk_retransmits;
3052 __u8 icsk_pending;
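
The inet_connection_sock change steals a bit from icsk_ca_state (7 bits down to 6) to add icsk_ca_setsockopt without growing the structure; all three fields still pack into one __u8. Demonstrated standalone:

    #include <stdio.h>

    struct flags {
        unsigned char state:6;       /* was 7 bits; 6 still covers the enum */
        unsigned char setsockopt:1;  /* new flag fits in the freed bit */
        unsigned char dst_locked:1;
    };                               /* 6 + 1 + 1 = 8 bits: one byte total */

    int main(void)
    {
        struct flags f = { .state = 5, .setsockopt = 1, .dst_locked = 0 };
        printf("sizeof = %zu, state = %u\n", sizeof(f), f.state);
        return 0;
    }
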
3053 diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
3054 index 856f01cb51dd..230775f5952a 100644
3055 --- a/include/net/sctp/sctp.h
3056 +++ b/include/net/sctp/sctp.h
3057 @@ -571,11 +571,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
3058 /* Map v4 address to v4-mapped v6 address */
3059 static inline void sctp_v4_map_v6(union sctp_addr *addr)
3060 {
3061 + __be16 port;
3062 +
3063 + port = addr->v4.sin_port;
3064 + addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
3065 + addr->v6.sin6_port = port;
3066 addr->v6.sin6_family = AF_INET6;
3067 addr->v6.sin6_flowinfo = 0;
3068 addr->v6.sin6_scope_id = 0;
3069 - addr->v6.sin6_port = addr->v4.sin_port;
3070 - addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
3071 addr->v6.sin6_addr.s6_addr32[0] = 0;
3072 addr->v6.sin6_addr.s6_addr32[1] = 0;
3073 addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
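
The sctp_v4_map_v6 fix is an aliasing-order bug: the v4 and v6 forms share one union, and sin6_flowinfo occupies the same bytes as sin_addr, so zeroing the v6 fields first wiped the IPv4 address before it could be copied into the mapped form. A toy union with the same overlapping layout (field widths are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Toy overlay mirroring sockaddr_in / sockaddr_in6 in one union:
     * v4.addr and v6.flowinfo occupy the same bytes. */
    union addr {
        struct { uint16_t family, port; uint32_t addr; } v4;
        struct { uint16_t family, port; uint32_t flowinfo;
                 uint8_t addr[16]; } v6;
    };

    int main(void)
    {
        union addr u = { .v4 = { 2 /* AF_INET */, 80, 0x0100007f } };

        /* Read the overlapping v4 fields *before* writing the v6 view:
         * zeroing flowinfo would clobber v4.addr (same offset). */
        uint32_t a4   = u.v4.addr;
        uint16_t port = u.v4.port;

        u.v6.family = 10;                /* AF_INET6 */
        u.v6.flowinfo = 0;               /* overwrites the old v4.addr bytes */
        memset(u.v6.addr, 0, 10);
        u.v6.addr[10] = u.v6.addr[11] = 0xff;
        memcpy(&u.v6.addr[12], &a4, 4);  /* ::ffff:a.b.c.d */
        u.v6.port = port;

        printf("port=%u last byte=0x%02x\n", u.v6.port, u.v6.addr[15]);
        return 0;
    }
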
3074 diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
3075 index 5a14ead59696..885d3a380451 100644
3076 --- a/include/trace/events/writeback.h
3077 +++ b/include/trace/events/writeback.h
3078 @@ -233,7 +233,6 @@ DEFINE_EVENT(writeback_class, name, \
3079 DEFINE_WRITEBACK_EVENT(writeback_nowork);
3080 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
3081 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
3082 -DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
3083
3084 DECLARE_EVENT_CLASS(wbc_class,
3085 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
3086 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3087 index 241213be507c..486d00c408b0 100644
3088 --- a/kernel/sched/fair.c
3089 +++ b/kernel/sched/fair.c
3090 @@ -2166,7 +2166,7 @@ void task_numa_work(struct callback_head *work)
3091 }
3092 for (; vma; vma = vma->vm_next) {
3093 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
3094 - is_vm_hugetlb_page(vma)) {
3095 + is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
3096 continue;
3097 }
3098
3099 diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
3100 index 13d945c0d03f..1b28df2d9104 100644
3101 --- a/kernel/trace/ring_buffer_benchmark.c
3102 +++ b/kernel/trace/ring_buffer_benchmark.c
3103 @@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
3104
3105 if (producer_fifo >= 0) {
3106 struct sched_param param = {
3107 - .sched_priority = consumer_fifo
3108 + .sched_priority = producer_fifo
3109 };
3110 sched_setscheduler(producer, SCHED_FIFO, &param);
3111 } else
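
[Note: the ring_buffer_benchmark hunk is a one-line copy-paste fix: the producer kthread was being handed the consumer's FIFO priority. A hedged userspace analogue (the module parameters are stand-ins, and sched_setscheduler() only succeeds with CAP_SYS_NICE or root):]

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int producer_fifo = 10;		/* stand-ins for the module parameters */
	int consumer_fifo = 5;

	(void)consumer_fifo;		/* must NOT leak into the producer */
	struct sched_param param = {
		.sched_priority = producer_fifo	/* the one-line fix */
	};
	if (sched_setscheduler(0, SCHED_FIFO, &param))
		perror("sched_setscheduler");	/* EPERM without privileges */
	return 0;
}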
3112 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
3113 index 6dc4580df2af..000e7b3b9896 100644
3114 --- a/mm/backing-dev.c
3115 +++ b/mm/backing-dev.c
3116 @@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
3117 flush_delayed_work(&bdi->wb.dwork);
3118 }
3119
3120 -/*
3121 - * Called when the device behind @bdi has been removed or ejected.
3122 - *
3123 - * We can't really do much here except for reducing the dirty ratio at
3124 - * the moment. In the future we should be able to set a flag so that
3125 - * the filesystem can handle errors at mark_inode_dirty time instead
3126 - * of only at writeback time.
3127 - */
3128 -void bdi_unregister(struct backing_dev_info *bdi)
3129 -{
3130 - if (WARN_ON_ONCE(!bdi->dev))
3131 - return;
3132 -
3133 - bdi_set_min_ratio(bdi, 0);
3134 -}
3135 -EXPORT_SYMBOL(bdi_unregister);
3136 -
3137 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
3138 {
3139 memset(wb, 0, sizeof(*wb));
3140 @@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
3141 int i;
3142
3143 bdi_wb_shutdown(bdi);
3144 + bdi_set_min_ratio(bdi, 0);
3145
3146 WARN_ON(!list_empty(&bdi->work_list));
3147 WARN_ON(delayed_work_pending(&bdi->wb.dwork));
3148 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
3149 index 65842d688b7c..93caba791cde 100644
3150 --- a/mm/memory_hotplug.c
3151 +++ b/mm/memory_hotplug.c
3152 @@ -1978,8 +1978,10 @@ void try_offline_node(int nid)
3153 * wait_table may be allocated from boot memory,
3154 * here only free if it's allocated by vmalloc.
3155 */
3156 - if (is_vmalloc_addr(zone->wait_table))
3157 + if (is_vmalloc_addr(zone->wait_table)) {
3158 vfree(zone->wait_table);
3159 + zone->wait_table = NULL;
3160 + }
3161 }
3162 }
3163 EXPORT_SYMBOL(try_offline_node);
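
[Note: the memory_hotplug hunk NULLs zone->wait_table after vfree(). Without that, re-onlining the node would find a stale non-NULL pointer that looks like a live table, risking reuse or a double free. A minimal userspace sketch of the free-then-NULL idiom, with a stand-in struct zone:]

#include <stdlib.h>

struct zone { unsigned long *wait_table; };

static void offline_zone(struct zone *z)
{
	free(z->wait_table);
	z->wait_table = NULL;		/* make repeat teardown a no-op */
}

int main(void)
{
	struct zone z = { .wait_table = malloc(64 * sizeof(unsigned long)) };

	offline_zone(&z);
	offline_zone(&z);		/* safe: free(NULL) is defined */
	return 0;
}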
3164 diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
3165 index e0670d7054f9..659fb96672e4 100644
3166 --- a/net/bridge/br_fdb.c
3167 +++ b/net/bridge/br_fdb.c
3168 @@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
3169 int err = 0;
3170
3171 if (ndm->ndm_flags & NTF_USE) {
3172 + local_bh_disable();
3173 rcu_read_lock();
3174 br_fdb_update(p->br, p, addr, vid, true);
3175 rcu_read_unlock();
3176 + local_bh_enable();
3177 } else {
3178 spin_lock_bh(&p->br->hash_lock);
3179 err = fdb_add_entry(p, addr, ndm->ndm_state,
3180 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3181 index c465876c7861..b0aee78dba41 100644
3182 --- a/net/bridge/br_multicast.c
3183 +++ b/net/bridge/br_multicast.c
3184 @@ -1071,7 +1071,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
3185
3186 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
3187 vid);
3188 - if (!err)
3189 + if (err)
3190 break;
3191 }
3192
3193 @@ -1821,7 +1821,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
3194 if (query->startup_sent < br->multicast_startup_query_count)
3195 query->startup_sent++;
3196
3197 - RCU_INIT_POINTER(querier, NULL);
3198 + RCU_INIT_POINTER(querier->port, NULL);
3199 br_multicast_send_query(br, NULL, query);
3200 spin_unlock(&br->multicast_lock);
3201 }
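
[Note: the first br_multicast hunk flips an inverted test. Breaking on !err made a multi-record MLDv2 report stop after the first successfully added group; the loop must instead bail out on failure and keep going on success. A tiny runnable sketch of the control flow, with a fake add_group() standing in for br_ip6_multicast_add_group():]

#include <stdio.h>

static int add_group(int grec) { return grec == 3 ? -1 : 0; }	/* fake -EINVAL */

int main(void)
{
	int err = 0;

	for (int grec = 0; grec < 5; grec++) {
		err = add_group(grec);
		if (err)		/* was: if (!err) */
			break;
		printf("added group %d\n", grec);
	}
	return err ? 1 : 0;
}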
3202 diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
3203 index a6e2da0bc718..982101c12258 100644
3204 --- a/net/caif/caif_socket.c
3205 +++ b/net/caif/caif_socket.c
3206 @@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
3207 release_sock(sk);
3208 timeo = schedule_timeout(timeo);
3209 lock_sock(sk);
3210 +
3211 + if (sock_flag(sk, SOCK_DEAD))
3212 + break;
3213 +
3214 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
3215 }
3216
3217 @@ -374,6 +378,10 @@ static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3218 struct sk_buff *skb;
3219
3220 lock_sock(sk);
3221 + if (sock_flag(sk, SOCK_DEAD)) {
3222 + err = -ECONNRESET;
3223 + goto unlock;
3224 + }
3225 skb = skb_dequeue(&sk->sk_receive_queue);
3226 caif_check_flow_release(sk);
3227
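
[Note: both caif hunks, like the af_unix hunks further down, add the classic "re-check after sleeping" guard: the wait loop drops the socket lock while it sleeps, so the peer can mark the socket dead in the meantime, and the woken thread must notice before touching it. A hedged pthreads sketch of the pattern; sock_dead and data_ready are stand-ins for SOCK_DEAD and the receive queue:]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool sock_dead;			/* set by the teardown path */
static bool data_ready;

static int wait_for_data(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	while (!data_ready && !sock_dead)
		pthread_cond_wait(&cond, &lock);	/* lock dropped while asleep */
	if (sock_dead)			/* re-checked after every wakeup */
		err = -1;		/* kernel returns -ECONNRESET */
	pthread_mutex_unlock(&lock);
	return err;
}

static void *teardown(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	sock_dead = true;		/* peer went away mid-wait */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, teardown, NULL);
	printf("recvmsg -> %d\n", wait_for_data());
	pthread_join(t, NULL);
	return 0;
}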
3228 diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
3229 index a1ef53c04415..b1f2d1f44d37 100644
3230 --- a/net/ceph/crush/mapper.c
3231 +++ b/net/ceph/crush/mapper.c
3232 @@ -290,6 +290,7 @@ static int is_out(const struct crush_map *map,
3233 * @type: the type of item to choose
3234 * @out: pointer to output vector
3235 * @outpos: our position in that vector
3236 + * @out_size: size of the out vector
3237 * @tries: number of attempts to make
3238 * @recurse_tries: number of attempts to have recursive chooseleaf make
3239 * @local_retries: localized retries
3240 @@ -304,6 +305,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3241 const __u32 *weight, int weight_max,
3242 int x, int numrep, int type,
3243 int *out, int outpos,
3244 + int out_size,
3245 unsigned int tries,
3246 unsigned int recurse_tries,
3247 unsigned int local_retries,
3248 @@ -322,6 +324,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3249 int item = 0;
3250 int itemtype;
3251 int collide, reject;
3252 + int count = out_size;
3253
3254 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
3255 recurse_to_leaf ? "_LEAF" : "",
3256 @@ -329,7 +332,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3257 tries, recurse_tries, local_retries, local_fallback_retries,
3258 parent_r);
3259
3260 - for (rep = outpos; rep < numrep; rep++) {
3261 + for (rep = outpos; rep < numrep && count > 0 ; rep++) {
3262 /* keep trying until we get a non-out, non-colliding item */
3263 ftotal = 0;
3264 skip_rep = 0;
3265 @@ -403,7 +406,7 @@ static int crush_choose_firstn(const struct crush_map *map,
3266 map->buckets[-1-item],
3267 weight, weight_max,
3268 x, outpos+1, 0,
3269 - out2, outpos,
3270 + out2, outpos, count,
3271 recurse_tries, 0,
3272 local_retries,
3273 local_fallback_retries,
3274 @@ -463,6 +466,7 @@ reject:
3275 dprintk("CHOOSE got %d\n", item);
3276 out[outpos] = item;
3277 outpos++;
3278 + count--;
3279 }
3280
3281 dprintk("CHOOSE returns %d\n", outpos);
3282 @@ -654,6 +658,7 @@ int crush_do_rule(const struct crush_map *map,
3283 __u32 step;
3284 int i, j;
3285 int numrep;
3286 + int out_size;
3287 /*
3288 * the original choose_total_tries value was off by one (it
3289 * counted "retries" and not "tries"). add one.
3290 @@ -761,6 +766,7 @@ int crush_do_rule(const struct crush_map *map,
3291 x, numrep,
3292 curstep->arg2,
3293 o+osize, j,
3294 + result_max-osize,
3295 choose_tries,
3296 recurse_tries,
3297 choose_local_retries,
3298 @@ -770,11 +776,13 @@ int crush_do_rule(const struct crush_map *map,
3299 c+osize,
3300 0);
3301 } else {
3302 + out_size = ((numrep < (result_max-osize)) ?
3303 + numrep : (result_max-osize));
3304 crush_choose_indep(
3305 map,
3306 map->buckets[-1-w[i]],
3307 weight, weight_max,
3308 - x, numrep, numrep,
3309 + x, out_size, numrep,
3310 curstep->arg2,
3311 o+osize, j,
3312 choose_tries,
3313 @@ -783,7 +791,7 @@ int crush_do_rule(const struct crush_map *map,
3314 recurse_to_leaf,
3315 c+osize,
3316 0);
3317 - osize += numrep;
3318 + osize += out_size;
3319 }
3320 }
3321
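
[Note: the crush mapper hunks thread an out_size through crush_choose_firstn() so a rule step can never emit more items than the caller's result vector has room for: each step is clamped to result_max - osize, no matter how large numrep is. A one-screen sketch of the clamp with stand-in values:]

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int result_max = 4;		/* size of the caller's result vector */
	int osize = 3;			/* entries already produced */
	int numrep = 3;			/* what the rule step asks for */

	int out_size = min_int(numrep, result_max - osize);
	printf("emit %d of %d requested\n", out_size, numrep);	/* 1 of 3 */
	return 0;
}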
3322 diff --git a/net/core/dev.c b/net/core/dev.c
3323 index 22a53acdb5bb..e977e15c2ac0 100644
3324 --- a/net/core/dev.c
3325 +++ b/net/core/dev.c
3326 @@ -5170,7 +5170,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
3327 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
3328 return -EBUSY;
3329
3330 - if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
3331 + if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
3332 return -EEXIST;
3333
3334 if (master && netdev_master_upper_dev_get(dev))
3335 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
3336 index 7ebed55b5f7d..a2b90e1fc115 100644
3337 --- a/net/core/rtnetlink.c
3338 +++ b/net/core/rtnetlink.c
3339 @@ -2337,6 +2337,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
3340 {
3341 struct sk_buff *skb;
3342
3343 + if (dev->reg_state != NETREG_REGISTERED)
3344 + return;
3345 +
3346 skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
3347 if (skb)
3348 rtmsg_ifinfo_send(skb, dev, flags);
3349 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3350 index 20fc0202cbbe..e262a087050b 100644
3351 --- a/net/ipv4/route.c
3352 +++ b/net/ipv4/route.c
3353 @@ -903,6 +903,10 @@ static int ip_error(struct sk_buff *skb)
3354 bool send;
3355 int code;
3356
3357 + /* IP on this device is disabled. */
3358 + if (!in_dev)
3359 + goto out;
3360 +
3361 net = dev_net(rt->dst.dev);
3362 if (!IN_DEV_FORWARD(in_dev)) {
3363 switch (rt->dst.error) {
3364 diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
3365 index 62856e185a93..9d2fbd88df93 100644
3366 --- a/net/ipv4/tcp_cong.c
3367 +++ b/net/ipv4/tcp_cong.c
3368 @@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
3369
3370 tcp_cleanup_congestion_control(sk);
3371 icsk->icsk_ca_ops = ca;
3372 + icsk->icsk_ca_setsockopt = 1;
3373
3374 if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
3375 icsk->icsk_ca_ops->init(sk);
3376 @@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
3377 rcu_read_lock();
3378 ca = __tcp_ca_find_autoload(name);
3379 /* No change asking for existing value */
3380 - if (ca == icsk->icsk_ca_ops)
3381 + if (ca == icsk->icsk_ca_ops) {
3382 + icsk->icsk_ca_setsockopt = 1;
3383 goto out;
3384 + }
3385 if (!ca)
3386 err = -ENOENT;
3387 else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
3388 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
3389 index dd11ac7798c6..50277af92485 100644
3390 --- a/net/ipv4/tcp_minisocks.c
3391 +++ b/net/ipv4/tcp_minisocks.c
3392 @@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
3393 tw->tw_v6_daddr = sk->sk_v6_daddr;
3394 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
3395 tw->tw_tclass = np->tclass;
3396 - tw->tw_flowlabel = np->flow_label >> 12;
3397 + tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
3398 tw->tw_ipv6only = sk->sk_ipv6only;
3399 }
3400 #endif
3401 @@ -437,7 +437,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
3402 rcu_read_unlock();
3403 }
3404
3405 - if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
3406 + /* If no valid choice made yet, assign current system default ca. */
3407 + if (!ca_got_dst &&
3408 + (!icsk->icsk_ca_setsockopt ||
3409 + !try_module_get(icsk->icsk_ca_ops->owner)))
3410 tcp_assign_congestion_control(sk);
3411
3412 tcp_set_ca_state(sk, TCP_CA_Open);
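
[Note: the first tcp_minisocks hunk, together with the tcp_ipv6.c hunk below, fixes flow-label byte-order handling. flow_label is kept in network byte order with the 20-bit label in the low bits of the host-order value, so shifting the raw __be32 right by 12 mangles it on little-endian hosts; the fix masks with IPV6_FLOWLABEL_MASK and converts, then converts back with cpu_to_be32() on send. A runnable sketch of the difference (in the kernel, IPV6_FLOWLABEL_MASK is htonl(0x000FFFFF)):]

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t flow_label = htonl(0x000abcde);	/* on-wire form */
	uint32_t mask       = htonl(0x000FFFFF);	/* IPV6_FLOWLABEL_MASK */

	uint32_t buggy = flow_label >> 12;		/* raw shift of __be32 */
	uint32_t fixed = ntohl(flow_label & mask);	/* host-order label */

	printf("buggy=%#x fixed=%#x\n", buggy, fixed);	/* fixed == 0xabcde */
	return 0;
}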
3413 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
3414 index 97ef1f8b7be8..51f17454bd7b 100644
3415 --- a/net/ipv4/udp.c
3416 +++ b/net/ipv4/udp.c
3417 @@ -90,6 +90,7 @@
3418 #include <linux/socket.h>
3419 #include <linux/sockios.h>
3420 #include <linux/igmp.h>
3421 +#include <linux/inetdevice.h>
3422 #include <linux/in.h>
3423 #include <linux/errno.h>
3424 #include <linux/timer.h>
3425 @@ -1348,10 +1349,8 @@ csum_copy_err:
3426 }
3427 unlock_sock_fast(sk, slow);
3428
3429 - if (noblock)
3430 - return -EAGAIN;
3431 -
3432 - /* starting over for a new packet */
3433 + /* starting over for a new packet, but check if we need to yield */
3434 + cond_resched();
3435 msg->msg_flags &= ~MSG_TRUNC;
3436 goto try_again;
3437 }
3438 @@ -1968,6 +1967,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
3439 struct sock *sk;
3440 struct dst_entry *dst;
3441 int dif = skb->dev->ifindex;
3442 + int ours;
3443
3444 /* validate the packet */
3445 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
3446 @@ -1977,14 +1977,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
3447 uh = udp_hdr(skb);
3448
3449 if (skb->pkt_type == PACKET_BROADCAST ||
3450 - skb->pkt_type == PACKET_MULTICAST)
3451 + skb->pkt_type == PACKET_MULTICAST) {
3452 + struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
3453 +
3454 + if (!in_dev)
3455 + return;
3456 +
3457 + ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
3458 + iph->protocol);
3459 + if (!ours)
3460 + return;
3461 sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
3462 uh->source, iph->saddr, dif);
3463 - else if (skb->pkt_type == PACKET_HOST)
3464 + } else if (skb->pkt_type == PACKET_HOST) {
3465 sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
3466 uh->source, iph->saddr, dif);
3467 - else
3468 + } else {
3469 return;
3470 + }
3471
3472 if (!sk)
3473 return;
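
[Note: the csum_copy_err change in udp.c (mirrored for IPv6 below) stops returning -EAGAIN to nonblocking callers on a checksum error and instead loops for the next datagram, inserting cond_resched() so a peer spraying bad-checksum packets cannot monopolize the CPU. A hedged userspace analogue of the retry-with-yield loop; recv_one() is a stand-in that fails twice before delivering a good packet:]

#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

static bool recv_one(int attempt) { return attempt >= 2; }	/* bad csums first */

int main(void)
{
	int attempt = 0;

	while (!recv_one(attempt++)) {
		/* starting over for a new packet, but let others run */
		sched_yield();		/* plays the role of cond_resched() */
	}
	printf("good datagram after %d tries\n", attempt);
	return 0;
}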
3474 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3475 index 1f5e62229aaa..5ca3bc880fef 100644
3476 --- a/net/ipv6/tcp_ipv6.c
3477 +++ b/net/ipv6/tcp_ipv6.c
3478 @@ -975,7 +975,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
3479 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
3480 tcp_time_stamp + tcptw->tw_ts_offset,
3481 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
3482 - tw->tw_tclass, (tw->tw_flowlabel << 12));
3483 + tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
3484
3485 inet_twsk_put(tw);
3486 }
3487 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
3488 index d048d46779fc..1c9512aba77e 100644
3489 --- a/net/ipv6/udp.c
3490 +++ b/net/ipv6/udp.c
3491 @@ -528,10 +528,8 @@ csum_copy_err:
3492 }
3493 unlock_sock_fast(sk, slow);
3494
3495 - if (noblock)
3496 - return -EAGAIN;
3497 -
3498 - /* starting over for a new packet */
3499 + /* starting over for a new packet, but check if we need to yield */
3500 + cond_resched();
3501 msg->msg_flags &= ~MSG_TRUNC;
3502 goto try_again;
3503 }
3504 @@ -734,7 +732,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
3505 (inet->inet_dport && inet->inet_dport != rmt_port) ||
3506 (!ipv6_addr_any(&sk->sk_v6_daddr) &&
3507 !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
3508 - (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
3509 + (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
3510 + (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
3511 + !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
3512 return false;
3513 if (!inet6_mc_check(sk, loc_addr, rmt_addr))
3514 return false;
3515 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3516 index d1d7a8166f46..0e9c28dc86b7 100644
3517 --- a/net/netlink/af_netlink.c
3518 +++ b/net/netlink/af_netlink.c
3519 @@ -1052,7 +1052,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
3520 struct netlink_table *table = &nl_table[sk->sk_protocol];
3521 int err;
3522
3523 - lock_sock(sk);
3524 + mutex_lock(&table->hash.mutex);
3525
3526 err = -EBUSY;
3527 if (nlk_sk(sk)->portid)
3528 @@ -1069,11 +1069,12 @@ static int netlink_insert(struct sock *sk, u32 portid)
3529 err = 0;
3530 if (!__netlink_insert(table, sk)) {
3531 err = -EADDRINUSE;
3532 + nlk_sk(sk)->portid = 0;
3533 sock_put(sk);
3534 }
3535
3536 err:
3537 - release_sock(sk);
3538 + mutex_unlock(&table->hash.mutex);
3539 return err;
3540 }
3541
3542 @@ -1082,10 +1083,12 @@ static void netlink_remove(struct sock *sk)
3543 struct netlink_table *table;
3544
3545 table = &nl_table[sk->sk_protocol];
3546 + mutex_lock(&table->hash.mutex);
3547 if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
3548 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
3549 __sock_put(sk);
3550 }
3551 + mutex_unlock(&table->hash.mutex);
3552
3553 netlink_table_grab();
3554 if (nlk_sk(sk)->subscriptions) {
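
[Note: besides moving netlink bind serialization from lock_sock() to the hash table's own mutex, the netlink_insert() hunk adds a rollback: the portid is assigned speculatively before the hash insert, so a failed insert must reset it to 0 or the socket would look bound while absent from the table, breaking later autobind retries. A minimal sketch of the rollback shape with stand-in types; table_insert() here always fails to show the path:]

#include <stdbool.h>
#include <stdio.h>

struct nlk_sock { unsigned int portid; };

static bool table_insert(struct nlk_sock *sk) { (void)sk; return false; }

static int netlink_insert(struct nlk_sock *sk, unsigned int portid)
{
	sk->portid = portid;		/* speculative assignment */
	if (!table_insert(sk)) {
		sk->portid = 0;		/* the added rollback */
		return -1;		/* kernel returns -EADDRINUSE */
	}
	return 0;
}

int main(void)
{
	struct nlk_sock sk = { 0 };

	printf("insert=%d portid=%u\n", netlink_insert(&sk, 1234), sk.portid);
	return 0;
}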
3555 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
3556 index baef987fe2c0..d3328a19f5b2 100644
3557 --- a/net/sched/cls_api.c
3558 +++ b/net/sched/cls_api.c
3559 @@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
3560 struct tcf_proto_ops *t;
3561 int rc = -ENOENT;
3562
3563 + /* Wait for outstanding call_rcu()s, if any, from a
3564 + * tcf_proto_ops's destroy() handler.
3565 + */
3566 + rcu_barrier();
3567 +
3568 write_lock(&cls_mod_lock);
3569 list_for_each_entry(t, &tcf_proto_base, head) {
3570 if (t == ops) {
3571 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
3572 index 243b7d169d61..d9c2ee6d2959 100644
3573 --- a/net/sched/sch_api.c
3574 +++ b/net/sched/sch_api.c
3575 @@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
3576 if (dev->flags & IFF_UP)
3577 dev_deactivate(dev);
3578
3579 - if (new && new->ops->attach) {
3580 - new->ops->attach(new);
3581 - num_q = 0;
3582 - }
3583 + if (new && new->ops->attach)
3584 + goto skip;
3585
3586 for (i = 0; i < num_q; i++) {
3587 struct netdev_queue *dev_queue = dev_ingress_queue(dev);
3588 @@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
3589 qdisc_destroy(old);
3590 }
3591
3592 +skip:
3593 if (!ingress) {
3594 notify_and_destroy(net, skb, n, classid,
3595 dev->qdisc, new);
3596 if (new && !new->ops->attach)
3597 atomic_inc(&new->refcnt);
3598 dev->qdisc = new ? : &noop_qdisc;
3599 +
3600 + if (new && new->ops->attach)
3601 + new->ops->attach(new);
3602 } else {
3603 notify_and_destroy(net, skb, n, classid, old, new);
3604 }
3605 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
3606 index 526b6edab018..146881f068e2 100644
3607 --- a/net/unix/af_unix.c
3608 +++ b/net/unix/af_unix.c
3609 @@ -1887,6 +1887,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
3610 unix_state_unlock(sk);
3611 timeo = freezable_schedule_timeout(timeo);
3612 unix_state_lock(sk);
3613 +
3614 + if (sock_flag(sk, SOCK_DEAD))
3615 + break;
3616 +
3617 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
3618 }
3619
3620 @@ -1947,6 +1951,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
3621 struct sk_buff *skb, *last;
3622
3623 unix_state_lock(sk);
3624 + if (sock_flag(sk, SOCK_DEAD)) {
3625 + err = -ECONNRESET;
3626 + goto unlock;
3627 + }
3628 last = skb = skb_peek(&sk->sk_receive_queue);
3629 again:
3630 if (skb == NULL) {
3631 diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
3632 index 5b24d39d7903..318026617b57 100644
3633 --- a/net/wireless/wext-compat.c
3634 +++ b/net/wireless/wext-compat.c
3635 @@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
3636 memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
3637 wdev_unlock(wdev);
3638
3639 + memset(&sinfo, 0, sizeof(sinfo));
3640 +
3641 if (rdev_get_station(rdev, dev, bssid, &sinfo))
3642 return NULL;
3643
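
[Note: the wext-compat hunk zeroes the on-stack sinfo before the driver fills it. rdev_get_station() may set only the fields it knows, and whatever stack garbage remains in the rest would otherwise be copied out via the wireless-extensions stats path. A minimal sketch of the zero-before-partial-fill idiom; station_info and get_station() are simplified stand-ins:]

#include <stdio.h>
#include <string.h>

struct station_info { unsigned int filled; int signal; long rx_bytes; };

static void get_station(struct station_info *s)
{
	s->signal = -42;		/* driver fills only what it knows */
}

int main(void)
{
	struct station_info sinfo;

	memset(&sinfo, 0, sizeof(sinfo));	/* the added line */
	get_station(&sinfo);
	printf("signal=%d rx_bytes=%ld\n", sinfo.signal, sinfo.rx_bytes);
	return 0;
}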
3644 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3645 index 93c78c3c4b95..a556d63564e6 100644
3646 --- a/sound/pci/hda/patch_realtek.c
3647 +++ b/sound/pci/hda/patch_realtek.c
3648 @@ -2167,6 +2167,7 @@ static const struct hda_fixup alc882_fixups[] = {
3649 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
3650 SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
3651 SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3652 + SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3653 SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
3654 SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
3655 SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
3656 diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3657 index 3e2ef61c627b..8b7e391dd0b8 100644
3658 --- a/sound/usb/mixer.c
3659 +++ b/sound/usb/mixer.c
3660 @@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
3661 case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
3662 case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
3663 case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
3664 + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
3665 case USB_ID(0x046d, 0x0991):
3666 /* Most audio usb devices lie about volume resolution.
3667 * Most Logitech webcams have res = 384.
3668 @@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
3669 unitid);
3670 return -EINVAL;
3671 }
3672 - /* no bmControls field (e.g. Maya44) -> ignore */
3673 - if (desc->bLength <= 10 + input_pins) {
3674 - usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
3675 - unitid);
3676 - return 0;
3677 - }
3678
3679 num_ins = 0;
3680 ich = 0;
3681 @@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
3682 err = parse_audio_unit(state, desc->baSourceID[pin]);
3683 if (err < 0)
3684 continue;
3685 + /* no bmControls field (e.g. Maya44) -> ignore */
3686 + if (desc->bLength <= 10 + input_pins)
3687 + continue;
3688 err = check_input_term(state, desc->baSourceID[pin], &iterm);
3689 if (err < 0)
3690 return err;
3691 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
3692 index b703cb3cda19..e5000da9e9d7 100644
3693 --- a/sound/usb/mixer_maps.c
3694 +++ b/sound/usb/mixer_maps.c
3695 @@ -437,6 +437,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
3696 .map = ebox44_map,
3697 },
3698 {
3699 + /* MAYA44 USB+ */
3700 + .id = USB_ID(0x2573, 0x0008),
3701 + .map = maya44_map,
3702 + },
3703 + {
3704 /* KEF X300A */
3705 .id = USB_ID(0x27ac, 0x1000),
3706 .map = scms_usb3318_map,
3707 diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
3708 index e21ec5abcc3a..2a408c60114b 100644
3709 --- a/sound/usb/quirks.c
3710 +++ b/sound/usb/quirks.c
3711 @@ -1120,6 +1120,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
3712 case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
3713 case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
3714 case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
3715 + case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
3716 return true;
3717 }
3718 return false;
3719 @@ -1266,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
3720 if (fp->altsetting == 2)
3721 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3722 break;
3723 - /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
3724 - case USB_ID(0x20b1, 0x2009):
3725 +
3726 + case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
3727 + case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
3728 if (fp->altsetting == 3)
3729 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
3730 break;