Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.17/0102-3.17.3-all-fixes.patch



Revision 2525
Thu Nov 20 11:30:09 2014 UTC by niro
File size: 407935 bytes
-linux-3.17.3
1 diff --git a/Makefile b/Makefile
2 index 390afde6538e..57a45b1ea2c7 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 17
8 -SUBLEVEL = 2
9 +SUBLEVEL = 3
10 EXTRAVERSION =
11 NAME = Shuffling Zombie Juror
12
13 diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
14 index 4f31b2eb5cdf..398064cef746 100644
15 --- a/arch/arc/boot/dts/nsimosci.dts
16 +++ b/arch/arc/boot/dts/nsimosci.dts
17 @@ -20,7 +20,7 @@
18 /* this is for console on PGU */
19 /* bootargs = "console=tty0 consoleblank=0"; */
20 /* this is for console on serial */
21 - bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug";
22 + bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
23 };
24
25 aliases {
26 diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
27 index 372466b371bf..a9cbabe22d99 100644
28 --- a/arch/arc/include/asm/arcregs.h
29 +++ b/arch/arc/include/asm/arcregs.h
30 @@ -191,14 +191,6 @@
31 #define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
32 #define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
33
34 -#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
35 -/* These DPFP regs need to be saved/restored across ctx-sw */
36 -struct arc_fpu {
37 - struct {
38 - unsigned int l, h;
39 - } aux_dpfp[2];
40 -};
41 -#endif
42
43 /*
44 ***************************************************************
45 diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
46 index b65fca7ffeb5..fea931634136 100644
47 --- a/arch/arc/include/asm/kgdb.h
48 +++ b/arch/arc/include/asm/kgdb.h
49 @@ -19,7 +19,7 @@
50 * register API yet */
51 #undef DBG_MAX_REG_NUM
52
53 -#define GDB_MAX_REGS 39
54 +#define GDB_MAX_REGS 87
55
56 #define BREAK_INSTR_SIZE 2
57 #define CACHE_FLUSH_IS_SAFE 1
58 @@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
59
60 extern void kgdb_trap(struct pt_regs *regs);
61
62 -enum arc700_linux_regnums {
63 +/* This is the numbering of registers according to the GDB. See GDB's
64 + * arc-tdep.h for details.
65 + *
66 + * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
67 +enum arc_linux_regnums {
68 _R0 = 0,
69 _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
70 _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
71 _R25, _R26,
72 - _BTA = 27,
73 - _LP_START = 28,
74 - _LP_END = 29,
75 - _LP_COUNT = 30,
76 - _STATUS32 = 31,
77 - _BLINK = 32,
78 - _FP = 33,
79 - __SP = 34,
80 - _EFA = 35,
81 - _RET = 36,
82 - _ORIG_R8 = 37,
83 - _STOP_PC = 38
84 + _FP = 27,
85 + __SP = 28,
86 + _R30 = 30,
87 + _BLINK = 31,
88 + _LP_COUNT = 60,
89 + _STOP_PC = 64,
90 + _RET = 64,
91 + _LP_START = 65,
92 + _LP_END = 66,
93 + _STATUS32 = 67,
94 + _ECR = 76,
95 + _BTA = 82,
96 };
97
98 #else
99 diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
100 index 82588f3ba77f..38175c06f168 100644
101 --- a/arch/arc/include/asm/processor.h
102 +++ b/arch/arc/include/asm/processor.h
103 @@ -20,6 +20,15 @@
104
105 #include <asm/ptrace.h>
106
107 +#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
108 +/* These DPFP regs need to be saved/restored across ctx-sw */
109 +struct arc_fpu {
110 + struct {
111 + unsigned int l, h;
112 + } aux_dpfp[2];
113 +};
114 +#endif
115 +
116 /* Arch specific stuff which needs to be saved per task.
117 * However these items are not so important so as to earn a place in
118 * struct thread_info
119 diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
120 index b11ad54f8d17..2f78e543b315 100644
121 --- a/arch/arm/Kconfig.debug
122 +++ b/arch/arm/Kconfig.debug
123 @@ -1142,7 +1142,7 @@ config DEBUG_UART_VIRT
124 default 0xf1c28000 if DEBUG_SUNXI_UART0
125 default 0xf1c28400 if DEBUG_SUNXI_UART1
126 default 0xf1f02800 if DEBUG_SUNXI_R_UART
127 - default 0xf2100000 if DEBUG_PXA_UART1
128 + default 0xf6200000 if DEBUG_PXA_UART1
129 default 0xf4090000 if ARCH_LPC32XX
130 default 0xf4200000 if ARCH_GEMINI
131 default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \
132 diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
133 index 41afd9da6876..229140b6de64 100644
134 --- a/arch/arm/boot/dts/zynq-parallella.dts
135 +++ b/arch/arm/boot/dts/zynq-parallella.dts
136 @@ -34,6 +34,10 @@
137 };
138 };
139
140 +&clkc {
141 + fclk-enable = <0xf>;
142 +};
143 +
144 &gem0 {
145 status = "okay";
146 phy-mode = "rgmii-id";
147 diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
148 index bbf9df37ad4b..d28fe291233a 100644
149 --- a/arch/arm/mach-pxa/include/mach/addr-map.h
150 +++ b/arch/arm/mach-pxa/include/mach/addr-map.h
151 @@ -39,6 +39,11 @@
152 #define DMEMC_SIZE 0x00100000
153
154 /*
155 + * Reserved space for low level debug virtual addresses within
156 + * 0xf6200000..0xf6201000
157 + */
158 +
159 +/*
160 * Internal Memory Controller (PXA27x and later)
161 */
162 #define IMEMC_PHYS 0x58000000
163 diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
164 index 992aaba603b5..b463f2aa5a61 100644
165 --- a/arch/mips/include/asm/ftrace.h
166 +++ b/arch/mips/include/asm/ftrace.h
167 @@ -24,7 +24,7 @@ do { \
168 asm volatile ( \
169 "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
170 " li %[tmp_err], 0\n" \
171 - "2:\n" \
172 + "2: .insn\n" \
173 \
174 ".section .fixup, \"ax\"\n" \
175 "3: li %[tmp_err], 1\n" \
176 @@ -46,7 +46,7 @@ do { \
177 asm volatile ( \
178 "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
179 " li %[tmp_err], 0\n" \
180 - "2:\n" \
181 + "2: .insn\n" \
182 \
183 ".section .fixup, \"ax\"\n" \
184 "3: li %[tmp_err], 1\n" \
185 diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
186 index bbcfb8ba8106..91a3d197ede3 100644
187 --- a/arch/mips/include/uapi/asm/ptrace.h
188 +++ b/arch/mips/include/uapi/asm/ptrace.h
189 @@ -9,6 +9,8 @@
190 #ifndef _UAPI_ASM_PTRACE_H
191 #define _UAPI_ASM_PTRACE_H
192
193 +#include <linux/types.h>
194 +
195 /* 0 - 31 are integer registers, 32 - 63 are fp registers. */
196 #define FPR_BASE 32
197 #define PC 64
198 diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
199 index a217061beee3..462e34d46b4a 100644
200 --- a/arch/mips/loongson/lemote-2f/clock.c
201 +++ b/arch/mips/loongson/lemote-2f/clock.c
202 @@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
203
204 int clk_set_rate(struct clk *clk, unsigned long rate)
205 {
206 + unsigned int rate_khz = rate / 1000;
207 struct cpufreq_frequency_table *pos;
208 int ret = 0;
209 int regval;
210 @@ -107,9 +108,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
211 propagate_rate(clk);
212
213 cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
214 - if (rate == pos->frequency)
215 + if (rate_khz == pos->frequency)
216 break;
217 - if (rate != pos->frequency)
218 + if (rate_khz != pos->frequency)
219 return -ENOTSUPP;
220
221 clk->rate = rate;
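
A minimal standalone C sketch (editorial illustration, not part of the patch) of the unit fix above: clk_set_rate() receives a rate in Hz, while cpufreq frequency tables store kHz, so the lookup must compare rate / 1000. The table values below are hypothetical.

#include <stdio.h>

struct freq_entry { unsigned int frequency; };	/* kHz, as in cpufreq tables */

static const struct freq_entry table[] = {
	{ 200000 }, { 400000 }, { 800000 }, { 0 }	/* 0 terminates the table */
};

static int rate_supported(unsigned long rate_hz)
{
	unsigned int rate_khz = rate_hz / 1000;	/* the conversion the patch adds */
	const struct freq_entry *pos;

	for (pos = table; pos->frequency; pos++)
		if (rate_khz == pos->frequency)
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", rate_supported(800000000));	/* 800 MHz -> 1 */
	printf("%d\n", rate_supported(300000000));	/* 300 MHz -> 0 */
	return 0;
}
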
222 diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
223 index 7a4727795a70..51a0fde4bec1 100644
224 --- a/arch/mips/math-emu/cp1emu.c
225 +++ b/arch/mips/math-emu/cp1emu.c
226 @@ -1023,7 +1023,7 @@ emul:
227 goto emul;
228
229 case cop1x_op:
230 - if (cpu_has_mips_4_5 || cpu_has_mips64)
231 + if (cpu_has_mips_4_5 || cpu_has_mips64 || cpu_has_mips32r2)
232 /* its one of ours */
233 goto emul;
234
235 @@ -1068,7 +1068,7 @@ emul:
236 break;
237
238 case cop1x_op:
239 - if (!cpu_has_mips_4_5 && !cpu_has_mips64)
240 + if (!cpu_has_mips_4_5 && !cpu_has_mips64 && !cpu_has_mips32r2)
241 return SIGILL;
242
243 sig = fpux_emu(xcp, ctx, ir, fault_addr);
244 diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
245 index a08dd53a1cc5..b5f228e7eae6 100644
246 --- a/arch/mips/mm/tlbex.c
247 +++ b/arch/mips/mm/tlbex.c
248 @@ -1062,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
249 struct mips_huge_tlb_info {
250 int huge_pte;
251 int restore_scratch;
252 + bool need_reload_pte;
253 };
254
255 static struct mips_huge_tlb_info
256 @@ -1076,6 +1077,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
257
258 rv.huge_pte = scratch;
259 rv.restore_scratch = 0;
260 + rv.need_reload_pte = false;
261
262 if (check_for_high_segbits) {
263 UASM_i_MFC0(p, tmp, C0_BADVADDR);
264 @@ -1264,6 +1266,7 @@ static void build_r4000_tlb_refill_handler(void)
265 } else {
266 htlb_info.huge_pte = K0;
267 htlb_info.restore_scratch = 0;
268 + htlb_info.need_reload_pte = true;
269 vmalloc_mode = refill_noscratch;
270 /*
271 * create the plain linear handler
272 @@ -1300,7 +1303,8 @@ static void build_r4000_tlb_refill_handler(void)
273 }
274 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
275 uasm_l_tlb_huge_update(&l, p);
276 - UASM_i_LW(&p, K0, 0, K1);
277 + if (htlb_info.need_reload_pte)
278 + UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
279 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
280 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
281 htlb_info.restore_scratch);
282 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
283 index 5bbd1bc8c3b0..0905c8da90f1 100644
284 --- a/arch/powerpc/kernel/entry_64.S
285 +++ b/arch/powerpc/kernel/entry_64.S
286 @@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
287 3:
288 #endif
289 bl save_nvgprs
290 + /*
291 + * Use a non volatile GPR to save and restore our thread_info flags
292 + * across the call to restore_interrupts.
293 + */
294 + mr r30,r4
295 bl restore_interrupts
296 + mr r4,r30
297 addi r3,r1,STACK_FRAME_OVERHEAD
298 bl do_notify_resume
299 b ret_from_except
300 diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
301 index ad4b31df779a..e4169d68cb32 100644
302 --- a/arch/powerpc/platforms/powernv/opal-lpc.c
303 +++ b/arch/powerpc/platforms/powernv/opal-lpc.c
304 @@ -216,14 +216,54 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
305 &data, len);
306 if (rc)
307 return -ENXIO;
308 +
309 + /*
310 + * Now there is some trickery with the data returned by OPAL
311 + * as it's the desired data right justified in a 32-bit BE
312 + * word.
313 + *
314 + * This is a very bad interface and I'm to blame for it :-(
315 + *
316 + * So we can't just apply a 32-bit swap to what comes from OPAL,
317 + * because user space expects the *bytes* to be in their proper
318 + * respective positions (ie, LPC position).
319 + *
320 + * So what we really want to do here is to shift data right
321 + * appropriately on a LE kernel.
322 + *
323 + * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that
324 + * order, we have in memory written to by OPAL at the "data"
325 + * pointer:
326 + *
327 + * Bytes: OPAL "data" LE "data"
328 + * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0
329 + * 16-bit: B0 B1 0000B0B1 B1B00000
330 + * 8-bit: B0 000000B0 B0000000
331 + *
332 + * So a BE kernel will have the leftmost of the above in the MSB
333 + * and rightmost in the LSB and can just then "cast" the u32 "data"
334 + * down to the appropriate quantity and write it.
335 + *
336 + * However, an LE kernel can't. It doesn't need to swap because a
337 + * load from data followed by a store to user are going to preserve
338 + * the byte ordering which is the wire byte order which is what the
339 + * user wants, but in order to "crop" to the right size, we need to
340 + * shift right first.
341 + */
342 switch(len) {
343 case 4:
344 rc = __put_user((u32)data, (u32 __user *)ubuf);
345 break;
346 case 2:
347 +#ifdef __LITTLE_ENDIAN__
348 + data >>= 16;
349 +#endif
350 rc = __put_user((u16)data, (u16 __user *)ubuf);
351 break;
352 default:
353 +#ifdef __LITTLE_ENDIAN__
354 + data >>= 24;
355 +#endif
356 rc = __put_user((u8)data, (u8 __user *)ubuf);
357 break;
358 }
359 @@ -263,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
360 else if (todo > 1 && (pos & 1) == 0)
361 len = 2;
362 }
363 +
364 + /*
365 + * Similarly to the read case, we have some trickery here but
366 + * it's different to handle. We need to pass the value to OPAL in
367 + * a register whose layout depends on the access size. We want
368 + * to reproduce the memory layout of the user, however we aren't
369 + * doing a load from user and a store to another memory location
370 + * which would achieve that. Here we pass the value to OPAL via
371 + * a register which is expected to contain the "BE" interpretation
372 + * of the byte sequence. IE: for a 32-bit access, byte 0 should be
373 + * in the MSB. So here we *do* need to byteswap on LE.
374 + *
375 + * User bytes: LE "data" OPAL "data"
376 + * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3
377 + * 16-bit: B0 B1 0000B1B0 0000B0B1
378 + * 8-bit: B0 000000B0 000000B0
379 + */
380 switch(len) {
381 case 4:
382 rc = __get_user(data, (u32 __user *)ubuf);
383 + data = cpu_to_be32(data);
384 break;
385 case 2:
386 rc = __get_user(data, (u16 __user *)ubuf);
387 + data = cpu_to_be16(data);
388 break;
389 default:
390 rc = __get_user(data, (u8 __user *)ubuf);
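
A standalone C sketch (editorial illustration, not part of the patch) of the 16-bit read case described in the comment above. OPAL leaves the LPC bytes right-justified in a 32-bit big-endian word in memory; on a little-endian kernel the loaded u32 therefore carries those bytes in its upper half, so the code must shift right by 16 before cropping to u16. Run on a little-endian host.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* Memory as written by OPAL for a 16-bit LPC read of B0=0xAB, B1=0xCD. */
	unsigned char opal_mem[4] = { 0x00, 0x00, 0xAB, 0xCD };
	uint32_t data;
	uint16_t out;
	unsigned char user[2];

	memcpy(&data, opal_mem, sizeof(data));	/* LE load: data == 0xCDAB0000 */
	data >>= 16;				/* the shift added by the patch */
	out = (uint16_t)data;			/* crop to the access size */
	memcpy(user, &out, sizeof(out));	/* LE store keeps wire byte order */
	printf("user sees: %02x %02x\n", user[0], user[1]);	/* ab cd */
	return 0;
}
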
391 diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
392 index a2450b8a50a5..92eb35ed4b9e 100644
393 --- a/arch/powerpc/platforms/pseries/dlpar.c
394 +++ b/arch/powerpc/platforms/pseries/dlpar.c
395 @@ -379,7 +379,7 @@ static int dlpar_online_cpu(struct device_node *dn)
396 BUG_ON(get_cpu_current_state(cpu)
397 != CPU_STATE_OFFLINE);
398 cpu_maps_update_done();
399 - rc = cpu_up(cpu);
400 + rc = device_online(get_cpu_device(cpu));
401 if (rc)
402 goto out;
403 cpu_maps_update_begin();
404 @@ -462,7 +462,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
405 if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
406 set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
407 cpu_maps_update_done();
408 - rc = cpu_down(cpu);
409 + rc = device_offline(get_cpu_device(cpu));
410 if (rc)
411 goto out;
412 cpu_maps_update_begin();
413 diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
414 index 355a16c55702..b93bed76ea94 100644
415 --- a/arch/s390/kernel/topology.c
416 +++ b/arch/s390/kernel/topology.c
417 @@ -464,15 +464,17 @@ static struct sched_domain_topology_level s390_topology[] = {
418
419 static int __init topology_init(void)
420 {
421 - if (!MACHINE_HAS_TOPOLOGY) {
422 + if (MACHINE_HAS_TOPOLOGY)
423 + set_topology_timer();
424 + else
425 topology_update_polarization_simple();
426 - goto out;
427 - }
428 - set_topology_timer();
429 -out:
430 -
431 - set_sched_topology(s390_topology);
432 -
433 return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
434 }
435 device_initcall(topology_init);
436 +
437 +static int __init early_topology_init(void)
438 +{
439 + set_sched_topology(s390_topology);
440 + return 0;
441 +}
442 +early_initcall(early_topology_init);
443 diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
444 index 9139d14b9c53..538c10db3537 100644
445 --- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
446 +++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
447 @@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
448 };
449
450 static struct resource scif0_resources[] = {
451 - DEFINE_RES_MEM(0xfffffe80, 0x100),
452 + DEFINE_RES_MEM(0xfffffe80, 0x10),
453 DEFINE_RES_IRQ(evt2irq(0x4e0)),
454 };
455
456 @@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
457 };
458
459 static struct resource scif1_resources[] = {
460 - DEFINE_RES_MEM(0xa4000150, 0x100),
461 + DEFINE_RES_MEM(0xa4000150, 0x10),
462 DEFINE_RES_IRQ(evt2irq(0x900)),
463 };
464
465 @@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
466 };
467
468 static struct resource scif2_resources[] = {
469 - DEFINE_RES_MEM(0xa4000140, 0x100),
470 + DEFINE_RES_MEM(0xa4000140, 0x10),
471 DEFINE_RES_IRQ(evt2irq(0x880)),
472 };
473
474 diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
475 index 3716e6952554..e8ab93c3e638 100644
476 --- a/arch/um/drivers/ubd_kern.c
477 +++ b/arch/um/drivers/ubd_kern.c
478 @@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q)
479
480 while(1){
481 struct ubd *dev = q->queuedata;
482 - if(dev->end_sg == 0){
483 + if(dev->request == NULL){
484 struct request *req = blk_fetch_request(q);
485 if(req == NULL)
486 return;
487 @@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q)
488 return;
489 }
490 prepare_flush_request(req, io_req);
491 - submit_request(io_req, dev);
492 + if (submit_request(io_req, dev) == false)
493 + return;
494 }
495
496 while(dev->start_sg < dev->end_sg){
497 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
498 index 4299eb05023c..92a2e9333620 100644
499 --- a/arch/x86/ia32/ia32entry.S
500 +++ b/arch/x86/ia32/ia32entry.S
501 @@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target)
502 1: movl (%rbp),%ebp
503 _ASM_EXTABLE(1b,ia32_badarg)
504 ASM_CLAC
505 +
506 + /*
507 + * Sysenter doesn't filter flags, so we need to clear NT
508 + * ourselves. To save a few cycles, we can check whether
509 + * NT was set instead of doing an unconditional popfq.
510 + */
511 + testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
512 + jnz sysenter_fix_flags
513 +sysenter_flags_fixed:
514 +
515 orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
516 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
517 CFI_REMEMBER_STATE
518 @@ -184,6 +194,8 @@ sysexit_from_sys_call:
519 TRACE_IRQS_ON
520 ENABLE_INTERRUPTS_SYSEXIT32
521
522 + CFI_RESTORE_STATE
523 +
524 #ifdef CONFIG_AUDITSYSCALL
525 .macro auditsys_entry_common
526 movl %esi,%r9d /* 6th arg: 4th syscall arg */
527 @@ -226,7 +238,6 @@ sysexit_from_sys_call:
528 .endm
529
530 sysenter_auditsys:
531 - CFI_RESTORE_STATE
532 auditsys_entry_common
533 movl %ebp,%r9d /* reload 6th syscall arg */
534 jmp sysenter_dispatch
535 @@ -235,6 +246,11 @@ sysexit_audit:
536 auditsys_exit sysexit_from_sys_call
537 #endif
538
539 +sysenter_fix_flags:
540 + pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
541 + popfq_cfi
542 + jmp sysenter_flags_fixed
543 +
544 sysenter_tracesys:
545 #ifdef CONFIG_AUDITSYSCALL
546 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
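
A simplified C model (editorial illustration, not part of the patch) of the check-before-clear pattern added above; the real code pops a clean value into the live EFLAGS, while this sketch only computes the resulting flags word. The constants match the kernel's X86_EFLAGS_* definitions.

#include <stdio.h>

#define X86_EFLAGS_FIXED (1UL << 1)	/* bit 1 always reads as 1 */
#define X86_EFLAGS_IF    (1UL << 9)	/* interrupt enable */
#define X86_EFLAGS_NT    (1UL << 14)	/* nested task */

static unsigned long sanitize_sysenter_flags(unsigned long saved_flags)
{
	/* Fast path: NT already clear, nothing to do. */
	if (!(saved_flags & X86_EFLAGS_NT))
		return saved_flags;
	/* Slow path: replace flags wholesale, as the pushq/popfq above does. */
	return X86_EFLAGS_IF | X86_EFLAGS_FIXED;
}

int main(void)
{
	printf("%#lx\n", sanitize_sysenter_flags(0x246));	/* NT clear: unchanged */
	printf("%#lx\n", sanitize_sysenter_flags(0x4246));	/* NT set: 0x202 */
	return 0;
}
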
547 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
548 index 1a055c81d864..ca3347a9dab5 100644
549 --- a/arch/x86/include/asm/elf.h
550 +++ b/arch/x86/include/asm/elf.h
551 @@ -160,8 +160,9 @@ do { \
552 #define elf_check_arch(x) \
553 ((x)->e_machine == EM_X86_64)
554
555 -#define compat_elf_check_arch(x) \
556 - (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64)
557 +#define compat_elf_check_arch(x) \
558 + (elf_check_arch_ia32(x) || \
559 + (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64))
560
561 #if __USER32_DS != __USER_DS
562 # error "The following code assumes __USER32_DS == __USER_DS"
563 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
564 index 92d3486a6196..0d47ae116a36 100644
565 --- a/arch/x86/include/asm/kvm_host.h
566 +++ b/arch/x86/include/asm/kvm_host.h
567 @@ -991,6 +991,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
568 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
569 }
570
571 +static inline u64 get_canonical(u64 la)
572 +{
573 + return ((int64_t)la << 16) >> 16;
574 +}
575 +
576 +static inline bool is_noncanonical_address(u64 la)
577 +{
578 +#ifdef CONFIG_X86_64
579 + return get_canonical(la) != la;
580 +#else
581 + return false;
582 +#endif
583 +}
584 +
585 #define TSS_IOPB_BASE_OFFSET 0x66
586 #define TSS_BASE_SIZE 0x68
587 #define TSS_IOPB_SIZE (65536 / 8)
588 @@ -1049,7 +1063,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
589 void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
590
591 void kvm_define_shared_msr(unsigned index, u32 msr);
592 -void kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
593 +int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
594
595 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
596
597 diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
598 index 0e79420376eb..990a2fe1588d 100644
599 --- a/arch/x86/include/uapi/asm/vmx.h
600 +++ b/arch/x86/include/uapi/asm/vmx.h
601 @@ -67,6 +67,7 @@
602 #define EXIT_REASON_EPT_MISCONFIG 49
603 #define EXIT_REASON_INVEPT 50
604 #define EXIT_REASON_PREEMPTION_TIMER 52
605 +#define EXIT_REASON_INVVPID 53
606 #define EXIT_REASON_WBINVD 54
607 #define EXIT_REASON_XSETBV 55
608 #define EXIT_REASON_APIC_WRITE 56
609 @@ -114,6 +115,7 @@
610 { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
611 { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
612 { EXIT_REASON_INVD, "INVD" }, \
613 + { EXIT_REASON_INVVPID, "INVVPID" }, \
614 { EXIT_REASON_INVPCID, "INVPCID" }
615
616 #endif /* _UAPIVMX_H */
617 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
618 index b436fc735aa4..a142e77693e1 100644
619 --- a/arch/x86/kernel/acpi/boot.c
620 +++ b/arch/x86/kernel/acpi/boot.c
621 @@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
622
623 /* Don't set up the ACPI SCI because it's already set up */
624 if (acpi_gbl_FADT.sci_interrupt == gsi)
625 - return gsi;
626 + return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
627
628 trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
629 polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
630 @@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
631
632 int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
633 {
634 - int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
635 + int irq;
636
637 - if (irq >= 0) {
638 + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
639 + *irqp = gsi;
640 + } else {
641 + irq = mp_map_gsi_to_irq(gsi,
642 + IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
643 + if (irq < 0)
644 + return -1;
645 *irqp = irq;
646 - return 0;
647 }
648 -
649 - return -1;
650 + return 0;
651 }
652 EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
653
654 diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
655 index af5b08ab3b71..7c3832424168 100644
656 --- a/arch/x86/kernel/apb_timer.c
657 +++ b/arch/x86/kernel/apb_timer.c
658 @@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
659
660 irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
661 irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
662 - /* APB timer irqs are set up as mp_irqs, timer is edge type */
663 - __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
664 }
665
666 /* Should be called with per cpu */
667 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
668 index 67760275544b..24b5894396a0 100644
669 --- a/arch/x86/kernel/apic/apic.c
670 +++ b/arch/x86/kernel/apic/apic.c
671 @@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
672 unsigned int value, queued;
673 int i, j, acked = 0;
674 unsigned long long tsc = 0, ntsc;
675 - long long max_loops = cpu_khz;
676 + long long max_loops = cpu_khz ? cpu_khz : 1000000;
677
678 if (cpu_has_tsc)
679 rdtscll(tsc);
680 @@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
681 break;
682 }
683 if (queued) {
684 - if (cpu_has_tsc) {
685 + if (cpu_has_tsc && cpu_khz) {
686 rdtscll(ntsc);
687 max_loops = (cpu_khz << 10) - (ntsc - tsc);
688 } else
689 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
690 index e4ab2b42bd6f..31265580c38a 100644
691 --- a/arch/x86/kernel/cpu/common.c
692 +++ b/arch/x86/kernel/cpu/common.c
693 @@ -1184,7 +1184,7 @@ void syscall_init(void)
694 /* Flags to clear on syscall */
695 wrmsrl(MSR_SYSCALL_MASK,
696 X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|
697 - X86_EFLAGS_IOPL|X86_EFLAGS_AC);
698 + X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT);
699 }
700
701 /*
702 diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
703 index 50ce7519ccef..1ef456273172 100644
704 --- a/arch/x86/kernel/cpu/intel.c
705 +++ b/arch/x86/kernel/cpu/intel.c
706 @@ -397,6 +397,13 @@ static void init_intel(struct cpuinfo_x86 *c)
707 }
708
709 l2 = init_intel_cacheinfo(c);
710 +
711 + /* Detect legacy cache sizes if init_intel_cacheinfo did not */
712 + if (l2 == 0) {
713 + cpu_detect_cache_sizes(c);
714 + l2 = c->x86_cache_size;
715 + }
716 +
717 if (c->cpuid_level > 9) {
718 unsigned eax = cpuid_eax(10);
719 /* Check for version and the number of counters */
720 @@ -500,6 +507,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
721 */
722 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
723 size = 256;
724 +
725 + /*
726 + * Intel Quark SoC X1000 contains a 4-way set associative
727 + * 16K cache with a 16 byte cache line and 256 lines per tag
728 + */
729 + if ((c->x86 == 5) && (c->x86_model == 9))
730 + size = 16;
731 return size;
732 }
733 #endif
734 @@ -701,7 +715,8 @@ static const struct cpu_dev intel_cpu_dev = {
735 [3] = "OverDrive PODP5V83",
736 [4] = "Pentium MMX",
737 [7] = "Mobile Pentium 75 - 200",
738 - [8] = "Mobile Pentium MMX"
739 + [8] = "Mobile Pentium MMX",
740 + [9] = "Quark SoC X1000",
741 }
742 },
743 { .family = 6, .model_names =
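
An editorial consistency check of the Quark SoC X1000 cache figure cited above (not part of the patch; "256 lines per tag" is read here as 256 sets): 4 ways x 256 sets x 16 bytes per line = 16384 bytes = 16 KB, consistent with the size = 16 assignment, since the sizes in this helper are in kilobytes (compare the 256 KB Celeron case earlier in the function).
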
744 diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
745 index 9030e83db6ee..c957e11a2d0f 100644
746 --- a/arch/x86/kernel/iosf_mbi.c
747 +++ b/arch/x86/kernel/iosf_mbi.c
748 @@ -26,6 +26,7 @@
749 #include <asm/iosf_mbi.h>
750
751 #define PCI_DEVICE_ID_BAYTRAIL 0x0F00
752 +#define PCI_DEVICE_ID_BRASWELL 0x2280
753 #define PCI_DEVICE_ID_QUARK_X1000 0x0958
754
755 static DEFINE_SPINLOCK(iosf_mbi_lock);
756 @@ -204,6 +205,7 @@ static int iosf_mbi_probe(struct pci_dev *pdev,
757
758 static const struct pci_device_id iosf_mbi_pci_ids[] = {
759 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BAYTRAIL) },
760 + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_BRASWELL) },
761 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_QUARK_X1000) },
762 { 0, },
763 };
764 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
765 index 2851d63c1202..ed37a768d0fc 100644
766 --- a/arch/x86/kernel/signal.c
767 +++ b/arch/x86/kernel/signal.c
768 @@ -675,6 +675,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
769 * handler too.
770 */
771 regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF);
772 + /*
773 + * Ensure the signal handler starts with the new fpu state.
774 + */
775 + if (used_math())
776 + drop_init_fpu(current);
777 }
778 signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP));
779 }
780 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
781 index b6025f9e36c6..b7e50bba3bbb 100644
782 --- a/arch/x86/kernel/tsc.c
783 +++ b/arch/x86/kernel/tsc.c
784 @@ -1166,14 +1166,17 @@ void __init tsc_init(void)
785
786 x86_init.timers.tsc_pre_init();
787
788 - if (!cpu_has_tsc)
789 + if (!cpu_has_tsc) {
790 + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
791 return;
792 + }
793
794 tsc_khz = x86_platform.calibrate_tsc();
795 cpu_khz = tsc_khz;
796
797 if (!tsc_khz) {
798 mark_tsc_unstable("could not calculate TSC khz");
799 + setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
800 return;
801 }
802
803 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
804 index 940b142cc11f..4c540c4719d8 100644
805 --- a/arch/x86/kernel/xsave.c
806 +++ b/arch/x86/kernel/xsave.c
807 @@ -271,8 +271,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
808 if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
809 return -1;
810
811 - drop_init_fpu(tsk); /* trigger finit */
812 -
813 return 0;
814 }
815
816 @@ -402,8 +400,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
817 set_used_math();
818 }
819
820 - if (use_eager_fpu())
821 + if (use_eager_fpu()) {
822 + preempt_disable();
823 math_state_restore();
824 + preempt_enable();
825 + }
826
827 return err;
828 } else {
829 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
830 index 03954f7900f5..77c77fe84f13 100644
831 --- a/arch/x86/kvm/emulate.c
832 +++ b/arch/x86/kvm/emulate.c
833 @@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
834 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
835 }
836
837 -static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
838 -{
839 - register_address_increment(ctxt, &ctxt->_eip, rel);
840 -}
841 -
842 static u32 desc_limit_scaled(struct desc_struct *desc)
843 {
844 u32 limit = get_desc_limit(desc);
845 @@ -568,6 +563,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
846 return emulate_exception(ctxt, NM_VECTOR, 0, false);
847 }
848
849 +static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
850 + int cs_l)
851 +{
852 + switch (ctxt->op_bytes) {
853 + case 2:
854 + ctxt->_eip = (u16)dst;
855 + break;
856 + case 4:
857 + ctxt->_eip = (u32)dst;
858 + break;
859 +#ifdef CONFIG_X86_64
860 + case 8:
861 + if ((cs_l && is_noncanonical_address(dst)) ||
862 + (!cs_l && (dst >> 32) != 0))
863 + return emulate_gp(ctxt, 0);
864 + ctxt->_eip = dst;
865 + break;
866 +#endif
867 + default:
868 + WARN(1, "unsupported eip assignment size\n");
869 + }
870 + return X86EMUL_CONTINUE;
871 +}
872 +
873 +static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
874 +{
875 + return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
876 +}
877 +
878 +static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
879 +{
880 + return assign_eip_near(ctxt, ctxt->_eip + rel);
881 +}
882 +
883 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
884 {
885 u16 selector;
886 @@ -613,7 +642,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
887
888 static int __linearize(struct x86_emulate_ctxt *ctxt,
889 struct segmented_address addr,
890 - unsigned size, bool write, bool fetch,
891 + unsigned *max_size, unsigned size,
892 + bool write, bool fetch,
893 ulong *linear)
894 {
895 struct desc_struct desc;
896 @@ -624,10 +654,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
897 unsigned cpl;
898
899 la = seg_base(ctxt, addr.seg) + addr.ea;
900 + *max_size = 0;
901 switch (ctxt->mode) {
902 case X86EMUL_MODE_PROT64:
903 if (((signed long)la << 16) >> 16 != la)
904 return emulate_gp(ctxt, 0);
905 +
906 + *max_size = min_t(u64, ~0u, (1ull << 48) - la);
907 + if (size > *max_size)
908 + goto bad;
909 break;
910 default:
911 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
912 @@ -645,20 +680,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
913 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
914 (ctxt->d & NoBigReal)) {
915 /* la is between zero and 0xffff */
916 - if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
917 + if (la > 0xffff)
918 goto bad;
919 + *max_size = 0x10000 - la;
920 } else if ((desc.type & 8) || !(desc.type & 4)) {
921 /* expand-up segment */
922 - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
923 + if (addr.ea > lim)
924 goto bad;
925 + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
926 } else {
927 /* expand-down segment */
928 - if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
929 + if (addr.ea <= lim)
930 goto bad;
931 lim = desc.d ? 0xffffffff : 0xffff;
932 - if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
933 + if (addr.ea > lim)
934 goto bad;
935 + *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
936 }
937 + if (size > *max_size)
938 + goto bad;
939 cpl = ctxt->ops->cpl(ctxt);
940 if (!(desc.type & 8)) {
941 /* data segment */
942 @@ -693,7 +733,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
943 unsigned size, bool write,
944 ulong *linear)
945 {
946 - return __linearize(ctxt, addr, size, write, false, linear);
947 + unsigned max_size;
948 + return __linearize(ctxt, addr, &max_size, size, write, false, linear);
949 }
950
951
952 @@ -718,17 +759,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
953 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
954 {
955 int rc;
956 - unsigned size;
957 + unsigned size, max_size;
958 unsigned long linear;
959 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
960 struct segmented_address addr = { .seg = VCPU_SREG_CS,
961 .ea = ctxt->eip + cur_size };
962
963 - size = 15UL ^ cur_size;
964 - rc = __linearize(ctxt, addr, size, false, true, &linear);
965 + /*
966 + * We do not know exactly how many bytes will be needed, and
967 + * __linearize is expensive, so fetch as much as possible. We
968 + * just have to avoid going beyond the 15 byte limit, the end
969 + * of the segment, or the end of the page.
970 + *
971 + * __linearize is called with size 0 so that it does not do any
972 + * boundary check itself. Instead, we use max_size to check
973 + * against op_size.
974 + */
975 + rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
976 if (unlikely(rc != X86EMUL_CONTINUE))
977 return rc;
978
979 + size = min_t(unsigned, 15UL ^ cur_size, max_size);
980 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
981
982 /*
983 @@ -738,7 +789,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
984 * still, we must have hit the 15-byte boundary.
985 */
986 if (unlikely(size < op_size))
987 - return X86EMUL_UNHANDLEABLE;
988 + return emulate_gp(ctxt, 0);
989 +
990 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
991 size, &ctxt->exception);
992 if (unlikely(rc != X86EMUL_CONTINUE))
993 @@ -750,8 +802,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
994 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
995 unsigned size)
996 {
997 - if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
998 - return __do_insn_fetch_bytes(ctxt, size);
999 + unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
1000 +
1001 + if (unlikely(done_size < size))
1002 + return __do_insn_fetch_bytes(ctxt, size - done_size);
1003 else
1004 return X86EMUL_CONTINUE;
1005 }
1006 @@ -1415,7 +1469,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1007
1008 /* Does not support long mode */
1009 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1010 - u16 selector, int seg, u8 cpl, bool in_task_switch)
1011 + u16 selector, int seg, u8 cpl,
1012 + bool in_task_switch,
1013 + struct desc_struct *desc)
1014 {
1015 struct desc_struct seg_desc, old_desc;
1016 u8 dpl, rpl;
1017 @@ -1547,6 +1603,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1018 }
1019 load:
1020 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1021 + if (desc)
1022 + *desc = seg_desc;
1023 return X86EMUL_CONTINUE;
1024 exception:
1025 emulate_exception(ctxt, err_vec, err_code, true);
1026 @@ -1557,7 +1615,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1027 u16 selector, int seg)
1028 {
1029 u8 cpl = ctxt->ops->cpl(ctxt);
1030 - return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
1031 + return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1032 }
1033
1034 static void write_register_operand(struct operand *op)
1035 @@ -1951,17 +2009,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
1036 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1037 {
1038 int rc;
1039 - unsigned short sel;
1040 + unsigned short sel, old_sel;
1041 + struct desc_struct old_desc, new_desc;
1042 + const struct x86_emulate_ops *ops = ctxt->ops;
1043 + u8 cpl = ctxt->ops->cpl(ctxt);
1044 +
1045 + /* Assignment of RIP may only fail in 64-bit mode */
1046 + if (ctxt->mode == X86EMUL_MODE_PROT64)
1047 + ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
1048 + VCPU_SREG_CS);
1049
1050 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1051
1052 - rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
1053 + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
1054 + &new_desc);
1055 if (rc != X86EMUL_CONTINUE)
1056 return rc;
1057
1058 - ctxt->_eip = 0;
1059 - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1060 - return X86EMUL_CONTINUE;
1061 + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
1062 + if (rc != X86EMUL_CONTINUE) {
1063 + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1064 + /* assigning eip failed; restore the old cs */
1065 + ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
1066 + return rc;
1067 + }
1068 + return rc;
1069 }
1070
1071 static int em_grp45(struct x86_emulate_ctxt *ctxt)
1072 @@ -1972,13 +2044,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
1073 case 2: /* call near abs */ {
1074 long int old_eip;
1075 old_eip = ctxt->_eip;
1076 - ctxt->_eip = ctxt->src.val;
1077 + rc = assign_eip_near(ctxt, ctxt->src.val);
1078 + if (rc != X86EMUL_CONTINUE)
1079 + break;
1080 ctxt->src.val = old_eip;
1081 rc = em_push(ctxt);
1082 break;
1083 }
1084 case 4: /* jmp abs */
1085 - ctxt->_eip = ctxt->src.val;
1086 + rc = assign_eip_near(ctxt, ctxt->src.val);
1087 break;
1088 case 5: /* jmp far */
1089 rc = em_jmp_far(ctxt);
1090 @@ -2013,30 +2087,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
1091
1092 static int em_ret(struct x86_emulate_ctxt *ctxt)
1093 {
1094 - ctxt->dst.type = OP_REG;
1095 - ctxt->dst.addr.reg = &ctxt->_eip;
1096 - ctxt->dst.bytes = ctxt->op_bytes;
1097 - return em_pop(ctxt);
1098 + int rc;
1099 + unsigned long eip;
1100 +
1101 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1102 + if (rc != X86EMUL_CONTINUE)
1103 + return rc;
1104 +
1105 + return assign_eip_near(ctxt, eip);
1106 }
1107
1108 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
1109 {
1110 int rc;
1111 - unsigned long cs;
1112 + unsigned long eip, cs;
1113 + u16 old_cs;
1114 int cpl = ctxt->ops->cpl(ctxt);
1115 + struct desc_struct old_desc, new_desc;
1116 + const struct x86_emulate_ops *ops = ctxt->ops;
1117
1118 - rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
1119 + if (ctxt->mode == X86EMUL_MODE_PROT64)
1120 + ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
1121 + VCPU_SREG_CS);
1122 +
1123 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1124 if (rc != X86EMUL_CONTINUE)
1125 return rc;
1126 - if (ctxt->op_bytes == 4)
1127 - ctxt->_eip = (u32)ctxt->_eip;
1128 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
1129 if (rc != X86EMUL_CONTINUE)
1130 return rc;
1131 /* Outer-privilege level return is not implemented */
1132 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
1133 return X86EMUL_UNHANDLEABLE;
1134 - rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
1135 + rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
1136 + &new_desc);
1137 + if (rc != X86EMUL_CONTINUE)
1138 + return rc;
1139 + rc = assign_eip_far(ctxt, eip, new_desc.l);
1140 + if (rc != X86EMUL_CONTINUE) {
1141 + WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
1142 + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
1143 + }
1144 return rc;
1145 }
1146
1147 @@ -2297,7 +2388,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1148 {
1149 const struct x86_emulate_ops *ops = ctxt->ops;
1150 struct desc_struct cs, ss;
1151 - u64 msr_data;
1152 + u64 msr_data, rcx, rdx;
1153 int usermode;
1154 u16 cs_sel = 0, ss_sel = 0;
1155
1156 @@ -2313,6 +2404,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1157 else
1158 usermode = X86EMUL_MODE_PROT32;
1159
1160 + rcx = reg_read(ctxt, VCPU_REGS_RCX);
1161 + rdx = reg_read(ctxt, VCPU_REGS_RDX);
1162 +
1163 cs.dpl = 3;
1164 ss.dpl = 3;
1165 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
1166 @@ -2330,6 +2424,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1167 ss_sel = cs_sel + 8;
1168 cs.d = 0;
1169 cs.l = 1;
1170 + if (is_noncanonical_address(rcx) ||
1171 + is_noncanonical_address(rdx))
1172 + return emulate_gp(ctxt, 0);
1173 break;
1174 }
1175 cs_sel |= SELECTOR_RPL_MASK;
1176 @@ -2338,8 +2435,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
1177 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
1178 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
1179
1180 - ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
1181 - *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
1182 + ctxt->_eip = rdx;
1183 + *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
1184
1185 return X86EMUL_CONTINUE;
1186 }
1187 @@ -2457,19 +2554,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
1188 * Now load segment descriptors. If fault happens at this stage
1189 * it is handled in a context of new task
1190 */
1191 - ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
1192 + ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
1193 + true, NULL);
1194 if (ret != X86EMUL_CONTINUE)
1195 return ret;
1196 - ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
1197 + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
1198 + true, NULL);
1199 if (ret != X86EMUL_CONTINUE)
1200 return ret;
1201 - ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
1202 + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
1203 + true, NULL);
1204 if (ret != X86EMUL_CONTINUE)
1205 return ret;
1206 - ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
1207 + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
1208 + true, NULL);
1209 if (ret != X86EMUL_CONTINUE)
1210 return ret;
1211 - ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
1212 + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
1213 + true, NULL);
1214 if (ret != X86EMUL_CONTINUE)
1215 return ret;
1216
1217 @@ -2594,25 +2696,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
1218 * Now load segment descriptors. If fault happenes at this stage
1219 * it is handled in a context of new task
1220 */
1221 - ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
1222 + ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
1223 + cpl, true, NULL);
1224 if (ret != X86EMUL_CONTINUE)
1225 return ret;
1226 - ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
1227 + ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
1228 + true, NULL);
1229 if (ret != X86EMUL_CONTINUE)
1230 return ret;
1231 - ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
1232 + ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
1233 + true, NULL);
1234 if (ret != X86EMUL_CONTINUE)
1235 return ret;
1236 - ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
1237 + ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
1238 + true, NULL);
1239 if (ret != X86EMUL_CONTINUE)
1240 return ret;
1241 - ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
1242 + ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
1243 + true, NULL);
1244 if (ret != X86EMUL_CONTINUE)
1245 return ret;
1246 - ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
1247 + ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
1248 + true, NULL);
1249 if (ret != X86EMUL_CONTINUE)
1250 return ret;
1251 - ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
1252 + ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
1253 + true, NULL);
1254 if (ret != X86EMUL_CONTINUE)
1255 return ret;
1256
1257 @@ -2880,10 +2989,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
1258
1259 static int em_call(struct x86_emulate_ctxt *ctxt)
1260 {
1261 + int rc;
1262 long rel = ctxt->src.val;
1263
1264 ctxt->src.val = (unsigned long)ctxt->_eip;
1265 - jmp_rel(ctxt, rel);
1266 + rc = jmp_rel(ctxt, rel);
1267 + if (rc != X86EMUL_CONTINUE)
1268 + return rc;
1269 return em_push(ctxt);
1270 }
1271
1272 @@ -2892,34 +3004,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
1273 u16 sel, old_cs;
1274 ulong old_eip;
1275 int rc;
1276 + struct desc_struct old_desc, new_desc;
1277 + const struct x86_emulate_ops *ops = ctxt->ops;
1278 + int cpl = ctxt->ops->cpl(ctxt);
1279
1280 - old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
1281 old_eip = ctxt->_eip;
1282 + ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
1283
1284 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1285 - if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
1286 + rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
1287 + &new_desc);
1288 + if (rc != X86EMUL_CONTINUE)
1289 return X86EMUL_CONTINUE;
1290
1291 - ctxt->_eip = 0;
1292 - memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
1293 + rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
1294 + if (rc != X86EMUL_CONTINUE)
1295 + goto fail;
1296
1297 ctxt->src.val = old_cs;
1298 rc = em_push(ctxt);
1299 if (rc != X86EMUL_CONTINUE)
1300 - return rc;
1301 + goto fail;
1302
1303 ctxt->src.val = old_eip;
1304 - return em_push(ctxt);
1305 + rc = em_push(ctxt);
1306 + /* If we failed, we tainted the memory, but the very least we should
1307 + restore cs */
1308 + if (rc != X86EMUL_CONTINUE)
1309 + goto fail;
1310 + return rc;
1311 +fail:
1312 + ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
1313 + return rc;
1314 +
1315 }
1316
1317 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
1318 {
1319 int rc;
1320 + unsigned long eip;
1321
1322 - ctxt->dst.type = OP_REG;
1323 - ctxt->dst.addr.reg = &ctxt->_eip;
1324 - ctxt->dst.bytes = ctxt->op_bytes;
1325 - rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
1326 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
1327 + if (rc != X86EMUL_CONTINUE)
1328 + return rc;
1329 + rc = assign_eip_near(ctxt, eip);
1330 if (rc != X86EMUL_CONTINUE)
1331 return rc;
1332 rsp_increment(ctxt, ctxt->src.val);
1333 @@ -3250,20 +3378,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
1334
1335 static int em_loop(struct x86_emulate_ctxt *ctxt)
1336 {
1337 + int rc = X86EMUL_CONTINUE;
1338 +
1339 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
1340 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
1341 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
1342 - jmp_rel(ctxt, ctxt->src.val);
1343 + rc = jmp_rel(ctxt, ctxt->src.val);
1344
1345 - return X86EMUL_CONTINUE;
1346 + return rc;
1347 }
1348
1349 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
1350 {
1351 + int rc = X86EMUL_CONTINUE;
1352 +
1353 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
1354 - jmp_rel(ctxt, ctxt->src.val);
1355 + rc = jmp_rel(ctxt, ctxt->src.val);
1356
1357 - return X86EMUL_CONTINUE;
1358 + return rc;
1359 }
1360
1361 static int em_in(struct x86_emulate_ctxt *ctxt)
1362 @@ -3351,6 +3483,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
1363 return X86EMUL_CONTINUE;
1364 }
1365
1366 +static int em_clflush(struct x86_emulate_ctxt *ctxt)
1367 +{
1368 + /* emulating clflush regardless of cpuid */
1369 + return X86EMUL_CONTINUE;
1370 +}
1371 +
1372 static bool valid_cr(int nr)
1373 {
1374 switch (nr) {
1375 @@ -3683,6 +3821,16 @@ static const struct opcode group11[] = {
1376 X7(D(Undefined)),
1377 };
1378
1379 +static const struct gprefix pfx_0f_ae_7 = {
1380 + I(SrcMem | ByteOp, em_clflush), N, N, N,
1381 +};
1382 +
1383 +static const struct group_dual group15 = { {
1384 + N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
1385 +}, {
1386 + N, N, N, N, N, N, N, N,
1387 +} };
1388 +
1389 static const struct gprefix pfx_0f_6f_0f_7f = {
1390 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
1391 };
1392 @@ -3887,10 +4035,11 @@ static const struct opcode twobyte_table[256] = {
1393 N, I(ImplicitOps | EmulateOnUD, em_syscall),
1394 II(ImplicitOps | Priv, em_clts, clts), N,
1395 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
1396 - N, D(ImplicitOps | ModRM), N, N,
1397 + N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
1398 /* 0x10 - 0x1F */
1399 N, N, N, N, N, N, N, N,
1400 - D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
1401 + D(ImplicitOps | ModRM | SrcMem | NoAccess),
1402 + N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
1403 /* 0x20 - 0x2F */
1404 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
1405 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
1406 @@ -3942,7 +4091,7 @@ static const struct opcode twobyte_table[256] = {
1407 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
1408 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
1409 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
1410 - D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
1411 + GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
1412 /* 0xB0 - 0xB7 */
1413 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
1414 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
1415 @@ -4458,10 +4607,10 @@ done_prefixes:
1416 /* Decode and fetch the destination operand: register or memory. */
1417 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
1418
1419 -done:
1420 if (ctxt->rip_relative)
1421 ctxt->memopp->addr.mem.ea += ctxt->_eip;
1422
1423 +done:
1424 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
1425 }
1426
1427 @@ -4711,7 +4860,7 @@ special_insn:
1428 break;
1429 case 0x70 ... 0x7f: /* jcc (short) */
1430 if (test_cc(ctxt->b, ctxt->eflags))
1431 - jmp_rel(ctxt, ctxt->src.val);
1432 + rc = jmp_rel(ctxt, ctxt->src.val);
1433 break;
1434 case 0x8d: /* lea r16/r32, m */
1435 ctxt->dst.val = ctxt->src.addr.mem.ea;
1436 @@ -4741,7 +4890,7 @@ special_insn:
1437 break;
1438 case 0xe9: /* jmp rel */
1439 case 0xeb: /* jmp rel short */
1440 - jmp_rel(ctxt, ctxt->src.val);
1441 + rc = jmp_rel(ctxt, ctxt->src.val);
1442 ctxt->dst.type = OP_NONE; /* Disable writeback. */
1443 break;
1444 case 0xf4: /* hlt */
1445 @@ -4864,13 +5013,11 @@ twobyte_insn:
1446 break;
1447 case 0x80 ... 0x8f: /* jnz rel, etc*/
1448 if (test_cc(ctxt->b, ctxt->eflags))
1449 - jmp_rel(ctxt, ctxt->src.val);
1450 + rc = jmp_rel(ctxt, ctxt->src.val);
1451 break;
1452 case 0x90 ... 0x9f: /* setcc r/m8 */
1453 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
1454 break;
1455 - case 0xae: /* clflush */
1456 - break;
1457 case 0xb6 ... 0xb7: /* movzx */
1458 ctxt->dst.bytes = ctxt->op_bytes;
1459 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
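
A small standalone C sketch (editorial illustration, not part of the patch) of the fetch bound that the reworked __do_insn_fetch_bytes() computes above: a fetch may not cross the x86 15-byte instruction-length limit, the end of the code segment (max_size from __linearize), or the end of the current page. The values in main() are hypothetical.

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long fetch_bound(unsigned long cur_size,	/* bytes already fetched, < 15 */
				 unsigned long max_size,	/* bytes left in the segment */
				 unsigned long linear)		/* linear address of next fetch */
{
	unsigned long size = 15 - cur_size;		/* instruction-length limit */

	if (size > max_size)
		size = max_size;			/* segment limit */
	if (size > PAGE_SIZE - (linear & (PAGE_SIZE - 1)))
		size = PAGE_SIZE - (linear & (PAGE_SIZE - 1));	/* page boundary */
	return size;
}

int main(void)
{
	/* 3 bytes already fetched, large segment, 5 bytes to the end of the page. */
	printf("%lu\n", fetch_bound(3, 1000, 0x7ffb));	/* -> 5 */
	return 0;
}
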
1460 diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
1461 index 518d86471b76..298781d4cfb4 100644
1462 --- a/arch/x86/kvm/i8254.c
1463 +++ b/arch/x86/kvm/i8254.c
1464 @@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
1465 return;
1466
1467 timer = &pit->pit_state.timer;
1468 + mutex_lock(&pit->pit_state.lock);
1469 if (hrtimer_cancel(timer))
1470 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
1471 + mutex_unlock(&pit->pit_state.lock);
1472 }
1473
1474 static void destroy_pit_timer(struct kvm_pit *pit)
1475 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1476 index ddf742768ecf..78dadc36fc78 100644
1477 --- a/arch/x86/kvm/svm.c
1478 +++ b/arch/x86/kvm/svm.c
1479 @@ -3234,7 +3234,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
1480 msr.host_initiated = false;
1481
1482 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
1483 - if (svm_set_msr(&svm->vcpu, &msr)) {
1484 + if (kvm_set_msr(&svm->vcpu, &msr)) {
1485 trace_kvm_msr_write_ex(ecx, data);
1486 kvm_inject_gp(&svm->vcpu, 0);
1487 } else {
1488 @@ -3534,9 +3534,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
1489
1490 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
1491 || !svm_exit_handlers[exit_code]) {
1492 - kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
1493 - kvm_run->hw.hardware_exit_reason = exit_code;
1494 - return 0;
1495 + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
1496 + kvm_queue_exception(vcpu, UD_VECTOR);
1497 + return 1;
1498 }
1499
1500 return svm_exit_handlers[exit_code](svm);
1501 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1502 index 6a118fa378b5..41a5426c8edb 100644
1503 --- a/arch/x86/kvm/vmx.c
1504 +++ b/arch/x86/kvm/vmx.c
1505 @@ -2632,12 +2632,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1506 default:
1507 msr = find_msr_entry(vmx, msr_index);
1508 if (msr) {
1509 + u64 old_msr_data = msr->data;
1510 msr->data = data;
1511 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
1512 preempt_disable();
1513 - kvm_set_shared_msr(msr->index, msr->data,
1514 - msr->mask);
1515 + ret = kvm_set_shared_msr(msr->index, msr->data,
1516 + msr->mask);
1517 preempt_enable();
1518 + if (ret)
1519 + msr->data = old_msr_data;
1520 }
1521 break;
1522 }
1523 @@ -5263,7 +5266,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
1524 msr.data = data;
1525 msr.index = ecx;
1526 msr.host_initiated = false;
1527 - if (vmx_set_msr(vcpu, &msr) != 0) {
1528 + if (kvm_set_msr(vcpu, &msr) != 0) {
1529 trace_kvm_msr_write_ex(ecx, data);
1530 kvm_inject_gp(vcpu, 0);
1531 return 1;
1532 @@ -6636,6 +6639,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
1533 return 1;
1534 }
1535
1536 +static int handle_invvpid(struct kvm_vcpu *vcpu)
1537 +{
1538 + kvm_queue_exception(vcpu, UD_VECTOR);
1539 + return 1;
1540 +}
1541 +
1542 /*
1543 * The exit handlers return 1 if the exit was handled fully and guest execution
1544 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
1545 @@ -6681,6 +6690,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
1546 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
1547 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
1548 [EXIT_REASON_INVEPT] = handle_invept,
1549 + [EXIT_REASON_INVVPID] = handle_invvpid,
1550 };
1551
1552 static const int kvm_vmx_max_exit_handlers =
1553 @@ -6914,7 +6924,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
1554 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
1555 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
1556 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
1557 - case EXIT_REASON_INVEPT:
1558 + case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
1559 /*
1560 * VMX instructions trap unconditionally. This allows L1 to
1561 * emulate them for its L2 guest, i.e., allows 3-level nesting!
1562 @@ -7055,10 +7065,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
1563 && kvm_vmx_exit_handlers[exit_reason])
1564 return kvm_vmx_exit_handlers[exit_reason](vcpu);
1565 else {
1566 - vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
1567 - vcpu->run->hw.hardware_exit_reason = exit_reason;
1568 + WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
1569 + kvm_queue_exception(vcpu, UD_VECTOR);
1570 + return 1;
1571 }
1572 - return 0;
1573 }
1574
1575 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
1576 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1577 index 8f1e22d3b286..9d292e8372d6 100644
1578 --- a/arch/x86/kvm/x86.c
1579 +++ b/arch/x86/kvm/x86.c
1580 @@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
1581 shared_msr_update(i, shared_msrs_global.msrs[i]);
1582 }
1583
1584 -void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
1585 +int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
1586 {
1587 unsigned int cpu = smp_processor_id();
1588 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
1589 + int err;
1590
1591 if (((value ^ smsr->values[slot].curr) & mask) == 0)
1592 - return;
1593 + return 0;
1594 smsr->values[slot].curr = value;
1595 - wrmsrl(shared_msrs_global.msrs[slot], value);
1596 + err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
1597 + if (err)
1598 + return 1;
1599 +
1600 if (!smsr->registered) {
1601 smsr->urn.on_user_return = kvm_on_user_return;
1602 user_return_notifier_register(&smsr->urn);
1603 smsr->registered = true;
1604 }
1605 + return 0;
1606 }
1607 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
1608
1609 @@ -984,7 +989,6 @@ void kvm_enable_efer_bits(u64 mask)
1610 }
1611 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1612
1613 -
1614 /*
1615 * Writes msr value into into the appropriate "register".
1616 * Returns 0 on success, non-0 otherwise.
1617 @@ -992,8 +996,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1618 */
1619 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
1620 {
1621 + switch (msr->index) {
1622 + case MSR_FS_BASE:
1623 + case MSR_GS_BASE:
1624 + case MSR_KERNEL_GS_BASE:
1625 + case MSR_CSTAR:
1626 + case MSR_LSTAR:
1627 + if (is_noncanonical_address(msr->data))
1628 + return 1;
1629 + break;
1630 + case MSR_IA32_SYSENTER_EIP:
1631 + case MSR_IA32_SYSENTER_ESP:
1632 + /*
1633 + * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1634 + * non-canonical address is written on Intel but not on
1635 + * AMD (which ignores the top 32-bits, because it does
1636 + * not implement 64-bit SYSENTER).
1637 + *
1638 + * 64-bit code should hence be able to write a non-canonical
1639 + * value on AMD. Making the address canonical ensures that
1640 + * vmentry does not fail on Intel after writing a non-canonical
1641 + * value, and that something deterministic happens if the guest
1642 + * invokes 64-bit SYSENTER.
1643 + */
1644 + msr->data = get_canonical(msr->data);
1645 + }
1646 return kvm_x86_ops->set_msr(vcpu, msr);
1647 }
1648 +EXPORT_SYMBOL_GPL(kvm_set_msr);
1649
1650 /*
1651 * Adapt set_msr() to msr_io()'s calling convention
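The kvm_set_msr() hunk above relies on get_canonical()/is_noncanonical_address(), whose definitions are not shown in this patch. On x86-64 with 48 virtual-address bits, an address is canonical when bits 63..48 are copies of bit 47. A standalone sketch of that rule, assuming 48-bit virtual addresses (an illustration, not the kernel's exact helpers):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VADDR_BITS 48  /* assumption: 4-level paging, 48-bit virtual addresses */

/* Copy bit 47 into bits 63..48, i.e. the canonical form of an address. */
static uint64_t get_canonical(uint64_t la)
{
	if (la & (1ULL << (VADDR_BITS - 1)))
		return la | ~((1ULL << VADDR_BITS) - 1);
	return la & ((1ULL << VADDR_BITS) - 1);
}

static bool is_noncanonical_address(uint64_t la)
{
	return get_canonical(la) != la;
}

int main(void)
{
	/* 0x0000800000000000 is the lowest non-canonical address with 48 bits. */
	printf("%d\n", is_noncanonical_address(0x0000800000000000ULL)); /* 1 */
	printf("%d\n", is_noncanonical_address(0xffff800000000000ULL)); /* 0 */
	return 0;
}

Writing a made-canonical value, as the hunk does for the SYSENTER MSRs, keeps vmentry from failing on Intel while preserving the AMD behaviour of ignoring the upper bits.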
1652 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
1653 index ae242a7c11c7..36de293caf25 100644
1654 --- a/arch/x86/mm/pageattr.c
1655 +++ b/arch/x86/mm/pageattr.c
1656 @@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
1657 psize = page_level_size(level);
1658 pmask = page_level_mask(level);
1659 offset = virt_addr & ~pmask;
1660 - phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
1661 + phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
1662 return (phys_addr | offset);
1663 }
1664 EXPORT_SYMBOL_GPL(slow_virt_to_phys);
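The pageattr.c change above widens the pfn before shifting, because on 32-bit PAE kernels pte_pfn() returns an unsigned long and pfn << PAGE_SHIFT can exceed 32 bits. A small sketch of the truncation the cast prevents (illustrative fixed-width types, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* pfn 0x100000 corresponds to physical address 4 GiB. */
	uint32_t pfn = 0x100000;                          /* plays the 32-bit unsigned long */
	uint32_t truncated = pfn << PAGE_SHIFT;           /* shift done in 32 bits: wraps to 0 */
	uint64_t correct   = (uint64_t)pfn << PAGE_SHIFT; /* widen first, as the patch does */

	printf("truncated: 0x%08x\n", truncated);                     /* 0x00000000 */
	printf("correct:   0x%llx\n", (unsigned long long)correct);   /* 0x100000000 */
	return 0;
}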
1665 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
1666 index 5c8cb8043c5a..c881ba8491e5 100644
1667 --- a/arch/x86/net/bpf_jit_comp.c
1668 +++ b/arch/x86/net/bpf_jit_comp.c
1669 @@ -211,12 +211,17 @@ struct jit_context {
1670 bool seen_ld_abs;
1671 };
1672
1673 +/* maximum number of bytes emitted while JITing one eBPF insn */
1674 +#define BPF_MAX_INSN_SIZE 128
1675 +#define BPF_INSN_SAFETY 64
1676 +
1677 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
1678 int oldproglen, struct jit_context *ctx)
1679 {
1680 struct bpf_insn *insn = bpf_prog->insnsi;
1681 int insn_cnt = bpf_prog->len;
1682 - u8 temp[64];
1683 + bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
1684 + u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1685 int i;
1686 int proglen = 0;
1687 u8 *prog = temp;
1688 @@ -254,7 +259,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
1689 EMIT2(0x31, 0xc0); /* xor eax, eax */
1690 EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */
1691
1692 - if (ctx->seen_ld_abs) {
1693 + if (seen_ld_abs) {
1694 /* r9d : skb->len - skb->data_len (headlen)
1695 * r10 : skb->data
1696 */
1697 @@ -655,7 +660,7 @@ xadd: if (is_imm8(insn->off))
1698 case BPF_JMP | BPF_CALL:
1699 func = (u8 *) __bpf_call_base + imm32;
1700 jmp_offset = func - (image + addrs[i]);
1701 - if (ctx->seen_ld_abs) {
1702 + if (seen_ld_abs) {
1703 EMIT2(0x41, 0x52); /* push %r10 */
1704 EMIT2(0x41, 0x51); /* push %r9 */
1705 /* need to adjust jmp offset, since
1706 @@ -669,7 +674,7 @@ xadd: if (is_imm8(insn->off))
1707 return -EINVAL;
1708 }
1709 EMIT1_off32(0xE8, jmp_offset);
1710 - if (ctx->seen_ld_abs) {
1711 + if (seen_ld_abs) {
1712 EMIT2(0x41, 0x59); /* pop %r9 */
1713 EMIT2(0x41, 0x5A); /* pop %r10 */
1714 }
1715 @@ -774,7 +779,8 @@ emit_jmp:
1716 goto common_load;
1717 case BPF_LD | BPF_ABS | BPF_W:
1718 func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
1719 -common_load: ctx->seen_ld_abs = true;
1720 +common_load:
1721 + ctx->seen_ld_abs = seen_ld_abs = true;
1722 jmp_offset = func - (image + addrs[i]);
1723 if (!func || !is_simm32(jmp_offset)) {
1724 pr_err("unsupported bpf func %d addr %p image %p\n",
1725 @@ -848,6 +854,11 @@ common_load: ctx->seen_ld_abs = true;
1726 }
1727
1728 ilen = prog - temp;
1729 + if (ilen > BPF_MAX_INSN_SIZE) {
1730 + pr_err("bpf_jit_compile fatal insn size error\n");
1731 + return -EFAULT;
1732 + }
1733 +
1734 if (image) {
1735 if (unlikely(proglen + ilen > oldproglen)) {
1736 pr_err("bpf_jit_compile fatal error\n");
1737 @@ -904,9 +915,11 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
1738 goto out;
1739 }
1740 if (image) {
1741 - if (proglen != oldproglen)
1742 + if (proglen != oldproglen) {
1743 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1744 proglen, oldproglen);
1745 + goto out;
1746 + }
1747 break;
1748 }
1749 if (proglen == oldproglen) {
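The bpf_jit_comp.c hunks enlarge the per-instruction scratch buffer and add an explicit bound check so that the bytes emitted for one eBPF instruction are verified against a declared maximum before being copied into the image. A reduced sketch of that guard pattern, with invented helper names and a pretend encoder:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_INSN_SIZE 128   /* worst-case bytes one instruction may emit */
#define INSN_SAFETY    64   /* slack so a slight overrun lands in temp[], not elsewhere */

/* Pretend encoder: writes up to MAX_INSN_SIZE + INSN_SAFETY NOP bytes. */
static int emit_one(unsigned char *temp, int value)
{
	int len = value % (MAX_INSN_SIZE + INSN_SAFETY);
	memset(temp, 0x90, (size_t)len);
	return len;
}

static int jit_one(unsigned char *image, int value)
{
	unsigned char temp[MAX_INSN_SIZE + INSN_SAFETY];
	int ilen = emit_one(temp, value);

	if (ilen > MAX_INSN_SIZE)           /* the check the patch adds */
		return -EFAULT;             /* refuse to copy an oversized instruction */

	memcpy(image, temp, (size_t)ilen);
	return ilen;
}

int main(void)
{
	unsigned char image[MAX_INSN_SIZE];
	printf("%d\n", jit_one(image, 50));   /* 50 */
	printf("%d\n", jit_one(image, 190));  /* -14 (EFAULT): caught before the copy */
	return 0;
}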
1750 diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
1751 index 3c53a90fdb18..c14ad34776c4 100644
1752 --- a/arch/x86/platform/intel-mid/sfi.c
1753 +++ b/arch/x86/platform/intel-mid/sfi.c
1754 @@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
1755 mp_irq.dstapic = MP_APIC_ALL;
1756 mp_irq.dstirq = pentry->irq;
1757 mp_save_irq(&mp_irq);
1758 + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
1759 }
1760
1761 return 0;
1762 @@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
1763 mp_irq.dstapic = MP_APIC_ALL;
1764 mp_irq.dstirq = pentry->irq;
1765 mp_save_irq(&mp_irq);
1766 + mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
1767 }
1768 return 0;
1769 }
1770 diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
1771 index c1b92426c95e..74a4168ea34e 100644
1772 --- a/block/blk-mq-tag.c
1773 +++ b/block/blk-mq-tag.c
1774 @@ -463,8 +463,8 @@ static void bt_update_count(struct blk_mq_bitmap_tags *bt,
1775 }
1776
1777 bt->wake_cnt = BT_WAIT_BATCH;
1778 - if (bt->wake_cnt > depth / 4)
1779 - bt->wake_cnt = max(1U, depth / 4);
1780 + if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
1781 + bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
1782
1783 bt->depth = depth;
1784 }
1785 diff --git a/block/blk-settings.c b/block/blk-settings.c
1786 index f1a1795a5683..aa02247d227e 100644
1787 --- a/block/blk-settings.c
1788 +++ b/block/blk-settings.c
1789 @@ -574,7 +574,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1790 bottom = max(b->physical_block_size, b->io_min) + alignment;
1791
1792 /* Verify that top and bottom intervals line up */
1793 - if (max(top, bottom) & (min(top, bottom) - 1)) {
1794 + if (max(top, bottom) % min(top, bottom)) {
1795 t->misaligned = 1;
1796 ret = -1;
1797 }
1798 @@ -619,7 +619,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1799
1800 /* Find lowest common alignment_offset */
1801 t->alignment_offset = lcm(t->alignment_offset, alignment)
1802 - & (max(t->physical_block_size, t->io_min) - 1);
1803 + % max(t->physical_block_size, t->io_min);
1804
1805 /* Verify that new alignment_offset is on a logical block boundary */
1806 if (t->alignment_offset & (t->logical_block_size - 1)) {
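The blk-settings.c hunks replace power-of-two mask tests with modulo, because physical_block_size and io_min are not guaranteed to be powers of two (RAID stripe-derived io_min, for example). A quick sketch of why the old mask test misjudges such values:

#include <stdio.h>

int main(void)
{
	/* Both granularities are multiples of 256, but 768 is not a power of two. */
	unsigned top = 1536, bottom = 768;

	unsigned mask_test = top & (bottom - 1);  /* 1536 & 767 = 512: falsely flags misalignment */
	unsigned mod_test  = top % bottom;        /* 0: the intervals really do line up */

	printf("mask says %s, mod says %s\n",
	       mask_test ? "misaligned" : "aligned",
	       mod_test  ? "misaligned" : "aligned");
	return 0;
}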
1807 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
1808 index 9b8eaeca6a79..a6d6270806c3 100644
1809 --- a/block/scsi_ioctl.c
1810 +++ b/block/scsi_ioctl.c
1811 @@ -509,7 +509,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
1812
1813 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
1814 err = DRIVER_ERROR << 24;
1815 - goto out;
1816 + goto error;
1817 }
1818
1819 memset(sense, 0, sizeof(sense));
1820 @@ -518,7 +518,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
1821
1822 blk_execute_rq(q, disk, rq, 0);
1823
1824 -out:
1825 err = rq->errors & 0xff; /* only 8 bit SCSI status */
1826 if (err) {
1827 if (rq->sense_len && rq->sense) {
1828 diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
1829 index a19c027b29bd..83187f497c7c 100644
1830 --- a/crypto/algif_skcipher.c
1831 +++ b/crypto/algif_skcipher.c
1832 @@ -49,7 +49,7 @@ struct skcipher_ctx {
1833 struct ablkcipher_request req;
1834 };
1835
1836 -#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \
1837 +#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
1838 sizeof(struct scatterlist) - 1)
1839
1840 static inline int skcipher_sndbuf(struct sock *sk)
1841 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
1842 index 67075f800e34..5e9cbd664286 100644
1843 --- a/drivers/acpi/device_pm.c
1844 +++ b/drivers/acpi/device_pm.c
1845 @@ -710,7 +710,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
1846 return -ENODEV;
1847 }
1848
1849 - return acpi_device_wakeup(adev, enable, ACPI_STATE_S0);
1850 + return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
1851 }
1852 EXPORT_SYMBOL(acpi_pm_device_run_wake);
1853 #endif /* CONFIG_PM_RUNTIME */
1854 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1855 index cb6066c809ea..c874859b4565 100644
1856 --- a/drivers/acpi/ec.c
1857 +++ b/drivers/acpi/ec.c
1858 @@ -126,6 +126,7 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
1859 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
1860 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
1861 static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
1862 +static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
1863
1864 /* --------------------------------------------------------------------------
1865 Transaction Management
1866 @@ -210,13 +211,8 @@ static bool advance_transaction(struct acpi_ec *ec)
1867 }
1868 return wakeup;
1869 } else {
1870 - /*
1871 - * There is firmware refusing to respond QR_EC when SCI_EVT
1872 - * is not set, for which case, we complete the QR_EC
1873 - * without issuing it to the firmware.
1874 - * https://bugzilla.kernel.org/show_bug.cgi?id=86211
1875 - */
1876 - if (!(status & ACPI_EC_FLAG_SCI) &&
1877 + if (EC_FLAGS_QUERY_HANDSHAKE &&
1878 + !(status & ACPI_EC_FLAG_SCI) &&
1879 (t->command == ACPI_EC_COMMAND_QUERY)) {
1880 t->flags |= ACPI_EC_COMMAND_POLL;
1881 t->rdata[t->ri++] = 0x00;
1882 @@ -981,6 +977,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
1883 }
1884
1885 /*
1886 + * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
1887 + * which case, we complete the QR_EC without issuing it to the firmware.
1888 + * https://bugzilla.kernel.org/show_bug.cgi?id=86211
1889 + */
1890 +static int ec_flag_query_handshake(const struct dmi_system_id *id)
1891 +{
1892 + pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
1893 + EC_FLAGS_QUERY_HANDSHAKE = 1;
1894 + return 0;
1895 +}
1896 +
1897 +/*
1898 * On some hardware it is necessary to clear events accumulated by the EC during
1899 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
1900 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
1901 @@ -1054,6 +1062,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
1902 {
1903 ec_clear_on_resume, "Samsung hardware", {
1904 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
1905 + {
1906 + ec_flag_query_handshake, "Acer hardware", {
1907 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
1908 {},
1909 };
1910
1911 diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1912 index 1121153f1ecd..db90aa35cb71 100644
1913 --- a/drivers/ata/libata-sff.c
1914 +++ b/drivers/ata/libata-sff.c
1915 @@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1916
1917 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1918
1919 - /* software reset. causes dev0 to be selected */
1920 - iowrite8(ap->ctl, ioaddr->ctl_addr);
1921 - udelay(20); /* FIXME: flush */
1922 - iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1923 - udelay(20); /* FIXME: flush */
1924 - iowrite8(ap->ctl, ioaddr->ctl_addr);
1925 - ap->last_ctl = ap->ctl;
1926 + if (ap->ioaddr.ctl_addr) {
1927 + /* software reset. causes dev0 to be selected */
1928 + iowrite8(ap->ctl, ioaddr->ctl_addr);
1929 + udelay(20); /* FIXME: flush */
1930 + iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
1931 + udelay(20); /* FIXME: flush */
1932 + iowrite8(ap->ctl, ioaddr->ctl_addr);
1933 + ap->last_ctl = ap->ctl;
1934 + }
1935
1936 /* wait the port to become ready */
1937 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
1938 @@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap)
1939
1940 spin_unlock_irqrestore(ap->lock, flags);
1941
1942 - /* ignore ata_sff_softreset if ctl isn't accessible */
1943 - if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
1944 - softreset = NULL;
1945 -
1946 /* ignore built-in hardresets if SCR access is not available */
1947 if ((hardreset == sata_std_hardreset ||
1948 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
1949 diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
1950 index fc5f31d4828e..57de02123c4c 100644
1951 --- a/drivers/ata/pata_serverworks.c
1952 +++ b/drivers/ata/pata_serverworks.c
1953 @@ -251,12 +251,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev
1954 pci_write_config_byte(pdev, 0x54, ultra_cfg);
1955 }
1956
1957 -static struct scsi_host_template serverworks_sht = {
1958 +static struct scsi_host_template serverworks_osb4_sht = {
1959 + ATA_BMDMA_SHT(DRV_NAME),
1960 + .sg_tablesize = LIBATA_DUMB_MAX_PRD,
1961 +};
1962 +
1963 +static struct scsi_host_template serverworks_csb_sht = {
1964 ATA_BMDMA_SHT(DRV_NAME),
1965 };
1966
1967 static struct ata_port_operations serverworks_osb4_port_ops = {
1968 .inherits = &ata_bmdma_port_ops,
1969 + .qc_prep = ata_bmdma_dumb_qc_prep,
1970 .cable_detect = serverworks_cable_detect,
1971 .mode_filter = serverworks_osb4_filter,
1972 .set_piomode = serverworks_set_piomode,
1973 @@ -265,6 +271,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
1974
1975 static struct ata_port_operations serverworks_csb_port_ops = {
1976 .inherits = &serverworks_osb4_port_ops,
1977 + .qc_prep = ata_bmdma_qc_prep,
1978 .mode_filter = serverworks_csb_filter,
1979 };
1980
1981 @@ -404,6 +411,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
1982 }
1983 };
1984 const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };
1985 + struct scsi_host_template *sht = &serverworks_csb_sht;
1986 int rc;
1987
1988 rc = pcim_enable_device(pdev);
1989 @@ -417,6 +425,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
1990 /* Select non UDMA capable OSB4 if we can't do fixups */
1991 if (rc < 0)
1992 ppi[0] = &info[1];
1993 + sht = &serverworks_osb4_sht;
1994 }
1995 /* setup CSB5/CSB6 : South Bridge and IDE option RAID */
1996 else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
1997 @@ -433,7 +442,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id
1998 ppi[1] = &ata_dummy_port_info;
1999 }
2000
2001 - return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0);
2002 + return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0);
2003 }
2004
2005 #ifdef CONFIG_PM_SLEEP
2006 diff --git a/drivers/base/core.c b/drivers/base/core.c
2007 index 20da3ad1696b..0e9468cb814f 100644
2008 --- a/drivers/base/core.c
2009 +++ b/drivers/base/core.c
2010 @@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
2011 return &dir->kobj;
2012 }
2013
2014 +static DEFINE_MUTEX(gdp_mutex);
2015
2016 static struct kobject *get_device_parent(struct device *dev,
2017 struct device *parent)
2018 {
2019 if (dev->class) {
2020 - static DEFINE_MUTEX(gdp_mutex);
2021 struct kobject *kobj = NULL;
2022 struct kobject *parent_kobj;
2023 struct kobject *k;
2024 @@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
2025 glue_dir->kset != &dev->class->p->glue_dirs)
2026 return;
2027
2028 + mutex_lock(&gdp_mutex);
2029 kobject_put(glue_dir);
2030 + mutex_unlock(&gdp_mutex);
2031 }
2032
2033 static void cleanup_device_parent(struct device *dev)
2034 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
2035 index b67d9aef9fe4..ebc2f9decdda 100644
2036 --- a/drivers/base/power/main.c
2037 +++ b/drivers/base/power/main.c
2038 @@ -1266,6 +1266,8 @@ static int dpm_suspend_late(pm_message_t state)
2039 }
2040 mutex_unlock(&dpm_list_mtx);
2041 async_synchronize_full();
2042 + if (!error)
2043 + error = async_error;
2044 if (error) {
2045 suspend_stats.failed_suspend_late++;
2046 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
2047 diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
2048 index 89c497c630b4..04a14e0f8878 100644
2049 --- a/drivers/block/drbd/drbd_interval.c
2050 +++ b/drivers/block/drbd/drbd_interval.c
2051 @@ -79,6 +79,7 @@ bool
2052 drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
2053 {
2054 struct rb_node **new = &root->rb_node, *parent = NULL;
2055 + sector_t this_end = this->sector + (this->size >> 9);
2056
2057 BUG_ON(!IS_ALIGNED(this->size, 512));
2058
2059 @@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
2060 rb_entry(*new, struct drbd_interval, rb);
2061
2062 parent = *new;
2063 + if (here->end < this_end)
2064 + here->end = this_end;
2065 if (this->sector < here->sector)
2066 new = &(*new)->rb_left;
2067 else if (this->sector > here->sector)
2068 @@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
2069 return false;
2070 }
2071
2072 + this->end = this_end;
2073 rb_link_node(&this->rb, parent, new);
2074 rb_insert_augmented(&this->rb, root, &augment_callbacks);
2075 return true;
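The drbd_interval.c change keeps the augmented "end" value (maximum interval end in a node's subtree) up to date on every ancestor while descending during insert, and only then sets it on the new node. A compact sketch of that bookkeeping on a plain, unbalanced tree, with invented types standing in for the rbtree machinery:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t sector_t;

struct node {
	sector_t sector;            /* interval start (tree key) */
	sector_t size;              /* interval length in bytes  */
	sector_t end;               /* augmented: max interval end in this subtree */
	struct node *left, *right;
};

/* Insert keyed by start sector, bumping the augmented 'end' on the way down,
 * mirroring what the patched drbd_insert_interval() does while it descends. */
static struct node *insert(struct node *root, sector_t sector, sector_t size)
{
	sector_t this_end = sector + (size >> 9);
	struct node **pp = &root, *parent;

	while ((parent = *pp) != NULL) {
		if (parent->end < this_end)
			parent->end = this_end;   /* keep ancestors' max current */
		pp = (sector < parent->sector) ? &parent->left : &parent->right;
	}

	parent = calloc(1, sizeof(*parent));      /* sketch: allocation failure not handled */
	parent->sector = sector;
	parent->size   = size;
	parent->end    = this_end;                /* leaf: subtree max is its own end */
	*pp = parent;
	return root;
}

int main(void)
{
	struct node *root = NULL;
	root = insert(root, 0, 4096);
	root = insert(root, 100, 8192);
	printf("root end = %llu\n", (unsigned long long)root->end);   /* 116 */
	return 0;
}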
2076 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
2077 index 4b97baf8afa3..33f0f97d3750 100644
2078 --- a/drivers/block/rbd.c
2079 +++ b/drivers/block/rbd.c
2080 @@ -3382,7 +3382,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2081 page_count = (u32) calc_pages_for(offset, length);
2082 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2083 if (IS_ERR(pages))
2084 - ret = PTR_ERR(pages);
2085 + return PTR_ERR(pages);
2086
2087 ret = -ENOMEM;
2088 obj_request = rbd_obj_request_create(object_name, offset, length,
2089 @@ -5087,7 +5087,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
2090 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
2091 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
2092
2093 - rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name);
2094 + rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
2095 + rbd_dev->disk->disk_name);
2096 if (!rbd_dev->rq_wq) {
2097 ret = -ENOMEM;
2098 goto err_out_mapping;
2099 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
2100 index 64c60edcdfbc..63fc7f06a014 100644
2101 --- a/drivers/block/xen-blkback/blkback.c
2102 +++ b/drivers/block/xen-blkback/blkback.c
2103 @@ -763,6 +763,7 @@ again:
2104 BUG_ON(new_map_idx >= segs_to_map);
2105 if (unlikely(map[new_map_idx].status != 0)) {
2106 pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
2107 + put_free_pages(blkif, &pages[seg_idx]->page, 1);
2108 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
2109 ret |= 1;
2110 goto next;
2111 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
2112 index 3a8b810b4980..54f408963201 100644
2113 --- a/drivers/block/xen-blkback/xenbus.c
2114 +++ b/drivers/block/xen-blkback/xenbus.c
2115 @@ -270,6 +270,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
2116 blkif->blk_rings.common.sring = NULL;
2117 }
2118
2119 + /* Remove all persistent grants and the cache of ballooned pages. */
2120 + xen_blkbk_free_caches(blkif);
2121 +
2122 return 0;
2123 }
2124
2125 @@ -281,9 +284,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
2126 xen_blkif_disconnect(blkif);
2127 xen_vbd_free(&blkif->vbd);
2128
2129 - /* Remove all persistent grants and the cache of ballooned pages. */
2130 - xen_blkbk_free_caches(blkif);
2131 -
2132 /* Make sure everything is drained before shutting down */
2133 BUG_ON(blkif->persistent_gnt_c != 0);
2134 BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
2135 diff --git a/drivers/char/random.c b/drivers/char/random.c
2136 index c18d41db83d8..8c86a95203a0 100644
2137 --- a/drivers/char/random.c
2138 +++ b/drivers/char/random.c
2139 @@ -1106,7 +1106,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
2140 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
2141 spin_unlock_irqrestore(&r->lock, flags);
2142
2143 - memset(workspace, 0, sizeof(workspace));
2144 + memzero_explicit(workspace, sizeof(workspace));
2145
2146 /*
2147 * In case the hash function has some recognizable output
2148 @@ -1118,7 +1118,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
2149 hash.w[2] ^= rol32(hash.w[2], 16);
2150
2151 memcpy(out, &hash, EXTRACT_SIZE);
2152 - memset(&hash, 0, sizeof(hash));
2153 + memzero_explicit(&hash, sizeof(hash));
2154 }
2155
2156 /*
2157 @@ -1175,7 +1175,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
2158 }
2159
2160 /* Wipe data just returned from memory */
2161 - memset(tmp, 0, sizeof(tmp));
2162 + memzero_explicit(tmp, sizeof(tmp));
2163
2164 return ret;
2165 }
2166 @@ -1218,7 +1218,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
2167 }
2168
2169 /* Wipe data just returned from memory */
2170 - memset(tmp, 0, sizeof(tmp));
2171 + memzero_explicit(tmp, sizeof(tmp));
2172
2173 return ret;
2174 }
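The random.c hunks switch sensitive scratch buffers from memset() to memzero_explicit(), because a final memset() of a local about to go out of scope may legally be removed by the compiler as a dead store, leaving key material on the stack. The kernel helper is essentially memset() followed by a compiler barrier; a userspace sketch of the same idea (not the kernel's exact implementation):

#include <string.h>
#include <stdio.h>

/* Zero a buffer in a way the optimizer may not elide: the empty asm with a
 * memory clobber makes the preceding memset's effect observable. */
static void memzero_explicit(void *s, size_t count)
{
	memset(s, 0, count);
	__asm__ __volatile__("" : : "r"(s) : "memory");
}

int main(void)
{
	char key[16] = "secretsecretsec";
	/* A plain memset(key, 0, sizeof(key)) here could be optimized away. */
	memzero_explicit(key, sizeof(key));
	printf("first byte after wipe: %d\n", key[0]);   /* 0 */
	return 0;
}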
2175 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
2176 index 61190f6b4829..c05821e8de41 100644
2177 --- a/drivers/cpufreq/cpufreq.c
2178 +++ b/drivers/cpufreq/cpufreq.c
2179 @@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
2180 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
2181 show_one(scaling_min_freq, min);
2182 show_one(scaling_max_freq, max);
2183 -show_one(scaling_cur_freq, cur);
2184 +
2185 +static ssize_t show_scaling_cur_freq(
2186 + struct cpufreq_policy *policy, char *buf)
2187 +{
2188 + ssize_t ret;
2189 +
2190 + if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
2191 + ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
2192 + else
2193 + ret = sprintf(buf, "%u\n", policy->cur);
2194 + return ret;
2195 +}
2196
2197 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2198 struct cpufreq_policy *new_policy);
2199 @@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
2200 if (ret)
2201 goto err_out_kobj_put;
2202 }
2203 - if (has_target()) {
2204 - ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
2205 - if (ret)
2206 - goto err_out_kobj_put;
2207 - }
2208 +
2209 + ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
2210 + if (ret)
2211 + goto err_out_kobj_put;
2212 +
2213 if (cpufreq_driver->bios_limit) {
2214 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
2215 if (ret)
2216 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
2217 index 0668b389c516..27bb6d3877ed 100644
2218 --- a/drivers/cpufreq/intel_pstate.c
2219 +++ b/drivers/cpufreq/intel_pstate.c
2220 @@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
2221 return div_s64((int64_t)x << FRAC_BITS, y);
2222 }
2223
2224 +static inline int ceiling_fp(int32_t x)
2225 +{
2226 + int mask, ret;
2227 +
2228 + ret = fp_toint(x);
2229 + mask = (1 << FRAC_BITS) - 1;
2230 + if (x & mask)
2231 + ret += 1;
2232 + return ret;
2233 +}
2234 +
2235 struct sample {
2236 int32_t core_pct_busy;
2237 u64 aperf;
2238 @@ -64,6 +75,7 @@ struct pstate_data {
2239 int current_pstate;
2240 int min_pstate;
2241 int max_pstate;
2242 + int scaling;
2243 int turbo_pstate;
2244 };
2245
2246 @@ -113,6 +125,7 @@ struct pstate_funcs {
2247 int (*get_max)(void);
2248 int (*get_min)(void);
2249 int (*get_turbo)(void);
2250 + int (*get_scaling)(void);
2251 void (*set)(struct cpudata*, int pstate);
2252 void (*get_vid)(struct cpudata *);
2253 };
2254 @@ -138,6 +151,7 @@ struct perf_limits {
2255
2256 static struct perf_limits limits = {
2257 .no_turbo = 0,
2258 + .turbo_disabled = 0,
2259 .max_perf_pct = 100,
2260 .max_perf = int_tofp(1),
2261 .min_perf_pct = 0,
2262 @@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void)
2263 }
2264 }
2265
2266 +static inline void update_turbo_state(void)
2267 +{
2268 + u64 misc_en;
2269 + struct cpudata *cpu;
2270 +
2271 + cpu = all_cpu_data[0];
2272 + rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
2273 + limits.turbo_disabled =
2274 + (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
2275 + cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
2276 +}
2277 +
2278 /************************** debugfs begin ************************/
2279 static int pid_param_set(void *data, u64 val)
2280 {
2281 @@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void)
2282 return sprintf(buf, "%u\n", limits.object); \
2283 }
2284
2285 +static ssize_t show_no_turbo(struct kobject *kobj,
2286 + struct attribute *attr, char *buf)
2287 +{
2288 + ssize_t ret;
2289 +
2290 + update_turbo_state();
2291 + if (limits.turbo_disabled)
2292 + ret = sprintf(buf, "%u\n", limits.turbo_disabled);
2293 + else
2294 + ret = sprintf(buf, "%u\n", limits.no_turbo);
2295 +
2296 + return ret;
2297 +}
2298 +
2299 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
2300 const char *buf, size_t count)
2301 {
2302 @@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
2303 ret = sscanf(buf, "%u", &input);
2304 if (ret != 1)
2305 return -EINVAL;
2306 - limits.no_turbo = clamp_t(int, input, 0 , 1);
2307 +
2308 + update_turbo_state();
2309 if (limits.turbo_disabled) {
2310 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
2311 - limits.no_turbo = limits.turbo_disabled;
2312 + return -EPERM;
2313 }
2314 + limits.no_turbo = clamp_t(int, input, 0, 1);
2315 +
2316 return count;
2317 }
2318
2319 @@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
2320 return count;
2321 }
2322
2323 -show_one(no_turbo, no_turbo);
2324 show_one(max_perf_pct, max_perf_pct);
2325 show_one(min_perf_pct, min_perf_pct);
2326
2327 @@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
2328 cpudata->vid.ratio);
2329
2330 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
2331 - vid = fp_toint(vid_fp);
2332 + vid = ceiling_fp(vid_fp);
2333
2334 if (pstate > cpudata->pstate.max_pstate)
2335 vid = cpudata->vid.turbo;
2336 @@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
2337 wrmsrl(MSR_IA32_PERF_CTL, val);
2338 }
2339
2340 +#define BYT_BCLK_FREQS 5
2341 +static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
2342 +
2343 +static int byt_get_scaling(void)
2344 +{
2345 + u64 value;
2346 + int i;
2347 +
2348 + rdmsrl(MSR_FSB_FREQ, value);
2349 + i = value & 0x3;
2350 +
2351 + BUG_ON(i > BYT_BCLK_FREQS);
2352 +
2353 + return byt_freq_table[i] * 100;
2354 +}
2355 +
2356 static void byt_get_vid(struct cpudata *cpudata)
2357 {
2358 u64 value;
2359 @@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void)
2360 return ret;
2361 }
2362
2363 +static inline int core_get_scaling(void)
2364 +{
2365 + return 100000;
2366 +}
2367 +
2368 static void core_set_pstate(struct cpudata *cpudata, int pstate)
2369 {
2370 u64 val;
2371 @@ -473,6 +536,7 @@ static struct cpu_defaults core_params = {
2372 .get_max = core_get_max_pstate,
2373 .get_min = core_get_min_pstate,
2374 .get_turbo = core_get_turbo_pstate,
2375 + .get_scaling = core_get_scaling,
2376 .set = core_set_pstate,
2377 },
2378 };
2379 @@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = {
2380 .get_min = byt_get_min_pstate,
2381 .get_turbo = byt_get_turbo_pstate,
2382 .set = byt_set_pstate,
2383 + .get_scaling = byt_get_scaling,
2384 .get_vid = byt_get_vid,
2385 },
2386 };
2387 @@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
2388 int max_perf_adj;
2389 int min_perf;
2390
2391 - if (limits.no_turbo)
2392 + if (limits.no_turbo || limits.turbo_disabled)
2393 max_perf = cpu->pstate.max_pstate;
2394
2395 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
2396 @@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
2397 {
2398 int max_perf, min_perf;
2399
2400 + update_turbo_state();
2401 +
2402 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
2403
2404 pstate = clamp_t(int, pstate, min_perf, max_perf);
2405 @@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
2406 if (pstate == cpu->pstate.current_pstate)
2407 return;
2408
2409 - trace_cpu_frequency(pstate * 100000, cpu->cpu);
2410 + trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
2411
2412 cpu->pstate.current_pstate = pstate;
2413
2414 @@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
2415 cpu->pstate.min_pstate = pstate_funcs.get_min();
2416 cpu->pstate.max_pstate = pstate_funcs.get_max();
2417 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
2418 + cpu->pstate.scaling = pstate_funcs.get_scaling();
2419
2420 if (pstate_funcs.get_vid)
2421 pstate_funcs.get_vid(cpu);
2422 @@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
2423 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
2424
2425 sample->freq = fp_toint(
2426 - mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
2427 + mul_fp(int_tofp(
2428 + cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
2429 + core_pct));
2430
2431 sample->core_pct_busy = (int32_t)core_pct;
2432 }
2433 @@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
2434 {
2435 struct cpudata *cpu;
2436
2437 - all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
2438 + if (!all_cpu_data[cpunum])
2439 + all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
2440 + GFP_KERNEL);
2441 if (!all_cpu_data[cpunum])
2442 return -ENOMEM;
2443
2444 @@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2445 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
2446 limits.min_perf_pct = 100;
2447 limits.min_perf = int_tofp(1);
2448 + limits.max_policy_pct = 100;
2449 limits.max_perf_pct = 100;
2450 limits.max_perf = int_tofp(1);
2451 - limits.no_turbo = limits.turbo_disabled;
2452 + limits.no_turbo = 0;
2453 return 0;
2454 }
2455 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
2456 @@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
2457
2458 del_timer_sync(&all_cpu_data[cpu_num]->timer);
2459 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
2460 - kfree(all_cpu_data[cpu_num]);
2461 - all_cpu_data[cpu_num] = NULL;
2462 }
2463
2464 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2465 {
2466 struct cpudata *cpu;
2467 int rc;
2468 - u64 misc_en;
2469
2470 rc = intel_pstate_init_cpu(policy->cpu);
2471 if (rc)
2472 @@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2473
2474 cpu = all_cpu_data[policy->cpu];
2475
2476 - rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
2477 - if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
2478 - cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
2479 - limits.turbo_disabled = 1;
2480 - limits.no_turbo = 1;
2481 - }
2482 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
2483 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
2484 else
2485 policy->policy = CPUFREQ_POLICY_POWERSAVE;
2486
2487 - policy->min = cpu->pstate.min_pstate * 100000;
2488 - policy->max = cpu->pstate.turbo_pstate * 100000;
2489 + policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
2490 + policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
2491
2492 /* cpuinfo and default policy values */
2493 - policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
2494 - policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
2495 + policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2496 + policy->cpuinfo.max_freq =
2497 + cpu->pstate.turbo_pstate * cpu->pstate.scaling;
2498 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
2499 cpumask_set_cpu(policy->cpu, policy->cpus);
2500
2501 @@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
2502 pstate_funcs.get_max = funcs->get_max;
2503 pstate_funcs.get_min = funcs->get_min;
2504 pstate_funcs.get_turbo = funcs->get_turbo;
2505 + pstate_funcs.get_scaling = funcs->get_scaling;
2506 pstate_funcs.set = funcs->set;
2507 pstate_funcs.get_vid = funcs->get_vid;
2508 }
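The intel_pstate hunks replace the hard-coded 100 MHz-per-P-state assumption with a per-CPU "scaling" factor (core parts report 100000 kHz, Baytrail derives its bus clock from MSR_FSB_FREQ), so frequencies become pstate * scaling, and VID values are rounded up with the new ceiling_fp(). A small sketch of those two pieces, assuming the driver's 8 fractional bits:

#include <stdio.h>
#include <stdint.h>

#define FRAC_BITS 8
#define int_tofp(X)  ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X)  ((X) >> FRAC_BITS)

/* Round a fixed-point value up to the next integer, as the patch does for VID. */
static int ceiling_fp(int32_t x)
{
	int ret  = fp_toint(x);
	int mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}

int main(void)
{
	int pstate  = 24;        /* e.g. a 2.4 GHz P-state on a core part */
	int scaling = 100000;    /* kHz per P-state step, as core_get_scaling() returns */

	printf("freq = %d kHz\n", pstate * scaling);                              /* 2400000 */
	printf("ceiling_fp(2.25) = %d\n", ceiling_fp((int32_t)(int_tofp(9) / 4))); /* 3 */
	return 0;
}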
2509 diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
2510 index df6575f1430d..682288ced4ac 100644
2511 --- a/drivers/edac/cpc925_edac.c
2512 +++ b/drivers/edac/cpc925_edac.c
2513 @@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
2514
2515 if (apiexcp & UECC_EXCP_DETECTED) {
2516 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
2517 - edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
2518 + edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
2519 pfn, offset, 0,
2520 csrow, -1, -1,
2521 mci->ctl_name, "");
2522 diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
2523 index 3cda79bc8b00..ece3aef16bb1 100644
2524 --- a/drivers/edac/e7xxx_edac.c
2525 +++ b/drivers/edac/e7xxx_edac.c
2526 @@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
2527 static void process_ce_no_info(struct mem_ctl_info *mci)
2528 {
2529 edac_dbg(3, "\n");
2530 - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
2531 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
2532 "e7xxx CE log register overflow", "");
2533 }
2534
2535 diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
2536 index 022a70273ada..aa98b136f5d0 100644
2537 --- a/drivers/edac/i3200_edac.c
2538 +++ b/drivers/edac/i3200_edac.c
2539 @@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
2540 -1, -1,
2541 "i3000 UE", "");
2542 } else if (log & I3200_ECCERRLOG_CE) {
2543 - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
2544 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
2545 0, 0, eccerrlog_syndrome(log),
2546 eccerrlog_row(channel, log),
2547 -1, -1,
2548 - "i3000 UE", "");
2549 + "i3000 CE", "");
2550 }
2551 }
2552 }
2553 diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
2554 index 3382f6344e42..4382343a7c60 100644
2555 --- a/drivers/edac/i82860_edac.c
2556 +++ b/drivers/edac/i82860_edac.c
2557 @@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
2558 dimm->location[0], dimm->location[1], -1,
2559 "i82860 UE", "");
2560 else
2561 - edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
2562 + edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
2563 info->eap, 0, info->derrsyn,
2564 dimm->location[0], dimm->location[1], -1,
2565 "i82860 CE", "");
2566 diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
2567 index 5389350244f2..70bedf9bd1ea 100644
2568 --- a/drivers/gpu/drm/ast/ast_mode.c
2569 +++ b/drivers/gpu/drm/ast/ast_mode.c
2570 @@ -1080,8 +1080,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height)
2571 srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
2572 data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
2573 data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
2574 - data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4);
2575 - data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4);
2576 + data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
2577 + data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
2578
2579 writel(data32.ul, dstxor);
2580 csum += data32.ul;
2581 diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
2582 index 919c73b94447..4977631927d8 100644
2583 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c
2584 +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
2585 @@ -32,6 +32,8 @@ static struct drm_driver driver;
2586 static const struct pci_device_id pciidlist[] = {
2587 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
2588 0, 0 },
2589 + { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
2590 + 0x0001, 0, 0, 0 },
2591 {0,}
2592 };
2593
2594 diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
2595 index d38413997379..d182058383a9 100644
2596 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c
2597 +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
2598 @@ -293,15 +293,23 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
2599 static struct i915_mmu_notifier *
2600 i915_mmu_notifier_find(struct i915_mm_struct *mm)
2601 {
2602 - if (mm->mn == NULL) {
2603 - down_write(&mm->mm->mmap_sem);
2604 - mutex_lock(&to_i915(mm->dev)->mm_lock);
2605 - if (mm->mn == NULL)
2606 - mm->mn = i915_mmu_notifier_create(mm->mm);
2607 - mutex_unlock(&to_i915(mm->dev)->mm_lock);
2608 - up_write(&mm->mm->mmap_sem);
2609 + struct i915_mmu_notifier *mn = mm->mn;
2610 +
2611 + mn = mm->mn;
2612 + if (mn)
2613 + return mn;
2614 +
2615 + down_write(&mm->mm->mmap_sem);
2616 + mutex_lock(&to_i915(mm->dev)->mm_lock);
2617 + if ((mn = mm->mn) == NULL) {
2618 + mn = i915_mmu_notifier_create(mm->mm);
2619 + if (!IS_ERR(mn))
2620 + mm->mn = mn;
2621 }
2622 - return mm->mn;
2623 + mutex_unlock(&to_i915(mm->dev)->mm_lock);
2624 + up_write(&mm->mm->mmap_sem);
2625 +
2626 + return mn;
2627 }
2628
2629 static int
2630 @@ -681,16 +689,15 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
2631 static void
2632 i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
2633 {
2634 - struct scatterlist *sg;
2635 - int i;
2636 + struct sg_page_iter sg_iter;
2637
2638 BUG_ON(obj->userptr.work != NULL);
2639
2640 if (obj->madv != I915_MADV_WILLNEED)
2641 obj->dirty = 0;
2642
2643 - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
2644 - struct page *page = sg_page(sg);
2645 + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
2646 + struct page *page = sg_page_iter_page(&sg_iter);
2647
2648 if (obj->dirty)
2649 set_page_dirty(page);
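The i915_gem_userptr.c hunk rewrites i915_mmu_notifier_find() as double-checked locking that only publishes a successfully created notifier, so an ERR_PTR result is never cached. A generic sketch of that pattern with invented names, ignoring memory-ordering subtleties for brevity:

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached;                 /* plays the role of mm->mn */

static void *create(void)            /* plays the role of i915_mmu_notifier_create() */
{
	return malloc(64);           /* may fail and return NULL in this sketch */
}

/* Return the cached object, creating it at most once; only a successful
 * creation is published, mirroring the "if (!IS_ERR(mn)) mm->mn = mn" change. */
static void *find_or_create(void)
{
	void *obj = cached;          /* unlocked fast path */
	if (obj)
		return obj;

	pthread_mutex_lock(&lock);
	obj = cached;                /* re-check under the lock */
	if (!obj) {
		obj = create();
		if (obj)             /* publish only on success */
			cached = obj;
	}
	pthread_mutex_unlock(&lock);
	return obj;                  /* may be NULL if creation failed */
}

int main(void)
{
	printf("%s\n", find_or_create() ? "created" : "failed");
	return 0;
}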
2650 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
2651 index 0050ee9470f1..5d387a86b404 100644
2652 --- a/drivers/gpu/drm/i915/i915_irq.c
2653 +++ b/drivers/gpu/drm/i915/i915_irq.c
2654 @@ -3482,12 +3482,13 @@ static void gen8_irq_reset(struct drm_device *dev)
2655 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
2656 {
2657 unsigned long irqflags;
2658 + uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
2659
2660 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2661 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
2662 - ~dev_priv->de_irq_mask[PIPE_B]);
2663 + ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
2664 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
2665 - ~dev_priv->de_irq_mask[PIPE_C]);
2666 + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
2667 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2668 }
2669
2670 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
2671 index d8324c69fa86..b71a02663bae 100644
2672 --- a/drivers/gpu/drm/i915/intel_display.c
2673 +++ b/drivers/gpu/drm/i915/intel_display.c
2674 @@ -4470,7 +4470,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
2675 * BSpec erroneously claims we should aim for 4MHz, but
2676 * in fact 1MHz is the correct frequency.
2677 */
2678 - I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq);
2679 + I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
2680 }
2681
2682 /* Adjust CDclk dividers to allow high res or save power if possible */
2683 @@ -12507,6 +12507,9 @@ static struct intel_quirk intel_quirks[] = {
2684 /* Acer C720 Chromebook (Core i3 4005U) */
2685 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
2686
2687 + /* Apple Macbook 2,1 (Core 2 T7400) */
2688 + { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
2689 +
2690 /* Toshiba CB35 Chromebook (Celeron 2955U) */
2691 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
2692
2693 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
2694 index fdff1d420c14..9222e20e230c 100644
2695 --- a/drivers/gpu/drm/i915/intel_dp.c
2696 +++ b/drivers/gpu/drm/i915/intel_dp.c
2697 @@ -2364,6 +2364,13 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2698 ssize_t ret;
2699 int i;
2700
2701 + /*
2702 + * Sometime we just get the same incorrect byte repeated
2703 + * over the entire buffer. Doing just one throw away read
2704 + * initially seems to "solve" it.
2705 + */
2706 + drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2707 +
2708 for (i = 0; i < 3; i++) {
2709 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2710 if (ret == size)
2711 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
2712 index 8e374449c6b5..cbe8a8de85de 100644
2713 --- a/drivers/gpu/drm/i915/intel_panel.c
2714 +++ b/drivers/gpu/drm/i915/intel_panel.c
2715 @@ -398,6 +398,9 @@ intel_panel_detect(struct drm_device *dev)
2716 }
2717 }
2718
2719 +#define DIV_ROUND_CLOSEST_ULL(ll, d) \
2720 +({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
2721 +
2722 /**
2723 * scale - scale values from one range to another
2724 *
2725 @@ -419,9 +422,8 @@ static uint32_t scale(uint32_t source_val,
2726 source_val = clamp(source_val, source_min, source_max);
2727
2728 /* avoid overflows */
2729 - target_val = (uint64_t)(source_val - source_min) *
2730 - (target_max - target_min);
2731 - do_div(target_val, source_max - source_min);
2732 + target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
2733 + (target_max - target_min), source_max - source_min);
2734 target_val += target_min;
2735
2736 return target_val;
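The intel_panel.c change replaces truncating division with round-to-nearest when scaling backlight values between ranges, so mid-range values no longer land one step low. The new macro is just (n + d/2) / d on 64-bit operands; a minimal sketch:

#include <stdio.h>
#include <stdint.h>

/* Round-to-nearest unsigned 64-bit division, as DIV_ROUND_CLOSEST_ULL does. */
static uint64_t div_round_closest_ull(uint64_t n, uint64_t d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	/* Scale 50 from range [0,100] to [0,255]: the exact value is 127.5. */
	uint64_t n = 50 * 255ULL, d = 100;
	printf("truncated: %llu\n", (unsigned long long)(n / d));                      /* 127 */
	printf("closest:   %llu\n", (unsigned long long)div_round_closest_ull(n, d));  /* 128 */
	return 0;
}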
2737 diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
2738 index f5d7f7ce4bc6..0197d6c3d94c 100644
2739 --- a/drivers/gpu/drm/nouveau/Makefile
2740 +++ b/drivers/gpu/drm/nouveau/Makefile
2741 @@ -129,7 +129,7 @@ nouveau-y += core/subdev/fb/gddr5.o
2742 nouveau-y += core/subdev/gpio/base.o
2743 nouveau-y += core/subdev/gpio/nv10.o
2744 nouveau-y += core/subdev/gpio/nv50.o
2745 -nouveau-y += core/subdev/gpio/nv92.o
2746 +nouveau-y += core/subdev/gpio/nv94.o
2747 nouveau-y += core/subdev/gpio/nvd0.o
2748 nouveau-y += core/subdev/gpio/nve0.o
2749 nouveau-y += core/subdev/i2c/base.o
2750 diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
2751 index 932f84fae459..cbab586ec6f1 100644
2752 --- a/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
2753 +++ b/drivers/gpu/drm/nouveau/core/engine/device/nv50.c
2754 @@ -141,7 +141,7 @@ nv50_identify(struct nouveau_device *device)
2755 case 0x92:
2756 device->cname = "G92";
2757 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2758 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2759 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv50_gpio_oclass;
2760 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
2761 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
2762 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2763 @@ -169,7 +169,7 @@ nv50_identify(struct nouveau_device *device)
2764 case 0x94:
2765 device->cname = "G94";
2766 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2767 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2768 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2769 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2770 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
2771 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2772 @@ -197,7 +197,7 @@ nv50_identify(struct nouveau_device *device)
2773 case 0x96:
2774 device->cname = "G96";
2775 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2776 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2777 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2778 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2779 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
2780 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2781 @@ -225,7 +225,7 @@ nv50_identify(struct nouveau_device *device)
2782 case 0x98:
2783 device->cname = "G98";
2784 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2785 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2786 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2787 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2788 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
2789 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2790 @@ -253,7 +253,7 @@ nv50_identify(struct nouveau_device *device)
2791 case 0xa0:
2792 device->cname = "G200";
2793 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2794 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2795 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2796 device->oclass[NVDEV_SUBDEV_I2C ] = nv50_i2c_oclass;
2797 device->oclass[NVDEV_SUBDEV_CLOCK ] = nv84_clock_oclass;
2798 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2799 @@ -281,7 +281,7 @@ nv50_identify(struct nouveau_device *device)
2800 case 0xaa:
2801 device->cname = "MCP77/MCP78";
2802 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2803 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2804 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2805 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2806 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
2807 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2808 @@ -309,7 +309,7 @@ nv50_identify(struct nouveau_device *device)
2809 case 0xac:
2810 device->cname = "MCP79/MCP7A";
2811 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2812 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2813 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2814 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2815 device->oclass[NVDEV_SUBDEV_CLOCK ] = nvaa_clock_oclass;
2816 device->oclass[NVDEV_SUBDEV_THERM ] = &nv84_therm_oclass;
2817 @@ -337,7 +337,7 @@ nv50_identify(struct nouveau_device *device)
2818 case 0xa3:
2819 device->cname = "GT215";
2820 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2821 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2822 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2823 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2824 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
2825 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2826 @@ -367,7 +367,7 @@ nv50_identify(struct nouveau_device *device)
2827 case 0xa5:
2828 device->cname = "GT216";
2829 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2830 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2831 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2832 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2833 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
2834 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2835 @@ -396,7 +396,7 @@ nv50_identify(struct nouveau_device *device)
2836 case 0xa8:
2837 device->cname = "GT218";
2838 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2839 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2840 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2841 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2842 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
2843 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2844 @@ -425,7 +425,7 @@ nv50_identify(struct nouveau_device *device)
2845 case 0xaf:
2846 device->cname = "MCP89";
2847 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2848 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2849 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2850 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2851 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nva3_clock_oclass;
2852 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2853 diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
2854 index b4a2917ce555..da153a2cb6b5 100644
2855 --- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
2856 +++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
2857 @@ -60,7 +60,7 @@ nvc0_identify(struct nouveau_device *device)
2858 case 0xc0:
2859 device->cname = "GF100";
2860 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2861 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2862 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2863 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2864 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2865 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2866 @@ -92,7 +92,7 @@ nvc0_identify(struct nouveau_device *device)
2867 case 0xc4:
2868 device->cname = "GF104";
2869 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2870 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2871 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2872 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2873 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2874 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2875 @@ -124,7 +124,7 @@ nvc0_identify(struct nouveau_device *device)
2876 case 0xc3:
2877 device->cname = "GF106";
2878 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2879 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2880 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2881 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2882 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2883 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2884 @@ -155,7 +155,7 @@ nvc0_identify(struct nouveau_device *device)
2885 case 0xce:
2886 device->cname = "GF114";
2887 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2888 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2889 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2890 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2891 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2892 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2893 @@ -187,7 +187,7 @@ nvc0_identify(struct nouveau_device *device)
2894 case 0xcf:
2895 device->cname = "GF116";
2896 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2897 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2898 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2899 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2900 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2901 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2902 @@ -219,7 +219,7 @@ nvc0_identify(struct nouveau_device *device)
2903 case 0xc1:
2904 device->cname = "GF108";
2905 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2906 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2907 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2908 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2909 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2910 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2911 @@ -250,7 +250,7 @@ nvc0_identify(struct nouveau_device *device)
2912 case 0xc8:
2913 device->cname = "GF110";
2914 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
2915 - device->oclass[NVDEV_SUBDEV_GPIO ] = nv92_gpio_oclass;
2916 + device->oclass[NVDEV_SUBDEV_GPIO ] = nv94_gpio_oclass;
2917 device->oclass[NVDEV_SUBDEV_I2C ] = nv94_i2c_oclass;
2918 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
2919 device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
2920 diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
2921 index b73733d21cc7..f855140dbcb7 100644
2922 --- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
2923 +++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
2924 @@ -40,7 +40,7 @@ nouveau_gpio(void *obj)
2925
2926 extern struct nouveau_oclass *nv10_gpio_oclass;
2927 extern struct nouveau_oclass *nv50_gpio_oclass;
2928 -extern struct nouveau_oclass *nv92_gpio_oclass;
2929 +extern struct nouveau_oclass *nv94_gpio_oclass;
2930 extern struct nouveau_oclass *nvd0_gpio_oclass;
2931 extern struct nouveau_oclass *nve0_gpio_oclass;
2932
2933 diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
2934 index 88606bfaf847..bd8d348385b3 100644
2935 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
2936 +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
2937 @@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
2938 struct dcb_output *outp)
2939 {
2940 u16 dcb = dcb_outp(bios, idx, ver, len);
2941 + memset(outp, 0x00, sizeof(*outp));
2942 if (dcb) {
2943 if (*ver >= 0x20) {
2944 u32 conn = nv_ro32(bios, dcb + 0x00);
2945 diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
2946 deleted file mode 100644
2947 index 252083d376f5..000000000000
2948 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv92.c
2949 +++ /dev/null
2950 @@ -1,74 +0,0 @@
2951 -/*
2952 - * Copyright 2012 Red Hat Inc.
2953 - *
2954 - * Permission is hereby granted, free of charge, to any person obtaining a
2955 - * copy of this software and associated documentation files (the "Software"),
2956 - * to deal in the Software without restriction, including without limitation
2957 - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2958 - * and/or sell copies of the Software, and to permit persons to whom the
2959 - * Software is furnished to do so, subject to the following conditions:
2960 - *
2961 - * The above copyright notice and this permission notice shall be included in
2962 - * all copies or substantial portions of the Software.
2963 - *
2964 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2965 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2966 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2967 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
2968 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2969 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2970 - * OTHER DEALINGS IN THE SOFTWARE.
2971 - *
2972 - * Authors: Ben Skeggs
2973 - */
2974 -
2975 -#include "priv.h"
2976 -
2977 -void
2978 -nv92_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
2979 -{
2980 - u32 intr0 = nv_rd32(gpio, 0x00e054);
2981 - u32 intr1 = nv_rd32(gpio, 0x00e074);
2982 - u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
2983 - u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
2984 - *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
2985 - *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
2986 - nv_wr32(gpio, 0x00e054, intr0);
2987 - nv_wr32(gpio, 0x00e074, intr1);
2988 -}
2989 -
2990 -void
2991 -nv92_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
2992 -{
2993 - u32 inte0 = nv_rd32(gpio, 0x00e050);
2994 - u32 inte1 = nv_rd32(gpio, 0x00e070);
2995 - if (type & NVKM_GPIO_LO)
2996 - inte0 = (inte0 & ~(mask << 16)) | (data << 16);
2997 - if (type & NVKM_GPIO_HI)
2998 - inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
2999 - mask >>= 16;
3000 - data >>= 16;
3001 - if (type & NVKM_GPIO_LO)
3002 - inte1 = (inte1 & ~(mask << 16)) | (data << 16);
3003 - if (type & NVKM_GPIO_HI)
3004 - inte1 = (inte1 & ~mask) | data;
3005 - nv_wr32(gpio, 0x00e050, inte0);
3006 - nv_wr32(gpio, 0x00e070, inte1);
3007 -}
3008 -
3009 -struct nouveau_oclass *
3010 -nv92_gpio_oclass = &(struct nouveau_gpio_impl) {
3011 - .base.handle = NV_SUBDEV(GPIO, 0x92),
3012 - .base.ofuncs = &(struct nouveau_ofuncs) {
3013 - .ctor = _nouveau_gpio_ctor,
3014 - .dtor = _nouveau_gpio_dtor,
3015 - .init = _nouveau_gpio_init,
3016 - .fini = _nouveau_gpio_fini,
3017 - },
3018 - .lines = 32,
3019 - .intr_stat = nv92_gpio_intr_stat,
3020 - .intr_mask = nv92_gpio_intr_mask,
3021 - .drive = nv50_gpio_drive,
3022 - .sense = nv50_gpio_sense,
3023 - .reset = nv50_gpio_reset,
3024 -}.base;
3025 diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
3026 new file mode 100644
3027 index 000000000000..cae404ccadac
3028 --- /dev/null
3029 +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv94.c
3030 @@ -0,0 +1,74 @@
3031 +/*
3032 + * Copyright 2012 Red Hat Inc.
3033 + *
3034 + * Permission is hereby granted, free of charge, to any person obtaining a
3035 + * copy of this software and associated documentation files (the "Software"),
3036 + * to deal in the Software without restriction, including without limitation
3037 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
3038 + * and/or sell copies of the Software, and to permit persons to whom the
3039 + * Software is furnished to do so, subject to the following conditions:
3040 + *
3041 + * The above copyright notice and this permission notice shall be included in
3042 + * all copies or substantial portions of the Software.
3043 + *
3044 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
3045 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
3046 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
3047 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
3048 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
3049 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
3050 + * OTHER DEALINGS IN THE SOFTWARE.
3051 + *
3052 + * Authors: Ben Skeggs
3053 + */
3054 +
3055 +#include "priv.h"
3056 +
3057 +void
3058 +nv94_gpio_intr_stat(struct nouveau_gpio *gpio, u32 *hi, u32 *lo)
3059 +{
3060 + u32 intr0 = nv_rd32(gpio, 0x00e054);
3061 + u32 intr1 = nv_rd32(gpio, 0x00e074);
3062 + u32 stat0 = nv_rd32(gpio, 0x00e050) & intr0;
3063 + u32 stat1 = nv_rd32(gpio, 0x00e070) & intr1;
3064 + *lo = (stat1 & 0xffff0000) | (stat0 >> 16);
3065 + *hi = (stat1 << 16) | (stat0 & 0x0000ffff);
3066 + nv_wr32(gpio, 0x00e054, intr0);
3067 + nv_wr32(gpio, 0x00e074, intr1);
3068 +}
3069 +
3070 +void
3071 +nv94_gpio_intr_mask(struct nouveau_gpio *gpio, u32 type, u32 mask, u32 data)
3072 +{
3073 + u32 inte0 = nv_rd32(gpio, 0x00e050);
3074 + u32 inte1 = nv_rd32(gpio, 0x00e070);
3075 + if (type & NVKM_GPIO_LO)
3076 + inte0 = (inte0 & ~(mask << 16)) | (data << 16);
3077 + if (type & NVKM_GPIO_HI)
3078 + inte0 = (inte0 & ~(mask & 0xffff)) | (data & 0xffff);
3079 + mask >>= 16;
3080 + data >>= 16;
3081 + if (type & NVKM_GPIO_LO)
3082 + inte1 = (inte1 & ~(mask << 16)) | (data << 16);
3083 + if (type & NVKM_GPIO_HI)
3084 + inte1 = (inte1 & ~mask) | data;
3085 + nv_wr32(gpio, 0x00e050, inte0);
3086 + nv_wr32(gpio, 0x00e070, inte1);
3087 +}
3088 +
3089 +struct nouveau_oclass *
3090 +nv94_gpio_oclass = &(struct nouveau_gpio_impl) {
3091 + .base.handle = NV_SUBDEV(GPIO, 0x94),
3092 + .base.ofuncs = &(struct nouveau_ofuncs) {
3093 + .ctor = _nouveau_gpio_ctor,
3094 + .dtor = _nouveau_gpio_dtor,
3095 + .init = _nouveau_gpio_init,
3096 + .fini = _nouveau_gpio_fini,
3097 + },
3098 + .lines = 32,
3099 + .intr_stat = nv94_gpio_intr_stat,
3100 + .intr_mask = nv94_gpio_intr_mask,
3101 + .drive = nv50_gpio_drive,
3102 + .sense = nv50_gpio_sense,
3103 + .reset = nv50_gpio_reset,
3104 +}.base;
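
The new nv94.c mirrors the nv92 implementation removed above: each of the two status registers carries the "hi" events for sixteen GPIO lines in its low half and the "lo" events in its high half, and intr_stat folds them into one 32-bit hi mask and one 32-bit lo mask for lines 0-31. A minimal standalone sketch of that recombination (register values made up, not kernel code):

/* Standalone sketch (not kernel code): how the nv94-style intr_stat
 * recombines the two status registers.  Each register carries the "hi"
 * events for sixteen GPIO lines in its low half and the "lo" events in
 * its high half; the result is one hi mask and one lo mask for lines
 * 0-31.  Register values below are made up. */
#include <stdio.h>
#include <stdint.h>

static void split_stat(uint32_t stat0, uint32_t stat1,
                       uint32_t *hi, uint32_t *lo)
{
        *lo = (stat1 & 0xffff0000u) | (stat0 >> 16);
        *hi = (stat1 << 16) | (stat0 & 0x0000ffffu);
}

int main(void)
{
        uint32_t hi, lo;

        /* line 3 went high (stat0, low half); line 20 went low (stat1, high half) */
        split_stat(0x00000008u, 0x00100000u, &hi, &lo);
        printf("hi=0x%08x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);
        return 0;
}
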
3105 diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
3106 index a4682b0956ad..480d6d2af770 100644
3107 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
3108 +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
3109 @@ -77,8 +77,8 @@ nvd0_gpio_oclass = &(struct nouveau_gpio_impl) {
3110 .fini = _nouveau_gpio_fini,
3111 },
3112 .lines = 32,
3113 - .intr_stat = nv92_gpio_intr_stat,
3114 - .intr_mask = nv92_gpio_intr_mask,
3115 + .intr_stat = nv94_gpio_intr_stat,
3116 + .intr_mask = nv94_gpio_intr_mask,
3117 .drive = nvd0_gpio_drive,
3118 .sense = nvd0_gpio_sense,
3119 .reset = nvd0_gpio_reset,
3120 diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
3121 index e1724dfc86ae..bff98b86e2b5 100644
3122 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
3123 +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
3124 @@ -56,8 +56,8 @@ void nv50_gpio_reset(struct nouveau_gpio *, u8);
3125 int nv50_gpio_drive(struct nouveau_gpio *, int, int, int);
3126 int nv50_gpio_sense(struct nouveau_gpio *, int);
3127
3128 -void nv92_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
3129 -void nv92_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
3130 +void nv94_gpio_intr_stat(struct nouveau_gpio *, u32 *, u32 *);
3131 +void nv94_gpio_intr_mask(struct nouveau_gpio *, u32, u32, u32);
3132
3133 void nvd0_gpio_reset(struct nouveau_gpio *, u8);
3134 int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
3135 diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
3136 index 3440fc999f2f..497ea013f7d0 100644
3137 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c
3138 +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
3139 @@ -400,15 +400,20 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
3140 struct nouveau_channel **pchan)
3141 {
3142 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
3143 + bool super;
3144 int ret;
3145
3146 + /* hack until fencenv50 is fixed, and agp access relaxed */
3147 + super = cli->base.super;
3148 + cli->base.super = true;
3149 +
3150 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
3151 if (ret) {
3152 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
3153 ret = nouveau_channel_dma(drm, device, handle, pchan);
3154 if (ret) {
3155 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
3156 - return ret;
3157 + goto done;
3158 }
3159 }
3160
3161 @@ -416,8 +421,9 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
3162 if (ret) {
3163 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
3164 nouveau_channel_del(pchan);
3165 - return ret;
3166 }
3167
3168 - return 0;
3169 +done:
3170 + cli->base.super = super;
3171 + return ret;
3172 }
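
The nouveau_chan.c hunk wraps channel creation in a temporary elevation of cli->base.super and restores the saved value on every exit path through the new done: label. A minimal standalone sketch of that save/elevate/restore shape (names and stub functions are illustrative, not the nouveau API):

/* Standalone sketch: remember the caller's privilege flag, force it on
 * for the duration of the call chain, and restore it on every exit path
 * through a single label. */
#include <stdbool.h>
#include <stdio.h>

struct client { bool super; };

static int try_ind(struct client *cli) { (void)cli; return -1; }  /* pretend failure */
static int try_dma(struct client *cli) { (void)cli; return 0; }   /* fallback works */

static int create_channel(struct client *cli)
{
        bool super = cli->super;        /* save */
        int ret;

        cli->super = true;              /* temporarily elevate */

        ret = try_ind(cli);
        if (ret) {
                ret = try_dma(cli);
                if (ret)
                        goto done;
        }
        /* ...channel init would happen here... */
done:
        cli->super = super;             /* always restore */
        return ret;
}

int main(void)
{
        struct client cli = { .super = false };
        int ret = create_channel(&cli);

        printf("ret=%d, super restored to %d\n", ret, cli.super);
        return 0;
}
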
3173 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
3174 index 03949eaa629f..bca5d8c41231 100644
3175 --- a/drivers/gpu/drm/nouveau/nv50_display.c
3176 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
3177 @@ -1653,15 +1653,17 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
3178 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3179 struct nouveau_connector *nv_connector;
3180 struct nv50_disp *disp = nv50_disp(encoder->dev);
3181 - struct {
3182 - struct nv50_disp_mthd_v1 base;
3183 - struct nv50_disp_sor_hda_eld_v0 eld;
3184 + struct __packed {
3185 + struct {
3186 + struct nv50_disp_mthd_v1 mthd;
3187 + struct nv50_disp_sor_hda_eld_v0 eld;
3188 + } base;
3189 u8 data[sizeof(nv_connector->base.eld)];
3190 } args = {
3191 - .base.version = 1,
3192 - .base.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
3193 - .base.hasht = nv_encoder->dcb->hasht,
3194 - .base.hashm = nv_encoder->dcb->hashm,
3195 + .base.mthd.version = 1,
3196 + .base.mthd.method = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
3197 + .base.mthd.hasht = nv_encoder->dcb->hasht,
3198 + .base.mthd.hashm = nv_encoder->dcb->hashm,
3199 };
3200
3201 nv_connector = nouveau_encoder_connector_get(nv_encoder);
3202 @@ -1671,7 +1673,7 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
3203 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
3204 memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
3205
3206 - nvif_mthd(disp->disp, 0, &args, sizeof(args));
3207 + nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
3208 }
3209
3210 static void
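
The nv50_display.c hunk packs the method and ELD headers into one sub-struct so that only the used part of the ELD buffer is sent: byte 2 of an ELD holds the baseline ELD length in 4-byte units, so the payload is sizeof(args.base) plus that many dwords. A rough standalone sketch of the size computation (struct layout is illustrative, not the nvif ABI):

/* Rough standalone sketch: fixed part plus data[2] dwords instead of the
 * whole 96-byte buffer.  Struct layout is illustrative only. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct mthd_hdr { uint8_t version, method; uint16_t hasht, hashm; };
struct eld_hdr  { uint8_t version; };

struct args {
        struct {
                struct mthd_hdr mthd;
                struct eld_hdr  eld;
        } base;
        uint8_t data[96];
} __attribute__((packed));

int main(void)
{
        struct args a;

        memset(&a, 0, sizeof(a));
        a.data[2] = 20;         /* baseline ELD length, in dwords */

        printf("full struct: %zu bytes, actually sent: %zu bytes\n",
               sizeof(a), sizeof(a.base) + a.data[2] * 4);
        return 0;
}
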
3211 diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
3212 index b8ced08b6291..bac1fd43e15a 100644
3213 --- a/drivers/gpu/drm/qxl/qxl_display.c
3214 +++ b/drivers/gpu/drm/qxl/qxl_display.c
3215 @@ -523,7 +523,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
3216 struct qxl_framebuffer *qfb;
3217 struct qxl_bo *bo, *old_bo = NULL;
3218 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
3219 - uint32_t width, height, base_offset;
3220 bool recreate_primary = false;
3221 int ret;
3222 int surf_id;
3223 @@ -553,9 +552,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
3224 if (qcrtc->index == 0)
3225 recreate_primary = true;
3226
3227 - width = mode->hdisplay;
3228 - height = mode->vdisplay;
3229 - base_offset = 0;
3230 + if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
3231 + DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
3232 + return -EINVAL;
3233 + }
3234
3235 ret = qxl_bo_reserve(bo, false);
3236 if (ret != 0)
3237 @@ -569,10 +569,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
3238 if (recreate_primary) {
3239 qxl_io_destroy_primary(qdev);
3240 qxl_io_log(qdev,
3241 - "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
3242 - width, height, bo->surf.width,
3243 - bo->surf.height, bo->surf.stride, bo->surf.format);
3244 - qxl_io_create_primary(qdev, base_offset, bo);
3245 + "recreate primary: %dx%d,%d,%d\n",
3246 + bo->surf.width, bo->surf.height,
3247 + bo->surf.stride, bo->surf.format);
3248 + qxl_io_create_primary(qdev, 0, bo);
3249 bo->is_primary = true;
3250 }
3251
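
The qxl hunk drops the unused width/height/base_offset locals and adds a simple fit check: the framebuffer's stride times its height must not exceed VRAM before the primary surface is (re)created. A small standalone illustration with made-up numbers:

/* Standalone illustration: compare stride * height against available VRAM. */
#include <stdio.h>
#include <stdint.h>

static int mode_fits_vram(uint32_t stride, uint32_t height, uint64_t vram_size)
{
        return (uint64_t)stride * height <= vram_size;
}

int main(void)
{
        /* 1920x1080 at 32 bpp: stride = 1920 * 4 = 7680 bytes per line */
        printf("fits in 4 MiB:  %d\n", mode_fits_vram(7680, 1080,  4u << 20));
        printf("fits in 16 MiB: %d\n", mode_fits_vram(7680, 1080, 16u << 20));
        return 0;
}
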
3252 diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
3253 index c4ffa54b1e3d..e8eea36b52d1 100644
3254 --- a/drivers/gpu/drm/radeon/cik_sdma.c
3255 +++ b/drivers/gpu/drm/radeon/cik_sdma.c
3256 @@ -610,16 +610,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
3257 {
3258 unsigned i;
3259 int r;
3260 - void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3261 + unsigned index;
3262 u32 tmp;
3263 + u64 gpu_addr;
3264
3265 - if (!ptr) {
3266 - DRM_ERROR("invalid vram scratch pointer\n");
3267 - return -EINVAL;
3268 - }
3269 + if (ring->idx == R600_RING_TYPE_DMA_INDEX)
3270 + index = R600_WB_DMA_RING_TEST_OFFSET;
3271 + else
3272 + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
3273 +
3274 + gpu_addr = rdev->wb.gpu_addr + index;
3275
3276 tmp = 0xCAFEDEAD;
3277 - writel(tmp, ptr);
3278 + rdev->wb.wb[index/4] = cpu_to_le32(tmp);
3279
3280 r = radeon_ring_lock(rdev, ring, 5);
3281 if (r) {
3282 @@ -627,14 +630,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
3283 return r;
3284 }
3285 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
3286 - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
3287 - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));
3288 + radeon_ring_write(ring, lower_32_bits(gpu_addr));
3289 + radeon_ring_write(ring, upper_32_bits(gpu_addr));
3290 radeon_ring_write(ring, 1); /* number of DWs to follow */
3291 radeon_ring_write(ring, 0xDEADBEEF);
3292 radeon_ring_unlock_commit(rdev, ring, false);
3293
3294 for (i = 0; i < rdev->usec_timeout; i++) {
3295 - tmp = readl(ptr);
3296 + tmp = le32_to_cpu(rdev->wb.wb[index/4]);
3297 if (tmp == 0xDEADBEEF)
3298 break;
3299 DRM_UDELAY(1);
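
The cik_sdma ring test now seeds a slot in the GPU write-back page instead of the VRAM scratch register, using the byte offsets added to radeon.h and indexing the CPU-side u32 array with index/4. A standalone sketch of that offset/index bookkeeping (the GPU write is faked):

/* Standalone sketch: offsets into the write-back page are in bytes, the
 * CPU-side view is an array of 32-bit words, hence index/4 on readback.
 * The offsets mirror the two constants added to radeon.h. */
#include <stdio.h>
#include <stdint.h>

#define R600_WB_DMA_RING_TEST_OFFSET    3588    /* bytes into the WB page */
#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592

int main(void)
{
        uint32_t wb[1024] = { 0 };               /* stand-in for rdev->wb.wb */
        unsigned index = R600_WB_DMA_RING_TEST_OFFSET;

        wb[index / 4] = 0xCAFEDEAD;              /* seed written by the CPU */
        wb[index / 4] = 0xDEADBEEF;              /* pretend the ring wrote back */

        printf("slot %u = 0x%08X -> %s\n", index / 4, (unsigned)wb[index / 4],
               wb[index / 4] == 0xDEADBEEF ? "ring alive" : "timeout");
        return 0;
}
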
3300 diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
3301 index 51800e340a57..71f4d26669cd 100644
3302 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
3303 +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
3304 @@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
3305
3306 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
3307 if (sad_count < 0) {
3308 - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3309 - return;
3310 + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3311 + sad_count = 0;
3312 }
3313
3314 /* program the speaker allocation */
3315 diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
3316 index ab29f953a767..790d8cafdb87 100644
3317 --- a/drivers/gpu/drm/radeon/dce6_afmt.c
3318 +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
3319 @@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
3320 }
3321
3322 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
3323 - if (sad_count <= 0) {
3324 - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3325 - return;
3326 + if (sad_count < 0) {
3327 + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3328 + sad_count = 0;
3329 }
3330
3331 /* program the speaker allocation */
3332 diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
3333 index 278c7a139d74..71ebdf89fd76 100644
3334 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
3335 +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
3336 @@ -118,9 +118,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
3337 }
3338
3339 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
3340 - if (sad_count <= 0) {
3341 - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3342 - return;
3343 + if (sad_count < 0) {
3344 + DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
3345 + sad_count = 0;
3346 }
3347
3348 /* program the speaker allocation */
3349 diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
3350 index 67cb472d188c..e79b7ebf5894 100644
3351 --- a/drivers/gpu/drm/radeon/kv_dpm.c
3352 +++ b/drivers/gpu/drm/radeon/kv_dpm.c
3353 @@ -2725,7 +2725,11 @@ int kv_dpm_init(struct radeon_device *rdev)
3354
3355 pi->sram_end = SMC_RAM_END;
3356
3357 - pi->enable_nb_dpm = true;
3358 + /* Enabling nb dpm on an asrock system prevents dpm from working */
3359 + if (rdev->pdev->subsystem_vendor == 0x1849)
3360 + pi->enable_nb_dpm = false;
3361 + else
3362 + pi->enable_nb_dpm = true;
3363
3364 pi->caps_power_containment = true;
3365 pi->caps_cac = true;
3366 @@ -2740,10 +2744,19 @@ int kv_dpm_init(struct radeon_device *rdev)
3367 pi->caps_sclk_ds = true;
3368 pi->enable_auto_thermal_throttling = true;
3369 pi->disable_nb_ps3_in_battery = false;
3370 - if (radeon_bapm == 0)
3371 + if (radeon_bapm == -1) {
3372 + /* There are stability issues reported on with
3373 + * bapm enabled on an asrock system.
3374 + */
3375 + if (rdev->pdev->subsystem_vendor == 0x1849)
3376 + pi->bapm_enable = false;
3377 + else
3378 + pi->bapm_enable = true;
3379 + } else if (radeon_bapm == 0) {
3380 pi->bapm_enable = false;
3381 - else
3382 + } else {
3383 pi->bapm_enable = true;
3384 + }
3385 pi->voltage_drop_t = 0;
3386 pi->caps_sclk_throttle_low_notification = false;
3387 pi->caps_fps = false; /* true? */
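
The kv_dpm hunk turns radeon_bapm into the usual tri-state module parameter: -1 picks a per-board default (off for the ASRock subsystem vendor 0x1849), 0 forces the feature off, anything else forces it on. A standalone sketch of that resolution logic:

/* Standalone sketch of the tri-state resolution. */
#include <stdbool.h>
#include <stdio.h>

static bool resolve_bapm(int radeon_bapm, unsigned short subsystem_vendor)
{
        if (radeon_bapm == -1)
                return subsystem_vendor != 0x1849;      /* auto: off on ASRock */
        return radeon_bapm != 0;                        /* explicit user choice */
}

int main(void)
{
        printf("auto on ASRock: %d\n", resolve_bapm(-1, 0x1849));
        printf("auto elsewhere: %d\n", resolve_bapm(-1, 0x1043));
        printf("forced on:      %d\n", resolve_bapm(1, 0x1849));
        return 0;
}
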
3388 diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
3389 index a908daa006d2..44379bfca61f 100644
3390 --- a/drivers/gpu/drm/radeon/r600_dma.c
3391 +++ b/drivers/gpu/drm/radeon/r600_dma.c
3392 @@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
3393 {
3394 unsigned i;
3395 int r;
3396 - void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3397 + unsigned index;
3398 u32 tmp;
3399 + u64 gpu_addr;
3400
3401 - if (!ptr) {
3402 - DRM_ERROR("invalid vram scratch pointer\n");
3403 - return -EINVAL;
3404 - }
3405 + if (ring->idx == R600_RING_TYPE_DMA_INDEX)
3406 + index = R600_WB_DMA_RING_TEST_OFFSET;
3407 + else
3408 + index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
3409 +
3410 + gpu_addr = rdev->wb.gpu_addr + index;
3411
3412 tmp = 0xCAFEDEAD;
3413 - writel(tmp, ptr);
3414 + rdev->wb.wb[index/4] = cpu_to_le32(tmp);
3415
3416 r = radeon_ring_lock(rdev, ring, 4);
3417 if (r) {
3418 @@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
3419 return r;
3420 }
3421 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3422 - radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
3423 - radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
3424 + radeon_ring_write(ring, lower_32_bits(gpu_addr));
3425 + radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
3426 radeon_ring_write(ring, 0xDEADBEEF);
3427 radeon_ring_unlock_commit(rdev, ring, false);
3428
3429 for (i = 0; i < rdev->usec_timeout; i++) {
3430 - tmp = readl(ptr);
3431 + tmp = le32_to_cpu(rdev->wb.wb[index/4]);
3432 if (tmp == 0xDEADBEEF)
3433 break;
3434 DRM_UDELAY(1);
3435 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
3436 index 3247bfd14410..e8410581dd09 100644
3437 --- a/drivers/gpu/drm/radeon/radeon.h
3438 +++ b/drivers/gpu/drm/radeon/radeon.h
3439 @@ -1120,6 +1120,8 @@ struct radeon_wb {
3440 #define R600_WB_EVENT_OFFSET 3072
3441 #define CIK_WB_CP1_WPTR_OFFSET 3328
3442 #define CIK_WB_CP2_WPTR_OFFSET 3584
3443 +#define R600_WB_DMA_RING_TEST_OFFSET 3588
3444 +#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
3445
3446 /**
3447 * struct radeon_pm - power management datas
3448 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
3449 index 83f382e8e40e..e244c2d72730 100644
3450 --- a/drivers/gpu/drm/radeon/radeon_cs.c
3451 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
3452 @@ -418,7 +418,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
3453 kfree(parser->track);
3454 kfree(parser->relocs);
3455 kfree(parser->relocs_ptr);
3456 - kfree(parser->vm_bos);
3457 + drm_free_large(parser->vm_bos);
3458 for (i = 0; i < parser->nchunks; i++)
3459 drm_free_large(parser->chunks[i].kdata);
3460 kfree(parser->chunks);
3461 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
3462 index 12c8329644c4..6684fbf09929 100644
3463 --- a/drivers/gpu/drm/radeon/radeon_device.c
3464 +++ b/drivers/gpu/drm/radeon/radeon_device.c
3465 @@ -1130,7 +1130,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
3466 if (radeon_vm_block_size == -1) {
3467
3468 /* Total bits covered by PD + PTs */
3469 - unsigned bits = ilog2(radeon_vm_size) + 17;
3470 + unsigned bits = ilog2(radeon_vm_size) + 18;
3471
3472 /* Make sure the PD is 4K in size up to 8GB address space.
3473 Above that split equal between PD and PTs */
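
The radeon_device.c hunk bumps the PD+PT bit count to ilog2(radeon_vm_size) + 18. Assuming radeon_vm_size is expressed in GiB and pages are 4 KiB, that is exactly log2 of the number of pages the VM must cover; a small worked check:

/* Small worked check: log2(bytes / 4096) = ilog2(GiB) + 30 - 12. */
#include <stdio.h>

static unsigned ilog2u(unsigned v)
{
        unsigned r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        for (unsigned gib = 1; gib <= 8; gib <<= 1) {
                unsigned long long pages = ((unsigned long long)gib << 30) >> 12;

                printf("%u GiB -> %llu pages -> ilog2+18 = %u bits\n",
                       gib, pages, ilog2u(gib) + 18);
        }
        return 0;
}
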
3474 diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
3475 index d65607902537..93234350c34e 100644
3476 --- a/drivers/gpu/drm/radeon/radeon_ring.c
3477 +++ b/drivers/gpu/drm/radeon/radeon_ring.c
3478 @@ -335,7 +335,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
3479 }
3480
3481 /* and then save the content of the ring */
3482 - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
3483 + *data = drm_malloc_ab(size, sizeof(uint32_t));
3484 if (!*data) {
3485 mutex_unlock(&rdev->ring_lock);
3486 return 0;
3487 @@ -377,7 +377,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
3488 }
3489
3490 radeon_ring_unlock_commit(rdev, ring, false);
3491 - kfree(data);
3492 + drm_free_large(data);
3493 return 0;
3494 }
3495
3496 diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
3497 index 088ffdc2f577..a3b3e098624a 100644
3498 --- a/drivers/gpu/drm/radeon/radeon_vm.c
3499 +++ b/drivers/gpu/drm/radeon/radeon_vm.c
3500 @@ -132,8 +132,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
3501 struct radeon_cs_reloc *list;
3502 unsigned i, idx;
3503
3504 - list = kmalloc_array(vm->max_pde_used + 2,
3505 - sizeof(struct radeon_cs_reloc), GFP_KERNEL);
3506 + list = drm_malloc_ab(vm->max_pde_used + 2,
3507 + sizeof(struct radeon_cs_reloc));
3508 if (!list)
3509 return NULL;
3510
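
Both radeon_ring.c and radeon_vm.c switch from kmalloc_array()/kfree() to drm_malloc_ab()/drm_free_large(), since these allocations scale with ring and BO counts and may be too large for a contiguous kmalloc. A userspace stand-in for the "pick the allocator by size, free with the matching helper" idea (threshold and helpers are illustrative, not the DRM ones):

/* Userspace stand-in: large requests would take a vmalloc-style path and
 * be freed by a helper that knows which path was used. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define CONTIG_LIMIT (128 * 1024)       /* illustrative "kmalloc is fine" cap */

struct blob { int large; void *p; };

static struct blob alloc_ab(size_t nmemb, size_t size)
{
        struct blob b = { 0, NULL };

        if (size && nmemb > SIZE_MAX / size)
                return b;                       /* overflow check, like *_ab() */
        b.large = nmemb * size > CONTIG_LIMIT;  /* pretend: vmalloc vs kmalloc */
        b.p = malloc(nmemb * size);
        return b;
}

static void free_large(struct blob b)
{
        free(b.p);                              /* would pick kfree()/vfree() */
}

int main(void)
{
        struct blob ring = alloc_ab(64 * 1024, sizeof(unsigned int));

        printf("%s-style allocation\n", ring.large ? "vmalloc" : "kmalloc");
        free_large(ring);
        return 0;
}
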
3511 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
3512 index 70e61ffeace2..1202e0fd4292 100644
3513 --- a/drivers/gpu/drm/radeon/si_dpm.c
3514 +++ b/drivers/gpu/drm/radeon/si_dpm.c
3515 @@ -6255,7 +6255,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
3516 if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
3517 index == 0) {
3518 /* XXX disable for A0 tahiti */
3519 - si_pi->ulv.supported = true;
3520 + si_pi->ulv.supported = false;
3521 si_pi->ulv.pl = *pl;
3522 si_pi->ulv.one_pcie_lane_in_ulv = false;
3523 si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
3524 diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
3525 index 6be623b4a86f..000428e5773f 100644
3526 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
3527 +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
3528 @@ -84,6 +84,7 @@ static int modeset_init(struct drm_device *dev)
3529 if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
3530 /* oh nos! */
3531 dev_err(dev->dev, "no encoders/connectors found\n");
3532 + drm_mode_config_cleanup(dev);
3533 return -ENXIO;
3534 }
3535
3536 @@ -172,33 +173,37 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
3537 dev->dev_private = priv;
3538
3539 priv->wq = alloc_ordered_workqueue("tilcdc", 0);
3540 + if (!priv->wq) {
3541 + ret = -ENOMEM;
3542 + goto fail_free_priv;
3543 + }
3544
3545 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3546 if (!res) {
3547 dev_err(dev->dev, "failed to get memory resource\n");
3548 ret = -EINVAL;
3549 - goto fail;
3550 + goto fail_free_wq;
3551 }
3552
3553 priv->mmio = ioremap_nocache(res->start, resource_size(res));
3554 if (!priv->mmio) {
3555 dev_err(dev->dev, "failed to ioremap\n");
3556 ret = -ENOMEM;
3557 - goto fail;
3558 + goto fail_free_wq;
3559 }
3560
3561 priv->clk = clk_get(dev->dev, "fck");
3562 if (IS_ERR(priv->clk)) {
3563 dev_err(dev->dev, "failed to get functional clock\n");
3564 ret = -ENODEV;
3565 - goto fail;
3566 + goto fail_iounmap;
3567 }
3568
3569 priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
3570 if (IS_ERR(priv->clk)) {
3571 dev_err(dev->dev, "failed to get display clock\n");
3572 ret = -ENODEV;
3573 - goto fail;
3574 + goto fail_put_clk;
3575 }
3576
3577 #ifdef CONFIG_CPU_FREQ
3578 @@ -208,7 +213,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
3579 CPUFREQ_TRANSITION_NOTIFIER);
3580 if (ret) {
3581 dev_err(dev->dev, "failed to register cpufreq notifier\n");
3582 - goto fail;
3583 + goto fail_put_disp_clk;
3584 }
3585 #endif
3586
3587 @@ -253,13 +258,13 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
3588 ret = modeset_init(dev);
3589 if (ret < 0) {
3590 dev_err(dev->dev, "failed to initialize mode setting\n");
3591 - goto fail;
3592 + goto fail_cpufreq_unregister;
3593 }
3594
3595 ret = drm_vblank_init(dev, 1);
3596 if (ret < 0) {
3597 dev_err(dev->dev, "failed to initialize vblank\n");
3598 - goto fail;
3599 + goto fail_mode_config_cleanup;
3600 }
3601
3602 pm_runtime_get_sync(dev->dev);
3603 @@ -267,7 +272,7 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
3604 pm_runtime_put_sync(dev->dev);
3605 if (ret < 0) {
3606 dev_err(dev->dev, "failed to install IRQ handler\n");
3607 - goto fail;
3608 + goto fail_vblank_cleanup;
3609 }
3610
3611 platform_set_drvdata(pdev, dev);
3612 @@ -283,13 +288,48 @@ static int tilcdc_load(struct drm_device *dev, unsigned long flags)
3613 priv->fbdev = drm_fbdev_cma_init(dev, bpp,
3614 dev->mode_config.num_crtc,
3615 dev->mode_config.num_connector);
3616 + if (IS_ERR(priv->fbdev)) {
3617 + ret = PTR_ERR(priv->fbdev);
3618 + goto fail_irq_uninstall;
3619 + }
3620
3621 drm_kms_helper_poll_init(dev);
3622
3623 return 0;
3624
3625 -fail:
3626 - tilcdc_unload(dev);
3627 +fail_irq_uninstall:
3628 + pm_runtime_get_sync(dev->dev);
3629 + drm_irq_uninstall(dev);
3630 + pm_runtime_put_sync(dev->dev);
3631 +
3632 +fail_vblank_cleanup:
3633 + drm_vblank_cleanup(dev);
3634 +
3635 +fail_mode_config_cleanup:
3636 + drm_mode_config_cleanup(dev);
3637 +
3638 +fail_cpufreq_unregister:
3639 + pm_runtime_disable(dev->dev);
3640 +#ifdef CONFIG_CPU_FREQ
3641 + cpufreq_unregister_notifier(&priv->freq_transition,
3642 + CPUFREQ_TRANSITION_NOTIFIER);
3643 +fail_put_disp_clk:
3644 + clk_put(priv->disp_clk);
3645 +#endif
3646 +
3647 +fail_put_clk:
3648 + clk_put(priv->clk);
3649 +
3650 +fail_iounmap:
3651 + iounmap(priv->mmio);
3652 +
3653 +fail_free_wq:
3654 + flush_workqueue(priv->wq);
3655 + destroy_workqueue(priv->wq);
3656 +
3657 +fail_free_priv:
3658 + dev->dev_private = NULL;
3659 + kfree(priv);
3660 return ret;
3661 }
3662
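
The tilcdc_load() hunk replaces the single catch-all fail: label with a chain of labels that unwind only what has been set up so far, in reverse order, instead of calling the full unload routine on a half-initialised device. A minimal standalone sketch of that goto-unwind shape with dummy resources:

/* Minimal standalone sketch of the goto-unwind shape. */
#include <stdio.h>
#include <stdlib.h>

static int step(const char *name, int fail)
{
        printf("init %s\n", name);
        return fail ? -1 : 0;
}

static int load(void)
{
        void *wq;
        int ret;

        wq = malloc(16);                /* stand-in for the workqueue */
        if (!wq)
                return -1;

        ret = step("mmio", 0);
        if (ret)
                goto fail_free_wq;

        ret = step("clk", 1);           /* pretend this step fails */
        if (ret)
                goto fail_unmap_mmio;

        free(wq);                       /* not reached in this run */
        return 0;

fail_unmap_mmio:
        printf("undo mmio\n");
fail_free_wq:
        printf("undo wq\n");
        free(wq);
        return ret;
}

int main(void)
{
        return load() ? 1 : 0;
}
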
3663 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
3664 index 18b54acacfbb..14b2f50ad7e9 100644
3665 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
3666 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
3667 @@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
3668 goto out_err0;
3669 }
3670
3671 - if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
3672 + /*
3673 + * Limit back buffer size to VRAM size. Remove this once
3674 + * screen targets are implemented.
3675 + */
3676 + if (dev_priv->prim_bb_mem > dev_priv->vram_size)
3677 dev_priv->prim_bb_mem = dev_priv->vram_size;
3678
3679 mutex_unlock(&dev_priv->hw_mutex);
3680 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
3681 index d2bc2b03d4c6..10fc4c3e1c0c 100644
3682 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
3683 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
3684 @@ -1950,6 +1950,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
3685 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
3686 };
3687 int i;
3688 + u32 assumed_bpp = 2;
3689 +
3690 + /*
3691 + * If using screen objects, then assume 32-bpp because that's what the
3692 + * SVGA device is assuming
3693 + */
3694 + if (dev_priv->sou_priv)
3695 + assumed_bpp = 4;
3696
3697 /* Add preferred mode */
3698 {
3699 @@ -1960,8 +1968,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
3700 mode->vdisplay = du->pref_height;
3701 vmw_guess_mode_timing(mode);
3702
3703 - if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
3704 - mode->vdisplay)) {
3705 + if (vmw_kms_validate_mode_vram(dev_priv,
3706 + mode->hdisplay * assumed_bpp,
3707 + mode->vdisplay)) {
3708 drm_mode_probed_add(connector, mode);
3709 } else {
3710 drm_mode_destroy(dev, mode);
3711 @@ -1983,7 +1992,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
3712 bmode->vdisplay > max_height)
3713 continue;
3714
3715 - if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
3716 + if (!vmw_kms_validate_mode_vram(dev_priv,
3717 + bmode->hdisplay * assumed_bpp,
3718 bmode->vdisplay))
3719 continue;
3720
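
The vmwgfx_kms.c hunk validates modes against assumed_bpp bytes per pixel instead of a hard-coded 2, since screen objects imply 32 bpp. A quick worked comparison of the two estimates:

/* Quick worked comparison: 2 B/px roughly halves the real VRAM need. */
#include <stdio.h>
#include <stdint.h>

static uint64_t mode_bytes(uint32_t w, uint32_t h, uint32_t bytes_pp)
{
        return (uint64_t)w * bytes_pp * h;
}

int main(void)
{
        printf("1920x1200 @ 2 B/px: %llu bytes\n",
               (unsigned long long)mode_bytes(1920, 1200, 2));
        printf("1920x1200 @ 4 B/px: %llu bytes\n",
               (unsigned long long)mode_bytes(1920, 1200, 4));
        return 0;
}
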
3721 diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
3722 index 84c3cb15ccdd..8bf61d295ffd 100644
3723 --- a/drivers/hid/hid-debug.c
3724 +++ b/drivers/hid/hid-debug.c
3725 @@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = {
3726 [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
3727 [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
3728 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
3729 + [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
3730 + [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
3731 + [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
3732 + [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
3733 + [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
3734 + [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
3735 };
3736
3737 static const char *relatives[REL_MAX + 1] = {
3738 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
3739 index 25cd674d6064..c3a712c8c915 100644
3740 --- a/drivers/hid/hid-ids.h
3741 +++ b/drivers/hid/hid-ids.h
3742 @@ -296,6 +296,11 @@
3743 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7
3744 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
3745
3746 +#define USB_VENDOR_ID_ELAN 0x04f3
3747 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
3748 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
3749 +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
3750 +
3751 #define USB_VENDOR_ID_ELECOM 0x056e
3752 #define USB_DEVICE_ID_ELECOM_BM084 0x0061
3753
3754 @@ -733,6 +738,8 @@
3755 #define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
3756
3757 #define USB_VENDOR_ID_PIXART 0x093a
3758 +#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2 0x0137
3759 +#define USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE 0x2510
3760 #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
3761 #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
3762 #define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
3763 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
3764 index 2619f7f4517a..62e828655922 100644
3765 --- a/drivers/hid/hid-input.c
3766 +++ b/drivers/hid/hid-input.c
3767 @@ -689,7 +689,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
3768 break;
3769
3770 case 0x5b: /* TransducerSerialNumber */
3771 - set_bit(MSC_SERIAL, input->mscbit);
3772 + usage->type = EV_MSC;
3773 + usage->code = MSC_SERIAL;
3774 + bit = input->mscbit;
3775 + max = MSC_MAX;
3776 break;
3777
3778 default: goto unknown;
3779 @@ -856,6 +859,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
3780 case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
3781 case 0x28c: map_key_clear(KEY_SEND); break;
3782
3783 + case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
3784 + case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
3785 + case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
3786 + case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break;
3787 + case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
3788 + case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
3789 +
3790 default: goto ignore;
3791 }
3792 break;
3793 diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
3794 index 79cf503e37bf..ddd547ad6d7e 100644
3795 --- a/drivers/hid/usbhid/hid-core.c
3796 +++ b/drivers/hid/usbhid/hid-core.c
3797 @@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid)
3798 struct usbhid_device *usbhid = hid->driver_data;
3799
3800 spin_lock_irqsave(&usbhid->lock, flags);
3801 - if (hid->open > 0 &&
3802 + if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) &&
3803 !test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
3804 !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
3805 !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
3806 @@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb)
3807 case 0: /* success */
3808 usbhid_mark_busy(usbhid);
3809 usbhid->retry_delay = 0;
3810 + if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
3811 + break;
3812 hid_input_report(urb->context, HID_INPUT_REPORT,
3813 urb->transfer_buffer,
3814 urb->actual_length, 1);
3815 @@ -735,8 +737,10 @@ void usbhid_close(struct hid_device *hid)
3816 if (!--hid->open) {
3817 spin_unlock_irq(&usbhid->lock);
3818 hid_cancel_delayed_stuff(usbhid);
3819 - usb_kill_urb(usbhid->urbin);
3820 - usbhid->intf->needs_remote_wakeup = 0;
3821 + if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) {
3822 + usb_kill_urb(usbhid->urbin);
3823 + usbhid->intf->needs_remote_wakeup = 0;
3824 + }
3825 } else {
3826 spin_unlock_irq(&usbhid->lock);
3827 }
3828 @@ -1134,6 +1138,19 @@ static int usbhid_start(struct hid_device *hid)
3829
3830 set_bit(HID_STARTED, &usbhid->iofl);
3831
3832 + if (hid->quirks & HID_QUIRK_ALWAYS_POLL) {
3833 + ret = usb_autopm_get_interface(usbhid->intf);
3834 + if (ret)
3835 + goto fail;
3836 + usbhid->intf->needs_remote_wakeup = 1;
3837 + ret = hid_start_in(hid);
3838 + if (ret) {
3839 + dev_err(&hid->dev,
3840 + "failed to start in urb: %d\n", ret);
3841 + }
3842 + usb_autopm_put_interface(usbhid->intf);
3843 + }
3844 +
3845 /* Some keyboards don't work until their LEDs have been set.
3846 * Since BIOSes do set the LEDs, it must be safe for any device
3847 * that supports the keyboard boot protocol.
3848 @@ -1166,6 +1183,9 @@ static void usbhid_stop(struct hid_device *hid)
3849 if (WARN_ON(!usbhid))
3850 return;
3851
3852 + if (hid->quirks & HID_QUIRK_ALWAYS_POLL)
3853 + usbhid->intf->needs_remote_wakeup = 0;
3854 +
3855 clear_bit(HID_STARTED, &usbhid->iofl);
3856 spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
3857 set_bit(HID_DISCONNECTED, &usbhid->iofl);
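
The usbhid hunks implement HID_QUIRK_ALWAYS_POLL: the interrupt-in URB keeps running even while the device is closed, but reports received in that state are dropped. A plain C model of the two conditions (not the usbhid code itself):

/* Plain C model of the two gating conditions. */
#include <stdbool.h>
#include <stdio.h>

struct dev { int open; bool always_poll; };

static bool keep_urb_running(const struct dev *d)
{
        return d->open > 0 || d->always_poll;
}

static bool deliver_report(const struct dev *d)
{
        return !(d->always_poll && d->open == 0);
}

int main(void)
{
        struct dev quirky = { .open = 0, .always_poll = true  };
        struct dev normal = { .open = 0, .always_poll = false };

        printf("quirky, closed: poll=%d deliver=%d\n",
               keep_urb_running(&quirky), deliver_report(&quirky));
        printf("normal, closed: poll=%d deliver=%d\n",
               keep_urb_running(&normal), deliver_report(&normal));
        return 0;
}
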
3858 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
3859 index 15225f3eaed1..5014bb567b29 100644
3860 --- a/drivers/hid/usbhid/hid-quirks.c
3861 +++ b/drivers/hid/usbhid/hid-quirks.c
3862 @@ -70,6 +70,9 @@ static const struct hid_blacklist {
3863 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
3864 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
3865 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
3866 + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
3867 + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
3868 + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
3869 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
3870 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
3871 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
3872 @@ -79,6 +82,8 @@ static const struct hid_blacklist {
3873 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
3874 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
3875 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
3876 + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
3877 + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2, HID_QUIRK_ALWAYS_POLL },
3878 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
3879 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
3880 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
3881 diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
3882 index 917d54588d95..e05a672db3e5 100644
3883 --- a/drivers/i2c/busses/i2c-at91.c
3884 +++ b/drivers/i2c/busses/i2c-at91.c
3885 @@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
3886 }
3887 }
3888
3889 - ret = wait_for_completion_io_timeout(&dev->cmd_complete,
3890 + ret = wait_for_completion_timeout(&dev->cmd_complete,
3891 dev->adapter.timeout);
3892 if (ret == 0) {
3893 dev_err(dev->dev, "controller timed out\n");
3894 diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
3895 index 1665c8e4b62b..e18bc6782256 100644
3896 --- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
3897 +++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
3898 @@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
3899 goto st_sensors_free_memory;
3900 }
3901
3902 - for (i = 0; i < n * num_data_channels; i++) {
3903 + for (i = 0; i < n * byte_for_channel; i++) {
3904 if (i < n)
3905 buf[i] = rx_array[i];
3906 else
3907 diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
3908 index 5e780ef206f3..8349cc0fdf66 100644
3909 --- a/drivers/iio/proximity/as3935.c
3910 +++ b/drivers/iio/proximity/as3935.c
3911 @@ -330,7 +330,7 @@ static int as3935_probe(struct spi_device *spi)
3912 return -EINVAL;
3913 }
3914
3915 - indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(st));
3916 + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
3917 if (!indio_dev)
3918 return -ENOMEM;
3919
3920 diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
3921 index bda5994ceb68..8b72cf392b34 100644
3922 --- a/drivers/infiniband/hw/mlx4/main.c
3923 +++ b/drivers/infiniband/hw/mlx4/main.c
3924 @@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
3925 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
3926 &mflow->reg_id[i]);
3927 if (err)
3928 - goto err_free;
3929 + goto err_create_flow;
3930 i++;
3931 }
3932
3933 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
3934 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
3935 if (err)
3936 - goto err_free;
3937 + goto err_create_flow;
3938 + i++;
3939 }
3940
3941 return &mflow->ibflow;
3942
3943 +err_create_flow:
3944 + while (i) {
3945 + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
3946 + i--;
3947 + }
3948 err_free:
3949 kfree(mflow);
3950 return ERR_PTR(err);
3951 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
3952 index da8ff124762a..4d35bc71e2d6 100644
3953 --- a/drivers/infiniband/ulp/isert/ib_isert.c
3954 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
3955 @@ -2185,7 +2185,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
3956 isert_cmd->tx_desc.num_sge = 2;
3957 }
3958
3959 - isert_init_send_wr(isert_conn, isert_cmd, send_wr, true);
3960 + isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
3961
3962 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
3963
3964 @@ -2884,7 +2884,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
3965 &isert_cmd->tx_desc.iscsi_header);
3966 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
3967 isert_init_send_wr(isert_conn, isert_cmd,
3968 - &isert_cmd->tx_desc.send_wr, true);
3969 + &isert_cmd->tx_desc.send_wr, false);
3970 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
3971 wr->send_wr_num += 1;
3972 }
3973 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
3974 index 35a49bf57227..2b0ae8cc8e51 100644
3975 --- a/drivers/input/mouse/alps.c
3976 +++ b/drivers/input/mouse/alps.c
3977 @@ -835,8 +835,8 @@ static void alps_process_packet_v4(struct psmouse *psmouse)
3978 f->fingers = alps_process_bitmap(priv, f);
3979 }
3980
3981 - f->left = packet[4] & 0x01;
3982 - f->right = packet[4] & 0x02;
3983 + f->left = !!(packet[4] & 0x01);
3984 + f->right = !!(packet[4] & 0x02);
3985
3986 f->st.x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
3987 ((packet[0] & 0x30) >> 4);
3988 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
3989 index fd23181c1fb7..b5b630c484c5 100644
3990 --- a/drivers/input/mouse/synaptics.c
3991 +++ b/drivers/input/mouse/synaptics.c
3992 @@ -618,6 +618,8 @@ static void synaptics_parse_agm(const unsigned char buf[],
3993 priv->agm_pending = true;
3994 }
3995
3996 +static bool is_forcepad;
3997 +
3998 static int synaptics_parse_hw_state(const unsigned char buf[],
3999 struct synaptics_data *priv,
4000 struct synaptics_hw_state *hw)
4001 @@ -647,7 +649,7 @@ static int synaptics_parse_hw_state(const unsigned char buf[],
4002 hw->left = (buf[0] & 0x01) ? 1 : 0;
4003 hw->right = (buf[0] & 0x02) ? 1 : 0;
4004
4005 - if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) {
4006 + if (is_forcepad) {
4007 /*
4008 * ForcePads, like Clickpads, use middle button
4009 * bits to report primary button clicks.
4010 @@ -1678,11 +1680,29 @@ static const struct dmi_system_id __initconst cr48_dmi_table[] = {
4011 { }
4012 };
4013
4014 +static const struct dmi_system_id forcepad_dmi_table[] __initconst = {
4015 +#if defined(CONFIG_DMI) && defined(CONFIG_X86)
4016 + {
4017 + .matches = {
4018 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
4019 + DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Folio 1040 G1"),
4020 + },
4021 + },
4022 +#endif
4023 + { }
4024 +};
4025 +
4026 void __init synaptics_module_init(void)
4027 {
4028 impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
4029 broken_olpc_ec = dmi_check_system(olpc_dmi_table);
4030 cr48_profile_sensor = dmi_check_system(cr48_dmi_table);
4031 +
4032 + /*
4033 + * Unfortunately ForcePad capability is not exported over PS/2,
4034 + * so we have to resort to checking DMI.
4035 + */
4036 + is_forcepad = dmi_check_system(forcepad_dmi_table);
4037 }
4038
4039 static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
4040 diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
4041 index fb2e076738ae..1bd01f21783b 100644
4042 --- a/drivers/input/mouse/synaptics.h
4043 +++ b/drivers/input/mouse/synaptics.h
4044 @@ -77,12 +77,9 @@
4045 * for noise.
4046 * 2 0x08 image sensor image sensor tracks 5 fingers, but only
4047 * reports 2.
4048 + * 2 0x01 uniform clickpad whole clickpad moves instead of being
4049 + * hinged at the top.
4050 * 2 0x20 report min query 0x0f gives min coord reported
4051 - * 2 0x80 forcepad forcepad is a variant of clickpad that
4052 - * does not have physical buttons but rather
4053 - * uses pressure above certain threshold to
4054 - * report primary clicks. Forcepads also have
4055 - * clickpad bit set.
4056 */
4057 #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
4058 #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
4059 @@ -91,7 +88,6 @@
4060 #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000)
4061 #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400)
4062 #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800)
4063 -#define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000)
4064
4065 /* synaptics modes query bits */
4066 #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7))
4067 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
4068 index 40b7d6c0ff17..faeeb1372462 100644
4069 --- a/drivers/input/serio/i8042-x86ia64io.h
4070 +++ b/drivers/input/serio/i8042-x86ia64io.h
4071 @@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
4072 },
4073 {
4074 .matches = {
4075 + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
4076 + DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"),
4077 + },
4078 + },
4079 + {
4080 + .matches = {
4081 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
4082 DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"),
4083 DMI_MATCH(DMI_PRODUCT_VERSION, "8500"),
4084 @@ -623,6 +629,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
4085 },
4086 },
4087 {
4088 + /* Fujitsu A544 laptop */
4089 + /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
4090 + .matches = {
4091 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
4092 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
4093 + },
4094 + },
4095 + {
4096 + /* Fujitsu AH544 laptop */
4097 + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
4098 + .matches = {
4099 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
4100 + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
4101 + },
4102 + },
4103 + {
4104 /* Fujitsu U574 laptop */
4105 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
4106 .matches = {
4107 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
4108 index ecb0109a5360..5aff937eb1f9 100644
4109 --- a/drivers/iommu/amd_iommu.c
4110 +++ b/drivers/iommu/amd_iommu.c
4111 @@ -260,17 +260,13 @@ static bool check_device(struct device *dev)
4112 return true;
4113 }
4114
4115 -static int init_iommu_group(struct device *dev)
4116 +static void init_iommu_group(struct device *dev)
4117 {
4118 struct iommu_group *group;
4119
4120 group = iommu_group_get_for_dev(dev);
4121 -
4122 - if (IS_ERR(group))
4123 - return PTR_ERR(group);
4124 -
4125 - iommu_group_put(group);
4126 - return 0;
4127 + if (!IS_ERR(group))
4128 + iommu_group_put(group);
4129 }
4130
4131 static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
4132 @@ -340,7 +336,6 @@ static int iommu_init_device(struct device *dev)
4133 struct pci_dev *pdev = to_pci_dev(dev);
4134 struct iommu_dev_data *dev_data;
4135 u16 alias;
4136 - int ret;
4137
4138 if (dev->archdata.iommu)
4139 return 0;
4140 @@ -364,12 +359,6 @@ static int iommu_init_device(struct device *dev)
4141 dev_data->alias_data = alias_data;
4142 }
4143
4144 - ret = init_iommu_group(dev);
4145 - if (ret) {
4146 - free_dev_data(dev_data);
4147 - return ret;
4148 - }
4149 -
4150 if (pci_iommuv2_capable(pdev)) {
4151 struct amd_iommu *iommu;
4152
4153 @@ -455,6 +444,15 @@ int __init amd_iommu_init_devices(void)
4154 goto out_free;
4155 }
4156
4157 + /*
4158 + * Initialize IOMMU groups only after iommu_init_device() has
4159 + * had a chance to populate any IVRS defined aliases.
4160 + */
4161 + for_each_pci_dev(pdev) {
4162 + if (check_device(&pdev->dev))
4163 + init_iommu_group(&pdev->dev);
4164 + }
4165 +
4166 return 0;
4167
4168 out_free:
4169 @@ -2415,6 +2413,7 @@ static int device_change_notifier(struct notifier_block *nb,
4170 case BUS_NOTIFY_ADD_DEVICE:
4171
4172 iommu_init_device(dev);
4173 + init_iommu_group(dev);
4174
4175 /*
4176 * dev_data is still NULL and
4177 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
4178 index 0639b9274b11..690818dbbeed 100644
4179 --- a/drivers/iommu/iommu.c
4180 +++ b/drivers/iommu/iommu.c
4181 @@ -30,6 +30,7 @@
4182 #include <linux/notifier.h>
4183 #include <linux/err.h>
4184 #include <linux/pci.h>
4185 +#include <linux/bitops.h>
4186 #include <trace/events/iommu.h>
4187
4188 static struct kset *iommu_group_kset;
4189 @@ -519,6 +520,9 @@ int iommu_group_id(struct iommu_group *group)
4190 }
4191 EXPORT_SYMBOL_GPL(iommu_group_id);
4192
4193 +static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
4194 + unsigned long *devfns);
4195 +
4196 /*
4197 * To consider a PCI device isolated, we require ACS to support Source
4198 * Validation, Request Redirection, Completer Redirection, and Upstream
4199 @@ -529,6 +533,86 @@ EXPORT_SYMBOL_GPL(iommu_group_id);
4200 */
4201 #define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
4202
4203 +/*
4204 + * For multifunction devices which are not isolated from each other, find
4205 + * all the other non-isolated functions and look for existing groups. For
4206 + * each function, we also need to look for aliases to or from other devices
4207 + * that may already have a group.
4208 + */
4209 +static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
4210 + unsigned long *devfns)
4211 +{
4212 + struct pci_dev *tmp = NULL;
4213 + struct iommu_group *group;
4214 +
4215 + if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
4216 + return NULL;
4217 +
4218 + for_each_pci_dev(tmp) {
4219 + if (tmp == pdev || tmp->bus != pdev->bus ||
4220 + PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
4221 + pci_acs_enabled(tmp, REQ_ACS_FLAGS))
4222 + continue;
4223 +
4224 + group = get_pci_alias_group(tmp, devfns);
4225 + if (group) {
4226 + pci_dev_put(tmp);
4227 + return group;
4228 + }
4229 + }
4230 +
4231 + return NULL;
4232 +}
4233 +
4234 +/*
4235 + * Look for aliases to or from the given device for existing groups. The
4236 + * dma_alias_devfn only supports aliases on the same bus, therefore the search
4237 + * space is quite small (especially since we're really only looking at pcie
4238 + * devices, and therefore only expect multiple slots on the root complex or
4239 + * downstream switch ports). It's conceivable though that a pair of
4240 + * multifunction devices could have aliases between them that would cause a
4241 + * loop. To prevent this, we use a bitmap to track where we've been.
4242 + */
4243 +static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
4244 + unsigned long *devfns)
4245 +{
4246 + struct pci_dev *tmp = NULL;
4247 + struct iommu_group *group;
4248 +
4249 + if (test_and_set_bit(pdev->devfn & 0xff, devfns))
4250 + return NULL;
4251 +
4252 + group = iommu_group_get(&pdev->dev);
4253 + if (group)
4254 + return group;
4255 +
4256 + for_each_pci_dev(tmp) {
4257 + if (tmp == pdev || tmp->bus != pdev->bus)
4258 + continue;
4259 +
4260 + /* We alias them or they alias us */
4261 + if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
4262 + pdev->dma_alias_devfn == tmp->devfn) ||
4263 + ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) &&
4264 + tmp->dma_alias_devfn == pdev->devfn)) {
4265 +
4266 + group = get_pci_alias_group(tmp, devfns);
4267 + if (group) {
4268 + pci_dev_put(tmp);
4269 + return group;
4270 + }
4271 +
4272 + group = get_pci_function_alias_group(tmp, devfns);
4273 + if (group) {
4274 + pci_dev_put(tmp);
4275 + return group;
4276 + }
4277 + }
4278 + }
4279 +
4280 + return NULL;
4281 +}
4282 +
4283 struct group_for_pci_data {
4284 struct pci_dev *pdev;
4285 struct iommu_group *group;
4286 @@ -557,7 +641,7 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
4287 struct group_for_pci_data data;
4288 struct pci_bus *bus;
4289 struct iommu_group *group = NULL;
4290 - struct pci_dev *tmp;
4291 + u64 devfns[4] = { 0 };
4292
4293 /*
4294 * Find the upstream DMA alias for the device. A device must not
4295 @@ -591,76 +675,21 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev)
4296 }
4297
4298 /*
4299 - * Next we need to consider DMA alias quirks. If one device aliases
4300 - * to another, they should be grouped together. It's theoretically
4301 - * possible that aliases could create chains of devices where each
4302 - * device aliases another device. If we then factor in multifunction
4303 - * ACS grouping requirements, each alias could incorporate a new slot
4304 - * with multiple functions, each with aliases. This is all extremely
4305 - * unlikely as DMA alias quirks are typically only used for PCIe
4306 - * devices where we usually have a single slot per bus. Furthermore,
4307 - * the alias quirk is usually to another function within the slot
4308 - * (and ACS multifunction is not supported) or to a different slot
4309 - * that doesn't physically exist. The likely scenario is therefore
4310 - * that everything on the bus gets grouped together. To reduce the
4311 - * problem space, share the IOMMU group for all devices on the bus
4312 - * if a DMA alias quirk is present on the bus.
4313 - */
4314 - tmp = NULL;
4315 - for_each_pci_dev(tmp) {
4316 - if (tmp->bus != pdev->bus ||
4317 - !(tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN))
4318 - continue;
4319 -
4320 - pci_dev_put(tmp);
4321 - tmp = NULL;
4322 -
4323 - /* We have an alias quirk, search for an existing group */
4324 - for_each_pci_dev(tmp) {
4325 - struct iommu_group *group_tmp;
4326 -
4327 - if (tmp->bus != pdev->bus)
4328 - continue;
4329 -
4330 - group_tmp = iommu_group_get(&tmp->dev);
4331 - if (!group) {
4332 - group = group_tmp;
4333 - continue;
4334 - }
4335 -
4336 - if (group_tmp) {
4337 - WARN_ON(group != group_tmp);
4338 - iommu_group_put(group_tmp);
4339 - }
4340 - }
4341 -
4342 - return group ? group : iommu_group_alloc();
4343 - }
4344 -
4345 - /*
4346 - * Non-multifunction devices or multifunction devices supporting
4347 - * ACS get their own group.
4348 + * Look for existing groups on device aliases. If we alias another
4349 + * device or another device aliases us, use the same group.
4350 */
4351 - if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
4352 - return iommu_group_alloc();
4353 + group = get_pci_alias_group(pdev, (unsigned long *)devfns);
4354 + if (group)
4355 + return group;
4356
4357 /*
4358 - * Multifunction devices not supporting ACS share a group with other
4359 - * similar devices in the same slot.
4360 + * Look for existing groups on non-isolated functions on the same
4361 + * slot and aliases of those functions, if any. No need to clear
4362 + * the search bitmap, the tested devfns are still valid.
4363 */
4364 - tmp = NULL;
4365 - for_each_pci_dev(tmp) {
4366 - if (tmp == pdev || tmp->bus != pdev->bus ||
4367 - PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
4368 - pci_acs_enabled(tmp, REQ_ACS_FLAGS))
4369 - continue;
4370 -
4371 - group = iommu_group_get(&tmp->dev);
4372 - if (group) {
4373 - pci_dev_put(tmp);
4374 - return group;
4375 - }
4376 - }
4377 + group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
4378 + if (group)
4379 + return group;
4380
4381 /* No shared group found, allocate new */
4382 return iommu_group_alloc();
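
The rewritten iommu_group_get_for_pci_dev() walks DMA aliases recursively and uses a 256-bit devfn bitmap (u64 devfns[4]) with test_and_set_bit() so that mutual aliases cannot recurse forever. A standalone sketch of that loop guard:

/* Standalone sketch of the loop guard: a PCI bus has at most 256 devfn
 * values, so four 64-bit words are enough to remember which functions
 * the alias walk has already visited. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool test_and_set(uint64_t *map, unsigned bit)
{
        uint64_t mask = 1ull << (bit & 63);
        bool old = map[bit >> 6] & mask;

        map[bit >> 6] |= mask;
        return old;
}

int main(void)
{
        uint64_t devfns[4] = { 0 };
        unsigned devfn = 0x08;          /* slot 1, function 0 */

        printf("first visit already seen:  %d\n", test_and_set(devfns, devfn & 0xff));
        printf("second visit already seen: %d\n", test_and_set(devfns, devfn & 0xff));
        return 0;
}
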
4383 diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
4384 index 574aba0eba4e..1cb538fd85e9 100644
4385 --- a/drivers/irqchip/irq-armada-370-xp.c
4386 +++ b/drivers/irqchip/irq-armada-370-xp.c
4387 @@ -43,6 +43,7 @@
4388 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
4389 #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
4390 #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
4391 +#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
4392
4393 #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
4394 #define ARMADA_375_PPI_CAUSE (0x10)
4395 @@ -410,19 +411,29 @@ static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
4396 struct irq_desc *desc)
4397 {
4398 struct irq_chip *chip = irq_get_chip(irq);
4399 - unsigned long irqmap, irqn;
4400 + unsigned long irqmap, irqn, irqsrc, cpuid;
4401 unsigned int cascade_irq;
4402
4403 chained_irq_enter(chip, desc);
4404
4405 irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
4406 -
4407 - if (irqmap & BIT(0)) {
4408 - armada_370_xp_handle_msi_irq(NULL, true);
4409 - irqmap &= ~BIT(0);
4410 - }
4411 + cpuid = cpu_logical_map(smp_processor_id());
4412
4413 for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
4414 + irqsrc = readl_relaxed(main_int_base +
4415 + ARMADA_370_XP_INT_SOURCE_CTL(irqn));
4416 +
4417 + /* Check if the interrupt is not masked on current CPU.
4418 + * Test IRQ (0-1) and FIQ (8-9) mask bits.
4419 + */
4420 + if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
4421 + continue;
4422 +
4423 + if (irqn == 1) {
4424 + armada_370_xp_handle_msi_irq(NULL, true);
4425 + continue;
4426 + }
4427 +
4428 cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
4429 generic_handle_irq(cascade_irq);
4430 }
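
The armada-370-xp hunk only cascades an interrupt if its source-control register shows it unmasked on the current CPU; as the added comment notes, bits 0-1 are the per-CPU IRQ enables and bits 8-9 the per-CPU FIQ enables, hence (BIT(0) | BIT(8)) << cpuid. A small standalone check of that mask:

/* Small standalone check of the per-CPU IRQ/FIQ mask. */
#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))
#define IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << (cpuid))

int main(void)
{
        uint32_t irqsrc = BIT(1);       /* routed to CPU1's IRQ line only */

        for (unsigned cpu = 0; cpu < 2; cpu++)
                printf("cpu%u handles it: %d\n", cpu,
                       !!(irqsrc & IRQ_FIQ_MASK(cpu)));
        return 0;
}
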
4431 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
4432 index ab472c557d18..9ea5b6041eb2 100644
4433 --- a/drivers/md/dm-bufio.c
4434 +++ b/drivers/md/dm-bufio.c
4435 @@ -465,6 +465,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
4436 c->n_buffers[dirty]++;
4437 b->list_mode = dirty;
4438 list_move(&b->lru_list, &c->lru[dirty]);
4439 + b->last_accessed = jiffies;
4440 }
4441
4442 /*----------------------------------------------------------------
4443 @@ -1472,9 +1473,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
4444 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
4445 freed += __cleanup_old_buffer(b, gfp_mask, 0);
4446 if (!--nr_to_scan)
4447 - break;
4448 + return freed;
4449 + dm_bufio_cond_resched();
4450 }
4451 - dm_bufio_cond_resched();
4452 }
4453 return freed;
4454 }
4455 diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
4456 index b428c0ae63d5..39ad9664d397 100644
4457 --- a/drivers/md/dm-log-userspace-transfer.c
4458 +++ b/drivers/md/dm-log-userspace-transfer.c
4459 @@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void)
4460
4461 r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback);
4462 if (r) {
4463 - cn_del_callback(&ulog_cn_id);
4464 + kfree(prealloced_cn_msg);
4465 return r;
4466 }
4467
4468 diff --git a/drivers/media/common/siano/sms-cards.c b/drivers/media/common/siano/sms-cards.c
4469 index 82769993eeb7..82c7a1289f05 100644
4470 --- a/drivers/media/common/siano/sms-cards.c
4471 +++ b/drivers/media/common/siano/sms-cards.c
4472 @@ -157,6 +157,12 @@ static struct sms_board sms_boards[] = {
4473 .type = SMS_DENVER_2160,
4474 .default_mode = DEVICE_MODE_DAB_TDMB,
4475 },
4476 + [SMS1XXX_BOARD_PCTV_77E] = {
4477 + .name = "Hauppauge microStick 77e",
4478 + .type = SMS_NOVA_B0,
4479 + .fw[DEVICE_MODE_DVBT_BDA] = SMS_FW_DVB_NOVA_12MHZ_B0,
4480 + .default_mode = DEVICE_MODE_DVBT_BDA,
4481 + },
4482 };
4483
4484 struct sms_board *sms_get_board(unsigned id)
4485 diff --git a/drivers/media/common/siano/sms-cards.h b/drivers/media/common/siano/sms-cards.h
4486 index c63b544c49c5..4c4caddf9869 100644
4487 --- a/drivers/media/common/siano/sms-cards.h
4488 +++ b/drivers/media/common/siano/sms-cards.h
4489 @@ -45,6 +45,7 @@
4490 #define SMS1XXX_BOARD_SIANO_RIO 18
4491 #define SMS1XXX_BOARD_SIANO_DENVER_1530 19
4492 #define SMS1XXX_BOARD_SIANO_DENVER_2160 20
4493 +#define SMS1XXX_BOARD_PCTV_77E 21
4494
4495 struct sms_board_gpio_cfg {
4496 int lna_vhf_exist;
4497 diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
4498 index 335daeff91b9..9d0d0347758f 100644
4499 --- a/drivers/media/dvb-frontends/ds3000.c
4500 +++ b/drivers/media/dvb-frontends/ds3000.c
4501 @@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
4502 memcpy(&state->frontend.ops, &ds3000_ops,
4503 sizeof(struct dvb_frontend_ops));
4504 state->frontend.demodulator_priv = state;
4505 +
4506 + /*
4507 + * Some devices like T480 starts with voltage on. Be sure
4508 + * to turn voltage off during init, as this can otherwise
4509 + * interfere with Unicable SCR systems.
4510 + */
4511 + ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
4512 return &state->frontend;
4513
4514 error3:
4515 diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c
4516 index 72af644fa051..cf93021a6500 100644
4517 --- a/drivers/media/i2c/tda7432.c
4518 +++ b/drivers/media/i2c/tda7432.c
4519 @@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl)
4520 if (t->mute->val) {
4521 lf |= TDA7432_MUTE;
4522 lr |= TDA7432_MUTE;
4523 - lf |= TDA7432_MUTE;
4524 + rf |= TDA7432_MUTE;
4525 rr |= TDA7432_MUTE;
4526 }
4527 /* Mute & update balance*/
4528 diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
4529 index 6d86646d9743..5d666af2310c 100644
4530 --- a/drivers/media/platform/Kconfig
4531 +++ b/drivers/media/platform/Kconfig
4532 @@ -158,7 +158,7 @@ config VIDEO_MEM2MEM_DEINTERLACE
4533
4534 config VIDEO_SAMSUNG_S5P_G2D
4535 tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
4536 - depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
4537 + depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
4538 select VIDEOBUF2_DMA_CONTIG
4539 select V4L2_MEM2MEM_DEV
4540 default n
4541 @@ -168,7 +168,7 @@ config VIDEO_SAMSUNG_S5P_G2D
4542
4543 config VIDEO_SAMSUNG_S5P_JPEG
4544 tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
4545 - depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
4546 + depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
4547 select VIDEOBUF2_DMA_CONTIG
4548 select V4L2_MEM2MEM_DEV
4549 ---help---
4550 @@ -177,7 +177,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
4551
4552 config VIDEO_SAMSUNG_S5P_MFC
4553 tristate "Samsung S5P MFC Video Codec"
4554 - depends on VIDEO_DEV && VIDEO_V4L2 && (PLAT_S5P || ARCH_EXYNOS)
4555 + depends on VIDEO_DEV && VIDEO_V4L2 && (ARCH_S5PV210 || ARCH_EXYNOS)
4556 select VIDEOBUF2_DMA_CONTIG
4557 default n
4558 help
4559 diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
4560 index 5dcaa0a80540..ec5d7c4b5d28 100644
4561 --- a/drivers/media/platform/exynos4-is/Kconfig
4562 +++ b/drivers/media/platform/exynos4-is/Kconfig
4563 @@ -2,7 +2,7 @@
4564 config VIDEO_SAMSUNG_EXYNOS4_IS
4565 bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
4566 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
4567 - depends on (PLAT_S5P || ARCH_EXYNOS)
4568 + depends on ARCH_S5PV210 || ARCH_EXYNOS
4569 depends on OF && COMMON_CLK
4570 help
4571 Say Y here to enable camera host interface devices for
4572 diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
4573 index 369a4c191e18..dc28ad2849c5 100644
4574 --- a/drivers/media/platform/s5p-tv/Kconfig
4575 +++ b/drivers/media/platform/s5p-tv/Kconfig
4576 @@ -8,7 +8,7 @@
4577
4578 config VIDEO_SAMSUNG_S5P_TV
4579 bool "Samsung TV driver for S5P platform"
4580 - depends on (PLAT_S5P || ARCH_EXYNOS) && PM_RUNTIME
4581 + depends on (ARCH_S5PV210 || ARCH_EXYNOS) && PM_RUNTIME
4582 default n
4583 ---help---
4584 Say Y here to enable selecting the TV output devices for
4585 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
4586 index 7115e68ba697..71c9039dc9ee 100644
4587 --- a/drivers/media/rc/imon.c
4588 +++ b/drivers/media/rc/imon.c
4589 @@ -1579,7 +1579,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
4590 if (press_type == 0)
4591 rc_keyup(ictx->rdev);
4592 else {
4593 - if (ictx->rc_type == RC_BIT_RC6_MCE)
4594 + if (ictx->rc_type == RC_BIT_RC6_MCE ||
4595 + ictx->rc_type == RC_BIT_OTHER)
4596 rc_keydown(ictx->rdev,
4597 ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER,
4598 ictx->rc_scancode, ictx->rc_toggle);
4599 diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
4600 index e8fff2add265..b732ac6a26d8 100644
4601 --- a/drivers/media/rc/rc-ir-raw.c
4602 +++ b/drivers/media/rc/rc-ir-raw.c
4603 @@ -262,7 +262,6 @@ int ir_raw_event_register(struct rc_dev *dev)
4604 return -ENOMEM;
4605
4606 dev->raw->dev = dev;
4607 - dev->enabled_protocols = ~0;
4608 dev->change_protocol = change_protocol;
4609 rc = kfifo_alloc(&dev->raw->kfifo,
4610 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
4611 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
4612 index a7991c7d010a..8d3b74c5a717 100644
4613 --- a/drivers/media/rc/rc-main.c
4614 +++ b/drivers/media/rc/rc-main.c
4615 @@ -1421,6 +1421,8 @@ int rc_register_device(struct rc_dev *dev)
4616
4617 if (dev->change_protocol) {
4618 u64 rc_type = (1 << rc_map->rc_type);
4619 + if (dev->driver_type == RC_DRIVER_IR_RAW)
4620 + rc_type |= RC_BIT_LIRC;
4621 rc = dev->change_protocol(dev, &rc_type);
4622 if (rc < 0)
4623 goto out_raw;
4624 diff --git a/drivers/media/tuners/m88ts2022.c b/drivers/media/tuners/m88ts2022.c
4625 index 40c42dec721b..7a62097aa9ea 100644
4626 --- a/drivers/media/tuners/m88ts2022.c
4627 +++ b/drivers/media/tuners/m88ts2022.c
4628 @@ -314,7 +314,7 @@ static int m88ts2022_set_params(struct dvb_frontend *fe)
4629 div_min = gdiv28 * 78 / 100;
4630 div_max = clamp_val(div_max, 0U, 63U);
4631
4632 - f_3db_hz = c->symbol_rate * 135UL / 200UL;
4633 + f_3db_hz = mult_frac(c->symbol_rate, 135, 200);
4634 f_3db_hz += 2000000U + (frequency_offset_khz * 1000U);
4635 f_3db_hz = clamp(f_3db_hz, 7000000U, 40000000U);
4636
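
The m88ts2022 change above swaps a plain multiply-then-divide for mult_frac(). On builds where unsigned long is 32 bits, symbol_rate * 135 can exceed 2^32 before the division and the computed filter bandwidth wraps; mult_frac() divides first and only multiplies the small quotient and remainder. A minimal standalone sketch in plain C (mult_frac_u32 is an illustrative stand-in mirroring what the kernel macro does for unsigned 32-bit operands):

    #include <stdio.h>
    #include <stdint.h>

    /* Split multiply: every intermediate stays within 32 bits. */
    static uint32_t mult_frac_u32(uint32_t x, uint32_t numer, uint32_t denom)
    {
        uint32_t quot = x / denom;
        uint32_t rem  = x % denom;

        return quot * numer + (rem * numer) / denom;
    }

    int main(void)
    {
        uint32_t symbol_rate = 45000000U;   /* a high but realistic symbol rate */

        /* Force 32-bit arithmetic, as on an arch where unsigned long is 32 bits. */
        uint32_t naive = (uint32_t)(symbol_rate * 135U) / 200U;
        uint32_t fixed = mult_frac_u32(symbol_rate, 135U, 200U);

        printf("naive=%u (wrapped)  mult_frac=%u\n", naive, fixed);
        return 0;
    }
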
4637 diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
4638 index 9da812b8a786..9c61c3f37778 100644
4639 --- a/drivers/media/usb/em28xx/em28xx-cards.c
4640 +++ b/drivers/media/usb/em28xx/em28xx-cards.c
4641 @@ -3098,16 +3098,6 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
4642 }
4643 }
4644
4645 - if (dev->chip_id == CHIP_ID_EM2870 ||
4646 - dev->chip_id == CHIP_ID_EM2874 ||
4647 - dev->chip_id == CHIP_ID_EM28174 ||
4648 - dev->chip_id == CHIP_ID_EM28178) {
4649 - /* Digital only device - don't load any alsa module */
4650 - dev->audio_mode.has_audio = false;
4651 - dev->has_audio_class = false;
4652 - dev->has_alsa_audio = false;
4653 - }
4654 -
4655 if (chip_name != default_chip_name)
4656 printk(KERN_INFO DRIVER_NAME
4657 ": chip ID is %s\n", chip_name);
4658 @@ -3377,7 +3367,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
4659 dev->alt = -1;
4660 dev->is_audio_only = has_audio && !(has_video || has_dvb);
4661 dev->has_alsa_audio = has_audio;
4662 - dev->audio_mode.has_audio = has_audio;
4663 dev->has_video = has_video;
4664 dev->ifnum = ifnum;
4665
4666 diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
4667 index 523d7e92bf47..0f6caa4912b8 100644
4668 --- a/drivers/media/usb/em28xx/em28xx-core.c
4669 +++ b/drivers/media/usb/em28xx/em28xx-core.c
4670 @@ -506,8 +506,18 @@ int em28xx_audio_setup(struct em28xx *dev)
4671 int vid1, vid2, feat, cfg;
4672 u32 vid;
4673
4674 - if (!dev->audio_mode.has_audio)
4675 + if (dev->chip_id == CHIP_ID_EM2870 ||
4676 + dev->chip_id == CHIP_ID_EM2874 ||
4677 + dev->chip_id == CHIP_ID_EM28174 ||
4678 + dev->chip_id == CHIP_ID_EM28178) {
4679 + /* Digital only device - don't load any alsa module */
4680 + dev->audio_mode.has_audio = false;
4681 + dev->has_audio_class = false;
4682 + dev->has_alsa_audio = false;
4683 return 0;
4684 + }
4685 +
4686 + dev->audio_mode.has_audio = true;
4687
4688 /* See how this device is configured */
4689 cfg = em28xx_read_reg(dev, EM28XX_R00_CHIPCFG);
4690 diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
4691 index 29abc379551e..5122cbe085f1 100644
4692 --- a/drivers/media/usb/em28xx/em28xx-video.c
4693 +++ b/drivers/media/usb/em28xx/em28xx-video.c
4694 @@ -435,7 +435,10 @@ static inline void finish_buffer(struct em28xx *dev,
4695 em28xx_isocdbg("[%p/%d] wakeup\n", buf, buf->top_field);
4696
4697 buf->vb.v4l2_buf.sequence = dev->v4l2->field_count++;
4698 - buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
4699 + if (dev->v4l2->progressive)
4700 + buf->vb.v4l2_buf.field = V4L2_FIELD_NONE;
4701 + else
4702 + buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
4703 v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
4704
4705 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
4706 @@ -994,13 +997,16 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
4707 }
4708
4709 spin_lock_irqsave(&dev->slock, flags);
4710 + if (dev->usb_ctl.vid_buf != NULL) {
4711 + vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR);
4712 + dev->usb_ctl.vid_buf = NULL;
4713 + }
4714 while (!list_empty(&vidq->active)) {
4715 struct em28xx_buffer *buf;
4716 buf = list_entry(vidq->active.next, struct em28xx_buffer, list);
4717 list_del(&buf->list);
4718 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
4719 }
4720 - dev->usb_ctl.vid_buf = NULL;
4721 spin_unlock_irqrestore(&dev->slock, flags);
4722 }
4723
4724 @@ -1021,13 +1027,16 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
4725 }
4726
4727 spin_lock_irqsave(&dev->slock, flags);
4728 + if (dev->usb_ctl.vbi_buf != NULL) {
4729 + vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR);
4730 + dev->usb_ctl.vbi_buf = NULL;
4731 + }
4732 while (!list_empty(&vbiq->active)) {
4733 struct em28xx_buffer *buf;
4734 buf = list_entry(vbiq->active.next, struct em28xx_buffer, list);
4735 list_del(&buf->list);
4736 vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
4737 }
4738 - dev->usb_ctl.vbi_buf = NULL;
4739 spin_unlock_irqrestore(&dev->slock, flags);
4740 }
4741
4742 diff --git a/drivers/media/usb/siano/smsusb.c b/drivers/media/usb/siano/smsusb.c
4743 index 1836a416d806..89c86ee2b225 100644
4744 --- a/drivers/media/usb/siano/smsusb.c
4745 +++ b/drivers/media/usb/siano/smsusb.c
4746 @@ -655,6 +655,8 @@ static const struct usb_device_id smsusb_id_table[] = {
4747 .driver_info = SMS1XXX_BOARD_ONDA_MDTV_DATA_CARD },
4748 { USB_DEVICE(0x3275, 0x0080),
4749 .driver_info = SMS1XXX_BOARD_SIANO_RIO },
4750 + { USB_DEVICE(0x2013, 0x0257),
4751 + .driver_info = SMS1XXX_BOARD_PCTV_77E },
4752 { } /* Terminating entry */
4753 };
4754
4755 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
4756 index f8135f4e3b52..f3c126915ff2 100644
4757 --- a/drivers/media/usb/uvc/uvc_driver.c
4758 +++ b/drivers/media/usb/uvc/uvc_driver.c
4759 @@ -2229,6 +2229,15 @@ static struct usb_device_id uvc_ids[] = {
4760 .bInterfaceSubClass = 1,
4761 .bInterfaceProtocol = 0,
4762 .driver_info = UVC_QUIRK_PROBE_DEF },
4763 + /* Dell XPS M1330 (OmniVision OV7670 webcam) */
4764 + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
4765 + | USB_DEVICE_ID_MATCH_INT_INFO,
4766 + .idVendor = 0x05a9,
4767 + .idProduct = 0x7670,
4768 + .bInterfaceClass = USB_CLASS_VIDEO,
4769 + .bInterfaceSubClass = 1,
4770 + .bInterfaceProtocol = 0,
4771 + .driver_info = UVC_QUIRK_PROBE_DEF },
4772 /* Apple Built-In iSight */
4773 { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
4774 | USB_DEVICE_ID_MATCH_INT_INFO,
4775 diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
4776 index ccaa38f65cf1..2e9d81f4c1a5 100644
4777 --- a/drivers/media/v4l2-core/v4l2-common.c
4778 +++ b/drivers/media/v4l2-core/v4l2-common.c
4779 @@ -435,16 +435,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min,
4780 /* Bits that must be zero to be aligned */
4781 unsigned int mask = ~((1 << align) - 1);
4782
4783 + /* Clamp to aligned min and max */
4784 + x = clamp(x, (min + ~mask) & mask, max & mask);
4785 +
4786 /* Round to nearest aligned value */
4787 if (align)
4788 x = (x + (1 << (align - 1))) & mask;
4789
4790 - /* Clamp to aligned value of min and max */
4791 - if (x < min)
4792 - x = (min + ~mask) & mask;
4793 - else if (x > max)
4794 - x = max & mask;
4795 -
4796 return x;
4797 }
4798
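
The clamp_align() reordering above clamps to the aligned [min, max] window before rounding, so the rounded result can no longer land outside that window. A standalone sketch of the patched helper in plain C (same logic as the hunk; align is a power-of-two exponent, and the sample bounds in main() are illustrative):

    #include <stdio.h>

    static unsigned int clamp_align(unsigned int x, unsigned int min,
                                    unsigned int max, unsigned int align)
    {
        /* Bits that must be zero to be aligned */
        unsigned int mask = ~((1u << align) - 1);

        /* Clamp to aligned min and max first */
        unsigned int amin = (min + ~mask) & mask;   /* min rounded up */
        unsigned int amax = max & mask;             /* max rounded down */

        if (x < amin)
            x = amin;
        else if (x > amax)
            x = amax;

        /* Round to nearest aligned value */
        if (align)
            x = (x + (1u << (align - 1))) & mask;

        return x;
    }

    int main(void)
    {
        /* e.g. widths constrained to [176, 720], 16-pixel aligned */
        printf("%u %u %u\n",
               clamp_align(100, 176, 720, 4),    /* -> 176 */
               clamp_align(719, 176, 720, 4),    /* -> 720 */
               clamp_align(333, 176, 720, 4));   /* -> 336 */
        return 0;
    }
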
4799 diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
4800 index 3c8cc023a5a5..3ff15f1c9d70 100644
4801 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c
4802 +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
4803 @@ -253,9 +253,11 @@ int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
4804 return 0;
4805 out_free_pages:
4806 while (i > 0) {
4807 - void *addr = page_address(dma->vaddr_pages[i]);
4808 - dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
4809 + void *addr;
4810 +
4811 i--;
4812 + addr = page_address(dma->vaddr_pages[i]);
4813 + dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
4814 }
4815 kfree(dma->dma_addr);
4816 dma->dma_addr = NULL;
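
The videobuf-dma-sg fix above restores the usual unwind idiom: on failure the index must be stepped back before freeing, otherwise the loop releases the slot that was never successfully set up and leaks slot 0. A minimal sketch of the corrected pattern in plain C (illustrative only, ordinary malloc/free instead of dma_free_coherent):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        enum { N = 8 };
        void *pages[N];
        int i, freed = 0;

        for (i = 0; i < N; i++) {
            pages[i] = (i == 5) ? NULL : malloc(4096);  /* pretend slot 5 fails */
            if (!pages[i])
                goto out_free_pages;
        }
        return 0;

    out_free_pages:
        while (i > 0) {
            i--;                /* step back first ... */
            free(pages[i]);     /* ... then release a slot that was really allocated */
            freed++;
        }
        printf("released %d of %d slots after the failure\n", freed, N);
        return 1;
    }
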
4817 diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
4818 index d01b8c249231..f2643c221d34 100644
4819 --- a/drivers/mfd/rtsx_pcr.c
4820 +++ b/drivers/mfd/rtsx_pcr.c
4821 @@ -1197,7 +1197,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
4822 pcr->msi_en = msi_en;
4823 if (pcr->msi_en) {
4824 ret = pci_enable_msi(pcidev);
4825 - if (ret < 0)
4826 + if (ret)
4827 pcr->msi_en = false;
4828 }
4829
4830 diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
4831 index dd4bf5816221..121add8be456 100644
4832 --- a/drivers/mfd/ti_am335x_tscadc.c
4833 +++ b/drivers/mfd/ti_am335x_tscadc.c
4834 @@ -53,11 +53,11 @@ void am335x_tsc_se_set_cache(struct ti_tscadc_dev *tsadc, u32 val)
4835 unsigned long flags;
4836
4837 spin_lock_irqsave(&tsadc->reg_lock, flags);
4838 - tsadc->reg_se_cache = val;
4839 + tsadc->reg_se_cache |= val;
4840 if (tsadc->adc_waiting)
4841 wake_up(&tsadc->reg_se_wait);
4842 else if (!tsadc->adc_in_use)
4843 - tscadc_writel(tsadc, REG_SE, val);
4844 + tscadc_writel(tsadc, REG_SE, tsadc->reg_se_cache);
4845
4846 spin_unlock_irqrestore(&tsadc->reg_lock, flags);
4847 }
4848 @@ -96,6 +96,7 @@ static void am335x_tscadc_need_adc(struct ti_tscadc_dev *tsadc)
4849 void am335x_tsc_se_set_once(struct ti_tscadc_dev *tsadc, u32 val)
4850 {
4851 spin_lock_irq(&tsadc->reg_lock);
4852 + tsadc->reg_se_cache |= val;
4853 am335x_tscadc_need_adc(tsadc);
4854
4855 tscadc_writel(tsadc, REG_SE, val);
4856 diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
4857 index e636d9e99e4a..3fc40a7140a8 100644
4858 --- a/drivers/mmc/core/sdio.c
4859 +++ b/drivers/mmc/core/sdio.c
4860 @@ -992,8 +992,16 @@ static int mmc_sdio_resume(struct mmc_host *host)
4861 }
4862 }
4863
4864 - if (!err && host->sdio_irqs)
4865 - wake_up_process(host->sdio_irq_thread);
4866 + if (!err && host->sdio_irqs) {
4867 + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
4868 + wake_up_process(host->sdio_irq_thread);
4869 + } else if (host->caps & MMC_CAP_SDIO_IRQ) {
4870 + mmc_host_clk_hold(host);
4871 + host->ops->enable_sdio_irq(host, 1);
4872 + mmc_host_clk_release(host);
4873 + }
4874 + }
4875 +
4876 mmc_release_host(host);
4877
4878 host->pm_flags &= ~MMC_PM_KEEP_POWER;
4879 diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
4880 index 5cc13c8d35bb..696eca493844 100644
4881 --- a/drivers/mmc/core/sdio_irq.c
4882 +++ b/drivers/mmc/core/sdio_irq.c
4883 @@ -208,7 +208,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
4884 host->sdio_irqs--;
4885 return err;
4886 }
4887 - } else {
4888 + } else if (host->caps & MMC_CAP_SDIO_IRQ) {
4889 mmc_host_clk_hold(host);
4890 host->ops->enable_sdio_irq(host, 1);
4891 mmc_host_clk_release(host);
4892 @@ -229,7 +229,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
4893 if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
4894 atomic_set(&host->sdio_irq_thread_abort, 1);
4895 kthread_stop(host->sdio_irq_thread);
4896 - } else {
4897 + } else if (host->caps & MMC_CAP_SDIO_IRQ) {
4898 mmc_host_clk_hold(host);
4899 host->ops->enable_sdio_irq(host, 0);
4900 mmc_host_clk_release(host);
4901 diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
4902 index 5f89cb83d5f0..187f48a5795a 100644
4903 --- a/drivers/mmc/core/slot-gpio.c
4904 +++ b/drivers/mmc/core/slot-gpio.c
4905 @@ -221,8 +221,6 @@ int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio,
4906 ctx->override_cd_active_level = true;
4907 ctx->cd_gpio = gpio_to_desc(gpio);
4908
4909 - mmc_gpiod_request_cd_irq(host);
4910 -
4911 return 0;
4912 }
4913 EXPORT_SYMBOL(mmc_gpio_request_cd);
4914 diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
4915 index cc8d4a6099cd..e4a07546f8b6 100644
4916 --- a/drivers/mmc/host/mmc_spi.c
4917 +++ b/drivers/mmc/host/mmc_spi.c
4918 @@ -1436,6 +1436,7 @@ static int mmc_spi_probe(struct spi_device *spi)
4919 host->pdata->cd_debounce);
4920 if (status != 0)
4921 goto fail_add_host;
4922 + mmc_gpiod_request_cd_irq(mmc);
4923 }
4924
4925 if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
4926 diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c
4927 index dfde4a210238..b2537e2f26b1 100644
4928 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c
4929 +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c
4930 @@ -412,6 +412,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host,
4931 }
4932
4933 if (rsp_type == SD_RSP_TYPE_R2) {
4934 + /*
4935 + * The controller offloads the last byte {CRC-7, end bit 1'b1}
4936 + * of response type R2. Assign a dummy CRC (0) and the end bit to that
4937 + * byte (ptr[16], which goes into the LSB of resp[3] later).
4938 + */
4939 + ptr[16] = 1;
4940 +
4941 for (i = 0; i < 4; i++) {
4942 cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
4943 dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
4944 diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c
4945 index 5d3766e792f0..d9153a7d160d 100644
4946 --- a/drivers/mmc/host/rtsx_usb_sdmmc.c
4947 +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c
4948 @@ -435,6 +435,13 @@ static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host,
4949 }
4950
4951 if (rsp_type == SD_RSP_TYPE_R2) {
4952 + /*
4953 + * The controller offloads the last byte {CRC-7, end bit 1'b1}
4954 + * of response type R2. Assign a dummy CRC (0) and the end bit to that
4955 + * byte (ptr[16], which goes into the LSB of resp[3] later).
4956 + */
4957 + ptr[16] = 1;
4958 +
4959 for (i = 0; i < 4; i++) {
4960 cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4);
4961 dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n",
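
Both rtsx hunks above plug the same hole: for an R2 (136-bit CID/CSD) response the controller never transfers the final {CRC-7, end bit} byte, yet that byte becomes the least significant byte of cmd->resp[3], so it has to be given a deterministic value. A standalone sketch in plain C of how the 17-byte buffer folds into resp[0..3] (get_be32 is a stand-in for get_unaligned_be32; the payload bytes are illustrative, not taken from the driver):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t get_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        uint8_t ptr[17] = { 0x3f,                       /* leading byte (illustrative) */
                            0x40, 0x0e, 0x00, 0x32,     /* 15 CSD-like payload bytes */
                            0x5b, 0x59, 0x00, 0x00,
                            0x1d, 0x8a, 0x7f, 0x80,
                            0x0a, 0x40, 0x00 };
        uint32_t resp[4];
        int i;

        ptr[16] = 1;    /* dummy CRC-7 (0) plus end bit, as the patch does */

        for (i = 0; i < 4; i++)
            resp[i] = get_be32(ptr + 1 + i * 4);

        for (i = 0; i < 4; i++)
            printf("resp[%d] = 0x%08x\n", i, resp[i]);
        return 0;
    }
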
4962 diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
4963 index 6f842fb8e6b8..3434c7933de0 100644
4964 --- a/drivers/mmc/host/sdhci-pxav3.c
4965 +++ b/drivers/mmc/host/sdhci-pxav3.c
4966 @@ -224,12 +224,11 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
4967
4968 static const struct sdhci_ops pxav3_sdhci_ops = {
4969 .set_clock = sdhci_set_clock,
4970 - .set_uhs_signaling = pxav3_set_uhs_signaling,
4971 .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
4972 .get_max_clock = sdhci_pltfm_clk_get_max_clock,
4973 .set_bus_width = sdhci_set_bus_width,
4974 .reset = pxav3_reset,
4975 - .set_uhs_signaling = sdhci_set_uhs_signaling,
4976 + .set_uhs_signaling = pxav3_set_uhs_signaling,
4977 };
4978
4979 static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
4980 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
4981 index fa5954a05449..1e47903fa184 100644
4982 --- a/drivers/mmc/host/sdhci-s3c.c
4983 +++ b/drivers/mmc/host/sdhci-s3c.c
4984 @@ -606,8 +606,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
4985 ret = sdhci_add_host(host);
4986 if (ret) {
4987 dev_err(dev, "sdhci_add_host() failed\n");
4988 - pm_runtime_forbid(&pdev->dev);
4989 - pm_runtime_get_noresume(&pdev->dev);
4990 goto err_req_regs;
4991 }
4992
4993 @@ -618,6 +616,8 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
4994 return 0;
4995
4996 err_req_regs:
4997 + pm_runtime_disable(&pdev->dev);
4998 +
4999 err_no_busclks:
5000 clk_disable_unprepare(sc->clk_io);
5001
5002 diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c
5003 index 17004531d089..b6db259aea9e 100644
5004 --- a/drivers/mmc/host/sdhci-sirf.c
5005 +++ b/drivers/mmc/host/sdhci-sirf.c
5006 @@ -94,6 +94,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev)
5007 ret);
5008 goto err_request_cd;
5009 }
5010 + mmc_gpiod_request_cd_irq(host->mmc);
5011 }
5012
5013 return 0;
5014 diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
5015 index faf0924e71cb..59d9a7249b2e 100644
5016 --- a/drivers/mmc/host/tmio_mmc_pio.c
5017 +++ b/drivers/mmc/host/tmio_mmc_pio.c
5018 @@ -1103,6 +1103,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host **host,
5019 tmio_mmc_host_remove(_host);
5020 return ret;
5021 }
5022 + mmc_gpiod_request_cd_irq(mmc);
5023 }
5024
5025 *host = _host;
5026 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
5027 index a7543ba3e190..3096f3ded3ad 100644
5028 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
5029 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
5030 @@ -2590,6 +2590,8 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
5031
5032 /* Go to known state. Chip may have been power cycled */
5033 if (chip->state == FL_PM_SUSPENDED) {
5034 + /* Refresh LH28F640BF Partition Config. Register */
5035 + fixup_LH28F640BF(mtd);
5036 map_write(map, CMD(0xFF), cfi->chips[i].start);
5037 chip->oldstate = chip->state = FL_READY;
5038 wake_up(&chip->wq);
5039 diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
5040 index ed7e0a1bed3c..5935f0a3ac6f 100644
5041 --- a/drivers/mtd/devices/m25p80.c
5042 +++ b/drivers/mtd/devices/m25p80.c
5043 @@ -245,6 +245,56 @@ static int m25p_remove(struct spi_device *spi)
5044 }
5045
5046
5047 +/*
5048 + * XXX This needs to be kept in sync with spi_nor_ids. We can't share
5049 + * it with spi-nor, because if this is built as a module then modpost
5050 + * won't be able to read it and add appropriate aliases.
5051 + */
5052 +static const struct spi_device_id m25p_ids[] = {
5053 + {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
5054 + {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"},
5055 + {"at26df321"}, {"at45db081d"},
5056 + {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"},
5057 + {"en25q64"}, {"en25qh128"}, {"en25qh256"},
5058 + {"f25l32pa"},
5059 + {"mr25h256"}, {"mr25h10"},
5060 + {"gd25q32"}, {"gd25q64"},
5061 + {"160s33b"}, {"320s33b"}, {"640s33b"},
5062 + {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"},
5063 + {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"},
5064 + {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"},
5065 + {"mx66l1g55g"},
5066 + {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"},
5067 + {"n25q512a"}, {"n25q512ax3"}, {"n25q00"},
5068 + {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"},
5069 + {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"},
5070 + {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
5071 + {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
5072 + {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
5073 + {"s25fl016k"}, {"s25fl064k"},
5074 + {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
5075 + {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
5076 + {"sst25wf040"},
5077 + {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"},
5078 + {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"},
5079 + {"m25p128"}, {"n25q032"},
5080 + {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
5081 + {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
5082 + {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
5083 + {"m45pe10"}, {"m45pe80"}, {"m45pe16"},
5084 + {"m25pe20"}, {"m25pe80"}, {"m25pe16"},
5085 + {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
5086 + {"m25px64"},
5087 + {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
5088 + {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
5089 + {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"},
5090 + {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"},
5091 + {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
5092 + { },
5093 +};
5094 +MODULE_DEVICE_TABLE(spi, m25p_ids);
5095 +
5096 +
5097 static struct spi_driver m25p80_driver = {
5098 .driver = {
5099 .name = "m25p80",
5100 diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
5101 index 33c64955d4d7..5f9a1e2f3fd4 100644
5102 --- a/drivers/mtd/ubi/block.c
5103 +++ b/drivers/mtd/ubi/block.c
5104 @@ -188,8 +188,9 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
5105
5106 ret = ubi_read(dev->desc, leb, buffer, offset, len);
5107 if (ret) {
5108 - ubi_err("%s ubi_read error %d",
5109 - dev->gd->disk_name, ret);
5110 + ubi_err("%s: error %d while reading from LEB %d (offset %d, "
5111 + "length %d)", dev->gd->disk_name, ret, leb, offset,
5112 + len);
5113 return ret;
5114 }
5115 return 0;
5116 @@ -378,7 +379,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
5117 {
5118 struct ubiblock *dev;
5119 struct gendisk *gd;
5120 - u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
5121 + u64 disk_capacity = vi->used_bytes >> 9;
5122 int ret;
5123
5124 if ((sector_t)disk_capacity != disk_capacity)
5125 @@ -502,7 +503,7 @@ int ubiblock_remove(struct ubi_volume_info *vi)
5126 static int ubiblock_resize(struct ubi_volume_info *vi)
5127 {
5128 struct ubiblock *dev;
5129 - u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
5130 + u64 disk_capacity = vi->used_bytes >> 9;
5131
5132 if ((sector_t)disk_capacity != disk_capacity) {
5133 ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)",
5134 @@ -522,8 +523,12 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
5135 }
5136
5137 mutex_lock(&dev->dev_mutex);
5138 - set_capacity(dev->gd, disk_capacity);
5139 - ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
5140 +
5141 + if (get_capacity(dev->gd) != disk_capacity) {
5142 + set_capacity(dev->gd, disk_capacity);
5143 + ubi_msg("%s resized to %lld bytes", dev->gd->disk_name,
5144 + vi->used_bytes);
5145 + }
5146 mutex_unlock(&dev->dev_mutex);
5147 mutex_unlock(&devices_mutex);
5148 return 0;
5149 @@ -547,6 +552,14 @@ static int ubiblock_notify(struct notifier_block *nb,
5150 case UBI_VOLUME_RESIZED:
5151 ubiblock_resize(&nt->vi);
5152 break;
5153 + case UBI_VOLUME_UPDATED:
5154 + /*
5155 + * If the volume is static, a content update might mean the
5156 + * size (i.e. used_bytes) was also changed.
5157 + */
5158 + if (nt->vi.vol_type == UBI_STATIC_VOLUME)
5159 + ubiblock_resize(&nt->vi);
5160 + break;
5161 default:
5162 break;
5163 }
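
The ubiblock capacity is now derived from vi->used_bytes and, as before, rejected when it no longer fits in sector_t; the test simply casts and compares, so any value that the narrowing cast changes is refused. A standalone sketch of that idiom in plain C (here sector_t is assumed to be 32 bits, as on a 32-bit kernel without large-block-device support):

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t sector_t;    /* assumption: 32-bit sector_t */

    int main(void)
    {
        uint64_t used_bytes = 3ULL * 1024 * 1024 * 1024 * 1024;  /* 3 TiB volume */
        uint64_t disk_capacity = used_bytes >> 9;                /* in 512-byte sectors */

        if ((sector_t)disk_capacity != disk_capacity)
            printf("too big: %llu sectors do not fit in sector_t\n",
                   (unsigned long long)disk_capacity);
        else
            printf("capacity ok: %llu sectors\n",
                   (unsigned long long)disk_capacity);
        return 0;
    }
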
5164 diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
5165 index 7646220ca6e2..20aeb277d8d4 100644
5166 --- a/drivers/mtd/ubi/cdev.c
5167 +++ b/drivers/mtd/ubi/cdev.c
5168 @@ -425,8 +425,10 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
5169 break;
5170
5171 err = ubi_start_update(ubi, vol, bytes);
5172 - if (bytes == 0)
5173 + if (bytes == 0) {
5174 + ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
5175 revoke_exclusive(desc, UBI_READWRITE);
5176 + }
5177 break;
5178 }
5179
5180 diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
5181 index 0431b46d9fd9..c701369090fb 100644
5182 --- a/drivers/mtd/ubi/fastmap.c
5183 +++ b/drivers/mtd/ubi/fastmap.c
5184 @@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
5185 av = tmp_av;
5186 else {
5187 ubi_err("orphaned volume in fastmap pool!");
5188 + kmem_cache_free(ai->aeb_slab_cache, new_aeb);
5189 return UBI_BAD_FASTMAP;
5190 }
5191
5192 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
5193 index c6f6f69f8961..2f8f251a926f 100644
5194 --- a/drivers/net/Kconfig
5195 +++ b/drivers/net/Kconfig
5196 @@ -135,6 +135,7 @@ config MACVLAN
5197 config MACVTAP
5198 tristate "MAC-VLAN based tap driver"
5199 depends on MACVLAN
5200 + depends on INET
5201 help
5202 This adds a specialized tap character device driver that is based
5203 on the MAC-VLAN network interface, called macvtap. A macvtap device
5204 @@ -201,6 +202,7 @@ config RIONET_RX_SIZE
5205
5206 config TUN
5207 tristate "Universal TUN/TAP device driver support"
5208 + depends on INET
5209 select CRC32
5210 ---help---
5211 TUN/TAP provides packet reception and transmission for user space
5212 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5213 index e5be511a3c38..fac3821cef87 100644
5214 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5215 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
5216 @@ -6557,6 +6557,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5217
5218 spin_lock_init(&adapter->stats_lock);
5219 spin_lock_init(&adapter->tid_release_lock);
5220 + spin_lock_init(&adapter->win0_lock);
5221
5222 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
5223 INIT_WORK(&adapter->db_full_task, process_db_full);
5224 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
5225 index dae3da6d8dd0..c2c77434d147 100644
5226 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
5227 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
5228 @@ -808,8 +808,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
5229 tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
5230 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
5231 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
5232 - tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
5233 - MLX4_WQE_CTRL_TCP_UDP_CSUM);
5234 + if (!skb->encapsulation)
5235 + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
5236 + MLX4_WQE_CTRL_TCP_UDP_CSUM);
5237 + else
5238 + tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
5239 ring->tx_csum++;
5240 }
5241
5242 diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
5243 index ca0f98c95105..872843179f44 100644
5244 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
5245 +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
5246 @@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
5247 cur->ib.dst_gid_msk);
5248 break;
5249
5250 + case MLX4_NET_TRANS_RULE_ID_VXLAN:
5251 + len += snprintf(buf + len, BUF_SIZE - len,
5252 + "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
5253 + break;
5254 case MLX4_NET_TRANS_RULE_ID_IPV6:
5255 break;
5256
5257 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
5258 index 655a23bbc451..e17a970eaf2b 100644
5259 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
5260 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
5261 @@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
5262 static void stmmac_default_data(void)
5263 {
5264 memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
5265 +
5266 plat_dat.bus_id = 1;
5267 plat_dat.phy_addr = 0;
5268 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
5269 @@ -47,6 +48,12 @@ static void stmmac_default_data(void)
5270 dma_cfg.pbl = 32;
5271 dma_cfg.burst_len = DMA_AXI_BLEN_256;
5272 plat_dat.dma_cfg = &dma_cfg;
5273 +
5274 + /* Set default value for multicast hash bins */
5275 + plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
5276 +
5277 + /* Set default value for unicast filter entries */
5278 + plat_dat.unicast_filter_entries = 1;
5279 }
5280
5281 /**
5282 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
5283 index 0fcb5e7eb073..148fda3be898 100644
5284 --- a/drivers/net/hyperv/netvsc_drv.c
5285 +++ b/drivers/net/hyperv/netvsc_drv.c
5286 @@ -556,6 +556,7 @@ do_lso:
5287 do_send:
5288 /* Start filling in the page buffers with the rndis hdr */
5289 rndis_msg->msg_len += rndis_msg_size;
5290 + packet->total_data_buflen = rndis_msg->msg_len;
5291 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
5292 skb, &packet->page_buf[0]);
5293
5294 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
5295 index 726edabff26b..5f17ad02917c 100644
5296 --- a/drivers/net/macvlan.c
5297 +++ b/drivers/net/macvlan.c
5298 @@ -201,7 +201,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
5299 struct sk_buff *skb;
5300 struct sk_buff_head list;
5301
5302 - skb_queue_head_init(&list);
5303 + __skb_queue_head_init(&list);
5304
5305 spin_lock_bh(&port->bc_queue.lock);
5306 skb_queue_splice_tail_init(&port->bc_queue, &list);
5307 @@ -941,9 +941,15 @@ static void macvlan_port_destroy(struct net_device *dev)
5308 {
5309 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
5310
5311 - cancel_work_sync(&port->bc_work);
5312 dev->priv_flags &= ~IFF_MACVLAN_PORT;
5313 netdev_rx_handler_unregister(dev);
5314 +
5315 + /* After this point, no packet can schedule bc_work anymore,
5316 + * but we need to cancel it and purge left skbs if any.
5317 + */
5318 + cancel_work_sync(&port->bc_work);
5319 + __skb_queue_purge(&port->bc_queue);
5320 +
5321 kfree_rcu(port, rcu);
5322 }
5323
5324 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
5325 index 0c6adaaf898c..9b5481c70b4c 100644
5326 --- a/drivers/net/macvtap.c
5327 +++ b/drivers/net/macvtap.c
5328 @@ -16,6 +16,7 @@
5329 #include <linux/idr.h>
5330 #include <linux/fs.h>
5331
5332 +#include <net/ipv6.h>
5333 #include <net/net_namespace.h>
5334 #include <net/rtnetlink.h>
5335 #include <net/sock.h>
5336 @@ -570,6 +571,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
5337 break;
5338 case VIRTIO_NET_HDR_GSO_UDP:
5339 gso_type = SKB_GSO_UDP;
5340 + if (skb->protocol == htons(ETH_P_IPV6))
5341 + ipv6_proxy_select_ident(skb);
5342 break;
5343 default:
5344 return -EINVAL;
5345 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
5346 index fa0d71727894..90c639b0f18d 100644
5347 --- a/drivers/net/ppp/ppp_generic.c
5348 +++ b/drivers/net/ppp/ppp_generic.c
5349 @@ -594,7 +594,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
5350 if (file == ppp->owner)
5351 ppp_shutdown_interface(ppp);
5352 }
5353 - if (atomic_long_read(&file->f_count) <= 2) {
5354 + if (atomic_long_read(&file->f_count) < 2) {
5355 ppp_release(NULL, file);
5356 err = 0;
5357 } else
5358 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
5359 index acaaf6784179..610d1662c500 100644
5360 --- a/drivers/net/tun.c
5361 +++ b/drivers/net/tun.c
5362 @@ -65,6 +65,7 @@
5363 #include <linux/nsproxy.h>
5364 #include <linux/virtio_net.h>
5365 #include <linux/rcupdate.h>
5366 +#include <net/ipv6.h>
5367 #include <net/net_namespace.h>
5368 #include <net/netns/generic.h>
5369 #include <net/rtnetlink.h>
5370 @@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
5371 break;
5372 }
5373
5374 + skb_reset_network_header(skb);
5375 +
5376 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
5377 pr_debug("GSO!\n");
5378 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
5379 @@ -1150,6 +1153,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
5380 break;
5381 case VIRTIO_NET_HDR_GSO_UDP:
5382 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
5383 + if (skb->protocol == htons(ETH_P_IPV6))
5384 + ipv6_proxy_select_ident(skb);
5385 break;
5386 default:
5387 tun->dev->stats.rx_frame_errors++;
5388 @@ -1179,7 +1184,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
5389 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
5390 }
5391
5392 - skb_reset_network_header(skb);
5393 skb_probe_transport_header(skb, 0);
5394
5395 rxhash = skb_get_hash(skb);
5396 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
5397 index be4275721039..e6338c16081a 100644
5398 --- a/drivers/net/usb/ax88179_178a.c
5399 +++ b/drivers/net/usb/ax88179_178a.c
5400 @@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
5401 {
5402 struct usbnet *dev = netdev_priv(net);
5403 struct sockaddr *addr = p;
5404 + int ret;
5405
5406 if (netif_running(net))
5407 return -EBUSY;
5408 @@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
5409 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
5410
5411 /* Set the MAC address */
5412 - return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
5413 + ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
5414 ETH_ALEN, net->dev_addr);
5415 + if (ret < 0)
5416 + return ret;
5417 +
5418 + return 0;
5419 }
5420
5421 static const struct net_device_ops ax88179_netdev_ops = {
5422 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
5423 index beb377b2d4b7..b4831274b0ab 100644
5424 --- a/drivers/net/vxlan.c
5425 +++ b/drivers/net/vxlan.c
5426 @@ -1440,9 +1440,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
5427 if (!in6_dev)
5428 goto out;
5429
5430 - if (!pskb_may_pull(skb, skb->len))
5431 - goto out;
5432 -
5433 iphdr = ipv6_hdr(skb);
5434 saddr = &iphdr->saddr;
5435 daddr = &iphdr->daddr;
5436 @@ -1717,6 +1714,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
5437 struct pcpu_sw_netstats *tx_stats, *rx_stats;
5438 union vxlan_addr loopback;
5439 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
5440 + struct net_device *dev = skb->dev;
5441 + int len = skb->len;
5442
5443 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
5444 rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
5445 @@ -1740,16 +1739,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
5446
5447 u64_stats_update_begin(&tx_stats->syncp);
5448 tx_stats->tx_packets++;
5449 - tx_stats->tx_bytes += skb->len;
5450 + tx_stats->tx_bytes += len;
5451 u64_stats_update_end(&tx_stats->syncp);
5452
5453 if (netif_rx(skb) == NET_RX_SUCCESS) {
5454 u64_stats_update_begin(&rx_stats->syncp);
5455 rx_stats->rx_packets++;
5456 - rx_stats->rx_bytes += skb->len;
5457 + rx_stats->rx_bytes += len;
5458 u64_stats_update_end(&rx_stats->syncp);
5459 } else {
5460 - skb->dev->stats.rx_dropped++;
5461 + dev->stats.rx_dropped++;
5462 }
5463 }
5464
5465 @@ -1927,7 +1926,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
5466 return arp_reduce(dev, skb);
5467 #if IS_ENABLED(CONFIG_IPV6)
5468 else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
5469 - skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
5470 + pskb_may_pull(skb, sizeof(struct ipv6hdr)
5471 + + sizeof(struct nd_msg)) &&
5472 ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
5473 struct nd_msg *msg;
5474
5475 @@ -1936,6 +1936,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
5476 msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
5477 return neigh_reduce(dev, skb);
5478 }
5479 + eth = eth_hdr(skb);
5480 #endif
5481 }
5482
5483 diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
5484 index afb98f4fdaf3..913398525947 100644
5485 --- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
5486 +++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
5487 @@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5488 u32 queues, bool drop)
5489 {
5490 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
5491 + u32 scd_queues;
5492
5493 mutex_lock(&priv->mutex);
5494 IWL_DEBUG_MAC80211(priv, "enter\n");
5495 @@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5496 goto done;
5497 }
5498
5499 - /*
5500 - * mac80211 will not push any more frames for transmit
5501 - * until the flush is completed
5502 - */
5503 - if (drop) {
5504 - IWL_DEBUG_MAC80211(priv, "send flush command\n");
5505 - if (iwlagn_txfifo_flush(priv, 0)) {
5506 - IWL_ERR(priv, "flush request fail\n");
5507 - goto done;
5508 - }
5509 + scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
5510 + scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
5511 + BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
5512 +
5513 + if (vif)
5514 + scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
5515 +
5516 + IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
5517 + if (iwlagn_txfifo_flush(priv, scd_queues)) {
5518 + IWL_ERR(priv, "flush request fail\n");
5519 + goto done;
5520 }
5521 - IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
5522 + IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
5523 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
5524 done:
5525 mutex_unlock(&priv->mutex);
5526 diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
5527 index 656371a668da..86fb12162af2 100644
5528 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h
5529 +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
5530 @@ -548,6 +548,7 @@ enum iwl_trans_state {
5531 * Set during transport allocation.
5532 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
5533 * @pm_support: set to true in start_hw if link pm is supported
5534 + * @ltr_enabled: set to true if the LTR is enabled
5535 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
5536 * The user should use iwl_trans_{alloc,free}_tx_cmd.
5537 * @dev_cmd_headroom: room needed for the transport's private use before the
5538 @@ -574,6 +575,7 @@ struct iwl_trans {
5539 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
5540
5541 bool pm_support;
5542 + bool ltr_enabled;
5543
5544 /* The following fields are internal only */
5545 struct kmem_cache *dev_cmd_pool;
5546 diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
5547 index ce71625f497f..103fc93e9158 100644
5548 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c
5549 +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
5550 @@ -301,8 +301,8 @@ static const __le64 iwl_ci_mask[][3] = {
5551 };
5552
5553 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
5554 - cpu_to_le32(0x28412201),
5555 - cpu_to_le32(0x11118451),
5556 + cpu_to_le32(0x2e402280),
5557 + cpu_to_le32(0x7711a751),
5558 };
5559
5560 struct corunning_block_luts {
5561 diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
5562 index a3be33359927..d55c2a8f724f 100644
5563 --- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
5564 +++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
5565 @@ -289,8 +289,8 @@ static const __le64 iwl_ci_mask[][3] = {
5566 };
5567
5568 static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
5569 - cpu_to_le32(0x28412201),
5570 - cpu_to_le32(0x11118451),
5571 + cpu_to_le32(0x2e402280),
5572 + cpu_to_le32(0x7711a751),
5573 };
5574
5575 struct corunning_block_luts {
5576 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
5577 index c3a8c86b550d..4d8932c1cd6d 100644
5578 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
5579 +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
5580 @@ -66,13 +66,46 @@
5581
5582 /* Power Management Commands, Responses, Notifications */
5583
5584 +/**
5585 + * enum iwl_ltr_config_flags - masks for LTR config command flags
5586 + * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
5587 + * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
5588 + * memory access
5589 + * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
5590 + * reg change
5591 + * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
5592 + * D0 to D3
5593 + * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
5594 + * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
5595 + * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
5596 + */
5597 +enum iwl_ltr_config_flags {
5598 + LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
5599 + LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
5600 + LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
5601 + LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
5602 + LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
5603 + LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
5604 + LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
5605 +};
5606 +
5607 +/**
5608 + * struct iwl_ltr_config_cmd - configures the LTR
5609 + * @flags: See %enum iwl_ltr_config_flags
5610 + */
5611 +struct iwl_ltr_config_cmd {
5612 + __le32 flags;
5613 + __le32 static_long;
5614 + __le32 static_short;
5615 +} __packed;
5616 +
5617 /* Radio LP RX Energy Threshold measured in dBm */
5618 #define POWER_LPRX_RSSI_THRESHOLD 75
5619 #define POWER_LPRX_RSSI_THRESHOLD_MAX 94
5620 #define POWER_LPRX_RSSI_THRESHOLD_MIN 30
5621
5622 /**
5623 - * enum iwl_scan_flags - masks for power table command flags
5624 + * enum iwl_power_flags - masks for power table command flags
5625 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
5626 * receiver and transmitter. '0' - does not allow.
5627 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
5628 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
5629 index 9a922f3bd16b..7b73ed4903c4 100644
5630 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
5631 +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
5632 @@ -148,6 +148,7 @@ enum {
5633 /* Power - legacy power table command */
5634 POWER_TABLE_CMD = 0x77,
5635 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
5636 + LTR_CONFIG = 0xee,
5637
5638 /* Thermal Throttling*/
5639 REPLY_THERMAL_MNG_BACKOFF = 0x7e,
5640 diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
5641 index 883e702152d5..bf720a875e6b 100644
5642 --- a/drivers/net/wireless/iwlwifi/mvm/fw.c
5643 +++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
5644 @@ -475,6 +475,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
5645 /* Initialize tx backoffs to the minimal possible */
5646 iwl_mvm_tt_tx_backoff(mvm, 0);
5647
5648 + if (mvm->trans->ltr_enabled) {
5649 + struct iwl_ltr_config_cmd cmd = {
5650 + .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
5651 + };
5652 +
5653 + WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
5654 + sizeof(cmd), &cmd));
5655 + }
5656 +
5657 ret = iwl_mvm_power_update_device(mvm);
5658 if (ret)
5659 goto error;
5660 diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
5661 index 610dbcb0dc27..d31a1178ae35 100644
5662 --- a/drivers/net/wireless/iwlwifi/mvm/ops.c
5663 +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
5664 @@ -332,6 +332,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
5665 CMD(REPLY_BEACON_FILTERING_CMD),
5666 CMD(REPLY_THERMAL_MNG_BACKOFF),
5667 CMD(MAC_PM_POWER_TABLE),
5668 + CMD(LTR_CONFIG),
5669 CMD(BT_COEX_CI),
5670 CMD(BT_COEX_UPDATE_SW_BOOST),
5671 CMD(BT_COEX_UPDATE_CORUN_LUT),
5672 diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
5673 index 9ee410bf6da2..dbc870713882 100644
5674 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c
5675 +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
5676 @@ -168,14 +168,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
5677
5678 /*
5679 * for data packets, rate info comes from the table inside the fw. This
5680 - * table is controlled by LINK_QUALITY commands. Exclude ctrl port
5681 - * frames like EAPOLs which should be treated as mgmt frames. This
5682 - * avoids them being sent initially in high rates which increases the
5683 - * chances for completion of the 4-Way handshake.
5684 + * table is controlled by LINK_QUALITY commands
5685 */
5686
5687 - if (ieee80211_is_data(fc) && sta &&
5688 - !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
5689 + if (ieee80211_is_data(fc) && sta) {
5690 tx_cmd->initial_rate_index = 0;
5691 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
5692 return;
5693 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
5694 index 06e04aaf61ee..d7231a82fe42 100644
5695 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
5696 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
5697 @@ -172,6 +172,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
5698 {
5699 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5700 u16 lctl;
5701 + u16 cap;
5702
5703 /*
5704 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
5705 @@ -182,16 +183,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
5706 * power savings, even without L1.
5707 */
5708 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
5709 - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
5710 - /* L1-ASPM enabled; disable(!) L0S */
5711 + if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
5712 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
5713 - dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
5714 - } else {
5715 - /* L1-ASPM disabled; enable(!) L0S */
5716 + else
5717 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
5718 - dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
5719 - }
5720 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
5721 +
5722 + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
5723 + trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
5724 + dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
5725 + (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
5726 + trans->ltr_enabled ? "En" : "Dis");
5727 }
5728
5729 /*
5730 diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
5731 index 7cf6081a05a1..ebd5625d13f1 100644
5732 --- a/drivers/net/wireless/rt2x00/rt2800.h
5733 +++ b/drivers/net/wireless/rt2x00/rt2800.h
5734 @@ -52,6 +52,7 @@
5735 * RF5592 2.4G/5G 2T2R
5736 * RF3070 2.4G 1T1R
5737 * RF5360 2.4G 1T1R
5738 + * RF5362 2.4G 1T1R
5739 * RF5370 2.4G 1T1R
5740 * RF5390 2.4G 1T1R
5741 */
5742 @@ -72,6 +73,7 @@
5743 #define RF3070 0x3070
5744 #define RF3290 0x3290
5745 #define RF5360 0x5360
5746 +#define RF5362 0x5362
5747 #define RF5370 0x5370
5748 #define RF5372 0x5372
5749 #define RF5390 0x5390
5750 @@ -2145,7 +2147,7 @@ struct mac_iveiv_entry {
5751 /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */
5752 #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70)
5753 #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
5754 -/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
5755 +/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */
5756 #define RFCSR3_VCOCAL_EN FIELD8(0x80)
5757 /* Bits for RF3050 */
5758 #define RFCSR3_BIT1 FIELD8(0x02)
5759 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
5760 index 893c9d5f3d6f..9f57a2db791c 100644
5761 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
5762 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
5763 @@ -3186,6 +3186,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
5764 break;
5765 case RF3070:
5766 case RF5360:
5767 + case RF5362:
5768 case RF5370:
5769 case RF5372:
5770 case RF5390:
5771 @@ -3203,6 +3204,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
5772 rt2x00_rf(rt2x00dev, RF3290) ||
5773 rt2x00_rf(rt2x00dev, RF3322) ||
5774 rt2x00_rf(rt2x00dev, RF5360) ||
5775 + rt2x00_rf(rt2x00dev, RF5362) ||
5776 rt2x00_rf(rt2x00dev, RF5370) ||
5777 rt2x00_rf(rt2x00dev, RF5372) ||
5778 rt2x00_rf(rt2x00dev, RF5390) ||
5779 @@ -4317,6 +4319,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
5780 case RF3070:
5781 case RF3290:
5782 case RF5360:
5783 + case RF5362:
5784 case RF5370:
5785 case RF5372:
5786 case RF5390:
5787 @@ -7095,6 +7098,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
5788 case RF3320:
5789 case RF3322:
5790 case RF5360:
5791 + case RF5362:
5792 case RF5370:
5793 case RF5372:
5794 case RF5390:
5795 @@ -7551,6 +7555,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5796 case RF3320:
5797 case RF3322:
5798 case RF5360:
5799 + case RF5362:
5800 case RF5370:
5801 case RF5372:
5802 case RF5390:
5803 @@ -7680,6 +7685,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
5804 case RF3070:
5805 case RF3290:
5806 case RF5360:
5807 + case RF5362:
5808 case RF5370:
5809 case RF5372:
5810 case RF5390:
5811 diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
5812 index 573897b8e878..8444313eabe2 100644
5813 --- a/drivers/net/wireless/rt2x00/rt2800usb.c
5814 +++ b/drivers/net/wireless/rt2x00/rt2800usb.c
5815 @@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
5816 /* Ovislink */
5817 { USB_DEVICE(0x1b75, 0x3071) },
5818 { USB_DEVICE(0x1b75, 0x3072) },
5819 + { USB_DEVICE(0x1b75, 0xa200) },
5820 /* Para */
5821 { USB_DEVICE(0x20b8, 0x8888) },
5822 /* Pegatron */
5823 diff --git a/drivers/of/base.c b/drivers/of/base.c
5824 index 293ed4b687ba..902b1b09efed 100644
5825 --- a/drivers/of/base.c
5826 +++ b/drivers/of/base.c
5827 @@ -1277,52 +1277,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
5828 EXPORT_SYMBOL_GPL(of_property_read_string);
5829
5830 /**
5831 - * of_property_read_string_index - Find and read a string from a multiple
5832 - * strings property.
5833 - * @np: device node from which the property value is to be read.
5834 - * @propname: name of the property to be searched.
5835 - * @index: index of the string in the list of strings
5836 - * @out_string: pointer to null terminated return string, modified only if
5837 - * return value is 0.
5838 - *
5839 - * Search for a property in a device tree node and retrieve a null
5840 - * terminated string value (pointer to data, not a copy) in the list of strings
5841 - * contained in that property.
5842 - * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
5843 - * property does not have a value, and -EILSEQ if the string is not
5844 - * null-terminated within the length of the property data.
5845 - *
5846 - * The out_string pointer is modified only if a valid string can be decoded.
5847 - */
5848 -int of_property_read_string_index(struct device_node *np, const char *propname,
5849 - int index, const char **output)
5850 -{
5851 - struct property *prop = of_find_property(np, propname, NULL);
5852 - int i = 0;
5853 - size_t l = 0, total = 0;
5854 - const char *p;
5855 -
5856 - if (!prop)
5857 - return -EINVAL;
5858 - if (!prop->value)
5859 - return -ENODATA;
5860 - if (strnlen(prop->value, prop->length) >= prop->length)
5861 - return -EILSEQ;
5862 -
5863 - p = prop->value;
5864 -
5865 - for (i = 0; total < prop->length; total += l, p += l) {
5866 - l = strlen(p) + 1;
5867 - if (i++ == index) {
5868 - *output = p;
5869 - return 0;
5870 - }
5871 - }
5872 - return -ENODATA;
5873 -}
5874 -EXPORT_SYMBOL_GPL(of_property_read_string_index);
5875 -
5876 -/**
5877 * of_property_match_string() - Find string in a list and return index
5878 * @np: pointer to node containing string list property
5879 * @propname: string list property name
5880 @@ -1348,7 +1302,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
5881 end = p + prop->length;
5882
5883 for (i = 0; p < end; i++, p += l) {
5884 - l = strlen(p) + 1;
5885 + l = strnlen(p, end - p) + 1;
5886 if (p + l > end)
5887 return -EILSEQ;
5888 pr_debug("comparing %s with %s\n", string, p);
5889 @@ -1360,39 +1314,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
5890 EXPORT_SYMBOL_GPL(of_property_match_string);
5891
5892 /**
5893 - * of_property_count_strings - Find and return the number of strings from a
5894 - * multiple strings property.
5895 + * of_property_read_string_helper() - Utility helper for parsing string properties
5896 * @np: device node from which the property value is to be read.
5897 * @propname: name of the property to be searched.
5898 + * @out_strs: output array of string pointers.
5899 + * @sz: number of array elements to read.
5900 + * @skip: Number of strings to skip over at beginning of list.
5901 *
5902 - * Search for a property in a device tree node and retrieve the number of null
5903 - * terminated string contain in it. Returns the number of strings on
5904 - * success, -EINVAL if the property does not exist, -ENODATA if property
5905 - * does not have a value, and -EILSEQ if the string is not null-terminated
5906 - * within the length of the property data.
5907 + * Don't call this function directly. It is a utility helper for the
5908 + * of_property_read_string*() family of functions.
5909 */
5910 -int of_property_count_strings(struct device_node *np, const char *propname)
5911 +int of_property_read_string_helper(struct device_node *np, const char *propname,
5912 + const char **out_strs, size_t sz, int skip)
5913 {
5914 struct property *prop = of_find_property(np, propname, NULL);
5915 - int i = 0;
5916 - size_t l = 0, total = 0;
5917 - const char *p;
5918 + int l = 0, i = 0;
5919 + const char *p, *end;
5920
5921 if (!prop)
5922 return -EINVAL;
5923 if (!prop->value)
5924 return -ENODATA;
5925 - if (strnlen(prop->value, prop->length) >= prop->length)
5926 - return -EILSEQ;
5927 -
5928 p = prop->value;
5929 + end = p + prop->length;
5930
5931 - for (i = 0; total < prop->length; total += l, p += l, i++)
5932 - l = strlen(p) + 1;
5933 -
5934 - return i;
5935 + for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
5936 + l = strnlen(p, end - p) + 1;
5937 + if (p + l > end)
5938 + return -EILSEQ;
5939 + if (out_strs && i >= skip)
5940 + *out_strs++ = p;
5941 + }
5942 + i -= skip;
5943 + return i <= 0 ? -ENODATA : i;
5944 }
5945 -EXPORT_SYMBOL_GPL(of_property_count_strings);
5946 +EXPORT_SYMBOL_GPL(of_property_read_string_helper);
5947
5948 void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
5949 {
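
The rework above replaces the open-coded of_property_read_string_index() and of_property_count_strings() walks with one bounded helper that measures every element with strnlen() against the property length, so a list missing its final NUL is reported as -EILSEQ instead of being walked past the end of the property. A standalone sketch of the same walk in plain C (read_string_helper is an illustrative stand-in with the helper's skip/sz semantics, operating on a raw buffer instead of a struct property):

    #include <stdio.h>
    #include <string.h>

    #define ENODATA 61
    #define EILSEQ  84

    /* Fill out_strs[0..sz) after skipping 'skip' entries; return the number of
     * strings found (capped at sz when out_strs is set) or a negative errno. */
    static int read_string_helper(const char *value, size_t length,
                                  const char **out_strs, size_t sz, int skip)
    {
        const char *p = value, *end = value + length;
        int i, l;

        if (!value || !length)
            return -ENODATA;

        for (i = 0; p < end && (!out_strs || i < skip + (int)sz); i++, p += l) {
            l = strnlen(p, end - p) + 1;
            if (p + l > end)
                return -EILSEQ;     /* no NUL inside the property bounds */
            if (out_strs && i >= skip)
                *out_strs++ = p;
        }
        i -= skip;
        return i <= 0 ? -ENODATA : i;
    }

    int main(void)
    {
        const char list[] = "first\0second\0third";   /* sizeof() keeps the final NUL */
        const char *strs[3];
        int n = read_string_helper(list, sizeof(list), strs, 3, 0);

        printf("count=%d first=%s last=%s\n", n, strs[0], strs[n - 1]);

        /* counting mode, like of_property_count_strings() */
        printf("count only=%d\n", read_string_helper(list, sizeof(list), NULL, 0, 0));
        return 0;
    }
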
5950 diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
5951 index a737cb5974de..c92de69fcf7f 100644
5952 --- a/drivers/of/selftest.c
5953 +++ b/drivers/of/selftest.c
5954 @@ -247,8 +247,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
5955 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
5956 }
5957
5958 -static void __init of_selftest_property_match_string(void)
5959 +static void __init of_selftest_property_string(void)
5960 {
5961 + const char *strings[4];
5962 struct device_node *np;
5963 int rc;
5964
5965 @@ -265,13 +266,66 @@ static void __init of_selftest_property_match_string(void)
5966 rc = of_property_match_string(np, "phandle-list-names", "third");
5967 selftest(rc == 2, "third expected:0 got:%i\n", rc);
5968 rc = of_property_match_string(np, "phandle-list-names", "fourth");
5969 - selftest(rc == -ENODATA, "unmatched string; rc=%i", rc);
5970 + selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
5971 rc = of_property_match_string(np, "missing-property", "blah");
5972 - selftest(rc == -EINVAL, "missing property; rc=%i", rc);
5973 + selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
5974 rc = of_property_match_string(np, "empty-property", "blah");
5975 - selftest(rc == -ENODATA, "empty property; rc=%i", rc);
5976 + selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
5977 rc = of_property_match_string(np, "unterminated-string", "blah");
5978 - selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
5979 + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
5980 +
5981 + /* of_property_count_strings() tests */
5982 + rc = of_property_count_strings(np, "string-property");
5983 + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
5984 + rc = of_property_count_strings(np, "phandle-list-names");
5985 + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
5986 + rc = of_property_count_strings(np, "unterminated-string");
5987 + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
5988 + rc = of_property_count_strings(np, "unterminated-string-list");
5989 + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
5990 +
5991 + /* of_property_read_string_index() tests */
5992 + rc = of_property_read_string_index(np, "string-property", 0, strings);
5993 + selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
5994 + strings[0] = NULL;
5995 + rc = of_property_read_string_index(np, "string-property", 1, strings);
5996 + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
5997 + rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
5998 + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
5999 + rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
6000 + selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
6001 + rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
6002 + selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
6003 + strings[0] = NULL;
6004 + rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
6005 + selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
6006 + strings[0] = NULL;
6007 + rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
6008 + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
6009 + rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
6010 + selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
6011 + strings[0] = NULL;
6012 + rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
6013 + selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
6014 + strings[1] = NULL;
6015 +
6016 + /* of_property_read_string_array() tests */
6017 + rc = of_property_read_string_array(np, "string-property", strings, 4);
6018 + selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
6019 + rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
6020 + selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
6021 + rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
6022 + selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
6023 + /* -- An incorrectly formed string should cause a failure */
6024 + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
6025 + selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
6026 + /* -- parsing the correctly formed strings should still work: */
6027 + strings[2] = NULL;
6028 + rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
6029 + selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
6030 + strings[1] = NULL;
6031 + rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
6032 + selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
6033 }
6034
6035 #define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
6036 @@ -783,7 +837,7 @@ static int __init of_selftest(void)
6037 of_selftest_find_node_by_name();
6038 of_selftest_dynamic();
6039 of_selftest_parse_phandle_with_args();
6040 - of_selftest_property_match_string();
6041 + of_selftest_property_string();
6042 of_selftest_property_copy();
6043 of_selftest_changeset();
6044 of_selftest_parse_interrupts();
6045 diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
6046 index ce0fe083d406..5b1527e8a7fb 100644
6047 --- a/drivers/of/testcase-data/tests-phandle.dtsi
6048 +++ b/drivers/of/testcase-data/tests-phandle.dtsi
6049 @@ -39,7 +39,9 @@
6050 phandle-list-bad-args = <&provider2 1 0>,
6051 <&provider3 0>;
6052 empty-property;
6053 + string-property = "foobar";
6054 unterminated-string = [40 41 42 43];
6055 + unterminated-string-list = "first", "second", [40 41 42 43];
6056 };
6057 };
6058 };
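
The selftest hunks above exercise the new helper through its public wrappers against the added test data: a single-entry "string-property", the three-entry "phandle-list-names", and two deliberately unterminated byte arrays. As an illustrative consumer (not part of the patch, and only a kernel-style fragment rather than a standalone program), a driver would typically read such a list as below, assuming the in-tree <linux/of.h> API.

    #include <linux/kernel.h>
    #include <linux/of.h>

    /* Illustrative only: read up to four names from "phandle-list-names". */
    static int example_read_names(struct device_node *np)
    {
        const char *names[4];
        int i, n;

        n = of_property_read_string_array(np, "phandle-list-names",
                                          names, ARRAY_SIZE(names));
        if (n < 0)
            return n;    /* -EINVAL, -ENODATA or -EILSEQ, as tested above */

        for (i = 0; i < n; i++)
            pr_info("name[%d] = %s\n", i, names[i]);

        return 0;
    }

The return value is the number of strings actually found, or a negative errno such as -EILSEQ for the unterminated properties, which is exactly what the selftests assert.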
6059 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
6060 index 76ef7914c9aa..6d04771e4903 100644
6061 --- a/drivers/pci/pci-sysfs.c
6062 +++ b/drivers/pci/pci-sysfs.c
6063 @@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
6064 }
6065 static DEVICE_ATTR_RO(modalias);
6066
6067 -static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
6068 +static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
6069 const char *buf, size_t count)
6070 {
6071 struct pci_dev *pdev = to_pci_dev(dev);
6072 @@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
6073 return result < 0 ? result : count;
6074 }
6075
6076 -static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
6077 +static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
6078 char *buf)
6079 {
6080 struct pci_dev *pdev;
6081 @@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
6082 pdev = to_pci_dev(dev);
6083 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
6084 }
6085 -static DEVICE_ATTR_RW(enabled);
6086 +static DEVICE_ATTR_RW(enable);
6087
6088 #ifdef CONFIG_NUMA
6089 static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
6090 @@ -564,7 +564,7 @@ static struct attribute *pci_dev_attrs[] = {
6091 #endif
6092 &dev_attr_dma_mask_bits.attr,
6093 &dev_attr_consistent_dma_mask_bits.attr,
6094 - &dev_attr_enabled.attr,
6095 + &dev_attr_enable.attr,
6096 &dev_attr_broken_parity_status.attr,
6097 &dev_attr_msi_bus.attr,
6098 #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
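
The pci-sysfs hunks rename the attribute and both of its callbacks in one step because DEVICE_ATTR_RW() derives the sysfs file name, the <name>_show() reader and the <name>_store() writer from a single token, presumably restoring the long-standing "enable" name that userspace tooling expects. The following standalone sketch uses local stand-in types (not the real kernel definitions) only to show which identifiers the macro ties together.

    #include <stdio.h>

    /* Local stand-ins so the sketch compiles on its own. */
    struct device;
    struct device_attribute;

    static long enable_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
        (void)dev; (void)attr; (void)buf;
        return 0;
    }

    static long enable_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, long count)
    {
        (void)dev; (void)attr; (void)buf;
        return count;
    }

    struct attribute { const char *name; unsigned short mode; };

    struct device_attribute {
        struct attribute attr;
        long (*show)(struct device *, struct device_attribute *, char *);
        long (*store)(struct device *, struct device_attribute *, const char *, long);
    };

    /* Approximately what "static DEVICE_ATTR_RW(enable);" boils down to: */
    static struct device_attribute dev_attr_enable = {
        .attr  = { .name = "enable", .mode = 0644 },
        .show  = enable_show,
        .store = enable_store,
    };

    int main(void)
    {
        printf("sysfs name: %s, mode: %o\n", dev_attr_enable.attr.name,
               (unsigned)dev_attr_enable.attr.mode);
        return 0;
    }

Renaming only the file, or only the callbacks, would not link, and the &dev_attr_enable.attr entry in pci_dev_attrs[] has to follow as well, as the last hunk does.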
6099 diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
6100 index 93d78359246c..acc13f8179c3 100644
6101 --- a/drivers/phy/phy-omap-usb2.c
6102 +++ b/drivers/phy/phy-omap-usb2.c
6103 @@ -262,14 +262,16 @@ static int omap_usb2_probe(struct platform_device *pdev)
6104 otg->phy = &phy->phy;
6105
6106 platform_set_drvdata(pdev, phy);
6107 + pm_runtime_enable(phy->dev);
6108
6109 generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
6110 - if (IS_ERR(generic_phy))
6111 + if (IS_ERR(generic_phy)) {
6112 + pm_runtime_disable(phy->dev);
6113 return PTR_ERR(generic_phy);
6114 + }
6115
6116 phy_set_drvdata(generic_phy, phy);
6117
6118 - pm_runtime_enable(phy->dev);
6119 phy_provider = devm_of_phy_provider_register(phy->dev,
6120 of_phy_simple_xlate);
6121 if (IS_ERR(phy_provider)) {
6122 diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
6123 index e12e5b07f6d7..c23d8ded936d 100644
6124 --- a/drivers/pinctrl/pinctrl-baytrail.c
6125 +++ b/drivers/pinctrl/pinctrl-baytrail.c
6126 @@ -318,7 +318,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
6127 "Potential Error: Setting GPIO with direct_irq_en to output");
6128
6129 reg_val = readl(reg) | BYT_DIR_MASK;
6130 - reg_val &= ~BYT_OUTPUT_EN;
6131 + reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
6132
6133 if (value)
6134 writel(reg_val | BYT_LEVEL, reg);
6135 diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
6136 index 96a0b75c52c9..26c4fd1394da 100644
6137 --- a/drivers/platform/x86/acer-wmi.c
6138 +++ b/drivers/platform/x86/acer-wmi.c
6139 @@ -579,6 +579,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
6140 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
6141 },
6142 },
6143 + {
6144 + /*
6145 + * Note no video_set_backlight_video_vendor, we must use the
6146 + * acer interface, as there is no native backlight interface.
6147 + */
6148 + .ident = "Acer KAV80",
6149 + .matches = {
6150 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
6151 + DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
6152 + },
6153 + },
6154 {}
6155 };
6156
6157 diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
6158 index 5a5966512277..ff765d8e1a09 100644
6159 --- a/drivers/platform/x86/samsung-laptop.c
6160 +++ b/drivers/platform/x86/samsung-laptop.c
6161 @@ -1561,6 +1561,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
6162 },
6163 {
6164 .callback = samsung_dmi_matched,
6165 + .ident = "NC210",
6166 + .matches = {
6167 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
6168 + DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
6169 + DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
6170 + },
6171 + .driver_data = &samsung_broken_acpi_video,
6172 + },
6173 + {
6174 + .callback = samsung_dmi_matched,
6175 .ident = "730U3E/740U3E",
6176 .matches = {
6177 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
6178 diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
6179 index 9e4dab46eefd..ef1f4c928431 100644
6180 --- a/drivers/power/charger-manager.c
6181 +++ b/drivers/power/charger-manager.c
6182 @@ -1720,6 +1720,11 @@ static int charger_manager_probe(struct platform_device *pdev)
6183 return -EINVAL;
6184 }
6185
6186 + if (!desc->psy_fuel_gauge) {
6187 + dev_err(&pdev->dev, "No fuel gauge power supply defined\n");
6188 + return -EINVAL;
6189 + }
6190 +
6191 /* Counting index only */
6192 while (desc->psy_charger_stat[i])
6193 i++;
6194 diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
6195 index c67ff05fc1dd..d158f71fa128 100644
6196 --- a/drivers/regulator/max77693.c
6197 +++ b/drivers/regulator/max77693.c
6198 @@ -227,7 +227,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
6199 struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
6200 struct max77693_regulator_data *rdata = NULL;
6201 int num_rdata, i;
6202 - struct regulator_config config;
6203 + struct regulator_config config = { };
6204
6205 num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
6206 if (!rdata || num_rdata <= 0) {
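
The one-line max77693 change is a stack-initialization fix: struct regulator_config is a local variable of which only a few members are assigned before it is handed to the regulator core, so without the "= { }" initializer the remaining members carry whatever happened to be on the stack. A minimal standalone illustration of the difference, using gcc's empty-brace initializer as the kernel does and a made-up structure rather than the real one:

    #include <stdio.h>

    struct config {
        void *dev;
        void *init_data;
        void *driver_data;
        void *regmap;        /* stand-in members, not the real structure */
    };

    int main(void)
    {
        struct config zeroed = { };   /* every member zero-initialized */
        struct config uninit;         /* members indeterminate */

        printf("zeroed.regmap = %p\n", zeroed.regmap);
        /* Reading uninit.regmap here would be undefined behaviour; that
         * is the situation the "= { }" in the patch rules out. */
        (void)uninit;
        return 0;
    }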
6207 diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
6208 index a168e96142b9..54ef393b0def 100644
6209 --- a/drivers/rtc/Kconfig
6210 +++ b/drivers/rtc/Kconfig
6211 @@ -806,7 +806,7 @@ config RTC_DRV_DA9063
6212
6213 config RTC_DRV_EFI
6214 tristate "EFI RTC"
6215 - depends on EFI
6216 + depends on EFI && !X86
6217 help
6218 If you say yes here you will get support for the EFI
6219 Real Time Clock.
6220 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6221 index e2beab962096..4747d2c66024 100644
6222 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6223 +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6224 @@ -757,7 +757,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
6225 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
6226
6227 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
6228 - WARN_ON(node && (node != se_nacl));
6229 + if (WARN_ON(node && (node != se_nacl))) {
6230 + /*
6231 + * The nacl no longer matches what we think it should be.
6232 + * Most likely a new dynamic acl has been added while
6233 + * someone dropped the hardware lock. It clearly is a
6234 + * bug elsewhere, but this bit can't make things worse.
6235 + */
6236 + btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
6237 + node, GFP_ATOMIC);
6238 + }
6239
6240 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
6241 se_nacl, nacl->nport_wwnn, nacl->nport_id);
6242 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
6243 index aaea4b98af16..7cb8c7310a27 100644
6244 --- a/drivers/scsi/scsi_lib.c
6245 +++ b/drivers/scsi/scsi_lib.c
6246 @@ -1887,6 +1887,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
6247 req->cmd_flags |= REQ_DONTPREP;
6248 }
6249
6250 + if (blk_queue_tagged(q))
6251 + req->cmd_flags |= REQ_QUEUED;
6252 + else
6253 + req->cmd_flags &= ~REQ_QUEUED;
6254 +
6255 scsi_init_cmd_errh(cmd);
6256 cmd->scsi_done = scsi_mq_done;
6257
6258 diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
6259 index 5021ddf03f60..fde711380d1a 100644
6260 --- a/drivers/spi/spi-fsl-dspi.c
6261 +++ b/drivers/spi/spi-fsl-dspi.c
6262 @@ -46,7 +46,7 @@
6263
6264 #define SPI_TCR 0x08
6265
6266 -#define SPI_CTAR(x) (0x0c + (x * 4))
6267 +#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
6268 #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
6269 #define SPI_CTAR_CPOL(x) ((x) << 26)
6270 #define SPI_CTAR_CPHA(x) ((x) << 25)
6271 @@ -70,7 +70,7 @@
6272
6273 #define SPI_PUSHR 0x34
6274 #define SPI_PUSHR_CONT (1 << 31)
6275 -#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
6276 +#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
6277 #define SPI_PUSHR_EOQ (1 << 27)
6278 #define SPI_PUSHR_CTCNT (1 << 26)
6279 #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
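
The spi-fsl-dspi hunks harden two register macros: the argument is parenthesised and the CTAR index / CTAS field is masked, so an out-of-range value wraps inside the valid window instead of forming an offset past the CTAR block. A tiny standalone program (old macro vs. new, illustrative values only) makes both effects visible:

    #include <stdio.h>

    #define SPI_CTAR_OLD(x) (0x0c + (x * 4))            /* before the patch */
    #define SPI_CTAR_NEW(x) (0x0c + (((x) & 0x3) * 4))  /* after the patch  */

    int main(void)
    {
        int x;

        for (x = 0; x < 6; x++)
            printf("x=%d  old=0x%02x  new=0x%02x\n",
                   x, SPI_CTAR_OLD(x), SPI_CTAR_NEW(x));

        /* Parenthesising (x) also keeps expression arguments intact:
         * SPI_CTAR_OLD(1 + 1) evaluates as 0x0c + (1 + 1 * 4) = 0x11,
         * while SPI_CTAR_NEW(1 + 1) is the intended 0x0c + 2 * 4 = 0x14. */
        printf("old(1+1)=0x%02x  new(1+1)=0x%02x\n",
               SPI_CTAR_OLD(1 + 1), SPI_CTAR_NEW(1 + 1));
        return 0;
    }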
6280 diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
6281 index f1f0a587e4fc..dbd576dbd15a 100644
6282 --- a/drivers/spi/spi-pl022.c
6283 +++ b/drivers/spi/spi-pl022.c
6284 @@ -1074,7 +1074,7 @@ err_rxdesc:
6285 pl022->sgt_tx.nents, DMA_TO_DEVICE);
6286 err_tx_sgmap:
6287 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
6288 - pl022->sgt_tx.nents, DMA_FROM_DEVICE);
6289 + pl022->sgt_rx.nents, DMA_FROM_DEVICE);
6290 err_rx_sgmap:
6291 sg_free_table(&pl022->sgt_tx);
6292 err_alloc_tx_sg:
6293 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
6294 index 46f45ca2c694..9090dad99bb2 100644
6295 --- a/drivers/spi/spi-pxa2xx.c
6296 +++ b/drivers/spi/spi-pxa2xx.c
6297 @@ -1276,7 +1276,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
6298 if (status != 0)
6299 return status;
6300 write_SSCR0(0, drv_data->ioaddr);
6301 - clk_disable_unprepare(ssp->clk);
6302 +
6303 + if (!pm_runtime_suspended(dev))
6304 + clk_disable_unprepare(ssp->clk);
6305
6306 return 0;
6307 }
6308 @@ -1290,7 +1292,8 @@ static int pxa2xx_spi_resume(struct device *dev)
6309 pxa2xx_spi_dma_resume(drv_data);
6310
6311 /* Enable the SSP clock */
6312 - clk_prepare_enable(ssp->clk);
6313 + if (!pm_runtime_suspended(dev))
6314 + clk_prepare_enable(ssp->clk);
6315
6316 /* Restore LPSS private register bits */
6317 lpss_ssp_setup(drv_data);
6318 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
6319 index 2182c7463cdb..7a2e9c023257 100644
6320 --- a/drivers/staging/comedi/comedi_fops.c
6321 +++ b/drivers/staging/comedi/comedi_fops.c
6322 @@ -1462,10 +1462,7 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
6323 unsigned int *chanlist;
6324 int ret;
6325
6326 - /* user_chanlist could be NULL for do_cmdtest ioctls */
6327 - if (!user_chanlist)
6328 - return 0;
6329 -
6330 + cmd->chanlist = NULL;
6331 chanlist = memdup_user(user_chanlist,
6332 cmd->chanlist_len * sizeof(unsigned int));
6333 if (IS_ERR(chanlist))
6334 @@ -1609,13 +1606,18 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
6335
6336 s = &dev->subdevices[cmd.subdev];
6337
6338 - /* load channel/gain list */
6339 - ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
6340 - if (ret)
6341 - return ret;
6342 + /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */
6343 + if (user_chanlist) {
6344 + /* load channel/gain list */
6345 + ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
6346 + if (ret)
6347 + return ret;
6348 + }
6349
6350 ret = s->do_cmdtest(dev, s, &cmd);
6351
6352 + kfree(cmd.chanlist); /* free kernel copy of user chanlist */
6353 +
6354 /* restore chanlist pointer before copying back */
6355 cmd.chanlist = (unsigned int __force *)user_chanlist;
6356
6357 diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
6358 index 468327f4a753..d3436114a6f4 100644
6359 --- a/drivers/staging/iio/adc/mxs-lradc.c
6360 +++ b/drivers/staging/iio/adc/mxs-lradc.c
6361 @@ -1565,14 +1565,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
6362 /* Grab all IRQ sources */
6363 for (i = 0; i < of_cfg->irq_count; i++) {
6364 lradc->irq[i] = platform_get_irq(pdev, i);
6365 - if (lradc->irq[i] < 0)
6366 - return lradc->irq[i];
6367 + if (lradc->irq[i] < 0) {
6368 + ret = lradc->irq[i];
6369 + goto err_clk;
6370 + }
6371
6372 ret = devm_request_irq(dev, lradc->irq[i],
6373 mxs_lradc_handle_irq, 0,
6374 of_cfg->irq_name[i], iio);
6375 if (ret)
6376 - return ret;
6377 + goto err_clk;
6378 }
6379
6380 lradc->vref_mv = of_cfg->vref_mv;
6381 @@ -1594,7 +1596,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
6382 &mxs_lradc_trigger_handler,
6383 &mxs_lradc_buffer_ops);
6384 if (ret)
6385 - return ret;
6386 + goto err_clk;
6387
6388 ret = mxs_lradc_trigger_init(iio);
6389 if (ret)
6390 @@ -1649,6 +1651,8 @@ err_dev:
6391 mxs_lradc_trigger_remove(iio);
6392 err_trig:
6393 iio_triggered_buffer_cleanup(iio);
6394 +err_clk:
6395 + clk_disable_unprepare(lradc->clk);
6396 return ret;
6397 }
6398
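
The mxs-lradc hunks route every failure after the clock has been prepared through a new err_clk label so clk_disable_unprepare() is always reached. A reduced standalone sketch of that goto-unwind idiom; the resource names are placeholders, not the driver's:

    #include <stdio.h>

    static int clk_enabled;

    static void clk_enable(void)  { clk_enabled = 1; }
    static void clk_disable(void) { clk_enabled = 0; puts("clock released"); }
    static int  request_irqs(int fail) { return fail ? -1 : 0; }

    static int probe(int fail_irqs)
    {
        int ret;

        clk_enable();

        ret = request_irqs(fail_irqs);
        if (ret)
            goto err_clk;     /* every later failure jumps here as well */

        return 0;

    err_clk:
        clk_disable();
        return ret;
    }

    int main(void)
    {
        int ret;

        ret = probe(1);
        printf("probe(fail) = %d, clk_enabled = %d\n", ret, clk_enabled);

        ret = probe(0);
        printf("probe(ok)   = %d, clk_enabled = %d\n", ret, clk_enabled);
        return 0;
    }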
6399 diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
6400 index 2b96665da8a2..97d4b3fb7e95 100644
6401 --- a/drivers/staging/iio/impedance-analyzer/ad5933.c
6402 +++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
6403 @@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
6404 .channel = 0,
6405 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
6406 .address = AD5933_REG_TEMP_DATA,
6407 + .scan_index = -1,
6408 .scan_type = {
6409 .sign = 's',
6410 .realbits = 14,
6411 @@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
6412 .type = IIO_VOLTAGE,
6413 .indexed = 1,
6414 .channel = 0,
6415 - .extend_name = "real_raw",
6416 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
6417 - BIT(IIO_CHAN_INFO_SCALE),
6418 + .extend_name = "real",
6419 .address = AD5933_REG_REAL_DATA,
6420 .scan_index = 0,
6421 .scan_type = {
6422 @@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
6423 .type = IIO_VOLTAGE,
6424 .indexed = 1,
6425 .channel = 0,
6426 - .extend_name = "imag_raw",
6427 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
6428 - BIT(IIO_CHAN_INFO_SCALE),
6429 + .extend_name = "imag",
6430 .address = AD5933_REG_IMAG_DATA,
6431 .scan_index = 1,
6432 .scan_type = {
6433 @@ -748,14 +745,14 @@ static int ad5933_probe(struct i2c_client *client,
6434 indio_dev->name = id->name;
6435 indio_dev->modes = INDIO_DIRECT_MODE;
6436 indio_dev->channels = ad5933_channels;
6437 - indio_dev->num_channels = 1; /* only register temp0_input */
6438 + indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
6439
6440 ret = ad5933_register_ring_funcs_and_init(indio_dev);
6441 if (ret)
6442 goto error_disable_reg;
6443
6444 - /* skip temp0_input, register in0_(real|imag)_raw */
6445 - ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2);
6446 + ret = iio_buffer_register(indio_dev, ad5933_channels,
6447 + ARRAY_SIZE(ad5933_channels));
6448 if (ret)
6449 goto error_unreg_ring;
6450
6451 diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
6452 index 07318203a836..e8c98cf57070 100644
6453 --- a/drivers/staging/iio/meter/ade7758.h
6454 +++ b/drivers/staging/iio/meter/ade7758.h
6455 @@ -119,7 +119,6 @@ struct ade7758_state {
6456 u8 *tx;
6457 u8 *rx;
6458 struct mutex buf_lock;
6459 - const struct iio_chan_spec *ade7758_ring_channels;
6460 struct spi_transfer ring_xfer[4];
6461 struct spi_message ring_msg;
6462 /*
6463 diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
6464 index cba183e24838..94d9914a602c 100644
6465 --- a/drivers/staging/iio/meter/ade7758_core.c
6466 +++ b/drivers/staging/iio/meter/ade7758_core.c
6467 @@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6468 .type = IIO_VOLTAGE,
6469 .indexed = 1,
6470 .channel = 0,
6471 - .extend_name = "raw",
6472 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6473 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6474 .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
6475 .scan_index = 0,
6476 .scan_type = {
6477 @@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6478 .type = IIO_CURRENT,
6479 .indexed = 1,
6480 .channel = 0,
6481 - .extend_name = "raw",
6482 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6483 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6484 .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
6485 .scan_index = 1,
6486 .scan_type = {
6487 @@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6488 .type = IIO_POWER,
6489 .indexed = 1,
6490 .channel = 0,
6491 - .extend_name = "apparent_raw",
6492 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6493 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6494 + .extend_name = "apparent",
6495 .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
6496 .scan_index = 2,
6497 .scan_type = {
6498 @@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6499 .type = IIO_POWER,
6500 .indexed = 1,
6501 .channel = 0,
6502 - .extend_name = "active_raw",
6503 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6504 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6505 + .extend_name = "active",
6506 .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
6507 .scan_index = 3,
6508 .scan_type = {
6509 @@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6510 .type = IIO_POWER,
6511 .indexed = 1,
6512 .channel = 0,
6513 - .extend_name = "reactive_raw",
6514 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6515 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6516 + .extend_name = "reactive",
6517 .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
6518 .scan_index = 4,
6519 .scan_type = {
6520 @@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6521 .type = IIO_VOLTAGE,
6522 .indexed = 1,
6523 .channel = 1,
6524 - .extend_name = "raw",
6525 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6526 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6527 .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
6528 .scan_index = 5,
6529 .scan_type = {
6530 @@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6531 .type = IIO_CURRENT,
6532 .indexed = 1,
6533 .channel = 1,
6534 - .extend_name = "raw",
6535 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6536 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6537 .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
6538 .scan_index = 6,
6539 .scan_type = {
6540 @@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6541 .type = IIO_POWER,
6542 .indexed = 1,
6543 .channel = 1,
6544 - .extend_name = "apparent_raw",
6545 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6546 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6547 + .extend_name = "apparent",
6548 .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
6549 .scan_index = 7,
6550 .scan_type = {
6551 @@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6552 .type = IIO_POWER,
6553 .indexed = 1,
6554 .channel = 1,
6555 - .extend_name = "active_raw",
6556 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6557 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6558 + .extend_name = "active",
6559 .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
6560 .scan_index = 8,
6561 .scan_type = {
6562 @@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6563 .type = IIO_POWER,
6564 .indexed = 1,
6565 .channel = 1,
6566 - .extend_name = "reactive_raw",
6567 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6568 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6569 + .extend_name = "reactive",
6570 .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
6571 .scan_index = 9,
6572 .scan_type = {
6573 @@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6574 .type = IIO_VOLTAGE,
6575 .indexed = 1,
6576 .channel = 2,
6577 - .extend_name = "raw",
6578 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6579 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6580 .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
6581 .scan_index = 10,
6582 .scan_type = {
6583 @@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
6584 .type = IIO_CURRENT,
6585 .indexed = 1,
6586 .channel = 2,
6587 - .extend_name = "raw",
6588 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6589 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6590 .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
6591 .scan_index = 11,
6592 .scan_type = {
6593 @@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6594 .type = IIO_POWER,
6595 .indexed = 1,
6596 .channel = 2,
6597 - .extend_name = "apparent_raw",
6598 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6599 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6600 + .extend_name = "apparent",
6601 .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
6602 .scan_index = 12,
6603 .scan_type = {
6604 @@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6605 .type = IIO_POWER,
6606 .indexed = 1,
6607 .channel = 2,
6608 - .extend_name = "active_raw",
6609 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6610 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6611 + .extend_name = "active",
6612 .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
6613 .scan_index = 13,
6614 .scan_type = {
6615 @@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
6616 .type = IIO_POWER,
6617 .indexed = 1,
6618 .channel = 2,
6619 - .extend_name = "reactive_raw",
6620 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
6621 - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
6622 + .extend_name = "reactive",
6623 .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
6624 .scan_index = 14,
6625 .scan_type = {
6626 @@ -869,13 +833,14 @@ static int ade7758_probe(struct spi_device *spi)
6627 goto error_free_rx;
6628 }
6629 st->us = spi;
6630 - st->ade7758_ring_channels = &ade7758_channels[0];
6631 mutex_init(&st->buf_lock);
6632
6633 indio_dev->name = spi->dev.driver->name;
6634 indio_dev->dev.parent = &spi->dev;
6635 indio_dev->info = &ade7758_info;
6636 indio_dev->modes = INDIO_DIRECT_MODE;
6637 + indio_dev->channels = ade7758_channels;
6638 + indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
6639
6640 ret = ade7758_configure_ring(indio_dev);
6641 if (ret)
6642 diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
6643 index c0accf8cce93..6e9006490742 100644
6644 --- a/drivers/staging/iio/meter/ade7758_ring.c
6645 +++ b/drivers/staging/iio/meter/ade7758_ring.c
6646 @@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
6647 **/
6648 static int ade7758_ring_preenable(struct iio_dev *indio_dev)
6649 {
6650 - struct ade7758_state *st = iio_priv(indio_dev);
6651 unsigned channel;
6652
6653 - if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
6654 + if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
6655 return -EINVAL;
6656
6657 channel = find_first_bit(indio_dev->active_scan_mask,
6658 indio_dev->masklength);
6659
6660 ade7758_write_waveform_type(&indio_dev->dev,
6661 - st->ade7758_ring_channels[channel].address);
6662 + indio_dev->channels[channel].address);
6663
6664 return 0;
6665 }
6666 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
6667 index 98da90167159..15a1c133ec05 100644
6668 --- a/drivers/target/target_core_device.c
6669 +++ b/drivers/target/target_core_device.c
6670 @@ -1409,7 +1409,8 @@ int core_dev_add_initiator_node_lun_acl(
6671 * Check to see if there are any existing persistent reservation APTPL
6672 * pre-registrations that need to be enabled for this LUN ACL..
6673 */
6674 - core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
6675 + core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
6676 + lacl->mapped_lun);
6677 return 0;
6678 }
6679
6680 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
6681 index df357862286e..1aadcfc9a8c1 100644
6682 --- a/drivers/target/target_core_pr.c
6683 +++ b/drivers/target/target_core_pr.c
6684 @@ -944,10 +944,10 @@ int core_scsi3_check_aptpl_registration(
6685 struct se_device *dev,
6686 struct se_portal_group *tpg,
6687 struct se_lun *lun,
6688 - struct se_lun_acl *lun_acl)
6689 + struct se_node_acl *nacl,
6690 + u32 mapped_lun)
6691 {
6692 - struct se_node_acl *nacl = lun_acl->se_lun_nacl;
6693 - struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
6694 + struct se_dev_entry *deve = nacl->device_list[mapped_lun];
6695
6696 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
6697 return 0;
6698 diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
6699 index 2ee2936fa0bd..749fd7bb7510 100644
6700 --- a/drivers/target/target_core_pr.h
6701 +++ b/drivers/target/target_core_pr.h
6702 @@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
6703 unsigned char *, u16, u32, int, int, u8);
6704 extern int core_scsi3_check_aptpl_registration(struct se_device *,
6705 struct se_portal_group *, struct se_lun *,
6706 - struct se_lun_acl *);
6707 + struct se_node_acl *, u32);
6708 extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
6709 struct se_node_acl *);
6710 extern void core_scsi3_free_all_registrations(struct se_device *);
6711 diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
6712 index fddfae61222f..8d8ecfbcbad7 100644
6713 --- a/drivers/target/target_core_tpg.c
6714 +++ b/drivers/target/target_core_tpg.c
6715 @@ -40,6 +40,7 @@
6716 #include <target/target_core_fabric.h>
6717
6718 #include "target_core_internal.h"
6719 +#include "target_core_pr.h"
6720
6721 extern struct se_device *g_lun0_dev;
6722
6723 @@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
6724
6725 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
6726 lun_access, acl, tpg);
6727 + /*
6728 + * Check to see if there are any existing persistent reservation
6729 + * APTPL pre-registrations that need to be enabled for this dynamic
6730 + * LUN ACL now..
6731 + */
6732 + core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
6733 + lun->unpacked_lun);
6734 spin_lock(&tpg->tpg_lun_lock);
6735 }
6736 spin_unlock(&tpg->tpg_lun_lock);
6737 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
6738 index 7fa62fc93e0b..ab610146681d 100644
6739 --- a/drivers/target/target_core_transport.c
6740 +++ b/drivers/target/target_core_transport.c
6741 @@ -1877,8 +1877,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
6742 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
6743 trace_target_cmd_complete(cmd);
6744 ret = cmd->se_tfo->queue_status(cmd);
6745 - if (ret)
6746 - goto out;
6747 + goto out;
6748 }
6749
6750 switch (cmd->data_direction) {
6751 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
6752 index 0da0b5474e98..077570a48828 100644
6753 --- a/drivers/tty/serial/msm_serial.c
6754 +++ b/drivers/tty/serial/msm_serial.c
6755 @@ -683,17 +683,6 @@ static void msm_power(struct uart_port *port, unsigned int state,
6756 }
6757
6758 #ifdef CONFIG_CONSOLE_POLL
6759 -static int msm_poll_init(struct uart_port *port)
6760 -{
6761 - struct msm_port *msm_port = UART_TO_MSM(port);
6762 -
6763 - /* Enable single character mode on RX FIFO */
6764 - if (msm_port->is_uartdm >= UARTDM_1P4)
6765 - msm_write(port, UARTDM_DMEN_RX_SC_ENABLE, UARTDM_DMEN);
6766 -
6767 - return 0;
6768 -}
6769 -
6770 static int msm_poll_get_char_single(struct uart_port *port)
6771 {
6772 struct msm_port *msm_port = UART_TO_MSM(port);
6773 @@ -705,7 +694,7 @@ static int msm_poll_get_char_single(struct uart_port *port)
6774 return msm_read(port, rf_reg) & 0xff;
6775 }
6776
6777 -static int msm_poll_get_char_dm_1p3(struct uart_port *port)
6778 +static int msm_poll_get_char_dm(struct uart_port *port)
6779 {
6780 int c;
6781 static u32 slop;
6782 @@ -729,6 +718,10 @@ static int msm_poll_get_char_dm_1p3(struct uart_port *port)
6783 slop = msm_read(port, UARTDM_RF);
6784 c = sp[0];
6785 count--;
6786 + msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
6787 + msm_write(port, 0xFFFFFF, UARTDM_DMRX);
6788 + msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE,
6789 + UART_CR);
6790 } else {
6791 c = NO_POLL_CHAR;
6792 }
6793 @@ -752,8 +745,8 @@ static int msm_poll_get_char(struct uart_port *port)
6794 imr = msm_read(port, UART_IMR);
6795 msm_write(port, 0, UART_IMR);
6796
6797 - if (msm_port->is_uartdm == UARTDM_1P3)
6798 - c = msm_poll_get_char_dm_1p3(port);
6799 + if (msm_port->is_uartdm)
6800 + c = msm_poll_get_char_dm(port);
6801 else
6802 c = msm_poll_get_char_single(port);
6803
6804 @@ -812,7 +805,6 @@ static struct uart_ops msm_uart_pops = {
6805 .verify_port = msm_verify_port,
6806 .pm = msm_power,
6807 #ifdef CONFIG_CONSOLE_POLL
6808 - .poll_init = msm_poll_init,
6809 .poll_get_char = msm_poll_get_char,
6810 .poll_put_char = msm_poll_put_char,
6811 #endif
6812 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
6813 index 29a7be47389a..0f03988d81fc 100644
6814 --- a/drivers/tty/serial/serial_core.c
6815 +++ b/drivers/tty/serial/serial_core.c
6816 @@ -362,7 +362,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
6817 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
6818 * Die! Die! Die!
6819 */
6820 - if (baud == 38400)
6821 + if (try == 0 && baud == 38400)
6822 baud = altbaud;
6823
6824 /*
6825 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
6826 index 8fbad3410c75..848c17a58c3a 100644
6827 --- a/drivers/tty/tty_io.c
6828 +++ b/drivers/tty/tty_io.c
6829 @@ -1686,6 +1686,7 @@ int tty_release(struct inode *inode, struct file *filp)
6830 int pty_master, tty_closing, o_tty_closing, do_sleep;
6831 int idx;
6832 char buf[64];
6833 + long timeout = 0;
6834
6835 if (tty_paranoia_check(tty, inode, __func__))
6836 return 0;
6837 @@ -1770,7 +1771,11 @@ int tty_release(struct inode *inode, struct file *filp)
6838 __func__, tty_name(tty, buf));
6839 tty_unlock_pair(tty, o_tty);
6840 mutex_unlock(&tty_mutex);
6841 - schedule();
6842 + schedule_timeout_killable(timeout);
6843 + if (timeout < 120 * HZ)
6844 + timeout = 2 * timeout + 1;
6845 + else
6846 + timeout = MAX_SCHEDULE_TIMEOUT;
6847 }
6848
6849 /*
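
The tty_release() change above replaces an unbounded loop of bare schedule() calls with schedule_timeout_killable() and an exponentially growing sleep: 0, 1, 3, 7, ... jiffies, doubling plus one each pass until 120*HZ is reached, after which the task simply sleeps until woken. A standalone sketch of the backoff sequence; HZ is fixed at 100 and MAX_SCHEDULE_TIMEOUT stood in by LONG_MAX purely for illustration:

    #include <limits.h>
    #include <stdio.h>

    #define HZ 100                          /* illustrative value */
    #define MAX_SCHEDULE_TIMEOUT LONG_MAX   /* stand-in */

    int main(void)
    {
        long timeout = 0;
        int pass;

        for (pass = 0; pass < 18; pass++) {
            printf("pass %2d: sleep %ld jiffies\n", pass, timeout);
            /* in the kernel: schedule_timeout_killable(timeout); */
            if (timeout < 120 * HZ)
                timeout = 2 * timeout + 1;
            else
                timeout = MAX_SCHEDULE_TIMEOUT;
        }
        return 0;
    }

The first pass still uses a zero timeout, so the common case where the other end closes promptly is not slowed down; only a wedged hangup keeps doubling.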
6850 diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
6851 index 610b720d3b91..59b25e039968 100644
6852 --- a/drivers/tty/vt/consolemap.c
6853 +++ b/drivers/tty/vt/consolemap.c
6854 @@ -539,6 +539,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
6855
6856 /* Save original vc_unipagdir_loc in case we allocate a new one */
6857 p = *vc->vc_uni_pagedir_loc;
6858 +
6859 + if (!p) {
6860 + err = -EINVAL;
6861 +
6862 + goto out_unlock;
6863 + }
6864
6865 if (p->refcount > 1) {
6866 int j, k;
6867 @@ -623,6 +629,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
6868 set_inverse_transl(vc, p, i); /* Update inverse translations */
6869 set_inverse_trans_unicode(vc, p);
6870
6871 +out_unlock:
6872 console_unlock();
6873 return err;
6874 }
6875 diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
6876 index 619d13e29995..4ecb6501a7ea 100644
6877 --- a/drivers/usb/chipidea/core.c
6878 +++ b/drivers/usb/chipidea/core.c
6879 @@ -732,7 +732,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
6880 ci_role_destroy(ci);
6881 ci_hdrc_enter_lpm(ci, true);
6882 usb_phy_shutdown(ci->transceiver);
6883 - kfree(ci->hw_bank.regmap);
6884
6885 return 0;
6886 }
6887 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
6888 index e934e19f49f5..7daaef192c28 100644
6889 --- a/drivers/usb/class/cdc-acm.c
6890 +++ b/drivers/usb/class/cdc-acm.c
6891 @@ -145,8 +145,15 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
6892 /* devices aren't required to support these requests.
6893 * the cdc acm descriptor tells whether they do...
6894 */
6895 -#define acm_set_control(acm, control) \
6896 - acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0)
6897 +static inline int acm_set_control(struct acm *acm, int control)
6898 +{
6899 + if (acm->quirks & QUIRK_CONTROL_LINE_STATE)
6900 + return -EOPNOTSUPP;
6901 +
6902 + return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE,
6903 + control, NULL, 0);
6904 +}
6905 +
6906 #define acm_set_line(acm, line) \
6907 acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
6908 #define acm_send_break(acm, ms) \
6909 @@ -980,11 +987,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
6910 /* FIXME: Needs to clear unsupported bits in the termios */
6911 acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
6912
6913 - if (!newline.dwDTERate) {
6914 + if (C_BAUD(tty) == B0) {
6915 newline.dwDTERate = acm->line.dwDTERate;
6916 newctrl &= ~ACM_CTRL_DTR;
6917 - } else
6918 + } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
6919 newctrl |= ACM_CTRL_DTR;
6920 + }
6921
6922 if (newctrl != acm->ctrlout)
6923 acm_set_control(acm, acm->ctrlout = newctrl);
6924 @@ -1314,6 +1322,7 @@ made_compressed_probe:
6925 tty_port_init(&acm->port);
6926 acm->port.ops = &acm_port_ops;
6927 init_usb_anchor(&acm->delayed);
6928 + acm->quirks = quirks;
6929
6930 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
6931 if (!buf) {
6932 @@ -1681,6 +1690,9 @@ static const struct usb_device_id acm_ids[] = {
6933 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
6934 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
6935 },
6936 + { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
6937 + .driver_info = QUIRK_CONTROL_LINE_STATE, },
6938 + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
6939 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
6940 },
6941 /* Motorola H24 HSPA module: */
6942 diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
6943 index fc75651afe1c..d3251ebd09e2 100644
6944 --- a/drivers/usb/class/cdc-acm.h
6945 +++ b/drivers/usb/class/cdc-acm.h
6946 @@ -121,6 +121,7 @@ struct acm {
6947 unsigned int throttle_req:1; /* throttle requested */
6948 u8 bInterval;
6949 struct usb_anchor delayed; /* writes queued for a device about to be woken */
6950 + unsigned long quirks;
6951 };
6952
6953 #define CDC_DATA_INTERFACE_TYPE 0x0a
6954 @@ -132,3 +133,4 @@ struct acm {
6955 #define NOT_A_MODEM BIT(3)
6956 #define NO_DATA_INTERFACE BIT(4)
6957 #define IGNORE_DEVICE BIT(5)
6958 +#define QUIRK_CONTROL_LINE_STATE BIT(6)
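
The cdc-acm change turns acm_set_control() from a macro into an inline that consults a per-device quirks bitmap first, so a device flagged with QUIRK_CONTROL_LINE_STATE (the Simtec Entropy Key added to the id table) never receives a SET_CONTROL_LINE_STATE request it cannot handle. The bitmap-plus-gate pattern in standalone form; the structure, the local BIT() and set_control() are stand-ins, not the kernel's:

    #include <stdio.h>

    #define BIT(n)                    (1UL << (n))
    #define QUIRK_CONTROL_LINE_STATE  BIT(6)

    struct dev { unsigned long quirks; };

    /* Gate one control request on a per-device quirk bit. */
    static int set_control(const struct dev *d, int control)
    {
        if (d->quirks & QUIRK_CONTROL_LINE_STATE)
            return -95;      /* -EOPNOTSUPP: device cannot handle it */
        printf("sending SET_CONTROL_LINE_STATE, value=%d\n", control);
        return 0;
    }

    int main(void)
    {
        struct dev normal = { 0 };
        struct dev quirky = { QUIRK_CONTROL_LINE_STATE };

        set_control(&normal, 3);
        printf("quirky device: %d\n", set_control(&quirky, 3));
        return 0;
    }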
6959 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
6960 index 487abcfcccd8..258e6fecdf5e 100644
6961 --- a/drivers/usb/core/hcd.c
6962 +++ b/drivers/usb/core/hcd.c
6963 @@ -2057,6 +2057,8 @@ int usb_alloc_streams(struct usb_interface *interface,
6964 return -EINVAL;
6965 if (dev->speed != USB_SPEED_SUPER)
6966 return -EINVAL;
6967 + if (dev->state < USB_STATE_CONFIGURED)
6968 + return -ENODEV;
6969
6970 for (i = 0; i < num_eps; i++) {
6971 /* Streams only apply to bulk endpoints. */
6972 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
6973 index dc849154f9de..674c262907d9 100644
6974 --- a/drivers/usb/core/hub.c
6975 +++ b/drivers/usb/core/hub.c
6976 @@ -4540,6 +4540,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1)
6977 struct usb_qualifier_descriptor *qual;
6978 int status;
6979
6980 + if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER)
6981 + return;
6982 +
6983 qual = kmalloc (sizeof *qual, GFP_KERNEL);
6984 if (qual == NULL)
6985 return;
6986 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6987 index 814e712655e4..39b4081b632d 100644
6988 --- a/drivers/usb/core/quirks.c
6989 +++ b/drivers/usb/core/quirks.c
6990 @@ -93,6 +93,16 @@ static const struct usb_device_id usb_quirk_list[] = {
6991 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
6992 USB_QUIRK_CONFIG_INTF_STRINGS },
6993
6994 + /* Elan Touchscreen */
6995 + { USB_DEVICE(0x04f3, 0x0089), .driver_info =
6996 + USB_QUIRK_DEVICE_QUALIFIER },
6997 +
6998 + { USB_DEVICE(0x04f3, 0x009b), .driver_info =
6999 + USB_QUIRK_DEVICE_QUALIFIER },
7000 +
7001 + { USB_DEVICE(0x04f3, 0x016f), .driver_info =
7002 + USB_QUIRK_DEVICE_QUALIFIER },
7003 +
7004 /* Roland SC-8820 */
7005 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
7006
7007 diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
7008 index fc0de3753648..97a5a0c7df7d 100644
7009 --- a/drivers/usb/dwc3/dwc3-omap.c
7010 +++ b/drivers/usb/dwc3/dwc3-omap.c
7011 @@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
7012 {
7013 struct dwc3_omap *omap = dev_get_drvdata(dev);
7014
7015 - dwc3_omap_write_irqmisc_set(omap, 0x00);
7016 + dwc3_omap_disable_irqs(omap);
7017
7018 return 0;
7019 }
7020 @@ -607,19 +607,8 @@ static int dwc3_omap_prepare(struct device *dev)
7021 static void dwc3_omap_complete(struct device *dev)
7022 {
7023 struct dwc3_omap *omap = dev_get_drvdata(dev);
7024 - u32 reg;
7025
7026 - reg = (USBOTGSS_IRQMISC_OEVT |
7027 - USBOTGSS_IRQMISC_DRVVBUS_RISE |
7028 - USBOTGSS_IRQMISC_CHRGVBUS_RISE |
7029 - USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
7030 - USBOTGSS_IRQMISC_IDPULLUP_RISE |
7031 - USBOTGSS_IRQMISC_DRVVBUS_FALL |
7032 - USBOTGSS_IRQMISC_CHRGVBUS_FALL |
7033 - USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
7034 - USBOTGSS_IRQMISC_IDPULLUP_FALL);
7035 -
7036 - dwc3_omap_write_irqmisc_set(omap, reg);
7037 + dwc3_omap_enable_irqs(omap);
7038 }
7039
7040 static int dwc3_omap_suspend(struct device *dev)
7041 diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
7042 index 21a352079bc2..0985ff715c0c 100644
7043 --- a/drivers/usb/dwc3/ep0.c
7044 +++ b/drivers/usb/dwc3/ep0.c
7045 @@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
7046
7047 /* stall is always issued on EP0 */
7048 dep = dwc->eps[0];
7049 - __dwc3_gadget_ep_set_halt(dep, 1);
7050 + __dwc3_gadget_ep_set_halt(dep, 1, false);
7051 dep->flags = DWC3_EP_ENABLED;
7052 dwc->delayed_status = false;
7053
7054 @@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
7055 return -EINVAL;
7056 if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
7057 break;
7058 - ret = __dwc3_gadget_ep_set_halt(dep, set);
7059 + ret = __dwc3_gadget_ep_set_halt(dep, set, true);
7060 if (ret)
7061 return -EINVAL;
7062 break;
7063 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
7064 index 490a6ca00733..8cbbb540eca3 100644
7065 --- a/drivers/usb/dwc3/gadget.c
7066 +++ b/drivers/usb/dwc3/gadget.c
7067 @@ -615,12 +615,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
7068 if (!usb_endpoint_xfer_isoc(desc))
7069 return 0;
7070
7071 - memset(&trb_link, 0, sizeof(trb_link));
7072 -
7073 /* Link TRB for ISOC. The HWO bit is never reset */
7074 trb_st_hw = &dep->trb_pool[0];
7075
7076 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
7077 + memset(trb_link, 0, sizeof(*trb_link));
7078
7079 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
7080 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
7081 @@ -671,7 +670,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
7082
7083 /* make sure HW endpoint isn't stalled */
7084 if (dep->flags & DWC3_EP_STALL)
7085 - __dwc3_gadget_ep_set_halt(dep, 0);
7086 + __dwc3_gadget_ep_set_halt(dep, 0, false);
7087
7088 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
7089 reg &= ~DWC3_DALEPENA_EP(dep->number);
7090 @@ -1287,7 +1286,7 @@ out0:
7091 return ret;
7092 }
7093
7094 -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
7095 +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
7096 {
7097 struct dwc3_gadget_ep_cmd_params params;
7098 struct dwc3 *dwc = dep->dwc;
7099 @@ -1296,6 +1295,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value)
7100 memset(&params, 0x00, sizeof(params));
7101
7102 if (value) {
7103 + if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
7104 + (!list_empty(&dep->req_queued) ||
7105 + !list_empty(&dep->request_list)))) {
7106 + dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
7107 + dep->name);
7108 + return -EAGAIN;
7109 + }
7110 +
7111 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
7112 DWC3_DEPCMD_SETSTALL, &params);
7113 if (ret)
7114 @@ -1333,7 +1340,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
7115 goto out;
7116 }
7117
7118 - ret = __dwc3_gadget_ep_set_halt(dep, value);
7119 + ret = __dwc3_gadget_ep_set_halt(dep, value, false);
7120 out:
7121 spin_unlock_irqrestore(&dwc->lock, flags);
7122
7123 @@ -1353,7 +1360,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
7124 if (dep->number == 0 || dep->number == 1)
7125 return dwc3_gadget_ep0_set_halt(ep, 1);
7126 else
7127 - return dwc3_gadget_ep_set_halt(ep, 1);
7128 + return __dwc3_gadget_ep_set_halt(dep, 1, false);
7129 }
7130
7131 /* -------------------------------------------------------------------------- */
7132 diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
7133 index a0ee75b68a80..ac62558231be 100644
7134 --- a/drivers/usb/dwc3/gadget.h
7135 +++ b/drivers/usb/dwc3/gadget.h
7136 @@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc);
7137 int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
7138 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
7139 gfp_t gfp_flags);
7140 -int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value);
7141 +int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
7142
7143 /**
7144 * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
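
Across the dwc3 hunks, __dwc3_gadget_ep_set_halt() gains a protocol flag: stalls required by the USB protocol (ep0 handling, host SetFeature) are always issued, while a function driver asking to halt an endpoint that is busy or still has queued requests now gets -EAGAIN. A condensed standalone restatement of that decision; it is simplified, since the real check also looks at the endpoint direction and at the two request lists separately:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Simplified model of the new halt gating: only non-protocol
     * (function driver initiated) stalls are refused while work is
     * still pending on the endpoint.
     */
    static int set_halt(bool protocol, bool ep_busy, bool requests_pending)
    {
        if (!protocol && (ep_busy || requests_pending))
            return -11;   /* -EAGAIN in the kernel */
        return 0;         /* go ahead and issue DWC3_DEPCMD_SETSTALL */
    }

    int main(void)
    {
        printf("function driver, pending I/O : %d\n", set_halt(false, false, true));
        printf("host SetFeature, pending I/O : %d\n", set_halt(true,  false, true));
        printf("function driver, idle        : %d\n", set_halt(false, false, false));
        return 0;
    }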
7145 diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
7146 index ab1065afbbd0..3384486c2884 100644
7147 --- a/drivers/usb/gadget/function/f_acm.c
7148 +++ b/drivers/usb/gadget/function/f_acm.c
7149 @@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
7150 if (acm->notify->driver_data) {
7151 VDBG(cdev, "reset acm control interface %d\n", intf);
7152 usb_ep_disable(acm->notify);
7153 - } else {
7154 - VDBG(cdev, "init acm ctrl interface %d\n", intf);
7155 + }
7156 +
7157 + if (!acm->notify->desc)
7158 if (config_ep_by_speed(cdev->gadget, f, acm->notify))
7159 return -EINVAL;
7160 - }
7161 +
7162 usb_ep_enable(acm->notify);
7163 acm->notify->driver_data = acm;
7164
7165 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
7166 index 7ad7137ba39a..a3c277c46046 100644
7167 --- a/drivers/usb/gadget/function/f_fs.c
7168 +++ b/drivers/usb/gadget/function/f_fs.c
7169 @@ -648,15 +648,26 @@ static void ffs_user_copy_worker(struct work_struct *work)
7170 if (io_data->read && ret > 0) {
7171 int i;
7172 size_t pos = 0;
7173 +
7174 + /*
7175 + * Since req->length may be bigger than io_data->len (after
7176 + * being rounded up to maxpacketsize), we may end up with more
7177 + * data then user space has space for.
7178 + */
7179 + ret = min_t(int, ret, io_data->len);
7180 +
7181 use_mm(io_data->mm);
7182 for (i = 0; i < io_data->nr_segs; i++) {
7183 + size_t len = min_t(size_t, ret - pos,
7184 + io_data->iovec[i].iov_len);
7185 + if (!len)
7186 + break;
7187 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
7188 - &io_data->buf[pos],
7189 - io_data->iovec[i].iov_len))) {
7190 + &io_data->buf[pos], len))) {
7191 ret = -EFAULT;
7192 break;
7193 }
7194 - pos += io_data->iovec[i].iov_len;
7195 + pos += len;
7196 }
7197 unuse_mm(io_data->mm);
7198 }
7199 @@ -688,7 +699,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
7200 struct ffs_epfile *epfile = file->private_data;
7201 struct ffs_ep *ep;
7202 char *data = NULL;
7203 - ssize_t ret, data_len;
7204 + ssize_t ret, data_len = -EINVAL;
7205 int halt;
7206
7207 /* Are we still active? */
7208 @@ -788,13 +799,30 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
7209 /* Fire the request */
7210 struct usb_request *req;
7211
7212 + /*
7213 + * Sanity Check: even though data_len can't be used
7214 + * uninitialized at the time I write this comment, some
7215 + * compilers complain about this situation.
7216 + * In order to keep the code clean from warnings, data_len is
7217 + * being initialized to -EINVAL during its declaration, which
7218 + * means we can't rely on compiler anymore to warn no future
7219 + * changes won't result in data_len being used uninitialized.
7220 + * For such reason, we're adding this redundant sanity check
7221 + * here.
7222 + */
7223 + if (unlikely(data_len == -EINVAL)) {
7224 + WARN(1, "%s: data_len == -EINVAL\n", __func__);
7225 + ret = -EINVAL;
7226 + goto error_lock;
7227 + }
7228 +
7229 if (io_data->aio) {
7230 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
7231 if (unlikely(!req))
7232 goto error_lock;
7233
7234 req->buf = data;
7235 - req->length = io_data->len;
7236 + req->length = data_len;
7237
7238 io_data->buf = data;
7239 io_data->ep = ep->ep;
7240 @@ -816,7 +844,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
7241
7242 req = ep->req;
7243 req->buf = data;
7244 - req->length = io_data->len;
7245 + req->length = data_len;
7246
7247 req->context = &done;
7248 req->complete = ffs_epfile_io_complete;
7249 @@ -2626,8 +2654,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
7250 func->conf = c;
7251 func->gadget = c->cdev->gadget;
7252
7253 - ffs_data_get(func->ffs);
7254 -
7255 /*
7256 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
7257 * configurations are bound in sequence with list_for_each_entry,
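
The f_fs.c read path now clamps twice: the completed length is first limited to what userspace asked for, since req->length may have been rounded up to a multiple of maxpacketsize, and each iovec segment then receives at most its own iov_len bytes, stopping once the data runs out. The same clamping extracted into a standalone sketch; scatter_copy() and struct iovec_s are illustrative names, not kernel APIs:

    #include <stdio.h>
    #include <string.h>

    struct iovec_s { char *base; size_t len; };   /* illustrative stand-in */

    /*
     * Copy at most 'total' bytes of src into the segments, never writing
     * past any segment and stopping once the data is exhausted, which is
     * the clamping the patched ffs_user_copy_worker() applies before
     * copy_to_user().
     */
    static size_t scatter_copy(struct iovec_s *iov, int nr,
                               const char *src, size_t total)
    {
        size_t pos = 0;
        int i;

        for (i = 0; i < nr && pos < total; i++) {
            size_t len = total - pos < iov[i].len ? total - pos : iov[i].len;

            memcpy(iov[i].base, src + pos, len);
            pos += len;
        }
        return pos;
    }

    int main(void)
    {
        char a[4], b[4];
        struct iovec_s iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        const char data[] = "0123456789abcdef"; /* controller returned 16 bytes */

        /* ...but userspace only asked for 6: copy 4 into a, then 2 into b. */
        printf("copied %zu bytes\n", scatter_copy(iov, 2, data, 6));
        return 0;
    }

memcpy() stands in for copy_to_user(); the point is only the length arithmetic that prevents writing past a short user buffer.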
7258 diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
7259 index b0d98172bc07..38913eac6e7c 100644
7260 --- a/drivers/usb/gadget/udc/udc-core.c
7261 +++ b/drivers/usb/gadget/udc/udc-core.c
7262 @@ -458,6 +458,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
7263 {
7264 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
7265
7266 + if (!udc->driver) {
7267 + dev_err(dev, "soft-connect without a gadget driver\n");
7268 + return -EOPNOTSUPP;
7269 + }
7270 +
7271 if (sysfs_streq(buf, "connect")) {
7272 usb_gadget_udc_start(udc->gadget, udc->driver);
7273 usb_gadget_connect(udc->gadget);
7274 diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
7275 index 82800a775501..6f1d48ebc986 100644
7276 --- a/drivers/usb/host/Kconfig
7277 +++ b/drivers/usb/host/Kconfig
7278 @@ -220,7 +220,7 @@ config USB_EHCI_SH
7279
7280 config USB_EHCI_EXYNOS
7281 tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
7282 - depends on PLAT_S5P || ARCH_EXYNOS
7283 + depends on ARCH_S5PV210 || ARCH_EXYNOS
7284 help
7285 Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
7286
7287 @@ -527,7 +527,7 @@ config USB_OHCI_SH
7288
7289 config USB_OHCI_EXYNOS
7290 tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
7291 - depends on PLAT_S5P || ARCH_EXYNOS
7292 + depends on ARCH_S5PV210 || ARCH_EXYNOS
7293 help
7294 Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
7295
7296 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
7297 index c22a3e15a16e..d125568d73ff 100644
7298 --- a/drivers/usb/host/xhci-pci.c
7299 +++ b/drivers/usb/host/xhci-pci.c
7300 @@ -126,20 +126,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
7301 xhci->quirks |= XHCI_AVOID_BEI;
7302 }
7303 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
7304 - (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
7305 - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
7306 - /* Workaround for occasional spurious wakeups from S5 (or
7307 - * any other sleep) on Haswell machines with LPT and LPT-LP
7308 - * with the new Intel BIOS
7309 - */
7310 - /* Limit the quirk to only known vendors, as this triggers
7311 - * yet another BIOS bug on some other machines
7312 - * https://bugzilla.kernel.org/show_bug.cgi?id=66171
7313 - */
7314 - if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
7315 - xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
7316 - }
7317 - if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
7318 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
7319 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
7320 }
7321 @@ -160,6 +146,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
7322 pdev->device == 0x3432)
7323 xhci->quirks |= XHCI_BROKEN_STREAMS;
7324
7325 + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
7326 + pdev->device == 0x1042)
7327 + xhci->quirks |= XHCI_BROKEN_STREAMS;
7328 +
7329 if (xhci->quirks & XHCI_RESET_ON_RESUME)
7330 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
7331 "QUIRK: Resetting on resume");
7332 diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
7333 index 3ee133f675ab..013fd1c034da 100644
7334 --- a/drivers/usb/musb/musb_cppi41.c
7335 +++ b/drivers/usb/musb/musb_cppi41.c
7336 @@ -209,7 +209,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
7337 }
7338 }
7339
7340 - if (!list_empty(&controller->early_tx_list)) {
7341 + if (!list_empty(&controller->early_tx_list) &&
7342 + !hrtimer_is_queued(&controller->early_tx)) {
7343 ret = HRTIMER_RESTART;
7344 hrtimer_forward_now(&controller->early_tx,
7345 ktime_set(0, 50 * NSEC_PER_USEC));
7346 diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
7347 index 154bcf1b5dfa..b18f8d5e4f98 100644
7348 --- a/drivers/usb/musb/musb_dsps.c
7349 +++ b/drivers/usb/musb/musb_dsps.c
7350 @@ -896,7 +896,9 @@ static int dsps_resume(struct device *dev)
7351 dsps_writel(mbase, wrp->mode, glue->context.mode);
7352 dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
7353 dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
7354 - setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
7355 + if (musb->xceiv->state == OTG_STATE_B_IDLE &&
7356 + musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
7357 + mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
7358
7359 return 0;
7360 }
7361 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
7362 index eca1747ca8c7..cfd009dc4018 100644
7363 --- a/drivers/usb/serial/cp210x.c
7364 +++ b/drivers/usb/serial/cp210x.c
7365 @@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = {
7366 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
7367 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
7368 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
7369 + { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
7370 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
7371 { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
7372 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
7373 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
7374 index dc72b924c399..0dad8ce5a609 100644
7375 --- a/drivers/usb/serial/ftdi_sio.c
7376 +++ b/drivers/usb/serial/ftdi_sio.c
7377 @@ -140,6 +140,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
7378 * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
7379 */
7380 static const struct usb_device_id id_table_combined[] = {
7381 + { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
7382 { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
7383 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
7384 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
7385 @@ -661,6 +662,8 @@ static const struct usb_device_id id_table_combined[] = {
7386 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
7387 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
7388 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
7389 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
7390 + { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
7391 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
7392 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
7393 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
7394 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
7395 index 5937b2d242f2..6786b705ccf6 100644
7396 --- a/drivers/usb/serial/ftdi_sio_ids.h
7397 +++ b/drivers/usb/serial/ftdi_sio_ids.h
7398 @@ -30,6 +30,12 @@
7399
7400 /*** third-party PIDs (using FTDI_VID) ***/
7401
7402 +/*
7403 + * Certain versions of the official Windows FTDI driver reprogrammed
7404 + * counterfeit FTDI devices to PID 0. Support these devices anyway.
7405 + */
7406 +#define FTDI_BRICK_PID 0x0000
7407 +
7408 #define FTDI_LUMEL_PD12_PID 0x6002
7409
7410 /*
7411 @@ -143,8 +149,12 @@
7412 * Xsens Technologies BV products (http://www.xsens.com).
7413 */
7414 #define XSENS_VID 0x2639
7415 -#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
7416 +#define XSENS_AWINDA_STATION_PID 0x0101
7417 +#define XSENS_AWINDA_DONGLE_PID 0x0102
7418 #define XSENS_MTW_PID 0x0200 /* Xsens MTw */
7419 +#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
7420 +
7421 +/* Xsens devices using FTDI VID */
7422 #define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
7423 #define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
7424 #define XSENS_CONVERTER_2_PID 0xD38A
7425 diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
7426 index 078f9ed419c8..a31ff1503a99 100644
7427 --- a/drivers/usb/serial/kobil_sct.c
7428 +++ b/drivers/usb/serial/kobil_sct.c
7429 @@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
7430 port->interrupt_out_urb->transfer_buffer_length = length;
7431
7432 priv->cur_pos = priv->cur_pos + length;
7433 - result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO);
7434 + result = usb_submit_urb(port->interrupt_out_urb,
7435 + GFP_ATOMIC);
7436 dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
7437 todo = priv->filled - priv->cur_pos;
7438
7439 @@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
7440 if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
7441 priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
7442 result = usb_submit_urb(port->interrupt_in_urb,
7443 - GFP_NOIO);
7444 + GFP_ATOMIC);
7445 dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
7446 }
7447 }
7448 diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
7449 index 4856fb7e637e..4b7bfb394a32 100644
7450 --- a/drivers/usb/serial/opticon.c
7451 +++ b/drivers/usb/serial/opticon.c
7452 @@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
7453
7454 /* The connected devices do not have a bulk write endpoint,
7455 * to transmit data to de barcode device the control endpoint is used */
7456 - dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
7457 + dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
7458 if (!dr) {
7459 count = -ENOMEM;
7460 goto error_no_dr;
7461 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7462 index 54a8120897a6..e87219a0f2f7 100644
7463 --- a/drivers/usb/serial/option.c
7464 +++ b/drivers/usb/serial/option.c
7465 @@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
7466 #define TELIT_PRODUCT_DE910_DUAL 0x1010
7467 #define TELIT_PRODUCT_UE910_V2 0x1012
7468 #define TELIT_PRODUCT_LE920 0x1200
7469 +#define TELIT_PRODUCT_LE910 0x1201
7470
7471 /* ZTE PRODUCTS */
7472 #define ZTE_VENDOR_ID 0x19d2
7473 @@ -361,6 +362,7 @@ static void option_instat_callback(struct urb *urb);
7474
7475 /* Haier products */
7476 #define HAIER_VENDOR_ID 0x201e
7477 +#define HAIER_PRODUCT_CE81B 0x10f8
7478 #define HAIER_PRODUCT_CE100 0x2009
7479
7480 /* Cinterion (formerly Siemens) products */
7481 @@ -588,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
7482 .reserved = BIT(3) | BIT(4),
7483 };
7484
7485 +static const struct option_blacklist_info telit_le910_blacklist = {
7486 + .sendsetup = BIT(0),
7487 + .reserved = BIT(1) | BIT(2),
7488 +};
7489 +
7490 static const struct option_blacklist_info telit_le920_blacklist = {
7491 .sendsetup = BIT(0),
7492 .reserved = BIT(1) | BIT(5),
7493 @@ -1137,6 +1144,8 @@ static const struct usb_device_id option_ids[] = {
7494 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
7495 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
7496 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
7497 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
7498 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
7499 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
7500 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
7501 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
7502 @@ -1612,6 +1621,7 @@ static const struct usb_device_id option_ids[] = {
7503 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
7504 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
7505 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
7506 + { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
7507 /* Pirelli */
7508 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
7509 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
7510 diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
7511 index 22c7d4360fa2..b1d815eb6d0b 100644
7512 --- a/drivers/usb/storage/transport.c
7513 +++ b/drivers/usb/storage/transport.c
7514 @@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
7515 */
7516 if (result == USB_STOR_XFER_LONG)
7517 fake_sense = 1;
7518 +
7519 + /*
7520 + * Sometimes a device will mistakenly skip the data phase
7521 + * and go directly to the status phase without sending a
7522 + * zero-length packet. If we get a 13-byte response here,
7523 + * check whether it really is a CSW.
7524 + */
7525 + if (result == USB_STOR_XFER_SHORT &&
7526 + srb->sc_data_direction == DMA_FROM_DEVICE &&
7527 + transfer_length - scsi_get_resid(srb) ==
7528 + US_BULK_CS_WRAP_LEN) {
7529 + struct scatterlist *sg = NULL;
7530 + unsigned int offset = 0;
7531 +
7532 + if (usb_stor_access_xfer_buf((unsigned char *) bcs,
7533 + US_BULK_CS_WRAP_LEN, srb, &sg,
7534 + &offset, FROM_XFER_BUF) ==
7535 + US_BULK_CS_WRAP_LEN &&
7536 + bcs->Signature ==
7537 + cpu_to_le32(US_BULK_CS_SIGN)) {
7538 + usb_stor_dbg(us, "Device skipped data phase\n");
7539 + scsi_set_resid(srb, transfer_length);
7540 + goto skipped_data_phase;
7541 + }
7542 + }
7543 }
7544
7545 /* See flow chart on pg 15 of the Bulk Only Transport spec for
7546 @@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
7547 if (result != USB_STOR_XFER_GOOD)
7548 return USB_STOR_TRANSPORT_ERROR;
7549
7550 + skipped_data_phase:
7551 /* check bulk status */
7552 residue = le32_to_cpu(bcs->Residue);
7553 usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
7554 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
7555 index 8511b54a65d9..2fefaf923e4a 100644
7556 --- a/drivers/usb/storage/unusual_uas.h
7557 +++ b/drivers/usb/storage/unusual_uas.h
7558 @@ -54,6 +54,20 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
7559 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7560 US_FL_NO_ATA_1X),
7561
7562 +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
7563 +UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
7564 + "Seagate",
7565 + "Expansion Desk",
7566 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7567 + US_FL_NO_ATA_1X),
7568 +
7569 +/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */
7570 +UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
7571 + "Seagate",
7572 + "Backup Plus",
7573 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7574 + US_FL_NO_ATA_1X),
7575 +
7576 /* https://bbs.archlinux.org/viewtopic.php?id=183190 */
7577 UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
7578 "Seagate",
7579 @@ -61,6 +75,13 @@ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
7580 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7581 US_FL_NO_ATA_1X),
7582
7583 +/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
7584 +UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
7585 + "Seagate",
7586 + "Backup+ BK",
7587 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7588 + US_FL_NO_ATA_1X),
7589 +
7590 /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
7591 UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
7592 "JMicron",
7593 @@ -75,3 +96,10 @@ UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
7594 "ASM1051",
7595 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7596 US_FL_IGNORE_UAS),
7597 +
7598 +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
7599 +UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
7600 + "VIA",
7601 + "VL711",
7602 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7603 + US_FL_NO_ATA_1X),
7604 diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
7605 index 61b182bf32a2..dbfe4eecf12e 100644
7606 --- a/drivers/video/console/bitblit.c
7607 +++ b/drivers/video/console/bitblit.c
7608 @@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
7609 static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
7610 int bottom_only)
7611 {
7612 - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
7613 unsigned int cw = vc->vc_font.width;
7614 unsigned int ch = vc->vc_font.height;
7615 unsigned int rw = info->var.xres - (vc->vc_cols*cw);
7616 @@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
7617 unsigned int bs = info->var.yres - bh;
7618 struct fb_fillrect region;
7619
7620 - region.color = attr_bgcol_ec(bgshift, vc, info);
7621 + region.color = 0;
7622 region.rop = ROP_COPY;
7623
7624 if (rw && !bottom_only) {
7625 diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
7626 index 41b32ae23dac..5a3cbf6dff4d 100644
7627 --- a/drivers/video/console/fbcon_ccw.c
7628 +++ b/drivers/video/console/fbcon_ccw.c
7629 @@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
7630 unsigned int bh = info->var.xres - (vc->vc_rows*ch);
7631 unsigned int bs = vc->vc_rows*ch;
7632 struct fb_fillrect region;
7633 - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
7634
7635 - region.color = attr_bgcol_ec(bgshift,vc,info);
7636 + region.color = 0;
7637 region.rop = ROP_COPY;
7638
7639 if (rw && !bottom_only) {
7640 diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c
7641 index a93670ef7f89..e7ee44db4e98 100644
7642 --- a/drivers/video/console/fbcon_cw.c
7643 +++ b/drivers/video/console/fbcon_cw.c
7644 @@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
7645 unsigned int bh = info->var.xres - (vc->vc_rows*ch);
7646 unsigned int rs = info->var.yres - rw;
7647 struct fb_fillrect region;
7648 - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
7649
7650 - region.color = attr_bgcol_ec(bgshift,vc,info);
7651 + region.color = 0;
7652 region.rop = ROP_COPY;
7653
7654 if (rw && !bottom_only) {
7655 diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c
7656 index ff0872c0498b..19e3714abfe8 100644
7657 --- a/drivers/video/console/fbcon_ud.c
7658 +++ b/drivers/video/console/fbcon_ud.c
7659 @@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
7660 unsigned int rw = info->var.xres - (vc->vc_cols*cw);
7661 unsigned int bh = info->var.yres - (vc->vc_rows*ch);
7662 struct fb_fillrect region;
7663 - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
7664
7665 - region.color = attr_bgcol_ec(bgshift,vc,info);
7666 + region.color = 0;
7667 region.rop = ROP_COPY;
7668
7669 if (rw && !bottom_only) {
7670 diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
7671 index bcb57235fcc7..6d4bfeecee35 100644
7672 --- a/drivers/video/fbdev/core/cfbcopyarea.c
7673 +++ b/drivers/video/fbdev/core/cfbcopyarea.c
7674 @@ -55,8 +55,8 @@ bitcpy(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
7675 * If you suspect bug in this function, compare it with this simple
7676 * memmove implementation.
7677 */
7678 - fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
7679 - (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
7680 + memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
7681 + (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
7682 return;
7683 #endif
7684
7685 @@ -221,8 +221,8 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
7686 * If you suspect bug in this function, compare it with this simple
7687 * memmove implementation.
7688 */
7689 - fb_memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
7690 - (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
7691 + memmove((char *)dst + ((dst_idx & (bits - 1))) / 8,
7692 + (char *)src + ((src_idx & (bits - 1))) / 8, n / 8);
7693 return;
7694 #endif
7695
7696 @@ -324,7 +324,10 @@ bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, unsigned dst_idx,
7697 d0 = d0 << left | d1 >> right;
7698 }
7699 d0 = fb_rev_pixels_in_long(d0, bswapmask);
7700 - FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
7701 + if (!first)
7702 + FB_WRITEL(d0, dst);
7703 + else
7704 + FB_WRITEL(comp(d0, FB_READL(dst), first), dst);
7705 d0 = d1;
7706 dst--;
7707 n -= dst_idx+1;
7708 diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
7709 index 3d1463c6b120..add40d00dcdb 100644
7710 --- a/drivers/virtio/virtio_pci.c
7711 +++ b/drivers/virtio/virtio_pci.c
7712 @@ -789,6 +789,7 @@ static int virtio_pci_restore(struct device *dev)
7713 struct pci_dev *pci_dev = to_pci_dev(dev);
7714 struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
7715 struct virtio_driver *drv;
7716 + unsigned status = 0;
7717 int ret;
7718
7719 drv = container_of(vp_dev->vdev.dev.driver,
7720 @@ -799,14 +800,40 @@ static int virtio_pci_restore(struct device *dev)
7721 return ret;
7722
7723 pci_set_master(pci_dev);
7724 + /* We always start by resetting the device, in case a previous
7725 + * driver messed it up. */
7726 + vp_reset(&vp_dev->vdev);
7727 +
7728 + /* Acknowledge that we've seen the device. */
7729 + status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
7730 + vp_set_status(&vp_dev->vdev, status);
7731 +
7732 + /* Maybe driver failed before freeze.
7733 + * Restore the failed status, for debugging. */
7734 + status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED;
7735 + vp_set_status(&vp_dev->vdev, status);
7736 +
7737 + if (!drv)
7738 + return 0;
7739 +
7740 + /* We have a driver! */
7741 + status |= VIRTIO_CONFIG_S_DRIVER;
7742 + vp_set_status(&vp_dev->vdev, status);
7743 +
7744 vp_finalize_features(&vp_dev->vdev);
7745
7746 - if (drv && drv->restore)
7747 + if (drv->restore) {
7748 ret = drv->restore(&vp_dev->vdev);
7749 + if (ret) {
7750 + status |= VIRTIO_CONFIG_S_FAILED;
7751 + vp_set_status(&vp_dev->vdev, status);
7752 + return ret;
7753 + }
7754 + }
7755
7756 /* Finally, tell the device we're all set */
7757 - if (!ret)
7758 - vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
7759 + status |= VIRTIO_CONFIG_S_DRIVER_OK;
7760 + vp_set_status(&vp_dev->vdev, status);
7761
7762 return ret;
7763 }
7764 diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
7765 index 54c84daec9b5..74097722ad9f 100644
7766 --- a/fs/btrfs/file-item.c
7767 +++ b/fs/btrfs/file-item.c
7768 @@ -423,7 +423,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
7769 ret = 0;
7770 fail:
7771 while (ret < 0 && !list_empty(&tmplist)) {
7772 - sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
7773 + sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
7774 list_del(&sums->list);
7775 kfree(sums);
7776 }
7777 diff --git a/fs/buffer.c b/fs/buffer.c
7778 index 3588a80854b2..72daaa516090 100644
7779 --- a/fs/buffer.c
7780 +++ b/fs/buffer.c
7781 @@ -2082,6 +2082,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
7782 struct page *page, void *fsdata)
7783 {
7784 struct inode *inode = mapping->host;
7785 + loff_t old_size = inode->i_size;
7786 int i_size_changed = 0;
7787
7788 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
7789 @@ -2101,6 +2102,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
7790 unlock_page(page);
7791 page_cache_release(page);
7792
7793 + if (old_size < pos)
7794 + pagecache_isize_extended(inode, old_size, pos);
7795 /*
7796 * Don't mark the inode dirty under page lock. First, it unnecessarily
7797 * makes the holding time of page lock longer. Second, it forces lock
7798 @@ -2318,6 +2321,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
7799 err = 0;
7800
7801 balance_dirty_pages_ratelimited(mapping);
7802 +
7803 + if (unlikely(fatal_signal_pending(current))) {
7804 + err = -EINTR;
7805 + goto out;
7806 + }
7807 }
7808
7809 /* page covers the boundary, find the boundary offset */
7810 diff --git a/fs/dcache.c b/fs/dcache.c
7811 index cb25a1a5e307..34b40be8af11 100644
7812 --- a/fs/dcache.c
7813 +++ b/fs/dcache.c
7814 @@ -2675,11 +2675,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
7815 if (!IS_ROOT(new)) {
7816 spin_unlock(&inode->i_lock);
7817 dput(new);
7818 + iput(inode);
7819 return ERR_PTR(-EIO);
7820 }
7821 if (d_ancestor(new, dentry)) {
7822 spin_unlock(&inode->i_lock);
7823 dput(new);
7824 + iput(inode);
7825 return ERR_PTR(-EIO);
7826 }
7827 write_seqlock(&rename_lock);
7828 @@ -2810,6 +2812,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
7829 * the beginning of the name. The sequence number check at the caller will
7830 * retry it again when a d_move() does happen. So any garbage in the buffer
7831 * due to mismatched pointer and length will be discarded.
7832 + *
7833 + * Data dependency barrier is needed to make sure that we see that terminating
7834 + * NUL. Alpha strikes again, film at 11...
7835 */
7836 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
7837 {
7838 @@ -2817,6 +2822,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
7839 u32 dlen = ACCESS_ONCE(name->len);
7840 char *p;
7841
7842 + smp_read_barrier_depends();
7843 +
7844 *buflen -= dlen + 1;
7845 if (*buflen < 0)
7846 return -ENAMETOOLONG;
7847 diff --git a/fs/ext3/super.c b/fs/ext3/super.c
7848 index 622e88249024..2c42e739e3d1 100644
7849 --- a/fs/ext3/super.c
7850 +++ b/fs/ext3/super.c
7851 @@ -1354,13 +1354,6 @@ set_qf_format:
7852 "not specified.");
7853 return 0;
7854 }
7855 - } else {
7856 - if (sbi->s_jquota_fmt) {
7857 - ext3_msg(sb, KERN_ERR, "error: journaled quota format "
7858 - "specified with no journaling "
7859 - "enabled.");
7860 - return 0;
7861 - }
7862 }
7863 #endif
7864 return 1;
7865 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
7866 index 581ef40fbe90..e0691559c8eb 100644
7867 --- a/fs/ext4/balloc.c
7868 +++ b/fs/ext4/balloc.c
7869 @@ -176,7 +176,7 @@ static unsigned int num_clusters_in_group(struct super_block *sb,
7870 }
7871
7872 /* Initializes an uninitialized block bitmap */
7873 -static void ext4_init_block_bitmap(struct super_block *sb,
7874 +static int ext4_init_block_bitmap(struct super_block *sb,
7875 struct buffer_head *bh,
7876 ext4_group_t block_group,
7877 struct ext4_group_desc *gdp)
7878 @@ -192,7 +192,6 @@ static void ext4_init_block_bitmap(struct super_block *sb,
7879 /* If checksum is bad mark all blocks used to prevent allocation
7880 * essentially implementing a per-group read-only flag. */
7881 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
7882 - ext4_error(sb, "Checksum bad for group %u", block_group);
7883 grp = ext4_get_group_info(sb, block_group);
7884 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
7885 percpu_counter_sub(&sbi->s_freeclusters_counter,
7886 @@ -205,7 +204,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
7887 count);
7888 }
7889 set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
7890 - return;
7891 + return -EIO;
7892 }
7893 memset(bh->b_data, 0, sb->s_blocksize);
7894
7895 @@ -243,6 +242,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
7896 sb->s_blocksize * 8, bh->b_data);
7897 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
7898 ext4_group_desc_csum_set(sb, block_group, gdp);
7899 + return 0;
7900 }
7901
7902 /* Return the number of free blocks in a block group. It is used when
7903 @@ -438,11 +438,15 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
7904 }
7905 ext4_lock_group(sb, block_group);
7906 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
7907 - ext4_init_block_bitmap(sb, bh, block_group, desc);
7908 + int err;
7909 +
7910 + err = ext4_init_block_bitmap(sb, bh, block_group, desc);
7911 set_bitmap_uptodate(bh);
7912 set_buffer_uptodate(bh);
7913 ext4_unlock_group(sb, block_group);
7914 unlock_buffer(bh);
7915 + if (err)
7916 + ext4_error(sb, "Checksum bad for grp %u", block_group);
7917 return bh;
7918 }
7919 ext4_unlock_group(sb, block_group);
7920 diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
7921 index 3285aa5a706a..b610779a958c 100644
7922 --- a/fs/ext4/bitmap.c
7923 +++ b/fs/ext4/bitmap.c
7924 @@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
7925 __u32 provided, calculated;
7926 struct ext4_sb_info *sbi = EXT4_SB(sb);
7927
7928 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
7929 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
7930 + if (!ext4_has_metadata_csum(sb))
7931 return 1;
7932
7933 provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
7934 @@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
7935 __u32 csum;
7936 struct ext4_sb_info *sbi = EXT4_SB(sb);
7937
7938 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
7939 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
7940 + if (!ext4_has_metadata_csum(sb))
7941 return;
7942
7943 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
7944 @@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
7945 struct ext4_sb_info *sbi = EXT4_SB(sb);
7946 int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
7947
7948 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
7949 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
7950 + if (!ext4_has_metadata_csum(sb))
7951 return 1;
7952
7953 provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
7954 @@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
7955 __u32 csum;
7956 struct ext4_sb_info *sbi = EXT4_SB(sb);
7957
7958 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
7959 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
7960 + if (!ext4_has_metadata_csum(sb))
7961 return;
7962
7963 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
7964 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
7965 index b0c225cdb52c..96ac9d32a5d2 100644
7966 --- a/fs/ext4/ext4.h
7967 +++ b/fs/ext4/ext4.h
7968 @@ -2109,6 +2109,7 @@ int do_journal_get_write_access(handle_t *handle,
7969 #define CONVERT_INLINE_DATA 2
7970
7971 extern struct inode *ext4_iget(struct super_block *, unsigned long);
7972 +extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
7973 extern int ext4_write_inode(struct inode *, struct writeback_control *);
7974 extern int ext4_setattr(struct dentry *, struct iattr *);
7975 extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
7976 @@ -2332,10 +2333,18 @@ extern int ext4_register_li_request(struct super_block *sb,
7977 static inline int ext4_has_group_desc_csum(struct super_block *sb)
7978 {
7979 return EXT4_HAS_RO_COMPAT_FEATURE(sb,
7980 - EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
7981 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
7982 + EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
7983 + (EXT4_SB(sb)->s_chksum_driver != NULL);
7984 }
7985
7986 +static inline int ext4_has_metadata_csum(struct super_block *sb)
7987 +{
7988 + WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
7989 + EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
7990 + !EXT4_SB(sb)->s_chksum_driver);
7991 +
7992 + return (EXT4_SB(sb)->s_chksum_driver != NULL);
7993 +}
7994 static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
7995 {
7996 return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
7997 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7998 index 74292a71b384..18d8dc83e61b 100644
7999 --- a/fs/ext4/extents.c
8000 +++ b/fs/ext4/extents.c
8001 @@ -73,8 +73,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
8002 {
8003 struct ext4_extent_tail *et;
8004
8005 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8006 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8007 + if (!ext4_has_metadata_csum(inode->i_sb))
8008 return 1;
8009
8010 et = find_ext4_extent_tail(eh);
8011 @@ -88,8 +87,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
8012 {
8013 struct ext4_extent_tail *et;
8014
8015 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8016 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8017 + if (!ext4_has_metadata_csum(inode->i_sb))
8018 return;
8019
8020 et = find_ext4_extent_tail(eh);
8021 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
8022 index aca7b24a4432..8131be8c0af3 100644
8023 --- a/fs/ext4/file.c
8024 +++ b/fs/ext4/file.c
8025 @@ -137,10 +137,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
8026 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
8027 }
8028
8029 + iocb->private = &overwrite;
8030 if (o_direct) {
8031 blk_start_plug(&plug);
8032
8033 - iocb->private = &overwrite;
8034
8035 /* check whether we do a DIO overwrite or not */
8036 if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
8037 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
8038 index 5b87fc36aab8..ac644c31ca67 100644
8039 --- a/fs/ext4/ialloc.c
8040 +++ b/fs/ext4/ialloc.c
8041 @@ -887,6 +887,10 @@ got:
8042 struct buffer_head *block_bitmap_bh;
8043
8044 block_bitmap_bh = ext4_read_block_bitmap(sb, group);
8045 + if (!block_bitmap_bh) {
8046 + err = -EIO;
8047 + goto out;
8048 + }
8049 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
8050 err = ext4_journal_get_write_access(handle, block_bitmap_bh);
8051 if (err) {
8052 @@ -1011,8 +1015,7 @@ got:
8053 spin_unlock(&sbi->s_next_gen_lock);
8054
8055 /* Precompute checksum seed for inode metadata */
8056 - if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
8057 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
8058 + if (ext4_has_metadata_csum(sb)) {
8059 __u32 csum;
8060 __le32 inum = cpu_to_le32(inode->i_ino);
8061 __le32 gen = cpu_to_le32(inode->i_generation);
8062 diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
8063 index bea662bd0ca6..aa8e69556c16 100644
8064 --- a/fs/ext4/inline.c
8065 +++ b/fs/ext4/inline.c
8066 @@ -1126,8 +1126,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
8067 memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
8068 inline_size - EXT4_INLINE_DOTDOT_SIZE);
8069
8070 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8071 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8072 + if (ext4_has_metadata_csum(inode->i_sb))
8073 csum_size = sizeof(struct ext4_dir_entry_tail);
8074
8075 inode->i_size = inode->i_sb->s_blocksize;
8076 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
8077 index 3aa26e9117c4..7d1057bf2b86 100644
8078 --- a/fs/ext4/inode.c
8079 +++ b/fs/ext4/inode.c
8080 @@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
8081
8082 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
8083 cpu_to_le32(EXT4_OS_LINUX) ||
8084 - !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8085 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8086 + !ext4_has_metadata_csum(inode->i_sb))
8087 return 1;
8088
8089 provided = le16_to_cpu(raw->i_checksum_lo);
8090 @@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
8091
8092 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
8093 cpu_to_le32(EXT4_OS_LINUX) ||
8094 - !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8095 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8096 + !ext4_has_metadata_csum(inode->i_sb))
8097 return;
8098
8099 csum = ext4_inode_csum(inode, raw, ei);
8100 @@ -224,16 +222,15 @@ void ext4_evict_inode(struct inode *inode)
8101 goto no_delete;
8102 }
8103
8104 - if (!is_bad_inode(inode))
8105 - dquot_initialize(inode);
8106 + if (is_bad_inode(inode))
8107 + goto no_delete;
8108 + dquot_initialize(inode);
8109
8110 if (ext4_should_order_data(inode))
8111 ext4_begin_ordered_truncate(inode, 0);
8112 truncate_inode_pages_final(&inode->i_data);
8113
8114 WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
8115 - if (is_bad_inode(inode))
8116 - goto no_delete;
8117
8118 /*
8119 * Protect us against freezing - iput() caller didn't have to have any
8120 @@ -2515,6 +2512,20 @@ static int ext4_nonda_switch(struct super_block *sb)
8121 return 0;
8122 }
8123
8124 +/* We always reserve for an inode update; the superblock could be there too */
8125 +static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
8126 +{
8127 + if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8128 + EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
8129 + return 1;
8130 +
8131 + if (pos + len <= 0x7fffffffULL)
8132 + return 1;
8133 +
8134 + /* We might need to update the superblock to set LARGE_FILE */
8135 + return 2;
8136 +}
8137 +
8138 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
8139 loff_t pos, unsigned len, unsigned flags,
8140 struct page **pagep, void **fsdata)
8141 @@ -2565,7 +2576,8 @@ retry_grab:
8142 * of file which has an already mapped buffer.
8143 */
8144 retry_journal:
8145 - handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
8146 + handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
8147 + ext4_da_write_credits(inode, pos, len));
8148 if (IS_ERR(handle)) {
8149 page_cache_release(page);
8150 return PTR_ERR(handle);
8151 @@ -3936,8 +3948,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
8152 ei->i_extra_isize = 0;
8153
8154 /* Precompute checksum seed for inode metadata */
8155 - if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
8156 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
8157 + if (ext4_has_metadata_csum(sb)) {
8158 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
8159 __u32 csum;
8160 __le32 inum = cpu_to_le32(inode->i_ino);
8161 @@ -4127,6 +4138,13 @@ bad_inode:
8162 return ERR_PTR(ret);
8163 }
8164
8165 +struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
8166 +{
8167 + if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
8168 + return ERR_PTR(-EIO);
8169 + return ext4_iget(sb, ino);
8170 +}
8171 +
8172 static int ext4_inode_blocks_set(handle_t *handle,
8173 struct ext4_inode *raw_inode,
8174 struct ext4_inode_info *ei)
8175 @@ -4536,8 +4554,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
8176 ext4_orphan_del(NULL, inode);
8177 goto err_out;
8178 }
8179 - } else
8180 + } else {
8181 + loff_t oldsize = inode->i_size;
8182 +
8183 i_size_write(inode, attr->ia_size);
8184 + pagecache_isize_extended(inode, oldsize, inode->i_size);
8185 + }
8186
8187 /*
8188 * Blocks are going to be removed from the inode. Wait
8189 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
8190 index 0f2252ec274d..bfda18a15592 100644
8191 --- a/fs/ext4/ioctl.c
8192 +++ b/fs/ext4/ioctl.c
8193 @@ -331,8 +331,7 @@ flags_out:
8194 if (!inode_owner_or_capable(inode))
8195 return -EPERM;
8196
8197 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8198 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
8199 + if (ext4_has_metadata_csum(inode->i_sb)) {
8200 ext4_warning(sb, "Setting inode version is not "
8201 "supported with metadata_csum enabled.");
8202 return -ENOTTY;
8203 @@ -532,9 +531,17 @@ group_add_out:
8204 }
8205
8206 case EXT4_IOC_SWAP_BOOT:
8207 + {
8208 + int err;
8209 if (!(filp->f_mode & FMODE_WRITE))
8210 return -EBADF;
8211 - return swap_inode_boot_loader(sb, inode);
8212 + err = mnt_want_write_file(filp);
8213 + if (err)
8214 + return err;
8215 + err = swap_inode_boot_loader(sb, inode);
8216 + mnt_drop_write_file(filp);
8217 + return err;
8218 + }
8219
8220 case EXT4_IOC_RESIZE_FS: {
8221 ext4_fsblk_t n_blocks_count;
8222 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
8223 index 32bce844c2e1..8313ca3324ec 100644
8224 --- a/fs/ext4/mmp.c
8225 +++ b/fs/ext4/mmp.c
8226 @@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
8227
8228 static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
8229 {
8230 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
8231 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8232 + if (!ext4_has_metadata_csum(sb))
8233 return 1;
8234
8235 return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
8236 @@ -29,8 +28,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
8237
8238 static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
8239 {
8240 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
8241 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8242 + if (!ext4_has_metadata_csum(sb))
8243 return;
8244
8245 mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
8246 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
8247 index 603e4ebbd0ac..5b7dad62d029 100644
8248 --- a/fs/ext4/namei.c
8249 +++ b/fs/ext4/namei.c
8250 @@ -124,8 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
8251 "directory leaf block found instead of index block");
8252 return ERR_PTR(-EIO);
8253 }
8254 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8255 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
8256 + if (!ext4_has_metadata_csum(inode->i_sb) ||
8257 buffer_verified(bh))
8258 return bh;
8259
8260 @@ -340,8 +339,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
8261 {
8262 struct ext4_dir_entry_tail *t;
8263
8264 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8265 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8266 + if (!ext4_has_metadata_csum(inode->i_sb))
8267 return 1;
8268
8269 t = get_dirent_tail(inode, dirent);
8270 @@ -362,8 +360,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
8271 {
8272 struct ext4_dir_entry_tail *t;
8273
8274 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8275 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8276 + if (!ext4_has_metadata_csum(inode->i_sb))
8277 return;
8278
8279 t = get_dirent_tail(inode, dirent);
8280 @@ -438,8 +435,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
8281 struct dx_tail *t;
8282 int count_offset, limit, count;
8283
8284 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8285 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8286 + if (!ext4_has_metadata_csum(inode->i_sb))
8287 return 1;
8288
8289 c = get_dx_countlimit(inode, dirent, &count_offset);
8290 @@ -468,8 +464,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
8291 struct dx_tail *t;
8292 int count_offset, limit, count;
8293
8294 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8295 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8296 + if (!ext4_has_metadata_csum(inode->i_sb))
8297 return;
8298
8299 c = get_dx_countlimit(inode, dirent, &count_offset);
8300 @@ -557,8 +552,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
8301 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
8302 EXT4_DIR_REC_LEN(2) - infosize;
8303
8304 - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
8305 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8306 + if (ext4_has_metadata_csum(dir->i_sb))
8307 entry_space -= sizeof(struct dx_tail);
8308 return entry_space / sizeof(struct dx_entry);
8309 }
8310 @@ -567,8 +561,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
8311 {
8312 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
8313
8314 - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
8315 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8316 + if (ext4_has_metadata_csum(dir->i_sb))
8317 entry_space -= sizeof(struct dx_tail);
8318 return entry_space / sizeof(struct dx_entry);
8319 }
8320 @@ -1441,7 +1434,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
8321 dentry);
8322 return ERR_PTR(-EIO);
8323 }
8324 - inode = ext4_iget(dir->i_sb, ino);
8325 + inode = ext4_iget_normal(dir->i_sb, ino);
8326 if (inode == ERR_PTR(-ESTALE)) {
8327 EXT4_ERROR_INODE(dir,
8328 "deleted inode referenced: %u",
8329 @@ -1474,7 +1467,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
8330 return ERR_PTR(-EIO);
8331 }
8332
8333 - return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino));
8334 + return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
8335 }
8336
8337 /*
8338 @@ -1548,8 +1541,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
8339 int csum_size = 0;
8340 int err = 0, i;
8341
8342 - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
8343 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8344 + if (ext4_has_metadata_csum(dir->i_sb))
8345 csum_size = sizeof(struct ext4_dir_entry_tail);
8346
8347 bh2 = ext4_append(handle, dir, &newblock);
8348 @@ -1718,8 +1710,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
8349 int csum_size = 0;
8350 int err;
8351
8352 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8353 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8354 + if (ext4_has_metadata_csum(inode->i_sb))
8355 csum_size = sizeof(struct ext4_dir_entry_tail);
8356
8357 if (!de) {
8358 @@ -1786,8 +1777,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
8359 struct fake_dirent *fde;
8360 int csum_size = 0;
8361
8362 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8363 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8364 + if (ext4_has_metadata_csum(inode->i_sb))
8365 csum_size = sizeof(struct ext4_dir_entry_tail);
8366
8367 blocksize = dir->i_sb->s_blocksize;
8368 @@ -1904,8 +1894,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
8369 ext4_lblk_t block, blocks;
8370 int csum_size = 0;
8371
8372 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8373 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8374 + if (ext4_has_metadata_csum(inode->i_sb))
8375 csum_size = sizeof(struct ext4_dir_entry_tail);
8376
8377 sb = dir->i_sb;
8378 @@ -2167,8 +2156,7 @@ static int ext4_delete_entry(handle_t *handle,
8379 return err;
8380 }
8381
8382 - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
8383 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8384 + if (ext4_has_metadata_csum(dir->i_sb))
8385 csum_size = sizeof(struct ext4_dir_entry_tail);
8386
8387 BUFFER_TRACE(bh, "get_write_access");
8388 @@ -2387,8 +2375,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
8389 int csum_size = 0;
8390 int err;
8391
8392 - if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
8393 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8394 + if (ext4_has_metadata_csum(dir->i_sb))
8395 csum_size = sizeof(struct ext4_dir_entry_tail);
8396
8397 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
8398 @@ -2573,7 +2560,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
8399 int err = 0, rc;
8400 bool dirty = false;
8401
8402 - if (!sbi->s_journal)
8403 + if (!sbi->s_journal || is_bad_inode(inode))
8404 return 0;
8405
8406 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
8407 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
8408 index 1e43b905ff98..ca4588388fc3 100644
8409 --- a/fs/ext4/resize.c
8410 +++ b/fs/ext4/resize.c
8411 @@ -1081,7 +1081,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
8412 break;
8413
8414 if (meta_bg == 0)
8415 - backup_block = group * bpg + blk_off;
8416 + backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
8417 else
8418 backup_block = (ext4_group_first_block_no(sb, group) +
8419 ext4_bg_has_super(sb, group));
8420 @@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
8421 {
8422 struct buffer_head *bh;
8423
8424 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
8425 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8426 + if (!ext4_has_metadata_csum(sb))
8427 return 0;
8428
8429 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
8430 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
8431 index 0b28b36e7915..b1f0ac748320 100644
8432 --- a/fs/ext4/super.c
8433 +++ b/fs/ext4/super.c
8434 @@ -141,8 +141,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
8435 static int ext4_superblock_csum_verify(struct super_block *sb,
8436 struct ext4_super_block *es)
8437 {
8438 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
8439 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8440 + if (!ext4_has_metadata_csum(sb))
8441 return 1;
8442
8443 return es->s_checksum == ext4_superblock_csum(sb, es);
8444 @@ -152,8 +151,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
8445 {
8446 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
8447
8448 - if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
8449 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8450 + if (!ext4_has_metadata_csum(sb))
8451 return;
8452
8453 es->s_checksum = ext4_superblock_csum(sb, es);
8454 @@ -1002,7 +1000,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
8455 * Currently we don't know the generation for parent directory, so
8456 * a generation of 0 means "accept any"
8457 */
8458 - inode = ext4_iget(sb, ino);
8459 + inode = ext4_iget_normal(sb, ino);
8460 if (IS_ERR(inode))
8461 return ERR_CAST(inode);
8462 if (generation && inode->i_generation != generation) {
8463 @@ -1712,13 +1710,6 @@ static int parse_options(char *options, struct super_block *sb,
8464 "not specified");
8465 return 0;
8466 }
8467 - } else {
8468 - if (sbi->s_jquota_fmt) {
8469 - ext4_msg(sb, KERN_ERR, "journaled quota format "
8470 - "specified with no journaling "
8471 - "enabled");
8472 - return 0;
8473 - }
8474 }
8475 #endif
8476 if (test_opt(sb, DIOREAD_NOLOCK)) {
8477 @@ -2016,8 +2007,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
8478 __u16 crc = 0;
8479 __le32 le_group = cpu_to_le32(block_group);
8480
8481 - if ((sbi->s_es->s_feature_ro_compat &
8482 - cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
8483 + if (ext4_has_metadata_csum(sbi->s_sb)) {
8484 /* Use new metadata_csum algorithm */
8485 __le16 save_csum;
8486 __u32 csum32;
8487 @@ -2035,6 +2025,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
8488 }
8489
8490 /* old crc16 code */
8491 + if (!(sbi->s_es->s_feature_ro_compat &
8492 + cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
8493 + return 0;
8494 +
8495 offset = offsetof(struct ext4_group_desc, bg_checksum);
8496
8497 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
8498 @@ -3179,8 +3173,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
8499 int compat, incompat;
8500 struct ext4_sb_info *sbi = EXT4_SB(sb);
8501
8502 - if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
8503 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
8504 + if (ext4_has_metadata_csum(sb)) {
8505 /* journal checksum v3 */
8506 compat = 0;
8507 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
8508 @@ -3487,8 +3480,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
8509 }
8510
8511 /* Precompute checksum seed for all metadata */
8512 - if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
8513 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8514 + if (ext4_has_metadata_csum(sb))
8515 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
8516 sizeof(es->s_uuid));
8517
8518 @@ -3506,6 +3498,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
8519 #ifdef CONFIG_EXT4_FS_POSIX_ACL
8520 set_opt(sb, POSIX_ACL);
8521 #endif
8522 + /* don't forget to enable journal_csum when metadata_csum is enabled. */
8523 + if (ext4_has_metadata_csum(sb))
8524 + set_opt(sb, JOURNAL_CHECKSUM);
8525 +
8526 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
8527 set_opt(sb, JOURNAL_DATA);
8528 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
8529 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
8530 index e7387337060c..2d1e5803839f 100644
8531 --- a/fs/ext4/xattr.c
8532 +++ b/fs/ext4/xattr.c
8533 @@ -142,8 +142,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
8534 sector_t block_nr,
8535 struct ext4_xattr_header *hdr)
8536 {
8537 - if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8538 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
8539 + if (ext4_has_metadata_csum(inode->i_sb) &&
8540 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
8541 return 0;
8542 return 1;
8543 @@ -153,8 +152,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
8544 sector_t block_nr,
8545 struct ext4_xattr_header *hdr)
8546 {
8547 - if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
8548 - EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
8549 + if (!ext4_has_metadata_csum(inode->i_sb))
8550 return;
8551
8552 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
8553 @@ -190,14 +188,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
8554 }
8555
8556 static int
8557 -ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
8558 +ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
8559 + void *value_start)
8560 {
8561 - while (!IS_LAST_ENTRY(entry)) {
8562 - struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
8563 + struct ext4_xattr_entry *e = entry;
8564 +
8565 + while (!IS_LAST_ENTRY(e)) {
8566 + struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
8567 if ((void *)next >= end)
8568 return -EIO;
8569 - entry = next;
8570 + e = next;
8571 }
8572 +
8573 + while (!IS_LAST_ENTRY(entry)) {
8574 + if (entry->e_value_size != 0 &&
8575 + (value_start + le16_to_cpu(entry->e_value_offs) <
8576 + (void *)e + sizeof(__u32) ||
8577 + value_start + le16_to_cpu(entry->e_value_offs) +
8578 + le32_to_cpu(entry->e_value_size) > end))
8579 + return -EIO;
8580 + entry = EXT4_XATTR_NEXT(entry);
8581 + }
8582 +
8583 return 0;
8584 }
8585
8586 @@ -214,7 +226,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
8587 return -EIO;
8588 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
8589 return -EIO;
8590 - error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
8591 + error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
8592 + bh->b_data);
8593 if (!error)
8594 set_buffer_verified(bh);
8595 return error;
8596 @@ -331,7 +344,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
8597 header = IHDR(inode, raw_inode);
8598 entry = IFIRST(header);
8599 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
8600 - error = ext4_xattr_check_names(entry, end);
8601 + error = ext4_xattr_check_names(entry, end, entry);
8602 if (error)
8603 goto cleanup;
8604 error = ext4_xattr_find_entry(&entry, name_index, name,
8605 @@ -463,7 +476,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
8606 raw_inode = ext4_raw_inode(&iloc);
8607 header = IHDR(inode, raw_inode);
8608 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
8609 - error = ext4_xattr_check_names(IFIRST(header), end);
8610 + error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
8611 if (error)
8612 goto cleanup;
8613 error = ext4_xattr_list_entries(dentry, IFIRST(header),
8614 @@ -986,7 +999,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
8615 is->s.here = is->s.first;
8616 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
8617 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
8618 - error = ext4_xattr_check_names(IFIRST(header), is->s.end);
8619 + error = ext4_xattr_check_names(IFIRST(header), is->s.end,
8620 + IFIRST(header));
8621 if (error)
8622 return error;
8623 /* Find the named attribute. */
8624 diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
8625 index 9b329b55ffe3..bcbef08a4d8f 100644
8626 --- a/fs/jbd2/recovery.c
8627 +++ b/fs/jbd2/recovery.c
8628 @@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal,
8629 !jbd2_descr_block_csum_verify(journal,
8630 bh->b_data)) {
8631 err = -EIO;
8632 + brelse(bh);
8633 goto failed;
8634 }
8635
8636 diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
8637 index 413ef89c2d1b..046fee8b6e9b 100644
8638 --- a/fs/jffs2/jffs2_fs_sb.h
8639 +++ b/fs/jffs2/jffs2_fs_sb.h
8640 @@ -134,8 +134,6 @@ struct jffs2_sb_info {
8641 struct rw_semaphore wbuf_sem; /* Protects the write buffer */
8642
8643 struct delayed_work wbuf_dwork; /* write-buffer write-out work */
8644 - int wbuf_queued; /* non-zero delayed work is queued */
8645 - spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */
8646
8647 unsigned char *oobbuf;
8648 int oobavail; /* How many bytes are available for JFFS2 in OOB */
8649 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
8650 index a6597d60d76d..09ed55190ee2 100644
8651 --- a/fs/jffs2/wbuf.c
8652 +++ b/fs/jffs2/wbuf.c
8653 @@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work)
8654 struct jffs2_sb_info *c = work_to_sb(work);
8655 struct super_block *sb = OFNI_BS_2SFFJ(c);
8656
8657 - spin_lock(&c->wbuf_dwork_lock);
8658 - c->wbuf_queued = 0;
8659 - spin_unlock(&c->wbuf_dwork_lock);
8660 -
8661 if (!(sb->s_flags & MS_RDONLY)) {
8662 jffs2_dbg(1, "%s()\n", __func__);
8663 jffs2_flush_wbuf_gc(c, 0);
8664 @@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c)
8665 if (sb->s_flags & MS_RDONLY)
8666 return;
8667
8668 - spin_lock(&c->wbuf_dwork_lock);
8669 - if (!c->wbuf_queued) {
8670 + delay = msecs_to_jiffies(dirty_writeback_interval * 10);
8671 + if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
8672 jffs2_dbg(1, "%s()\n", __func__);
8673 - delay = msecs_to_jiffies(dirty_writeback_interval * 10);
8674 - queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
8675 - c->wbuf_queued = 1;
8676 - }
8677 - spin_unlock(&c->wbuf_dwork_lock);
8678 }
8679
8680 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
8681 @@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
8682
8683 /* Initialise write buffer */
8684 init_rwsem(&c->wbuf_sem);
8685 - spin_lock_init(&c->wbuf_dwork_lock);
8686 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
8687 c->wbuf_pagesize = c->mtd->writesize;
8688 c->wbuf_ofs = 0xFFFFFFFF;
8689 @@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
8690
8691 /* Initialize write buffer */
8692 init_rwsem(&c->wbuf_sem);
8693 - spin_lock_init(&c->wbuf_dwork_lock);
8694 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
8695 c->wbuf_pagesize = c->mtd->erasesize;
8696
8697 @@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
8698
8699 /* Initialize write buffer */
8700 init_rwsem(&c->wbuf_sem);
8701 - spin_lock_init(&c->wbuf_dwork_lock);
8702 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
8703
8704 c->wbuf_pagesize = c->mtd->writesize;
8705 @@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
8706 return 0;
8707
8708 init_rwsem(&c->wbuf_sem);
8709 - spin_lock_init(&c->wbuf_dwork_lock);
8710 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
8711
8712 c->wbuf_pagesize = c->mtd->writesize;
8713 diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
8714 index daa8e7514eae..9106f42c472c 100644
8715 --- a/fs/lockd/mon.c
8716 +++ b/fs/lockd/mon.c
8717 @@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res,
8718
8719 msg.rpc_proc = &clnt->cl_procinfo[proc];
8720 status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
8721 + if (status == -ECONNREFUSED) {
8722 + dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n",
8723 + status);
8724 + rpc_force_rebind(clnt);
8725 + status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN);
8726 + }
8727 if (status < 0)
8728 dprintk("lockd: NSM upcall RPC failed, status=%d\n",
8729 status);
8730 diff --git a/fs/namei.c b/fs/namei.c
8731 index 3ddb044f3702..bb02687272d7 100644
8732 --- a/fs/namei.c
8733 +++ b/fs/namei.c
8734 @@ -3154,7 +3154,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
8735 if (error)
8736 goto out2;
8737 audit_inode(pathname, nd->path.dentry, 0);
8738 - error = may_open(&nd->path, op->acc_mode, op->open_flag);
8739 + /* Don't check for other permissions, the inode was just created */
8740 + error = may_open(&nd->path, MAY_OPEN, op->open_flag);
8741 if (error)
8742 goto out2;
8743 file->f_path.mnt = nd->path.mnt;
8744 diff --git a/fs/namespace.c b/fs/namespace.c
8745 index 7f67b463a5b4..550dbff08677 100644
8746 --- a/fs/namespace.c
8747 +++ b/fs/namespace.c
8748 @@ -2822,6 +2822,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
8749 /* make sure we can reach put_old from new_root */
8750 if (!is_path_reachable(old_mnt, old.dentry, &new))
8751 goto out4;
8752 + /* make certain new is below the root */
8753 + if (!is_path_reachable(new_mnt, new.dentry, &root))
8754 + goto out4;
8755 root_mp->m_count++; /* pin it so it won't go away */
8756 lock_mount_hash();
8757 detach_mnt(new_mnt, &parent_path);
8758 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
8759 index 5e0dc528a0e8..1d3cb479b002 100644
8760 --- a/fs/nfsd/nfs4proc.c
8761 +++ b/fs/nfsd/nfs4proc.c
8762 @@ -1229,7 +1229,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
8763 */
8764 if (argp->opcnt == resp->opcnt)
8765 return false;
8766 -
8767 + if (next->opnum == OP_ILLEGAL)
8768 + return false;
8769 nextd = OPDESC(next);
8770 /*
8771 * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
8772 @@ -1546,7 +1547,8 @@ static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op
8773 static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
8774 struct nfsd4_op *op)
8775 {
8776 - return NFS4_MAX_SESSIONID_LEN + 20;
8777 + return (op_encode_hdr_size
8778 + + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
8779 }
8780
8781 static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
8782 @@ -1850,6 +1852,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
8783 .op_func = (nfsd4op_func)nfsd4_sequence,
8784 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
8785 .op_name = "OP_SEQUENCE",
8786 + .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
8787 },
8788 [OP_DESTROY_CLIENTID] = {
8789 .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
8790 diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
8791 index ea34952f9496..485d22d649b3 100644
8792 --- a/fs/ocfs2/cluster/tcp.c
8793 +++ b/fs/ocfs2/cluster/tcp.c
8794 @@ -925,7 +925,7 @@ static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
8795 size_t veclen, size_t total)
8796 {
8797 int ret;
8798 - struct msghdr msg;
8799 + struct msghdr msg = {.msg_flags = 0,};
8800
8801 if (sock == NULL) {
8802 ret = -EINVAL;
8803 diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
8804 index 192297b0090d..fafb7a02a5d6 100644
8805 --- a/fs/pstore/inode.c
8806 +++ b/fs/pstore/inode.c
8807 @@ -320,10 +320,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
8808 compressed ? ".enc.z" : "");
8809 break;
8810 case PSTORE_TYPE_CONSOLE:
8811 - sprintf(name, "console-%s", psname);
8812 + sprintf(name, "console-%s-%lld", psname, id);
8813 break;
8814 case PSTORE_TYPE_FTRACE:
8815 - sprintf(name, "ftrace-%s", psname);
8816 + sprintf(name, "ftrace-%s-%lld", psname, id);
8817 break;
8818 case PSTORE_TYPE_MCE:
8819 sprintf(name, "mce-%s-%lld", psname, id);
8820 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
8821 index f2d0eee9d1f1..23c548d7ae66 100644
8822 --- a/fs/quota/dquot.c
8823 +++ b/fs/quota/dquot.c
8824 @@ -634,7 +634,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
8825 dqstats_inc(DQST_LOOKUPS);
8826 err = sb->dq_op->write_dquot(dquot);
8827 if (!ret && err)
8828 - err = ret;
8829 + ret = err;
8830 dqput(dquot);
8831 spin_lock(&dq_list_lock);
8832 }
8833 diff --git a/fs/super.c b/fs/super.c
8834 index b9a214d2fe98..6f8c954315c0 100644
8835 --- a/fs/super.c
8836 +++ b/fs/super.c
8837 @@ -80,6 +80,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
8838 inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid);
8839 dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid);
8840 total_objects = dentries + inodes + fs_objects + 1;
8841 + if (!total_objects)
8842 + total_objects = 1;
8843
8844 /* proportion the scan between the caches */
8845 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
8846 diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
8847 index aa13ad053b14..26b69b2d4a45 100644
8848 --- a/fs/ubifs/commit.c
8849 +++ b/fs/ubifs/commit.c
8850 @@ -166,10 +166,6 @@ static int do_commit(struct ubifs_info *c)
8851 err = ubifs_orphan_end_commit(c);
8852 if (err)
8853 goto out;
8854 - old_ltail_lnum = c->ltail_lnum;
8855 - err = ubifs_log_end_commit(c, new_ltail_lnum);
8856 - if (err)
8857 - goto out;
8858 err = dbg_check_old_index(c, &zroot);
8859 if (err)
8860 goto out;
8861 @@ -202,7 +198,9 @@ static int do_commit(struct ubifs_info *c)
8862 c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
8863 else
8864 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
8865 - err = ubifs_write_master(c);
8866 +
8867 + old_ltail_lnum = c->ltail_lnum;
8868 + err = ubifs_log_end_commit(c, new_ltail_lnum);
8869 if (err)
8870 goto out;
8871
8872 diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
8873 index a47ddfc9be6b..c14628fbeee2 100644
8874 --- a/fs/ubifs/log.c
8875 +++ b/fs/ubifs/log.c
8876 @@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
8877 h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
8878 t = (long long)c->ltail_lnum * c->leb_size;
8879
8880 - if (h >= t)
8881 + if (h > t)
8882 return c->log_bytes - h + t;
8883 - else
8884 + else if (h != t)
8885 return t - h;
8886 + else if (c->lhead_lnum != c->ltail_lnum)
8887 + return 0;
8888 + else
8889 + return c->log_bytes;
8890 }
8891
8892 /**
8893 @@ -447,9 +451,9 @@ out:
8894 * @ltail_lnum: new log tail LEB number
8895 *
8896 * This function is called on when the commit operation was finished. It
8897 - * moves log tail to new position and unmaps LEBs which contain obsolete data.
8898 - * Returns zero in case of success and a negative error code in case of
8899 - * failure.
8900 + * moves log tail to new position and updates the master node so that it stores
8901 + * the new log tail LEB number. Returns zero in case of success and a negative
8902 + * error code in case of failure.
8903 */
8904 int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
8905 {
8906 @@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
8907 spin_unlock(&c->buds_lock);
8908
8909 err = dbg_check_bud_bytes(c);
8910 + if (err)
8911 + goto out;
8912
8913 + err = ubifs_write_master(c);
8914 +
8915 +out:
8916 mutex_unlock(&c->log_mutex);
8917 return err;
8918 }
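
empty_log_bytes() above is circular-log arithmetic in which head == tail is ambiguous (completely empty versus completely full), so the fix adds a second comparison to break the tie. A standalone sketch of the same idea, using a simple flag where UBIFS compares LEB numbers (assumed names, not UBIFS code):

#include <stdio.h>

struct ring {
        long long size, head, tail;
        int empty;                      /* disambiguates head == tail */
};

static long long ring_free(const struct ring *r)
{
        if (r->head > r->tail)
                return r->size - r->head + r->tail;
        if (r->head < r->tail)
                return r->tail - r->head;
        return r->empty ? r->size : 0;  /* equal offsets: need extra state */
}

int main(void)
{
        struct ring r = { .size = 1024, .head = 512, .tail = 512, .empty = 1 };

        printf("%lld\n", ring_free(&r));        /* 1024: same offsets, log empty */
        r.empty = 0;
        printf("%lld\n", ring_free(&r));        /* 0: same offsets, log full */
        return 0;
}
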
8919 diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
8920 index f1deb961a296..894924a5129b 100644
8921 --- a/fs/xfs/xfs_itable.c
8922 +++ b/fs/xfs/xfs_itable.c
8923 @@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk(
8924 XFS_WANT_CORRUPTED_RETURN(stat == 1);
8925
8926 /* Check if the record contains the inode in request */
8927 - if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
8928 - return -EINVAL;
8929 + if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
8930 + *icount = 0;
8931 + return 0;
8932 + }
8933
8934 idx = agino - irec->ir_startino + 1;
8935 if (idx < XFS_INODES_PER_CHUNK &&
8936 @@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk(
8937
8938 #define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
8939
8940 +struct xfs_bulkstat_agichunk {
8941 + char __user **ac_ubuffer;/* pointer into user's buffer */
8942 + int ac_ubleft; /* bytes left in user's buffer */
8943 + int ac_ubelem; /* spaces used in user's buffer */
8944 +};
8945 +
8946 /*
8947 * Process inodes in chunk with a pointer to a formatter function
8948 * that will iget the inode and fill in the appropriate structure.
8949 */
8950 -int
8951 +static int
8952 xfs_bulkstat_ag_ichunk(
8953 struct xfs_mount *mp,
8954 xfs_agnumber_t agno,
8955 struct xfs_inobt_rec_incore *irbp,
8956 bulkstat_one_pf formatter,
8957 size_t statstruct_size,
8958 - struct xfs_bulkstat_agichunk *acp)
8959 + struct xfs_bulkstat_agichunk *acp,
8960 + xfs_agino_t *last_agino)
8961 {
8962 - xfs_ino_t lastino = acp->ac_lastino;
8963 char __user **ubufp = acp->ac_ubuffer;
8964 - int ubleft = acp->ac_ubleft;
8965 - int ubelem = acp->ac_ubelem;
8966 - int chunkidx, clustidx;
8967 + int chunkidx;
8968 int error = 0;
8969 - xfs_agino_t agino;
8970 + xfs_agino_t agino = irbp->ir_startino;
8971
8972 - for (agino = irbp->ir_startino, chunkidx = clustidx = 0;
8973 - XFS_BULKSTAT_UBLEFT(ubleft) &&
8974 - irbp->ir_freecount < XFS_INODES_PER_CHUNK;
8975 - chunkidx++, clustidx++, agino++) {
8976 - int fmterror; /* bulkstat formatter result */
8977 + for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
8978 + chunkidx++, agino++) {
8979 + int fmterror;
8980 int ubused;
8981 - xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino);
8982
8983 - ASSERT(chunkidx < XFS_INODES_PER_CHUNK);
8984 + /* inode won't fit in buffer, we are done */
8985 + if (acp->ac_ubleft < statstruct_size)
8986 + break;
8987
8988 /* Skip if this inode is free */
8989 - if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) {
8990 - lastino = ino;
8991 + if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
8992 continue;
8993 - }
8994 -
8995 - /*
8996 - * Count used inodes as free so we can tell when the
8997 - * chunk is used up.
8998 - */
8999 - irbp->ir_freecount++;
9000
9001 /* Get the inode and fill in a single buffer */
9002 ubused = statstruct_size;
9003 - error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror);
9004 - if (fmterror == BULKSTAT_RV_NOTHING) {
9005 - if (error && error != -ENOENT && error != -EINVAL) {
9006 - ubleft = 0;
9007 - break;
9008 - }
9009 - lastino = ino;
9010 - continue;
9011 - }
9012 - if (fmterror == BULKSTAT_RV_GIVEUP) {
9013 - ubleft = 0;
9014 + error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
9015 + *ubufp, acp->ac_ubleft, &ubused, &fmterror);
9016 +
9017 + if (fmterror == BULKSTAT_RV_GIVEUP ||
9018 + (error && error != -ENOENT && error != -EINVAL)) {
9019 + acp->ac_ubleft = 0;
9020 ASSERT(error);
9021 break;
9022 }
9023 - if (*ubufp)
9024 - *ubufp += ubused;
9025 - ubleft -= ubused;
9026 - ubelem++;
9027 - lastino = ino;
9028 +
9029 + /* be careful not to leak error if at end of chunk */
9030 + if (fmterror == BULKSTAT_RV_NOTHING || error) {
9031 + error = 0;
9032 + continue;
9033 + }
9034 +
9035 + *ubufp += ubused;
9036 + acp->ac_ubleft -= ubused;
9037 + acp->ac_ubelem++;
9038 }
9039
9040 - acp->ac_lastino = lastino;
9041 - acp->ac_ubleft = ubleft;
9042 - acp->ac_ubelem = ubelem;
9043 + /*
9044 + * Post-update *last_agino. At this point, agino will always point one
9045 + * inode past the last inode we processed successfully. Hence we
9046 + * subtract that inode when setting the *last_agino cursor so that we
9047 + * return the correct cookie to userspace. On the next bulkstat call,
9048 + * the inode under the lastino cookie will be skipped as we have already
9049 + * processed it here.
9050 + */
9051 + *last_agino = agino - 1;
9052
9053 return error;
9054 }
9055 @@ -353,45 +356,33 @@ xfs_bulkstat(
9056 xfs_agino_t agino; /* inode # in allocation group */
9057 xfs_agnumber_t agno; /* allocation group number */
9058 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
9059 - int end_of_ag; /* set if we've seen the ag end */
9060 - int error; /* error code */
9061 - int fmterror;/* bulkstat formatter result */
9062 - int i; /* loop index */
9063 - int icount; /* count of inodes good in irbuf */
9064 size_t irbsize; /* size of irec buffer in bytes */
9065 - xfs_ino_t ino; /* inode number (filesystem) */
9066 - xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
9067 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
9068 - xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
9069 - xfs_ino_t lastino; /* last inode number returned */
9070 int nirbuf; /* size of irbuf */
9071 - int rval; /* return value error code */
9072 - int tmp; /* result value from btree calls */
9073 int ubcount; /* size of user's buffer */
9074 - int ubleft; /* bytes left in user's buffer */
9075 - char __user *ubufp; /* pointer into user's buffer */
9076 - int ubelem; /* spaces used in user's buffer */
9077 + struct xfs_bulkstat_agichunk ac;
9078 + int error = 0;
9079
9080 /*
9081 * Get the last inode value, see if there's nothing to do.
9082 */
9083 - ino = (xfs_ino_t)*lastinop;
9084 - lastino = ino;
9085 - agno = XFS_INO_TO_AGNO(mp, ino);
9086 - agino = XFS_INO_TO_AGINO(mp, ino);
9087 + agno = XFS_INO_TO_AGNO(mp, *lastinop);
9088 + agino = XFS_INO_TO_AGINO(mp, *lastinop);
9089 if (agno >= mp->m_sb.sb_agcount ||
9090 - ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
9091 + *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
9092 *done = 1;
9093 *ubcountp = 0;
9094 return 0;
9095 }
9096
9097 ubcount = *ubcountp; /* statstruct's */
9098 - ubleft = ubcount * statstruct_size; /* bytes */
9099 - *ubcountp = ubelem = 0;
9100 + ac.ac_ubuffer = &ubuffer;
9101 + ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
9102 + ac.ac_ubelem = 0;
9103 +
9104 + *ubcountp = 0;
9105 *done = 0;
9106 - fmterror = 0;
9107 - ubufp = ubuffer;
9108 +
9109 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
9110 if (!irbuf)
9111 return -ENOMEM;
9112 @@ -402,9 +393,13 @@ xfs_bulkstat(
9113 * Loop over the allocation groups, starting from the last
9114 * inode returned; 0 means start of the allocation group.
9115 */
9116 - rval = 0;
9117 - while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
9118 - cond_resched();
9119 + while (agno < mp->m_sb.sb_agcount) {
9120 + struct xfs_inobt_rec_incore *irbp = irbuf;
9121 + struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
9122 + bool end_of_ag = false;
9123 + int icount = 0;
9124 + int stat;
9125 +
9126 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
9127 if (error)
9128 break;
9129 @@ -414,10 +409,6 @@ xfs_bulkstat(
9130 */
9131 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
9132 XFS_BTNUM_INO);
9133 - irbp = irbuf;
9134 - irbufend = irbuf + nirbuf;
9135 - end_of_ag = 0;
9136 - icount = 0;
9137 if (agino > 0) {
9138 /*
9139 * In the middle of an allocation group, we need to get
9140 @@ -427,22 +418,23 @@ xfs_bulkstat(
9141
9142 error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
9143 if (error)
9144 - break;
9145 + goto del_cursor;
9146 if (icount) {
9147 irbp->ir_startino = r.ir_startino;
9148 irbp->ir_freecount = r.ir_freecount;
9149 irbp->ir_free = r.ir_free;
9150 irbp++;
9151 - agino = r.ir_startino + XFS_INODES_PER_CHUNK;
9152 }
9153 /* Increment to the next record */
9154 - error = xfs_btree_increment(cur, 0, &tmp);
9155 + error = xfs_btree_increment(cur, 0, &stat);
9156 } else {
9157 /* Start of ag. Lookup the first inode chunk */
9158 - error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp);
9159 + error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
9160 + }
9161 + if (error || stat == 0) {
9162 + end_of_ag = true;
9163 + goto del_cursor;
9164 }
9165 - if (error)
9166 - break;
9167
9168 /*
9169 * Loop through inode btree records in this ag,
9170 @@ -451,10 +443,10 @@ xfs_bulkstat(
9171 while (irbp < irbufend && icount < ubcount) {
9172 struct xfs_inobt_rec_incore r;
9173
9174 - error = xfs_inobt_get_rec(cur, &r, &i);
9175 - if (error || i == 0) {
9176 - end_of_ag = 1;
9177 - break;
9178 + error = xfs_inobt_get_rec(cur, &r, &stat);
9179 + if (error || stat == 0) {
9180 + end_of_ag = true;
9181 + goto del_cursor;
9182 }
9183
9184 /*
9185 @@ -469,77 +461,79 @@ xfs_bulkstat(
9186 irbp++;
9187 icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
9188 }
9189 - /*
9190 - * Set agino to after this chunk and bump the cursor.
9191 - */
9192 - agino = r.ir_startino + XFS_INODES_PER_CHUNK;
9193 - error = xfs_btree_increment(cur, 0, &tmp);
9194 + error = xfs_btree_increment(cur, 0, &stat);
9195 + if (error || stat == 0) {
9196 + end_of_ag = true;
9197 + goto del_cursor;
9198 + }
9199 cond_resched();
9200 }
9201 +
9202 /*
9203 - * Drop the btree buffers and the agi buffer.
9204 - * We can't hold any of the locks these represent
9205 - * when calling iget.
9206 + * Drop the btree buffers and the agi buffer as we can't hold any
9207 + * of the locks these represent when calling iget. If there is a
9208 + * pending error, then we are done.
9209 */
9210 +del_cursor:
9211 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
9212 xfs_buf_relse(agbp);
9213 + if (error)
9214 + break;
9215 /*
9216 - * Now format all the good inodes into the user's buffer.
9217 + * Now format all the good inodes into the user's buffer. The
9218 + * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
9219 + * for the next loop iteration.
9220 */
9221 irbufend = irbp;
9222 for (irbp = irbuf;
9223 - irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) {
9224 - struct xfs_bulkstat_agichunk ac;
9225 -
9226 - ac.ac_lastino = lastino;
9227 - ac.ac_ubuffer = &ubuffer;
9228 - ac.ac_ubleft = ubleft;
9229 - ac.ac_ubelem = ubelem;
9230 + irbp < irbufend && ac.ac_ubleft >= statstruct_size;
9231 + irbp++) {
9232 error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
9233 - formatter, statstruct_size, &ac);
9234 + formatter, statstruct_size, &ac,
9235 + &agino);
9236 if (error)
9237 - rval = error;
9238 -
9239 - lastino = ac.ac_lastino;
9240 - ubleft = ac.ac_ubleft;
9241 - ubelem = ac.ac_ubelem;
9242 + break;
9243
9244 cond_resched();
9245 }
9246 +
9247 /*
9248 - * Set up for the next loop iteration.
9249 + * If we've run out of space or had a formatting error, we
9250 + * are now done
9251 */
9252 - if (XFS_BULKSTAT_UBLEFT(ubleft)) {
9253 - if (end_of_ag) {
9254 - agno++;
9255 - agino = 0;
9256 - } else
9257 - agino = XFS_INO_TO_AGINO(mp, lastino);
9258 - } else
9259 + if (ac.ac_ubleft < statstruct_size || error)
9260 break;
9261 +
9262 + if (end_of_ag) {
9263 + agno++;
9264 + agino = 0;
9265 + }
9266 }
9267 /*
9268 * Done, we're either out of filesystem or space to put the data.
9269 */
9270 kmem_free(irbuf);
9271 - *ubcountp = ubelem;
9272 + *ubcountp = ac.ac_ubelem;
9273 +
9274 /*
9275 - * Found some inodes, return them now and return the error next time.
9276 + * We found some inodes, so clear the error status and return them.
9277 + * The lastino pointer will point directly at the inode that triggered
9278 + * any error that occurred, so on the next call the error will be
9279 + * triggered again and propagated to userspace as there will be no
9280 + * formatted inodes in the buffer.
9281 */
9282 - if (ubelem)
9283 - rval = 0;
9284 - if (agno >= mp->m_sb.sb_agcount) {
9285 - /*
9286 - * If we ran out of filesystem, mark lastino as off
9287 - * the end of the filesystem, so the next call
9288 - * will return immediately.
9289 - */
9290 - *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0);
9291 + if (ac.ac_ubelem)
9292 + error = 0;
9293 +
9294 + /*
9295 + * If we ran out of filesystem, lastino will point off the end of
9296 + * the filesystem so the next call will return immediately.
9297 + */
9298 + *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
9299 + if (agno >= mp->m_sb.sb_agcount)
9300 *done = 1;
9301 - } else
9302 - *lastinop = (xfs_ino_t)lastino;
9303
9304 - return rval;
9305 + return error;
9306 }
9307
9308 int
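
The comment added in xfs_bulkstat_ag_ichunk() describes a resumable-iteration cookie: after the loop the cursor points one past the last inode handled, so the value handed back to userspace is cursor - 1 and the next call resumes immediately after it. A small standalone sketch of that convention (hypothetical item numbers, not XFS code):

#include <stdio.h>

#define NITEMS 10

/* Process items after "cookie"; return the new cookie, i.e. the index of
 * the last item actually handled. */
static int process_batch(int cookie, int budget)
{
        int pos;

        for (pos = cookie + 1; pos < NITEMS && budget > 0; pos++, budget--)
                printf("handled item %d\n", pos);

        return pos - 1;         /* one past the last handled item, minus one */
}

int main(void)
{
        int cookie = -1;        /* "nothing processed yet" */

        cookie = process_batch(cookie, 4);      /* items 0..3, cookie = 3 */
        cookie = process_batch(cookie, 4);      /* items 4..7, cookie = 7 */
        printf("cookie = %d\n", cookie);
        return 0;
}
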
9309 diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
9310 index aaed08022eb9..6ea8b3912fa4 100644
9311 --- a/fs/xfs/xfs_itable.h
9312 +++ b/fs/xfs/xfs_itable.h
9313 @@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
9314 int *ubused,
9315 int *stat);
9316
9317 -struct xfs_bulkstat_agichunk {
9318 - xfs_ino_t ac_lastino; /* last inode returned */
9319 - char __user **ac_ubuffer;/* pointer into user's buffer */
9320 - int ac_ubleft; /* bytes left in user's buffer */
9321 - int ac_ubelem; /* spaces used in user's buffer */
9322 -};
9323 -
9324 -int
9325 -xfs_bulkstat_ag_ichunk(
9326 - struct xfs_mount *mp,
9327 - xfs_agnumber_t agno,
9328 - struct xfs_inobt_rec_incore *irbp,
9329 - bulkstat_one_pf formatter,
9330 - size_t statstruct_size,
9331 - struct xfs_bulkstat_agichunk *acp);
9332 -
9333 /*
9334 * Values for stat return value.
9335 */
9336 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
9337 index e973540cd15b..2dd405c9be78 100644
9338 --- a/include/drm/drm_pciids.h
9339 +++ b/include/drm/drm_pciids.h
9340 @@ -74,7 +74,6 @@
9341 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
9342 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
9343 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
9344 - {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
9345 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
9346 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
9347 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
9348 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
9349 index 518b46555b80..f2057ff80784 100644
9350 --- a/include/linux/blkdev.h
9351 +++ b/include/linux/blkdev.h
9352 @@ -1142,8 +1142,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
9353 /*
9354 * tag stuff
9355 */
9356 -#define blk_rq_tagged(rq) \
9357 - ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
9358 +#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
9359 extern int blk_queue_start_tag(struct request_queue *, struct request *);
9360 extern struct request *blk_queue_find_tag(struct request_queue *, int);
9361 extern void blk_queue_end_tag(struct request_queue *, struct request *);
9362 @@ -1285,10 +1284,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
9363 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
9364 {
9365 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
9366 - unsigned int alignment = (sector << 9) & (granularity - 1);
9367 + unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
9368
9369 - return (granularity + lim->alignment_offset - alignment)
9370 - & (granularity - 1);
9371 + return (granularity + lim->alignment_offset - alignment) % granularity;
9372 }
9373
9374 static inline int bdev_alignment_offset(struct block_device *bdev)
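
The queue_limit_alignment_offset() change above stops assuming the granularity is a power of two: "x & (granularity - 1)" only equals "x % granularity" for power-of-two divisors. A standalone arithmetic sketch of the difference (values chosen purely for illustration):

#include <stdio.h>

int main(void)
{
        unsigned long long bytes = 9216;        /* e.g. sector 18 << 9 */
        unsigned int granularity = 1536;        /* 3 KiB: not a power of two */

        unsigned int mask_mod = bytes & (granularity - 1);      /* wrong: 1024 */
        unsigned int real_mod = bytes % granularity;            /* right: 0 */

        printf("mask: %u, modulo: %u\n", mask_mod, real_mod);
        return 0;
}
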
9375 diff --git a/include/linux/hid.h b/include/linux/hid.h
9376 index f53c4a9cca1d..26ee25fced27 100644
9377 --- a/include/linux/hid.h
9378 +++ b/include/linux/hid.h
9379 @@ -287,6 +287,7 @@ struct hid_item {
9380 #define HID_QUIRK_HIDINPUT_FORCE 0x00000080
9381 #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
9382 #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
9383 +#define HID_QUIRK_ALWAYS_POLL 0x00000400
9384 #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
9385 #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
9386 #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
9387 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
9388 index e0752d204d9e..2d946c9b8e8d 100644
9389 --- a/include/linux/memcontrol.h
9390 +++ b/include/linux/memcontrol.h
9391 @@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
9392 return false;
9393 }
9394
9395 -void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
9396 - unsigned long *flags);
9397 -
9398 -extern atomic_t memcg_moving;
9399 -
9400 -static inline void mem_cgroup_begin_update_page_stat(struct page *page,
9401 - bool *locked, unsigned long *flags)
9402 -{
9403 - if (mem_cgroup_disabled())
9404 - return;
9405 - rcu_read_lock();
9406 - *locked = false;
9407 - if (atomic_read(&memcg_moving))
9408 - __mem_cgroup_begin_update_page_stat(page, locked, flags);
9409 -}
9410 -
9411 -void __mem_cgroup_end_update_page_stat(struct page *page,
9412 - unsigned long *flags);
9413 -static inline void mem_cgroup_end_update_page_stat(struct page *page,
9414 - bool *locked, unsigned long *flags)
9415 -{
9416 - if (mem_cgroup_disabled())
9417 - return;
9418 - if (*locked)
9419 - __mem_cgroup_end_update_page_stat(page, flags);
9420 - rcu_read_unlock();
9421 -}
9422 -
9423 -void mem_cgroup_update_page_stat(struct page *page,
9424 - enum mem_cgroup_stat_index idx,
9425 - int val);
9426 -
9427 -static inline void mem_cgroup_inc_page_stat(struct page *page,
9428 +struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
9429 + unsigned long *flags);
9430 +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
9431 + unsigned long flags);
9432 +void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
9433 + enum mem_cgroup_stat_index idx, int val);
9434 +
9435 +static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
9436 enum mem_cgroup_stat_index idx)
9437 {
9438 - mem_cgroup_update_page_stat(page, idx, 1);
9439 + mem_cgroup_update_page_stat(memcg, idx, 1);
9440 }
9441
9442 -static inline void mem_cgroup_dec_page_stat(struct page *page,
9443 +static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
9444 enum mem_cgroup_stat_index idx)
9445 {
9446 - mem_cgroup_update_page_stat(page, idx, -1);
9447 + mem_cgroup_update_page_stat(memcg, idx, -1);
9448 }
9449
9450 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
9451 @@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
9452 {
9453 }
9454
9455 -static inline void mem_cgroup_begin_update_page_stat(struct page *page,
9456 +static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
9457 bool *locked, unsigned long *flags)
9458 {
9459 + return NULL;
9460 }
9461
9462 -static inline void mem_cgroup_end_update_page_stat(struct page *page,
9463 - bool *locked, unsigned long *flags)
9464 +static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
9465 + bool locked, unsigned long flags)
9466 {
9467 }
9468
9469 @@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
9470 return false;
9471 }
9472
9473 -static inline void mem_cgroup_inc_page_stat(struct page *page,
9474 +static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
9475 enum mem_cgroup_stat_index idx)
9476 {
9477 }
9478
9479 -static inline void mem_cgroup_dec_page_stat(struct page *page,
9480 +static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
9481 enum mem_cgroup_stat_index idx)
9482 {
9483 }
9484 diff --git a/include/linux/mm.h b/include/linux/mm.h
9485 index 16e6f1effef8..f952cc8b185d 100644
9486 --- a/include/linux/mm.h
9487 +++ b/include/linux/mm.h
9488 @@ -1174,6 +1174,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
9489
9490 extern void truncate_pagecache(struct inode *inode, loff_t new);
9491 extern void truncate_setsize(struct inode *inode, loff_t newsize);
9492 +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
9493 void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
9494 int truncate_inode_page(struct address_space *mapping, struct page *page);
9495 int generic_error_remove_page(struct address_space *mapping, struct page *page);
9496 @@ -1232,7 +1233,6 @@ int __set_page_dirty_no_writeback(struct page *page);
9497 int redirty_page_for_writepage(struct writeback_control *wbc,
9498 struct page *page);
9499 void account_page_dirtied(struct page *page, struct address_space *mapping);
9500 -void account_page_writeback(struct page *page);
9501 int set_page_dirty(struct page *page);
9502 int set_page_dirty_lock(struct page *page);
9503 int clear_page_dirty_for_io(struct page *page);
9504 diff --git a/include/linux/of.h b/include/linux/of.h
9505 index 6c4363b8ddc3..ee0fc7ea68d1 100644
9506 --- a/include/linux/of.h
9507 +++ b/include/linux/of.h
9508 @@ -267,14 +267,12 @@ extern int of_property_read_u64(const struct device_node *np,
9509 extern int of_property_read_string(struct device_node *np,
9510 const char *propname,
9511 const char **out_string);
9512 -extern int of_property_read_string_index(struct device_node *np,
9513 - const char *propname,
9514 - int index, const char **output);
9515 extern int of_property_match_string(struct device_node *np,
9516 const char *propname,
9517 const char *string);
9518 -extern int of_property_count_strings(struct device_node *np,
9519 - const char *propname);
9520 +extern int of_property_read_string_helper(struct device_node *np,
9521 + const char *propname,
9522 + const char **out_strs, size_t sz, int index);
9523 extern int of_device_is_compatible(const struct device_node *device,
9524 const char *);
9525 extern int of_device_is_available(const struct device_node *device);
9526 @@ -486,15 +484,9 @@ static inline int of_property_read_string(struct device_node *np,
9527 return -ENOSYS;
9528 }
9529
9530 -static inline int of_property_read_string_index(struct device_node *np,
9531 - const char *propname, int index,
9532 - const char **out_string)
9533 -{
9534 - return -ENOSYS;
9535 -}
9536 -
9537 -static inline int of_property_count_strings(struct device_node *np,
9538 - const char *propname)
9539 +static inline int of_property_read_string_helper(struct device_node *np,
9540 + const char *propname,
9541 + const char **out_strs, size_t sz, int index)
9542 {
9543 return -ENOSYS;
9544 }
9545 @@ -668,6 +660,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
9546 }
9547
9548 /**
9549 + * of_property_read_string_array() - Read an array of strings from a multiple
9550 + * strings property.
9551 + * @np: device node from which the property value is to be read.
9552 + * @propname: name of the property to be searched.
9553 + * @out_strs: output array of string pointers.
9554 + * @sz: number of array elements to read.
9555 + *
9556 + * Search for a property in a device tree node and retrieve a list of
9557 + * terminated string values (pointer to data, not a copy) in that property.
9558 + *
9559 + * If @out_strs is NULL, the number of strings in the property is returned.
9560 + */
9561 +static inline int of_property_read_string_array(struct device_node *np,
9562 + const char *propname, const char **out_strs,
9563 + size_t sz)
9564 +{
9565 + return of_property_read_string_helper(np, propname, out_strs, sz, 0);
9566 +}
9567 +
9568 +/**
9569 + * of_property_count_strings() - Find and return the number of strings from a
9570 + * multiple strings property.
9571 + * @np: device node from which the property value is to be read.
9572 + * @propname: name of the property to be searched.
9573 + *
9574 + * Search for a property in a device tree node and retrieve the number of null
9575 + * terminated strings contained in it. Returns the number of strings on
9576 + * success, -EINVAL if the property does not exist, -ENODATA if property
9577 + * does not have a value, and -EILSEQ if the string is not null-terminated
9578 + * within the length of the property data.
9579 + */
9580 +static inline int of_property_count_strings(struct device_node *np,
9581 + const char *propname)
9582 +{
9583 + return of_property_read_string_helper(np, propname, NULL, 0, 0);
9584 +}
9585 +
9586 +/**
9587 + * of_property_read_string_index() - Find and read a string from a multiple
9588 + * strings property.
9589 + * @np: device node from which the property value is to be read.
9590 + * @propname: name of the property to be searched.
9591 + * @index: index of the string in the list of strings
9592 + * @out_string: pointer to null terminated return string, modified only if
9593 + * return value is 0.
9594 + *
9595 + * Search for a property in a device tree node and retrieve a null
9596 + * terminated string value (pointer to data, not a copy) in the list of strings
9597 + * contained in that property.
9598 + * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
9599 + * property does not have a value, and -EILSEQ if the string is not
9600 + * null-terminated within the length of the property data.
9601 + *
9602 + * The out_string pointer is modified only if a valid string can be decoded.
9603 + */
9604 +static inline int of_property_read_string_index(struct device_node *np,
9605 + const char *propname,
9606 + int index, const char **output)
9607 +{
9608 + int rc = of_property_read_string_helper(np, propname, output, 1, index);
9609 + return rc < 0 ? rc : 0;
9610 +}
9611 +
9612 +/**
9613 * of_property_read_bool - Findfrom a property
9614 * @np: device node from which the property value is to be read.
9615 * @propname: name of the property to be searched.
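
The of.h hunk above funnels of_property_count_strings(), of_property_read_string_index() and the new of_property_read_string_array() through a single worker, of_property_read_string_helper(), parameterized by an output array, a size and a start index. A standalone sketch of that wrapper pattern over a NUL-separated string list (plain C, not the device-tree implementation; names are illustrative):

#include <stdio.h>
#include <string.h>

/* buf[0..len) holds NUL-terminated strings packed back to back. If out is
 * NULL, return how many strings there are; otherwise copy up to sz pointers
 * starting at string number skip and return how many were stored. */
static int strings_helper(const char *buf, size_t len,
                          const char **out, size_t sz, int skip)
{
        int i = 0, stored = 0;
        size_t off = 0;

        while (off < len) {
                if (out && i >= skip && (size_t)stored < sz)
                        out[stored++] = buf + off;
                off += strlen(buf + off) + 1;
                i++;
        }
        return out ? stored : i;
}

static int strings_count(const char *buf, size_t len)
{
        return strings_helper(buf, len, NULL, 0, 0);
}

static int string_index(const char *buf, size_t len, int idx, const char **out)
{
        return strings_helper(buf, len, out, 1, idx) == 1 ? 0 : -1;
}

int main(void)
{
        const char prop[] = "uart0\0uart1\0spi0";       /* DT-style string list */
        const char *s;

        printf("count = %d\n", strings_count(prop, sizeof(prop)));      /* 3 */
        if (!string_index(prop, sizeof(prop), 1, &s))
                printf("index 1 = %s\n", s);                            /* uart1 */
        return 0;
}
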
9616 diff --git a/include/linux/oom.h b/include/linux/oom.h
9617 index 647395a1a550..e8d6e1058723 100644
9618 --- a/include/linux/oom.h
9619 +++ b/include/linux/oom.h
9620 @@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
9621 extern unsigned long oom_badness(struct task_struct *p,
9622 struct mem_cgroup *memcg, const nodemask_t *nodemask,
9623 unsigned long totalpages);
9624 +
9625 +extern int oom_kills_count(void);
9626 +extern void note_oom_kill(void);
9627 extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
9628 unsigned int points, unsigned long totalpages,
9629 struct mem_cgroup *memcg, nodemask_t *nodemask,
9630 diff --git a/include/linux/string.h b/include/linux/string.h
9631 index d36977e029af..3b42b3732da6 100644
9632 --- a/include/linux/string.h
9633 +++ b/include/linux/string.h
9634 @@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
9635 #endif
9636
9637 extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
9638 - const void *from, size_t available);
9639 + const void *from, size_t available);
9640
9641 /**
9642 * strstarts - does @str start with @prefix?
9643 @@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
9644 return strncmp(str, prefix, strlen(prefix)) == 0;
9645 }
9646
9647 -extern size_t memweight(const void *ptr, size_t bytes);
9648 +size_t memweight(const void *ptr, size_t bytes);
9649 +void memzero_explicit(void *s, size_t count);
9650
9651 /**
9652 * kbasename - return the last part of a pathname.
9653 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
9654 index fcbfe8783243..cf391eef2e6d 100644
9655 --- a/include/linux/sunrpc/xprt.h
9656 +++ b/include/linux/sunrpc/xprt.h
9657 @@ -357,6 +357,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
9658 #define XPRT_CONNECTION_ABORT (7)
9659 #define XPRT_CONNECTION_CLOSE (8)
9660 #define XPRT_CONGESTED (9)
9661 +#define XPRT_CONNECTION_REUSE (10)
9662
9663 static inline void xprt_set_connected(struct rpc_xprt *xprt)
9664 {
9665 diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
9666 index 32e0f5c04e72..4a185a0f6242 100644
9667 --- a/include/linux/usb/quirks.h
9668 +++ b/include/linux/usb/quirks.h
9669 @@ -44,4 +44,7 @@
9670 /* device generates spurious wakeup, ignore remote wakeup capability */
9671 #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200
9672
9673 +/* device can't handle device_qualifier descriptor requests */
9674 +#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100
9675 +
9676 #endif /* __LINUX_USB_QUIRKS_H */
9677 diff --git a/include/net/ipv6.h b/include/net/ipv6.h
9678 index a2db816e8461..268c8f12aac2 100644
9679 --- a/include/net/ipv6.h
9680 +++ b/include/net/ipv6.h
9681 @@ -669,6 +669,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
9682 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
9683 }
9684
9685 +void ipv6_proxy_select_ident(struct sk_buff *skb);
9686 +
9687 int ip6_dst_hoplimit(struct dst_entry *dst);
9688
9689 static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
9690 diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
9691 index e64583560701..56ed843969ca 100644
9692 --- a/include/scsi/scsi_tcq.h
9693 +++ b/include/scsi/scsi_tcq.h
9694 @@ -67,8 +67,9 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
9695 if (!sdev->tagged_supported)
9696 return;
9697
9698 - if (!shost_use_blk_mq(sdev->host) &&
9699 - !blk_queue_tagged(sdev->request_queue))
9700 + if (shost_use_blk_mq(sdev->host))
9701 + queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue);
9702 + else if (!blk_queue_tagged(sdev->request_queue))
9703 blk_queue_init_tags(sdev->request_queue, depth,
9704 sdev->host->bqt);
9705
9706 @@ -81,8 +82,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
9707 **/
9708 static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
9709 {
9710 - if (!shost_use_blk_mq(sdev->host) &&
9711 - blk_queue_tagged(sdev->request_queue))
9712 + if (blk_queue_tagged(sdev->request_queue))
9713 blk_queue_free_tags(sdev->request_queue);
9714 scsi_adjust_queue_depth(sdev, 0, depth);
9715 }
9716 diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
9717 index 4fc66f6b12ce..c472bedbe38e 100644
9718 --- a/include/uapi/drm/vmwgfx_drm.h
9719 +++ b/include/uapi/drm/vmwgfx_drm.h
9720 @@ -29,7 +29,7 @@
9721 #define __VMWGFX_DRM_H__
9722
9723 #ifndef __KERNEL__
9724 -#include <drm.h>
9725 +#include <drm/drm.h>
9726 #endif
9727
9728 #define DRM_VMW_MAX_SURFACE_FACES 6
9729 diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
9730 index 1874ebe9ac1e..a1d7e931ab72 100644
9731 --- a/include/uapi/linux/input.h
9732 +++ b/include/uapi/linux/input.h
9733 @@ -739,6 +739,13 @@ struct input_keymap_entry {
9734 #define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
9735 #define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
9736
9737 +#define KEY_KBDINPUTASSIST_PREV 0x260
9738 +#define KEY_KBDINPUTASSIST_NEXT 0x261
9739 +#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
9740 +#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263
9741 +#define KEY_KBDINPUTASSIST_ACCEPT 0x264
9742 +#define KEY_KBDINPUTASSIST_CANCEL 0x265
9743 +
9744 #define BTN_TRIGGER_HAPPY 0x2c0
9745 #define BTN_TRIGGER_HAPPY1 0x2c0
9746 #define BTN_TRIGGER_HAPPY2 0x2c1
9747 diff --git a/kernel/events/core.c b/kernel/events/core.c
9748 index 963bf139e2b2..658f232af04c 100644
9749 --- a/kernel/events/core.c
9750 +++ b/kernel/events/core.c
9751 @@ -902,13 +902,23 @@ static void put_ctx(struct perf_event_context *ctx)
9752 }
9753 }
9754
9755 -static void unclone_ctx(struct perf_event_context *ctx)
9756 +/*
9757 + * This must be done under the ctx->lock, such as to serialize against
9758 + * context_equiv(), therefore we cannot call put_ctx() since that might end up
9759 + * calling scheduler related locks and ctx->lock nests inside those.
9760 + */
9761 +static __must_check struct perf_event_context *
9762 +unclone_ctx(struct perf_event_context *ctx)
9763 {
9764 - if (ctx->parent_ctx) {
9765 - put_ctx(ctx->parent_ctx);
9766 + struct perf_event_context *parent_ctx = ctx->parent_ctx;
9767 +
9768 + lockdep_assert_held(&ctx->lock);
9769 +
9770 + if (parent_ctx)
9771 ctx->parent_ctx = NULL;
9772 - }
9773 ctx->generation++;
9774 +
9775 + return parent_ctx;
9776 }
9777
9778 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
9779 @@ -2210,6 +2220,9 @@ static void ctx_sched_out(struct perf_event_context *ctx,
9780 static int context_equiv(struct perf_event_context *ctx1,
9781 struct perf_event_context *ctx2)
9782 {
9783 + lockdep_assert_held(&ctx1->lock);
9784 + lockdep_assert_held(&ctx2->lock);
9785 +
9786 /* Pinning disables the swap optimization */
9787 if (ctx1->pin_count || ctx2->pin_count)
9788 return 0;
9789 @@ -2943,6 +2956,7 @@ static int event_enable_on_exec(struct perf_event *event,
9790 */
9791 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
9792 {
9793 + struct perf_event_context *clone_ctx = NULL;
9794 struct perf_event *event;
9795 unsigned long flags;
9796 int enabled = 0;
9797 @@ -2974,7 +2988,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
9798 * Unclone this context if we enabled any event.
9799 */
9800 if (enabled)
9801 - unclone_ctx(ctx);
9802 + clone_ctx = unclone_ctx(ctx);
9803
9804 raw_spin_unlock(&ctx->lock);
9805
9806 @@ -2984,6 +2998,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
9807 perf_event_context_sched_in(ctx, ctx->task);
9808 out:
9809 local_irq_restore(flags);
9810 +
9811 + if (clone_ctx)
9812 + put_ctx(clone_ctx);
9813 }
9814
9815 void perf_event_exec(void)
9816 @@ -3135,7 +3152,7 @@ errout:
9817 static struct perf_event_context *
9818 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
9819 {
9820 - struct perf_event_context *ctx;
9821 + struct perf_event_context *ctx, *clone_ctx = NULL;
9822 struct perf_cpu_context *cpuctx;
9823 unsigned long flags;
9824 int ctxn, err;
9825 @@ -3169,9 +3186,12 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
9826 retry:
9827 ctx = perf_lock_task_context(task, ctxn, &flags);
9828 if (ctx) {
9829 - unclone_ctx(ctx);
9830 + clone_ctx = unclone_ctx(ctx);
9831 ++ctx->pin_count;
9832 raw_spin_unlock_irqrestore(&ctx->lock, flags);
9833 +
9834 + if (clone_ctx)
9835 + put_ctx(clone_ctx);
9836 } else {
9837 ctx = alloc_perf_context(pmu, task);
9838 err = -ENOMEM;
9839 @@ -7523,7 +7543,7 @@ __perf_event_exit_task(struct perf_event *child_event,
9840 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9841 {
9842 struct perf_event *child_event, *next;
9843 - struct perf_event_context *child_ctx, *parent_ctx;
9844 + struct perf_event_context *child_ctx, *clone_ctx = NULL;
9845 unsigned long flags;
9846
9847 if (likely(!child->perf_event_ctxp[ctxn])) {
9848 @@ -7550,28 +7570,16 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
9849 child->perf_event_ctxp[ctxn] = NULL;
9850
9851 /*
9852 - * In order to avoid freeing: child_ctx->parent_ctx->task
9853 - * under perf_event_context::lock, grab another reference.
9854 - */
9855 - parent_ctx = child_ctx->parent_ctx;
9856 - if (parent_ctx)
9857 - get_ctx(parent_ctx);
9858 -
9859 - /*
9860 * If this context is a clone; unclone it so it can't get
9861 * swapped to another process while we're removing all
9862 * the events from it.
9863 */
9864 - unclone_ctx(child_ctx);
9865 + clone_ctx = unclone_ctx(child_ctx);
9866 update_context_time(child_ctx);
9867 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
9868
9869 - /*
9870 - * Now that we no longer hold perf_event_context::lock, drop
9871 - * our extra child_ctx->parent_ctx reference.
9872 - */
9873 - if (parent_ctx)
9874 - put_ctx(parent_ctx);
9875 + if (clone_ctx)
9876 + put_ctx(clone_ctx);
9877
9878 /*
9879 * Report the task dead after unscheduling the events so that we
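
The unclone_ctx() rework above has the common "do the minimum under the lock, hand the expensive release back to the caller" shape: the function now only detaches the parent context, and put_ctx() runs after ctx->lock has been dropped. A standalone sketch of that shape with a pthread mutex (illustrative names and types, not the perf code):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

struct ctx {
        pthread_mutex_t lock;
        int *parent;            /* stand-in for the cloned parent context */
};

/* Caller must hold c->lock; only unlinks, never frees under the lock. */
static int *unclone(struct ctx *c)
{
        int *parent = c->parent;

        c->parent = NULL;
        return parent;
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .parent = NULL };
        int *stale;

        c.parent = malloc(sizeof(*c.parent));

        pthread_mutex_lock(&c.lock);
        stale = unclone(&c);    /* cheap work inside the critical section */
        pthread_mutex_unlock(&c.lock);

        free(stale);            /* potentially expensive work outside it */
        puts("released after unlock");
        return 0;
}
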
9880 diff --git a/kernel/freezer.c b/kernel/freezer.c
9881 index aa6a8aadb911..8f9279b9c6d7 100644
9882 --- a/kernel/freezer.c
9883 +++ b/kernel/freezer.c
9884 @@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
9885 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
9886 return false;
9887
9888 + if (test_thread_flag(TIF_MEMDIE))
9889 + return false;
9890 +
9891 if (pm_nosig_freezing || cgroup_freezing(p))
9892 return true;
9893
9894 diff --git a/kernel/futex.c b/kernel/futex.c
9895 index f3a3a071283c..22b3f1b58201 100644
9896 --- a/kernel/futex.c
9897 +++ b/kernel/futex.c
9898 @@ -641,8 +641,14 @@ static struct futex_pi_state * alloc_pi_state(void)
9899 return pi_state;
9900 }
9901
9902 +/*
9903 + * Must be called with the hb lock held.
9904 + */
9905 static void free_pi_state(struct futex_pi_state *pi_state)
9906 {
9907 + if (!pi_state)
9908 + return;
9909 +
9910 if (!atomic_dec_and_test(&pi_state->refcount))
9911 return;
9912
9913 @@ -1521,15 +1527,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
9914 }
9915
9916 retry:
9917 - if (pi_state != NULL) {
9918 - /*
9919 - * We will have to lookup the pi_state again, so free this one
9920 - * to keep the accounting correct.
9921 - */
9922 - free_pi_state(pi_state);
9923 - pi_state = NULL;
9924 - }
9925 -
9926 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
9927 if (unlikely(ret != 0))
9928 goto out;
9929 @@ -1619,6 +1616,8 @@ retry_private:
9930 case 0:
9931 break;
9932 case -EFAULT:
9933 + free_pi_state(pi_state);
9934 + pi_state = NULL;
9935 double_unlock_hb(hb1, hb2);
9936 hb_waiters_dec(hb2);
9937 put_futex_key(&key2);
9938 @@ -1634,6 +1633,8 @@ retry_private:
9939 * exit to complete.
9940 * - The user space value changed.
9941 */
9942 + free_pi_state(pi_state);
9943 + pi_state = NULL;
9944 double_unlock_hb(hb1, hb2);
9945 hb_waiters_dec(hb2);
9946 put_futex_key(&key2);
9947 @@ -1710,6 +1711,7 @@ retry_private:
9948 }
9949
9950 out_unlock:
9951 + free_pi_state(pi_state);
9952 double_unlock_hb(hb1, hb2);
9953 hb_waiters_dec(hb2);
9954
9955 @@ -1727,8 +1729,6 @@ out_put_keys:
9956 out_put_key1:
9957 put_futex_key(&key1);
9958 out:
9959 - if (pi_state != NULL)
9960 - free_pi_state(pi_state);
9961 return ret ? ret : task_count;
9962 }
9963
9964 diff --git a/kernel/module.c b/kernel/module.c
9965 index 03214bd288e9..1c47139d161c 100644
9966 --- a/kernel/module.c
9967 +++ b/kernel/module.c
9968 @@ -1842,7 +1842,9 @@ static void free_module(struct module *mod)
9969
9970 /* We leave it in list to prevent duplicate loads, but make sure
9971 * that noone uses it while it's being deconstructed. */
9972 + mutex_lock(&module_mutex);
9973 mod->state = MODULE_STATE_UNFORMED;
9974 + mutex_unlock(&module_mutex);
9975
9976 /* Remove dynamic debug info */
9977 ddebug_remove_module(mod->name);
9978 diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
9979 index a9dfa79b6bab..1f35a3478f3c 100644
9980 --- a/kernel/power/hibernate.c
9981 +++ b/kernel/power/hibernate.c
9982 @@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
9983 error = dpm_suspend_start(PMSG_QUIESCE);
9984 if (!error) {
9985 error = resume_target_kernel(platform_mode);
9986 - dpm_resume_end(PMSG_RECOVER);
9987 + /*
9988 + * The above should either succeed and jump to the new kernel,
9989 + * or return with an error. Otherwise things are just
9990 + * undefined, so let's be paranoid.
9991 + */
9992 + BUG_ON(!error);
9993 }
9994 + dpm_resume_end(PMSG_RECOVER);
9995 pm_restore_gfp_mask();
9996 resume_console();
9997 pm_restore_console();
9998 diff --git a/kernel/power/process.c b/kernel/power/process.c
9999 index 4ee194eb524b..7a37cf3eb1a2 100644
10000 --- a/kernel/power/process.c
10001 +++ b/kernel/power/process.c
10002 @@ -108,6 +108,28 @@ static int try_to_freeze_tasks(bool user_only)
10003 return todo ? -EBUSY : 0;
10004 }
10005
10006 +/*
10007 + * Returns true if all freezable tasks (except for current) are frozen already
10008 + */
10009 +static bool check_frozen_processes(void)
10010 +{
10011 + struct task_struct *g, *p;
10012 + bool ret = true;
10013 +
10014 + read_lock(&tasklist_lock);
10015 + for_each_process_thread(g, p) {
10016 + if (p != current && !freezer_should_skip(p) &&
10017 + !frozen(p)) {
10018 + ret = false;
10019 + goto done;
10020 + }
10021 + }
10022 +done:
10023 + read_unlock(&tasklist_lock);
10024 +
10025 + return ret;
10026 +}
10027 +
10028 /**
10029 * freeze_processes - Signal user space processes to enter the refrigerator.
10030 * The current thread will not be frozen. The same process that calls
10031 @@ -118,6 +140,7 @@ static int try_to_freeze_tasks(bool user_only)
10032 int freeze_processes(void)
10033 {
10034 int error;
10035 + int oom_kills_saved;
10036
10037 error = __usermodehelper_disable(UMH_FREEZING);
10038 if (error)
10039 @@ -131,12 +154,27 @@ int freeze_processes(void)
10040
10041 printk("Freezing user space processes ... ");
10042 pm_freezing = true;
10043 + oom_kills_saved = oom_kills_count();
10044 error = try_to_freeze_tasks(true);
10045 if (!error) {
10046 - printk("done.");
10047 __usermodehelper_set_disable_depth(UMH_DISABLED);
10048 oom_killer_disable();
10049 +
10050 + /*
10051 + * There might have been an OOM kill while we were
10052 + * freezing tasks and the killed task might be still
10053 + * on the way out so we have to double check for race.
10054 + */
10055 + if (oom_kills_count() != oom_kills_saved &&
10056 + !check_frozen_processes()) {
10057 + __usermodehelper_set_disable_depth(UMH_ENABLED);
10058 + printk("OOM in progress.");
10059 + error = -EBUSY;
10060 + goto done;
10061 + }
10062 + printk("done.");
10063 }
10064 +done:
10065 printk("\n");
10066 BUG_ON(in_atomic());
10067
10068 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
10069 index ec1a286684a5..6d7cb9123dec 100644
10070 --- a/kernel/sched/core.c
10071 +++ b/kernel/sched/core.c
10072 @@ -1977,6 +1977,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
10073 #ifdef CONFIG_SMP
10074 inline struct dl_bw *dl_bw_of(int i)
10075 {
10076 + rcu_lockdep_assert(rcu_read_lock_sched_held(),
10077 + "sched RCU must be held");
10078 return &cpu_rq(i)->rd->dl_bw;
10079 }
10080
10081 @@ -1985,6 +1987,8 @@ static inline int dl_bw_cpus(int i)
10082 struct root_domain *rd = cpu_rq(i)->rd;
10083 int cpus = 0;
10084
10085 + rcu_lockdep_assert(rcu_read_lock_sched_held(),
10086 + "sched RCU must be held");
10087 for_each_cpu_and(i, rd->span, cpu_active_mask)
10088 cpus++;
10089
10090 @@ -4004,13 +4008,14 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
10091 * root_domain.
10092 */
10093 #ifdef CONFIG_SMP
10094 - if (task_has_dl_policy(p)) {
10095 - const struct cpumask *span = task_rq(p)->rd->span;
10096 -
10097 - if (dl_bandwidth_enabled() && !cpumask_subset(span, new_mask)) {
10098 + if (task_has_dl_policy(p) && dl_bandwidth_enabled()) {
10099 + rcu_read_lock();
10100 + if (!cpumask_subset(task_rq(p)->rd->span, new_mask)) {
10101 retval = -EBUSY;
10102 + rcu_read_unlock();
10103 goto out_unlock;
10104 }
10105 + rcu_read_unlock();
10106 }
10107 #endif
10108 again:
10109 @@ -7580,6 +7585,8 @@ static int sched_dl_global_constraints(void)
10110 int cpu, ret = 0;
10111 unsigned long flags;
10112
10113 + rcu_read_lock();
10114 +
10115 /*
10116 * Here we want to check the bandwidth not being set to some
10117 * value smaller than the currently allocated bandwidth in
10118 @@ -7601,6 +7608,8 @@ static int sched_dl_global_constraints(void)
10119 break;
10120 }
10121
10122 + rcu_read_unlock();
10123 +
10124 return ret;
10125 }
10126
10127 @@ -7616,6 +7625,7 @@ static void sched_dl_do_global(void)
10128 if (global_rt_runtime() != RUNTIME_INF)
10129 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
10130
10131 + rcu_read_lock();
10132 /*
10133 * FIXME: As above...
10134 */
10135 @@ -7626,6 +7636,7 @@ static void sched_dl_do_global(void)
10136 dl_b->bw = new_bw;
10137 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
10138 }
10139 + rcu_read_unlock();
10140 }
10141
10142 static int sched_rt_global_validate(void)
10143 diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
10144 index 42b463ad90f2..31ea01f42e1f 100644
10145 --- a/kernel/time/posix-timers.c
10146 +++ b/kernel/time/posix-timers.c
10147 @@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
10148 goto out;
10149 }
10150 } else {
10151 + memset(&event.sigev_value, 0, sizeof(event.sigev_value));
10152 event.sigev_notify = SIGEV_SIGNAL;
10153 event.sigev_signo = SIGALRM;
10154 event.sigev_value.sival_int = new_timer->it_id;
10155 diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
10156 index 759d5e004517..7e3cd7aaec83 100644
10157 --- a/kernel/trace/trace_syscalls.c
10158 +++ b/kernel/trace/trace_syscalls.c
10159 @@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
10160 int size;
10161
10162 syscall_nr = trace_get_syscall_nr(current, regs);
10163 - if (syscall_nr < 0)
10164 + if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
10165 return;
10166
10167 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
10168 @@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
10169 int syscall_nr;
10170
10171 syscall_nr = trace_get_syscall_nr(current, regs);
10172 - if (syscall_nr < 0)
10173 + if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
10174 return;
10175
10176 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
10177 @@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
10178 int size;
10179
10180 syscall_nr = trace_get_syscall_nr(current, regs);
10181 - if (syscall_nr < 0)
10182 + if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
10183 return;
10184 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
10185 return;
10186 @@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
10187 int size;
10188
10189 syscall_nr = trace_get_syscall_nr(current, regs);
10190 - if (syscall_nr < 0)
10191 + if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
10192 return;
10193 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
10194 return;
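
Each hunk in trace_syscalls.c widens the same check: a syscall number that is later used to index per-syscall tables has to be rejected both when it is negative and when it is at or beyond NR_syscalls. A trivial standalone sketch of the two-sided bounds check:

#include <stdio.h>

#define NR_ENTRIES 16

static const char *table[NR_ENTRIES];

static const char *lookup(int nr)
{
        if (nr < 0 || nr >= NR_ENTRIES)         /* reject both ends of the range */
                return NULL;
        return table[nr];
}

int main(void)
{
        table[3] = "read";
        printf("%s\n", lookup(3)  ? lookup(3)  : "(out of range)");
        printf("%s\n", lookup(99) ? lookup(99) : "(out of range)");
        return 0;
}
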
10195 diff --git a/lib/bitmap.c b/lib/bitmap.c
10196 index 1e031f2c9aba..33ce01178b43 100644
10197 --- a/lib/bitmap.c
10198 +++ b/lib/bitmap.c
10199 @@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
10200 lower = src[off + k];
10201 if (left && off + k == lim - 1)
10202 lower &= mask;
10203 - dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
10204 + dst[k] = lower >> rem;
10205 + if (rem)
10206 + dst[k] |= upper << (BITS_PER_LONG - rem);
10207 if (left && k == lim - 1)
10208 dst[k] &= mask;
10209 }
10210 @@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
10211 upper = src[k];
10212 if (left && k == lim - 1)
10213 upper &= (1UL << left) - 1;
10214 - dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
10215 + dst[k + off] = upper << rem;
10216 + if (rem)
10217 + dst[k + off] |= lower >> (BITS_PER_LONG - rem);
10218 if (left && k + off == lim - 1)
10219 dst[k + off] &= (1UL << left) - 1;
10220 }
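
Both bitmap hunks above guard the word-combining shift with "if (rem)": when rem is 0 the old expression shifted an unsigned long by BITS_PER_LONG, which is undefined behaviour in C rather than a guaranteed zero. A standalone sketch of the guarded form for one destination word (not the lib/bitmap.c code itself):

#include <stdio.h>
#include <limits.h>

static unsigned long shift_right_word(unsigned long upper, unsigned long lower,
                                      unsigned int rem)
{
        unsigned long dst = lower >> rem;

        if (rem)        /* avoid "<< (word bits - 0)" when rem == 0 */
                dst |= upper << ((unsigned)(sizeof(unsigned long) * CHAR_BIT) - rem);
        return dst;
}

int main(void)
{
        /* rem == 0: result must be exactly "lower", untouched by "upper" */
        printf("%lx\n", shift_right_word(0xffffffffUL, 0x1234UL, 0));
        /* rem == 4: the low bits of "upper" slide into the top of the result */
        printf("%lx\n", shift_right_word(0xfUL, 0x1230UL, 4));
        return 0;
}
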
10221 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
10222 index 9cdf62f8accd..c9f2e8c6ccc9 100644
10223 --- a/lib/scatterlist.c
10224 +++ b/lib/scatterlist.c
10225 @@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
10226 }
10227
10228 table->orig_nents -= sg_size;
10229 - if (!skip_first_chunk) {
10230 - free_fn(sgl, alloc_size);
10231 + if (skip_first_chunk)
10232 skip_first_chunk = false;
10233 - }
10234 + else
10235 + free_fn(sgl, alloc_size);
10236 sgl = next;
10237 }
10238
10239 diff --git a/lib/string.c b/lib/string.c
10240 index f3c6ff596414..70db57a81f7c 100644
10241 --- a/lib/string.c
10242 +++ b/lib/string.c
10243 @@ -604,6 +604,22 @@ void *memset(void *s, int c, size_t count)
10244 EXPORT_SYMBOL(memset);
10245 #endif
10246
10247 +/**
10248 + * memzero_explicit - Fill a region of memory (e.g. sensitive
10249 + * keying data) with 0s.
10250 + * @s: Pointer to the start of the area.
10251 + * @count: The size of the area.
10252 + *
10253 + * memzero_explicit() doesn't need an arch-specific version as
10254 + * it just invokes the one of memset() implicitly.
10255 + */
10256 +void memzero_explicit(void *s, size_t count)
10257 +{
10258 + memset(s, 0, count);
10259 + OPTIMIZER_HIDE_VAR(s);
10260 +}
10261 +EXPORT_SYMBOL(memzero_explicit);
10262 +
10263 #ifndef __HAVE_ARCH_MEMCPY
10264 /**
10265 * memcpy - Copy one area of memory to another
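
memzero_explicit() above exists because a compiler may delete a memset() whose result is provably never read again; the kernel keeps the store by hiding the pointer from the optimizer afterwards. A standalone userspace sketch of the same concern, using a volatile function pointer as one common way to keep the wipe (an illustration, not the kernel helper):

#include <string.h>
#include <stdio.h>

static void *(*const volatile memset_keep)(void *, int, size_t) = memset;

static void wipe(void *s, size_t n)
{
        memset_keep(s, 0, n);   /* the compiler cannot prove this call is dead */
}

int main(void)
{
        char key[32] = "not really a secret";

        /* ... use key ... */
        wipe(key, sizeof(key));
        printf("first byte after wipe: %d\n", key[0]);  /* 0 */
        return 0;
}
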
10266 diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
10267 index 52abeeb3cb9d..1811ea2c6302 100644
10268 --- a/mm/balloon_compaction.c
10269 +++ b/mm/balloon_compaction.c
10270 @@ -93,11 +93,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
10271 * to be released by the balloon driver.
10272 */
10273 if (trylock_page(page)) {
10274 +#ifdef CONFIG_BALLOON_COMPACTION
10275 if (!PagePrivate(page)) {
10276 /* raced with isolation */
10277 unlock_page(page);
10278 continue;
10279 }
10280 +#endif
10281 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
10282 balloon_page_delete(page);
10283 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
10284 diff --git a/mm/cma.c b/mm/cma.c
10285 index 0ab564623ea8..2904f45beab7 100644
10286 --- a/mm/cma.c
10287 +++ b/mm/cma.c
10288 @@ -123,6 +123,7 @@ static int __init cma_activate_area(struct cma *cma)
10289
10290 err:
10291 kfree(cma->bitmap);
10292 + cma->count = 0;
10293 return -EINVAL;
10294 }
10295
10296 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
10297 index f8ffd9412ec5..45c6d6738dfa 100644
10298 --- a/mm/huge_memory.c
10299 +++ b/mm/huge_memory.c
10300 @@ -200,7 +200,7 @@ retry:
10301 preempt_disable();
10302 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
10303 preempt_enable();
10304 - __free_page(zero_page);
10305 + __free_pages(zero_page, compound_order(zero_page));
10306 goto retry;
10307 }
10308
10309 @@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
10310 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
10311 struct page *zero_page = xchg(&huge_zero_page, NULL);
10312 BUG_ON(zero_page == NULL);
10313 - __free_page(zero_page);
10314 + __free_pages(zero_page, compound_order(zero_page));
10315 return HPAGE_PMD_NR;
10316 }
10317
10318 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
10319 index 28928ce9b07f..48914e124e57 100644
10320 --- a/mm/memcontrol.c
10321 +++ b/mm/memcontrol.c
10322 @@ -1545,12 +1545,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
10323 * start move here.
10324 */
10325
10326 -/* for quick checking without looking up memcg */
10327 -atomic_t memcg_moving __read_mostly;
10328 -
10329 static void mem_cgroup_start_move(struct mem_cgroup *memcg)
10330 {
10331 - atomic_inc(&memcg_moving);
10332 atomic_inc(&memcg->moving_account);
10333 synchronize_rcu();
10334 }
10335 @@ -1561,10 +1557,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
10336 * Now, mem_cgroup_clear_mc() may call this function with NULL.
10337 * We check NULL in callee rather than caller.
10338 */
10339 - if (memcg) {
10340 - atomic_dec(&memcg_moving);
10341 + if (memcg)
10342 atomic_dec(&memcg->moving_account);
10343 - }
10344 }
10345
10346 /*
10347 @@ -2249,41 +2243,52 @@ cleanup:
10348 return true;
10349 }
10350
10351 -/*
10352 - * Used to update mapped file or writeback or other statistics.
10353 +/**
10354 + * mem_cgroup_begin_page_stat - begin a page state statistics transaction
10355 + * @page: page that is going to change accounted state
10356 + * @locked: &memcg->move_lock slowpath was taken
10357 + * @flags: IRQ-state flags for &memcg->move_lock
10358 *
10359 - * Notes: Race condition
10360 + * This function must mark the beginning of an accounted page state
10361 + * change to prevent double accounting when the page is concurrently
10362 + * being moved to another memcg:
10363 *
10364 - * Charging occurs during page instantiation, while the page is
10365 - * unmapped and locked in page migration, or while the page table is
10366 - * locked in THP migration. No race is possible.
10367 + * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
10368 + * if (TestClearPageState(page))
10369 + * mem_cgroup_update_page_stat(memcg, state, -1);
10370 + * mem_cgroup_end_page_stat(memcg, locked, flags);
10371 *
10372 - * Uncharge happens to pages with zero references, no race possible.
10373 + * The RCU lock is held throughout the transaction. The fast path can
10374 + * get away without acquiring the memcg->move_lock (@locked is false)
10375 + * because page moving starts with an RCU grace period.
10376 *
10377 - * Charge moving between groups is protected by checking mm->moving
10378 - * account and taking the move_lock in the slowpath.
10379 + * The RCU lock also protects the memcg from being freed when the page
10380 + * state that is going to change is the only thing preventing the page
10381 + * from being uncharged. E.g. end-writeback clearing PageWriteback(),
10382 + * which allows migration to go ahead and uncharge the page before the
10383 + * account transaction might be complete.
10384 */
10385 -
10386 -void __mem_cgroup_begin_update_page_stat(struct page *page,
10387 - bool *locked, unsigned long *flags)
10388 +struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
10389 + bool *locked,
10390 + unsigned long *flags)
10391 {
10392 struct mem_cgroup *memcg;
10393 struct page_cgroup *pc;
10394
10395 + rcu_read_lock();
10396 +
10397 + if (mem_cgroup_disabled())
10398 + return NULL;
10399 +
10400 pc = lookup_page_cgroup(page);
10401 again:
10402 memcg = pc->mem_cgroup;
10403 if (unlikely(!memcg || !PageCgroupUsed(pc)))
10404 - return;
10405 - /*
10406 - * If this memory cgroup is not under account moving, we don't
10407 - * need to take move_lock_mem_cgroup(). Because we already hold
10408 - * rcu_read_lock(), any calls to move_account will be delayed until
10409 - * rcu_read_unlock().
10410 - */
10411 - VM_BUG_ON(!rcu_read_lock_held());
10412 + return NULL;
10413 +
10414 + *locked = false;
10415 if (atomic_read(&memcg->moving_account) <= 0)
10416 - return;
10417 + return memcg;
10418
10419 move_lock_mem_cgroup(memcg, flags);
10420 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
10421 @@ -2291,36 +2296,40 @@ again:
10422 goto again;
10423 }
10424 *locked = true;
10425 +
10426 + return memcg;
10427 }
10428
10429 -void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags)
10430 +/**
10431 + * mem_cgroup_end_page_stat - finish a page state statistics transaction
10432 + * @memcg: the memcg that was accounted against
10433 + * @locked: value received from mem_cgroup_begin_page_stat()
10434 + * @flags: value received from mem_cgroup_begin_page_stat()
10435 + */
10436 +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
10437 + unsigned long flags)
10438 {
10439 - struct page_cgroup *pc = lookup_page_cgroup(page);
10440 + if (memcg && locked)
10441 + move_unlock_mem_cgroup(memcg, &flags);
10442
10443 - /*
10444 - * It's guaranteed that pc->mem_cgroup never changes while
10445 - * lock is held because a routine modifies pc->mem_cgroup
10446 - * should take move_lock_mem_cgroup().
10447 - */
10448 - move_unlock_mem_cgroup(pc->mem_cgroup, flags);
10449 + rcu_read_unlock();
10450 }
10451
10452 -void mem_cgroup_update_page_stat(struct page *page,
10453 +/**
10454 + * mem_cgroup_update_page_stat - update page state statistics
10455 + * @memcg: memcg to account against
10456 + * @idx: page state item to account
10457 + * @val: number of pages (positive or negative)
10458 + *
10459 + * See mem_cgroup_begin_page_stat() for locking requirements.
10460 + */
10461 +void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
10462 enum mem_cgroup_stat_index idx, int val)
10463 {
10464 - struct mem_cgroup *memcg;
10465 - struct page_cgroup *pc = lookup_page_cgroup(page);
10466 - unsigned long uninitialized_var(flags);
10467 -
10468 - if (mem_cgroup_disabled())
10469 - return;
10470 -
10471 VM_BUG_ON(!rcu_read_lock_held());
10472 - memcg = pc->mem_cgroup;
10473 - if (unlikely(!memcg || !PageCgroupUsed(pc)))
10474 - return;
10475
10476 - this_cpu_add(memcg->stat->count[idx], val);
10477 + if (memcg)
10478 + this_cpu_add(memcg->stat->count[idx], val);
10479 }
10480
10481 /*
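
A minimal caller sketch of the new page-stat API introduced in the memcontrol.c hunk above; hypothetical kernel-context code modeled on the kerneldoc example and on test_clear_page_writeback() later in this patch, not part of the patch itself:

/* Hypothetical caller, kernel context assumed; mirrors the kerneldoc
 * example in mem_cgroup_begin_page_stat() above. */
static void example_end_writeback_stat(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	/* Takes rcu_read_lock(); memcg->move_lock only on the slow path
	 * (reported back through @locked). */
	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

	if (TestClearPageWriteback(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);

	/* Drops move_lock if it was taken, then rcu_read_unlock(). */
	mem_cgroup_end_page_stat(memcg, locked, flags);
}
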
10482 diff --git a/mm/memory.c b/mm/memory.c
10483 index e229970e4223..37b80fc3a9b6 100644
10484 --- a/mm/memory.c
10485 +++ b/mm/memory.c
10486 @@ -1147,6 +1147,7 @@ again:
10487 print_bad_pte(vma, addr, ptent, page);
10488 if (unlikely(!__tlb_remove_page(tlb, page))) {
10489 force_flush = 1;
10490 + addr += PAGE_SIZE;
10491 break;
10492 }
10493 continue;
10494 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
10495 index 1e11df8fa7ec..f1fb141720b4 100644
10496 --- a/mm/oom_kill.c
10497 +++ b/mm/oom_kill.c
10498 @@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
10499 dump_tasks(memcg, nodemask);
10500 }
10501
10502 +/*
10503 + * Number of OOM killer invocations (including memcg OOM killer).
10504 + * Primarily used by PM freezer to check for potential races with
10505 + * OOM killed frozen task.
10506 + */
10507 +static atomic_t oom_kills = ATOMIC_INIT(0);
10508 +
10509 +int oom_kills_count(void)
10510 +{
10511 + return atomic_read(&oom_kills);
10512 +}
10513 +
10514 +void note_oom_kill(void)
10515 +{
10516 + atomic_inc(&oom_kills);
10517 +}
10518 +
10519 #define K(x) ((x) << (PAGE_SHIFT-10))
10520 /*
10521 * Must be called while holding a reference to p, which will be released upon
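
The oom_kills counter added above exists for the PM freezer (see the page_alloc.c hunk further down, which points at freeze_processes). A hedged userspace analogue of that consumer; the freeze step and all names here are illustrative stubs, not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int oom_kills;

static int  oom_kills_count(void) { return atomic_load(&oom_kills); }
static void note_oom_kill(void)   { atomic_fetch_add(&oom_kills, 1); }

/* Illustrative stand-in for the real freezing work. */
static bool freeze_all_tasks(void) { return true; }

static bool try_to_freeze_tasks(void)
{
	int before = oom_kills_count();

	if (!freeze_all_tasks())
		return false;
	/* An OOM kill that raced with freezing may have woken a frozen
	 * task; treat that as a failed (retryable) freeze attempt. */
	return oom_kills_count() == before;
}

int main(void)
{
	note_oom_kill();		/* pretend an OOM kill happened earlier */
	return try_to_freeze_tasks() ? 0 : 1;
}
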
10522 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
10523 index 91d73ef1744d..ba5fd97d8cbc 100644
10524 --- a/mm/page-writeback.c
10525 +++ b/mm/page-writeback.c
10526 @@ -2116,23 +2116,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
10527 EXPORT_SYMBOL(account_page_dirtied);
10528
10529 /*
10530 - * Helper function for set_page_writeback family.
10531 - *
10532 - * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
10533 - * while calling this function.
10534 - * See test_set_page_writeback for example.
10535 - *
10536 - * NOTE: Unlike account_page_dirtied this does not rely on being atomic
10537 - * wrt interrupts.
10538 - */
10539 -void account_page_writeback(struct page *page)
10540 -{
10541 - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
10542 - inc_zone_page_state(page, NR_WRITEBACK);
10543 -}
10544 -EXPORT_SYMBOL(account_page_writeback);
10545 -
10546 -/*
10547 * For address_spaces which do not use buffers. Just tag the page as dirty in
10548 * its radix tree.
10549 *
10550 @@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
10551 int test_clear_page_writeback(struct page *page)
10552 {
10553 struct address_space *mapping = page_mapping(page);
10554 - int ret;
10555 - bool locked;
10556 unsigned long memcg_flags;
10557 + struct mem_cgroup *memcg;
10558 + bool locked;
10559 + int ret;
10560
10561 - mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
10562 + memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
10563 if (mapping) {
10564 struct backing_dev_info *bdi = mapping->backing_dev_info;
10565 unsigned long flags;
10566 @@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
10567 ret = TestClearPageWriteback(page);
10568 }
10569 if (ret) {
10570 - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
10571 + mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
10572 dec_zone_page_state(page, NR_WRITEBACK);
10573 inc_zone_page_state(page, NR_WRITTEN);
10574 }
10575 - mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
10576 + mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
10577 return ret;
10578 }
10579
10580 int __test_set_page_writeback(struct page *page, bool keep_write)
10581 {
10582 struct address_space *mapping = page_mapping(page);
10583 - int ret;
10584 - bool locked;
10585 unsigned long memcg_flags;
10586 + struct mem_cgroup *memcg;
10587 + bool locked;
10588 + int ret;
10589
10590 - mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags);
10591 + memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
10592 if (mapping) {
10593 struct backing_dev_info *bdi = mapping->backing_dev_info;
10594 unsigned long flags;
10595 @@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
10596 } else {
10597 ret = TestSetPageWriteback(page);
10598 }
10599 - if (!ret)
10600 - account_page_writeback(page);
10601 - mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags);
10602 + if (!ret) {
10603 + mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
10604 + inc_zone_page_state(page, NR_WRITEBACK);
10605 + }
10606 + mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
10607 return ret;
10608
10609 }
10610 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
10611 index eee961958021..8c5029f22bbe 100644
10612 --- a/mm/page_alloc.c
10613 +++ b/mm/page_alloc.c
10614 @@ -2253,6 +2253,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
10615 }
10616
10617 /*
10618 + * PM-freezer should be notified that there might be an OOM killer on
10619 + * its way to kill and wake somebody up. This is too early and we might
10620 + * end up not killing anything but false positives are acceptable.
10621 + * See freeze_processes.
10622 + */
10623 + note_oom_kill();
10624 +
10625 + /*
10626 * Go through the zonelist yet one more time, keep very high watermark
10627 * here, this is only to catch a parallel oom killing, we must fail if
10628 * we're still under heavy pressure.
10629 diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
10630 index 3708264d2833..5331c2bd85a2 100644
10631 --- a/mm/page_cgroup.c
10632 +++ b/mm/page_cgroup.c
10633 @@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
10634 sizeof(struct page_cgroup) * PAGES_PER_SECTION;
10635
10636 BUG_ON(PageReserved(page));
10637 + kmemleak_free(addr);
10638 free_pages_exact(addr, table_size);
10639 }
10640 }
10641 diff --git a/mm/percpu.c b/mm/percpu.c
10642 index da997f9800bd..2139e30a4b44 100644
10643 --- a/mm/percpu.c
10644 +++ b/mm/percpu.c
10645 @@ -1932,8 +1932,6 @@ void __init setup_per_cpu_areas(void)
10646
10647 if (pcpu_setup_first_chunk(ai, fc) < 0)
10648 panic("Failed to initialize percpu areas.");
10649 -
10650 - pcpu_free_alloc_info(ai);
10651 }
10652
10653 #endif /* CONFIG_SMP */
10654 diff --git a/mm/rmap.c b/mm/rmap.c
10655 index 3e8491c504f8..e01318d4b07e 100644
10656 --- a/mm/rmap.c
10657 +++ b/mm/rmap.c
10658 @@ -1042,15 +1042,16 @@ void page_add_new_anon_rmap(struct page *page,
10659 */
10660 void page_add_file_rmap(struct page *page)
10661 {
10662 - bool locked;
10663 + struct mem_cgroup *memcg;
10664 unsigned long flags;
10665 + bool locked;
10666
10667 - mem_cgroup_begin_update_page_stat(page, &locked, &flags);
10668 + memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
10669 if (atomic_inc_and_test(&page->_mapcount)) {
10670 __inc_zone_page_state(page, NR_FILE_MAPPED);
10671 - mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
10672 + mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
10673 }
10674 - mem_cgroup_end_update_page_stat(page, &locked, &flags);
10675 + mem_cgroup_end_page_stat(memcg, locked, flags);
10676 }
10677
10678 /**
10679 @@ -1061,9 +1062,10 @@ void page_add_file_rmap(struct page *page)
10680 */
10681 void page_remove_rmap(struct page *page)
10682 {
10683 + struct mem_cgroup *uninitialized_var(memcg);
10684 bool anon = PageAnon(page);
10685 - bool locked;
10686 unsigned long flags;
10687 + bool locked;
10688
10689 /*
10690 * The anon case has no mem_cgroup page_stat to update; but may
10691 @@ -1071,7 +1073,7 @@ void page_remove_rmap(struct page *page)
10692 * we hold the lock against page_stat move: so avoid it on anon.
10693 */
10694 if (!anon)
10695 - mem_cgroup_begin_update_page_stat(page, &locked, &flags);
10696 + memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
10697
10698 /* page still mapped by someone else? */
10699 if (!atomic_add_negative(-1, &page->_mapcount))
10700 @@ -1096,8 +1098,7 @@ void page_remove_rmap(struct page *page)
10701 -hpage_nr_pages(page));
10702 } else {
10703 __dec_zone_page_state(page, NR_FILE_MAPPED);
10704 - mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
10705 - mem_cgroup_end_update_page_stat(page, &locked, &flags);
10706 + mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
10707 }
10708 if (unlikely(PageMlocked(page)))
10709 clear_page_mlock(page);
10710 @@ -1110,10 +1111,9 @@ void page_remove_rmap(struct page *page)
10711 * Leaving it set also helps swapoff to reinstate ptes
10712 * faster for those pages still in swapcache.
10713 */
10714 - return;
10715 out:
10716 if (!anon)
10717 - mem_cgroup_end_update_page_stat(page, &locked, &flags);
10718 + mem_cgroup_end_page_stat(memcg, locked, flags);
10719 }
10720
10721 /*
10722 diff --git a/mm/truncate.c b/mm/truncate.c
10723 index 96d167372d89..c646084e5eec 100644
10724 --- a/mm/truncate.c
10725 +++ b/mm/truncate.c
10726 @@ -20,6 +20,7 @@
10727 #include <linux/buffer_head.h> /* grr. try_to_release_page,
10728 do_invalidatepage */
10729 #include <linux/cleancache.h>
10730 +#include <linux/rmap.h>
10731 #include "internal.h"
10732
10733 static void clear_exceptional_entry(struct address_space *mapping,
10734 @@ -719,12 +720,67 @@ EXPORT_SYMBOL(truncate_pagecache);
10735 */
10736 void truncate_setsize(struct inode *inode, loff_t newsize)
10737 {
10738 + loff_t oldsize = inode->i_size;
10739 +
10740 i_size_write(inode, newsize);
10741 + if (newsize > oldsize)
10742 + pagecache_isize_extended(inode, oldsize, newsize);
10743 truncate_pagecache(inode, newsize);
10744 }
10745 EXPORT_SYMBOL(truncate_setsize);
10746
10747 /**
10748 + * pagecache_isize_extended - update pagecache after extension of i_size
10749 + * @inode: inode for which i_size was extended
10750 + * @from: original inode size
10751 + * @to: new inode size
10752 + *
10753 + * Handle extension of inode size either caused by extending truncate or by
10754 + * write starting after current i_size. We mark the page straddling current
10755 + * i_size RO so that page_mkwrite() is called on the nearest write access to
10756 + * the page. This way filesystem can be sure that page_mkwrite() is called on
10757 + * the page before user writes to the page via mmap after the i_size has been
10758 + * changed.
10759 + *
10760 + * The function must be called after i_size is updated so that page fault
10761 + * coming after we unlock the page will already see the new i_size.
10762 + * The function must be called while we still hold i_mutex - this not only
10763 + * makes sure i_size is stable but also that userspace cannot observe new
10764 + * i_size value before we are prepared to store mmap writes at new inode size.
10765 + */
10766 +void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
10767 +{
10768 + int bsize = 1 << inode->i_blkbits;
10769 + loff_t rounded_from;
10770 + struct page *page;
10771 + pgoff_t index;
10772 +
10773 + WARN_ON(to > inode->i_size);
10774 +
10775 + if (from >= to || bsize == PAGE_CACHE_SIZE)
10776 + return;
10777 + /* Will the page straddling @from have any hole blocks created? */
10778 + rounded_from = round_up(from, bsize);
10779 + if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
10780 + return;
10781 +
10782 + index = from >> PAGE_CACHE_SHIFT;
10783 + page = find_lock_page(inode->i_mapping, index);
10784 + /* Page not cached? Nothing to do */
10785 + if (!page)
10786 + return;
10787 + /*
10788 + * See clear_page_dirty_for_io() for details why set_page_dirty()
10789 + * is needed.
10790 + */
10791 + if (page_mkclean(page))
10792 + set_page_dirty(page);
10793 + unlock_page(page);
10794 + page_cache_release(page);
10795 +}
10796 +EXPORT_SYMBOL(pagecache_isize_extended);
10797 +
10798 +/**
10799 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
10800 * @inode: inode
10801 * @lstart: offset of beginning of hole
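
To make the straddle test in pagecache_isize_extended() above concrete, here is a small self-contained userspace sketch of the same arithmetic. It assumes 4 KiB pages and uses a modulo where the kernel masks with PAGE_CACHE_SIZE - 1:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

static bool needs_page_mkclean(unsigned long long from, unsigned long long to,
			       unsigned long bsize)
{
	unsigned long long rounded_from;

	if (from >= to || bsize == PAGE_SIZE)
		return false;
	rounded_from = round_up(from, bsize);
	/* New blocks start beyond the old size, or exactly on a page
	 * boundary: the page straddling the old i_size gains no hole. */
	if (to <= rounded_from || !(rounded_from % PAGE_SIZE))
		return false;
	return true;
}

int main(void)
{
	/* 1 KiB blocks: extending 5000 -> 9000 leaves a partial block in
	 * the page straddling the old size, so it must be write-protected. */
	printf("%d\n", needs_page_mkclean(5000, 9000, 1024));	/* 1 */
	/* Block size equals page size: nothing to do. */
	printf("%d\n", needs_page_mkclean(5000, 9000, 4096));	/* 0 */
	return 0;
}
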
10802 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
10803 index b2f571dd933d..9f02369d3262 100644
10804 --- a/net/ceph/messenger.c
10805 +++ b/net/ceph/messenger.c
10806 @@ -292,7 +292,11 @@ int ceph_msgr_init(void)
10807 if (ceph_msgr_slab_init())
10808 return -ENOMEM;
10809
10810 - ceph_msgr_wq = alloc_workqueue("ceph-msgr", 0, 0);
10811 + /*
10812 + * The number of active work items is limited by the number of
10813 + * connections, so leave @max_active at default.
10814 + */
10815 + ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
10816 if (ceph_msgr_wq)
10817 return 0;
10818
10819 diff --git a/net/core/tso.c b/net/core/tso.c
10820 index 8c3203c585b0..630b30b4fb53 100644
10821 --- a/net/core/tso.c
10822 +++ b/net/core/tso.c
10823 @@ -1,6 +1,7 @@
10824 #include <linux/export.h>
10825 #include <net/ip.h>
10826 #include <net/tso.h>
10827 +#include <asm/unaligned.h>
10828
10829 /* Calculate expected number of TX descriptors */
10830 int tso_count_descs(struct sk_buff *skb)
10831 @@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
10832 iph->id = htons(tso->ip_id);
10833 iph->tot_len = htons(size + hdr_len - mac_hdr_len);
10834 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
10835 - tcph->seq = htonl(tso->tcp_seq);
10836 + put_unaligned_be32(tso->tcp_seq, &tcph->seq);
10837 tso->ip_id++;
10838
10839 if (!is_last) {
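
The tso_build_hdr() change above avoids a potentially unaligned 32-bit store. A userspace sketch of what put_unaligned_be32() accomplishes, a byte-wise big-endian store that is safe at any address (illustrative, not the kernel implementation):

#include <stdint.h>
#include <string.h>

static void put_be32_unaligned(uint32_t val, void *p)
{
	uint8_t bytes[4] = {
		(uint8_t)(val >> 24), (uint8_t)(val >> 16),
		(uint8_t)(val >> 8),  (uint8_t)val,
	};
	memcpy(p, bytes, sizeof(bytes));	/* no unaligned word access */
}
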
10840 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
10841 index b10cd43a4722..4a74ea85518f 100644
10842 --- a/net/ipv4/fib_semantics.c
10843 +++ b/net/ipv4/fib_semantics.c
10844 @@ -535,7 +535,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
10845 return 1;
10846
10847 attrlen = rtnh_attrlen(rtnh);
10848 - if (attrlen < 0) {
10849 + if (attrlen > 0) {
10850 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
10851
10852 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
10853 diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
10854 index 6556263c8fa5..dd73bea2a65f 100644
10855 --- a/net/ipv4/gre_offload.c
10856 +++ b/net/ipv4/gre_offload.c
10857 @@ -51,7 +51,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
10858
10859 greh = (struct gre_base_hdr *)skb_transport_header(skb);
10860
10861 - ghl = skb_inner_network_header(skb) - skb_transport_header(skb);
10862 + ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
10863 if (unlikely(ghl < sizeof(*greh)))
10864 goto out;
10865
10866 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
10867 index 215af2b155cb..c43a1e235182 100644
10868 --- a/net/ipv4/ip_output.c
10869 +++ b/net/ipv4/ip_output.c
10870 @@ -1533,6 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
10871 struct sk_buff *nskb;
10872 struct sock *sk;
10873 struct inet_sock *inet;
10874 + int err;
10875
10876 if (ip_options_echo(&replyopts.opt.opt, skb))
10877 return;
10878 @@ -1572,8 +1573,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
10879 sock_net_set(sk, net);
10880 __skb_queue_head_init(&sk->sk_write_queue);
10881 sk->sk_sndbuf = sysctl_wmem_default;
10882 - ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
10883 - &ipc, &rt, MSG_DONTWAIT);
10884 + err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
10885 + len, 0, &ipc, &rt, MSG_DONTWAIT);
10886 + if (unlikely(err)) {
10887 + ip_flush_pending_frames(sk);
10888 + goto out;
10889 + }
10890 +
10891 nskb = skb_peek(&sk->sk_write_queue);
10892 if (nskb) {
10893 if (arg->csumoffset >= 0)
10894 @@ -1585,7 +1591,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
10895 skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
10896 ip_push_pending_frames(sk, &fl4);
10897 }
10898 -
10899 +out:
10900 put_cpu_var(unicast_sock);
10901
10902 ip_rt_put(rt);
10903 diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
10904 index f4c987bb7e94..88c386cf7d85 100644
10905 --- a/net/ipv4/ip_tunnel_core.c
10906 +++ b/net/ipv4/ip_tunnel_core.c
10907 @@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
10908 skb_pull_rcsum(skb, hdr_len);
10909
10910 if (inner_proto == htons(ETH_P_TEB)) {
10911 - struct ethhdr *eh = (struct ethhdr *)skb->data;
10912 + struct ethhdr *eh;
10913
10914 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
10915 return -ENOMEM;
10916
10917 + eh = (struct ethhdr *)skb->data;
10918 if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
10919 skb->protocol = eh->h_proto;
10920 else
10921 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
10922 index cbadb942c332..29836f8f86a6 100644
10923 --- a/net/ipv4/route.c
10924 +++ b/net/ipv4/route.c
10925 @@ -1798,6 +1798,7 @@ local_input:
10926 no_route:
10927 RT_CACHE_STAT_INC(in_no_route);
10928 res.type = RTN_UNREACHABLE;
10929 + res.fi = NULL;
10930 goto local_input;
10931
10932 /*
10933 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
10934 index 541f26a67ba2..6b0b38fdf4fc 100644
10935 --- a/net/ipv4/tcp.c
10936 +++ b/net/ipv4/tcp.c
10937 @@ -2985,61 +2985,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
10938 #endif
10939
10940 #ifdef CONFIG_TCP_MD5SIG
10941 -static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
10942 +static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
10943 static DEFINE_MUTEX(tcp_md5sig_mutex);
10944 -
10945 -static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
10946 -{
10947 - int cpu;
10948 -
10949 - for_each_possible_cpu(cpu) {
10950 - struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
10951 -
10952 - if (p->md5_desc.tfm)
10953 - crypto_free_hash(p->md5_desc.tfm);
10954 - }
10955 - free_percpu(pool);
10956 -}
10957 +static bool tcp_md5sig_pool_populated = false;
10958
10959 static void __tcp_alloc_md5sig_pool(void)
10960 {
10961 int cpu;
10962 - struct tcp_md5sig_pool __percpu *pool;
10963 -
10964 - pool = alloc_percpu(struct tcp_md5sig_pool);
10965 - if (!pool)
10966 - return;
10967
10968 for_each_possible_cpu(cpu) {
10969 - struct crypto_hash *hash;
10970 -
10971 - hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
10972 - if (IS_ERR_OR_NULL(hash))
10973 - goto out_free;
10974 + if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
10975 + struct crypto_hash *hash;
10976
10977 - per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
10978 + hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
10979 + if (IS_ERR_OR_NULL(hash))
10980 + return;
10981 + per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
10982 + }
10983 }
10984 - /* before setting tcp_md5sig_pool, we must commit all writes
10985 - * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
10986 + /* before setting tcp_md5sig_pool_populated, we must commit all writes
10987 + * to memory. See smp_rmb() in tcp_get_md5sig_pool()
10988 */
10989 smp_wmb();
10990 - tcp_md5sig_pool = pool;
10991 - return;
10992 -out_free:
10993 - __tcp_free_md5sig_pool(pool);
10994 + tcp_md5sig_pool_populated = true;
10995 }
10996
10997 bool tcp_alloc_md5sig_pool(void)
10998 {
10999 - if (unlikely(!tcp_md5sig_pool)) {
11000 + if (unlikely(!tcp_md5sig_pool_populated)) {
11001 mutex_lock(&tcp_md5sig_mutex);
11002
11003 - if (!tcp_md5sig_pool)
11004 + if (!tcp_md5sig_pool_populated)
11005 __tcp_alloc_md5sig_pool();
11006
11007 mutex_unlock(&tcp_md5sig_mutex);
11008 }
11009 - return tcp_md5sig_pool != NULL;
11010 + return tcp_md5sig_pool_populated;
11011 }
11012 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
11013
11014 @@ -3053,13 +3034,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
11015 */
11016 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
11017 {
11018 - struct tcp_md5sig_pool __percpu *p;
11019 -
11020 local_bh_disable();
11021 - p = ACCESS_ONCE(tcp_md5sig_pool);
11022 - if (p)
11023 - return __this_cpu_ptr(p);
11024
11025 + if (tcp_md5sig_pool_populated) {
11026 + /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
11027 + smp_rmb();
11028 + return this_cpu_ptr(&tcp_md5sig_pool);
11029 + }
11030 local_bh_enable();
11031 return NULL;
11032 }
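
The tcp.c rework above publishes the per-CPU MD5 pools by filling them, issuing smp_wmb(), and only then setting the populated flag; readers pair the flag check with smp_rmb(). A self-contained userspace analogue of that ordering using C11 release/acquire atomics (not kernel code; the array size and names are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct pool { void *tfm; };

static struct pool pools[64];		/* stand-in for the per-CPU data */
static atomic_bool populated;		/* stand-in for *_populated flag */

static void publisher(void)
{
	for (size_t i = 0; i < 64; i++)
		pools[i].tfm = &pools[i];	/* "allocate" each pool */
	/* Release: all pool writes are visible before the flag flips,
	 * like smp_wmb() before setting tcp_md5sig_pool_populated. */
	atomic_store_explicit(&populated, true, memory_order_release);
}

static struct pool *reader(size_t cpu)
{
	/* Acquire pairs with the release above, like smp_rmb(). */
	if (!atomic_load_explicit(&populated, memory_order_acquire))
		return NULL;
	return &pools[cpu];
}

int main(void)
{
	publisher();
	return reader(0) ? 0 : 1;
}
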
11033 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
11034 index cd17f009aede..3f49eaeb2559 100644
11035 --- a/net/ipv4/tcp_ipv4.c
11036 +++ b/net/ipv4/tcp_ipv4.c
11037 @@ -208,8 +208,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
11038 inet->inet_dport = usin->sin_port;
11039 inet->inet_daddr = daddr;
11040
11041 - inet_set_txhash(sk);
11042 -
11043 inet_csk(sk)->icsk_ext_hdr_len = 0;
11044 if (inet_opt)
11045 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
11046 @@ -226,6 +224,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
11047 if (err)
11048 goto failure;
11049
11050 + inet_set_txhash(sk);
11051 +
11052 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
11053 inet->inet_sport, inet->inet_dport, sk);
11054 if (IS_ERR(rt)) {
11055 diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
11056 index 5ec867e4a8b7..1d4156ddf355 100644
11057 --- a/net/ipv6/output_core.c
11058 +++ b/net/ipv6/output_core.c
11059 @@ -3,11 +3,45 @@
11060 * not configured or static. These functions are needed by GSO/GRO implementation.
11061 */
11062 #include <linux/export.h>
11063 +#include <net/ip.h>
11064 #include <net/ipv6.h>
11065 #include <net/ip6_fib.h>
11066 #include <net/addrconf.h>
11067 #include <net/secure_seq.h>
11068
11069 +/* This function exists only for tap drivers that must support broken
11070 + * clients requesting UFO without specifying an IPv6 fragment ID.
11071 + *
11072 + * This is similar to ipv6_select_ident() but we use an independent hash
11073 + * seed to limit information leakage.
11074 + *
11075 + * The network header must be set before calling this.
11076 + */
11077 +void ipv6_proxy_select_ident(struct sk_buff *skb)
11078 +{
11079 + static u32 ip6_proxy_idents_hashrnd __read_mostly;
11080 + struct in6_addr buf[2];
11081 + struct in6_addr *addrs;
11082 + u32 hash, id;
11083 +
11084 + addrs = skb_header_pointer(skb,
11085 + skb_network_offset(skb) +
11086 + offsetof(struct ipv6hdr, saddr),
11087 + sizeof(buf), buf);
11088 + if (!addrs)
11089 + return;
11090 +
11091 + net_get_random_once(&ip6_proxy_idents_hashrnd,
11092 + sizeof(ip6_proxy_idents_hashrnd));
11093 +
11094 + hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
11095 + hash = __ipv6_addr_jhash(&addrs[0], hash);
11096 +
11097 + id = ip_idents_reserve(hash, 1);
11098 + skb_shinfo(skb)->ip6_frag_id = htonl(id);
11099 +}
11100 +EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
11101 +
11102 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
11103 {
11104 u16 offset = sizeof(struct ipv6hdr);
11105 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
11106 index 29964c3d363c..264c0f28baf4 100644
11107 --- a/net/ipv6/tcp_ipv6.c
11108 +++ b/net/ipv6/tcp_ipv6.c
11109 @@ -198,8 +198,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
11110 sk->sk_v6_daddr = usin->sin6_addr;
11111 np->flow_label = fl6.flowlabel;
11112
11113 - ip6_set_txhash(sk);
11114 -
11115 /*
11116 * TCP over IPv4
11117 */
11118 @@ -295,6 +293,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
11119 if (err)
11120 goto late_failure;
11121
11122 + ip6_set_txhash(sk);
11123 +
11124 if (!tp->write_seq && likely(!tp->repair))
11125 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
11126 sk->sk_v6_daddr.s6_addr32,
11127 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
11128 index 8fdadfd94ba8..6081329784dd 100644
11129 --- a/net/mac80211/rate.c
11130 +++ b/net/mac80211/rate.c
11131 @@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
11132 */
11133 if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
11134 u32 basic_rates = vif->bss_conf.basic_rates;
11135 - s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0;
11136 + s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
11137
11138 rate = &sband->bitrates[rates[0].idx];
11139
11140 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
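
A quick standalone check of the mac80211 off-by-one fixed above: the intended value is the index of the lowest set bit, ffs(mask) - 1, while the old ffs(mask - 1) yields something else, e.g. for a mask with only bit 2 set:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int basic_rates = 0x4;	/* only rate index 2 is a basic rate */

	printf("new: ffs(mask) - 1 = %d\n", ffs(basic_rates) - 1);	/* 2 */
	printf("old: ffs(mask - 1) = %d\n", ffs(basic_rates - 1));	/* 1 */
	return 0;
}
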
11141 index c416725d28c4..f1de72de273e 100644
11142 --- a/net/netlink/af_netlink.c
11143 +++ b/net/netlink/af_netlink.c
11144 @@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
11145 static int netlink_dump(struct sock *sk);
11146 static void netlink_skb_destructor(struct sk_buff *skb);
11147
11148 +/* nl_table locking explained:
11149 + * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
11150 + * combined with an RCU read-side lock. Insertion and removal are protected
11151 + * with nl_sk_hash_lock while using RCU list modification primitives and may
11152 + * run in parallel to nl_table_lock protected lookups. Destruction of the
11153 + * Netlink socket may only occur *after* nl_table_lock has been acquired
11154 + * either during or after the socket has been removed from the list.
11155 + */
11156 DEFINE_RWLOCK(nl_table_lock);
11157 EXPORT_SYMBOL_GPL(nl_table_lock);
11158 static atomic_t nl_table_users = ATOMIC_INIT(0);
11159 @@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
11160 static int lockdep_nl_sk_hash_is_held(void)
11161 {
11162 #ifdef CONFIG_LOCKDEP
11163 - return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1;
11164 -#else
11165 - return 1;
11166 + if (debug_locks)
11167 + return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
11168 #endif
11169 + return 1;
11170 }
11171
11172 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
11173 @@ -715,7 +723,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
11174 * after validation, the socket and the ring may only be used by a
11175 * single process, otherwise we fall back to copying.
11176 */
11177 - if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 ||
11178 + if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
11179 atomic_read(&nlk->mapped) > 1)
11180 excl = false;
11181
11182 @@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
11183 struct netlink_table *table = &nl_table[protocol];
11184 struct sock *sk;
11185
11186 + read_lock(&nl_table_lock);
11187 rcu_read_lock();
11188 sk = __netlink_lookup(table, portid, net);
11189 if (sk)
11190 sock_hold(sk);
11191 rcu_read_unlock();
11192 + read_unlock(&nl_table_lock);
11193
11194 return sk;
11195 }
11196 @@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock)
11197 }
11198 netlink_table_ungrab();
11199
11200 - /* Wait for readers to complete */
11201 - synchronize_net();
11202 -
11203 kfree(nlk->groups);
11204 nlk->groups = NULL;
11205
11206 @@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock)
11207
11208 retry:
11209 cond_resched();
11210 + netlink_table_grab();
11211 rcu_read_lock();
11212 if (__netlink_lookup(table, portid, net)) {
11213 /* Bind collision, search negative portid values. */
11214 @@ -1288,9 +1296,11 @@ retry:
11215 if (rover > -4097)
11216 rover = -4097;
11217 rcu_read_unlock();
11218 + netlink_table_ungrab();
11219 goto retry;
11220 }
11221 rcu_read_unlock();
11222 + netlink_table_ungrab();
11223
11224 err = netlink_insert(sk, net, portid);
11225 if (err == -EADDRINUSE)
11226 @@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
11227 }
11228
11229 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
11230 - __acquires(RCU)
11231 + __acquires(nl_table_lock) __acquires(RCU)
11232 {
11233 + read_lock(&nl_table_lock);
11234 rcu_read_lock();
11235 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
11236 }
11237
11238 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
11239 {
11240 + struct rhashtable *ht;
11241 struct netlink_sock *nlk;
11242 struct nl_seq_iter *iter;
11243 struct net *net;
11244 @@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
11245 iter = seq->private;
11246 nlk = v;
11247
11248 - rht_for_each_entry_rcu(nlk, nlk->node.next, node)
11249 + i = iter->link;
11250 + ht = &nl_table[i].hash;
11251 + rht_for_each_entry(nlk, nlk->node.next, ht, node)
11252 if (net_eq(sock_net((struct sock *)nlk), net))
11253 return nlk;
11254
11255 - i = iter->link;
11256 j = iter->hash_idx + 1;
11257
11258 do {
11259 - struct rhashtable *ht = &nl_table[i].hash;
11260 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
11261
11262 for (; j < tbl->size; j++) {
11263 - rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
11264 + rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
11265 if (net_eq(sock_net((struct sock *)nlk), net)) {
11266 iter->link = i;
11267 iter->hash_idx = j;
11268 @@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
11269 }
11270
11271 static void netlink_seq_stop(struct seq_file *seq, void *v)
11272 - __releases(RCU)
11273 + __releases(RCU) __releases(nl_table_lock)
11274 {
11275 rcu_read_unlock();
11276 + read_unlock(&nl_table_lock);
11277 }
11278
11279
11280 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
11281 index 488ddeed9363..e0b94ce4c4e6 100644
11282 --- a/net/sunrpc/clnt.c
11283 +++ b/net/sunrpc/clnt.c
11284 @@ -461,6 +461,8 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
11285
11286 if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
11287 clnt->cl_autobind = 1;
11288 + if (args->flags & RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT)
11289 + clnt->cl_noretranstimeo = 1;
11290 if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
11291 clnt->cl_discrtry = 1;
11292 if (!(args->flags & RPC_CLNT_CREATE_QUIET))
11293 @@ -579,6 +581,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
11294 /* Turn off autobind on clones */
11295 new->cl_autobind = 0;
11296 new->cl_softrtry = clnt->cl_softrtry;
11297 + new->cl_noretranstimeo = clnt->cl_noretranstimeo;
11298 new->cl_discrtry = clnt->cl_discrtry;
11299 new->cl_chatty = clnt->cl_chatty;
11300 return new;
11301 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
11302 index 43cd89eacfab..700f87900e2d 100644
11303 --- a/net/sunrpc/xprtsock.c
11304 +++ b/net/sunrpc/xprtsock.c
11305 @@ -845,6 +845,8 @@ static void xs_error_report(struct sock *sk)
11306 dprintk("RPC: xs_error_report client %p, error=%d...\n",
11307 xprt, -err);
11308 trace_rpc_socket_error(xprt, sk->sk_socket, err);
11309 + if (test_bit(XPRT_CONNECTION_REUSE, &xprt->state))
11310 + goto out;
11311 xprt_wake_pending_tasks(xprt, err);
11312 out:
11313 read_unlock_bh(&sk->sk_callback_lock);
11314 @@ -2245,7 +2247,9 @@ static void xs_tcp_setup_socket(struct work_struct *work)
11315 abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
11316 &xprt->state);
11317 /* "close" the socket, preserving the local port */
11318 + set_bit(XPRT_CONNECTION_REUSE, &xprt->state);
11319 xs_tcp_reuse_connection(transport);
11320 + clear_bit(XPRT_CONNECTION_REUSE, &xprt->state);
11321
11322 if (abort_and_exit)
11323 goto out_eagain;
11324 diff --git a/net/tipc/link.c b/net/tipc/link.c
11325 index fb1485dc6736..640206580f34 100644
11326 --- a/net/tipc/link.c
11327 +++ b/net/tipc/link.c
11328 @@ -1936,7 +1936,12 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
11329 }
11330 omsg = buf_msg(obuf);
11331 pos += align(msg_size(omsg));
11332 - if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) {
11333 + if (msg_isdata(omsg)) {
11334 + if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
11335 + tipc_sk_mcast_rcv(obuf);
11336 + else
11337 + tipc_sk_rcv(obuf);
11338 + } else if (msg_user(omsg) == CONN_MANAGER) {
11339 tipc_sk_rcv(obuf);
11340 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
11341 tipc_named_rcv(obuf);
11342 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
11343 index 3bcb80df4d01..970772c731ff 100644
11344 --- a/security/integrity/evm/evm_main.c
11345 +++ b/security/integrity/evm/evm_main.c
11346 @@ -284,6 +284,13 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
11347 goto out;
11348 }
11349 evm_status = evm_verify_current_integrity(dentry);
11350 + if (evm_status == INTEGRITY_NOXATTRS) {
11351 + struct integrity_iint_cache *iint;
11352 +
11353 + iint = integrity_iint_find(dentry->d_inode);
11354 + if (iint && (iint->flags & IMA_NEW_FILE))
11355 + return 0;
11356 + }
11357 out:
11358 if (evm_status != INTEGRITY_PASS)
11359 integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
11360 @@ -311,9 +318,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
11361 {
11362 const struct evm_ima_xattr_data *xattr_data = xattr_value;
11363
11364 - if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
11365 - && (xattr_data->type == EVM_XATTR_HMAC))
11366 - return -EPERM;
11367 + if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
11368 + if (!xattr_value_len)
11369 + return -EINVAL;
11370 + if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
11371 + return -EPERM;
11372 + }
11373 return evm_protect_xattr(dentry, xattr_name, xattr_value,
11374 xattr_value_len);
11375 }
11376 diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
11377 index 225fd944a4ef..58509436de23 100644
11378 --- a/security/integrity/ima/ima_appraise.c
11379 +++ b/security/integrity/ima/ima_appraise.c
11380 @@ -378,6 +378,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
11381 result = ima_protect_xattr(dentry, xattr_name, xattr_value,
11382 xattr_value_len);
11383 if (result == 1) {
11384 + if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
11385 + return -EINVAL;
11386 ima_reset_appraise_flags(dentry->d_inode,
11387 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
11388 result = 0;
11389 diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
11390 index 904e68abd49e..6885058c645d 100644
11391 --- a/security/integrity/integrity.h
11392 +++ b/security/integrity/integrity.h
11393 @@ -61,6 +61,7 @@ enum evm_ima_xattr_type {
11394 EVM_XATTR_HMAC,
11395 EVM_IMA_XATTR_DIGSIG,
11396 IMA_XATTR_DIGEST_NG,
11397 + IMA_XATTR_LAST
11398 };
11399
11400 struct evm_ima_xattr_data {
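
The EVM/IMA hunks above reject empty xattr values and type codes at or beyond the new IMA_XATTR_LAST sentinel. A compact userspace sketch of that validation pattern; the leading IMA_XATTR_DIGEST member is assumed from the kernel enum (it sits outside the hunk shown) and the struct is simplified:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum evm_ima_xattr_type {
	IMA_XATTR_DIGEST = 0,	/* assumed first member, not shown in the hunk */
	EVM_XATTR_HMAC,
	EVM_IMA_XATTR_DIGSIG,
	IMA_XATTR_DIGEST_NG,
	IMA_XATTR_LAST		/* sentinel added by this patch */
};

struct evm_ima_xattr_data {
	uint8_t type;
	uint8_t digest[];	/* simplified; the real struct carries a digest */
};

static bool xattr_value_ok(const void *value, size_t len)
{
	const struct evm_ima_xattr_data *x = value;

	if (!len)				/* empty value: nothing to parse */
		return false;
	return x->type < IMA_XATTR_LAST;	/* reject unknown type codes */
}

int main(void)
{
	uint8_t good[5] = { EVM_IMA_XATTR_DIGSIG, 0, 0, 0, 0 };

	return (xattr_value_ok(good, sizeof(good)) &&
		!xattr_value_ok(good, 0)) ? 0 : 1;
}
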
11401 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
11402 index b0e940497e23..e03bad59c374 100644
11403 --- a/security/selinux/hooks.c
11404 +++ b/security/selinux/hooks.c
11405 @@ -481,6 +481,7 @@ next_inode:
11406 list_entry(sbsec->isec_head.next,
11407 struct inode_security_struct, list);
11408 struct inode *inode = isec->inode;
11409 + list_del_init(&isec->list);
11410 spin_unlock(&sbsec->isec_lock);
11411 inode = igrab(inode);
11412 if (inode) {
11413 @@ -489,7 +490,6 @@ next_inode:
11414 iput(inode);
11415 }
11416 spin_lock(&sbsec->isec_lock);
11417 - list_del_init(&isec->list);
11418 goto next_inode;
11419 }
11420 spin_unlock(&sbsec->isec_lock);
11421 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
11422 index 102e8fd1d450..2d957ba63557 100644
11423 --- a/sound/core/pcm_compat.c
11424 +++ b/sound/core/pcm_compat.c
11425 @@ -210,6 +210,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
11426 if (err < 0)
11427 return err;
11428
11429 + if (clear_user(src, sizeof(*src)))
11430 + return -EFAULT;
11431 if (put_user(status.state, &src->state) ||
11432 compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
11433 compat_put_timespec(&status.tstamp, &src->tstamp) ||
11434 diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
11435 index 45a0eed6d5b1..3b052ed0fbf5 100644
11436 --- a/sound/firewire/bebob/bebob_focusrite.c
11437 +++ b/sound/firewire/bebob/bebob_focusrite.c
11438 @@ -27,12 +27,14 @@
11439 #define SAFFIRE_CLOCK_SOURCE_INTERNAL 0
11440 #define SAFFIRE_CLOCK_SOURCE_SPDIF 1
11441
11442 -/* '1' is absent, why... */
11443 +/* clock sources as returned from register of Saffire Pro 10 and 26 */
11444 #define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
11445 +#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
11446 #define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
11447 -#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3
11448 -#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4
11449 +#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 /* not used on s.pro. 10 */
11450 +#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 /* not used on s.pro. 10 */
11451 #define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5
11452 +#define SAFFIREPRO_CLOCK_SOURCE_COUNT 6
11453
11454 /* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */
11455 #define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4
11456 @@ -101,13 +103,34 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value)
11457 &data, sizeof(__be32), 0);
11458 }
11459
11460 +static char *const saffirepro_10_clk_src_labels[] = {
11461 + SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
11462 +};
11463 static char *const saffirepro_26_clk_src_labels[] = {
11464 SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock"
11465 };
11466 -
11467 -static char *const saffirepro_10_clk_src_labels[] = {
11468 - SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
11469 +/* Value maps between registers and labels for SaffirePro 10/26. */
11470 +static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = {
11471 + /* SaffirePro 10 */
11472 + [0] = {
11473 + [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
11474 + [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
11475 + [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
11476 + [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = -1, /* not supported */
11477 + [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = -1, /* not supported */
11478 + [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 2,
11479 + },
11480 + /* SaffirePro 26 */
11481 + [1] = {
11482 + [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
11483 + [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
11484 + [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
11485 + [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = 2,
11486 + [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = 3,
11487 + [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 4,
11488 + }
11489 };
11490 +
11491 static int
11492 saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate)
11493 {
11494 @@ -138,24 +161,35 @@ saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate)
11495
11496 return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id);
11497 }
11498 +
11499 +/*
11500 + * Query the hardware for the current clock source and return our
11501 + * internally used clock index in *id, which depends on the model.
11502 + */
11503 static int
11504 saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
11505 {
11506 int err;
11507 - u32 value;
11508 + u32 value; /* clock source read from hw register */
11509 + const signed char *map;
11510
11511 err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value);
11512 if (err < 0)
11513 goto end;
11514
11515 - if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) {
11516 - if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK)
11517 - *id = 2;
11518 - else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF)
11519 - *id = 1;
11520 - } else if (value > 1) {
11521 - *id = value - 1;
11522 + /* depending on hardware, use a different mapping */
11523 + if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels)
11524 + map = saffirepro_clk_maps[0];
11525 + else
11526 + map = saffirepro_clk_maps[1];
11527 +
11528 + /* In case this driver cannot handle the value read from the register. */
11529 + if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
11530 + err = -EIO;
11531 + goto end;
11532 }
11533 +
11534 + *id = (unsigned int)map[value];
11535 end:
11536 return err;
11537 }
11538 diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
11539 index ef4d0c9f6578..1aab0a32870c 100644
11540 --- a/sound/firewire/bebob/bebob_stream.c
11541 +++ b/sound/firewire/bebob/bebob_stream.c
11542 @@ -129,12 +129,24 @@ snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal)
11543 /* 1.The device has its own operation to switch source of clock */
11544 if (clk_spec) {
11545 err = clk_spec->get(bebob, &id);
11546 - if (err < 0)
11547 + if (err < 0) {
11548 dev_err(&bebob->unit->device,
11549 "fail to get clock source: %d\n", err);
11550 - else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
11551 - strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
11552 + goto end;
11553 + }
11554 +
11555 + if (id >= clk_spec->num) {
11556 + dev_err(&bebob->unit->device,
11557 + "clock source %d out of range 0..%d\n",
11558 + id, clk_spec->num - 1);
11559 + err = -EIO;
11560 + goto end;
11561 + }
11562 +
11563 + if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
11564 + strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
11565 *internal = true;
11566 +
11567 goto end;
11568 }
11569
11570 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
11571 index aa302fb03fc5..0a7f848590d2 100644
11572 --- a/sound/pci/hda/hda_intel.c
11573 +++ b/sound/pci/hda/hda_intel.c
11574 @@ -373,6 +373,8 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
11575 #ifdef CONFIG_SND_DMA_SGBUF
11576 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
11577 struct snd_sg_buf *sgbuf = dmab->private_data;
11578 + if (chip->driver_type == AZX_DRIVER_CMEDIA)
11579 + return; /* deal with only CORB/RIRB buffers */
11580 if (on)
11581 set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
11582 else
11583 @@ -1768,7 +1770,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
11584 #ifdef CONFIG_X86
11585 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
11586 struct azx *chip = apcm->chip;
11587 - if (!azx_snoop(chip))
11588 + if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
11589 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
11590 #endif
11591 }
11592 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
11593 index b7b293cc710e..0c9d5880859a 100644
11594 --- a/sound/pci/hda/patch_realtek.c
11595 +++ b/sound/pci/hda/patch_realtek.c
11596 @@ -5008,9 +5008,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
11597 SND_PCI_QUIRK(0x103c, 0x2224, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11598 SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11599 SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11600 - SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11601 - SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11602 - SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11603 SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11604 SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11605 SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
11606 diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
11607 index 848cab839553..2e9e90dbeecf 100644
11608 --- a/sound/soc/codecs/adau1761.c
11609 +++ b/sound/soc/codecs/adau1761.c
11610 @@ -405,6 +405,7 @@ static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = {
11611 2, 0, NULL, 0),
11612
11613 SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0),
11614 + SND_SOC_DAPM_SUPPLY("ALC Clock", ADAU1761_CLK_ENABLE0, 5, 0, NULL, 0),
11615
11616 SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1,
11617 0, 0, NULL, 0),
11618 @@ -436,6 +437,9 @@ static const struct snd_soc_dapm_route adau1761_dapm_routes[] = {
11619 { "Right Playback Mixer", NULL, "Slew Clock" },
11620 { "Left Playback Mixer", NULL, "Slew Clock" },
11621
11622 + { "Left Input Mixer", NULL, "ALC Clock" },
11623 + { "Right Input Mixer", NULL, "ALC Clock" },
11624 +
11625 { "Digital Clock 0", NULL, "SYSCLK" },
11626 { "Digital Clock 1", NULL, "SYSCLK" },
11627 };
11628 diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
11629 index 64f179ee9834..5e8626ae612b 100644
11630 --- a/sound/soc/codecs/tlv320aic3x.c
11631 +++ b/sound/soc/codecs/tlv320aic3x.c
11632 @@ -1121,6 +1121,7 @@ static int aic3x_regulator_event(struct notifier_block *nb,
11633 static int aic3x_set_power(struct snd_soc_codec *codec, int power)
11634 {
11635 struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
11636 + unsigned int pll_c, pll_d;
11637 int ret;
11638
11639 if (power) {
11640 @@ -1138,6 +1139,18 @@ static int aic3x_set_power(struct snd_soc_codec *codec, int power)
11641 /* Sync reg_cache with the hardware */
11642 regcache_cache_only(aic3x->regmap, false);
11643 regcache_sync(aic3x->regmap);
11644 +
11645 + /* Rewrite the paired PLL C/D registers in case the cache sync skipped
11646 + * writing one of them, which would also have prevented the other one
11647 + * from being written
11648 + */
11649 + pll_c = snd_soc_read(codec, AIC3X_PLL_PROGC_REG);
11650 + pll_d = snd_soc_read(codec, AIC3X_PLL_PROGD_REG);
11651 + if (pll_c == aic3x_reg[AIC3X_PLL_PROGC_REG].def ||
11652 + pll_d == aic3x_reg[AIC3X_PLL_PROGD_REG].def) {
11653 + snd_soc_write(codec, AIC3X_PLL_PROGC_REG, pll_c);
11654 + snd_soc_write(codec, AIC3X_PLL_PROGD_REG, pll_d);
11655 + }
11656 } else {
11657 /*
11658 * Do soft reset to this codec instance in order to clear
11659 diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
11660 index 61bf6da4bb02..e8957329ea60 100644
11661 --- a/sound/soc/intel/sst-haswell-pcm.c
11662 +++ b/sound/soc/intel/sst-haswell-pcm.c
11663 @@ -693,9 +693,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
11664 }
11665
11666 #define HSW_FORMATS \
11667 - (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
11668 - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
11669 - SNDRV_PCM_FMTBIT_S8)
11670 + (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
11671
11672 static struct snd_soc_dai_driver hsw_dais[] = {
11673 {
11674 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
11675 index d074aa91b023..a3e0a0df9c75 100644
11676 --- a/sound/soc/soc-core.c
11677 +++ b/sound/soc/soc-core.c
11678 @@ -4315,10 +4315,10 @@ void snd_soc_remove_platform(struct snd_soc_platform *platform)
11679 snd_soc_component_del_unlocked(&platform->component);
11680 mutex_unlock(&client_mutex);
11681
11682 - snd_soc_component_cleanup(&platform->component);
11683 -
11684 dev_dbg(platform->dev, "ASoC: Unregistered platform '%s'\n",
11685 platform->component.name);
11686 +
11687 + snd_soc_component_cleanup(&platform->component);
11688 }
11689 EXPORT_SYMBOL_GPL(snd_soc_remove_platform);
11690
11691 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
11692 index 177bd8639ef9..7098e6b27b2b 100644
11693 --- a/sound/soc/soc-dapm.c
11694 +++ b/sound/soc/soc-dapm.c
11695 @@ -591,9 +591,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
11696 int shared;
11697 struct snd_kcontrol *kcontrol;
11698 bool wname_in_long_name, kcname_in_long_name;
11699 - char *long_name;
11700 + char *long_name = NULL;
11701 const char *name;
11702 - int ret;
11703 + int ret = 0;
11704
11705 prefix = soc_dapm_prefix(dapm);
11706 if (prefix)
11707 @@ -652,15 +652,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
11708
11709 kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
11710 prefix);
11711 - kfree(long_name);
11712 - if (!kcontrol)
11713 - return -ENOMEM;
11714 + if (!kcontrol) {
11715 + ret = -ENOMEM;
11716 + goto exit_free;
11717 + }
11718 +
11719 kcontrol->private_free = dapm_kcontrol_free;
11720
11721 ret = dapm_kcontrol_data_alloc(w, kcontrol);
11722 if (ret) {
11723 snd_ctl_free_one(kcontrol);
11724 - return ret;
11725 + goto exit_free;
11726 }
11727
11728 ret = snd_ctl_add(card, kcontrol);
11729 @@ -668,17 +670,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
11730 dev_err(dapm->dev,
11731 "ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
11732 w->name, name, ret);
11733 - return ret;
11734 + goto exit_free;
11735 }
11736 }
11737
11738 ret = dapm_kcontrol_add_widget(kcontrol, w);
11739 - if (ret)
11740 - return ret;
11741 + if (ret == 0)
11742 + w->kcontrols[kci] = kcontrol;
11743
11744 - w->kcontrols[kci] = kcontrol;
11745 +exit_free:
11746 + kfree(long_name);
11747
11748 - return 0;
11749 + return ret;
11750 }
11751
11752 /* create new dapm mixer control */
11753 diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
11754 index 642c86240752..002311afdeaa 100644
11755 --- a/sound/soc/soc-pcm.c
11756 +++ b/sound/soc/soc-pcm.c
11757 @@ -352,7 +352,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
11758 } else {
11759 for (i = 0; i < rtd->num_codecs; i++) {
11760 codec_dai = rtd->codec_dais[i];
11761 - if (codec_dai->driver->playback.sig_bits == 0) {
11762 + if (codec_dai->driver->capture.sig_bits == 0) {
11763 bits = 0;
11764 break;
11765 }
11766 diff --git a/sound/usb/card.c b/sound/usb/card.c
11767 index 7ecd0e8a5c51..f61ebb17cc64 100644
11768 --- a/sound/usb/card.c
11769 +++ b/sound/usb/card.c
11770 @@ -591,18 +591,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
11771 {
11772 struct snd_card *card;
11773 struct list_head *p;
11774 + bool was_shutdown;
11775
11776 if (chip == (void *)-1L)
11777 return;
11778
11779 card = chip->card;
11780 down_write(&chip->shutdown_rwsem);
11781 + was_shutdown = chip->shutdown;
11782 chip->shutdown = 1;
11783 up_write(&chip->shutdown_rwsem);
11784
11785 mutex_lock(&register_mutex);
11786 - chip->num_interfaces--;
11787 - if (chip->num_interfaces <= 0) {
11788 + if (!was_shutdown) {
11789 struct snd_usb_endpoint *ep;
11790
11791 snd_card_disconnect(card);
11792 @@ -622,6 +623,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
11793 list_for_each(p, &chip->mixer_list) {
11794 snd_usb_mixer_disconnect(p);
11795 }
11796 + }
11797 +
11798 + chip->num_interfaces--;
11799 + if (chip->num_interfaces <= 0) {
11800 usb_chip[chip->index] = NULL;
11801 mutex_unlock(&register_mutex);
11802 snd_card_free_when_closed(card);
11803 diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
11804 index 714b94932312..1f0dc1e5f1f0 100644
11805 --- a/virt/kvm/iommu.c
11806 +++ b/virt/kvm/iommu.c
11807 @@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
11808 gfn_t base_gfn, unsigned long npages);
11809
11810 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
11811 - unsigned long size)
11812 + unsigned long npages)
11813 {
11814 gfn_t end_gfn;
11815 pfn_t pfn;
11816
11817 pfn = gfn_to_pfn_memslot(slot, gfn);
11818 - end_gfn = gfn + (size >> PAGE_SHIFT);
11819 + end_gfn = gfn + npages;
11820 gfn += 1;
11821
11822 if (is_error_noslot_pfn(pfn))
11823 @@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
11824 * Pin all pages we are about to map in memory. This is
11825 * important because we unmap and unpin in 4kb steps later.
11826 */
11827 - pfn = kvm_pin_pages(slot, gfn, page_size);
11828 + pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
11829 if (is_error_noslot_pfn(pfn)) {
11830 gfn += 1;
11831 continue;
11832 @@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
11833 if (r) {
11834 printk(KERN_ERR "kvm_iommu_map_address:"
11835 "iommu failed to map pfn=%llx\n", pfn);
11836 - kvm_unpin_pages(kvm, pfn, page_size);
11837 + kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
11838 goto unmap_pages;
11839 }
11840