Contents of /trunk/kernel-magellan/patches-3.18/0102-3.18.3-all-fixes.patch
Revision 2543
Fri Feb 20 13:15:30 2015 UTC (9 years, 8 months ago) by niro
File size: 156472 byte(s)
-linux-3.18.3
1 | diff --git a/Documentation/devicetree/bindings/i2c/i2c-designware.txt b/Documentation/devicetree/bindings/i2c/i2c-designware.txt |
2 | index 5199b0c8cf7a..fee26dc3e858 100644 |
3 | --- a/Documentation/devicetree/bindings/i2c/i2c-designware.txt |
4 | +++ b/Documentation/devicetree/bindings/i2c/i2c-designware.txt |
5 | @@ -14,10 +14,10 @@ Optional properties : |
6 | - i2c-sda-hold-time-ns : should contain the SDA hold time in nanoseconds. |
7 | This option is only supported in hardware blocks version 1.11a or newer. |
8 | |
9 | - - i2c-scl-falling-time : should contain the SCL falling time in nanoseconds. |
10 | + - i2c-scl-falling-time-ns : should contain the SCL falling time in nanoseconds. |
11 | This value which is by default 300ns is used to compute the tLOW period. |
12 | |
13 | - - i2c-sda-falling-time : should contain the SDA falling time in nanoseconds. |
14 | + - i2c-sda-falling-time-ns : should contain the SDA falling time in nanoseconds. |
15 | This value which is by default 300ns is used to compute the tHIGH period. |
16 | |
17 | Example : |
18 | diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt |
19 | index 69b3cac4749d..5d8675615e59 100644 |
20 | --- a/Documentation/ramoops.txt |
21 | +++ b/Documentation/ramoops.txt |
22 | @@ -14,11 +14,19 @@ survive after a restart. |
23 | |
24 | 1. Ramoops concepts |
25 | |
26 | -Ramoops uses a predefined memory area to store the dump. The start and size of |
27 | -the memory area are set using two variables: |
28 | +Ramoops uses a predefined memory area to store the dump. The start and size |
29 | +and type of the memory area are set using three variables: |
30 | * "mem_address" for the start |
31 | * "mem_size" for the size. The memory size will be rounded down to a |
32 | power of two. |
33 | + * "mem_type" to specifiy if the memory type (default is pgprot_writecombine). |
34 | + |
35 | +Typically the default value of mem_type=0 should be used as that sets the pstore |
36 | +mapping to pgprot_writecombine. Setting mem_type=1 attempts to use |
37 | +pgprot_noncached, which only works on some platforms. This is because pstore |
38 | +depends on atomic operations. At least on ARM, pgprot_noncached causes the |
39 | +memory to be mapped strongly ordered, and atomic operations on strongly ordered |
40 | +memory are implementation defined, and won't work on many ARMs such as omaps. |
41 | |
42 | The memory area is divided into "record_size" chunks (also rounded down to |
43 | power of two) and each oops/panic writes a "record_size" chunk of |
44 | @@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners: |
45 | static struct ramoops_platform_data ramoops_data = { |
46 | .mem_size = <...>, |
47 | .mem_address = <...>, |
48 | + .mem_type = <...>, |
49 | .record_size = <...>, |
50 | .dump_oops = <...>, |
51 | .ecc = <...>, |
52 | diff --git a/Makefile b/Makefile |
53 | index 8f73b417dc1a..91cfe8d5ee06 100644 |
54 | --- a/Makefile |
55 | +++ b/Makefile |
56 | @@ -1,6 +1,6 @@ |
57 | VERSION = 3 |
58 | PATCHLEVEL = 18 |
59 | -SUBLEVEL = 2 |
60 | +SUBLEVEL = 3 |
61 | EXTRAVERSION = |
62 | NAME = Diseased Newt |
63 | |
64 | diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts |
65 | index 87aa4f3b8b3d..53bbfc90b26a 100644 |
66 | --- a/arch/arm/boot/dts/am437x-sk-evm.dts |
67 | +++ b/arch/arm/boot/dts/am437x-sk-evm.dts |
68 | @@ -100,7 +100,7 @@ |
69 | }; |
70 | |
71 | lcd0: display { |
72 | - compatible = "osddisplays,osd057T0559-34ts", "panel-dpi"; |
73 | + compatible = "newhaven,nhd-4.3-480272ef-atxl", "panel-dpi"; |
74 | label = "lcd"; |
75 | |
76 | pinctrl-names = "default"; |
77 | @@ -112,11 +112,11 @@ |
78 | clock-frequency = <9000000>; |
79 | hactive = <480>; |
80 | vactive = <272>; |
81 | - hfront-porch = <8>; |
82 | - hback-porch = <43>; |
83 | - hsync-len = <4>; |
84 | - vback-porch = <12>; |
85 | - vfront-porch = <4>; |
86 | + hfront-porch = <2>; |
87 | + hback-porch = <2>; |
88 | + hsync-len = <41>; |
89 | + vfront-porch = <2>; |
90 | + vback-porch = <2>; |
91 | vsync-len = <10>; |
92 | hsync-active = <0>; |
93 | vsync-active = <0>; |
94 | @@ -320,8 +320,7 @@ |
95 | |
96 | lcd_pins: lcd_pins { |
97 | pinctrl-single,pins = < |
98 | - /* GPIO 5_8 to select LCD / HDMI */ |
99 | - 0x238 (PIN_OUTPUT_PULLUP | MUX_MODE7) |
100 | + 0x1c (PIN_OUTPUT_PULLDOWN | MUX_MODE7) /* gpcm_ad7.gpio1_7 */ |
101 | >; |
102 | }; |
103 | }; |
104 | diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi |
105 | index 9cc98436a982..666e796847d8 100644 |
106 | --- a/arch/arm/boot/dts/dra7.dtsi |
107 | +++ b/arch/arm/boot/dts/dra7.dtsi |
108 | @@ -653,7 +653,7 @@ |
109 | }; |
110 | |
111 | wdt2: wdt@4ae14000 { |
112 | - compatible = "ti,omap4-wdt"; |
113 | + compatible = "ti,omap3-wdt"; |
114 | reg = <0x4ae14000 0x80>; |
115 | interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>; |
116 | ti,hwmods = "wd_timer2"; |
117 | diff --git a/arch/arm/boot/dts/s3c6410-mini6410.dts b/arch/arm/boot/dts/s3c6410-mini6410.dts |
118 | index 57e00f9bce99..a25debb50401 100644 |
119 | --- a/arch/arm/boot/dts/s3c6410-mini6410.dts |
120 | +++ b/arch/arm/boot/dts/s3c6410-mini6410.dts |
121 | @@ -198,10 +198,6 @@ |
122 | status = "okay"; |
123 | }; |
124 | |
125 | -&pwm { |
126 | - status = "okay"; |
127 | -}; |
128 | - |
129 | &pinctrl0 { |
130 | gpio_leds: gpio-leds { |
131 | samsung,pins = "gpk-4", "gpk-5", "gpk-6", "gpk-7"; |
132 | diff --git a/arch/arm/boot/dts/s3c64xx.dtsi b/arch/arm/boot/dts/s3c64xx.dtsi |
133 | index ff5bdaac987a..0ccb414cd268 100644 |
134 | --- a/arch/arm/boot/dts/s3c64xx.dtsi |
135 | +++ b/arch/arm/boot/dts/s3c64xx.dtsi |
136 | @@ -172,7 +172,6 @@ |
137 | clocks = <&clocks PCLK_PWM>; |
138 | samsung,pwm-outputs = <0>, <1>; |
139 | #pwm-cells = <3>; |
140 | - status = "disabled"; |
141 | }; |
142 | |
143 | pinctrl0: pinctrl@7f008000 { |
144 | diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig |
145 | index 9d7a32f93fcf..37560f19d346 100644 |
146 | --- a/arch/arm/configs/multi_v7_defconfig |
147 | +++ b/arch/arm/configs/multi_v7_defconfig |
148 | @@ -320,6 +320,7 @@ CONFIG_USB=y |
149 | CONFIG_USB_XHCI_HCD=y |
150 | CONFIG_USB_XHCI_MVEBU=y |
151 | CONFIG_USB_EHCI_HCD=y |
152 | +CONFIG_USB_EHCI_EXYNOS=y |
153 | CONFIG_USB_EHCI_TEGRA=y |
154 | CONFIG_USB_EHCI_HCD_PLATFORM=y |
155 | CONFIG_USB_ISP1760_HCD=y |
156 | @@ -445,4 +446,4 @@ CONFIG_DEBUG_FS=y |
157 | CONFIG_MAGIC_SYSRQ=y |
158 | CONFIG_LOCKUP_DETECTOR=y |
159 | CONFIG_CRYPTO_DEV_TEGRA_AES=y |
160 | -CONFIG_GENERIC_CPUFREQ_CPU0=y |
161 | +CONFIG_CPUFREQ_DT=y |
162 | diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig |
163 | index d7346ad51043..bfe79d5b8213 100644 |
164 | --- a/arch/arm/configs/shmobile_defconfig |
165 | +++ b/arch/arm/configs/shmobile_defconfig |
166 | @@ -176,5 +176,5 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y |
167 | CONFIG_CPU_FREQ_GOV_ONDEMAND=y |
168 | CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y |
169 | CONFIG_CPU_THERMAL=y |
170 | -CONFIG_GENERIC_CPUFREQ_CPU0=y |
171 | +CONFIG_CPUFREQ_DT=y |
172 | CONFIG_REGULATOR_DA9210=y |
173 | diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c |
174 | index c03106378b49..306e1ac2c8e3 100644 |
175 | --- a/arch/arm/kernel/setup.c |
176 | +++ b/arch/arm/kernel/setup.c |
177 | @@ -1043,6 +1043,15 @@ static int c_show(struct seq_file *m, void *v) |
178 | seq_printf(m, "model name\t: %s rev %d (%s)\n", |
179 | cpu_name, cpuid & 15, elf_platform); |
180 | |
181 | +#if defined(CONFIG_SMP) |
182 | + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
183 | + per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), |
184 | + (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); |
185 | +#else |
186 | + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n", |
187 | + loops_per_jiffy / (500000/HZ), |
188 | + (loops_per_jiffy / (5000/HZ)) % 100); |
189 | +#endif |
190 | /* dump out the processor features */ |
191 | seq_puts(m, "Features\t: "); |
192 | |
193 | diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c |
194 | index 13396d3d600e..a8e32aaf0383 100644 |
195 | --- a/arch/arm/kernel/smp.c |
196 | +++ b/arch/arm/kernel/smp.c |
197 | @@ -387,8 +387,17 @@ asmlinkage void secondary_start_kernel(void) |
198 | |
199 | void __init smp_cpus_done(unsigned int max_cpus) |
200 | { |
201 | - printk(KERN_INFO "SMP: Total of %d processors activated.\n", |
202 | - num_online_cpus()); |
203 | + int cpu; |
204 | + unsigned long bogosum = 0; |
205 | + |
206 | + for_each_online_cpu(cpu) |
207 | + bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; |
208 | + |
209 | + printk(KERN_INFO "SMP: Total of %d processors activated " |
210 | + "(%lu.%02lu BogoMIPS).\n", |
211 | + num_online_cpus(), |
212 | + bogosum / (500000/HZ), |
213 | + (bogosum / (5000/HZ)) % 100); |
214 | |
215 | hyp_mode_check(); |
216 | } |
217 | diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c |
218 | index 503097c72b82..e7f823b960c2 100644 |
219 | --- a/arch/arm/mach-omap2/pm44xx.c |
220 | +++ b/arch/arm/mach-omap2/pm44xx.c |
221 | @@ -160,26 +160,6 @@ static inline int omap4_init_static_deps(void) |
222 | struct clockdomain *ducati_clkdm, *l3_2_clkdm; |
223 | int ret = 0; |
224 | |
225 | - if (omap_rev() == OMAP4430_REV_ES1_0) { |
226 | - WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); |
227 | - return -ENODEV; |
228 | - } |
229 | - |
230 | - pr_err("Power Management for TI OMAP4.\n"); |
231 | - /* |
232 | - * OMAP4 chip PM currently works only with certain (newer) |
233 | - * versions of bootloaders. This is due to missing code in the |
234 | - * kernel to properly reset and initialize some devices. |
235 | - * http://www.spinics.net/lists/arm-kernel/msg218641.html |
236 | - */ |
237 | - pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n"); |
238 | - |
239 | - ret = pwrdm_for_each(pwrdms_setup, NULL); |
240 | - if (ret) { |
241 | - pr_err("Failed to setup powerdomains\n"); |
242 | - return ret; |
243 | - } |
244 | - |
245 | /* |
246 | * The dynamic dependency between MPUSS -> MEMIF and |
247 | * MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as |
248 | @@ -272,6 +252,15 @@ int __init omap4_pm_init(void) |
249 | |
250 | pr_info("Power Management for TI OMAP4+ devices.\n"); |
251 | |
252 | + /* |
253 | + * OMAP4 chip PM currently works only with certain (newer) |
254 | + * versions of bootloaders. This is due to missing code in the |
255 | + * kernel to properly reset and initialize some devices. |
256 | + * http://www.spinics.net/lists/arm-kernel/msg218641.html |
257 | + */ |
258 | + if (cpu_is_omap44xx()) |
259 | + pr_warn("OMAP4 PM: u-boot >= v2012.07 is required for full PM support\n"); |
260 | + |
261 | ret = pwrdm_for_each(pwrdms_setup, NULL); |
262 | if (ret) { |
263 | pr_err("Failed to setup powerdomains.\n"); |
264 | diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c |
265 | index 95c49ebc660d..1d85a7c5a850 100644 |
266 | --- a/arch/arm64/kernel/efi.c |
267 | +++ b/arch/arm64/kernel/efi.c |
268 | @@ -327,6 +327,7 @@ void __init efi_idmap_init(void) |
269 | |
270 | /* boot time idmap_pg_dir is incomplete, so fill in missing parts */ |
271 | efi_setup_idmap(); |
272 | + early_memunmap(memmap.map, memmap.map_end - memmap.map); |
273 | } |
274 | |
275 | static int __init remap_region(efi_memory_desc_t *md, void **new) |
276 | @@ -381,7 +382,6 @@ static int __init arm64_enter_virtual_mode(void) |
277 | } |
278 | |
279 | mapsize = memmap.map_end - memmap.map; |
280 | - early_memunmap(memmap.map, mapsize); |
281 | |
282 | if (efi_runtime_disabled()) { |
283 | pr_info("EFI runtime services will be disabled.\n"); |
284 | diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c |
285 | index f9620154bfb0..64c4f0800ee3 100644 |
286 | --- a/arch/arm64/kernel/setup.c |
287 | +++ b/arch/arm64/kernel/setup.c |
288 | @@ -394,6 +394,7 @@ void __init setup_arch(char **cmdline_p) |
289 | request_standard_resources(); |
290 | |
291 | efi_idmap_init(); |
292 | + early_ioremap_reset(); |
293 | |
294 | unflatten_device_tree(); |
295 | |
296 | diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S |
297 | index a564b440416a..ede186cdd452 100644 |
298 | --- a/arch/arm64/kernel/sleep.S |
299 | +++ b/arch/arm64/kernel/sleep.S |
300 | @@ -147,14 +147,12 @@ cpu_resume_after_mmu: |
301 | ret |
302 | ENDPROC(cpu_resume_after_mmu) |
303 | |
304 | - .data |
305 | ENTRY(cpu_resume) |
306 | bl el2_setup // if in EL2 drop to EL1 cleanly |
307 | #ifdef CONFIG_SMP |
308 | mrs x1, mpidr_el1 |
309 | - adr x4, mpidr_hash_ptr |
310 | - ldr x5, [x4] |
311 | - add x8, x4, x5 // x8 = struct mpidr_hash phys address |
312 | + adrp x8, mpidr_hash |
313 | + add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address |
314 | /* retrieve mpidr_hash members to compute the hash */ |
315 | ldr x2, [x8, #MPIDR_HASH_MASK] |
316 | ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS] |
317 | @@ -164,14 +162,15 @@ ENTRY(cpu_resume) |
318 | #else |
319 | mov x7, xzr |
320 | #endif |
321 | - adr x0, sleep_save_sp |
322 | + adrp x0, sleep_save_sp |
323 | + add x0, x0, #:lo12:sleep_save_sp |
324 | ldr x0, [x0, #SLEEP_SAVE_SP_PHYS] |
325 | ldr x0, [x0, x7, lsl #3] |
326 | /* load sp from context */ |
327 | ldr x2, [x0, #CPU_CTX_SP] |
328 | - adr x1, sleep_idmap_phys |
329 | + adrp x1, sleep_idmap_phys |
330 | /* load physical address of identity map page table in x1 */ |
331 | - ldr x1, [x1] |
332 | + ldr x1, [x1, #:lo12:sleep_idmap_phys] |
333 | mov sp, x2 |
334 | /* |
335 | * cpu_do_resume expects x0 to contain context physical address |
336 | @@ -180,26 +179,3 @@ ENTRY(cpu_resume) |
337 | bl cpu_do_resume // PC relative jump, MMU off |
338 | b cpu_resume_mmu // Resume MMU, never returns |
339 | ENDPROC(cpu_resume) |
340 | - |
341 | - .align 3 |
342 | -mpidr_hash_ptr: |
343 | - /* |
344 | - * offset of mpidr_hash symbol from current location |
345 | - * used to obtain run-time mpidr_hash address with MMU off |
346 | - */ |
347 | - .quad mpidr_hash - . |
348 | -/* |
349 | - * physical address of identity mapped page tables |
350 | - */ |
351 | - .type sleep_idmap_phys, #object |
352 | -ENTRY(sleep_idmap_phys) |
353 | - .quad 0 |
354 | -/* |
355 | - * struct sleep_save_sp { |
356 | - * phys_addr_t *save_ptr_stash; |
357 | - * phys_addr_t save_ptr_stash_phys; |
358 | - * }; |
359 | - */ |
360 | - .type sleep_save_sp, #object |
361 | -ENTRY(sleep_save_sp) |
362 | - .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp |
363 | diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c |
364 | index 13ad4dbb1615..2d6b6065fe7f 100644 |
365 | --- a/arch/arm64/kernel/suspend.c |
366 | +++ b/arch/arm64/kernel/suspend.c |
367 | @@ -5,6 +5,7 @@ |
368 | #include <asm/debug-monitors.h> |
369 | #include <asm/pgtable.h> |
370 | #include <asm/memory.h> |
371 | +#include <asm/mmu_context.h> |
372 | #include <asm/smp_plat.h> |
373 | #include <asm/suspend.h> |
374 | #include <asm/tlbflush.h> |
375 | @@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) |
376 | */ |
377 | ret = __cpu_suspend_enter(arg, fn); |
378 | if (ret == 0) { |
379 | - cpu_switch_mm(mm->pgd, mm); |
380 | + /* |
381 | + * We are resuming from reset with TTBR0_EL1 set to the |
382 | + * idmap to enable the MMU; restore the active_mm mappings in |
383 | + * TTBR0_EL1 unless the active_mm == &init_mm, in which case |
384 | + * the thread entered __cpu_suspend with TTBR0_EL1 set to |
385 | + * reserved TTBR0 page tables and should be restored as such. |
386 | + */ |
387 | + if (mm == &init_mm) |
388 | + cpu_set_reserved_ttbr0(); |
389 | + else |
390 | + cpu_switch_mm(mm->pgd, mm); |
391 | + |
392 | flush_tlb_all(); |
393 | |
394 | /* |
395 | @@ -126,8 +138,8 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) |
396 | return ret; |
397 | } |
398 | |
399 | -extern struct sleep_save_sp sleep_save_sp; |
400 | -extern phys_addr_t sleep_idmap_phys; |
401 | +struct sleep_save_sp sleep_save_sp; |
402 | +phys_addr_t sleep_idmap_phys; |
403 | |
404 | static int __init cpu_suspend_init(void) |
405 | { |
406 | diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h |
407 | index c998279bd85b..a68ee15964b3 100644 |
408 | --- a/arch/powerpc/include/asm/reg.h |
409 | +++ b/arch/powerpc/include/asm/reg.h |
410 | @@ -118,8 +118,10 @@ |
411 | #define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) |
412 | #ifdef __BIG_ENDIAN__ |
413 | #define MSR_ __MSR |
414 | +#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV) |
415 | #else |
416 | #define MSR_ (__MSR | MSR_LE) |
417 | +#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV | MSR_LE) |
418 | #endif |
419 | #define MSR_KERNEL (MSR_ | MSR_64BIT) |
420 | #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) |
421 | diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h |
422 | index 6240698fee9a..ff21b7a2f0cc 100644 |
423 | --- a/arch/powerpc/include/asm/syscall.h |
424 | +++ b/arch/powerpc/include/asm/syscall.h |
425 | @@ -90,6 +90,10 @@ static inline void syscall_set_arguments(struct task_struct *task, |
426 | |
427 | static inline int syscall_get_arch(void) |
428 | { |
429 | - return is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64; |
430 | + int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64; |
431 | +#ifdef __LITTLE_ENDIAN__ |
432 | + arch |= __AUDIT_ARCH_LE; |
433 | +#endif |
434 | + return arch; |
435 | } |
436 | #endif /* _ASM_SYSCALL_H */ |
437 | diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S |
438 | index c0754bbf8118..283c603716a0 100644 |
439 | --- a/arch/powerpc/kernel/idle_power7.S |
440 | +++ b/arch/powerpc/kernel/idle_power7.S |
441 | @@ -101,7 +101,23 @@ _GLOBAL(power7_powersave_common) |
442 | std r9,_MSR(r1) |
443 | std r1,PACAR1(r13) |
444 | |
445 | -_GLOBAL(power7_enter_nap_mode) |
446 | + /* |
447 | + * Go to real mode to do the nap, as required by the architecture. |
448 | + * Also, we need to be in real mode before setting hwthread_state, |
449 | + * because as soon as we do that, another thread can switch |
450 | + * the MMU context to the guest. |
451 | + */ |
452 | + LOAD_REG_IMMEDIATE(r5, MSR_IDLE) |
453 | + li r6, MSR_RI |
454 | + andc r6, r9, r6 |
455 | + LOAD_REG_ADDR(r7, power7_enter_nap_mode) |
456 | + mtmsrd r6, 1 /* clear RI before setting SRR0/1 */ |
457 | + mtspr SPRN_SRR0, r7 |
458 | + mtspr SPRN_SRR1, r5 |
459 | + rfid |
460 | + |
461 | + .globl power7_enter_nap_mode |
462 | +power7_enter_nap_mode: |
463 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
464 | /* Tell KVM we're napping */ |
465 | li r4,KVM_HWTHREAD_IN_NAP |
466 | diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c |
467 | index aa9aff3d6ad3..b6f123ab90ed 100644 |
468 | --- a/arch/powerpc/kernel/mce_power.c |
469 | +++ b/arch/powerpc/kernel/mce_power.c |
470 | @@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) |
471 | } |
472 | if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { |
473 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) |
474 | - cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); |
475 | + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); |
476 | /* reset error bits */ |
477 | dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; |
478 | } |
479 | @@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uint64_t srr1) |
480 | break; |
481 | case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: |
482 | if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { |
483 | - cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); |
484 | + cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET); |
485 | handled = 1; |
486 | } |
487 | break; |
488 | diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c |
489 | index 6e7c4923b5ea..411116c38da4 100644 |
490 | --- a/arch/powerpc/kernel/udbg_16550.c |
491 | +++ b/arch/powerpc/kernel/udbg_16550.c |
492 | @@ -69,8 +69,12 @@ static void udbg_uart_putc(char c) |
493 | |
494 | static int udbg_uart_getc_poll(void) |
495 | { |
496 | - if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR)) |
497 | + if (!udbg_uart_in) |
498 | + return -1; |
499 | + |
500 | + if (!(udbg_uart_in(UART_LSR) & LSR_DR)) |
501 | return udbg_uart_in(UART_RBR); |
502 | + |
503 | return -1; |
504 | } |
505 | |
506 | diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c |
507 | index dba34088da28..d073e0679a0c 100644 |
508 | --- a/arch/powerpc/perf/hv-24x7.c |
509 | +++ b/arch/powerpc/perf/hv-24x7.c |
510 | @@ -217,11 +217,14 @@ static bool is_physical_domain(int domain) |
511 | domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE; |
512 | } |
513 | |
514 | +DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096); |
515 | +DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096); |
516 | + |
517 | static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, |
518 | u16 lpar, u64 *res, |
519 | bool success_expected) |
520 | { |
521 | - unsigned long ret = -ENOMEM; |
522 | + unsigned long ret; |
523 | |
524 | /* |
525 | * request_buffer and result_buffer are not required to be 4k aligned, |
526 | @@ -243,13 +246,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, |
527 | BUILD_BUG_ON(sizeof(*request_buffer) > 4096); |
528 | BUILD_BUG_ON(sizeof(*result_buffer) > 4096); |
529 | |
530 | - request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); |
531 | - if (!request_buffer) |
532 | - goto out; |
533 | + request_buffer = (void *)get_cpu_var(hv_24x7_reqb); |
534 | + result_buffer = (void *)get_cpu_var(hv_24x7_resb); |
535 | |
536 | - result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER); |
537 | - if (!result_buffer) |
538 | - goto out_free_request_buffer; |
539 | + memset(request_buffer, 0, 4096); |
540 | + memset(result_buffer, 0, 4096); |
541 | |
542 | *request_buffer = (struct reqb) { |
543 | .buf = { |
544 | @@ -278,15 +279,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix, |
545 | domain, offset, ix, lpar, ret, ret, |
546 | result_buffer->buf.detailed_rc, |
547 | result_buffer->buf.failing_request_ix); |
548 | - goto out_free_result_buffer; |
549 | + goto out; |
550 | } |
551 | |
552 | *res = be64_to_cpu(result_buffer->result); |
553 | |
554 | -out_free_result_buffer: |
555 | - kfree(result_buffer); |
556 | -out_free_request_buffer: |
557 | - kfree(request_buffer); |
558 | out: |
559 | return ret; |
560 | } |
561 | diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c |
562 | index 0f961a1c64b3..6dc0ad9c7050 100644 |
563 | --- a/arch/s390/kvm/gaccess.c |
564 | +++ b/arch/s390/kvm/gaccess.c |
565 | @@ -229,10 +229,12 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu) |
566 | goto out; |
567 | ic = &vcpu->kvm->arch.sca->ipte_control; |
568 | do { |
569 | - old = ACCESS_ONCE(*ic); |
570 | + old = *ic; |
571 | + barrier(); |
572 | while (old.k) { |
573 | cond_resched(); |
574 | - old = ACCESS_ONCE(*ic); |
575 | + old = *ic; |
576 | + barrier(); |
577 | } |
578 | new = old; |
579 | new.k = 1; |
580 | @@ -251,7 +253,9 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu) |
581 | goto out; |
582 | ic = &vcpu->kvm->arch.sca->ipte_control; |
583 | do { |
584 | - new = old = ACCESS_ONCE(*ic); |
585 | + old = *ic; |
586 | + barrier(); |
587 | + new = old; |
588 | new.k = 0; |
589 | } while (cmpxchg(&ic->val, old.val, new.val) != old.val); |
590 | wake_up(&vcpu->kvm->arch.ipte_wq); |
591 | @@ -265,10 +269,12 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu) |
592 | |
593 | ic = &vcpu->kvm->arch.sca->ipte_control; |
594 | do { |
595 | - old = ACCESS_ONCE(*ic); |
596 | + old = *ic; |
597 | + barrier(); |
598 | while (old.kg) { |
599 | cond_resched(); |
600 | - old = ACCESS_ONCE(*ic); |
601 | + old = *ic; |
602 | + barrier(); |
603 | } |
604 | new = old; |
605 | new.k = 1; |
606 | @@ -282,7 +288,9 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu) |
607 | |
608 | ic = &vcpu->kvm->arch.sca->ipte_control; |
609 | do { |
610 | - new = old = ACCESS_ONCE(*ic); |
611 | + old = *ic; |
612 | + barrier(); |
613 | + new = old; |
614 | new.kh--; |
615 | if (!new.kh) |
616 | new.k = 0; |
617 | diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c |
618 | index a39838457f01..4fc3fed636dc 100644 |
619 | --- a/arch/s390/kvm/interrupt.c |
620 | +++ b/arch/s390/kvm/interrupt.c |
621 | @@ -270,7 +270,7 @@ static int __must_check __deliver_prog_irq(struct kvm_vcpu *vcpu, |
622 | break; |
623 | case PGM_MONITOR: |
624 | rc = put_guest_lc(vcpu, pgm_info->mon_class_nr, |
625 | - (u64 *)__LC_MON_CLASS_NR); |
626 | + (u16 *)__LC_MON_CLASS_NR); |
627 | rc |= put_guest_lc(vcpu, pgm_info->mon_code, |
628 | (u64 *)__LC_MON_CODE); |
629 | break; |
630 | diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c |
631 | index 72bb2dd8b9cd..9c565b6b4ccb 100644 |
632 | --- a/arch/s390/kvm/priv.c |
633 | +++ b/arch/s390/kvm/priv.c |
634 | @@ -791,7 +791,7 @@ int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) |
635 | break; |
636 | reg = (reg + 1) % 16; |
637 | } while (1); |
638 | - |
639 | + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
640 | return 0; |
641 | } |
642 | |
643 | @@ -863,7 +863,7 @@ static int handle_lctlg(struct kvm_vcpu *vcpu) |
644 | break; |
645 | reg = (reg + 1) % 16; |
646 | } while (1); |
647 | - |
648 | + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
649 | return 0; |
650 | } |
651 | |
652 | diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile |
653 | index fd0f848938cc..5a4a089e8b1f 100644 |
654 | --- a/arch/x86/crypto/Makefile |
655 | +++ b/arch/x86/crypto/Makefile |
656 | @@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o |
657 | |
658 | obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o |
659 | obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o |
660 | -obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ |
661 | obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o |
662 | obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o |
663 | obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o |
664 | @@ -46,6 +45,7 @@ endif |
665 | ifeq ($(avx2_supported),yes) |
666 | obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o |
667 | obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o |
668 | + obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ |
669 | endif |
670 | |
671 | aes-i586-y := aes-i586-asm_32.o aes_glue.o |
672 | diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S |
673 | index 2df2a0298f5a..a916c4a61165 100644 |
674 | --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S |
675 | +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S |
676 | @@ -208,7 +208,7 @@ ddq_add_8: |
677 | |
678 | .if (klen == KEY_128) |
679 | .if (load_keys) |
680 | - vmovdqa 3*16(p_keys), xkeyA |
681 | + vmovdqa 3*16(p_keys), xkey4 |
682 | .endif |
683 | .else |
684 | vmovdqa 3*16(p_keys), xkeyA |
685 | @@ -224,7 +224,7 @@ ddq_add_8: |
686 | add $(16*by), p_in |
687 | |
688 | .if (klen == KEY_128) |
689 | - vmovdqa 4*16(p_keys), xkey4 |
690 | + vmovdqa 4*16(p_keys), xkeyB |
691 | .else |
692 | .if (load_keys) |
693 | vmovdqa 4*16(p_keys), xkey4 |
694 | @@ -234,7 +234,12 @@ ddq_add_8: |
695 | .set i, 0 |
696 | .rept by |
697 | club XDATA, i |
698 | - vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ |
699 | + /* key 3 */ |
700 | + .if (klen == KEY_128) |
701 | + vaesenc xkey4, var_xdata, var_xdata |
702 | + .else |
703 | + vaesenc xkeyA, var_xdata, var_xdata |
704 | + .endif |
705 | .set i, (i +1) |
706 | .endr |
707 | |
708 | @@ -243,13 +248,18 @@ ddq_add_8: |
709 | .set i, 0 |
710 | .rept by |
711 | club XDATA, i |
712 | - vaesenc xkey4, var_xdata, var_xdata /* key 4 */ |
713 | + /* key 4 */ |
714 | + .if (klen == KEY_128) |
715 | + vaesenc xkeyB, var_xdata, var_xdata |
716 | + .else |
717 | + vaesenc xkey4, var_xdata, var_xdata |
718 | + .endif |
719 | .set i, (i +1) |
720 | .endr |
721 | |
722 | .if (klen == KEY_128) |
723 | .if (load_keys) |
724 | - vmovdqa 6*16(p_keys), xkeyB |
725 | + vmovdqa 6*16(p_keys), xkey8 |
726 | .endif |
727 | .else |
728 | vmovdqa 6*16(p_keys), xkeyB |
729 | @@ -267,12 +277,17 @@ ddq_add_8: |
730 | .set i, 0 |
731 | .rept by |
732 | club XDATA, i |
733 | - vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ |
734 | + /* key 6 */ |
735 | + .if (klen == KEY_128) |
736 | + vaesenc xkey8, var_xdata, var_xdata |
737 | + .else |
738 | + vaesenc xkeyB, var_xdata, var_xdata |
739 | + .endif |
740 | .set i, (i +1) |
741 | .endr |
742 | |
743 | .if (klen == KEY_128) |
744 | - vmovdqa 8*16(p_keys), xkey8 |
745 | + vmovdqa 8*16(p_keys), xkeyB |
746 | .else |
747 | .if (load_keys) |
748 | vmovdqa 8*16(p_keys), xkey8 |
749 | @@ -288,7 +303,7 @@ ddq_add_8: |
750 | |
751 | .if (klen == KEY_128) |
752 | .if (load_keys) |
753 | - vmovdqa 9*16(p_keys), xkeyA |
754 | + vmovdqa 9*16(p_keys), xkey12 |
755 | .endif |
756 | .else |
757 | vmovdqa 9*16(p_keys), xkeyA |
758 | @@ -297,7 +312,12 @@ ddq_add_8: |
759 | .set i, 0 |
760 | .rept by |
761 | club XDATA, i |
762 | - vaesenc xkey8, var_xdata, var_xdata /* key 8 */ |
763 | + /* key 8 */ |
764 | + .if (klen == KEY_128) |
765 | + vaesenc xkeyB, var_xdata, var_xdata |
766 | + .else |
767 | + vaesenc xkey8, var_xdata, var_xdata |
768 | + .endif |
769 | .set i, (i +1) |
770 | .endr |
771 | |
772 | @@ -306,7 +326,12 @@ ddq_add_8: |
773 | .set i, 0 |
774 | .rept by |
775 | club XDATA, i |
776 | - vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ |
777 | + /* key 9 */ |
778 | + .if (klen == KEY_128) |
779 | + vaesenc xkey12, var_xdata, var_xdata |
780 | + .else |
781 | + vaesenc xkeyA, var_xdata, var_xdata |
782 | + .endif |
783 | .set i, (i +1) |
784 | .endr |
785 | |
786 | @@ -412,7 +437,6 @@ ddq_add_8: |
787 | /* main body of aes ctr load */ |
788 | |
789 | .macro do_aes_ctrmain key_len |
790 | - |
791 | cmp $16, num_bytes |
792 | jb .Ldo_return2\key_len |
793 | |
794 | diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h |
795 | index 2a46ca720afc..2874be9aef0a 100644 |
796 | --- a/arch/x86/include/asm/vsyscall.h |
797 | +++ b/arch/x86/include/asm/vsyscall.h |
798 | @@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void) |
799 | native_read_tscp(&p); |
800 | } else { |
801 | /* Load per CPU data from GDT */ |
802 | - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); |
803 | + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); |
804 | } |
805 | |
806 | return p; |
807 | diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c |
808 | index 9762dbd9f3f7..e98f68cfea02 100644 |
809 | --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c |
810 | +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c |
811 | @@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, |
812 | return box; |
813 | } |
814 | |
815 | +/* |
816 | + * Using uncore_pmu_event_init pmu event_init callback |
817 | + * as a detection point for uncore events. |
818 | + */ |
819 | +static int uncore_pmu_event_init(struct perf_event *event); |
820 | + |
821 | +static bool is_uncore_event(struct perf_event *event) |
822 | +{ |
823 | + return event->pmu->event_init == uncore_pmu_event_init; |
824 | +} |
825 | + |
826 | static int |
827 | uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp) |
828 | { |
829 | @@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b |
830 | return -EINVAL; |
831 | |
832 | n = box->n_events; |
833 | - box->event_list[n] = leader; |
834 | - n++; |
835 | + |
836 | + if (is_uncore_event(leader)) { |
837 | + box->event_list[n] = leader; |
838 | + n++; |
839 | + } |
840 | + |
841 | if (!dogrp) |
842 | return n; |
843 | |
844 | list_for_each_entry(event, &leader->sibling_list, group_entry) { |
845 | - if (event->state <= PERF_EVENT_STATE_OFF) |
846 | + if (!is_uncore_event(event) || |
847 | + event->state <= PERF_EVENT_STATE_OFF) |
848 | continue; |
849 | |
850 | if (n >= max_count) |
851 | diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h |
852 | index 18eb78bbdd10..863d9b02563e 100644 |
853 | --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h |
854 | +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h |
855 | @@ -17,7 +17,7 @@ |
856 | #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) |
857 | #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) |
858 | #define UNCORE_EXTRA_PCI_DEV 0xff |
859 | -#define UNCORE_EXTRA_PCI_DEV_MAX 2 |
860 | +#define UNCORE_EXTRA_PCI_DEV_MAX 3 |
861 | |
862 | /* support up to 8 sockets */ |
863 | #define UNCORE_SOCKET_MAX 8 |
864 | diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c |
865 | index f9ed429d6e4f..ab474faa262b 100644 |
866 | --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c |
867 | +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c |
868 | @@ -887,6 +887,7 @@ void snbep_uncore_cpu_init(void) |
869 | enum { |
870 | SNBEP_PCI_QPI_PORT0_FILTER, |
871 | SNBEP_PCI_QPI_PORT1_FILTER, |
872 | + HSWEP_PCI_PCU_3, |
873 | }; |
874 | |
875 | static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) |
876 | @@ -2022,6 +2023,17 @@ void hswep_uncore_cpu_init(void) |
877 | { |
878 | if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) |
879 | hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; |
880 | + |
881 | + /* Detect 6-8 core systems with only two SBOXes */ |
882 | + if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) { |
883 | + u32 capid4; |
884 | + |
885 | + pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3], |
886 | + 0x94, &capid4); |
887 | + if (((capid4 >> 6) & 0x3) == 0) |
888 | + hswep_uncore_sbox.num_boxes = 2; |
889 | + } |
890 | + |
891 | uncore_msr_uncores = hswep_msr_uncores; |
892 | } |
893 | |
894 | @@ -2279,6 +2291,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = { |
895 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
896 | SNBEP_PCI_QPI_PORT1_FILTER), |
897 | }, |
898 | + { /* PCU.3 (for Capability registers) */ |
899 | + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), |
900 | + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, |
901 | + HSWEP_PCI_PCU_3), |
902 | + }, |
903 | { /* end: all zeroes */ } |
904 | }; |
905 | |
906 | diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c |
907 | index 4c540c4719d8..0de1fae2bdf0 100644 |
908 | --- a/arch/x86/kernel/xsave.c |
909 | +++ b/arch/x86/kernel/xsave.c |
910 | @@ -738,3 +738,4 @@ void *get_xsave_addr(struct xsave_struct *xsave, int xstate) |
911 | |
912 | return (void *)xsave + xstate_comp_offsets[feature]; |
913 | } |
914 | +EXPORT_SYMBOL_GPL(get_xsave_addr); |
915 | diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
916 | index 976e3a57f9ea..88f92014ba6b 100644 |
917 | --- a/arch/x86/kvm/cpuid.c |
918 | +++ b/arch/x86/kvm/cpuid.c |
919 | @@ -319,6 +319,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
920 | F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) | |
921 | F(ADX) | F(SMAP); |
922 | |
923 | + /* cpuid 0xD.1.eax */ |
924 | + const u32 kvm_supported_word10_x86_features = |
925 | + F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1); |
926 | + |
927 | /* all calls to cpuid_count() should be made on the same cpu */ |
928 | get_cpu(); |
929 | |
930 | @@ -455,13 +459,18 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
931 | entry->eax &= supported; |
932 | entry->edx &= supported >> 32; |
933 | entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
934 | + if (!supported) |
935 | + break; |
936 | + |
937 | for (idx = 1, i = 1; idx < 64; ++idx) { |
938 | u64 mask = ((u64)1 << idx); |
939 | if (*nent >= maxnent) |
940 | goto out; |
941 | |
942 | do_cpuid_1_ent(&entry[i], function, idx); |
943 | - if (entry[i].eax == 0 || !(supported & mask)) |
944 | + if (idx == 1) |
945 | + entry[i].eax &= kvm_supported_word10_x86_features; |
946 | + else if (entry[i].eax == 0 || !(supported & mask)) |
947 | continue; |
948 | entry[i].flags |= |
949 | KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
950 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
951 | index 9f8a2faf5040..22e7ed9e6d8e 100644 |
952 | --- a/arch/x86/kvm/emulate.c |
953 | +++ b/arch/x86/kvm/emulate.c |
954 | @@ -2128,7 +2128,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) |
955 | /* Outer-privilege level return is not implemented */ |
956 | if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) |
957 | return X86EMUL_UNHANDLEABLE; |
958 | - rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false, |
959 | + rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false, |
960 | &new_desc); |
961 | if (rc != X86EMUL_CONTINUE) |
962 | return rc; |
963 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
964 | index 978f402006ee..9c12e63c653f 100644 |
965 | --- a/arch/x86/kvm/mmu.c |
966 | +++ b/arch/x86/kvm/mmu.c |
967 | @@ -4449,7 +4449,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm) |
968 | * zap all shadow pages. |
969 | */ |
970 | if (unlikely(kvm_current_mmio_generation(kvm) == 0)) { |
971 | - printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); |
972 | + printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n"); |
973 | kvm_mmu_invalidate_zap_all_pages(kvm); |
974 | } |
975 | } |
976 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
977 | index 0033df32a745..506488cfa385 100644 |
978 | --- a/arch/x86/kvm/x86.c |
979 | +++ b/arch/x86/kvm/x86.c |
980 | @@ -3128,15 +3128,89 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, |
981 | return 0; |
982 | } |
983 | |
984 | +#define XSTATE_COMPACTION_ENABLED (1ULL << 63) |
985 | + |
986 | +static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) |
987 | +{ |
988 | + struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; |
989 | + u64 xstate_bv = xsave->xsave_hdr.xstate_bv; |
990 | + u64 valid; |
991 | + |
992 | + /* |
993 | + * Copy legacy XSAVE area, to avoid complications with CPUID |
994 | + * leaves 0 and 1 in the loop below. |
995 | + */ |
996 | + memcpy(dest, xsave, XSAVE_HDR_OFFSET); |
997 | + |
998 | + /* Set XSTATE_BV */ |
999 | + *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv; |
1000 | + |
1001 | + /* |
1002 | + * Copy each region from the possibly compacted offset to the |
1003 | + * non-compacted offset. |
1004 | + */ |
1005 | + valid = xstate_bv & ~XSTATE_FPSSE; |
1006 | + while (valid) { |
1007 | + u64 feature = valid & -valid; |
1008 | + int index = fls64(feature) - 1; |
1009 | + void *src = get_xsave_addr(xsave, feature); |
1010 | + |
1011 | + if (src) { |
1012 | + u32 size, offset, ecx, edx; |
1013 | + cpuid_count(XSTATE_CPUID, index, |
1014 | + &size, &offset, &ecx, &edx); |
1015 | + memcpy(dest + offset, src, size); |
1016 | + } |
1017 | + |
1018 | + valid -= feature; |
1019 | + } |
1020 | +} |
1021 | + |
1022 | +static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) |
1023 | +{ |
1024 | + struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave; |
1025 | + u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET); |
1026 | + u64 valid; |
1027 | + |
1028 | + /* |
1029 | + * Copy legacy XSAVE area, to avoid complications with CPUID |
1030 | + * leaves 0 and 1 in the loop below. |
1031 | + */ |
1032 | + memcpy(xsave, src, XSAVE_HDR_OFFSET); |
1033 | + |
1034 | + /* Set XSTATE_BV and possibly XCOMP_BV. */ |
1035 | + xsave->xsave_hdr.xstate_bv = xstate_bv; |
1036 | + if (cpu_has_xsaves) |
1037 | + xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; |
1038 | + |
1039 | + /* |
1040 | + * Copy each region from the non-compacted offset to the |
1041 | + * possibly compacted offset. |
1042 | + */ |
1043 | + valid = xstate_bv & ~XSTATE_FPSSE; |
1044 | + while (valid) { |
1045 | + u64 feature = valid & -valid; |
1046 | + int index = fls64(feature) - 1; |
1047 | + void *dest = get_xsave_addr(xsave, feature); |
1048 | + |
1049 | + if (dest) { |
1050 | + u32 size, offset, ecx, edx; |
1051 | + cpuid_count(XSTATE_CPUID, index, |
1052 | + &size, &offset, &ecx, &edx); |
1053 | + memcpy(dest, src + offset, size); |
1054 | + } else |
1055 | + WARN_ON_ONCE(1); |
1056 | + |
1057 | + valid -= feature; |
1058 | + } |
1059 | +} |
1060 | + |
1061 | static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, |
1062 | struct kvm_xsave *guest_xsave) |
1063 | { |
1064 | if (cpu_has_xsave) { |
1065 | - memcpy(guest_xsave->region, |
1066 | - &vcpu->arch.guest_fpu.state->xsave, |
1067 | - vcpu->arch.guest_xstate_size); |
1068 | - *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] &= |
1069 | - vcpu->arch.guest_supported_xcr0 | XSTATE_FPSSE; |
1070 | + memset(guest_xsave, 0, sizeof(struct kvm_xsave)); |
1071 | + fill_xsave((u8 *) guest_xsave->region, vcpu); |
1072 | } else { |
1073 | memcpy(guest_xsave->region, |
1074 | &vcpu->arch.guest_fpu.state->fxsave, |
1075 | @@ -3160,8 +3234,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, |
1076 | */ |
1077 | if (xstate_bv & ~kvm_supported_xcr0()) |
1078 | return -EINVAL; |
1079 | - memcpy(&vcpu->arch.guest_fpu.state->xsave, |
1080 | - guest_xsave->region, vcpu->arch.guest_xstate_size); |
1081 | + load_xsave(vcpu, (u8 *)guest_xsave->region); |
1082 | } else { |
1083 | if (xstate_bv & ~XSTATE_FPSSE) |
1084 | return -EINVAL; |
1085 | @@ -6873,6 +6946,9 @@ int fx_init(struct kvm_vcpu *vcpu) |
1086 | return err; |
1087 | |
1088 | fpu_finit(&vcpu->arch.guest_fpu); |
1089 | + if (cpu_has_xsaves) |
1090 | + vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv = |
1091 | + host_xcr0 | XSTATE_COMPACTION_ENABLED; |
1092 | |
1093 | /* |
1094 | * Ensure guest xcr0 is valid for loading |
1095 | diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c |
1096 | index 970463b566cf..208c2206df46 100644 |
1097 | --- a/arch/x86/vdso/vma.c |
1098 | +++ b/arch/x86/vdso/vma.c |
1099 | @@ -54,12 +54,17 @@ subsys_initcall(init_vdso); |
1100 | |
1101 | struct linux_binprm; |
1102 | |
1103 | -/* Put the vdso above the (randomized) stack with another randomized offset. |
1104 | - This way there is no hole in the middle of address space. |
1105 | - To save memory make sure it is still in the same PTE as the stack top. |
1106 | - This doesn't give that many random bits. |
1107 | - |
1108 | - Only used for the 64-bit and x32 vdsos. */ |
1109 | +/* |
1110 | + * Put the vdso above the (randomized) stack with another randomized |
1111 | + * offset. This way there is no hole in the middle of address space. |
1112 | + * To save memory make sure it is still in the same PTE as the stack |
1113 | + * top. This doesn't give that many random bits. |
1114 | + * |
1115 | + * Note that this algorithm is imperfect: the distribution of the vdso |
1116 | + * start address within a PMD is biased toward the end. |
1117 | + * |
1118 | + * Only used for the 64-bit and x32 vdsos. |
1119 | + */ |
1120 | static unsigned long vdso_addr(unsigned long start, unsigned len) |
1121 | { |
1122 | #ifdef CONFIG_X86_32 |
1123 | @@ -67,22 +72,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) |
1124 | #else |
1125 | unsigned long addr, end; |
1126 | unsigned offset; |
1127 | - end = (start + PMD_SIZE - 1) & PMD_MASK; |
1128 | + |
1129 | + /* |
1130 | + * Round up the start address. It can start out unaligned as a result |
1131 | + * of stack start randomization. |
1132 | + */ |
1133 | + start = PAGE_ALIGN(start); |
1134 | + |
1135 | + /* Round the lowest possible end address up to a PMD boundary. */ |
1136 | + end = (start + len + PMD_SIZE - 1) & PMD_MASK; |
1137 | if (end >= TASK_SIZE_MAX) |
1138 | end = TASK_SIZE_MAX; |
1139 | end -= len; |
1140 | - /* This loses some more bits than a modulo, but is cheaper */ |
1141 | - offset = get_random_int() & (PTRS_PER_PTE - 1); |
1142 | - addr = start + (offset << PAGE_SHIFT); |
1143 | - if (addr >= end) |
1144 | - addr = end; |
1145 | + |
1146 | + if (end > start) { |
1147 | + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); |
1148 | + addr = start + (offset << PAGE_SHIFT); |
1149 | + } else { |
1150 | + addr = start; |
1151 | + } |
1152 | |
1153 | /* |
1154 | - * page-align it here so that get_unmapped_area doesn't |
1155 | - * align it wrongfully again to the next page. addr can come in 4K |
1156 | - * unaligned here as a result of stack start randomization. |
1157 | + * Forcibly align the final address in case we have a hardware |
1158 | + * issue that requires alignment for performance reasons. |
1159 | */ |
1160 | - addr = PAGE_ALIGN(addr); |
1161 | addr = align_vdso_addr(addr); |
1162 | |
1163 | return addr; |
1164 | diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h |
1165 | index 2c7901edffaf..01cef6b40829 100644 |
1166 | --- a/arch/xtensa/include/asm/highmem.h |
1167 | +++ b/arch/xtensa/include/asm/highmem.h |
1168 | @@ -25,7 +25,7 @@ |
1169 | #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) |
1170 | #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) |
1171 | |
1172 | -#define kmap_prot PAGE_KERNEL |
1173 | +#define kmap_prot PAGE_KERNEL_EXEC |
1174 | |
1175 | #if DCACHE_WAY_SIZE > PAGE_SIZE |
1176 | #define get_pkmap_color get_pkmap_color |
1177 | diff --git a/block/blk-core.c b/block/blk-core.c |
1178 | index 0421b53e6431..93f9152fc271 100644 |
1179 | --- a/block/blk-core.c |
1180 | +++ b/block/blk-core.c |
1181 | @@ -525,6 +525,9 @@ void blk_cleanup_queue(struct request_queue *q) |
1182 | del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); |
1183 | blk_sync_queue(q); |
1184 | |
1185 | + if (q->mq_ops) |
1186 | + blk_mq_free_queue(q); |
1187 | + |
1188 | spin_lock_irq(lock); |
1189 | if (q->queue_lock != &q->__queue_lock) |
1190 | q->queue_lock = &q->__queue_lock; |
1191 | diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c |
1192 | index 1065d7c65fa1..72e5ed691e37 100644 |
1193 | --- a/block/blk-mq-cpumap.c |
1194 | +++ b/block/blk-mq-cpumap.c |
1195 | @@ -90,7 +90,7 @@ unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) |
1196 | unsigned int *map; |
1197 | |
1198 | /* If cpus are offline, map them to first hctx */ |
1199 | - map = kzalloc_node(sizeof(*map) * num_possible_cpus(), GFP_KERNEL, |
1200 | + map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, |
1201 | set->numa_node); |
1202 | if (!map) |
1203 | return NULL; |
1204 | diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c |
1205 | index 371d8800b48a..1630a20d5dcf 100644 |
1206 | --- a/block/blk-mq-sysfs.c |
1207 | +++ b/block/blk-mq-sysfs.c |
1208 | @@ -390,16 +390,15 @@ static void blk_mq_sysfs_init(struct request_queue *q) |
1209 | { |
1210 | struct blk_mq_hw_ctx *hctx; |
1211 | struct blk_mq_ctx *ctx; |
1212 | - int i, j; |
1213 | + int i; |
1214 | |
1215 | kobject_init(&q->mq_kobj, &blk_mq_ktype); |
1216 | |
1217 | - queue_for_each_hw_ctx(q, hctx, i) { |
1218 | + queue_for_each_hw_ctx(q, hctx, i) |
1219 | kobject_init(&hctx->kobj, &blk_mq_hw_ktype); |
1220 | |
1221 | - hctx_for_each_ctx(hctx, ctx, j) |
1222 | - kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); |
1223 | - } |
1224 | + queue_for_each_ctx(q, ctx, i) |
1225 | + kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); |
1226 | } |
1227 | |
1228 | /* see blk_register_queue() */ |
1229 | diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c |
1230 | index 8317175a3009..ff18dab6b585 100644 |
1231 | --- a/block/blk-mq-tag.c |
1232 | +++ b/block/blk-mq-tag.c |
1233 | @@ -137,6 +137,7 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, |
1234 | static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag) |
1235 | { |
1236 | int tag, org_last_tag, end; |
1237 | + bool wrap = last_tag != 0; |
1238 | |
1239 | org_last_tag = last_tag; |
1240 | end = bm->depth; |
1241 | @@ -148,15 +149,16 @@ restart: |
1242 | * We started with an offset, start from 0 to |
1243 | * exhaust the map. |
1244 | */ |
1245 | - if (org_last_tag && last_tag) { |
1246 | - end = last_tag; |
1247 | + if (wrap) { |
1248 | + wrap = false; |
1249 | + end = org_last_tag; |
1250 | last_tag = 0; |
1251 | goto restart; |
1252 | } |
1253 | return -1; |
1254 | } |
1255 | last_tag = tag + 1; |
1256 | - } while (test_and_set_bit_lock(tag, &bm->word)); |
1257 | + } while (test_and_set_bit(tag, &bm->word)); |
1258 | |
1259 | return tag; |
1260 | } |
1261 | @@ -340,11 +342,10 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag) |
1262 | struct bt_wait_state *bs; |
1263 | int wait_cnt; |
1264 | |
1265 | - /* |
1266 | - * The unlock memory barrier need to order access to req in free |
1267 | - * path and clearing tag bit |
1268 | - */ |
1269 | - clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word); |
1270 | + clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word); |
1271 | + |
1272 | + /* Ensure that the wait list checks occur after clear_bit(). */ |
1273 | + smp_mb(); |
1274 | |
1275 | bs = bt_wake_ptr(bt); |
1276 | if (!bs) |
1277 | diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c |
1278 | index 1fac43408911..935ea2aa0730 100644 |
1279 | --- a/block/blk-sysfs.c |
1280 | +++ b/block/blk-sysfs.c |
1281 | @@ -492,17 +492,15 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head) |
1282 | * Currently, its primary task it to free all the &struct request |
1283 | * structures that were allocated to the queue and the queue itself. |
1284 | * |
1285 | - * Caveat: |
1286 | - * Hopefully the low level driver will have finished any |
1287 | - * outstanding requests first... |
1288 | + * Note: |
1289 | + * The low level driver must have finished any outstanding requests first |
1290 | + * via blk_cleanup_queue(). |
1291 | **/ |
1292 | static void blk_release_queue(struct kobject *kobj) |
1293 | { |
1294 | struct request_queue *q = |
1295 | container_of(kobj, struct request_queue, kobj); |
1296 | |
1297 | - blk_sync_queue(q); |
1298 | - |
1299 | blkcg_exit_queue(q); |
1300 | |
1301 | if (q->elevator) { |
1302 | @@ -517,9 +515,7 @@ static void blk_release_queue(struct kobject *kobj) |
1303 | if (q->queue_tags) |
1304 | __blk_queue_free_tags(q); |
1305 | |
1306 | - if (q->mq_ops) |
1307 | - blk_mq_free_queue(q); |
1308 | - else |
1309 | + if (!q->mq_ops) |
1310 | blk_free_flush_queue(q->fq); |
1311 | |
1312 | blk_trace_shutdown(q); |
1313 | diff --git a/block/genhd.c b/block/genhd.c |
1314 | index bd3060684ab2..0a536dc05f3b 100644 |
1315 | --- a/block/genhd.c |
1316 | +++ b/block/genhd.c |
1317 | @@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno) |
1318 | struct disk_part_tbl *old_ptbl = disk->part_tbl; |
1319 | struct disk_part_tbl *new_ptbl; |
1320 | int len = old_ptbl ? old_ptbl->len : 0; |
1321 | - int target = partno + 1; |
1322 | + int i, target; |
1323 | size_t size; |
1324 | - int i; |
1325 | + |
1326 | + /* |
1327 | + * check for int overflow, since we can get here from blkpg_ioctl() |
1328 | + * with a user passed 'partno'. |
1329 | + */ |
1330 | + target = partno + 1; |
1331 | + if (target < 0) |
1332 | + return -EINVAL; |
1333 | |
1334 | /* disk_max_parts() is zero during initialization, ignore if so */ |
1335 | if (disk_max_parts(disk) && target > disk_max_parts(disk)) |
1336 | diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c |
1337 | index 7db193160766..93b71420a046 100644 |
1338 | --- a/drivers/acpi/device_pm.c |
1339 | +++ b/drivers/acpi/device_pm.c |
1340 | @@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device) |
1341 | |
1342 | device->power.state = ACPI_STATE_UNKNOWN; |
1343 | if (!acpi_device_is_present(device)) |
1344 | - return 0; |
1345 | + return -ENXIO; |
1346 | |
1347 | result = acpi_device_get_power(device, &state); |
1348 | if (result) |
1349 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c |
1350 | index 0476e90b2091..c9ea3dfb4974 100644 |
1351 | --- a/drivers/acpi/scan.c |
1352 | +++ b/drivers/acpi/scan.c |
1353 | @@ -909,7 +909,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device) |
1354 | if (device->wakeup.flags.valid) |
1355 | acpi_power_resources_list_free(&device->wakeup.resources); |
1356 | |
1357 | - if (!device->flags.power_manageable) |
1358 | + if (!device->power.flags.power_resources) |
1359 | return; |
1360 | |
1361 | for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { |
1362 | @@ -1631,10 +1631,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) |
1363 | device->power.flags.power_resources) |
1364 | device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; |
1365 | |
1366 | - if (acpi_bus_init_power(device)) { |
1367 | - acpi_free_power_resources_lists(device); |
1368 | + if (acpi_bus_init_power(device)) |
1369 | device->flags.power_manageable = 0; |
1370 | - } |
1371 | } |
1372 | |
1373 | static void acpi_bus_get_flags(struct acpi_device *device) |
1374 | @@ -2202,13 +2200,18 @@ static void acpi_bus_attach(struct acpi_device *device) |
1375 | /* Skip devices that are not present. */ |
1376 | if (!acpi_device_is_present(device)) { |
1377 | device->flags.visited = false; |
1378 | + device->flags.power_manageable = 0; |
1379 | return; |
1380 | } |
1381 | if (device->handler) |
1382 | goto ok; |
1383 | |
1384 | if (!device->flags.initialized) { |
1385 | - acpi_bus_update_power(device, NULL); |
1386 | + device->flags.power_manageable = |
1387 | + device->power.states[ACPI_STATE_D0].flags.valid; |
1388 | + if (acpi_bus_init_power(device)) |
1389 | + device->flags.power_manageable = 0; |
1390 | + |
1391 | device->flags.initialized = true; |
1392 | } |
1393 | device->flags.visited = false; |
1394 | diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c |
1395 | index 9d75ead2a1f9..41322591fb43 100644 |
1396 | --- a/drivers/acpi/video.c |
1397 | +++ b/drivers/acpi/video.c |
1398 | @@ -155,6 +155,7 @@ struct acpi_video_bus { |
1399 | u8 dos_setting; |
1400 | struct acpi_video_enumerated_device *attached_array; |
1401 | u8 attached_count; |
1402 | + u8 child_count; |
1403 | struct acpi_video_bus_cap cap; |
1404 | struct acpi_video_bus_flags flags; |
1405 | struct list_head video_device_list; |
1406 | @@ -504,6 +505,23 @@ static struct dmi_system_id video_dmi_table[] __initdata = { |
1407 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), |
1408 | }, |
1409 | }, |
1410 | + |
1411 | + { |
1412 | + .callback = video_disable_native_backlight, |
1413 | + .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E", |
1414 | + .matches = { |
1415 | + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), |
1416 | + DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"), |
1417 | + }, |
1418 | + }, |
1419 | + { |
1420 | + .callback = video_disable_native_backlight, |
1421 | + .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V", |
1422 | + .matches = { |
1423 | + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), |
1424 | + DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), |
1425 | + }, |
1426 | + }, |
1427 | {} |
1428 | }; |
1429 | |
1430 | @@ -1159,8 +1177,12 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device) |
1431 | struct acpi_video_bus *video = device->video; |
1432 | int i; |
1433 | |
1434 | - /* If we have a broken _DOD, no need to test */ |
1435 | - if (!video->attached_count) |
1436 | + /* |
1437 | + * If we have a broken _DOD or we have more than 8 output devices |
1438 | + * under the graphics controller node that we can't proper deal with |
1439 | + * in the operation region code currently, no need to test. |
1440 | + */ |
1441 | + if (!video->attached_count || video->child_count > 8) |
1442 | return true; |
1443 | |
1444 | for (i = 0; i < video->attached_count; i++) { |
1445 | @@ -1413,6 +1435,7 @@ acpi_video_bus_get_devices(struct acpi_video_bus *video, |
1446 | dev_err(&dev->dev, "Can't attach device\n"); |
1447 | break; |
1448 | } |
1449 | + video->child_count++; |
1450 | } |
1451 | return status; |
1452 | } |
1453 | diff --git a/drivers/base/bus.c b/drivers/base/bus.c |
1454 | index 83e910a57563..876bae5ade33 100644 |
1455 | --- a/drivers/base/bus.c |
1456 | +++ b/drivers/base/bus.c |
1457 | @@ -254,13 +254,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus, |
1458 | const char *buf, size_t count) |
1459 | { |
1460 | struct device *dev; |
1461 | + int err = -EINVAL; |
1462 | |
1463 | dev = bus_find_device_by_name(bus, NULL, buf); |
1464 | if (!dev) |
1465 | return -ENODEV; |
1466 | - if (bus_rescan_devices_helper(dev, NULL) != 0) |
1467 | - return -EINVAL; |
1468 | - return count; |
1469 | + if (bus_rescan_devices_helper(dev, NULL) == 0) |
1470 | + err = count; |
1471 | + put_device(dev); |
1472 | + return err; |
1473 | } |
1474 | |
1475 | static struct device *next_device(struct klist_iter *i) |
1476 | diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c |
1477 | index 5a01c53dddeb..3b797cd5a407 100644 |
1478 | --- a/drivers/block/drbd/drbd_req.c |
1479 | +++ b/drivers/block/drbd/drbd_req.c |
1480 | @@ -1545,6 +1545,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct |
1481 | struct request_queue * const b = |
1482 | device->ldev->backing_bdev->bd_disk->queue; |
1483 | if (b->merge_bvec_fn) { |
1484 | + bvm->bi_bdev = device->ldev->backing_bdev; |
1485 | backing_limit = b->merge_bvec_fn(b, bvm, bvec); |
1486 | limit = min(limit, backing_limit); |
1487 | } |
1488 | @@ -1628,7 +1629,7 @@ void request_timer_fn(unsigned long data) |
1489 | time_after(now, req_peer->pre_send_jif + ent) && |
1490 | !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { |
1491 | drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); |
1492 | - _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); |
1493 | + _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD); |
1494 | } |
1495 | if (dt && oldest_submit_jif != now && |
1496 | time_after(now, oldest_submit_jif + dt) && |
1497 | diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c |
1498 | index d85ced27ebd5..086240cd29c3 100644 |
1499 | --- a/drivers/bluetooth/ath3k.c |
1500 | +++ b/drivers/bluetooth/ath3k.c |
1501 | @@ -105,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = { |
1502 | { USB_DEVICE(0x13d3, 0x3375) }, |
1503 | { USB_DEVICE(0x13d3, 0x3393) }, |
1504 | { USB_DEVICE(0x13d3, 0x3402) }, |
1505 | + { USB_DEVICE(0x13d3, 0x3408) }, |
1506 | { USB_DEVICE(0x13d3, 0x3432) }, |
1507 | |
1508 | /* Atheros AR5BBU12 with sflash firmware */ |
1509 | @@ -156,6 +157,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { |
1510 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
1511 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
1512 | { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, |
1513 | + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, |
1514 | { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, |
1515 | |
1516 | /* Atheros AR5BBU22 with sflash firmware */ |
1517 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
1518 | index edfc17bfcd44..091c813df8e9 100644 |
1519 | --- a/drivers/bluetooth/btusb.c |
1520 | +++ b/drivers/bluetooth/btusb.c |
1521 | @@ -182,6 +182,7 @@ static const struct usb_device_id blacklist_table[] = { |
1522 | { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, |
1523 | { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, |
1524 | { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, |
1525 | + { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, |
1526 | { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, |
1527 | |
1528 | /* Atheros AR5BBU12 with sflash firmware */ |
1529 | diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c |
1530 | index 34174d01462e..471f985e38d2 100644 |
1531 | --- a/drivers/char/i8k.c |
1532 | +++ b/drivers/char/i8k.c |
1533 | @@ -711,6 +711,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = { |
1534 | .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520], |
1535 | }, |
1536 | { |
1537 | + .ident = "Dell Latitude E6440", |
1538 | + .matches = { |
1539 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
1540 | + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"), |
1541 | + }, |
1542 | + .driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540], |
1543 | + }, |
1544 | + { |
1545 | .ident = "Dell Latitude E6540", |
1546 | .matches = { |
1547 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
1548 | diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c |
1549 | index ff2b434b3db4..760947e380c9 100644 |
1550 | --- a/drivers/gpu/drm/nouveau/core/core/event.c |
1551 | +++ b/drivers/gpu/drm/nouveau/core/core/event.c |
1552 | @@ -26,7 +26,7 @@ |
1553 | void |
1554 | nvkm_event_put(struct nvkm_event *event, u32 types, int index) |
1555 | { |
1556 | - BUG_ON(!spin_is_locked(&event->refs_lock)); |
1557 | + assert_spin_locked(&event->refs_lock); |
1558 | while (types) { |
1559 | int type = __ffs(types); types &= ~(1 << type); |
1560 | if (--event->refs[index * event->types_nr + type] == 0) { |
1561 | @@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index) |
1562 | void |
1563 | nvkm_event_get(struct nvkm_event *event, u32 types, int index) |
1564 | { |
1565 | - BUG_ON(!spin_is_locked(&event->refs_lock)); |
1566 | + assert_spin_locked(&event->refs_lock); |
1567 | while (types) { |
1568 | int type = __ffs(types); types &= ~(1 << type); |
1569 | if (++event->refs[index * event->types_nr + type] == 1) { |
1570 | diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c |
1571 | index d1bcde55e9d7..839a32577680 100644 |
1572 | --- a/drivers/gpu/drm/nouveau/core/core/notify.c |
1573 | +++ b/drivers/gpu/drm/nouveau/core/core/notify.c |
1574 | @@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) |
1575 | struct nvkm_event *event = notify->event; |
1576 | unsigned long flags; |
1577 | |
1578 | - BUG_ON(!spin_is_locked(&event->list_lock)); |
1579 | + assert_spin_locked(&event->list_lock); |
1580 | BUG_ON(size != notify->size); |
1581 | |
1582 | spin_lock_irqsave(&event->refs_lock, flags); |
1583 | diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c |
1584 | index 753a6def61e7..3d1cfcb96b6b 100644 |
1585 | --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c |
1586 | +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c |
1587 | @@ -28,6 +28,7 @@ |
1588 | #include "nouveau_ttm.h" |
1589 | #include "nouveau_gem.h" |
1590 | |
1591 | +#include "drm_legacy.h" |
1592 | static int |
1593 | nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize) |
1594 | { |
1595 | @@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) |
1596 | struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev); |
1597 | |
1598 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) |
1599 | - return -EINVAL; |
1600 | + return drm_legacy_mmap(filp, vma); |
1601 | |
1602 | return ttm_bo_mmap(filp, vma, &drm->ttm.bdev); |
1603 | } |
1604 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
1605 | index 3402033fa52a..dfaccfca0688 100644 |
1606 | --- a/drivers/hid/hid-core.c |
1607 | +++ b/drivers/hid/hid-core.c |
1608 | @@ -1809,6 +1809,7 @@ static const struct hid_device_id hid_have_special_driver[] = { |
1609 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, |
1610 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, |
1611 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, |
1612 | + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, |
1613 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, |
1614 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, |
1615 | { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, |
1616 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
1617 | index 7c863738e419..0e28190480d7 100644 |
1618 | --- a/drivers/hid/hid-ids.h |
1619 | +++ b/drivers/hid/hid-ids.h |
1620 | @@ -300,6 +300,7 @@ |
1621 | #define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 |
1622 | #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b |
1623 | #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103 |
1624 | +#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c |
1625 | #define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f |
1626 | |
1627 | #define USB_VENDOR_ID_ELECOM 0x056e |
1628 | @@ -525,6 +526,7 @@ |
1629 | #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 |
1630 | #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 |
1631 | #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 |
1632 | +#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a |
1633 | #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 |
1634 | |
1635 | #define USB_VENDOR_ID_LABTEC 0x1020 |
1636 | diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c |
1637 | index 725f22ca47fc..8df8ceb47659 100644 |
1638 | --- a/drivers/hid/hid-input.c |
1639 | +++ b/drivers/hid/hid-input.c |
1640 | @@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = { |
1641 | USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), |
1642 | HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, |
1643 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, |
1644 | + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), |
1645 | + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, |
1646 | + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, |
1647 | USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), |
1648 | HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, |
1649 | {} |
1650 | diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c |
1651 | index b92bf01a1ae8..158fcf577fae 100644 |
1652 | --- a/drivers/hid/hid-kye.c |
1653 | +++ b/drivers/hid/hid-kye.c |
1654 | @@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1655 | } |
1656 | break; |
1657 | case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: |
1658 | + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: |
1659 | if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { |
1660 | rdesc = mousepen_i608x_rdesc_fixed; |
1661 | *rsize = sizeof(mousepen_i608x_rdesc_fixed); |
1662 | @@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) |
1663 | switch (id->product) { |
1664 | case USB_DEVICE_ID_KYE_EASYPEN_I405X: |
1665 | case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: |
1666 | + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: |
1667 | case USB_DEVICE_ID_KYE_EASYPEN_M610X: |
1668 | ret = kye_tablet_enable(hdev); |
1669 | if (ret) { |
1670 | @@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = { |
1671 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, |
1672 | USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, |
1673 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, |
1674 | + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, |
1675 | + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, |
1676 | USB_DEVICE_ID_KYE_EASYPEN_M610X) }, |
1677 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, |
1678 | USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, |
1679 | diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c |
1680 | index 1a07e07d99a0..47d7e74231e5 100644 |
1681 | --- a/drivers/hid/hid-roccat-pyra.c |
1682 | +++ b/drivers/hid/hid-roccat-pyra.c |
1683 | @@ -35,6 +35,8 @@ static struct class *pyra_class; |
1684 | static void profile_activated(struct pyra_device *pyra, |
1685 | unsigned int new_profile) |
1686 | { |
1687 | + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) |
1688 | + return; |
1689 | pyra->actual_profile = new_profile; |
1690 | pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; |
1691 | } |
1692 | @@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp, |
1693 | if (off != 0 || count != PYRA_SIZE_SETTINGS) |
1694 | return -EINVAL; |
1695 | |
1696 | - mutex_lock(&pyra->pyra_lock); |
1697 | - |
1698 | settings = (struct pyra_settings const *)buf; |
1699 | + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) |
1700 | + return -EINVAL; |
1701 | + |
1702 | + mutex_lock(&pyra->pyra_lock); |
1703 | |
1704 | retval = pyra_set_settings(usb_dev, settings); |
1705 | if (retval) { |
1706 | diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c |
1707 | index 747d54421e73..80e33e0abc52 100644 |
1708 | --- a/drivers/hid/i2c-hid/i2c-hid.c |
1709 | +++ b/drivers/hid/i2c-hid/i2c-hid.c |
1710 | @@ -137,6 +137,7 @@ struct i2c_hid { |
1711 | * descriptor. */ |
1712 | unsigned int bufsize; /* i2c buffer size */ |
1713 | char *inbuf; /* Input buffer */ |
1714 | + char *rawbuf; /* Raw Input buffer */ |
1715 | char *cmdbuf; /* Command buffer */ |
1716 | char *argsbuf; /* Command arguments buffer */ |
1717 | |
1718 | @@ -369,7 +370,7 @@ static int i2c_hid_hwreset(struct i2c_client *client) |
1719 | static void i2c_hid_get_input(struct i2c_hid *ihid) |
1720 | { |
1721 | int ret, ret_size; |
1722 | - int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); |
1723 | + int size = ihid->bufsize; |
1724 | |
1725 | ret = i2c_master_recv(ihid->client, ihid->inbuf, size); |
1726 | if (ret != size) { |
1727 | @@ -504,9 +505,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type, |
1728 | static void i2c_hid_free_buffers(struct i2c_hid *ihid) |
1729 | { |
1730 | kfree(ihid->inbuf); |
1731 | + kfree(ihid->rawbuf); |
1732 | kfree(ihid->argsbuf); |
1733 | kfree(ihid->cmdbuf); |
1734 | ihid->inbuf = NULL; |
1735 | + ihid->rawbuf = NULL; |
1736 | ihid->cmdbuf = NULL; |
1737 | ihid->argsbuf = NULL; |
1738 | ihid->bufsize = 0; |
1739 | @@ -522,10 +525,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size) |
1740 | report_size; /* report */ |
1741 | |
1742 | ihid->inbuf = kzalloc(report_size, GFP_KERNEL); |
1743 | + ihid->rawbuf = kzalloc(report_size, GFP_KERNEL); |
1744 | ihid->argsbuf = kzalloc(args_len, GFP_KERNEL); |
1745 | ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL); |
1746 | |
1747 | - if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) { |
1748 | + if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) { |
1749 | i2c_hid_free_buffers(ihid); |
1750 | return -ENOMEM; |
1751 | } |
1752 | @@ -552,12 +556,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, |
1753 | |
1754 | ret = i2c_hid_get_report(client, |
1755 | report_type == HID_FEATURE_REPORT ? 0x03 : 0x01, |
1756 | - report_number, ihid->inbuf, ask_count); |
1757 | + report_number, ihid->rawbuf, ask_count); |
1758 | |
1759 | if (ret < 0) |
1760 | return ret; |
1761 | |
1762 | - ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8); |
1763 | + ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8); |
1764 | |
1765 | if (ret_count <= 2) |
1766 | return 0; |
1767 | @@ -566,7 +570,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid, |
1768 | |
1769 | /* The query buffer contains the size, dropping it in the reply */ |
1770 | count = min(count, ret_count - 2); |
1771 | - memcpy(buf, ihid->inbuf + 2, count); |
1772 | + memcpy(buf, ihid->rawbuf + 2, count); |
1773 | |
1774 | return count; |
1775 | } |
1776 | @@ -702,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid) |
1777 | |
1778 | static void i2c_hid_stop(struct hid_device *hid) |
1779 | { |
1780 | - struct i2c_client *client = hid->driver_data; |
1781 | - struct i2c_hid *ihid = i2c_get_clientdata(client); |
1782 | - |
1783 | hid->claimed = 0; |
1784 | - |
1785 | - i2c_hid_free_buffers(ihid); |
1786 | } |
1787 | |
1788 | static int i2c_hid_open(struct hid_device *hid) |
1789 | diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c |
1790 | index 552671ee7c5d..4477eb7457de 100644 |
1791 | --- a/drivers/hid/usbhid/hid-quirks.c |
1792 | +++ b/drivers/hid/usbhid/hid-quirks.c |
1793 | @@ -73,6 +73,7 @@ static const struct hid_blacklist { |
1794 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, |
1795 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, |
1796 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL }, |
1797 | + { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL }, |
1798 | { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, |
1799 | { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, |
1800 | { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, |
1801 | @@ -122,6 +123,7 @@ static const struct hid_blacklist { |
1802 | { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, |
1803 | { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, |
1804 | { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, |
1805 | + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, |
1806 | { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, |
1807 | { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, |
1808 | { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, |
1809 | diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c |
1810 | index 8593047bb726..b6bcd251c4a8 100644 |
1811 | --- a/drivers/hid/wacom_sys.c |
1812 | +++ b/drivers/hid/wacom_sys.c |
1813 | @@ -70,22 +70,15 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report, |
1814 | static int wacom_open(struct input_dev *dev) |
1815 | { |
1816 | struct wacom *wacom = input_get_drvdata(dev); |
1817 | - int retval; |
1818 | - |
1819 | - mutex_lock(&wacom->lock); |
1820 | - retval = hid_hw_open(wacom->hdev); |
1821 | - mutex_unlock(&wacom->lock); |
1822 | |
1823 | - return retval; |
1824 | + return hid_hw_open(wacom->hdev); |
1825 | } |
1826 | |
1827 | static void wacom_close(struct input_dev *dev) |
1828 | { |
1829 | struct wacom *wacom = input_get_drvdata(dev); |
1830 | |
1831 | - mutex_lock(&wacom->lock); |
1832 | hid_hw_close(wacom->hdev); |
1833 | - mutex_unlock(&wacom->lock); |
1834 | } |
1835 | |
1836 | /* |
1837 | diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
1838 | index 586b2405b0d4..7cf998cdd011 100644 |
1839 | --- a/drivers/hid/wacom_wac.c |
1840 | +++ b/drivers/hid/wacom_wac.c |
1841 | @@ -3026,6 +3026,7 @@ const struct hid_device_id wacom_ids[] = { |
1842 | { USB_DEVICE_WACOM(0x4004) }, |
1843 | { USB_DEVICE_WACOM(0x5000) }, |
1844 | { USB_DEVICE_WACOM(0x5002) }, |
1845 | + { USB_DEVICE_LENOVO(0x6004) }, |
1846 | |
1847 | { USB_DEVICE_WACOM(HID_ANY_ID) }, |
1848 | { } |
1849 | diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c |
1850 | index a2d1a9612c86..d36ce6835fb7 100644 |
1851 | --- a/drivers/hv/channel_mgmt.c |
1852 | +++ b/drivers/hv/channel_mgmt.c |
1853 | @@ -216,9 +216,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work) |
1854 | unsigned long flags; |
1855 | struct vmbus_channel *primary_channel; |
1856 | struct vmbus_channel_relid_released msg; |
1857 | + struct device *dev; |
1858 | + |
1859 | + if (channel->device_obj) { |
1860 | + dev = get_device(&channel->device_obj->device); |
1861 | + if (dev) { |
1862 | + vmbus_device_unregister(channel->device_obj); |
1863 | + put_device(dev); |
1864 | + } |
1865 | + } |
1866 | |
1867 | - if (channel->device_obj) |
1868 | - vmbus_device_unregister(channel->device_obj); |
1869 | memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); |
1870 | msg.child_relid = channel->offermsg.child_relid; |
1871 | msg.header.msgtype = CHANNELMSG_RELID_RELEASED; |
1872 | diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c |
1873 | index d125a019383f..54ff03791940 100644 |
1874 | --- a/drivers/input/mouse/alps.c |
1875 | +++ b/drivers/input/mouse/alps.c |
1876 | @@ -919,18 +919,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt, |
1877 | |
1878 | static int alps_get_mt_count(struct input_mt_pos *mt) |
1879 | { |
1880 | - int i; |
1881 | + int i, fingers = 0; |
1882 | |
1883 | - for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++) |
1884 | - /* empty */; |
1885 | + for (i = 0; i < MAX_TOUCHES; i++) { |
1886 | + if (mt[i].x != 0 || mt[i].y != 0) |
1887 | + fingers++; |
1888 | + } |
1889 | |
1890 | - return i; |
1891 | + return fingers; |
1892 | } |
1893 | |
1894 | static int alps_decode_packet_v7(struct alps_fields *f, |
1895 | unsigned char *p, |
1896 | struct psmouse *psmouse) |
1897 | { |
1898 | + struct alps_data *priv = psmouse->private; |
1899 | unsigned char pkt_id; |
1900 | |
1901 | pkt_id = alps_get_packet_id_v7(p); |
1902 | @@ -938,19 +941,52 @@ static int alps_decode_packet_v7(struct alps_fields *f, |
1903 | return 0; |
1904 | if (pkt_id == V7_PACKET_ID_UNKNOWN) |
1905 | return -1; |
1906 | + /* |
1907 | + * NEW packets are sent to indicate a discontinuity in the finger |
1908 | + * coordinate reporting. Specifically a finger may have moved from |
1909 | + * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for |
1910 | + * us. |
1911 | + * |
1912 | + * NEW packets have 3 problems: |
1913 | + * 1) They do not contain middle / right button info (on non clickpads) |
1914 | + * this can be worked around by preserving the old button state |
1915 | + * 2) They do not contain an accurate fingercount, and they are |
1916 | + *    typically sent when the number of fingers changes. We cannot use |
1917 | + *    the old finger count as that may mismatch with the number of |
1918 | + *    touch coordinates we have available in the NEW packet |
1919 | + * 3) Their x data for the second touch is inaccurate leading to |
1920 | + * a possible jump of the x coordinate by 16 units when the first |
1921 | + * non NEW packet comes in |
1922 | + * Since problems 2 & 3 cannot be worked around, just ignore them. |
1923 | + */ |
1924 | + if (pkt_id == V7_PACKET_ID_NEW) |
1925 | + return 1; |
1926 | |
1927 | alps_get_finger_coordinate_v7(f->mt, p, pkt_id); |
1928 | |
1929 | - if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) { |
1930 | - f->left = (p[0] & 0x80) >> 7; |
1931 | + if (pkt_id == V7_PACKET_ID_TWO) |
1932 | + f->fingers = alps_get_mt_count(f->mt); |
1933 | + else /* pkt_id == V7_PACKET_ID_MULTI */ |
1934 | + f->fingers = 3 + (p[5] & 0x03); |
1935 | + |
1936 | + f->left = (p[0] & 0x80) >> 7; |
1937 | + if (priv->flags & ALPS_BUTTONPAD) { |
1938 | + if (p[0] & 0x20) |
1939 | + f->fingers++; |
1940 | + if (p[0] & 0x10) |
1941 | + f->fingers++; |
1942 | + } else { |
1943 | f->right = (p[0] & 0x20) >> 5; |
1944 | f->middle = (p[0] & 0x10) >> 4; |
1945 | } |
1946 | |
1947 | - if (pkt_id == V7_PACKET_ID_TWO) |
1948 | - f->fingers = alps_get_mt_count(f->mt); |
1949 | - else if (pkt_id == V7_PACKET_ID_MULTI) |
1950 | - f->fingers = 3 + (p[5] & 0x03); |
1951 | + /* Sometimes a single touch is reported in mt[1] rather than mt[0] */ |
1952 | + if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) { |
1953 | + f->mt[0].x = f->mt[1].x; |
1954 | + f->mt[0].y = f->mt[1].y; |
1955 | + f->mt[1].x = 0; |
1956 | + f->mt[1].y = 0; |
1957 | + } |
1958 | |
1959 | return 0; |
1960 | } |
1961 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
1962 | index a27d6cb1a793..b2b9c9264131 100644 |
1963 | --- a/drivers/iommu/intel-iommu.c |
1964 | +++ b/drivers/iommu/intel-iommu.c |
1965 | @@ -1983,7 +1983,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
1966 | { |
1967 | struct dma_pte *first_pte = NULL, *pte = NULL; |
1968 | phys_addr_t uninitialized_var(pteval); |
1969 | - unsigned long sg_res; |
1970 | + unsigned long sg_res = 0; |
1971 | unsigned int largepage_lvl = 0; |
1972 | unsigned long lvl_pages = 0; |
1973 | |
1974 | @@ -1994,10 +1994,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, |
1975 | |
1976 | prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; |
1977 | |
1978 | - if (sg) |
1979 | - sg_res = 0; |
1980 | - else { |
1981 | - sg_res = nr_pages + 1; |
1982 | + if (!sg) { |
1983 | + sg_res = nr_pages; |
1984 | pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; |
1985 | } |
1986 | |
1987 | @@ -4267,6 +4265,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, |
1988 | domain_remove_one_dev_info(old_domain, dev); |
1989 | else |
1990 | domain_remove_dev_info(old_domain); |
1991 | + |
1992 | + if (!domain_type_is_vm_or_si(old_domain) && |
1993 | + list_empty(&old_domain->devices)) |
1994 | + domain_exit(old_domain); |
1995 | } |
1996 | } |
1997 | |
1998 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
1999 | index 9c66e5997fc8..c1b0d52bfcb0 100644 |
2000 | --- a/drivers/md/raid5.c |
2001 | +++ b/drivers/md/raid5.c |
2002 | @@ -2917,8 +2917,11 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, |
2003 | (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && |
2004 | (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) && |
2005 | !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || |
2006 | - (sh->raid_conf->level == 6 && s->failed && s->to_write && |
2007 | - s->to_write - s->non_overwrite < sh->raid_conf->raid_disks - 2 && |
2008 | + ((sh->raid_conf->level == 6 || |
2009 | + sh->sector >= sh->raid_conf->mddev->recovery_cp) |
2010 | + && s->failed && s->to_write && |
2011 | + (s->to_write - s->non_overwrite < |
2012 | + sh->raid_conf->raid_disks - sh->raid_conf->max_degraded) && |
2013 | (!test_bit(R5_Insync, &dev->flags) || test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))))) { |
2014 | /* we would like to get this block, possibly by computing it, |
2015 | * otherwise read it if the backing disk is insync |
2016 | diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c |
2017 | index 7cb3b7e41739..1ca94e6fa8fb 100644 |
2018 | --- a/drivers/misc/genwqe/card_utils.c |
2019 | +++ b/drivers/misc/genwqe/card_utils.c |
2020 | @@ -590,6 +590,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr, |
2021 | m->nr_pages, |
2022 | 1, /* write by caller */ |
2023 | m->page_list); /* ptrs to pages */ |
2024 | + if (rc < 0) |
2025 | + goto fail_get_user_pages; |
2026 | |
2027 | /* assumption: get_user_pages can be killed by signals. */ |
2028 | if (rc < m->nr_pages) { |
2029 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
2030 | index ada1a3ea3a87..7625bd791fca 100644 |
2031 | --- a/drivers/mmc/host/sdhci.c |
2032 | +++ b/drivers/mmc/host/sdhci.c |
2033 | @@ -1319,6 +1319,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
2034 | |
2035 | sdhci_runtime_pm_get(host); |
2036 | |
2037 | + present = mmc_gpio_get_cd(host->mmc); |
2038 | + |
2039 | spin_lock_irqsave(&host->lock, flags); |
2040 | |
2041 | WARN_ON(host->mrq != NULL); |
2042 | @@ -1347,7 +1349,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
2043 | * zero: cd-gpio is used, and card is removed |
2044 | * one: cd-gpio is used, and card is present |
2045 | */ |
2046 | - present = mmc_gpio_get_cd(host->mmc); |
2047 | if (present < 0) { |
2048 | /* If polling, assume that the card is always present. */ |
2049 | if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) |
2050 | @@ -2072,15 +2073,18 @@ static void sdhci_card_event(struct mmc_host *mmc) |
2051 | { |
2052 | struct sdhci_host *host = mmc_priv(mmc); |
2053 | unsigned long flags; |
2054 | + int present; |
2055 | |
2056 | /* First check if client has provided their own card event */ |
2057 | if (host->ops->card_event) |
2058 | host->ops->card_event(host); |
2059 | |
2060 | + present = sdhci_do_get_cd(host); |
2061 | + |
2062 | spin_lock_irqsave(&host->lock, flags); |
2063 | |
2064 | /* Check host->mrq first in case we are runtime suspended */ |
2065 | - if (host->mrq && !sdhci_do_get_cd(host)) { |
2066 | + if (host->mrq && !present) { |
2067 | pr_err("%s: Card removed during transfer!\n", |
2068 | mmc_hostname(host->mmc)); |
2069 | pr_err("%s: Resetting controller.\n", |
2070 | diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c |
2071 | index ed827cf894e4..dd8f66ccd2d6 100644 |
2072 | --- a/drivers/mtd/devices/m25p80.c |
2073 | +++ b/drivers/mtd/devices/m25p80.c |
2074 | @@ -300,11 +300,11 @@ static const struct spi_device_id m25p_ids[] = { |
2075 | {"m45pe10"}, {"m45pe80"}, {"m45pe16"}, |
2076 | {"m25pe20"}, {"m25pe80"}, {"m25pe16"}, |
2077 | {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"}, |
2078 | - {"m25px64"}, |
2079 | + {"m25px64"}, {"m25px80"}, |
2080 | {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"}, |
2081 | {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, |
2082 | - {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"}, |
2083 | - {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"}, |
2084 | + {"w25x64"}, {"w25q64"}, {"w25q80"}, {"w25q80bl"}, |
2085 | + {"w25q128"}, {"w25q256"}, {"cat25c11"}, |
2086 | {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"}, |
2087 | { }, |
2088 | }; |
2089 | diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c |
2090 | index 3b357e920a0c..10d07dd20f7c 100644 |
2091 | --- a/drivers/mtd/nand/omap2.c |
2092 | +++ b/drivers/mtd/nand/omap2.c |
2093 | @@ -1741,13 +1741,6 @@ static int omap_nand_probe(struct platform_device *pdev) |
2094 | goto return_error; |
2095 | } |
2096 | |
2097 | - /* check for small page devices */ |
2098 | - if ((mtd->oobsize < 64) && (pdata->ecc_opt != OMAP_ECC_HAM1_CODE_HW)) { |
2099 | - dev_err(&info->pdev->dev, "small page devices are not supported\n"); |
2100 | - err = -EINVAL; |
2101 | - goto return_error; |
2102 | - } |
2103 | - |
2104 | /* re-populate low-level callbacks based on xfer modes */ |
2105 | switch (pdata->xfer_type) { |
2106 | case NAND_OMAP_PREFETCH_POLLED: |
2107 | diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c |
2108 | index eeab96973cf0..b55bc52a1340 100644 |
2109 | --- a/drivers/mtd/tests/torturetest.c |
2110 | +++ b/drivers/mtd/tests/torturetest.c |
2111 | @@ -264,7 +264,9 @@ static int __init tort_init(void) |
2112 | int i; |
2113 | void *patt; |
2114 | |
2115 | - mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); |
2116 | + err = mtdtest_erase_good_eraseblocks(mtd, bad_ebs, eb, ebcnt); |
2117 | + if (err) |
2118 | + goto out; |
2119 | |
2120 | /* Check if the eraseblocks contain only 0xFF bytes */ |
2121 | if (check) { |
2122 | diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c |
2123 | index ec2c2dc1c1ca..2a1b6e037e1a 100644 |
2124 | --- a/drivers/mtd/ubi/upd.c |
2125 | +++ b/drivers/mtd/ubi/upd.c |
2126 | @@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, |
2127 | ubi_assert(!vol->updating && !vol->changing_leb); |
2128 | vol->updating = 1; |
2129 | |
2130 | + vol->upd_buf = vmalloc(ubi->leb_size); |
2131 | + if (!vol->upd_buf) |
2132 | + return -ENOMEM; |
2133 | + |
2134 | err = set_update_marker(ubi, vol); |
2135 | if (err) |
2136 | return err; |
2137 | @@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol, |
2138 | err = clear_update_marker(ubi, vol, 0); |
2139 | if (err) |
2140 | return err; |
2141 | + |
2142 | + vfree(vol->upd_buf); |
2143 | vol->updating = 0; |
2144 | return 0; |
2145 | } |
2146 | |
2147 | - vol->upd_buf = vmalloc(ubi->leb_size); |
2148 | - if (!vol->upd_buf) |
2149 | - return -ENOMEM; |
2150 | - |
2151 | vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, |
2152 | vol->usable_leb_size); |
2153 | vol->upd_bytes = bytes; |
2154 | diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
2155 | index 6654f191868e..b9686c1472d2 100644 |
2156 | --- a/drivers/mtd/ubi/wl.c |
2157 | +++ b/drivers/mtd/ubi/wl.c |
2158 | @@ -1212,7 +1212,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
2159 | |
2160 | err = do_sync_erase(ubi, e1, vol_id, lnum, 0); |
2161 | if (err) { |
2162 | - kmem_cache_free(ubi_wl_entry_slab, e1); |
2163 | if (e2) |
2164 | kmem_cache_free(ubi_wl_entry_slab, e2); |
2165 | goto out_ro; |
2166 | @@ -1226,10 +1225,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
2167 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", |
2168 | e2->pnum, vol_id, lnum); |
2169 | err = do_sync_erase(ubi, e2, vol_id, lnum, 0); |
2170 | - if (err) { |
2171 | - kmem_cache_free(ubi_wl_entry_slab, e2); |
2172 | + if (err) |
2173 | goto out_ro; |
2174 | - } |
2175 | } |
2176 | |
2177 | dbg_wl("done"); |
2178 | @@ -1265,10 +1262,9 @@ out_not_moved: |
2179 | |
2180 | ubi_free_vid_hdr(ubi, vid_hdr); |
2181 | err = do_sync_erase(ubi, e2, vol_id, lnum, torture); |
2182 | - if (err) { |
2183 | - kmem_cache_free(ubi_wl_entry_slab, e2); |
2184 | + if (err) |
2185 | goto out_ro; |
2186 | - } |
2187 | + |
2188 | mutex_unlock(&ubi->move_mutex); |
2189 | return 0; |
2190 | |
2191 | diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c |
2192 | index 644e6ab8a489..dc807e10f802 100644 |
2193 | --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c |
2194 | +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c |
2195 | @@ -735,7 +735,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, |
2196 | dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); |
2197 | if (!dev->cmd_buf) { |
2198 | err = -ENOMEM; |
2199 | - goto lbl_set_intf_data; |
2200 | + goto lbl_free_candev; |
2201 | } |
2202 | |
2203 | dev->udev = usb_dev; |
2204 | @@ -775,7 +775,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, |
2205 | err = register_candev(netdev); |
2206 | if (err) { |
2207 | dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); |
2208 | - goto lbl_free_cmd_buf; |
2209 | + goto lbl_restore_intf_data; |
2210 | } |
2211 | |
2212 | if (dev->prev_siblings) |
2213 | @@ -788,14 +788,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, |
2214 | if (dev->adapter->dev_init) { |
2215 | err = dev->adapter->dev_init(dev); |
2216 | if (err) |
2217 | - goto lbl_free_cmd_buf; |
2218 | + goto lbl_unregister_candev; |
2219 | } |
2220 | |
2221 | /* set bus off */ |
2222 | if (dev->adapter->dev_set_bus) { |
2223 | err = dev->adapter->dev_set_bus(dev, 0); |
2224 | if (err) |
2225 | - goto lbl_free_cmd_buf; |
2226 | + goto lbl_unregister_candev; |
2227 | } |
2228 | |
2229 | /* get device number early */ |
2230 | @@ -807,11 +807,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter, |
2231 | |
2232 | return 0; |
2233 | |
2234 | -lbl_free_cmd_buf: |
2235 | - kfree(dev->cmd_buf); |
2236 | +lbl_unregister_candev: |
2237 | + unregister_candev(netdev); |
2238 | |
2239 | -lbl_set_intf_data: |
2240 | +lbl_restore_intf_data: |
2241 | usb_set_intfdata(intf, dev->prev_siblings); |
2242 | + kfree(dev->cmd_buf); |
2243 | + |
2244 | +lbl_free_candev: |
2245 | free_candev(netdev); |
2246 | |
2247 | return err; |
2248 | diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c |
2249 | index 263dd921edc4..f7f796a2c50b 100644 |
2250 | --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c |
2251 | +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c |
2252 | @@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, |
2253 | if (!(dev->state & PCAN_USB_STATE_CONNECTED)) |
2254 | return 0; |
2255 | |
2256 | - memset(req_addr, '\0', req_size); |
2257 | - |
2258 | req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; |
2259 | |
2260 | switch (req_id) { |
2261 | @@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, |
2262 | default: |
2263 | p = usb_rcvctrlpipe(dev->udev, 0); |
2264 | req_type |= USB_DIR_IN; |
2265 | + memset(req_addr, '\0', req_size); |
2266 | break; |
2267 | } |
2268 | |
2269 | diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c |
2270 | index 0583c69d26db..ddaad712c59a 100644 |
2271 | --- a/drivers/net/wireless/ath/ath5k/qcu.c |
2272 | +++ b/drivers/net/wireless/ath/ath5k/qcu.c |
2273 | @@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type, |
2274 | } else { |
2275 | switch (queue_type) { |
2276 | case AR5K_TX_QUEUE_DATA: |
2277 | - for (queue = AR5K_TX_QUEUE_ID_DATA_MIN; |
2278 | - ah->ah_txq[queue].tqi_type != |
2279 | - AR5K_TX_QUEUE_INACTIVE; queue++) { |
2280 | - |
2281 | - if (queue > AR5K_TX_QUEUE_ID_DATA_MAX) |
2282 | - return -EINVAL; |
2283 | - } |
2284 | + queue = queue_info->tqi_subtype; |
2285 | break; |
2286 | case AR5K_TX_QUEUE_UAPSD: |
2287 | queue = AR5K_TX_QUEUE_ID_UAPSD; |
2288 | diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h |
2289 | index 975074fc11bc..e8e8dd28bade 100644 |
2290 | --- a/drivers/net/wireless/ath/ath9k/hw.h |
2291 | +++ b/drivers/net/wireless/ath/ath9k/hw.h |
2292 | @@ -217,8 +217,8 @@ |
2293 | #define AH_WOW_BEACON_MISS BIT(3) |
2294 | |
2295 | enum ath_hw_txq_subtype { |
2296 | - ATH_TXQ_AC_BE = 0, |
2297 | - ATH_TXQ_AC_BK = 1, |
2298 | + ATH_TXQ_AC_BK = 0, |
2299 | + ATH_TXQ_AC_BE = 1, |
2300 | ATH_TXQ_AC_VI = 2, |
2301 | ATH_TXQ_AC_VO = 3, |
2302 | }; |
2303 | diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c |
2304 | index 275205ab5f15..3e58bfa0c1fd 100644 |
2305 | --- a/drivers/net/wireless/ath/ath9k/mac.c |
2306 | +++ b/drivers/net/wireless/ath/ath9k/mac.c |
2307 | @@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, |
2308 | q = ATH9K_NUM_TX_QUEUES - 3; |
2309 | break; |
2310 | case ATH9K_TX_QUEUE_DATA: |
2311 | - for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++) |
2312 | - if (ah->txq[q].tqi_type == |
2313 | - ATH9K_TX_QUEUE_INACTIVE) |
2314 | - break; |
2315 | - if (q == ATH9K_NUM_TX_QUEUES) { |
2316 | - ath_err(common, "No available TX queue\n"); |
2317 | - return -1; |
2318 | - } |
2319 | + q = qinfo->tqi_subtype; |
2320 | break; |
2321 | default: |
2322 | ath_err(common, "Invalid TX queue type: %u\n", type); |
2323 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2324 | index 8079a9ddcba9..0c9671f2f01a 100644 |
2325 | --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2326 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2327 | @@ -1081,8 +1081,17 @@ brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb, |
2328 | { |
2329 | struct brcmf_if *ifp; |
2330 | |
2331 | + /* The ifidx is the idx to map to matching netdev/ifp. When receiving |
2332 | + * events this is easy because it contains the bssidx which maps |
2333 | + * 1-on-1 to the netdev/ifp. But for data frames the ifidx is rcvd. |
2334 | + * bssidx 1 is used for p2p0 and no data can be received or |
2335 | + * transmitted on it. Therefore bssidx is ifidx + 1 if ifidx > 0 |
2336 | + */ |
2337 | + if (ifidx) |
2338 | + (ifidx)++; |
2339 | ifp = msgbuf->drvr->iflist[ifidx]; |
2340 | if (!ifp || !ifp->ndev) { |
2341 | + brcmf_err("Received pkt for invalid ifidx %d\n", ifidx); |
2342 | brcmu_pkt_buf_free_skb(skb); |
2343 | return; |
2344 | } |
2345 | diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h |
2346 | index 751ae1d10b7f..7a34e4d158d1 100644 |
2347 | --- a/drivers/net/wireless/iwlwifi/dvm/commands.h |
2348 | +++ b/drivers/net/wireless/iwlwifi/dvm/commands.h |
2349 | @@ -966,21 +966,21 @@ struct iwl_rem_sta_cmd { |
2350 | |
2351 | |
2352 | /* WiFi queues mask */ |
2353 | -#define IWL_SCD_BK_MSK cpu_to_le32(BIT(0)) |
2354 | -#define IWL_SCD_BE_MSK cpu_to_le32(BIT(1)) |
2355 | -#define IWL_SCD_VI_MSK cpu_to_le32(BIT(2)) |
2356 | -#define IWL_SCD_VO_MSK cpu_to_le32(BIT(3)) |
2357 | -#define IWL_SCD_MGMT_MSK cpu_to_le32(BIT(3)) |
2358 | +#define IWL_SCD_BK_MSK BIT(0) |
2359 | +#define IWL_SCD_BE_MSK BIT(1) |
2360 | +#define IWL_SCD_VI_MSK BIT(2) |
2361 | +#define IWL_SCD_VO_MSK BIT(3) |
2362 | +#define IWL_SCD_MGMT_MSK BIT(3) |
2363 | |
2364 | /* PAN queues mask */ |
2365 | -#define IWL_PAN_SCD_BK_MSK cpu_to_le32(BIT(4)) |
2366 | -#define IWL_PAN_SCD_BE_MSK cpu_to_le32(BIT(5)) |
2367 | -#define IWL_PAN_SCD_VI_MSK cpu_to_le32(BIT(6)) |
2368 | -#define IWL_PAN_SCD_VO_MSK cpu_to_le32(BIT(7)) |
2369 | -#define IWL_PAN_SCD_MGMT_MSK cpu_to_le32(BIT(7)) |
2370 | -#define IWL_PAN_SCD_MULTICAST_MSK cpu_to_le32(BIT(8)) |
2371 | +#define IWL_PAN_SCD_BK_MSK BIT(4) |
2372 | +#define IWL_PAN_SCD_BE_MSK BIT(5) |
2373 | +#define IWL_PAN_SCD_VI_MSK BIT(6) |
2374 | +#define IWL_PAN_SCD_VO_MSK BIT(7) |
2375 | +#define IWL_PAN_SCD_MGMT_MSK BIT(7) |
2376 | +#define IWL_PAN_SCD_MULTICAST_MSK BIT(8) |
2377 | |
2378 | -#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00) |
2379 | +#define IWL_AGG_TX_QUEUE_MSK 0xffc00 |
2380 | |
2381 | #define IWL_DROP_ALL BIT(1) |
2382 | |
2383 | @@ -1005,12 +1005,17 @@ struct iwl_rem_sta_cmd { |
2384 | * 1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable. |
2385 | * 2: Dump all FIFO |
2386 | */ |
2387 | -struct iwl_txfifo_flush_cmd { |
2388 | +struct iwl_txfifo_flush_cmd_v3 { |
2389 | __le32 queue_control; |
2390 | __le16 flush_control; |
2391 | __le16 reserved; |
2392 | } __packed; |
2393 | |
2394 | +struct iwl_txfifo_flush_cmd_v2 { |
2395 | + __le16 queue_control; |
2396 | + __le16 flush_control; |
2397 | +} __packed; |
2398 | + |
2399 | /* |
2400 | * REPLY_WEP_KEY = 0x20 |
2401 | */ |
2402 | diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c |
2403 | index 2191621d69c1..cfe1293692fc 100644 |
2404 | --- a/drivers/net/wireless/iwlwifi/dvm/lib.c |
2405 | +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c |
2406 | @@ -137,37 +137,38 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv, |
2407 | */ |
2408 | int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk) |
2409 | { |
2410 | - struct iwl_txfifo_flush_cmd flush_cmd; |
2411 | - struct iwl_host_cmd cmd = { |
2412 | - .id = REPLY_TXFIFO_FLUSH, |
2413 | - .len = { sizeof(struct iwl_txfifo_flush_cmd), }, |
2414 | - .data = { &flush_cmd, }, |
2415 | + struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = { |
2416 | + .flush_control = cpu_to_le16(IWL_DROP_ALL), |
2417 | + }; |
2418 | + struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = { |
2419 | + .flush_control = cpu_to_le16(IWL_DROP_ALL), |
2420 | }; |
2421 | |
2422 | - memset(&flush_cmd, 0, sizeof(flush_cmd)); |
2423 | + u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | |
2424 | + IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK; |
2425 | |
2426 | - flush_cmd.queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK | |
2427 | - IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | |
2428 | - IWL_SCD_MGMT_MSK; |
2429 | if ((priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))) |
2430 | - flush_cmd.queue_control |= IWL_PAN_SCD_VO_MSK | |
2431 | - IWL_PAN_SCD_VI_MSK | |
2432 | - IWL_PAN_SCD_BE_MSK | |
2433 | - IWL_PAN_SCD_BK_MSK | |
2434 | - IWL_PAN_SCD_MGMT_MSK | |
2435 | - IWL_PAN_SCD_MULTICAST_MSK; |
2436 | + queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK | |
2437 | + IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK | |
2438 | + IWL_PAN_SCD_MGMT_MSK | |
2439 | + IWL_PAN_SCD_MULTICAST_MSK; |
2440 | |
2441 | if (priv->nvm_data->sku_cap_11n_enable) |
2442 | - flush_cmd.queue_control |= IWL_AGG_TX_QUEUE_MSK; |
2443 | + queue_control |= IWL_AGG_TX_QUEUE_MSK; |
2444 | |
2445 | if (scd_q_msk) |
2446 | - flush_cmd.queue_control = cpu_to_le32(scd_q_msk); |
2447 | - |
2448 | - IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", |
2449 | - flush_cmd.queue_control); |
2450 | - flush_cmd.flush_control = cpu_to_le16(IWL_DROP_ALL); |
2451 | - |
2452 | - return iwl_dvm_send_cmd(priv, &cmd); |
2453 | + queue_control = scd_q_msk; |
2454 | + |
2455 | + IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control); |
2456 | + flush_cmd_v3.queue_control = cpu_to_le32(queue_control); |
2457 | + flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control); |
2458 | + |
2459 | + if (IWL_UCODE_API(priv->fw->ucode_ver) > 2) |
2460 | + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, |
2461 | + sizeof(flush_cmd_v3), |
2462 | + &flush_cmd_v3); |
2463 | + return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0, |
2464 | + sizeof(flush_cmd_v2), &flush_cmd_v2); |
2465 | } |
2466 | |
2467 | void iwlagn_dev_txfifo_flush(struct iwl_priv *priv) |
2468 | diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2469 | index c62575d86bcd..5bd902c976e7 100644 |
2470 | --- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2471 | +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h |
2472 | @@ -1589,7 +1589,7 @@ enum iwl_sf_scenario { |
2473 | #define SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */ |
2474 | |
2475 | /* smart FIFO default values */ |
2476 | -#define SF_W_MARK_SISO 4096 |
2477 | +#define SF_W_MARK_SISO 6144 |
2478 | #define SF_W_MARK_MIMO2 8192 |
2479 | #define SF_W_MARK_MIMO3 6144 |
2480 | #define SF_W_MARK_LEGACY 4096 |
2481 | diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c |
2482 | index 6ced8549eb3a..05cba8c05d3f 100644 |
2483 | --- a/drivers/net/wireless/iwlwifi/pcie/drv.c |
2484 | +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c |
2485 | @@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = { |
2486 | |
2487 | /* 3165 Series */ |
2488 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, |
2489 | + {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, |
2490 | + {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, |
2491 | {IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)}, |
2492 | + {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, |
2493 | + {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, |
2494 | |
2495 | /* 7265 Series */ |
2496 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
2497 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
2498 | index c8ca98c2b480..3010ffc9029d 100644 |
2499 | --- a/drivers/pci/probe.c |
2500 | +++ b/drivers/pci/probe.c |
2501 | @@ -216,14 +216,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, |
2502 | res->flags |= IORESOURCE_SIZEALIGN; |
2503 | if (res->flags & IORESOURCE_IO) { |
2504 | l &= PCI_BASE_ADDRESS_IO_MASK; |
2505 | + sz &= PCI_BASE_ADDRESS_IO_MASK; |
2506 | mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT; |
2507 | } else { |
2508 | l &= PCI_BASE_ADDRESS_MEM_MASK; |
2509 | + sz &= PCI_BASE_ADDRESS_MEM_MASK; |
2510 | mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; |
2511 | } |
2512 | } else { |
2513 | res->flags |= (l & IORESOURCE_ROM_ENABLE); |
2514 | l &= PCI_ROM_ADDRESS_MASK; |
2515 | + sz &= PCI_ROM_ADDRESS_MASK; |
2516 | mask = (u32)PCI_ROM_ADDRESS_MASK; |
2517 | } |
2518 | |
2519 | diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c |
2520 | index adab82d5279f..697be114e21a 100644 |
2521 | --- a/drivers/regulator/s2mps11.c |
2522 | +++ b/drivers/regulator/s2mps11.c |
2523 | @@ -479,7 +479,7 @@ static struct regulator_ops s2mps14_reg_ops = { |
2524 | .enable_mask = S2MPS14_ENABLE_MASK \ |
2525 | } |
2526 | |
2527 | -#define regulator_desc_s2mps14_buck(num, min, step) { \ |
2528 | +#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \ |
2529 | .name = "BUCK"#num, \ |
2530 | .id = S2MPS14_BUCK##num, \ |
2531 | .ops = &s2mps14_reg_ops, \ |
2532 | @@ -488,7 +488,7 @@ static struct regulator_ops s2mps14_reg_ops = { |
2533 | .min_uV = min, \ |
2534 | .uV_step = step, \ |
2535 | .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \ |
2536 | - .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \ |
2537 | + .linear_min_sel = min_sel, \ |
2538 | .ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \ |
2539 | .vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \ |
2540 | .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \ |
2541 | @@ -522,11 +522,16 @@ static const struct regulator_desc s2mps14_regulators[] = { |
2542 | regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV), |
2543 | regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV), |
2544 | regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV), |
2545 | - regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV), |
2546 | - regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV), |
2547 | - regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV), |
2548 | - regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV), |
2549 | - regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV), |
2550 | + regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV, |
2551 | + S2MPS14_BUCK1235_START_SEL), |
2552 | + regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV, |
2553 | + S2MPS14_BUCK1235_START_SEL), |
2554 | + regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV, |
2555 | + S2MPS14_BUCK1235_START_SEL), |
2556 | + regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV, |
2557 | + S2MPS14_BUCK4_START_SEL), |
2558 | + regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV, |
2559 | + S2MPS14_BUCK1235_START_SEL), |
2560 | }; |
2561 | |
2562 | static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11, |
2563 | diff --git a/drivers/rtc/rtc-isl12057.c b/drivers/rtc/rtc-isl12057.c |
2564 | index 455b601d731d..8c3f60737df8 100644 |
2565 | --- a/drivers/rtc/rtc-isl12057.c |
2566 | +++ b/drivers/rtc/rtc-isl12057.c |
2567 | @@ -88,7 +88,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) |
2568 | tm->tm_min = bcd2bin(regs[ISL12057_REG_RTC_MN]); |
2569 | |
2570 | if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_MIL) { /* AM/PM */ |
2571 | - tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x0f); |
2572 | + tm->tm_hour = bcd2bin(regs[ISL12057_REG_RTC_HR] & 0x1f); |
2573 | if (regs[ISL12057_REG_RTC_HR] & ISL12057_REG_RTC_HR_PM) |
2574 | tm->tm_hour += 12; |
2575 | } else { /* 24 hour mode */ |
2576 | @@ -97,7 +97,7 @@ static void isl12057_rtc_regs_to_tm(struct rtc_time *tm, u8 *regs) |
2577 | |
2578 | tm->tm_mday = bcd2bin(regs[ISL12057_REG_RTC_DT]); |
2579 | tm->tm_wday = bcd2bin(regs[ISL12057_REG_RTC_DW]) - 1; /* starts at 1 */ |
2580 | - tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO]) - 1; /* starts at 1 */ |
2581 | + tm->tm_mon = bcd2bin(regs[ISL12057_REG_RTC_MO] & 0x1f) - 1; /* ditto */ |
2582 | tm->tm_year = bcd2bin(regs[ISL12057_REG_RTC_YR]) + 100; |
2583 | } |
2584 | |
2585 | diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c |
2586 | index 21142e6574a9..828cb9983cc2 100644 |
2587 | --- a/drivers/rtc/rtc-omap.c |
2588 | +++ b/drivers/rtc/rtc-omap.c |
2589 | @@ -416,6 +416,8 @@ static int __init omap_rtc_probe(struct platform_device *pdev) |
2590 | rtc_writel(KICK1_VALUE, OMAP_RTC_KICK1_REG); |
2591 | } |
2592 | |
2593 | + device_init_wakeup(&pdev->dev, true); |
2594 | + |
2595 | rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
2596 | &omap_rtc_ops, THIS_MODULE); |
2597 | if (IS_ERR(rtc)) { |
2598 | @@ -431,8 +433,10 @@ static int __init omap_rtc_probe(struct platform_device *pdev) |
2599 | rtc_write(0, OMAP_RTC_INTERRUPTS_REG); |
2600 | |
2601 | /* enable RTC functional clock */ |
2602 | - if (id_entry->driver_data & OMAP_RTC_HAS_32KCLK_EN) |
2603 | - rtc_writel(OMAP_RTC_OSC_32KCLK_EN, OMAP_RTC_OSC_REG); |
2604 | + if (id_entry->driver_data & OMAP_RTC_HAS_32KCLK_EN) { |
2605 | + reg = rtc_read(OMAP_RTC_OSC_REG); |
2606 | + rtc_writel(reg | OMAP_RTC_OSC_32KCLK_EN, OMAP_RTC_OSC_REG); |
2607 | + } |
2608 | |
2609 | /* clear old status */ |
2610 | reg = rtc_read(OMAP_RTC_STATUS_REG); |
2611 | @@ -482,8 +486,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev) |
2612 | * is write-only, and always reads as zero...) |
2613 | */ |
2614 | |
2615 | - device_init_wakeup(&pdev->dev, true); |
2616 | - |
2617 | if (new_ctrl & (u8) OMAP_RTC_CTRL_SPLIT) |
2618 | pr_info("%s: split power mode\n", pdev->name); |
2619 | |
2620 | @@ -493,6 +495,7 @@ static int __init omap_rtc_probe(struct platform_device *pdev) |
2621 | return 0; |
2622 | |
2623 | fail0: |
2624 | + device_init_wakeup(&pdev->dev, false); |
2625 | if (id_entry->driver_data & OMAP_RTC_HAS_KICKER) |
2626 | rtc_writel(0, OMAP_RTC_KICK0_REG); |
2627 | pm_runtime_put_sync(&pdev->dev); |
2628 | diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c |
2629 | index 76e38007ba90..24ba97d3286e 100644 |
2630 | --- a/drivers/rtc/rtc-sirfsoc.c |
2631 | +++ b/drivers/rtc/rtc-sirfsoc.c |
2632 | @@ -286,14 +286,6 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) |
2633 | rtc_div = ((32768 / RTC_HZ) / 2) - 1; |
2634 | sirfsoc_rtc_iobrg_writel(rtc_div, rtcdrv->rtc_base + RTC_DIV); |
2635 | |
2636 | - rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
2637 | - &sirfsoc_rtc_ops, THIS_MODULE); |
2638 | - if (IS_ERR(rtcdrv->rtc)) { |
2639 | - err = PTR_ERR(rtcdrv->rtc); |
2640 | - dev_err(&pdev->dev, "can't register RTC device\n"); |
2641 | - return err; |
2642 | - } |
2643 | - |
2644 | /* 0x3 -> RTC_CLK */ |
2645 | sirfsoc_rtc_iobrg_writel(SIRFSOC_RTC_CLK, |
2646 | rtcdrv->rtc_base + RTC_CLOCK_SWITCH); |
2647 | @@ -308,6 +300,14 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) |
2648 | rtcdrv->overflow_rtc = |
2649 | sirfsoc_rtc_iobrg_readl(rtcdrv->rtc_base + RTC_SW_VALUE); |
2650 | |
2651 | + rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, |
2652 | + &sirfsoc_rtc_ops, THIS_MODULE); |
2653 | + if (IS_ERR(rtcdrv->rtc)) { |
2654 | + err = PTR_ERR(rtcdrv->rtc); |
2655 | + dev_err(&pdev->dev, "can't register RTC device\n"); |
2656 | + return err; |
2657 | + } |
2658 | + |
2659 | rtcdrv->irq = platform_get_irq(pdev, 0); |
2660 | err = devm_request_irq( |
2661 | &pdev->dev, |
2662 | diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c |
2663 | index 3f365402fcc0..14052936b1c5 100644 |
2664 | --- a/drivers/spi/spi-sh-msiof.c |
2665 | +++ b/drivers/spi/spi-sh-msiof.c |
2666 | @@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi) |
2667 | struct device_node *np = spi->master->dev.of_node; |
2668 | struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master); |
2669 | |
2670 | + pm_runtime_get_sync(&p->pdev->dev); |
2671 | + |
2672 | if (!np) { |
2673 | /* |
2674 | * Use spi->controller_data for CS (same strategy as spi_gpio), |
2675 | @@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi) |
2676 | if (spi->cs_gpio >= 0) |
2677 | gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH)); |
2678 | |
2679 | + |
2680 | + pm_runtime_put_sync(&p->pdev->dev); |
2681 | + |
2682 | return 0; |
2683 | } |
2684 | |
2685 | diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c |
2686 | index 2e900a98c3e3..47ca0f3b8c85 100644 |
2687 | --- a/drivers/tty/n_tty.c |
2688 | +++ b/drivers/tty/n_tty.c |
2689 | @@ -321,7 +321,8 @@ static void n_tty_check_unthrottle(struct tty_struct *tty) |
2690 | |
2691 | static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) |
2692 | { |
2693 | - *read_buf_addr(ldata, ldata->read_head++) = c; |
2694 | + *read_buf_addr(ldata, ldata->read_head) = c; |
2695 | + ldata->read_head++; |
2696 | } |
2697 | |
2698 | /** |
2699 | diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c |
2700 | index 30e9e60bc5cd..517cd073dc08 100644 |
2701 | --- a/drivers/tty/serial/men_z135_uart.c |
2702 | +++ b/drivers/tty/serial/men_z135_uart.c |
2703 | @@ -809,6 +809,7 @@ static void men_z135_remove(struct mcb_device *mdev) |
2704 | |
2705 | static const struct mcb_device_id men_z135_ids[] = { |
2706 | { .device = 0x87 }, |
2707 | + { } |
2708 | }; |
2709 | MODULE_DEVICE_TABLE(mcb, men_z135_ids); |
2710 | |
2711 | diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c |
2712 | index c78f43a481ce..587d63bcbd0e 100644 |
2713 | --- a/drivers/tty/serial/samsung.c |
2714 | +++ b/drivers/tty/serial/samsung.c |
2715 | @@ -559,11 +559,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level, |
2716 | unsigned int old) |
2717 | { |
2718 | struct s3c24xx_uart_port *ourport = to_ourport(port); |
2719 | + int timeout = 10000; |
2720 | |
2721 | ourport->pm_level = level; |
2722 | |
2723 | switch (level) { |
2724 | case 3: |
2725 | + while (--timeout && !s3c24xx_serial_txempty_nofifo(port)) |
2726 | + udelay(100); |
2727 | + |
2728 | if (!IS_ERR(ourport->baudclk)) |
2729 | clk_disable_unprepare(ourport->baudclk); |
2730 | |
2731 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
2732 | index 077d58ac3dcb..64d9c3daa856 100644 |
2733 | --- a/drivers/usb/class/cdc-acm.c |
2734 | +++ b/drivers/usb/class/cdc-acm.c |
2735 | @@ -1197,10 +1197,11 @@ next_desc: |
2736 | } else { |
2737 | control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); |
2738 | data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0)); |
2739 | - if (!control_interface || !data_interface) { |
2740 | - dev_dbg(&intf->dev, "no interfaces\n"); |
2741 | - return -ENODEV; |
2742 | - } |
2743 | + } |
2744 | + |
2745 | + if (!control_interface || !data_interface) { |
2746 | + dev_dbg(&intf->dev, "no interfaces\n"); |
2747 | + return -ENODEV; |
2748 | } |
2749 | |
2750 | if (data_interface_num != call_interface_num) |
2751 | @@ -1475,6 +1476,7 @@ alloc_fail8: |
2752 | &dev_attr_wCountryCodes); |
2753 | device_remove_file(&acm->control->dev, |
2754 | &dev_attr_iCountryCodeRelDate); |
2755 | + kfree(acm->country_codes); |
2756 | } |
2757 | device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities); |
2758 | alloc_fail7: |
2759 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
2760 | index 96fafed92b76..0ffb4ed0a945 100644 |
2761 | --- a/drivers/usb/core/quirks.c |
2762 | +++ b/drivers/usb/core/quirks.c |
2763 | @@ -103,6 +103,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
2764 | { USB_DEVICE(0x04f3, 0x009b), .driver_info = |
2765 | USB_QUIRK_DEVICE_QUALIFIER }, |
2766 | |
2767 | + { USB_DEVICE(0x04f3, 0x010c), .driver_info = |
2768 | + USB_QUIRK_DEVICE_QUALIFIER }, |
2769 | + |
2770 | { USB_DEVICE(0x04f3, 0x016f), .driver_info = |
2771 | USB_QUIRK_DEVICE_QUALIFIER }, |
2772 | |
2773 | diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c |
2774 | index 9968f5331fe4..0716c1994e28 100644 |
2775 | --- a/drivers/usb/gadget/udc/at91_udc.c |
2776 | +++ b/drivers/usb/gadget/udc/at91_udc.c |
2777 | @@ -870,12 +870,10 @@ static void clk_on(struct at91_udc *udc) |
2778 | return; |
2779 | udc->clocked = 1; |
2780 | |
2781 | - if (IS_ENABLED(CONFIG_COMMON_CLK)) { |
2782 | - clk_set_rate(udc->uclk, 48000000); |
2783 | - clk_prepare_enable(udc->uclk); |
2784 | - } |
2785 | - clk_prepare_enable(udc->iclk); |
2786 | - clk_prepare_enable(udc->fclk); |
2787 | + if (IS_ENABLED(CONFIG_COMMON_CLK)) |
2788 | + clk_enable(udc->uclk); |
2789 | + clk_enable(udc->iclk); |
2790 | + clk_enable(udc->fclk); |
2791 | } |
2792 | |
2793 | static void clk_off(struct at91_udc *udc) |
2794 | @@ -884,10 +882,10 @@ static void clk_off(struct at91_udc *udc) |
2795 | return; |
2796 | udc->clocked = 0; |
2797 | udc->gadget.speed = USB_SPEED_UNKNOWN; |
2798 | - clk_disable_unprepare(udc->fclk); |
2799 | - clk_disable_unprepare(udc->iclk); |
2800 | + clk_disable(udc->fclk); |
2801 | + clk_disable(udc->iclk); |
2802 | if (IS_ENABLED(CONFIG_COMMON_CLK)) |
2803 | - clk_disable_unprepare(udc->uclk); |
2804 | + clk_disable(udc->uclk); |
2805 | } |
2806 | |
2807 | /* |
2808 | @@ -1780,14 +1778,24 @@ static int at91udc_probe(struct platform_device *pdev) |
2809 | } |
2810 | |
2811 | /* don't do anything until we have both gadget driver and VBUS */ |
2812 | + if (IS_ENABLED(CONFIG_COMMON_CLK)) { |
2813 | + clk_set_rate(udc->uclk, 48000000); |
2814 | + retval = clk_prepare(udc->uclk); |
2815 | + if (retval) |
2816 | + goto fail1; |
2817 | + } |
2818 | + retval = clk_prepare(udc->fclk); |
2819 | + if (retval) |
2820 | + goto fail1a; |
2821 | + |
2822 | retval = clk_prepare_enable(udc->iclk); |
2823 | if (retval) |
2824 | - goto fail1; |
2825 | + goto fail1b; |
2826 | at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS); |
2827 | at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff); |
2828 | /* Clear all pending interrupts - UDP may be used by bootloader. */ |
2829 | at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff); |
2830 | - clk_disable_unprepare(udc->iclk); |
2831 | + clk_disable(udc->iclk); |
2832 | |
2833 | /* request UDC and maybe VBUS irqs */ |
2834 | udc->udp_irq = platform_get_irq(pdev, 0); |
2835 | @@ -1795,7 +1803,7 @@ static int at91udc_probe(struct platform_device *pdev) |
2836 | 0, driver_name, udc); |
2837 | if (retval < 0) { |
2838 | DBG("request irq %d failed\n", udc->udp_irq); |
2839 | - goto fail1; |
2840 | + goto fail1c; |
2841 | } |
2842 | if (gpio_is_valid(udc->board.vbus_pin)) { |
2843 | retval = gpio_request(udc->board.vbus_pin, "udc_vbus"); |
2844 | @@ -1848,6 +1856,13 @@ fail3: |
2845 | gpio_free(udc->board.vbus_pin); |
2846 | fail2: |
2847 | free_irq(udc->udp_irq, udc); |
2848 | +fail1c: |
2849 | + clk_unprepare(udc->iclk); |
2850 | +fail1b: |
2851 | + clk_unprepare(udc->fclk); |
2852 | +fail1a: |
2853 | + if (IS_ENABLED(CONFIG_COMMON_CLK)) |
2854 | + clk_unprepare(udc->uclk); |
2855 | fail1: |
2856 | if (IS_ENABLED(CONFIG_COMMON_CLK) && !IS_ERR(udc->uclk)) |
2857 | clk_put(udc->uclk); |
2858 | @@ -1896,6 +1911,11 @@ static int __exit at91udc_remove(struct platform_device *pdev) |
2859 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2860 | release_mem_region(res->start, resource_size(res)); |
2861 | |
2862 | + if (IS_ENABLED(CONFIG_COMMON_CLK)) |
2863 | + clk_unprepare(udc->uclk); |
2864 | + clk_unprepare(udc->fclk); |
2865 | + clk_unprepare(udc->iclk); |
2866 | + |
2867 | clk_put(udc->iclk); |
2868 | clk_put(udc->fclk); |
2869 | if (IS_ENABLED(CONFIG_COMMON_CLK)) |
2870 | diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c |
2871 | index 2d17c10a0428..294d43c387b2 100644 |
2872 | --- a/drivers/usb/renesas_usbhs/mod_gadget.c |
2873 | +++ b/drivers/usb/renesas_usbhs/mod_gadget.c |
2874 | @@ -602,6 +602,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep) |
2875 | struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); |
2876 | struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); |
2877 | |
2878 | + if (!pipe) |
2879 | + return -EINVAL; |
2880 | + |
2881 | usbhsg_pipe_disable(uep); |
2882 | usbhs_pipe_free(pipe); |
2883 | |
2884 | diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
2885 | index b2aa003bf411..cb3e14780a7e 100644 |
2886 | --- a/drivers/usb/serial/qcserial.c |
2887 | +++ b/drivers/usb/serial/qcserial.c |
2888 | @@ -27,12 +27,15 @@ enum qcserial_layouts { |
2889 | QCSERIAL_G2K = 0, /* Gobi 2000 */ |
2890 | QCSERIAL_G1K = 1, /* Gobi 1000 */ |
2891 | QCSERIAL_SWI = 2, /* Sierra Wireless */ |
2892 | + QCSERIAL_HWI = 3, /* Huawei */ |
2893 | }; |
2894 | |
2895 | #define DEVICE_G1K(v, p) \ |
2896 | USB_DEVICE(v, p), .driver_info = QCSERIAL_G1K |
2897 | #define DEVICE_SWI(v, p) \ |
2898 | USB_DEVICE(v, p), .driver_info = QCSERIAL_SWI |
2899 | +#define DEVICE_HWI(v, p) \ |
2900 | + USB_DEVICE(v, p), .driver_info = QCSERIAL_HWI |
2901 | |
2902 | static const struct usb_device_id id_table[] = { |
2903 | /* Gobi 1000 devices */ |
2904 | @@ -157,6 +160,9 @@ static const struct usb_device_id id_table[] = { |
2905 | {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ |
2906 | {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
2907 | |
2908 | + /* Huawei devices */ |
2909 | + {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ |
2910 | + |
2911 | { } /* Terminating entry */ |
2912 | }; |
2913 | MODULE_DEVICE_TABLE(usb, id_table); |
2914 | @@ -287,6 +293,33 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
2915 | break; |
2916 | } |
2917 | break; |
2918 | + case QCSERIAL_HWI: |
2919 | + /* |
2920 | + * Huawei layout: |
2921 | + * 0: AT-capable modem port |
2922 | + * 1: DM/DIAG |
2923 | + * 2: AT-capable modem port |
2924 | + * 3: CCID-compatible PCSC interface |
2925 | + * 4: QMI/net |
2926 | + * 5: NMEA |
2927 | + */ |
2928 | + switch (ifnum) { |
2929 | + case 0: |
2930 | + case 2: |
2931 | + dev_dbg(dev, "Modem port found\n"); |
2932 | + break; |
2933 | + case 1: |
2934 | + dev_dbg(dev, "DM/DIAG interface found\n"); |
2935 | + break; |
2936 | + case 5: |
2937 | + dev_dbg(dev, "NMEA GPS interface found\n"); |
2938 | + break; |
2939 | + default: |
2940 | + /* don't claim any unsupported interface */ |
2941 | + altsetting = -1; |
2942 | + break; |
2943 | + } |
2944 | + break; |
2945 | default: |
2946 | dev_err(dev, "unsupported device layout type: %lu\n", |
2947 | id->driver_info); |
2948 | diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c |
2949 | index ebd8f218a788..9df5d6ec7eec 100644 |
2950 | --- a/drivers/xen/swiotlb-xen.c |
2951 | +++ b/drivers/xen/swiotlb-xen.c |
2952 | @@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) |
2953 | dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; |
2954 | phys_addr_t paddr = dma; |
2955 | |
2956 | - BUG_ON(paddr != dma); /* truncation has occurred, should never happen */ |
2957 | - |
2958 | paddr |= baddr & ~PAGE_MASK; |
2959 | |
2960 | return paddr; |
2961 | @@ -447,11 +445,11 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr, |
2962 | |
2963 | BUG_ON(dir == DMA_NONE); |
2964 | |
2965 | - xen_dma_unmap_page(hwdev, paddr, size, dir, attrs); |
2966 | + xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs); |
2967 | |
2968 | /* NOTE: We use dev_addr here, not paddr! */ |
2969 | if (is_xen_swiotlb_buffer(dev_addr)) { |
2970 | - swiotlb_tbl_unmap_single(hwdev, paddr, size, dir); |
2971 | + swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir); |
2972 | return; |
2973 | } |
2974 | |
2975 | @@ -495,14 +493,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, |
2976 | BUG_ON(dir == DMA_NONE); |
2977 | |
2978 | if (target == SYNC_FOR_CPU) |
2979 | - xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); |
2980 | + xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir); |
2981 | |
2982 | /* NOTE: We use dev_addr here, not paddr! */ |
2983 | if (is_xen_swiotlb_buffer(dev_addr)) |
2984 | swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); |
2985 | |
2986 | if (target == SYNC_FOR_DEVICE) |
2987 | - xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); |
2988 | + xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir); |
2989 | |
2990 | if (dir != DMA_FROM_DEVICE) |
2991 | return; |
2992 | diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c |
2993 | index 054577bddaf2..de4e70fb3cbb 100644 |
2994 | --- a/fs/btrfs/delayed-inode.c |
2995 | +++ b/fs/btrfs/delayed-inode.c |
2996 | @@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode) |
2997 | { |
2998 | struct btrfs_delayed_node *delayed_node; |
2999 | |
3000 | + /* |
3001 | + * we don't do delayed inode updates during log recovery because it |
3002 | + * leads to enospc problems. This means we also can't do |
3003 | + * delayed inode refs |
3004 | + */ |
3005 | + if (BTRFS_I(inode)->root->fs_info->log_root_recovering) |
3006 | + return -EAGAIN; |
3007 | + |
3008 | delayed_node = btrfs_get_or_create_delayed_node(inode); |
3009 | if (IS_ERR(delayed_node)) |
3010 | return PTR_ERR(delayed_node); |
3011 | diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c |
3012 | index 18c06bbaf136..481529b879fe 100644 |
3013 | --- a/fs/ceph/addr.c |
3014 | +++ b/fs/ceph/addr.c |
3015 | @@ -673,7 +673,7 @@ static int ceph_writepages_start(struct address_space *mapping, |
3016 | int rc = 0; |
3017 | unsigned wsize = 1 << inode->i_blkbits; |
3018 | struct ceph_osd_request *req = NULL; |
3019 | - int do_sync; |
3020 | + int do_sync = 0; |
3021 | u64 truncate_size, snap_size; |
3022 | u32 truncate_seq; |
3023 | |
3024 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
3025 | index ef9bef118342..2d609a5fbfea 100644 |
3026 | --- a/fs/fs-writeback.c |
3027 | +++ b/fs/fs-writeback.c |
3028 | @@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) |
3029 | * write_inode() |
3030 | */ |
3031 | spin_lock(&inode->i_lock); |
3032 | - /* Clear I_DIRTY_PAGES if we've written out all dirty pages */ |
3033 | - if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
3034 | - inode->i_state &= ~I_DIRTY_PAGES; |
3035 | + |
3036 | dirty = inode->i_state & I_DIRTY; |
3037 | - inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); |
3038 | + inode->i_state &= ~I_DIRTY; |
3039 | + |
3040 | + /* |
3041 | + * Paired with smp_mb() in __mark_inode_dirty(). This allows |
3042 | + * __mark_inode_dirty() to test i_state without grabbing i_lock - |
3043 | + * either they see the I_DIRTY bits cleared or we see the dirtied |
3044 | + * inode. |
3045 | + * |
3046 | + * I_DIRTY_PAGES is always cleared together above even if @mapping |
3047 | + * still has dirty pages. The flag is reinstated after smp_mb() if |
3048 | + * necessary. This guarantees that either __mark_inode_dirty() |
3049 | + * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY. |
3050 | + */ |
3051 | + smp_mb(); |
3052 | + |
3053 | + if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) |
3054 | + inode->i_state |= I_DIRTY_PAGES; |
3055 | + |
3056 | spin_unlock(&inode->i_lock); |
3057 | + |
3058 | /* Don't write the inode if only I_DIRTY_PAGES was set */ |
3059 | if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { |
3060 | int err = write_inode(inode, wbc); |
3061 | @@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags) |
3062 | } |
3063 | |
3064 | /* |
3065 | - * make sure that changes are seen by all cpus before we test i_state |
3066 | - * -- mikulas |
3067 | + * Paired with smp_mb() in __writeback_single_inode() for the |
3068 | + * following lockless i_state test. See there for details. |
3069 | */ |
3070 | smp_mb(); |
3071 | |
3072 | - /* avoid the locking if we can */ |
3073 | if ((inode->i_state & flags) == flags) |
3074 | return; |
3075 | |
3076 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
3077 | index e9c3afe4b5d3..d66e3ad1de48 100644 |
3078 | --- a/fs/nfsd/nfs4state.c |
3079 | +++ b/fs/nfsd/nfs4state.c |
3080 | @@ -1711,15 +1711,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source) |
3081 | return 0; |
3082 | } |
3083 | |
3084 | -static long long |
3085 | +static int |
3086 | compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) |
3087 | { |
3088 | - long long res; |
3089 | - |
3090 | - res = o1->len - o2->len; |
3091 | - if (res) |
3092 | - return res; |
3093 | - return (long long)memcmp(o1->data, o2->data, o1->len); |
3094 | + if (o1->len < o2->len) |
3095 | + return -1; |
3096 | + if (o1->len > o2->len) |
3097 | + return 1; |
3098 | + return memcmp(o1->data, o2->data, o1->len); |
3099 | } |
3100 | |
3101 | static int same_name(const char *n1, const char *n2) |
3102 | @@ -1907,7 +1906,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) |
3103 | static struct nfs4_client * |
3104 | find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) |
3105 | { |
3106 | - long long cmp; |
3107 | + int cmp; |
3108 | struct rb_node *node = root->rb_node; |
3109 | struct nfs4_client *clp; |
3110 | |
3111 | @@ -3891,11 +3890,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, |
3112 | status = nfs4_setlease(dp); |
3113 | goto out; |
3114 | } |
3115 | - atomic_inc(&fp->fi_delegees); |
3116 | if (fp->fi_had_conflict) { |
3117 | status = -EAGAIN; |
3118 | goto out_unlock; |
3119 | } |
3120 | + atomic_inc(&fp->fi_delegees); |
3121 | hash_delegation_locked(dp, fp); |
3122 | status = 0; |
3123 | out_unlock: |
3124 | diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c |
3125 | index eeea7a90eb87..2a77603d7cfd 100644 |
3126 | --- a/fs/nfsd/nfs4xdr.c |
3127 | +++ b/fs/nfsd/nfs4xdr.c |
3128 | @@ -1795,9 +1795,12 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep, |
3129 | } |
3130 | else |
3131 | end++; |
3132 | + if (found_esc) |
3133 | + end = next; |
3134 | + |
3135 | str = end; |
3136 | } |
3137 | - pathlen = htonl(xdr->buf->len - pathlen_offset); |
3138 | + pathlen = htonl(count); |
3139 | write_bytes_to_xdr_buf(xdr->buf, pathlen_offset, &pathlen, 4); |
3140 | return 0; |
3141 | } |
3142 | diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c |
3143 | index e1fa69b341b9..8b5969538f39 100644 |
3144 | --- a/fs/nilfs2/inode.c |
3145 | +++ b/fs/nilfs2/inode.c |
3146 | @@ -49,6 +49,8 @@ struct nilfs_iget_args { |
3147 | int for_gc; |
3148 | }; |
3149 | |
3150 | +static int nilfs_iget_test(struct inode *inode, void *opaque); |
3151 | + |
3152 | void nilfs_inode_add_blocks(struct inode *inode, int n) |
3153 | { |
3154 | struct nilfs_root *root = NILFS_I(inode)->i_root; |
3155 | @@ -348,6 +350,17 @@ const struct address_space_operations nilfs_aops = { |
3156 | .is_partially_uptodate = block_is_partially_uptodate, |
3157 | }; |
3158 | |
3159 | +static int nilfs_insert_inode_locked(struct inode *inode, |
3160 | + struct nilfs_root *root, |
3161 | + unsigned long ino) |
3162 | +{ |
3163 | + struct nilfs_iget_args args = { |
3164 | + .ino = ino, .root = root, .cno = 0, .for_gc = 0 |
3165 | + }; |
3166 | + |
3167 | + return insert_inode_locked4(inode, ino, nilfs_iget_test, &args); |
3168 | +} |
3169 | + |
3170 | struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) |
3171 | { |
3172 | struct super_block *sb = dir->i_sb; |
3173 | @@ -383,7 +396,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) |
3174 | if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { |
3175 | err = nilfs_bmap_read(ii->i_bmap, NULL); |
3176 | if (err < 0) |
3177 | - goto failed_bmap; |
3178 | + goto failed_after_creation; |
3179 | |
3180 | set_bit(NILFS_I_BMAP, &ii->i_state); |
3181 | /* No lock is needed; iget() ensures it. */ |
3182 | @@ -399,21 +412,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) |
3183 | spin_lock(&nilfs->ns_next_gen_lock); |
3184 | inode->i_generation = nilfs->ns_next_generation++; |
3185 | spin_unlock(&nilfs->ns_next_gen_lock); |
3186 | - insert_inode_hash(inode); |
3187 | + if (nilfs_insert_inode_locked(inode, root, ino) < 0) { |
3188 | + err = -EIO; |
3189 | + goto failed_after_creation; |
3190 | + } |
3191 | |
3192 | err = nilfs_init_acl(inode, dir); |
3193 | if (unlikely(err)) |
3194 | - goto failed_acl; /* never occur. When supporting |
3195 | + goto failed_after_creation; /* never occur. When supporting |
3196 | nilfs_init_acl(), proper cancellation of |
3197 | above jobs should be considered */ |
3198 | |
3199 | return inode; |
3200 | |
3201 | - failed_acl: |
3202 | - failed_bmap: |
3203 | + failed_after_creation: |
3204 | clear_nlink(inode); |
3205 | + unlock_new_inode(inode); |
3206 | iput(inode); /* raw_inode will be deleted through |
3207 | - generic_delete_inode() */ |
3208 | + nilfs_evict_inode() */ |
3209 | goto failed; |
3210 | |
3211 | failed_ifile_create_inode: |
3212 | @@ -461,8 +477,8 @@ int nilfs_read_inode_common(struct inode *inode, |
3213 | inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); |
3214 | inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec); |
3215 | inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec); |
3216 | - if (inode->i_nlink == 0 && inode->i_mode == 0) |
3217 | - return -EINVAL; /* this inode is deleted */ |
3218 | + if (inode->i_nlink == 0) |
3219 | + return -ESTALE; /* this inode is deleted */ |
3220 | |
3221 | inode->i_blocks = le64_to_cpu(raw_inode->i_blocks); |
3222 | ii->i_flags = le32_to_cpu(raw_inode->i_flags); |
3223 | diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c |
3224 | index 9de78f08989e..0f84b257932c 100644 |
3225 | --- a/fs/nilfs2/namei.c |
3226 | +++ b/fs/nilfs2/namei.c |
3227 | @@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) |
3228 | int err = nilfs_add_link(dentry, inode); |
3229 | if (!err) { |
3230 | d_instantiate(dentry, inode); |
3231 | + unlock_new_inode(inode); |
3232 | return 0; |
3233 | } |
3234 | inode_dec_link_count(inode); |
3235 | + unlock_new_inode(inode); |
3236 | iput(inode); |
3237 | return err; |
3238 | } |
3239 | @@ -182,6 +184,7 @@ out: |
3240 | out_fail: |
3241 | drop_nlink(inode); |
3242 | nilfs_mark_inode_dirty(inode); |
3243 | + unlock_new_inode(inode); |
3244 | iput(inode); |
3245 | goto out; |
3246 | } |
3247 | @@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir, |
3248 | inode_inc_link_count(inode); |
3249 | ihold(inode); |
3250 | |
3251 | - err = nilfs_add_nondir(dentry, inode); |
3252 | - if (!err) |
3253 | + err = nilfs_add_link(dentry, inode); |
3254 | + if (!err) { |
3255 | + d_instantiate(dentry, inode); |
3256 | err = nilfs_transaction_commit(dir->i_sb); |
3257 | - else |
3258 | + } else { |
3259 | + inode_dec_link_count(inode); |
3260 | + iput(inode); |
3261 | nilfs_transaction_abort(dir->i_sb); |
3262 | + } |
3263 | |
3264 | return err; |
3265 | } |
3266 | @@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
3267 | |
3268 | nilfs_mark_inode_dirty(inode); |
3269 | d_instantiate(dentry, inode); |
3270 | + unlock_new_inode(inode); |
3271 | out: |
3272 | if (!err) |
3273 | err = nilfs_transaction_commit(dir->i_sb); |
3274 | @@ -255,6 +263,7 @@ out_fail: |
3275 | drop_nlink(inode); |
3276 | drop_nlink(inode); |
3277 | nilfs_mark_inode_dirty(inode); |
3278 | + unlock_new_inode(inode); |
3279 | iput(inode); |
3280 | out_dir: |
3281 | drop_nlink(dir); |
3282 | diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c |
3283 | index 1ef547e49373..c71174a0b1b5 100644 |
3284 | --- a/fs/ocfs2/aops.c |
3285 | +++ b/fs/ocfs2/aops.c |
3286 | @@ -894,7 +894,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) |
3287 | } |
3288 | } |
3289 | |
3290 | -static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) |
3291 | +static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) |
3292 | { |
3293 | int i; |
3294 | |
3295 | @@ -915,7 +915,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) |
3296 | page_cache_release(wc->w_target_page); |
3297 | } |
3298 | ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); |
3299 | +} |
3300 | |
3301 | +static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc) |
3302 | +{ |
3303 | + ocfs2_unlock_pages(wc); |
3304 | brelse(wc->w_di_bh); |
3305 | kfree(wc); |
3306 | } |
3307 | @@ -2042,11 +2046,19 @@ out_write_size: |
3308 | ocfs2_update_inode_fsync_trans(handle, inode, 1); |
3309 | ocfs2_journal_dirty(handle, wc->w_di_bh); |
3310 | |
3311 | + /* unlock pages before dealloc since it needs acquiring j_trans_barrier |
3312 | + * lock, or it will cause a deadlock since journal commit threads holds |
3313 | + * this lock and will ask for the page lock when flushing the data. |
3314 | + * put it here to preserve the unlock order. |
3315 | + */ |
3316 | + ocfs2_unlock_pages(wc); |
3317 | + |
3318 | ocfs2_commit_trans(osb, handle); |
3319 | |
3320 | ocfs2_run_deallocs(osb, &wc->w_dealloc); |
3321 | |
3322 | - ocfs2_free_write_ctxt(wc); |
3323 | + brelse(wc->w_di_bh); |
3324 | + kfree(wc); |
3325 | |
3326 | return copied; |
3327 | } |
3328 | diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c |
3329 | index b931e04e3388..914c121ec890 100644 |
3330 | --- a/fs/ocfs2/namei.c |
3331 | +++ b/fs/ocfs2/namei.c |
3332 | @@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, |
3333 | struct inode *inode, |
3334 | const char *symname); |
3335 | |
3336 | +static int ocfs2_double_lock(struct ocfs2_super *osb, |
3337 | + struct buffer_head **bh1, |
3338 | + struct inode *inode1, |
3339 | + struct buffer_head **bh2, |
3340 | + struct inode *inode2, |
3341 | + int rename); |
3342 | + |
3343 | +static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); |
3344 | /* An orphan dir name is an 8 byte value, printed as a hex string */ |
3345 | #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) |
3346 | |
3347 | @@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry, |
3348 | { |
3349 | handle_t *handle; |
3350 | struct inode *inode = old_dentry->d_inode; |
3351 | + struct inode *old_dir = old_dentry->d_parent->d_inode; |
3352 | int err; |
3353 | struct buffer_head *fe_bh = NULL; |
3354 | + struct buffer_head *old_dir_bh = NULL; |
3355 | struct buffer_head *parent_fe_bh = NULL; |
3356 | struct ocfs2_dinode *fe = NULL; |
3357 | struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); |
3358 | @@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry, |
3359 | |
3360 | dquot_initialize(dir); |
3361 | |
3362 | - err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); |
3363 | + err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, |
3364 | + &parent_fe_bh, dir, 0); |
3365 | if (err < 0) { |
3366 | if (err != -ENOENT) |
3367 | mlog_errno(err); |
3368 | return err; |
3369 | } |
3370 | |
3371 | + /* make sure both dirs have bhs |
3372 | + * get an extra ref on old_dir_bh if old==new */ |
3373 | + if (!parent_fe_bh) { |
3374 | + if (old_dir_bh) { |
3375 | + parent_fe_bh = old_dir_bh; |
3376 | + get_bh(parent_fe_bh); |
3377 | + } else { |
3378 | + mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); |
3379 | + err = -EIO; |
3380 | + goto out; |
3381 | + } |
3382 | + } |
3383 | + |
3384 | if (!dir->i_nlink) { |
3385 | err = -ENOENT; |
3386 | goto out; |
3387 | } |
3388 | |
3389 | - err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name, |
3390 | + err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, |
3391 | old_dentry->d_name.len, &old_de_ino); |
3392 | if (err) { |
3393 | err = -ENOENT; |
3394 | @@ -801,10 +825,11 @@ out_unlock_inode: |
3395 | ocfs2_inode_unlock(inode, 1); |
3396 | |
3397 | out: |
3398 | - ocfs2_inode_unlock(dir, 1); |
3399 | + ocfs2_double_unlock(old_dir, dir); |
3400 | |
3401 | brelse(fe_bh); |
3402 | brelse(parent_fe_bh); |
3403 | + brelse(old_dir_bh); |
3404 | |
3405 | ocfs2_free_dir_lookup_result(&lookup); |
3406 | |
3407 | @@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb, |
3408 | } |
3409 | |
3410 | /* |
3411 | - * The only place this should be used is rename! |
3412 | + * The only place this should be used is rename and link! |
3413 | * if they have the same id, then the 1st one is the only one locked. |
3414 | */ |
3415 | static int ocfs2_double_lock(struct ocfs2_super *osb, |
3416 | struct buffer_head **bh1, |
3417 | struct inode *inode1, |
3418 | struct buffer_head **bh2, |
3419 | - struct inode *inode2) |
3420 | + struct inode *inode2, |
3421 | + int rename) |
3422 | { |
3423 | int status; |
3424 | int inode1_is_ancestor, inode2_is_ancestor; |
3425 | @@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, |
3426 | } |
3427 | /* lock id2 */ |
3428 | status = ocfs2_inode_lock_nested(inode2, bh2, 1, |
3429 | - OI_LS_RENAME1); |
3430 | + rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); |
3431 | if (status < 0) { |
3432 | if (status != -ENOENT) |
3433 | mlog_errno(status); |
3434 | @@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, |
3435 | } |
3436 | |
3437 | /* lock id1 */ |
3438 | - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); |
3439 | + status = ocfs2_inode_lock_nested(inode1, bh1, 1, |
3440 | + rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); |
3441 | if (status < 0) { |
3442 | /* |
3443 | * An error return must mean that no cluster locks |
3444 | @@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir, |
3445 | |
3446 | /* if old and new are the same, this'll just do one lock. */ |
3447 | status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, |
3448 | - &new_dir_bh, new_dir); |
3449 | + &new_dir_bh, new_dir, 1); |
3450 | if (status < 0) { |
3451 | mlog_errno(status); |
3452 | goto bail; |
3453 | diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c |
3454 | index 3b5744306ed8..5fa34243b1ae 100644 |
3455 | --- a/fs/pstore/ram.c |
3456 | +++ b/fs/pstore/ram.c |
3457 | @@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400); |
3458 | MODULE_PARM_DESC(mem_size, |
3459 | "size of reserved RAM used to store oops/panic logs"); |
3460 | |
3461 | +static unsigned int mem_type; |
3462 | +module_param(mem_type, uint, 0600); |
3463 | +MODULE_PARM_DESC(mem_type, |
3464 | + "set to 1 to try to use unbuffered memory (default 0)"); |
3465 | + |
3466 | static int dump_oops = 1; |
3467 | module_param(dump_oops, int, 0600); |
3468 | MODULE_PARM_DESC(dump_oops, |
3469 | @@ -79,6 +84,7 @@ struct ramoops_context { |
3470 | struct persistent_ram_zone *fprz; |
3471 | phys_addr_t phys_addr; |
3472 | unsigned long size; |
3473 | + unsigned int memtype; |
3474 | size_t record_size; |
3475 | size_t console_size; |
3476 | size_t ftrace_size; |
3477 | @@ -358,7 +364,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt, |
3478 | size_t sz = cxt->record_size; |
3479 | |
3480 | cxt->przs[i] = persistent_ram_new(*paddr, sz, 0, |
3481 | - &cxt->ecc_info); |
3482 | + &cxt->ecc_info, |
3483 | + cxt->memtype); |
3484 | if (IS_ERR(cxt->przs[i])) { |
3485 | err = PTR_ERR(cxt->przs[i]); |
3486 | dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", |
3487 | @@ -388,7 +395,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt, |
3488 | return -ENOMEM; |
3489 | } |
3490 | |
3491 | - *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info); |
3492 | + *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype); |
3493 | if (IS_ERR(*prz)) { |
3494 | int err = PTR_ERR(*prz); |
3495 | |
3496 | @@ -435,6 +442,7 @@ static int ramoops_probe(struct platform_device *pdev) |
3497 | |
3498 | cxt->size = pdata->mem_size; |
3499 | cxt->phys_addr = pdata->mem_address; |
3500 | + cxt->memtype = pdata->mem_type; |
3501 | cxt->record_size = pdata->record_size; |
3502 | cxt->console_size = pdata->console_size; |
3503 | cxt->ftrace_size = pdata->ftrace_size; |
3504 | @@ -564,6 +572,7 @@ static void ramoops_register_dummy(void) |
3505 | |
3506 | dummy_data->mem_size = mem_size; |
3507 | dummy_data->mem_address = mem_address; |
3508 | + dummy_data->mem_type = 0; |
3509 | dummy_data->record_size = record_size; |
3510 | dummy_data->console_size = ramoops_console_size; |
3511 | dummy_data->ftrace_size = ramoops_ftrace_size; |
3512 | diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c |
3513 | index 9d7b9a83699e..76c3f80efdfa 100644 |
3514 | --- a/fs/pstore/ram_core.c |
3515 | +++ b/fs/pstore/ram_core.c |
3516 | @@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz) |
3517 | persistent_ram_update_header_ecc(prz); |
3518 | } |
3519 | |
3520 | -static void *persistent_ram_vmap(phys_addr_t start, size_t size) |
3521 | +static void *persistent_ram_vmap(phys_addr_t start, size_t size, |
3522 | + unsigned int memtype) |
3523 | { |
3524 | struct page **pages; |
3525 | phys_addr_t page_start; |
3526 | @@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size) |
3527 | page_start = start - offset_in_page(start); |
3528 | page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); |
3529 | |
3530 | - prot = pgprot_noncached(PAGE_KERNEL); |
3531 | + if (memtype) |
3532 | + prot = pgprot_noncached(PAGE_KERNEL); |
3533 | + else |
3534 | + prot = pgprot_writecombine(PAGE_KERNEL); |
3535 | |
3536 | pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); |
3537 | if (!pages) { |
3538 | @@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size) |
3539 | return vaddr; |
3540 | } |
3541 | |
3542 | -static void *persistent_ram_iomap(phys_addr_t start, size_t size) |
3543 | +static void *persistent_ram_iomap(phys_addr_t start, size_t size, |
3544 | + unsigned int memtype) |
3545 | { |
3546 | + void *va; |
3547 | + |
3548 | if (!request_mem_region(start, size, "persistent_ram")) { |
3549 | pr_err("request mem region (0x%llx@0x%llx) failed\n", |
3550 | (unsigned long long)size, (unsigned long long)start); |
3551 | @@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size) |
3552 | buffer_start_add = buffer_start_add_locked; |
3553 | buffer_size_add = buffer_size_add_locked; |
3554 | |
3555 | - return ioremap(start, size); |
3556 | + if (memtype) |
3557 | + va = ioremap(start, size); |
3558 | + else |
3559 | + va = ioremap_wc(start, size); |
3560 | + |
3561 | + return va; |
3562 | } |
3563 | |
3564 | static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, |
3565 | - struct persistent_ram_zone *prz) |
3566 | + struct persistent_ram_zone *prz, int memtype) |
3567 | { |
3568 | prz->paddr = start; |
3569 | prz->size = size; |
3570 | |
3571 | if (pfn_valid(start >> PAGE_SHIFT)) |
3572 | - prz->vaddr = persistent_ram_vmap(start, size); |
3573 | + prz->vaddr = persistent_ram_vmap(start, size, memtype); |
3574 | else |
3575 | - prz->vaddr = persistent_ram_iomap(start, size); |
3576 | + prz->vaddr = persistent_ram_iomap(start, size, memtype); |
3577 | |
3578 | if (!prz->vaddr) { |
3579 | pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__, |
3580 | @@ -500,7 +512,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz) |
3581 | } |
3582 | |
3583 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
3584 | - u32 sig, struct persistent_ram_ecc_info *ecc_info) |
3585 | + u32 sig, struct persistent_ram_ecc_info *ecc_info, |
3586 | + unsigned int memtype) |
3587 | { |
3588 | struct persistent_ram_zone *prz; |
3589 | int ret = -ENOMEM; |
3590 | @@ -511,7 +524,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
3591 | goto err; |
3592 | } |
3593 | |
3594 | - ret = persistent_ram_buffer_map(start, size, prz); |
3595 | + ret = persistent_ram_buffer_map(start, size, prz, memtype); |
3596 | if (ret) |
3597 | goto err; |
3598 | |
3599 | diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c |
3600 | index f1376c92cf74..b27ef3541490 100644 |
3601 | --- a/fs/reiserfs/super.c |
3602 | +++ b/fs/reiserfs/super.c |
3603 | @@ -2161,6 +2161,9 @@ error_unlocked: |
3604 | reiserfs_write_unlock(s); |
3605 | } |
3606 | |
3607 | + if (sbi->commit_wq) |
3608 | + destroy_workqueue(sbi->commit_wq); |
3609 | + |
3610 | cancel_delayed_work_sync(&REISERFS_SB(s)->old_work); |
3611 | |
3612 | reiserfs_free_bitmap_cache(s); |
3613 | diff --git a/include/linux/mm.h b/include/linux/mm.h |
3614 | index b46461116cd2..5ab2da9811c1 100644 |
3615 | --- a/include/linux/mm.h |
3616 | +++ b/include/linux/mm.h |
3617 | @@ -1936,7 +1936,7 @@ extern int expand_downwards(struct vm_area_struct *vma, |
3618 | #if VM_GROWSUP |
3619 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); |
3620 | #else |
3621 | - #define expand_upwards(vma, address) do { } while (0) |
3622 | + #define expand_upwards(vma, address) (0) |
3623 | #endif |
3624 | |
3625 | /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ |
3626 | diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h |
3627 | index 9974975d40db..4af3fdc85b01 100644 |
3628 | --- a/include/linux/pstore_ram.h |
3629 | +++ b/include/linux/pstore_ram.h |
3630 | @@ -53,7 +53,8 @@ struct persistent_ram_zone { |
3631 | }; |
3632 | |
3633 | struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, |
3634 | - u32 sig, struct persistent_ram_ecc_info *ecc_info); |
3635 | + u32 sig, struct persistent_ram_ecc_info *ecc_info, |
3636 | + unsigned int memtype); |
3637 | void persistent_ram_free(struct persistent_ram_zone *prz); |
3638 | void persistent_ram_zap(struct persistent_ram_zone *prz); |
3639 | |
3640 | @@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz, |
3641 | struct ramoops_platform_data { |
3642 | unsigned long mem_size; |
3643 | unsigned long mem_address; |
3644 | + unsigned int mem_type; |
3645 | unsigned long record_size; |
3646 | unsigned long console_size; |
3647 | unsigned long ftrace_size; |
3648 | diff --git a/include/linux/writeback.h b/include/linux/writeback.h |
3649 | index a219be961c0a..00048339c23e 100644 |
3650 | --- a/include/linux/writeback.h |
3651 | +++ b/include/linux/writeback.h |
3652 | @@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping, |
3653 | struct writeback_control *wbc, writepage_t writepage, |
3654 | void *data); |
3655 | int do_writepages(struct address_space *mapping, struct writeback_control *wbc); |
3656 | -void set_page_dirty_balance(struct page *page); |
3657 | void writeback_set_ratelimit(void); |
3658 | void tag_pages_for_writeback(struct address_space *mapping, |
3659 | pgoff_t start, pgoff_t end); |
3660 | diff --git a/include/net/mac80211.h b/include/net/mac80211.h |
3661 | index 0ad1f47d2dc7..a9de1da73c01 100644 |
3662 | --- a/include/net/mac80211.h |
3663 | +++ b/include/net/mac80211.h |
3664 | @@ -1227,8 +1227,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); |
3665 | * |
3666 | * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the |
3667 | * driver to indicate that it requires IV generation for this |
3668 | - * particular key. Setting this flag does not necessarily mean that SKBs |
3669 | - * will have sufficient tailroom for ICV or MIC. |
3670 | + * particular key. |
3671 | * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by |
3672 | * the driver for a TKIP key if it requires Michael MIC |
3673 | * generation in software. |
3674 | @@ -1240,9 +1239,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); |
3675 | * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver |
3676 | * if space should be prepared for the IV, but the IV |
3677 | * itself should not be generated. Do not set together with |
3678 | - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does |
3679 | - * not necessarily mean that SKBs will have sufficient tailroom for ICV or |
3680 | - * MIC. |
3681 | + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. |
3682 | * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received |
3683 | * management frames. The flag can help drivers that have a hardware |
3684 | * crypto implementation that doesn't deal with management frames |
3685 | diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h |
3686 | index 0a68d5ae584e..a7d67bc14906 100644 |
3687 | --- a/include/trace/events/sched.h |
3688 | +++ b/include/trace/events/sched.h |
3689 | @@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p) |
3690 | /* |
3691 | * For all intents and purposes a preempted task is a running task. |
3692 | */ |
3693 | - if (task_preempt_count(p) & PREEMPT_ACTIVE) |
3694 | + if (preempt_count() & PREEMPT_ACTIVE) |
3695 | state = TASK_RUNNING | TASK_STATE_MAX; |
3696 | #endif |
3697 | |
3698 | diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h |
3699 | index d4dbef14d4df..584bb0113e25 100644 |
3700 | --- a/include/uapi/linux/audit.h |
3701 | +++ b/include/uapi/linux/audit.h |
3702 | @@ -365,7 +365,9 @@ enum { |
3703 | #define AUDIT_ARCH_PARISC (EM_PARISC) |
3704 | #define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT) |
3705 | #define AUDIT_ARCH_PPC (EM_PPC) |
3706 | +/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */ |
3707 | #define AUDIT_ARCH_PPC64 (EM_PPC64|__AUDIT_ARCH_64BIT) |
3708 | +#define AUDIT_ARCH_PPC64LE (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) |
3709 | #define AUDIT_ARCH_S390 (EM_S390) |
3710 | #define AUDIT_ARCH_S390X (EM_S390|__AUDIT_ARCH_64BIT) |
3711 | #define AUDIT_ARCH_SH (EM_SH) |
3712 | diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h |
3713 | index 0a8e6badb29b..bb1cb73c927a 100644 |
3714 | --- a/include/uapi/linux/hyperv.h |
3715 | +++ b/include/uapi/linux/hyperv.h |
3716 | @@ -134,6 +134,7 @@ struct hv_start_fcopy { |
3717 | |
3718 | struct hv_do_fcopy { |
3719 | struct hv_fcopy_hdr hdr; |
3720 | + __u32 pad; |
3721 | __u64 offset; |
3722 | __u32 size; |
3723 | __u8 data[DATA_FRAGMENT]; |
3724 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
3725 | index 1cd5eef1fcdd..2ab023803945 100644 |
3726 | --- a/kernel/events/core.c |
3727 | +++ b/kernel/events/core.c |
3728 | @@ -7435,11 +7435,11 @@ SYSCALL_DEFINE5(perf_event_open, |
3729 | |
3730 | if (move_group) { |
3731 | synchronize_rcu(); |
3732 | - perf_install_in_context(ctx, group_leader, event->cpu); |
3733 | + perf_install_in_context(ctx, group_leader, group_leader->cpu); |
3734 | get_ctx(ctx); |
3735 | list_for_each_entry(sibling, &group_leader->sibling_list, |
3736 | group_entry) { |
3737 | - perf_install_in_context(ctx, sibling, event->cpu); |
3738 | + perf_install_in_context(ctx, sibling, sibling->cpu); |
3739 | get_ctx(ctx); |
3740 | } |
3741 | } |
3742 | diff --git a/kernel/exit.c b/kernel/exit.c |
3743 | index 5d30019ff953..2116aace6c85 100644 |
3744 | --- a/kernel/exit.c |
3745 | +++ b/kernel/exit.c |
3746 | @@ -1302,9 +1302,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) |
3747 | static int wait_consider_task(struct wait_opts *wo, int ptrace, |
3748 | struct task_struct *p) |
3749 | { |
3750 | + /* |
3751 | + * We can race with wait_task_zombie() from another thread. |
3752 | + * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition |
3753 | + * can't confuse the checks below. |
3754 | + */ |
3755 | + int exit_state = ACCESS_ONCE(p->exit_state); |
3756 | int ret; |
3757 | |
3758 | - if (unlikely(p->exit_state == EXIT_DEAD)) |
3759 | + if (unlikely(exit_state == EXIT_DEAD)) |
3760 | return 0; |
3761 | |
3762 | ret = eligible_child(wo, p); |
3763 | @@ -1325,7 +1331,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, |
3764 | return 0; |
3765 | } |
3766 | |
3767 | - if (unlikely(p->exit_state == EXIT_TRACE)) { |
3768 | + if (unlikely(exit_state == EXIT_TRACE)) { |
3769 | /* |
3770 | * ptrace == 0 means we are the natural parent. In this case |
3771 | * we should clear notask_error, debugger will notify us. |
3772 | @@ -1352,7 +1358,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, |
3773 | } |
3774 | |
3775 | /* slay zombie? */ |
3776 | - if (p->exit_state == EXIT_ZOMBIE) { |
3777 | + if (exit_state == EXIT_ZOMBIE) { |
3778 | /* we don't reap group leaders with subthreads */ |
3779 | if (!delay_group_leader(p)) { |
3780 | /* |
3781 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
3782 | index 89e7283015a6..efdca2f08222 100644 |
3783 | --- a/kernel/sched/core.c |
3784 | +++ b/kernel/sched/core.c |
3785 | @@ -1623,8 +1623,10 @@ void wake_up_if_idle(int cpu) |
3786 | struct rq *rq = cpu_rq(cpu); |
3787 | unsigned long flags; |
3788 | |
3789 | - if (!is_idle_task(rq->curr)) |
3790 | - return; |
3791 | + rcu_read_lock(); |
3792 | + |
3793 | + if (!is_idle_task(rcu_dereference(rq->curr))) |
3794 | + goto out; |
3795 | |
3796 | if (set_nr_if_polling(rq->idle)) { |
3797 | trace_sched_wake_idle_without_ipi(cpu); |
3798 | @@ -1635,6 +1637,9 @@ void wake_up_if_idle(int cpu) |
3799 | /* Else cpu is not in idle, do nothing here */ |
3800 | raw_spin_unlock_irqrestore(&rq->lock, flags); |
3801 | } |
3802 | + |
3803 | +out: |
3804 | + rcu_read_unlock(); |
3805 | } |
3806 | |
3807 | bool cpus_share_cache(int this_cpu, int that_cpu) |
3808 | diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
3809 | index 28fa9d9e9201..40a97c3d8aba 100644 |
3810 | --- a/kernel/sched/deadline.c |
3811 | +++ b/kernel/sched/deadline.c |
3812 | @@ -575,24 +575,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) |
3813 | static |
3814 | int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) |
3815 | { |
3816 | - int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); |
3817 | - int rorun = dl_se->runtime <= 0; |
3818 | - |
3819 | - if (!rorun && !dmiss) |
3820 | - return 0; |
3821 | - |
3822 | - /* |
3823 | - * If we are beyond our current deadline and we are still |
3824 | - * executing, then we have already used some of the runtime of |
3825 | - * the next instance. Thus, if we do not account that, we are |
3826 | - * stealing bandwidth from the system at each deadline miss! |
3827 | - */ |
3828 | - if (dmiss) { |
3829 | - dl_se->runtime = rorun ? dl_se->runtime : 0; |
3830 | - dl_se->runtime -= rq_clock(rq) - dl_se->deadline; |
3831 | - } |
3832 | - |
3833 | - return 1; |
3834 | + return (dl_se->runtime <= 0); |
3835 | } |
3836 | |
3837 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); |
3838 | @@ -831,10 +814,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, |
3839 | * parameters of the task might need updating. Otherwise, |
3840 | * we want a replenishment of its runtime. |
3841 | */ |
3842 | - if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) |
3843 | - replenish_dl_entity(dl_se, pi_se); |
3844 | - else |
3845 | + if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) |
3846 | update_dl_entity(dl_se, pi_se); |
3847 | + else if (flags & ENQUEUE_REPLENISH) |
3848 | + replenish_dl_entity(dl_se, pi_se); |
3849 | |
3850 | __enqueue_dl_entity(dl_se); |
3851 | } |
3852 | diff --git a/mm/memory.c b/mm/memory.c |
3853 | index d5f2ae9c4a23..7f86cf6252bd 100644 |
3854 | --- a/mm/memory.c |
3855 | +++ b/mm/memory.c |
3856 | @@ -2150,17 +2150,24 @@ reuse: |
3857 | if (!dirty_page) |
3858 | return ret; |
3859 | |
3860 | - /* |
3861 | - * Yes, Virginia, this is actually required to prevent a race |
3862 | - * with clear_page_dirty_for_io() from clearing the page dirty |
3863 | - * bit after it clear all dirty ptes, but before a racing |
3864 | - * do_wp_page installs a dirty pte. |
3865 | - * |
3866 | - * do_shared_fault is protected similarly. |
3867 | - */ |
3868 | if (!page_mkwrite) { |
3869 | - wait_on_page_locked(dirty_page); |
3870 | - set_page_dirty_balance(dirty_page); |
3871 | + struct address_space *mapping; |
3872 | + int dirtied; |
3873 | + |
3874 | + lock_page(dirty_page); |
3875 | + dirtied = set_page_dirty(dirty_page); |
3876 | + VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); |
3877 | + mapping = dirty_page->mapping; |
3878 | + unlock_page(dirty_page); |
3879 | + |
3880 | + if (dirtied && mapping) { |
3881 | + /* |
3882 | + * Some device drivers do not set page.mapping |
3883 | + * but still dirty their pages |
3884 | + */ |
3885 | + balance_dirty_pages_ratelimited(mapping); |
3886 | + } |
3887 | + |
3888 | /* file_update_time outside page_lock */ |
3889 | if (vma->vm_file) |
3890 | file_update_time(vma->vm_file); |
3891 | @@ -2606,7 +2613,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo |
3892 | if (prev && prev->vm_end == address) |
3893 | return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; |
3894 | |
3895 | - expand_downwards(vma, address - PAGE_SIZE); |
3896 | + return expand_downwards(vma, address - PAGE_SIZE); |
3897 | } |
3898 | if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { |
3899 | struct vm_area_struct *next = vma->vm_next; |
3900 | @@ -2615,7 +2622,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo |
3901 | if (next && next->vm_start == address + PAGE_SIZE) |
3902 | return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; |
3903 | |
3904 | - expand_upwards(vma, address + PAGE_SIZE); |
3905 | + return expand_upwards(vma, address + PAGE_SIZE); |
3906 | } |
3907 | return 0; |
3908 | } |
3909 | diff --git a/mm/mmap.c b/mm/mmap.c |
3910 | index ae919891a087..1620adbbd77f 100644 |
3911 | --- a/mm/mmap.c |
3912 | +++ b/mm/mmap.c |
3913 | @@ -2099,14 +2099,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns |
3914 | { |
3915 | struct mm_struct *mm = vma->vm_mm; |
3916 | struct rlimit *rlim = current->signal->rlim; |
3917 | - unsigned long new_start; |
3918 | + unsigned long new_start, actual_size; |
3919 | |
3920 | /* address space limit tests */ |
3921 | if (!may_expand_vm(mm, grow)) |
3922 | return -ENOMEM; |
3923 | |
3924 | /* Stack limit test */ |
3925 | - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) |
3926 | + actual_size = size; |
3927 | + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) |
3928 | + actual_size -= PAGE_SIZE; |
3929 | + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) |
3930 | return -ENOMEM; |
3931 | |
3932 | /* mlock limit tests */ |
3933 | diff --git a/mm/page-writeback.c b/mm/page-writeback.c |
3934 | index 19ceae87522d..437174a2aaa3 100644 |
3935 | --- a/mm/page-writeback.c |
3936 | +++ b/mm/page-writeback.c |
3937 | @@ -1541,16 +1541,6 @@ pause: |
3938 | bdi_start_background_writeback(bdi); |
3939 | } |
3940 | |
3941 | -void set_page_dirty_balance(struct page *page) |
3942 | -{ |
3943 | - if (set_page_dirty(page)) { |
3944 | - struct address_space *mapping = page_mapping(page); |
3945 | - |
3946 | - if (mapping) |
3947 | - balance_dirty_pages_ratelimited(mapping); |
3948 | - } |
3949 | -} |
3950 | - |
3951 | static DEFINE_PER_CPU(int, bdp_ratelimits); |
3952 | |
3953 | /* |
3954 | @@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied); |
3955 | * page dirty in that case, but not all the buffers. This is a "bottom-up" |
3956 | * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. |
3957 | * |
3958 | - * Most callers have locked the page, which pins the address_space in memory. |
3959 | - * But zap_pte_range() does not lock the page, however in that case the |
3960 | - * mapping is pinned by the vma's ->vm_file reference. |
3961 | - * |
3962 | - * We take care to handle the case where the page was truncated from the |
3963 | - * mapping by re-checking page_mapping() inside tree_lock. |
3964 | + * The caller must ensure this doesn't race with truncation. Most will simply |
3965 | + * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and |
3966 | + * the pte lock held, which also locks out truncation. |
3967 | */ |
3968 | int __set_page_dirty_nobuffers(struct page *page) |
3969 | { |
3970 | if (!TestSetPageDirty(page)) { |
3971 | struct address_space *mapping = page_mapping(page); |
3972 | - struct address_space *mapping2; |
3973 | unsigned long flags; |
3974 | |
3975 | if (!mapping) |
3976 | return 1; |
3977 | |
3978 | spin_lock_irqsave(&mapping->tree_lock, flags); |
3979 | - mapping2 = page_mapping(page); |
3980 | - if (mapping2) { /* Race with truncate? */ |
3981 | - BUG_ON(mapping2 != mapping); |
3982 | - WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); |
3983 | - account_page_dirtied(page, mapping); |
3984 | - radix_tree_tag_set(&mapping->page_tree, |
3985 | - page_index(page), PAGECACHE_TAG_DIRTY); |
3986 | - } |
3987 | + BUG_ON(page_mapping(page) != mapping); |
3988 | + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); |
3989 | + account_page_dirtied(page, mapping); |
3990 | + radix_tree_tag_set(&mapping->page_tree, page_index(page), |
3991 | + PAGECACHE_TAG_DIRTY); |
3992 | spin_unlock_irqrestore(&mapping->tree_lock, flags); |
3993 | if (mapping->host) { |
3994 | /* !PageAnon && !swapper_space */ |
3995 | @@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page) |
3996 | /* |
3997 | * We carefully synchronise fault handlers against |
3998 | * installing a dirty pte and marking the page dirty |
3999 | - * at this point. We do this by having them hold the |
4000 | - * page lock at some point after installing their |
4001 | - * pte, but before marking the page dirty. |
4002 | - * Pages are always locked coming in here, so we get |
4003 | - * the desired exclusion. See mm/memory.c:do_wp_page() |
4004 | - * for more comments. |
4005 | + * at this point. We do this by having them hold the |
4006 | + * page lock while dirtying the page, and pages are |
4007 | + * always locked coming in here, so we get the desired |
4008 | + * exclusion. |
4009 | */ |
4010 | if (TestClearPageDirty(page)) { |
4011 | dec_zone_page_state(page, NR_FILE_DIRTY); |
4012 | diff --git a/mm/vmscan.c b/mm/vmscan.c |
4013 | index dcb47074ae03..e3b0a54a44aa 100644 |
4014 | --- a/mm/vmscan.c |
4015 | +++ b/mm/vmscan.c |
4016 | @@ -2904,18 +2904,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, |
4017 | return false; |
4018 | |
4019 | /* |
4020 | - * There is a potential race between when kswapd checks its watermarks |
4021 | - * and a process gets throttled. There is also a potential race if |
4022 | - * processes get throttled, kswapd wakes, a large process exits therby |
4023 | - * balancing the zones that causes kswapd to miss a wakeup. If kswapd |
4024 | - * is going to sleep, no process should be sleeping on pfmemalloc_wait |
4025 | - * so wake them now if necessary. If necessary, processes will wake |
4026 | - * kswapd and get throttled again |
4027 | + * The throttled processes are normally woken up in balance_pgdat() as |
4028 | + * soon as pfmemalloc_watermark_ok() is true. But there is a potential |
4029 | + * race between when kswapd checks the watermarks and a process gets |
4030 | + * throttled. There is also a potential race if processes get |
4031 | + * throttled, kswapd wakes, a large process exits thereby balancing the |
4032 | + * zones, which causes kswapd to exit balance_pgdat() before reaching |
4033 | + * the wake up checks. If kswapd is going to sleep, no process should |
4034 | + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If |
4035 | + * the wake up is premature, processes will wake kswapd and get |
4036 | + * throttled again. The difference from wake ups in balance_pgdat() is |
4037 | + * that here we are under prepare_to_wait(). |
4038 | */ |
4039 | - if (waitqueue_active(&pgdat->pfmemalloc_wait)) { |
4040 | - wake_up(&pgdat->pfmemalloc_wait); |
4041 | - return false; |
4042 | - } |
4043 | + if (waitqueue_active(&pgdat->pfmemalloc_wait)) |
4044 | + wake_up_all(&pgdat->pfmemalloc_wait); |
4045 | |
4046 | return pgdat_balanced(pgdat, order, classzone_idx); |
4047 | } |
4048 | diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c |
4049 | index c2e0d14433df..cfbb39e6fdfd 100644 |
4050 | --- a/net/bluetooth/6lowpan.c |
4051 | +++ b/net/bluetooth/6lowpan.c |
4052 | @@ -591,17 +591,13 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) |
4053 | int err = 0; |
4054 | bdaddr_t addr; |
4055 | u8 addr_type; |
4056 | - struct sk_buff *tmpskb; |
4057 | |
4058 | /* We must take a copy of the skb before we modify/replace the ipv6 |
4059 | * header as the header could be used elsewhere |
4060 | */ |
4061 | - tmpskb = skb_unshare(skb, GFP_ATOMIC); |
4062 | - if (!tmpskb) { |
4063 | - kfree_skb(skb); |
4064 | + skb = skb_unshare(skb, GFP_ATOMIC); |
4065 | + if (!skb) |
4066 | return NET_XMIT_DROP; |
4067 | - } |
4068 | - skb = tmpskb; |
4069 | |
4070 | /* Return values from setup_header() |
4071 | * <0 - error, packet is dropped |
4072 | diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c |
4073 | index b9517bd17190..b45eb243a5ee 100644 |
4074 | --- a/net/bluetooth/hci_conn.c |
4075 | +++ b/net/bluetooth/hci_conn.c |
4076 | @@ -415,7 +415,7 @@ static void le_conn_timeout(struct work_struct *work) |
4077 | * happen with broken hardware or if low duty cycle was used |
4078 | * (which doesn't have a timeout of its own). |
4079 | */ |
4080 | - if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) { |
4081 | + if (conn->role == HCI_ROLE_SLAVE) { |
4082 | u8 enable = 0x00; |
4083 | hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), |
4084 | &enable); |
4085 | @@ -517,7 +517,7 @@ int hci_conn_del(struct hci_conn *conn) |
4086 | /* Unacked frames */ |
4087 | hdev->acl_cnt += conn->sent; |
4088 | } else if (conn->type == LE_LINK) { |
4089 | - cancel_delayed_work_sync(&conn->le_conn_timeout); |
4090 | + cancel_delayed_work(&conn->le_conn_timeout); |
4091 | |
4092 | if (hdev->le_pkts) |
4093 | hdev->le_cnt += conn->sent; |
4094 | diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c |
4095 | index 8b0a2a6de419..e5124a9ea6f6 100644 |
4096 | --- a/net/bluetooth/hci_event.c |
4097 | +++ b/net/bluetooth/hci_event.c |
4098 | @@ -205,6 +205,8 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb) |
4099 | hdev->le_scan_type = LE_SCAN_PASSIVE; |
4100 | |
4101 | hdev->ssp_debug_mode = 0; |
4102 | + |
4103 | + hci_bdaddr_list_clear(&hdev->le_white_list); |
4104 | } |
4105 | |
4106 | static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
4107 | @@ -237,7 +239,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) |
4108 | if (rp->status) |
4109 | return; |
4110 | |
4111 | - if (test_bit(HCI_SETUP, &hdev->dev_flags)) |
4112 | + if (test_bit(HCI_SETUP, &hdev->dev_flags) || |
4113 | + test_bit(HCI_CONFIG, &hdev->dev_flags)) |
4114 | memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); |
4115 | } |
4116 | |
4117 | @@ -492,7 +495,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) |
4118 | if (rp->status) |
4119 | return; |
4120 | |
4121 | - if (test_bit(HCI_SETUP, &hdev->dev_flags)) { |
4122 | + if (test_bit(HCI_SETUP, &hdev->dev_flags) || |
4123 | + test_bit(HCI_CONFIG, &hdev->dev_flags)) { |
4124 | hdev->hci_ver = rp->hci_ver; |
4125 | hdev->hci_rev = __le16_to_cpu(rp->hci_rev); |
4126 | hdev->lmp_ver = rp->lmp_ver; |
4127 | @@ -511,7 +515,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, |
4128 | if (rp->status) |
4129 | return; |
4130 | |
4131 | - if (test_bit(HCI_SETUP, &hdev->dev_flags)) |
4132 | + if (test_bit(HCI_SETUP, &hdev->dev_flags) || |
4133 | + test_bit(HCI_CONFIG, &hdev->dev_flags)) |
4134 | memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); |
4135 | } |
4136 | |
4137 | @@ -2139,7 +2144,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) |
4138 | return; |
4139 | } |
4140 | |
4141 | - if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && |
4142 | + /* Require HCI_CONNECTABLE or a whitelist entry to accept the |
4143 | + * connection. These features are only touched through mgmt so |
4144 | + * only do the checks if HCI_MGMT is set. |
4145 | + */ |
4146 | + if (test_bit(HCI_MGMT, &hdev->dev_flags) && |
4147 | + !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) && |
4148 | !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr, |
4149 | BDADDR_BREDR)) { |
4150 | hci_reject_conn(hdev, &ev->bdaddr); |
4151 | diff --git a/net/mac80211/key.c b/net/mac80211/key.c |
4152 | index d66c6443164c..94368404744b 100644 |
4153 | --- a/net/mac80211/key.c |
4154 | +++ b/net/mac80211/key.c |
4155 | @@ -131,7 +131,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) |
4156 | if (!ret) { |
4157 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; |
4158 | |
4159 | - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) |
4160 | + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
4161 | + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || |
4162 | + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) |
4163 | sdata->crypto_tx_tailroom_needed_cnt--; |
4164 | |
4165 | WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && |
4166 | @@ -179,7 +181,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) |
4167 | sta = key->sta; |
4168 | sdata = key->sdata; |
4169 | |
4170 | - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) |
4171 | + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
4172 | + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || |
4173 | + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) |
4174 | increment_tailroom_need_count(sdata); |
4175 | |
4176 | ret = drv_set_key(key->local, DISABLE_KEY, sdata, |
4177 | @@ -875,7 +879,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) |
4178 | if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { |
4179 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; |
4180 | |
4181 | - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) |
4182 | + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || |
4183 | + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || |
4184 | + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) |
4185 | increment_tailroom_need_count(key->sdata); |
4186 | } |
4187 | |
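
The mac80211 key.c hunks above change the tailroom accounting so it is only adjusted when the key needs none of the software-generated fields (MMIC, IV, IV space). The small program below, with assumed flag constants, checks that the three-way test used in the patch is equivalent to a single mask against the union of the flags; either spelling is correct, the patch simply keeps the explicit form.

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_GENERATE_MMIC 0x1
    #define FLAG_GENERATE_IV   0x2
    #define FLAG_PUT_IV_SPACE  0x4

    static bool needs_no_tailroom(unsigned int flags)
    {
        return !(flags & (FLAG_GENERATE_MMIC | FLAG_GENERATE_IV |
                          FLAG_PUT_IV_SPACE));
    }

    int main(void)
    {
        for (unsigned int flags = 0; flags < 8; flags++) {
            bool split = !((flags & FLAG_GENERATE_MMIC) ||
                           (flags & FLAG_GENERATE_IV) ||
                           (flags & FLAG_PUT_IV_SPACE));
            assert(split == needs_no_tailroom(flags));
        }
        puts("single-mask form matches the patched three-way test");
        return 0;
    }
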
4188 | diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c |
4189 | index 290af97bf6f9..2a81e77c4477 100644 |
4190 | --- a/net/sunrpc/xdr.c |
4191 | +++ b/net/sunrpc/xdr.c |
4192 | @@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) |
4193 | struct kvec *head = buf->head; |
4194 | struct kvec *tail = buf->tail; |
4195 | int fraglen; |
4196 | - int new, old; |
4197 | + int new; |
4198 | |
4199 | if (len > buf->len) { |
4200 | WARN_ON_ONCE(1); |
4201 | @@ -628,8 +628,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) |
4202 | buf->len -= fraglen; |
4203 | |
4204 | new = buf->page_base + buf->page_len; |
4205 | - old = new + fraglen; |
4206 | - xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); |
4207 | + |
4208 | + xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); |
4209 | |
4210 | if (buf->page_len && buf->len == len) { |
4211 | xdr->p = page_address(*xdr->page_ptr); |
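
The xdr_truncate_encode() hunk above stops adjusting xdr->page_ptr by the difference of two shifted offsets and instead recomputes it absolutely from the byte offset, which is simpler and cannot accumulate rounding errors across truncations. A user-space sketch of the index arithmetic, with PAGE_SHIFT and the array name assumed:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        char *pages[8];                 /* stand-in for buf->pages[] */
        unsigned long page_base = 100;  /* offset into the first page */
        unsigned long page_len  = 3 * PAGE_SIZE + 50;

        /* The current write position is the end of the page data; its
         * page index is simply the byte offset shifted down. */
        unsigned long new = page_base + page_len;
        char **page_ptr = pages + (new >> PAGE_SHIFT);

        printf("write position lands in pages[%ld]\n",
               (long)(page_ptr - pages));
        return 0;
    }
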
4212 | diff --git a/scripts/kernel-doc b/scripts/kernel-doc |
4213 | index 70bea942b413..9922e66883a5 100755 |
4214 | --- a/scripts/kernel-doc |
4215 | +++ b/scripts/kernel-doc |
4216 | @@ -1753,7 +1753,7 @@ sub dump_struct($$) { |
4217 | # strip kmemcheck_bitfield_{begin,end}.*; |
4218 | $members =~ s/kmemcheck_bitfield_.*?;//gos; |
4219 | # strip attributes |
4220 | - $members =~ s/__aligned\s*\(.+\)//gos; |
4221 | + $members =~ s/__aligned\s*\([^;]*\)//gos; |
4222 | |
4223 | create_parameterlist($members, ';', $file); |
4224 | check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested); |
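
The kernel-doc hunk above narrows the attribute-stripping regex from a greedy ".+" to "[^;]*" so that an __aligned(...) attribute cannot swallow text past the member's terminating semicolon. The demo below uses POSIX extended regexes in C rather than the Perl that kernel-doc is written in (so [[:space:]] replaces \s), purely to show the greediness difference.

    #include <regex.h>
    #include <stdio.h>

    static void show(const char *pattern, const char *text)
    {
        regex_t re;
        regmatch_t m;

        if (regcomp(&re, pattern, REG_EXTENDED) != 0)
            return;
        if (regexec(&re, text, 1, &m, 0) == 0)
            printf("%-35s -> \"%.*s\"\n", pattern,
                   (int)(m.rm_eo - m.rm_so), text + m.rm_so);
        regfree(&re);
    }

    int main(void)
    {
        const char *text = "u8 buf[8] __aligned(8); int (*cb)(void);";

        /* Greedy form: runs to the last ')', eating the next member. */
        show("__aligned[[:space:]]*\\(.+\\)", text);
        /* Patched form: cannot cross the ';' ending the member. */
        show("__aligned[[:space:]]*\\([^;]*\\)", text);
        return 0;
    }
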
4225 | diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c |
4226 | index 255dabc6fc33..2a85e4209f0b 100644 |
4227 | --- a/sound/firewire/fireworks/fireworks_transaction.c |
4228 | +++ b/sound/firewire/fireworks/fireworks_transaction.c |
4229 | @@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) |
4230 | spin_lock_irq(&efw->lock); |
4231 | |
4232 | t = (struct snd_efw_transaction *)data; |
4233 | - length = min_t(size_t, t->length * sizeof(t->length), length); |
4234 | + length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); |
4235 | |
4236 | if (efw->push_ptr < efw->pull_ptr) |
4237 | capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); |
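
The fireworks hunk above converts the device-supplied length from big-endian before using it in size arithmetic, and multiplies by sizeof(u32) explicitly. A small user-space sketch of that conversion, with a hypothetical be32_to_host() helper standing in for be32_to_cpu():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Interpret the in-memory bytes of a 32-bit field as big-endian. */
    static uint32_t be32_to_host(uint32_t be)
    {
        const uint8_t *b = (const uint8_t *)&be;

        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
    }

    int main(void)
    {
        /* 6 quadlets as the value arrives on the wire (big-endian). */
        uint8_t wire[4] = { 0x00, 0x00, 0x00, 0x06 };
        uint32_t raw;

        memcpy(&raw, wire, sizeof(raw));

        uint32_t quadlets = be32_to_host(raw);

        /* On a little-endian host the unconverted value would be huge. */
        printf("raw=0x%08x quadlets=%u bytes=%zu\n",
               raw, quadlets, (size_t)quadlets * sizeof(uint32_t));
        return 0;
    }
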
4238 | diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c |
4239 | index 15e0089492f7..e708368d208f 100644 |
4240 | --- a/sound/pci/hda/hda_codec.c |
4241 | +++ b/sound/pci/hda/hda_codec.c |
4242 | @@ -338,8 +338,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid, |
4243 | unsigned int parm; |
4244 | |
4245 | parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT); |
4246 | - if (parm == -1) |
4247 | + if (parm == -1) { |
4248 | + *start_id = 0; |
4249 | return 0; |
4250 | + } |
4251 | *start_id = (parm >> 16) & 0x7fff; |
4252 | return (int)(parm & 0x7fff); |
4253 | } |
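
The hda_codec.c hunk above assigns *start_id on the early-return path so callers never read an uninitialized output parameter when the parameter read fails. A reduced sketch of the pattern, with invented function names and a fake failing read:

    #include <stdio.h>

    static int read_param(int fail)
    {
        return fail ? -1 : 0x00030005;  /* -1 mimics a failed codec read */
    }

    static int get_sub_nodes(int fail, unsigned int *start_id)
    {
        int parm = read_param(fail);

        if (parm == -1) {
            *start_id = 0;              /* defined even on failure */
            return 0;
        }
        *start_id = (parm >> 16) & 0x7fff;
        return parm & 0x7fff;
    }

    int main(void)
    {
        unsigned int start_id;
        int nodes = get_sub_nodes(1, &start_id);

        printf("nodes=%d start_id=%u\n", nodes, start_id);
        return 0;
    }
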
4254 | diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c |
4255 | index 9dc9cf8c90e9..edb6e6124a23 100644 |
4256 | --- a/sound/pci/hda/patch_hdmi.c |
4257 | +++ b/sound/pci/hda/patch_hdmi.c |
4258 | @@ -3351,6 +3351,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { |
4259 | { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, |
4260 | { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, |
4261 | { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, |
4262 | +{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, |
4263 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, |
4264 | { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, |
4265 | { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, |
4266 | @@ -3410,6 +3411,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060"); |
4267 | MODULE_ALIAS("snd-hda-codec-id:10de0067"); |
4268 | MODULE_ALIAS("snd-hda-codec-id:10de0070"); |
4269 | MODULE_ALIAS("snd-hda-codec-id:10de0071"); |
4270 | +MODULE_ALIAS("snd-hda-codec-id:10de0072"); |
4271 | MODULE_ALIAS("snd-hda-codec-id:10de8001"); |
4272 | MODULE_ALIAS("snd-hda-codec-id:11069f80"); |
4273 | MODULE_ALIAS("snd-hda-codec-id:11069f81"); |
4274 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
4275 | index c5ad83e4e0c7..c879c3709eae 100644 |
4276 | --- a/sound/pci/hda/patch_realtek.c |
4277 | +++ b/sound/pci/hda/patch_realtek.c |
4278 | @@ -319,10 +319,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) |
4279 | break; |
4280 | case 0x10ec0233: |
4281 | case 0x10ec0255: |
4282 | + case 0x10ec0256: |
4283 | case 0x10ec0282: |
4284 | case 0x10ec0283: |
4285 | case 0x10ec0286: |
4286 | case 0x10ec0288: |
4287 | + case 0x10ec0298: |
4288 | alc_update_coef_idx(codec, 0x10, 1<<9, 0); |
4289 | break; |
4290 | case 0x10ec0285: |
4291 | @@ -2657,7 +2659,9 @@ enum { |
4292 | ALC269_TYPE_ALC284, |
4293 | ALC269_TYPE_ALC285, |
4294 | ALC269_TYPE_ALC286, |
4295 | + ALC269_TYPE_ALC298, |
4296 | ALC269_TYPE_ALC255, |
4297 | + ALC269_TYPE_ALC256, |
4298 | }; |
4299 | |
4300 | /* |
4301 | @@ -2684,7 +2688,9 @@ static int alc269_parse_auto_config(struct hda_codec *codec) |
4302 | case ALC269_TYPE_ALC282: |
4303 | case ALC269_TYPE_ALC283: |
4304 | case ALC269_TYPE_ALC286: |
4305 | + case ALC269_TYPE_ALC298: |
4306 | case ALC269_TYPE_ALC255: |
4307 | + case ALC269_TYPE_ALC256: |
4308 | ssids = alc269_ssids; |
4309 | break; |
4310 | default: |
4311 | @@ -4790,6 +4796,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
4312 | SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), |
4313 | SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4314 | SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4315 | + SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
4316 | SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4317 | SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4318 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4319 | @@ -5377,9 +5384,15 @@ static int patch_alc269(struct hda_codec *codec) |
4320 | spec->codec_variant = ALC269_TYPE_ALC286; |
4321 | spec->shutup = alc286_shutup; |
4322 | break; |
4323 | + case 0x10ec0298: |
4324 | + spec->codec_variant = ALC269_TYPE_ALC298; |
4325 | + break; |
4326 | case 0x10ec0255: |
4327 | spec->codec_variant = ALC269_TYPE_ALC255; |
4328 | break; |
4329 | + case 0x10ec0256: |
4330 | + spec->codec_variant = ALC269_TYPE_ALC256; |
4331 | + break; |
4332 | } |
4333 | |
4334 | if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { |
4335 | @@ -6315,6 +6328,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { |
4336 | { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 }, |
4337 | { .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 }, |
4338 | { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 }, |
4339 | + { .id = 0x10ec0256, .name = "ALC256", .patch = patch_alc269 }, |
4340 | { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 }, |
4341 | { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 }, |
4342 | { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 }, |
4343 | @@ -6334,6 +6348,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { |
4344 | { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, |
4345 | { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, |
4346 | { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 }, |
4347 | + { .id = 0x10ec0298, .name = "ALC298", .patch = patch_alc269 }, |
4348 | { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", |
4349 | .patch = patch_alc861 }, |
4350 | { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, |
4351 | diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c |
4352 | index 4f6413e01c13..605d14003d25 100644 |
4353 | --- a/sound/pci/hda/patch_sigmatel.c |
4354 | +++ b/sound/pci/hda/patch_sigmatel.c |
4355 | @@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec) |
4356 | spec->gpio_mask; |
4357 | } |
4358 | if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) |
4359 | - spec->gpio_mask &= spec->gpio_mask; |
4360 | - if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) |
4361 | spec->gpio_dir &= spec->gpio_mask; |
4362 | + if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) |
4363 | + spec->gpio_data &= spec->gpio_mask; |
4364 | if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) |
4365 | spec->eapd_mask &= spec->gpio_mask; |
4366 | if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute)) |
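
The patch_sigmatel.c hunk above fixes a copy-paste slip: each GPIO hint should override its own field and then be clamped by gpio_mask, whereas the old code masked gpio_mask with itself and clamped gpio_dir when the gpio_data hint was given. A reduced sketch with a hypothetical hint source:

    #include <stdio.h>

    struct gpio_state {
        unsigned int gpio_mask, gpio_dir, gpio_data;
    };

    static void apply_hint(unsigned int *field, unsigned int hint,
                           unsigned int mask)
    {
        *field = hint & mask;   /* override, then clamp to the valid pins */
    }

    int main(void)
    {
        struct gpio_state s = { .gpio_mask = 0x0f };

        apply_hint(&s.gpio_dir,  0xff, s.gpio_mask);
        apply_hint(&s.gpio_data, 0x13, s.gpio_mask);

        printf("dir=0x%x data=0x%x\n", s.gpio_dir, s.gpio_data);
        return 0;
    }
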
4367 | diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c |
4368 | index 1229554f1464..d492d6ea656e 100644 |
4369 | --- a/sound/soc/codecs/max98090.c |
4370 | +++ b/sound/soc/codecs/max98090.c |
4371 | @@ -1395,8 +1395,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = { |
4372 | {"STENL Mux", "Sidetone Left", "DMICL"}, |
4373 | {"STENR Mux", "Sidetone Right", "ADCR"}, |
4374 | {"STENR Mux", "Sidetone Right", "DMICR"}, |
4375 | - {"DACL", "NULL", "STENL Mux"}, |
4376 | - {"DACR", "NULL", "STENL Mux"}, |
4377 | + {"DACL", NULL, "STENL Mux"}, |
4378 | + {"DACR", NULL, "STENL Mux"}, |
4379 | |
4380 | {"AIFINL", NULL, "SHDN"}, |
4381 | {"AIFINR", NULL, "SHDN"}, |
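
The max98090 hunk above replaces the string literal "NULL" with a real null pointer in the DAPM route table; the literal is a genuine, non-empty control name and makes the core look for a control actually called "NULL". A reduced route structure illustrating the difference:

    #include <stdio.h>

    struct route {
        const char *sink;
        const char *control;    /* NULL means: direct path, no switch */
        const char *source;
    };

    static const struct route routes[] = {
        { "DACL", "NULL", "STENL Mux" }, /* wrong: a control named "NULL" */
        { "DACL", NULL,   "STENL Mux" }, /* right: no control at all */
    };

    int main(void)
    {
        for (unsigned int i = 0; i < 2; i++)
            printf("route %u: control %s\n", i,
                   routes[i].control ? routes[i].control : "(none)");
        return 0;
    }
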
4382 | diff --git a/sound/soc/codecs/pcm512x-i2c.c b/sound/soc/codecs/pcm512x-i2c.c |
4383 | index 4d62230bd378..d0547fa275fc 100644 |
4384 | --- a/sound/soc/codecs/pcm512x-i2c.c |
4385 | +++ b/sound/soc/codecs/pcm512x-i2c.c |
4386 | @@ -24,8 +24,13 @@ static int pcm512x_i2c_probe(struct i2c_client *i2c, |
4387 | const struct i2c_device_id *id) |
4388 | { |
4389 | struct regmap *regmap; |
4390 | + struct regmap_config config = pcm512x_regmap; |
4391 | |
4392 | - regmap = devm_regmap_init_i2c(i2c, &pcm512x_regmap); |
4393 | + /* msb needs to be set to enable auto-increment of addresses */ |
4394 | + config.read_flag_mask = 0x80; |
4395 | + config.write_flag_mask = 0x80; |
4396 | + |
4397 | + regmap = devm_regmap_init_i2c(i2c, &config); |
4398 | if (IS_ERR(regmap)) |
4399 | return PTR_ERR(regmap); |
4400 | |
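
The pcm512x-i2c hunk above takes a value copy of the shared default regmap configuration, sets the read/write flag masks on the local copy (the MSB enables address auto-increment on this part), and registers with that copy, leaving the shared default untouched. A generic sketch of the copy-then-tweak pattern with an invented config struct:

    #include <stdint.h>
    #include <stdio.h>

    struct bus_config {
        uint8_t read_flag_mask;
        uint8_t write_flag_mask;
        unsigned int max_register;
    };

    static const struct bus_config default_config = { .max_register = 0xff };

    static void register_device(const struct bus_config *cfg)
    {
        printf("registered with rd=0x%02x wr=0x%02x\n",
               cfg->read_flag_mask, cfg->write_flag_mask);
    }

    int main(void)
    {
        struct bus_config config = default_config;  /* local copy */

        /* Set the MSB in every register address to enable auto-increment. */
        config.read_flag_mask = 0x80;
        config.write_flag_mask = 0x80;

        register_device(&config);
        return 0;
    }
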
4401 | diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c |
4402 | index f2de7e049bc6..81a38dd9af1f 100644 |
4403 | --- a/sound/soc/codecs/sigmadsp.c |
4404 | +++ b/sound/soc/codecs/sigmadsp.c |
4405 | @@ -159,6 +159,13 @@ int _process_sigma_firmware(struct device *dev, |
4406 | goto done; |
4407 | } |
4408 | |
4409 | + if (ssfw_head->version != 1) { |
4410 | + dev_err(dev, |
4411 | + "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n", |
4412 | + ssfw_head->version); |
4413 | + goto done; |
4414 | + } |
4415 | + |
4416 | crc = crc32(0, fw->data + sizeof(*ssfw_head), |
4417 | fw->size - sizeof(*ssfw_head)); |
4418 | pr_debug("%s: crc=%x\n", __func__, crc); |
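
The sigmadsp hunk above validates the firmware header's version field before trusting the rest of the blob, rejecting anything the parser does not understand. A reduced sketch with a made-up header layout:

    #include <stdint.h>
    #include <stdio.h>

    struct fw_header {
        uint8_t  magic[7];
        uint8_t  version;
        uint32_t crc;
    };

    static int load_firmware(const struct fw_header *h)
    {
        if (h->version != 1) {
            fprintf(stderr,
                    "invalid version %u, supported versions: 1\n",
                    h->version);
            return -1;
        }
        /* ... CRC check and payload parsing would follow here ... */
        return 0;
    }

    int main(void)
    {
        struct fw_header bad = { .version = 2 };

        printf("load_firmware: %d\n", load_firmware(&bad));
        return 0;
    }
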
4419 | diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c |
4420 | index 145fe5b253d4..93de5dd0a7b9 100644 |
4421 | --- a/sound/soc/codecs/tlv320aic31xx.c |
4422 | +++ b/sound/soc/codecs/tlv320aic31xx.c |
4423 | @@ -911,12 +911,13 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai, |
4424 | } |
4425 | aic31xx->p_div = i; |
4426 | |
4427 | - for (i = 0; aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++) { |
4428 | - if (i == ARRAY_SIZE(aic31xx_divs)) { |
4429 | - dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n", |
4430 | - __func__, freq); |
4431 | - return -EINVAL; |
4432 | - } |
4433 | + for (i = 0; i < ARRAY_SIZE(aic31xx_divs) && |
4434 | + aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++) |
4435 | + ; |
4436 | + if (i == ARRAY_SIZE(aic31xx_divs)) { |
4437 | + dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n", |
4438 | + __func__, freq); |
4439 | + return -EINVAL; |
4440 | } |
4441 | |
4442 | /* set clock on MCLK, BCLK, or GPIO1 as PLL input */ |
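
The tlv320aic31xx hunk above moves the bounds test into the loop condition: in the old form the array element was dereferenced before the in-body index check could fire, reading one entry past the end of the table when no rate matched. A reduced stand-in table showing the corrected shape:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const unsigned int rates[] = { 12000000, 19200000, 26000000 };

    static int find_rate(unsigned int freq)
    {
        unsigned int i;

        /* Bounds check first, element access second. */
        for (i = 0; i < ARRAY_SIZE(rates) && rates[i] != freq; i++)
            ;
        if (i == ARRAY_SIZE(rates)) {
            fprintf(stderr, "unsupported frequency %u\n", freq);
            return -1;
        }
        return (int)i;
    }

    int main(void)
    {
        printf("19.2 MHz -> index %d\n", find_rate(19200000));
        printf("13 MHz   -> index %d\n", find_rate(13000000));
        return 0;
    }
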
4443 | diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c |
4444 | index e961388e6e9c..10e1b8ca42ed 100644 |
4445 | --- a/sound/soc/dwc/designware_i2s.c |
4446 | +++ b/sound/soc/dwc/designware_i2s.c |
4447 | @@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream, |
4448 | snd_soc_dai_set_dma_data(dai, substream, NULL); |
4449 | } |
4450 | |
4451 | +static int dw_i2s_prepare(struct snd_pcm_substream *substream, |
4452 | + struct snd_soc_dai *dai) |
4453 | +{ |
4454 | + struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai); |
4455 | + |
4456 | + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
4457 | + i2s_write_reg(dev->i2s_base, TXFFR, 1); |
4458 | + else |
4459 | + i2s_write_reg(dev->i2s_base, RXFFR, 1); |
4460 | + |
4461 | + return 0; |
4462 | +} |
4463 | + |
4464 | static int dw_i2s_trigger(struct snd_pcm_substream *substream, |
4465 | int cmd, struct snd_soc_dai *dai) |
4466 | { |
4467 | @@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = { |
4468 | .startup = dw_i2s_startup, |
4469 | .shutdown = dw_i2s_shutdown, |
4470 | .hw_params = dw_i2s_hw_params, |
4471 | + .prepare = dw_i2s_prepare, |
4472 | .trigger = dw_i2s_trigger, |
4473 | }; |
4474 | |
4475 | diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c |
4476 | index eb093d5b85c4..54790461f39e 100644 |
4477 | --- a/sound/soc/fsl/eukrea-tlv320.c |
4478 | +++ b/sound/soc/fsl/eukrea-tlv320.c |
4479 | @@ -105,7 +105,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev) |
4480 | int ret; |
4481 | int int_port = 0, ext_port; |
4482 | struct device_node *np = pdev->dev.of_node; |
4483 | - struct device_node *ssi_np, *codec_np; |
4484 | + struct device_node *ssi_np = NULL, *codec_np = NULL; |
4485 | |
4486 | eukrea_tlv320.dev = &pdev->dev; |
4487 | if (np) { |
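
The eukrea-tlv320 hunk above initializes the two device-node pointers to NULL, presumably so the shared error/cleanup path can release them safely even on branches that never acquired them. A generic sketch of that pattern using malloc/free in place of of_node_get/of_node_put (the names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    static void put_node(char *node)
    {
        free(node);     /* free(NULL) is defined to do nothing */
    }

    static int probe(int have_device_tree)
    {
        char *ssi_np = NULL, *codec_np = NULL;  /* the added initializers */
        int ret = 0;

        if (have_device_tree) {
            ssi_np = malloc(16);
            codec_np = malloc(16);
            if (!ssi_np || !codec_np) {
                ret = -1;
                goto err;
            }
        }
        printf("probe ok (dt=%d)\n", have_device_tree);
    err:
        put_node(codec_np);     /* harmless no-ops on the non-DT branch */
        put_node(ssi_np);
        return ret;
    }

    int main(void)
    {
        return probe(0) || probe(1);
    }
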
4488 | diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c |
4489 | index 272844746135..327f8642ca80 100644 |
4490 | --- a/sound/usb/caiaq/audio.c |
4491 | +++ b/sound/usb/caiaq/audio.c |
4492 | @@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev) |
4493 | return -EINVAL; |
4494 | } |
4495 | |
4496 | - if (cdev->n_streams < 2) { |
4497 | + if (cdev->n_streams < 1) { |
4498 | dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); |
4499 | return -EINVAL; |
4500 | } |
4501 | diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c |
4502 | index d1d72ff50347..621bc9ebb55e 100644 |
4503 | --- a/sound/usb/mixer_maps.c |
4504 | +++ b/sound/usb/mixer_maps.c |
4505 | @@ -328,8 +328,11 @@ static struct usbmix_name_map gamecom780_map[] = { |
4506 | {} |
4507 | }; |
4508 | |
4509 | -static const struct usbmix_name_map kef_x300a_map[] = { |
4510 | - { 10, NULL }, /* firmware locks up (?) when we try to access this FU */ |
4511 | +/* some (all?) SCMS USB3318 devices are affected by a firmware lock up |
4512 | + * when anything attempts to access FU 10 (control) |
4513 | + */ |
4514 | +static const struct usbmix_name_map scms_usb3318_map[] = { |
4515 | + { 10, NULL }, |
4516 | { 0 } |
4517 | }; |
4518 | |
4519 | @@ -425,8 +428,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = { |
4520 | .map = ebox44_map, |
4521 | }, |
4522 | { |
4523 | + /* KEF X300A */ |
4524 | .id = USB_ID(0x27ac, 0x1000), |
4525 | - .map = kef_x300a_map, |
4526 | + .map = scms_usb3318_map, |
4527 | + }, |
4528 | + { |
4529 | + /* Arcam rPAC */ |
4530 | + .id = USB_ID(0x25c4, 0x0003), |
4531 | + .map = scms_usb3318_map, |
4532 | }, |
4533 | { 0 } /* terminator */ |
4534 | }; |
4535 | diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h |
4536 | index 5699e7e2a790..50a7b115698c 100644 |
4537 | --- a/tools/perf/util/event.h |
4538 | +++ b/tools/perf/util/event.h |
4539 | @@ -214,6 +214,7 @@ struct events_stats { |
4540 | u32 nr_invalid_chains; |
4541 | u32 nr_unknown_id; |
4542 | u32 nr_unprocessable_samples; |
4543 | + u32 nr_unordered_events; |
4544 | }; |
4545 | |
4546 | struct attr_event { |
4547 | diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c |
4548 | index 6702ac28754b..80dbba095f30 100644 |
4549 | --- a/tools/perf/util/session.c |
4550 | +++ b/tools/perf/util/session.c |
4551 | @@ -521,15 +521,11 @@ int perf_session_queue_event(struct perf_session *s, union perf_event *event, |
4552 | return -ETIME; |
4553 | |
4554 | if (timestamp < oe->last_flush) { |
4555 | - WARN_ONCE(1, "Timestamp below last timeslice flush\n"); |
4556 | - |
4557 | - pr_oe_time(timestamp, "out of order event"); |
4558 | + pr_oe_time(timestamp, "out of order event\n"); |
4559 | pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n", |
4560 | oe->last_flush_type); |
4561 | |
4562 | - /* We could get out of order messages after forced flush. */ |
4563 | - if (oe->last_flush_type != OE_FLUSH__HALF) |
4564 | - return -EINVAL; |
4565 | + s->stats.nr_unordered_events++; |
4566 | } |
4567 | |
4568 | new = ordered_events__new(oe, timestamp, event); |
4569 | @@ -1057,6 +1053,9 @@ static void perf_session__warn_about_errors(const struct perf_session *session, |
4570 | "Do you have a KVM guest running and not using 'perf kvm'?\n", |
4571 | session->stats.nr_unprocessable_samples); |
4572 | } |
4573 | + |
4574 | + if (session->stats.nr_unordered_events != 0) |
4575 | + ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events); |
4576 | } |
4577 | |
4578 | volatile int session_done; |
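
The perf session.c hunk above downgrades an out-of-order timestamp from a fatal error to a counted condition, with a single summary warning printed at the end of the session. The toy event loop below sketches that count-and-warn approach; the stream and the simplified last_flush handling are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t timestamps[] = { 10, 20, 15, 30, 25, 40 };
        const unsigned int n = sizeof(timestamps) / sizeof(timestamps[0]);
        uint64_t last_flush = 0;
        unsigned int nr_unordered = 0;

        for (unsigned int i = 0; i < n; i++) {
            if (timestamps[i] < last_flush)
                nr_unordered++;         /* note it, keep processing */
            else
                last_flush = timestamps[i];
        }

        if (nr_unordered)
            fprintf(stderr, "%u out of order events recorded.\n",
                    nr_unordered);
        return 0;
    }
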
4579 | diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl |
4580 | index bf1398180785..dcb1e9ac949c 100755 |
4581 | --- a/tools/testing/ktest/ktest.pl |
4582 | +++ b/tools/testing/ktest/ktest.pl |
4583 | @@ -3571,7 +3571,9 @@ sub test_this_config { |
4584 | undef %configs; |
4585 | assign_configs \%configs, $output_config; |
4586 | |
4587 | - return $config if (!defined($configs{$config})); |
4588 | + if (!defined($configs{$config}) || $configs{$config} =~ /^#/) { |
4589 | + return $config; |
4590 | + } |
4591 | |
4592 | doprint "disabling config $config did not change .config\n"; |
4593 |