Contents of /trunk/kernel-magellan/patches-4.0/0104-4.0.5-all-fixes.patch
Parent Directory | Revision Log
Revision 2567 -
(show annotations)
(download)
Tue Nov 25 22:54:38 2014 UTC (9 years, 10 months ago) by niro
File size: 170581 byte(s)
-linux-4.0.5
1 | diff --git a/Documentation/hwmon/tmp401 b/Documentation/hwmon/tmp401 |
2 | index 8eb88e974055..711f75e189eb 100644 |
3 | --- a/Documentation/hwmon/tmp401 |
4 | +++ b/Documentation/hwmon/tmp401 |
5 | @@ -20,7 +20,7 @@ Supported chips: |
6 | Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html |
7 | * Texas Instruments TMP435 |
8 | Prefix: 'tmp435' |
9 | - Addresses scanned: I2C 0x37, 0x48 - 0x4f |
10 | + Addresses scanned: I2C 0x48 - 0x4f |
11 | Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html |
12 | |
13 | Authors: |
14 | diff --git a/Documentation/serial/tty.txt b/Documentation/serial/tty.txt |
15 | index 1e52d67d0abf..dbe6623fed1c 100644 |
16 | --- a/Documentation/serial/tty.txt |
17 | +++ b/Documentation/serial/tty.txt |
18 | @@ -198,6 +198,9 @@ TTY_IO_ERROR If set, causes all subsequent userspace read/write |
19 | |
20 | TTY_OTHER_CLOSED Device is a pty and the other side has closed. |
21 | |
22 | +TTY_OTHER_DONE Device is a pty and the other side has closed and |
23 | + all pending input processing has been completed. |
24 | + |
25 | TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into |
26 | smaller chunks. |
27 | |
28 | diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt |
29 | index 53838d9c6295..c59bd9bc41ef 100644 |
30 | --- a/Documentation/virtual/kvm/mmu.txt |
31 | +++ b/Documentation/virtual/kvm/mmu.txt |
32 | @@ -169,6 +169,10 @@ Shadow pages contain the following information: |
33 | Contains the value of cr4.smep && !cr0.wp for which the page is valid |
34 | (pages for which this is true are different from other pages; see the |
35 | treatment of cr0.wp=0 below). |
36 | + role.smap_andnot_wp: |
37 | + Contains the value of cr4.smap && !cr0.wp for which the page is valid |
38 | + (pages for which this is true are different from other pages; see the |
39 | + treatment of cr0.wp=0 below). |
40 | gfn: |
41 | Either the guest page table containing the translations shadowed by this |
42 | page, or the base page frame for linear translations. See role.direct. |
43 | @@ -344,10 +348,16 @@ on fault type: |
44 | |
45 | (user write faults generate a #PF) |
46 | |
47 | -In the first case there is an additional complication if CR4.SMEP is |
48 | -enabled: since we've turned the page into a kernel page, the kernel may now |
49 | -execute it. We handle this by also setting spte.nx. If we get a user |
50 | -fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back. |
51 | +In the first case there are two additional complications: |
52 | +- if CR4.SMEP is enabled: since we've turned the page into a kernel page, |
53 | + the kernel may now execute it. We handle this by also setting spte.nx. |
54 | + If we get a user fetch or read fault, we'll change spte.u=1 and |
55 | + spte.nx=gpte.nx back. |
56 | +- if CR4.SMAP is disabled: since the page has been changed to a kernel |
57 | + page, it can not be reused when CR4.SMAP is enabled. We set |
58 | + CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note, |
59 | + here we do not care the case that CR4.SMAP is enabled since KVM will |
60 | + directly inject #PF to guest due to failed permission check. |
61 | |
62 | To prevent an spte that was converted into a kernel page with cr0.wp=0 |
63 | from being written by the kernel after cr0.wp has changed to 1, we make |
64 | diff --git a/Makefile b/Makefile |
65 | index 3d16bcc87585..1880cf77059b 100644 |
66 | --- a/Makefile |
67 | +++ b/Makefile |
68 | @@ -1,6 +1,6 @@ |
69 | VERSION = 4 |
70 | PATCHLEVEL = 0 |
71 | -SUBLEVEL = 4 |
72 | +SUBLEVEL = 5 |
73 | EXTRAVERSION = |
74 | NAME = Hurr durr I'ma sheep |
75 | |
76 | diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h |
77 | index 067551b6920a..9917a45fc430 100644 |
78 | --- a/arch/arc/include/asm/atomic.h |
79 | +++ b/arch/arc/include/asm/atomic.h |
80 | @@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ |
81 | atomic_ops_unlock(flags); \ |
82 | } |
83 | |
84 | -#define ATOMIC_OP_RETURN(op, c_op) \ |
85 | +#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
86 | static inline int atomic_##op##_return(int i, atomic_t *v) \ |
87 | { \ |
88 | unsigned long flags; \ |
89 | diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile |
90 | index a1c776b8dcec..992ea0b063d5 100644 |
91 | --- a/arch/arm/boot/dts/Makefile |
92 | +++ b/arch/arm/boot/dts/Makefile |
93 | @@ -215,7 +215,7 @@ dtb-$(CONFIG_SOC_IMX25) += \ |
94 | imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \ |
95 | imx25-karo-tx25.dtb \ |
96 | imx25-pdk.dtb |
97 | -dtb-$(CONFIG_SOC_IMX31) += \ |
98 | +dtb-$(CONFIG_SOC_IMX27) += \ |
99 | imx27-apf27.dtb \ |
100 | imx27-apf27dev.dtb \ |
101 | imx27-eukrea-mbimxsd27-baseboard.dtb \ |
102 | diff --git a/arch/arm/boot/dts/exynos4412-trats2.dts b/arch/arm/boot/dts/exynos4412-trats2.dts |
103 | index 173ffa479ad3..792394dd0f2a 100644 |
104 | --- a/arch/arm/boot/dts/exynos4412-trats2.dts |
105 | +++ b/arch/arm/boot/dts/exynos4412-trats2.dts |
106 | @@ -736,7 +736,7 @@ |
107 | |
108 | display-timings { |
109 | timing-0 { |
110 | - clock-frequency = <0>; |
111 | + clock-frequency = <57153600>; |
112 | hactive = <720>; |
113 | vactive = <1280>; |
114 | hfront-porch = <5>; |
115 | diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi |
116 | index 4b063b68db44..9ce1d2128749 100644 |
117 | --- a/arch/arm/boot/dts/imx27.dtsi |
118 | +++ b/arch/arm/boot/dts/imx27.dtsi |
119 | @@ -531,7 +531,7 @@ |
120 | |
121 | fec: ethernet@1002b000 { |
122 | compatible = "fsl,imx27-fec"; |
123 | - reg = <0x1002b000 0x4000>; |
124 | + reg = <0x1002b000 0x1000>; |
125 | interrupts = <50>; |
126 | clocks = <&clks IMX27_CLK_FEC_IPG_GATE>, |
127 | <&clks IMX27_CLK_FEC_AHB_GATE>; |
128 | diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S |
129 | index f8ccc21fa032..4e7f40c577e6 100644 |
130 | --- a/arch/arm/kernel/entry-common.S |
131 | +++ b/arch/arm/kernel/entry-common.S |
132 | @@ -33,7 +33,9 @@ ret_fast_syscall: |
133 | UNWIND(.fnstart ) |
134 | UNWIND(.cantunwind ) |
135 | disable_irq @ disable interrupts |
136 | - ldr r1, [tsk, #TI_FLAGS] |
137 | + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing |
138 | + tst r1, #_TIF_SYSCALL_WORK |
139 | + bne __sys_trace_return |
140 | tst r1, #_TIF_WORK_MASK |
141 | bne fast_work_pending |
142 | asm_trace_hardirqs_on |
143 | diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c |
144 | index 37266a826437..1f02bcb350e5 100644 |
145 | --- a/arch/arm/mach-exynos/pm_domains.c |
146 | +++ b/arch/arm/mach-exynos/pm_domains.c |
147 | @@ -169,7 +169,7 @@ no_clk: |
148 | args.np = np; |
149 | args.args_count = 0; |
150 | child_domain = of_genpd_get_from_provider(&args); |
151 | - if (!child_domain) |
152 | + if (IS_ERR(child_domain)) |
153 | continue; |
154 | |
155 | if (of_parse_phandle_with_args(np, "power-domains", |
156 | @@ -177,7 +177,7 @@ no_clk: |
157 | continue; |
158 | |
159 | parent_domain = of_genpd_get_from_provider(&args); |
160 | - if (!parent_domain) |
161 | + if (IS_ERR(parent_domain)) |
162 | continue; |
163 | |
164 | if (pm_genpd_add_subdomain(parent_domain, child_domain)) |
165 | diff --git a/arch/arm/mach-exynos/sleep.S b/arch/arm/mach-exynos/sleep.S |
166 | index 31d25834b9c4..cf950790fbdc 100644 |
167 | --- a/arch/arm/mach-exynos/sleep.S |
168 | +++ b/arch/arm/mach-exynos/sleep.S |
169 | @@ -23,14 +23,7 @@ |
170 | #define CPU_MASK 0xff0ffff0 |
171 | #define CPU_CORTEX_A9 0x410fc090 |
172 | |
173 | - /* |
174 | - * The following code is located into the .data section. This is to |
175 | - * allow l2x0_regs_phys to be accessed with a relative load while we |
176 | - * can't rely on any MMU translation. We could have put l2x0_regs_phys |
177 | - * in the .text section as well, but some setups might insist on it to |
178 | - * be truly read-only. (Reference from: arch/arm/kernel/sleep.S) |
179 | - */ |
180 | - .data |
181 | + .text |
182 | .align |
183 | |
184 | /* |
185 | @@ -69,10 +62,12 @@ ENTRY(exynos_cpu_resume_ns) |
186 | cmp r0, r1 |
187 | bne skip_cp15 |
188 | |
189 | - adr r0, cp15_save_power |
190 | + adr r0, _cp15_save_power |
191 | ldr r1, [r0] |
192 | - adr r0, cp15_save_diag |
193 | + ldr r1, [r0, r1] |
194 | + adr r0, _cp15_save_diag |
195 | ldr r2, [r0] |
196 | + ldr r2, [r0, r2] |
197 | mov r0, #SMC_CMD_C15RESUME |
198 | dsb |
199 | smc #0 |
200 | @@ -118,14 +113,20 @@ skip_l2x0: |
201 | skip_cp15: |
202 | b cpu_resume |
203 | ENDPROC(exynos_cpu_resume_ns) |
204 | + |
205 | + .align |
206 | +_cp15_save_power: |
207 | + .long cp15_save_power - . |
208 | +_cp15_save_diag: |
209 | + .long cp15_save_diag - . |
210 | +#ifdef CONFIG_CACHE_L2X0 |
211 | +1: .long l2x0_saved_regs - . |
212 | +#endif /* CONFIG_CACHE_L2X0 */ |
213 | + |
214 | + .data |
215 | .globl cp15_save_diag |
216 | cp15_save_diag: |
217 | .long 0 @ cp15 diagnostic |
218 | .globl cp15_save_power |
219 | cp15_save_power: |
220 | .long 0 @ cp15 power control |
221 | - |
222 | -#ifdef CONFIG_CACHE_L2X0 |
223 | - .align |
224 | -1: .long l2x0_saved_regs - . |
225 | -#endif /* CONFIG_CACHE_L2X0 */ |
226 | diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c |
227 | index 4e6ef896c619..7186382672b5 100644 |
228 | --- a/arch/arm/mm/mmu.c |
229 | +++ b/arch/arm/mm/mmu.c |
230 | @@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void) |
231 | } |
232 | |
233 | /* |
234 | - * Find the first non-section-aligned page, and point |
235 | + * Find the first non-pmd-aligned page, and point |
236 | * memblock_limit at it. This relies on rounding the |
237 | - * limit down to be section-aligned, which happens at |
238 | - * the end of this function. |
239 | + * limit down to be pmd-aligned, which happens at the |
240 | + * end of this function. |
241 | * |
242 | * With this algorithm, the start or end of almost any |
243 | - * bank can be non-section-aligned. The only exception |
244 | - * is that the start of the bank 0 must be section- |
245 | + * bank can be non-pmd-aligned. The only exception is |
246 | + * that the start of the bank 0 must be section- |
247 | * aligned, since otherwise memory would need to be |
248 | * allocated when mapping the start of bank 0, which |
249 | * occurs before any free memory is mapped. |
250 | */ |
251 | if (!memblock_limit) { |
252 | - if (!IS_ALIGNED(block_start, SECTION_SIZE)) |
253 | + if (!IS_ALIGNED(block_start, PMD_SIZE)) |
254 | memblock_limit = block_start; |
255 | - else if (!IS_ALIGNED(block_end, SECTION_SIZE)) |
256 | + else if (!IS_ALIGNED(block_end, PMD_SIZE)) |
257 | memblock_limit = arm_lowmem_limit; |
258 | } |
259 | |
260 | @@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void) |
261 | high_memory = __va(arm_lowmem_limit - 1) + 1; |
262 | |
263 | /* |
264 | - * Round the memblock limit down to a section size. This |
265 | + * Round the memblock limit down to a pmd size. This |
266 | * helps to ensure that we will allocate memory from the |
267 | - * last full section, which should be mapped. |
268 | + * last full pmd, which should be mapped. |
269 | */ |
270 | if (memblock_limit) |
271 | - memblock_limit = round_down(memblock_limit, SECTION_SIZE); |
272 | + memblock_limit = round_down(memblock_limit, PMD_SIZE); |
273 | if (!memblock_limit) |
274 | memblock_limit = arm_lowmem_limit; |
275 | |
276 | diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c |
277 | index edba042b2325..dc6a4842683a 100644 |
278 | --- a/arch/arm64/net/bpf_jit_comp.c |
279 | +++ b/arch/arm64/net/bpf_jit_comp.c |
280 | @@ -487,7 +487,7 @@ emit_cond_jmp: |
281 | return -EINVAL; |
282 | } |
283 | |
284 | - imm64 = (u64)insn1.imm << 32 | imm; |
285 | + imm64 = (u64)insn1.imm << 32 | (u32)imm; |
286 | emit_a64_mov_i64(dst, imm64, ctx); |
287 | |
288 | return 1; |
289 | diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c |
290 | index d2c09f6475c5..f20cedcb50f1 100644 |
291 | --- a/arch/mips/kernel/elf.c |
292 | +++ b/arch/mips/kernel/elf.c |
293 | @@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, |
294 | |
295 | /* Lets see if this is an O32 ELF */ |
296 | if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { |
297 | - /* FR = 1 for N32 */ |
298 | - if (ehdr32->e_flags & EF_MIPS_ABI2) |
299 | - state->overall_fp_mode = FP_FR1; |
300 | - else |
301 | - /* Set a good default FPU mode for O32 */ |
302 | - state->overall_fp_mode = cpu_has_mips_r6 ? |
303 | - FP_FRE : FP_FR0; |
304 | - |
305 | if (ehdr32->e_flags & EF_MIPS_FP64) { |
306 | /* |
307 | * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it |
308 | @@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, |
309 | (char *)&abiflags, |
310 | sizeof(abiflags)); |
311 | } else { |
312 | - /* FR=1 is really the only option for 64-bit */ |
313 | - state->overall_fp_mode = FP_FR1; |
314 | - |
315 | if (phdr64->p_type != PT_MIPS_ABIFLAGS) |
316 | return 0; |
317 | if (phdr64->p_filesz < sizeof(abiflags)) |
318 | @@ -147,6 +136,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, |
319 | struct elf32_hdr *ehdr = _ehdr; |
320 | struct mode_req prog_req, interp_req; |
321 | int fp_abi, interp_fp_abi, abi0, abi1, max_abi; |
322 | + bool is_mips64; |
323 | |
324 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) |
325 | return 0; |
326 | @@ -162,10 +152,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, |
327 | abi0 = abi1 = fp_abi; |
328 | } |
329 | |
330 | - /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ |
331 | - max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && |
332 | - (!(ehdr->e_flags & EF_MIPS_ABI2))) ? |
333 | - MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; |
334 | + is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) || |
335 | + (ehdr->e_flags & EF_MIPS_ABI2); |
336 | + |
337 | + if (is_mips64) { |
338 | + /* MIPS64 code always uses FR=1, thus the default is easy */ |
339 | + state->overall_fp_mode = FP_FR1; |
340 | + |
341 | + /* Disallow access to the various FPXX & FP64 ABIs */ |
342 | + max_abi = MIPS_ABI_FP_SOFT; |
343 | + } else { |
344 | + /* Default to a mode capable of running code expecting FR=0 */ |
345 | + state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0; |
346 | + |
347 | + /* Allow all ABIs we know about */ |
348 | + max_abi = MIPS_ABI_FP_64A; |
349 | + } |
350 | |
351 | if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || |
352 | (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) |
353 | diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h |
354 | index 3391d061eccc..78c9fd32c554 100644 |
355 | --- a/arch/parisc/include/asm/elf.h |
356 | +++ b/arch/parisc/include/asm/elf.h |
357 | @@ -348,6 +348,10 @@ struct pt_regs; /* forward declaration... */ |
358 | |
359 | #define ELF_HWCAP 0 |
360 | |
361 | +#define STACK_RND_MASK (is_32bit_task() ? \ |
362 | + 0x7ff >> (PAGE_SHIFT - 12) : \ |
363 | + 0x3ffff >> (PAGE_SHIFT - 12)) |
364 | + |
365 | struct mm_struct; |
366 | extern unsigned long arch_randomize_brk(struct mm_struct *); |
367 | #define arch_randomize_brk arch_randomize_brk |
368 | diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c |
369 | index e1ffea2f9a0b..5aba01ac457f 100644 |
370 | --- a/arch/parisc/kernel/sys_parisc.c |
371 | +++ b/arch/parisc/kernel/sys_parisc.c |
372 | @@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void) |
373 | if (stack_base > STACK_SIZE_MAX) |
374 | stack_base = STACK_SIZE_MAX; |
375 | |
376 | + /* Add space for stack randomization. */ |
377 | + stack_base += (STACK_RND_MASK << PAGE_SHIFT); |
378 | + |
379 | return PAGE_ALIGN(STACK_TOP - stack_base); |
380 | } |
381 | |
382 | diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c |
383 | index 15c99b649b04..b2eb4686bd8f 100644 |
384 | --- a/arch/powerpc/kernel/mce.c |
385 | +++ b/arch/powerpc/kernel/mce.c |
386 | @@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled, |
387 | uint64_t nip, uint64_t addr) |
388 | { |
389 | uint64_t srr1; |
390 | - int index = __this_cpu_inc_return(mce_nest_count); |
391 | + int index = __this_cpu_inc_return(mce_nest_count) - 1; |
392 | struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]); |
393 | |
394 | /* |
395 | @@ -184,7 +184,7 @@ void machine_check_queue_event(void) |
396 | if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) |
397 | return; |
398 | |
399 | - index = __this_cpu_inc_return(mce_queue_count); |
400 | + index = __this_cpu_inc_return(mce_queue_count) - 1; |
401 | /* If queue is full, just return for now. */ |
402 | if (index >= MAX_MC_EVT) { |
403 | __this_cpu_dec(mce_queue_count); |
404 | diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S |
405 | index f096e72262f4..1db685104ffc 100644 |
406 | --- a/arch/powerpc/kernel/vmlinux.lds.S |
407 | +++ b/arch/powerpc/kernel/vmlinux.lds.S |
408 | @@ -213,6 +213,7 @@ SECTIONS |
409 | *(.opd) |
410 | } |
411 | |
412 | + . = ALIGN(256); |
413 | .got : AT(ADDR(.got) - LOAD_OFFSET) { |
414 | __toc_start = .; |
415 | #ifndef CONFIG_RELOCATABLE |
416 | diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c |
417 | index 7940dc90e80b..b258110da952 100644 |
418 | --- a/arch/s390/crypto/ghash_s390.c |
419 | +++ b/arch/s390/crypto/ghash_s390.c |
420 | @@ -16,11 +16,12 @@ |
421 | #define GHASH_DIGEST_SIZE 16 |
422 | |
423 | struct ghash_ctx { |
424 | - u8 icv[16]; |
425 | - u8 key[16]; |
426 | + u8 key[GHASH_BLOCK_SIZE]; |
427 | }; |
428 | |
429 | struct ghash_desc_ctx { |
430 | + u8 icv[GHASH_BLOCK_SIZE]; |
431 | + u8 key[GHASH_BLOCK_SIZE]; |
432 | u8 buffer[GHASH_BLOCK_SIZE]; |
433 | u32 bytes; |
434 | }; |
435 | @@ -28,8 +29,10 @@ struct ghash_desc_ctx { |
436 | static int ghash_init(struct shash_desc *desc) |
437 | { |
438 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
439 | + struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
440 | |
441 | memset(dctx, 0, sizeof(*dctx)); |
442 | + memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE); |
443 | |
444 | return 0; |
445 | } |
446 | @@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm, |
447 | } |
448 | |
449 | memcpy(ctx->key, key, GHASH_BLOCK_SIZE); |
450 | - memset(ctx->icv, 0, GHASH_BLOCK_SIZE); |
451 | |
452 | return 0; |
453 | } |
454 | @@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc, |
455 | const u8 *src, unsigned int srclen) |
456 | { |
457 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
458 | - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
459 | unsigned int n; |
460 | u8 *buf = dctx->buffer; |
461 | int ret; |
462 | @@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc, |
463 | src += n; |
464 | |
465 | if (!dctx->bytes) { |
466 | - ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, |
467 | + ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, |
468 | GHASH_BLOCK_SIZE); |
469 | if (ret != GHASH_BLOCK_SIZE) |
470 | return -EIO; |
471 | @@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc, |
472 | |
473 | n = srclen & ~(GHASH_BLOCK_SIZE - 1); |
474 | if (n) { |
475 | - ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n); |
476 | + ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n); |
477 | if (ret != n) |
478 | return -EIO; |
479 | src += n; |
480 | @@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc, |
481 | return 0; |
482 | } |
483 | |
484 | -static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) |
485 | +static int ghash_flush(struct ghash_desc_ctx *dctx) |
486 | { |
487 | u8 *buf = dctx->buffer; |
488 | int ret; |
489 | @@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) |
490 | |
491 | memset(pos, 0, dctx->bytes); |
492 | |
493 | - ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE); |
494 | + ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE); |
495 | if (ret != GHASH_BLOCK_SIZE) |
496 | return -EIO; |
497 | + |
498 | + dctx->bytes = 0; |
499 | } |
500 | |
501 | - dctx->bytes = 0; |
502 | return 0; |
503 | } |
504 | |
505 | static int ghash_final(struct shash_desc *desc, u8 *dst) |
506 | { |
507 | struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); |
508 | - struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
509 | int ret; |
510 | |
511 | - ret = ghash_flush(ctx, dctx); |
512 | + ret = ghash_flush(dctx); |
513 | if (!ret) |
514 | - memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE); |
515 | + memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE); |
516 | return ret; |
517 | } |
518 | |
519 | diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h |
520 | index e08ec38f8c6e..e10112da008d 100644 |
521 | --- a/arch/s390/include/asm/pgtable.h |
522 | +++ b/arch/s390/include/asm/pgtable.h |
523 | @@ -600,7 +600,7 @@ static inline int pmd_large(pmd_t pmd) |
524 | return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; |
525 | } |
526 | |
527 | -static inline int pmd_pfn(pmd_t pmd) |
528 | +static inline unsigned long pmd_pfn(pmd_t pmd) |
529 | { |
530 | unsigned long origin_mask; |
531 | |
532 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
533 | index a236e39cc385..1c0fb570b5c2 100644 |
534 | --- a/arch/x86/include/asm/kvm_host.h |
535 | +++ b/arch/x86/include/asm/kvm_host.h |
536 | @@ -212,6 +212,7 @@ union kvm_mmu_page_role { |
537 | unsigned nxe:1; |
538 | unsigned cr0_wp:1; |
539 | unsigned smep_andnot_wp:1; |
540 | + unsigned smap_andnot_wp:1; |
541 | }; |
542 | }; |
543 | |
544 | @@ -404,6 +405,7 @@ struct kvm_vcpu_arch { |
545 | struct kvm_mmu_memory_cache mmu_page_header_cache; |
546 | |
547 | struct fpu guest_fpu; |
548 | + bool eager_fpu; |
549 | u64 xcr0; |
550 | u64 guest_supported_xcr0; |
551 | u32 guest_xstate_size; |
552 | @@ -735,6 +737,7 @@ struct kvm_x86_ops { |
553 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); |
554 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); |
555 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); |
556 | + void (*fpu_activate)(struct kvm_vcpu *vcpu); |
557 | void (*fpu_deactivate)(struct kvm_vcpu *vcpu); |
558 | |
559 | void (*tlb_flush)(struct kvm_vcpu *vcpu); |
560 | diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c |
561 | index 3c036cb4a370..11dd8f23fcea 100644 |
562 | --- a/arch/x86/kernel/cpu/mcheck/mce.c |
563 | +++ b/arch/x86/kernel/cpu/mcheck/mce.c |
564 | @@ -705,6 +705,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, |
565 | struct pt_regs *regs) |
566 | { |
567 | int i, ret = 0; |
568 | + char *tmp; |
569 | |
570 | for (i = 0; i < mca_cfg.banks; i++) { |
571 | m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); |
572 | @@ -713,9 +714,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, |
573 | if (quirk_no_way_out) |
574 | quirk_no_way_out(i, m, regs); |
575 | } |
576 | - if (mce_severity(m, mca_cfg.tolerant, msg, true) >= |
577 | - MCE_PANIC_SEVERITY) |
578 | + |
579 | + if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { |
580 | + *msg = tmp; |
581 | ret = 1; |
582 | + } |
583 | } |
584 | return ret; |
585 | } |
586 | diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c |
587 | index c4bb8b8e5017..76d8cbe5a10f 100644 |
588 | --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c |
589 | +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c |
590 | @@ -680,6 +680,7 @@ static int __init rapl_pmu_init(void) |
591 | break; |
592 | case 60: /* Haswell */ |
593 | case 69: /* Haswell-Celeron */ |
594 | + case 61: /* Broadwell */ |
595 | rapl_cntr_mask = RAPL_IDX_HSW; |
596 | rapl_pmu_events_group.attrs = rapl_events_hsw_attr; |
597 | break; |
598 | diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c |
599 | index d5651fce0b71..f341d56b7883 100644 |
600 | --- a/arch/x86/kernel/i387.c |
601 | +++ b/arch/x86/kernel/i387.c |
602 | @@ -169,6 +169,21 @@ static void init_thread_xstate(void) |
603 | xstate_size = sizeof(struct i387_fxsave_struct); |
604 | else |
605 | xstate_size = sizeof(struct i387_fsave_struct); |
606 | + |
607 | + /* |
608 | + * Quirk: we don't yet handle the XSAVES* instructions |
609 | + * correctly, as we don't correctly convert between |
610 | + * standard and compacted format when interfacing |
611 | + * with user-space - so disable it for now. |
612 | + * |
613 | + * The difference is small: with recent CPUs the |
614 | + * compacted format is only marginally smaller than |
615 | + * the standard FPU state format. |
616 | + * |
617 | + * ( This is easy to backport while we are fixing |
618 | + * XSAVES* support. ) |
619 | + */ |
620 | + setup_clear_cpu_cap(X86_FEATURE_XSAVES); |
621 | } |
622 | |
623 | /* |
624 | diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c |
625 | index 8a80737ee6e6..307f9ec28e08 100644 |
626 | --- a/arch/x86/kvm/cpuid.c |
627 | +++ b/arch/x86/kvm/cpuid.c |
628 | @@ -16,6 +16,8 @@ |
629 | #include <linux/module.h> |
630 | #include <linux/vmalloc.h> |
631 | #include <linux/uaccess.h> |
632 | +#include <asm/i387.h> /* For use_eager_fpu. Ugh! */ |
633 | +#include <asm/fpu-internal.h> /* For use_eager_fpu. Ugh! */ |
634 | #include <asm/user.h> |
635 | #include <asm/xsave.h> |
636 | #include "cpuid.h" |
637 | @@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) |
638 | if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) |
639 | best->ebx = xstate_required_size(vcpu->arch.xcr0, true); |
640 | |
641 | + vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu); |
642 | + |
643 | /* |
644 | * The existing code assumes virtual address is 48-bit in the canonical |
645 | * address checks; exit if it is ever changed. |
646 | diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h |
647 | index 4452eedfaedd..9bec2b8cdced 100644 |
648 | --- a/arch/x86/kvm/cpuid.h |
649 | +++ b/arch/x86/kvm/cpuid.h |
650 | @@ -111,4 +111,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) |
651 | best = kvm_find_cpuid_entry(vcpu, 7, 0); |
652 | return best && (best->ebx & bit(X86_FEATURE_RTM)); |
653 | } |
654 | + |
655 | +static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu) |
656 | +{ |
657 | + struct kvm_cpuid_entry2 *best; |
658 | + |
659 | + best = kvm_find_cpuid_entry(vcpu, 7, 0); |
660 | + return best && (best->ebx & bit(X86_FEATURE_MPX)); |
661 | +} |
662 | #endif |
663 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
664 | index cee759299a35..88ee9282a57e 100644 |
665 | --- a/arch/x86/kvm/mmu.c |
666 | +++ b/arch/x86/kvm/mmu.c |
667 | @@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu, |
668 | } |
669 | } |
670 | |
671 | -void update_permission_bitmask(struct kvm_vcpu *vcpu, |
672 | - struct kvm_mmu *mmu, bool ept) |
673 | +static void update_permission_bitmask(struct kvm_vcpu *vcpu, |
674 | + struct kvm_mmu *mmu, bool ept) |
675 | { |
676 | unsigned bit, byte, pfec; |
677 | u8 map; |
678 | @@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu) |
679 | void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) |
680 | { |
681 | bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP); |
682 | + bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP); |
683 | struct kvm_mmu *context = &vcpu->arch.mmu; |
684 | |
685 | MMU_WARN_ON(VALID_PAGE(context->root_hpa)); |
686 | @@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu) |
687 | context->base_role.cr0_wp = is_write_protection(vcpu); |
688 | context->base_role.smep_andnot_wp |
689 | = smep && !is_write_protection(vcpu); |
690 | + context->base_role.smap_andnot_wp |
691 | + = smap && !is_write_protection(vcpu); |
692 | } |
693 | EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); |
694 | |
695 | @@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
696 | const u8 *new, int bytes) |
697 | { |
698 | gfn_t gfn = gpa >> PAGE_SHIFT; |
699 | - union kvm_mmu_page_role mask = { .word = 0 }; |
700 | struct kvm_mmu_page *sp; |
701 | LIST_HEAD(invalid_list); |
702 | u64 entry, gentry, *spte; |
703 | int npte; |
704 | bool remote_flush, local_flush, zap_page; |
705 | + union kvm_mmu_page_role mask = (union kvm_mmu_page_role) { |
706 | + .cr0_wp = 1, |
707 | + .cr4_pae = 1, |
708 | + .nxe = 1, |
709 | + .smep_andnot_wp = 1, |
710 | + .smap_andnot_wp = 1, |
711 | + }; |
712 | |
713 | /* |
714 | * If we don't have indirect shadow pages, it means no page is |
715 | @@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, |
716 | ++vcpu->kvm->stat.mmu_pte_write; |
717 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
718 | |
719 | - mask.cr0_wp = mask.cr4_pae = mask.nxe = 1; |
720 | for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) { |
721 | if (detect_write_misaligned(sp, gpa, bytes) || |
722 | detect_write_flooding(sp)) { |
723 | diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h |
724 | index c7d65637c851..0ada65ecddcf 100644 |
725 | --- a/arch/x86/kvm/mmu.h |
726 | +++ b/arch/x86/kvm/mmu.h |
727 | @@ -71,8 +71,6 @@ enum { |
728 | int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); |
729 | void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu); |
730 | void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly); |
731 | -void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
732 | - bool ept); |
733 | |
734 | static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) |
735 | { |
736 | @@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, |
737 | int index = (pfec >> 1) + |
738 | (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); |
739 | |
740 | + WARN_ON(pfec & PFERR_RSVD_MASK); |
741 | + |
742 | return (mmu->permissions[index] >> pte_access) & 1; |
743 | } |
744 | |
745 | diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h |
746 | index fd49c867b25a..6e6d115fe9b5 100644 |
747 | --- a/arch/x86/kvm/paging_tmpl.h |
748 | +++ b/arch/x86/kvm/paging_tmpl.h |
749 | @@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code, |
750 | mmu_is_nested(vcpu)); |
751 | if (likely(r != RET_MMIO_PF_INVALID)) |
752 | return r; |
753 | + |
754 | + /* |
755 | + * page fault with PFEC.RSVD = 1 is caused by shadow |
756 | + * page fault, should not be used to walk guest page |
757 | + * table. |
758 | + */ |
759 | + error_code &= ~PFERR_RSVD_MASK; |
760 | }; |
761 | |
762 | r = mmu_topup_memory_caches(vcpu); |
763 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
764 | index cc618c882f90..a4e62fcfabcb 100644 |
765 | --- a/arch/x86/kvm/svm.c |
766 | +++ b/arch/x86/kvm/svm.c |
767 | @@ -4374,6 +4374,7 @@ static struct kvm_x86_ops svm_x86_ops = { |
768 | .cache_reg = svm_cache_reg, |
769 | .get_rflags = svm_get_rflags, |
770 | .set_rflags = svm_set_rflags, |
771 | + .fpu_activate = svm_fpu_activate, |
772 | .fpu_deactivate = svm_fpu_deactivate, |
773 | |
774 | .tlb_flush = svm_flush_tlb, |
775 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
776 | index a60bd3aa0965..5318d64674b0 100644 |
777 | --- a/arch/x86/kvm/vmx.c |
778 | +++ b/arch/x86/kvm/vmx.c |
779 | @@ -10179,6 +10179,7 @@ static struct kvm_x86_ops vmx_x86_ops = { |
780 | .cache_reg = vmx_cache_reg, |
781 | .get_rflags = vmx_get_rflags, |
782 | .set_rflags = vmx_set_rflags, |
783 | + .fpu_activate = vmx_fpu_activate, |
784 | .fpu_deactivate = vmx_fpu_deactivate, |
785 | |
786 | .tlb_flush = vmx_flush_tlb, |
787 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
788 | index e222ba5d2beb..8838057da9c3 100644 |
789 | --- a/arch/x86/kvm/x86.c |
790 | +++ b/arch/x86/kvm/x86.c |
791 | @@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr); |
792 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
793 | { |
794 | unsigned long old_cr4 = kvm_read_cr4(vcpu); |
795 | - unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | |
796 | - X86_CR4_PAE | X86_CR4_SMEP; |
797 | + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | |
798 | + X86_CR4_SMEP | X86_CR4_SMAP; |
799 | + |
800 | if (cr4 & CR4_RESERVED_BITS) |
801 | return 1; |
802 | |
803 | @@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) |
804 | (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) |
805 | kvm_mmu_reset_context(vcpu); |
806 | |
807 | - if ((cr4 ^ old_cr4) & X86_CR4_SMAP) |
808 | - update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false); |
809 | - |
810 | if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE) |
811 | kvm_update_cpuid(vcpu); |
812 | |
813 | @@ -6141,6 +6139,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) |
814 | return; |
815 | |
816 | page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); |
817 | + if (is_error_page(page)) |
818 | + return; |
819 | kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); |
820 | |
821 | /* |
822 | @@ -6996,7 +6996,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) |
823 | fpu_save_init(&vcpu->arch.guest_fpu); |
824 | __kernel_fpu_end(); |
825 | ++vcpu->stat.fpu_reload; |
826 | - kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); |
827 | + if (!vcpu->arch.eager_fpu) |
828 | + kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); |
829 | + |
830 | trace_kvm_fpu(0); |
831 | } |
832 | |
833 | @@ -7012,11 +7014,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) |
834 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
835 | unsigned int id) |
836 | { |
837 | + struct kvm_vcpu *vcpu; |
838 | + |
839 | if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) |
840 | printk_once(KERN_WARNING |
841 | "kvm: SMP vm created on host with unstable TSC; " |
842 | "guest TSC will not be reliable\n"); |
843 | - return kvm_x86_ops->vcpu_create(kvm, id); |
844 | + |
845 | + vcpu = kvm_x86_ops->vcpu_create(kvm, id); |
846 | + |
847 | + /* |
848 | + * Activate fpu unconditionally in case the guest needs eager FPU. It will be |
849 | + * deactivated soon if it doesn't. |
850 | + */ |
851 | + kvm_x86_ops->fpu_activate(vcpu); |
852 | + return vcpu; |
853 | } |
854 | |
855 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
856 | diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c |
857 | index f9eeae871593..5aa1f6e281d2 100644 |
858 | --- a/drivers/acpi/osl.c |
859 | +++ b/drivers/acpi/osl.c |
860 | @@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas, |
861 | request_mem_region(addr, length, desc); |
862 | } |
863 | |
864 | -static int __init acpi_reserve_resources(void) |
865 | +static void __init acpi_reserve_resources(void) |
866 | { |
867 | acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length, |
868 | "ACPI PM1a_EVT_BLK"); |
869 | @@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void) |
870 | if (!(acpi_gbl_FADT.gpe1_block_length & 0x1)) |
871 | acpi_request_region(&acpi_gbl_FADT.xgpe1_block, |
872 | acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK"); |
873 | - |
874 | - return 0; |
875 | } |
876 | -device_initcall(acpi_reserve_resources); |
877 | |
878 | void acpi_os_printf(const char *fmt, ...) |
879 | { |
880 | @@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void) |
881 | |
882 | acpi_status __init acpi_os_initialize1(void) |
883 | { |
884 | + acpi_reserve_resources(); |
885 | kacpid_wq = alloc_workqueue("kacpid", 0, 1); |
886 | kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1); |
887 | kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0); |
888 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
889 | index 33bb06e006c9..adce56fa9cef 100644 |
890 | --- a/drivers/ata/ahci.c |
891 | +++ b/drivers/ata/ahci.c |
892 | @@ -66,6 +66,7 @@ enum board_ids { |
893 | board_ahci_yes_fbs, |
894 | |
895 | /* board IDs for specific chipsets in alphabetical order */ |
896 | + board_ahci_avn, |
897 | board_ahci_mcp65, |
898 | board_ahci_mcp77, |
899 | board_ahci_mcp89, |
900 | @@ -84,6 +85,8 @@ enum board_ids { |
901 | static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); |
902 | static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class, |
903 | unsigned long deadline); |
904 | +static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, |
905 | + unsigned long deadline); |
906 | static void ahci_mcp89_apple_enable(struct pci_dev *pdev); |
907 | static bool is_mcp89_apple(struct pci_dev *pdev); |
908 | static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, |
909 | @@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = { |
910 | .hardreset = ahci_p5wdh_hardreset, |
911 | }; |
912 | |
913 | +static struct ata_port_operations ahci_avn_ops = { |
914 | + .inherits = &ahci_ops, |
915 | + .hardreset = ahci_avn_hardreset, |
916 | +}; |
917 | + |
918 | static const struct ata_port_info ahci_port_info[] = { |
919 | /* by features */ |
920 | [board_ahci] = { |
921 | @@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = { |
922 | .port_ops = &ahci_ops, |
923 | }, |
924 | /* by chipsets */ |
925 | + [board_ahci_avn] = { |
926 | + .flags = AHCI_FLAG_COMMON, |
927 | + .pio_mask = ATA_PIO4, |
928 | + .udma_mask = ATA_UDMA6, |
929 | + .port_ops = &ahci_avn_ops, |
930 | + }, |
931 | [board_ahci_mcp65] = { |
932 | AHCI_HFLAGS (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP | |
933 | AHCI_HFLAG_YES_NCQ), |
934 | @@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
935 | { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */ |
936 | { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */ |
937 | { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */ |
938 | - { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */ |
939 | - { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */ |
940 | - { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */ |
941 | - { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */ |
942 | - { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */ |
943 | - { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */ |
944 | - { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */ |
945 | - { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */ |
946 | + { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */ |
947 | + { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */ |
948 | + { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */ |
949 | + { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */ |
950 | + { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */ |
951 | + { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */ |
952 | + { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */ |
953 | + { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */ |
954 | { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */ |
955 | { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */ |
956 | { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */ |
957 | @@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class, |
958 | return rc; |
959 | } |
960 | |
961 | +/* |
962 | + * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports. |
963 | + * |
964 | + * It has been observed with some SSDs that the timing of events in the |
965 | + * link synchronization phase can leave the port in a state that can not |
966 | + * be recovered by a SATA-hard-reset alone. The failing signature is |
967 | + * SStatus.DET stuck at 1 ("Device presence detected but Phy |
968 | + * communication not established"). It was found that unloading and |
969 | + * reloading the driver when this problem occurs allows the drive |
970 | + * connection to be recovered (DET advanced to 0x3). The critical |
971 | + * component of reloading the driver is that the port state machines are |
972 | + * reset by bouncing "port enable" in the AHCI PCS configuration |
973 | + * register. So, reproduce that effect by bouncing a port whenever we |
974 | + * see DET==1 after a reset. |
975 | + */ |
976 | +static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class, |
977 | + unsigned long deadline) |
978 | +{ |
979 | + const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); |
980 | + struct ata_port *ap = link->ap; |
981 | + struct ahci_port_priv *pp = ap->private_data; |
982 | + struct ahci_host_priv *hpriv = ap->host->private_data; |
983 | + u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; |
984 | + unsigned long tmo = deadline - jiffies; |
985 | + struct ata_taskfile tf; |
986 | + bool online; |
987 | + int rc, i; |
988 | + |
989 | + DPRINTK("ENTER\n"); |
990 | + |
991 | + ahci_stop_engine(ap); |
992 | + |
993 | + for (i = 0; i < 2; i++) { |
994 | + u16 val; |
995 | + u32 sstatus; |
996 | + int port = ap->port_no; |
997 | + struct ata_host *host = ap->host; |
998 | + struct pci_dev *pdev = to_pci_dev(host->dev); |
999 | + |
1000 | + /* clear D2H reception area to properly wait for D2H FIS */ |
1001 | + ata_tf_init(link->device, &tf); |
1002 | + tf.command = ATA_BUSY; |
1003 | + ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
1004 | + |
1005 | + rc = sata_link_hardreset(link, timing, deadline, &online, |
1006 | + ahci_check_ready); |
1007 | + |
1008 | + if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 || |
1009 | + (sstatus & 0xf) != 1) |
1010 | + break; |
1011 | + |
1012 | + ata_link_printk(link, KERN_INFO, "avn bounce port%d\n", |
1013 | + port); |
1014 | + |
1015 | + pci_read_config_word(pdev, 0x92, &val); |
1016 | + val &= ~(1 << port); |
1017 | + pci_write_config_word(pdev, 0x92, val); |
1018 | + ata_msleep(ap, 1000); |
1019 | + val |= 1 << port; |
1020 | + pci_write_config_word(pdev, 0x92, val); |
1021 | + deadline += tmo; |
1022 | + } |
1023 | + |
1024 | + hpriv->start_engine(ap); |
1025 | + |
1026 | + if (online) |
1027 | + *class = ahci_dev_classify(ap); |
1028 | + |
1029 | + DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class); |
1030 | + return rc; |
1031 | +} |
1032 | + |
1033 | + |
1034 | #ifdef CONFIG_PM |
1035 | static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) |
1036 | { |
1037 | diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c |
1038 | index 61a9c07e0dff..287c4ba0219f 100644 |
1039 | --- a/drivers/ata/libahci.c |
1040 | +++ b/drivers/ata/libahci.c |
1041 | @@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap, |
1042 | if (unlikely(resetting)) |
1043 | status &= ~PORT_IRQ_BAD_PMP; |
1044 | |
1045 | - /* if LPM is enabled, PHYRDY doesn't mean anything */ |
1046 | - if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) { |
1047 | + if (sata_lpm_ignore_phy_events(&ap->link)) { |
1048 | status &= ~PORT_IRQ_PHYRDY; |
1049 | ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); |
1050 | } |
1051 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1052 | index 23dac3babfe3..87b4b7f9fdc6 100644 |
1053 | --- a/drivers/ata/libata-core.c |
1054 | +++ b/drivers/ata/libata-core.c |
1055 | @@ -4214,7 +4214,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { |
1056 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1057 | { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
1058 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1059 | - { "Samsung SSD 850 PRO*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
1060 | + { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
1061 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
1062 | |
1063 | /* |
1064 | @@ -6728,6 +6728,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, |
1065 | return tmp; |
1066 | } |
1067 | |
1068 | +/** |
1069 | + * sata_lpm_ignore_phy_events - test if PHY event should be ignored |
1070 | + * @link: Link receiving the event |
1071 | + * |
1072 | + * Test whether the received PHY event has to be ignored or not. |
1073 | + * |
1074 | + * LOCKING: |
1075 | + * None: |
1076 | + * |
1077 | + * RETURNS: |
1078 | + * True if the event has to be ignored. |
1079 | + */ |
1080 | +bool sata_lpm_ignore_phy_events(struct ata_link *link) |
1081 | +{ |
1082 | + unsigned long lpm_timeout = link->last_lpm_change + |
1083 | + msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); |
1084 | + |
1085 | + /* if LPM is enabled, PHYRDY doesn't mean anything */ |
1086 | + if (link->lpm_policy > ATA_LPM_MAX_POWER) |
1087 | + return true; |
1088 | + |
1089 | + /* ignore the first PHY event after the LPM policy changed |
1090 | + * as it is might be spurious |
1091 | + */ |
1092 | + if ((link->flags & ATA_LFLAG_CHANGED) && |
1093 | + time_before(jiffies, lpm_timeout)) |
1094 | + return true; |
1095 | + |
1096 | + return false; |
1097 | +} |
1098 | +EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); |
1099 | + |
1100 | /* |
1101 | * Dummy port_ops |
1102 | */ |
1103 | diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c |
1104 | index d2029a462e2c..89c3d83e1ca7 100644 |
1105 | --- a/drivers/ata/libata-eh.c |
1106 | +++ b/drivers/ata/libata-eh.c |
1107 | @@ -3489,6 +3489,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, |
1108 | } |
1109 | } |
1110 | |
1111 | + link->last_lpm_change = jiffies; |
1112 | + link->flags |= ATA_LFLAG_CHANGED; |
1113 | + |
1114 | return 0; |
1115 | |
1116 | fail: |
1117 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
1118 | index 237f23f68bfc..1daa0ea2f1ac 100644 |
1119 | --- a/drivers/clk/clk.c |
1120 | +++ b/drivers/clk/clk.c |
1121 | @@ -1443,8 +1443,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk, |
1122 | */ |
1123 | if (clk->prepare_count) { |
1124 | clk_core_prepare(parent); |
1125 | + flags = clk_enable_lock(); |
1126 | clk_core_enable(parent); |
1127 | clk_core_enable(clk); |
1128 | + clk_enable_unlock(flags); |
1129 | } |
1130 | |
1131 | /* update the clk tree topology */ |
1132 | @@ -1459,13 +1461,17 @@ static void __clk_set_parent_after(struct clk_core *core, |
1133 | struct clk_core *parent, |
1134 | struct clk_core *old_parent) |
1135 | { |
1136 | + unsigned long flags; |
1137 | + |
1138 | /* |
1139 | * Finish the migration of prepare state and undo the changes done |
1140 | * for preventing a race with clk_enable(). |
1141 | */ |
1142 | if (core->prepare_count) { |
1143 | + flags = clk_enable_lock(); |
1144 | clk_core_disable(core); |
1145 | clk_core_disable(old_parent); |
1146 | + clk_enable_unlock(flags); |
1147 | clk_core_unprepare(old_parent); |
1148 | } |
1149 | } |
1150 | @@ -1489,8 +1495,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent, |
1151 | clk_enable_unlock(flags); |
1152 | |
1153 | if (clk->prepare_count) { |
1154 | + flags = clk_enable_lock(); |
1155 | clk_core_disable(clk); |
1156 | clk_core_disable(parent); |
1157 | + clk_enable_unlock(flags); |
1158 | clk_core_unprepare(parent); |
1159 | } |
1160 | return ret; |
1161 | diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c |
1162 | index 07d666cc6a29..bea4a173eef5 100644 |
1163 | --- a/drivers/clk/samsung/clk-exynos5420.c |
1164 | +++ b/drivers/clk/samsung/clk-exynos5420.c |
1165 | @@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = { |
1166 | { .offset = SRC_MASK_PERIC0, .value = 0x11111110, }, |
1167 | { .offset = SRC_MASK_PERIC1, .value = 0x11111100, }, |
1168 | { .offset = SRC_MASK_ISP, .value = 0x11111000, }, |
1169 | + { .offset = GATE_BUS_TOP, .value = 0xffffffff, }, |
1170 | { .offset = GATE_BUS_DISP1, .value = 0xffffffff, }, |
1171 | { .offset = GATE_IP_PERIC, .value = 0xffffffff, }, |
1172 | }; |
1173 | diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c |
1174 | index 2eebd28b4c40..ccc20188f00c 100644 |
1175 | --- a/drivers/firmware/dmi_scan.c |
1176 | +++ b/drivers/firmware/dmi_scan.c |
1177 | @@ -499,18 +499,19 @@ static int __init dmi_present(const u8 *buf) |
1178 | buf += 16; |
1179 | |
1180 | if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) { |
1181 | + if (smbios_ver) |
1182 | + dmi_ver = smbios_ver; |
1183 | + else |
1184 | + dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F); |
1185 | dmi_num = get_unaligned_le16(buf + 12); |
1186 | dmi_len = get_unaligned_le16(buf + 6); |
1187 | dmi_base = get_unaligned_le32(buf + 8); |
1188 | |
1189 | if (dmi_walk_early(dmi_decode) == 0) { |
1190 | if (smbios_ver) { |
1191 | - dmi_ver = smbios_ver; |
1192 | pr_info("SMBIOS %d.%d present.\n", |
1193 | dmi_ver >> 8, dmi_ver & 0xFF); |
1194 | } else { |
1195 | - dmi_ver = (buf[14] & 0xF0) << 4 | |
1196 | - (buf[14] & 0x0F); |
1197 | pr_info("Legacy DMI %d.%d present.\n", |
1198 | dmi_ver >> 8, dmi_ver & 0xFF); |
1199 | } |
1200 | diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c |
1201 | index 443518f63f15..a6b0def4bd7b 100644 |
1202 | --- a/drivers/gpio/gpio-kempld.c |
1203 | +++ b/drivers/gpio/gpio-kempld.c |
1204 | @@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset) |
1205 | = container_of(chip, struct kempld_gpio_data, chip); |
1206 | struct kempld_device_data *pld = gpio->pld; |
1207 | |
1208 | - return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); |
1209 | + return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset); |
1210 | } |
1211 | |
1212 | static int kempld_gpio_pincount(struct kempld_device_data *pld) |
1213 | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c |
1214 | index 498399323a8c..406624a0b201 100644 |
1215 | --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c |
1216 | +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c |
1217 | @@ -729,7 +729,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, |
1218 | kfd2kgd->get_max_engine_clock_in_mhz( |
1219 | dev->gpu->kgd)); |
1220 | sysfs_show_64bit_prop(buffer, "local_mem_size", |
1221 | - kfd2kgd->get_vmem_size(dev->gpu->kgd)); |
1222 | + (unsigned long long int) 0); |
1223 | |
1224 | sysfs_show_32bit_prop(buffer, "fw_version", |
1225 | kfd2kgd->get_fw_version( |
1226 | diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c |
1227 | index 5ba5792bfdba..98b125763ecd 100644 |
1228 | --- a/drivers/gpu/drm/drm_plane_helper.c |
1229 | +++ b/drivers/gpu/drm/drm_plane_helper.c |
1230 | @@ -476,6 +476,9 @@ int drm_plane_helper_commit(struct drm_plane *plane, |
1231 | if (!crtc[i]) |
1232 | continue; |
1233 | |
1234 | + if (crtc[i]->cursor == plane) |
1235 | + continue; |
1236 | + |
1237 | /* There's no other way to figure out whether the crtc is running. */ |
1238 | ret = drm_crtc_vblank_get(crtc[i]); |
1239 | if (ret == 0) { |
1240 | diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c |
1241 | index 1afc0b419da2..965a45619f6b 100644 |
1242 | --- a/drivers/gpu/drm/radeon/atombios_crtc.c |
1243 | +++ b/drivers/gpu/drm/radeon/atombios_crtc.c |
1244 | @@ -1789,7 +1789,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc) |
1245 | if ((crtc->mode.clock == test_crtc->mode.clock) && |
1246 | (adjusted_clock == test_adjusted_clock) && |
1247 | (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) && |
1248 | - (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)) |
1249 | + (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) && |
1250 | + (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) == |
1251 | + drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector)))) |
1252 | return test_radeon_crtc->pll_id; |
1253 | } |
1254 | } |
1255 | diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c |
1256 | index 8d74de82456e..8b2c4c890507 100644 |
1257 | --- a/drivers/gpu/drm/radeon/atombios_dp.c |
1258 | +++ b/drivers/gpu/drm/radeon/atombios_dp.c |
1259 | @@ -412,19 +412,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) |
1260 | { |
1261 | struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; |
1262 | u8 msg[DP_DPCD_SIZE]; |
1263 | - int ret; |
1264 | + int ret, i; |
1265 | |
1266 | - ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, |
1267 | - DP_DPCD_SIZE); |
1268 | - if (ret > 0) { |
1269 | - memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
1270 | + for (i = 0; i < 7; i++) { |
1271 | + ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, |
1272 | + DP_DPCD_SIZE); |
1273 | + if (ret == DP_DPCD_SIZE) { |
1274 | + memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
1275 | |
1276 | - DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), |
1277 | - dig_connector->dpcd); |
1278 | + DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), |
1279 | + dig_connector->dpcd); |
1280 | |
1281 | - radeon_dp_probe_oui(radeon_connector); |
1282 | + radeon_dp_probe_oui(radeon_connector); |
1283 | |
1284 | - return true; |
1285 | + return true; |
1286 | + } |
1287 | } |
1288 | dig_connector->dpcd[0] = 0; |
1289 | return false; |
1290 | diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c |
1291 | index 3e670d344a20..19aafb71fd8e 100644 |
1292 | --- a/drivers/gpu/drm/radeon/cik.c |
1293 | +++ b/drivers/gpu/drm/radeon/cik.c |
1294 | @@ -5804,7 +5804,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) |
1295 | /* restore context1-15 */ |
1296 | /* set vm size, must be a multiple of 4 */ |
1297 | WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
1298 | - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
1299 | + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); |
1300 | for (i = 1; i < 16; i++) { |
1301 | if (i < 8) |
1302 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), |
1303 | diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1304 | index 0926739c9fa7..9953356fe263 100644 |
1305 | --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1306 | +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1307 | @@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable) |
1308 | if (enable) { |
1309 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1310 | |
1311 | - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1312 | + if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1313 | WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, |
1314 | HDMI_AVI_INFO_SEND | /* enable AVI info frames */ |
1315 | HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */ |
1316 | @@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable) |
1317 | if (!dig || !dig->afmt) |
1318 | return; |
1319 | |
1320 | - if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1321 | + if (enable && connector && |
1322 | + drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1323 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); |
1324 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
1325 | struct radeon_connector_atom_dig *dig_connector; |
1326 | diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c |
1327 | index dab00812abaa..02d585455f49 100644 |
1328 | --- a/drivers/gpu/drm/radeon/ni.c |
1329 | +++ b/drivers/gpu/drm/radeon/ni.c |
1330 | @@ -1272,7 +1272,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev) |
1331 | */ |
1332 | for (i = 1; i < 8; i++) { |
1333 | WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0); |
1334 | - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn); |
1335 | + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), |
1336 | + rdev->vm_manager.max_pfn - 1); |
1337 | WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), |
1338 | rdev->vm_manager.saved_table_addr[i]); |
1339 | } |
1340 | diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c |
1341 | index b7c6bb69f3c7..88c04bc0a7f6 100644 |
1342 | --- a/drivers/gpu/drm/radeon/radeon_audio.c |
1343 | +++ b/drivers/gpu/drm/radeon/radeon_audio.c |
1344 | @@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector, |
1345 | if (!connector || !connector->encoder) |
1346 | return; |
1347 | |
1348 | - if (!radeon_encoder_is_digital(connector->encoder)) |
1349 | - return; |
1350 | - |
1351 | rdev = connector->encoder->dev->dev_private; |
1352 | |
1353 | if (!radeon_audio_chipset_supported(rdev)) |
1354 | @@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector, |
1355 | radeon_encoder = to_radeon_encoder(connector->encoder); |
1356 | dig = radeon_encoder->enc_priv; |
1357 | |
1358 | - if (!dig->afmt) |
1359 | - return; |
1360 | - |
1361 | if (status == connector_status_connected) { |
1362 | - struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
1363 | + struct radeon_connector *radeon_connector; |
1364 | + int sink_type; |
1365 | + |
1366 | + if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1367 | + radeon_encoder->audio = NULL; |
1368 | + return; |
1369 | + } |
1370 | + |
1371 | + radeon_connector = to_radeon_connector(connector); |
1372 | + sink_type = radeon_dp_getsinktype(radeon_connector); |
1373 | |
1374 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && |
1375 | - radeon_dp_getsinktype(radeon_connector) == |
1376 | - CONNECTOR_OBJECT_ID_DISPLAYPORT) |
1377 | + sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) |
1378 | radeon_encoder->audio = rdev->audio.dp_funcs; |
1379 | else |
1380 | radeon_encoder->audio = rdev->audio.hdmi_funcs; |
1381 | |
1382 | dig->afmt->pin = radeon_audio_get_pin(connector->encoder); |
1383 | - if (drm_detect_monitor_audio(radeon_connector_edid(connector))) { |
1384 | - radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
1385 | - } else { |
1386 | - radeon_audio_enable(rdev, dig->afmt->pin, 0); |
1387 | - dig->afmt->pin = NULL; |
1388 | - } |
1389 | + radeon_audio_enable(rdev, dig->afmt->pin, 0xf); |
1390 | } else { |
1391 | radeon_audio_enable(rdev, dig->afmt->pin, 0); |
1392 | dig->afmt->pin = NULL; |
1393 | diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c |
1394 | index 27973e3faf0e..27def67cb6be 100644 |
1395 | --- a/drivers/gpu/drm/radeon/radeon_connectors.c |
1396 | +++ b/drivers/gpu/drm/radeon/radeon_connectors.c |
1397 | @@ -1333,10 +1333,8 @@ out: |
1398 | /* updated in get modes as well since we need to know if it's analog or digital */ |
1399 | radeon_connector_update_scratch_regs(connector, ret); |
1400 | |
1401 | - if (radeon_audio != 0) { |
1402 | - radeon_connector_get_edid(connector); |
1403 | + if (radeon_audio != 0) |
1404 | radeon_audio_detect(connector, ret); |
1405 | - } |
1406 | |
1407 | exit: |
1408 | pm_runtime_mark_last_busy(connector->dev->dev); |
1409 | @@ -1661,10 +1659,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force) |
1410 | |
1411 | radeon_connector_update_scratch_regs(connector, ret); |
1412 | |
1413 | - if (radeon_audio != 0) { |
1414 | - radeon_connector_get_edid(connector); |
1415 | + if (radeon_audio != 0) |
1416 | radeon_audio_detect(connector, ret); |
1417 | - } |
1418 | |
1419 | out: |
1420 | pm_runtime_mark_last_busy(connector->dev->dev); |
1421 | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c |
1422 | index a7fb2735d4a9..f433491fab6f 100644 |
1423 | --- a/drivers/gpu/drm/radeon/si.c |
1424 | +++ b/drivers/gpu/drm/radeon/si.c |
1425 | @@ -4288,7 +4288,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev) |
1426 | /* empty context1-15 */ |
1427 | /* set vm size, must be a multiple of 4 */ |
1428 | WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); |
1429 | - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); |
1430 | + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1); |
1431 | /* Assign the pt base to something valid for now; the pts used for |
1432 | * the VMs are determined by the application and setup and assigned |
1433 | * on the fly in the vm part of radeon_gart.c |
1434 | diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c |
1435 | index e77658cd037c..2caf5b2f3446 100644 |
1436 | --- a/drivers/hid/hid-logitech-hidpp.c |
1437 | +++ b/drivers/hid/hid-logitech-hidpp.c |
1438 | @@ -39,7 +39,6 @@ MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>"); |
1439 | /* bits 1..20 are reserved for classes */ |
1440 | #define HIDPP_QUIRK_DELAYED_INIT BIT(21) |
1441 | #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22) |
1442 | -#define HIDPP_QUIRK_MULTI_INPUT BIT(23) |
1443 | |
1444 | /* |
1445 | * There are two hidpp protocols in use, the first version hidpp10 is known |
1446 | @@ -701,12 +700,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi, |
1447 | struct hid_field *field, struct hid_usage *usage, |
1448 | unsigned long **bit, int *max) |
1449 | { |
1450 | - struct hidpp_device *hidpp = hid_get_drvdata(hdev); |
1451 | - |
1452 | - if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && |
1453 | - (field->application == HID_GD_KEYBOARD)) |
1454 | - return 0; |
1455 | - |
1456 | return -1; |
1457 | } |
1458 | |
1459 | @@ -715,10 +708,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp, |
1460 | { |
1461 | struct wtp_data *wd = hidpp->private_data; |
1462 | |
1463 | - if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core) |
1464 | - /* this is the generic hid-input call */ |
1465 | - return; |
1466 | - |
1467 | __set_bit(EV_ABS, input_dev->evbit); |
1468 | __set_bit(EV_KEY, input_dev->evbit); |
1469 | __clear_bit(EV_REL, input_dev->evbit); |
1470 | @@ -1234,10 +1223,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) |
1471 | if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) |
1472 | connect_mask &= ~HID_CONNECT_HIDINPUT; |
1473 | |
1474 | - /* Re-enable hidinput for multi-input devices */ |
1475 | - if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) |
1476 | - connect_mask |= HID_CONNECT_HIDINPUT; |
1477 | - |
1478 | ret = hid_hw_start(hdev, connect_mask); |
1479 | if (ret) { |
1480 | hid_err(hdev, "%s:hid_hw_start returned error\n", __func__); |
1481 | @@ -1285,11 +1270,6 @@ static const struct hid_device_id hidpp_devices[] = { |
1482 | HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, |
1483 | USB_DEVICE_ID_LOGITECH_T651), |
1484 | .driver_data = HIDPP_QUIRK_CLASS_WTP }, |
1485 | - { /* Keyboard TK820 */ |
1486 | - HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, |
1487 | - USB_VENDOR_ID_LOGITECH, 0x4102), |
1488 | - .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT | |
1489 | - HIDPP_QUIRK_CLASS_WTP }, |
1490 | |
1491 | { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, |
1492 | USB_VENDOR_ID_LOGITECH, HID_ANY_ID)}, |
1493 | diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c |
1494 | index f3830db02d46..37f01702d081 100644 |
1495 | --- a/drivers/hwmon/nct6683.c |
1496 | +++ b/drivers/hwmon/nct6683.c |
1497 | @@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, |
1498 | (*t)->dev_attr.attr.name, tg->base + i); |
1499 | if ((*t)->s2) { |
1500 | a2 = &su->u.a2; |
1501 | + sysfs_attr_init(&a2->dev_attr.attr); |
1502 | a2->dev_attr.attr.name = su->name; |
1503 | a2->nr = (*t)->u.s.nr + i; |
1504 | a2->index = (*t)->u.s.index; |
1505 | @@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg, |
1506 | *attrs = &a2->dev_attr.attr; |
1507 | } else { |
1508 | a = &su->u.a1; |
1509 | + sysfs_attr_init(&a->dev_attr.attr); |
1510 | a->dev_attr.attr.name = su->name; |
1511 | a->index = (*t)->u.index + i; |
1512 | a->dev_attr.attr.mode = |
1513 | diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c |
1514 | index 1be41177b620..0773930c110e 100644 |
1515 | --- a/drivers/hwmon/nct6775.c |
1516 | +++ b/drivers/hwmon/nct6775.c |
1517 | @@ -994,6 +994,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, |
1518 | (*t)->dev_attr.attr.name, tg->base + i); |
1519 | if ((*t)->s2) { |
1520 | a2 = &su->u.a2; |
1521 | + sysfs_attr_init(&a2->dev_attr.attr); |
1522 | a2->dev_attr.attr.name = su->name; |
1523 | a2->nr = (*t)->u.s.nr + i; |
1524 | a2->index = (*t)->u.s.index; |
1525 | @@ -1004,6 +1005,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg, |
1526 | *attrs = &a2->dev_attr.attr; |
1527 | } else { |
1528 | a = &su->u.a1; |
1529 | + sysfs_attr_init(&a->dev_attr.attr); |
1530 | a->dev_attr.attr.name = su->name; |
1531 | a->index = (*t)->u.index + i; |
1532 | a->dev_attr.attr.mode = |
1533 | diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c |
1534 | index 112e4d45e4a0..68800115876b 100644 |
1535 | --- a/drivers/hwmon/ntc_thermistor.c |
1536 | +++ b/drivers/hwmon/ntc_thermistor.c |
1537 | @@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data * |
1538 | ntc_thermistor_parse_dt(struct platform_device *pdev) |
1539 | { |
1540 | struct iio_channel *chan; |
1541 | + enum iio_chan_type type; |
1542 | struct device_node *np = pdev->dev.of_node; |
1543 | struct ntc_thermistor_platform_data *pdata; |
1544 | + int ret; |
1545 | |
1546 | if (!np) |
1547 | return NULL; |
1548 | @@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) |
1549 | if (IS_ERR(chan)) |
1550 | return ERR_CAST(chan); |
1551 | |
1552 | + ret = iio_get_channel_type(chan, &type); |
1553 | + if (ret < 0) |
1554 | + return ERR_PTR(ret); |
1555 | + |
1556 | + if (type != IIO_VOLTAGE) |
1557 | + return ERR_PTR(-EINVAL); |
1558 | + |
1559 | if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv)) |
1560 | return ERR_PTR(-ENODEV); |
1561 | if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm)) |
1562 | diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c |
1563 | index 99664ebc738d..ccf4cffe0ee1 100644 |
1564 | --- a/drivers/hwmon/tmp401.c |
1565 | +++ b/drivers/hwmon/tmp401.c |
1566 | @@ -44,7 +44,7 @@ |
1567 | #include <linux/sysfs.h> |
1568 | |
1569 | /* Addresses to scan */ |
1570 | -static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d, |
1571 | +static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d, |
1572 | 0x4e, 0x4f, I2C_CLIENT_END }; |
1573 | |
1574 | enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 }; |
1575 | diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c |
1576 | index 53f32629283a..6805db0e4f07 100644 |
1577 | --- a/drivers/iio/accel/st_accel_core.c |
1578 | +++ b/drivers/iio/accel/st_accel_core.c |
1579 | @@ -465,6 +465,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev) |
1580 | |
1581 | indio_dev->modes = INDIO_DIRECT_MODE; |
1582 | indio_dev->info = &accel_info; |
1583 | + mutex_init(&adata->tb.buf_lock); |
1584 | |
1585 | st_sensors_power_enable(indio_dev); |
1586 | |
1587 | diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c |
1588 | index 08bcfb061ca5..56008a86b78f 100644 |
1589 | --- a/drivers/iio/adc/axp288_adc.c |
1590 | +++ b/drivers/iio/adc/axp288_adc.c |
1591 | @@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = { |
1592 | .channel = 0, |
1593 | .address = AXP288_TS_ADC_H, |
1594 | .datasheet_name = "TS_PIN", |
1595 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1596 | }, { |
1597 | .indexed = 1, |
1598 | .type = IIO_TEMP, |
1599 | .channel = 1, |
1600 | .address = AXP288_PMIC_ADC_H, |
1601 | .datasheet_name = "PMIC_TEMP", |
1602 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1603 | }, { |
1604 | .indexed = 1, |
1605 | .type = IIO_TEMP, |
1606 | .channel = 2, |
1607 | .address = AXP288_GP_ADC_H, |
1608 | .datasheet_name = "GPADC", |
1609 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1610 | }, { |
1611 | .indexed = 1, |
1612 | .type = IIO_CURRENT, |
1613 | .channel = 3, |
1614 | .address = AXP20X_BATT_CHRG_I_H, |
1615 | .datasheet_name = "BATT_CHG_I", |
1616 | - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), |
1617 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1618 | }, { |
1619 | .indexed = 1, |
1620 | .type = IIO_CURRENT, |
1621 | .channel = 4, |
1622 | .address = AXP20X_BATT_DISCHRG_I_H, |
1623 | .datasheet_name = "BATT_DISCHRG_I", |
1624 | - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), |
1625 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1626 | }, { |
1627 | .indexed = 1, |
1628 | .type = IIO_VOLTAGE, |
1629 | .channel = 5, |
1630 | .address = AXP20X_BATT_V_H, |
1631 | .datasheet_name = "BATT_V", |
1632 | - .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), |
1633 | + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1634 | }, |
1635 | }; |
1636 | |
1637 | @@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, |
1638 | chan->address)) |
1639 | dev_err(&indio_dev->dev, "TS pin restore\n"); |
1640 | break; |
1641 | - case IIO_CHAN_INFO_PROCESSED: |
1642 | - ret = axp288_adc_read_channel(val, chan->address, info->regmap); |
1643 | - break; |
1644 | default: |
1645 | ret = -EINVAL; |
1646 | } |
1647 | diff --git a/drivers/iio/adc/cc10001_adc.c b/drivers/iio/adc/cc10001_adc.c |
1648 | index 51e2a83c9404..115f6e99a7fa 100644 |
1649 | --- a/drivers/iio/adc/cc10001_adc.c |
1650 | +++ b/drivers/iio/adc/cc10001_adc.c |
1651 | @@ -35,8 +35,9 @@ |
1652 | #define CC10001_ADC_EOC_SET BIT(0) |
1653 | |
1654 | #define CC10001_ADC_CHSEL_SAMPLED 0x0c |
1655 | -#define CC10001_ADC_POWER_UP 0x10 |
1656 | -#define CC10001_ADC_POWER_UP_SET BIT(0) |
1657 | +#define CC10001_ADC_POWER_DOWN 0x10 |
1658 | +#define CC10001_ADC_POWER_DOWN_SET BIT(0) |
1659 | + |
1660 | #define CC10001_ADC_DEBUG 0x14 |
1661 | #define CC10001_ADC_DATA_COUNT 0x20 |
1662 | |
1663 | @@ -62,7 +63,6 @@ struct cc10001_adc_device { |
1664 | u16 *buf; |
1665 | |
1666 | struct mutex lock; |
1667 | - unsigned long channel_map; |
1668 | unsigned int start_delay_ns; |
1669 | unsigned int eoc_delay_ns; |
1670 | }; |
1671 | @@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev, |
1672 | return readl(adc_dev->reg_base + reg); |
1673 | } |
1674 | |
1675 | +static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev) |
1676 | +{ |
1677 | + cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0); |
1678 | + ndelay(adc_dev->start_delay_ns); |
1679 | +} |
1680 | + |
1681 | +static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev) |
1682 | +{ |
1683 | + cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, |
1684 | + CC10001_ADC_POWER_DOWN_SET); |
1685 | +} |
1686 | + |
1687 | static void cc10001_adc_start(struct cc10001_adc_device *adc_dev, |
1688 | unsigned int channel) |
1689 | { |
1690 | @@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev, |
1691 | val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV; |
1692 | cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val); |
1693 | |
1694 | + udelay(1); |
1695 | val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG); |
1696 | val = val | CC10001_ADC_START_CONV; |
1697 | cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val); |
1698 | @@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) |
1699 | struct iio_dev *indio_dev; |
1700 | unsigned int delay_ns; |
1701 | unsigned int channel; |
1702 | + unsigned int scan_idx; |
1703 | bool sample_invalid; |
1704 | u16 *data; |
1705 | int i; |
1706 | @@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) |
1707 | |
1708 | mutex_lock(&adc_dev->lock); |
1709 | |
1710 | - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, |
1711 | - CC10001_ADC_POWER_UP_SET); |
1712 | - |
1713 | - /* Wait for 8 (6+2) clock cycles before activating START */ |
1714 | - ndelay(adc_dev->start_delay_ns); |
1715 | + cc10001_adc_power_up(adc_dev); |
1716 | |
1717 | /* Calculate delay step for eoc and sampled data */ |
1718 | delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT; |
1719 | |
1720 | i = 0; |
1721 | sample_invalid = false; |
1722 | - for_each_set_bit(channel, indio_dev->active_scan_mask, |
1723 | + for_each_set_bit(scan_idx, indio_dev->active_scan_mask, |
1724 | indio_dev->masklength) { |
1725 | |
1726 | + channel = indio_dev->channels[scan_idx].channel; |
1727 | cc10001_adc_start(adc_dev, channel); |
1728 | |
1729 | data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns); |
1730 | @@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p) |
1731 | } |
1732 | |
1733 | done: |
1734 | - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0); |
1735 | + cc10001_adc_power_down(adc_dev); |
1736 | |
1737 | mutex_unlock(&adc_dev->lock); |
1738 | |
1739 | @@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev, |
1740 | unsigned int delay_ns; |
1741 | u16 val; |
1742 | |
1743 | - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, |
1744 | - CC10001_ADC_POWER_UP_SET); |
1745 | - |
1746 | - /* Wait for 8 (6+2) clock cycles before activating START */ |
1747 | - ndelay(adc_dev->start_delay_ns); |
1748 | + cc10001_adc_power_up(adc_dev); |
1749 | |
1750 | /* Calculate delay step for eoc and sampled data */ |
1751 | delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT; |
1752 | @@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev, |
1753 | |
1754 | val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns); |
1755 | |
1756 | - cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0); |
1757 | + cc10001_adc_power_down(adc_dev); |
1758 | |
1759 | return val; |
1760 | } |
1761 | @@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev, |
1762 | |
1763 | case IIO_CHAN_INFO_SCALE: |
1764 | ret = regulator_get_voltage(adc_dev->reg); |
1765 | - if (ret) |
1766 | + if (ret < 0) |
1767 | return ret; |
1768 | |
1769 | *val = ret / 1000; |
1770 | @@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = { |
1771 | .update_scan_mode = &cc10001_update_scan_mode, |
1772 | }; |
1773 | |
1774 | -static int cc10001_adc_channel_init(struct iio_dev *indio_dev) |
1775 | +static int cc10001_adc_channel_init(struct iio_dev *indio_dev, |
1776 | + unsigned long channel_map) |
1777 | { |
1778 | - struct cc10001_adc_device *adc_dev = iio_priv(indio_dev); |
1779 | struct iio_chan_spec *chan_array, *timestamp; |
1780 | unsigned int bit, idx = 0; |
1781 | |
1782 | - indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map, |
1783 | - CC10001_ADC_NUM_CHANNELS); |
1784 | + indio_dev->num_channels = bitmap_weight(&channel_map, |
1785 | + CC10001_ADC_NUM_CHANNELS) + 1; |
1786 | |
1787 | - chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1, |
1788 | + chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels, |
1789 | sizeof(struct iio_chan_spec), |
1790 | GFP_KERNEL); |
1791 | if (!chan_array) |
1792 | return -ENOMEM; |
1793 | |
1794 | - for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) { |
1795 | + for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) { |
1796 | struct iio_chan_spec *chan = &chan_array[idx]; |
1797 | |
1798 | chan->type = IIO_VOLTAGE; |
1799 | @@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev) |
1800 | unsigned long adc_clk_rate; |
1801 | struct resource *res; |
1802 | struct iio_dev *indio_dev; |
1803 | + unsigned long channel_map; |
1804 | int ret; |
1805 | |
1806 | indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev)); |
1807 | @@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev) |
1808 | |
1809 | adc_dev = iio_priv(indio_dev); |
1810 | |
1811 | - adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0); |
1812 | + channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0); |
1813 | if (!of_property_read_u32(node, "adc-reserved-channels", &ret)) |
1814 | - adc_dev->channel_map &= ~ret; |
1815 | + channel_map &= ~ret; |
1816 | |
1817 | adc_dev->reg = devm_regulator_get(&pdev->dev, "vref"); |
1818 | if (IS_ERR(adc_dev->reg)) |
1819 | @@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev) |
1820 | adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES; |
1821 | |
1822 | /* Setup the ADC channels available on the device */ |
1823 | - ret = cc10001_adc_channel_init(indio_dev); |
1824 | + ret = cc10001_adc_channel_init(indio_dev, channel_map); |
1825 | if (ret < 0) |
1826 | goto err_disable_clk; |
1827 | |
1828 | diff --git a/drivers/iio/adc/qcom-spmi-vadc.c b/drivers/iio/adc/qcom-spmi-vadc.c |
1829 | index 3211729bcb0b..0c4618b4d515 100644 |
1830 | --- a/drivers/iio/adc/qcom-spmi-vadc.c |
1831 | +++ b/drivers/iio/adc/qcom-spmi-vadc.c |
1832 | @@ -18,6 +18,7 @@ |
1833 | #include <linux/iio/iio.h> |
1834 | #include <linux/interrupt.h> |
1835 | #include <linux/kernel.h> |
1836 | +#include <linux/math64.h> |
1837 | #include <linux/module.h> |
1838 | #include <linux/of.h> |
1839 | #include <linux/platform_device.h> |
1840 | @@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc, |
1841 | const struct vadc_channel_prop *prop, u16 adc_code) |
1842 | { |
1843 | const struct vadc_prescale_ratio *prescale; |
1844 | - s32 voltage; |
1845 | + s64 voltage; |
1846 | |
1847 | voltage = adc_code - vadc->graph[prop->calibration].gnd; |
1848 | voltage *= vadc->graph[prop->calibration].dx; |
1849 | - voltage = voltage / vadc->graph[prop->calibration].dy; |
1850 | + voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy); |
1851 | |
1852 | if (prop->calibration == VADC_CALIB_ABSOLUTE) |
1853 | voltage += vadc->graph[prop->calibration].dx; |
1854 | @@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc, |
1855 | |
1856 | voltage = voltage * prescale->den; |
1857 | |
1858 | - return voltage / prescale->num; |
1859 | + return div64_s64(voltage, prescale->num); |
1860 | } |
1861 | |
1862 | static int vadc_decimation_from_dt(u32 value) |
1863 | diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c |
1864 | index a221f7329b79..ce93bd8e3f68 100644 |
1865 | --- a/drivers/iio/adc/xilinx-xadc-core.c |
1866 | +++ b/drivers/iio/adc/xilinx-xadc-core.c |
1867 | @@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev, |
1868 | switch (chan->address) { |
1869 | case XADC_REG_VCCINT: |
1870 | case XADC_REG_VCCAUX: |
1871 | + case XADC_REG_VREFP: |
1872 | case XADC_REG_VCCBRAM: |
1873 | case XADC_REG_VCCPINT: |
1874 | case XADC_REG_VCCPAUX: |
1875 | @@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = { |
1876 | .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \ |
1877 | .scan_index = (_scan_index), \ |
1878 | .scan_type = { \ |
1879 | - .sign = 'u', \ |
1880 | + .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \ |
1881 | .realbits = 12, \ |
1882 | .storagebits = 16, \ |
1883 | .shift = 4, \ |
1884 | @@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = { |
1885 | static const struct iio_chan_spec xadc_channels[] = { |
1886 | XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP), |
1887 | XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true), |
1888 | - XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true), |
1889 | + XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true), |
1890 | XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true), |
1891 | XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true), |
1892 | XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true), |
1893 | diff --git a/drivers/iio/adc/xilinx-xadc.h b/drivers/iio/adc/xilinx-xadc.h |
1894 | index c7487e8d7f80..54adc5087210 100644 |
1895 | --- a/drivers/iio/adc/xilinx-xadc.h |
1896 | +++ b/drivers/iio/adc/xilinx-xadc.h |
1897 | @@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg, |
1898 | #define XADC_REG_MAX_VCCPINT 0x28 |
1899 | #define XADC_REG_MAX_VCCPAUX 0x29 |
1900 | #define XADC_REG_MAX_VCCO_DDR 0x2a |
1901 | -#define XADC_REG_MIN_VCCPINT 0x2b |
1902 | -#define XADC_REG_MIN_VCCPAUX 0x2c |
1903 | -#define XADC_REG_MIN_VCCO_DDR 0x2d |
1904 | +#define XADC_REG_MIN_VCCPINT 0x2c |
1905 | +#define XADC_REG_MIN_VCCPAUX 0x2d |
1906 | +#define XADC_REG_MIN_VCCO_DDR 0x2e |
1907 | |
1908 | #define XADC_REG_CONF0 0x40 |
1909 | #define XADC_REG_CONF1 0x41 |
1910 | diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c |
1911 | index edd13d2b4121..8dd0477e201c 100644 |
1912 | --- a/drivers/iio/common/st_sensors/st_sensors_core.c |
1913 | +++ b/drivers/iio/common/st_sensors/st_sensors_core.c |
1914 | @@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, |
1915 | struct st_sensors_platform_data *of_pdata; |
1916 | int err = 0; |
1917 | |
1918 | - mutex_init(&sdata->tb.buf_lock); |
1919 | - |
1920 | /* If OF/DT pdata exists, it will take precedence of anything else */ |
1921 | of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata); |
1922 | if (of_pdata) |
1923 | diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c |
1924 | index f07a2336f7dc..566f7d2df031 100644 |
1925 | --- a/drivers/iio/gyro/st_gyro_core.c |
1926 | +++ b/drivers/iio/gyro/st_gyro_core.c |
1927 | @@ -317,6 +317,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev) |
1928 | |
1929 | indio_dev->modes = INDIO_DIRECT_MODE; |
1930 | indio_dev->info = &gyro_info; |
1931 | + mutex_init(&gdata->tb.buf_lock); |
1932 | |
1933 | st_sensors_power_enable(indio_dev); |
1934 | |
1935 | diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c |
1936 | index 3ecf79ed08ac..88f21bbe947c 100644 |
1937 | --- a/drivers/iio/light/hid-sensor-prox.c |
1938 | +++ b/drivers/iio/light/hid-sensor-prox.c |
1939 | @@ -43,8 +43,6 @@ struct prox_state { |
1940 | static const struct iio_chan_spec prox_channels[] = { |
1941 | { |
1942 | .type = IIO_PROXIMITY, |
1943 | - .modified = 1, |
1944 | - .channel2 = IIO_NO_MOD, |
1945 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1946 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | |
1947 | BIT(IIO_CHAN_INFO_SCALE) | |
1948 | diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c |
1949 | index 8ade473f99fe..2e56f812a644 100644 |
1950 | --- a/drivers/iio/magnetometer/st_magn_core.c |
1951 | +++ b/drivers/iio/magnetometer/st_magn_core.c |
1952 | @@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev) |
1953 | |
1954 | indio_dev->modes = INDIO_DIRECT_MODE; |
1955 | indio_dev->info = &magn_info; |
1956 | + mutex_init(&mdata->tb.buf_lock); |
1957 | |
1958 | st_sensors_power_enable(indio_dev); |
1959 | |
1960 | diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c |
1961 | index 1af314926ebd..476a7d03d2ce 100644 |
1962 | --- a/drivers/iio/pressure/hid-sensor-press.c |
1963 | +++ b/drivers/iio/pressure/hid-sensor-press.c |
1964 | @@ -47,8 +47,6 @@ struct press_state { |
1965 | static const struct iio_chan_spec press_channels[] = { |
1966 | { |
1967 | .type = IIO_PRESSURE, |
1968 | - .modified = 1, |
1969 | - .channel2 = IIO_NO_MOD, |
1970 | .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), |
1971 | .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) | |
1972 | BIT(IIO_CHAN_INFO_SCALE) | |
1973 | diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c |
1974 | index 97baf40d424b..e881fa6291e9 100644 |
1975 | --- a/drivers/iio/pressure/st_pressure_core.c |
1976 | +++ b/drivers/iio/pressure/st_pressure_core.c |
1977 | @@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev) |
1978 | |
1979 | indio_dev->modes = INDIO_DIRECT_MODE; |
1980 | indio_dev->info = &press_info; |
1981 | + mutex_init(&press_data->tb.buf_lock); |
1982 | |
1983 | st_sensors_power_enable(indio_dev); |
1984 | |
1985 | diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c |
1986 | index b85ddbc979e0..e5558b2660f2 100644 |
1987 | --- a/drivers/infiniband/core/iwpm_msg.c |
1988 | +++ b/drivers/infiniband/core/iwpm_msg.c |
1989 | @@ -33,7 +33,7 @@ |
1990 | |
1991 | #include "iwpm_util.h" |
1992 | |
1993 | -static const char iwpm_ulib_name[] = "iWarpPortMapperUser"; |
1994 | +static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser"; |
1995 | static int iwpm_ulib_version = 3; |
1996 | static int iwpm_user_pid = IWPM_PID_UNDEFINED; |
1997 | static atomic_t echo_nlmsg_seq; |
1998 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
1999 | index 991dc6b20a58..79363b687195 100644 |
2000 | --- a/drivers/input/mouse/elantech.c |
2001 | +++ b/drivers/input/mouse/elantech.c |
2002 | @@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev, |
2003 | unsigned int x2, unsigned int y2) |
2004 | { |
2005 | elantech_set_slot(dev, 0, num_fingers != 0, x1, y1); |
2006 | - elantech_set_slot(dev, 1, num_fingers == 2, x2, y2); |
2007 | + elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2); |
2008 | } |
2009 | |
2010 | /* |
2011 | diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c |
2012 | index 6d5a5c44453b..173e70dbf61b 100644 |
2013 | --- a/drivers/iommu/amd_iommu_v2.c |
2014 | +++ b/drivers/iommu/amd_iommu_v2.c |
2015 | @@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state) |
2016 | |
2017 | static void put_pasid_state_wait(struct pasid_state *pasid_state) |
2018 | { |
2019 | + atomic_dec(&pasid_state->count); |
2020 | wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); |
2021 | free_pasid_state(pasid_state); |
2022 | } |
2023 | diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c |
2024 | index a3adde6519f0..bd6252b01510 100644 |
2025 | --- a/drivers/iommu/arm-smmu.c |
2026 | +++ b/drivers/iommu/arm-smmu.c |
2027 | @@ -224,14 +224,7 @@ |
2028 | #define RESUME_TERMINATE (1 << 0) |
2029 | |
2030 | #define TTBCR2_SEP_SHIFT 15 |
2031 | -#define TTBCR2_SEP_MASK 0x7 |
2032 | - |
2033 | -#define TTBCR2_ADDR_32 0 |
2034 | -#define TTBCR2_ADDR_36 1 |
2035 | -#define TTBCR2_ADDR_40 2 |
2036 | -#define TTBCR2_ADDR_42 3 |
2037 | -#define TTBCR2_ADDR_44 4 |
2038 | -#define TTBCR2_ADDR_48 5 |
2039 | +#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) |
2040 | |
2041 | #define TTBRn_HI_ASID_SHIFT 16 |
2042 | |
2043 | @@ -783,26 +776,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, |
2044 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); |
2045 | if (smmu->version > ARM_SMMU_V1) { |
2046 | reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; |
2047 | - switch (smmu->va_size) { |
2048 | - case 32: |
2049 | - reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); |
2050 | - break; |
2051 | - case 36: |
2052 | - reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); |
2053 | - break; |
2054 | - case 40: |
2055 | - reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); |
2056 | - break; |
2057 | - case 42: |
2058 | - reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); |
2059 | - break; |
2060 | - case 44: |
2061 | - reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); |
2062 | - break; |
2063 | - case 48: |
2064 | - reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); |
2065 | - break; |
2066 | - } |
2067 | + reg |= TTBCR2_SEP_UPSTREAM; |
2068 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); |
2069 | } |
2070 | } else { |
2071 | diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c |
2072 | index 7dc93aa004c8..312ffd3d0017 100644 |
2073 | --- a/drivers/lguest/core.c |
2074 | +++ b/drivers/lguest/core.c |
2075 | @@ -173,7 +173,7 @@ static void unmap_switcher(void) |
2076 | bool lguest_address_ok(const struct lguest *lg, |
2077 | unsigned long addr, unsigned long len) |
2078 | { |
2079 | - return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr); |
2080 | + return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr); |
2081 | } |
2082 | |
2083 | /* |
2084 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c |
2085 | index 6554d9148927..757f1ba34c4d 100644 |
2086 | --- a/drivers/md/dm-table.c |
2087 | +++ b/drivers/md/dm-table.c |
2088 | @@ -823,6 +823,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args) |
2089 | } |
2090 | EXPORT_SYMBOL(dm_consume_args); |
2091 | |
2092 | +static bool __table_type_request_based(unsigned table_type) |
2093 | +{ |
2094 | + return (table_type == DM_TYPE_REQUEST_BASED || |
2095 | + table_type == DM_TYPE_MQ_REQUEST_BASED); |
2096 | +} |
2097 | + |
2098 | static int dm_table_set_type(struct dm_table *t) |
2099 | { |
2100 | unsigned i; |
2101 | @@ -855,8 +861,7 @@ static int dm_table_set_type(struct dm_table *t) |
2102 | * Determine the type from the live device. |
2103 | * Default to bio-based if device is new. |
2104 | */ |
2105 | - if (live_md_type == DM_TYPE_REQUEST_BASED || |
2106 | - live_md_type == DM_TYPE_MQ_REQUEST_BASED) |
2107 | + if (__table_type_request_based(live_md_type)) |
2108 | request_based = 1; |
2109 | else |
2110 | bio_based = 1; |
2111 | @@ -906,7 +911,7 @@ static int dm_table_set_type(struct dm_table *t) |
2112 | } |
2113 | t->type = DM_TYPE_MQ_REQUEST_BASED; |
2114 | |
2115 | - } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) { |
2116 | + } else if (list_empty(devices) && __table_type_request_based(live_md_type)) { |
2117 | /* inherit live MD type */ |
2118 | t->type = live_md_type; |
2119 | |
2120 | @@ -928,10 +933,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t) |
2121 | |
2122 | bool dm_table_request_based(struct dm_table *t) |
2123 | { |
2124 | - unsigned table_type = dm_table_get_type(t); |
2125 | - |
2126 | - return (table_type == DM_TYPE_REQUEST_BASED || |
2127 | - table_type == DM_TYPE_MQ_REQUEST_BASED); |
2128 | + return __table_type_request_based(dm_table_get_type(t)); |
2129 | } |
2130 | |
2131 | bool dm_table_mq_request_based(struct dm_table *t) |
2132 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
2133 | index 8001fe9e3434..9b4e30a82e4a 100644 |
2134 | --- a/drivers/md/dm.c |
2135 | +++ b/drivers/md/dm.c |
2136 | @@ -1642,8 +1642,7 @@ static int dm_merge_bvec(struct request_queue *q, |
2137 | struct mapped_device *md = q->queuedata; |
2138 | struct dm_table *map = dm_get_live_table_fast(md); |
2139 | struct dm_target *ti; |
2140 | - sector_t max_sectors; |
2141 | - int max_size = 0; |
2142 | + sector_t max_sectors, max_size = 0; |
2143 | |
2144 | if (unlikely(!map)) |
2145 | goto out; |
2146 | @@ -1658,8 +1657,16 @@ static int dm_merge_bvec(struct request_queue *q, |
2147 | max_sectors = min(max_io_len(bvm->bi_sector, ti), |
2148 | (sector_t) queue_max_sectors(q)); |
2149 | max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; |
2150 | - if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */ |
2151 | - max_size = 0; |
2152 | + |
2153 | + /* |
2154 | + * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t |
2155 | + * to the targets' merge function since it holds sectors not bytes). |
2156 | + * Just doing this as an interim fix for stable@ because the more |
2157 | + * comprehensive cleanup of switching to sector_t will impact every |
2158 | + * DM target that implements a ->merge hook. |
2159 | + */ |
2160 | + if (max_size > INT_MAX) |
2161 | + max_size = INT_MAX; |
2162 | |
2163 | /* |
2164 | * merge_bvec_fn() returns number of bytes |
2165 | @@ -1667,7 +1674,7 @@ static int dm_merge_bvec(struct request_queue *q, |
2166 | * max is precomputed maximal io size |
2167 | */ |
2168 | if (max_size && ti->type->merge) |
2169 | - max_size = ti->type->merge(ti, bvm, biovec, max_size); |
2170 | + max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); |
2171 | /* |
2172 | * If the target doesn't support merge method and some of the devices |
2173 | * provided their merge_bvec method (we know this by looking for the |
2174 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
2175 | index e47d1dd046da..907534b7f40d 100644 |
2176 | --- a/drivers/md/md.c |
2177 | +++ b/drivers/md/md.c |
2178 | @@ -4138,12 +4138,12 @@ action_store(struct mddev *mddev, const char *page, size_t len) |
2179 | if (!mddev->pers || !mddev->pers->sync_request) |
2180 | return -EINVAL; |
2181 | |
2182 | - if (cmd_match(page, "frozen")) |
2183 | - set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2184 | - else |
2185 | - clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2186 | |
2187 | if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { |
2188 | + if (cmd_match(page, "frozen")) |
2189 | + set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2190 | + else |
2191 | + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2192 | flush_workqueue(md_misc_wq); |
2193 | if (mddev->sync_thread) { |
2194 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
2195 | @@ -4156,16 +4156,17 @@ action_store(struct mddev *mddev, const char *page, size_t len) |
2196 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) |
2197 | return -EBUSY; |
2198 | else if (cmd_match(page, "resync")) |
2199 | - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
2200 | + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2201 | else if (cmd_match(page, "recover")) { |
2202 | + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2203 | set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); |
2204 | - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
2205 | } else if (cmd_match(page, "reshape")) { |
2206 | int err; |
2207 | if (mddev->pers->start_reshape == NULL) |
2208 | return -EINVAL; |
2209 | err = mddev_lock(mddev); |
2210 | if (!err) { |
2211 | + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2212 | err = mddev->pers->start_reshape(mddev); |
2213 | mddev_unlock(mddev); |
2214 | } |
2215 | @@ -4177,6 +4178,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) |
2216 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
2217 | else if (!cmd_match(page, "repair")) |
2218 | return -EINVAL; |
2219 | + clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); |
2220 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); |
2221 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
2222 | } |
2223 | diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c |
2224 | index 3b5d7f704aa3..903391ce9353 100644 |
2225 | --- a/drivers/md/raid0.c |
2226 | +++ b/drivers/md/raid0.c |
2227 | @@ -517,6 +517,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
2228 | ? (sector & (chunk_sects-1)) |
2229 | : sector_div(sector, chunk_sects)); |
2230 | |
2231 | + /* Restore due to sector_div */ |
2232 | + sector = bio->bi_iter.bi_sector; |
2233 | + |
2234 | if (sectors < bio_sectors(bio)) { |
2235 | split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); |
2236 | bio_chain(split, bio); |
2237 | @@ -524,7 +527,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
2238 | split = bio; |
2239 | } |
2240 | |
2241 | - sector = bio->bi_iter.bi_sector; |
2242 | zone = find_zone(mddev->private, §or); |
2243 | tmp_dev = map_sector(mddev, zone, sector, §or); |
2244 | split->bi_bdev = tmp_dev->bdev; |
2245 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
2246 | index cd2f96b2c572..007ab861eca0 100644 |
2247 | --- a/drivers/md/raid5.c |
2248 | +++ b/drivers/md/raid5.c |
2249 | @@ -1933,7 +1933,8 @@ static int resize_stripes(struct r5conf *conf, int newsize) |
2250 | |
2251 | conf->slab_cache = sc; |
2252 | conf->active_name = 1-conf->active_name; |
2253 | - conf->pool_size = newsize; |
2254 | + if (!err) |
2255 | + conf->pool_size = newsize; |
2256 | return err; |
2257 | } |
2258 | |
2259 | diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c |
2260 | index ae498b53ee40..46e3840c7a37 100644 |
2261 | --- a/drivers/mfd/da9052-core.c |
2262 | +++ b/drivers/mfd/da9052-core.c |
2263 | @@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp); |
2264 | static const struct mfd_cell da9052_subdev_info[] = { |
2265 | { |
2266 | .name = "da9052-regulator", |
2267 | + .id = 0, |
2268 | + }, |
2269 | + { |
2270 | + .name = "da9052-regulator", |
2271 | .id = 1, |
2272 | }, |
2273 | { |
2274 | @@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = { |
2275 | .id = 13, |
2276 | }, |
2277 | { |
2278 | - .name = "da9052-regulator", |
2279 | - .id = 14, |
2280 | - }, |
2281 | - { |
2282 | .name = "da9052-onkey", |
2283 | }, |
2284 | { |
2285 | diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c |
2286 | index 03d7c7521d97..9a39e0b7e583 100644 |
2287 | --- a/drivers/mmc/host/atmel-mci.c |
2288 | +++ b/drivers/mmc/host/atmel-mci.c |
2289 | @@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
2290 | |
2291 | if (ios->clock) { |
2292 | unsigned int clock_min = ~0U; |
2293 | - u32 clkdiv; |
2294 | + int clkdiv; |
2295 | |
2296 | spin_lock_bh(&host->lock); |
2297 | if (!host->mode_reg) { |
2298 | @@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
2299 | /* Calculate clock divider */ |
2300 | if (host->caps.has_odd_clk_div) { |
2301 | clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; |
2302 | - if (clkdiv > 511) { |
2303 | + if (clkdiv < 0) { |
2304 | + dev_warn(&mmc->class_dev, |
2305 | + "clock %u too fast; using %lu\n", |
2306 | + clock_min, host->bus_hz / 2); |
2307 | + clkdiv = 0; |
2308 | + } else if (clkdiv > 511) { |
2309 | dev_warn(&mmc->class_dev, |
2310 | "clock %u too slow; using %lu\n", |
2311 | clock_min, host->bus_hz / (511 + 2)); |
2312 | diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c |
2313 | index db2c05b6fe7f..c9eb78f10a0d 100644 |
2314 | --- a/drivers/mtd/ubi/block.c |
2315 | +++ b/drivers/mtd/ubi/block.c |
2316 | @@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work) |
2317 | blk_rq_map_sg(req->q, req, pdu->usgl.sg); |
2318 | |
2319 | ret = ubiblock_read(pdu); |
2320 | + rq_flush_dcache_pages(req); |
2321 | + |
2322 | blk_mq_end_request(req, ret); |
2323 | } |
2324 | |
2325 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2326 | index 6262612dec45..7a3231d8b933 100644 |
2327 | --- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2328 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c |
2329 | @@ -512,11 +512,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx, |
2330 | msgbuf->rx_pktids, |
2331 | msgbuf->ioctl_resp_pktid); |
2332 | if (msgbuf->ioctl_resp_ret_len != 0) { |
2333 | - if (!skb) { |
2334 | - brcmf_err("Invalid packet id idx recv'd %d\n", |
2335 | - msgbuf->ioctl_resp_pktid); |
2336 | + if (!skb) |
2337 | return -EBADF; |
2338 | - } |
2339 | + |
2340 | memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ? |
2341 | len : msgbuf->ioctl_resp_ret_len); |
2342 | } |
2343 | @@ -875,10 +873,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf) |
2344 | flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS; |
2345 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
2346 | msgbuf->tx_pktids, idx); |
2347 | - if (!skb) { |
2348 | - brcmf_err("Invalid packet id idx recv'd %d\n", idx); |
2349 | + if (!skb) |
2350 | return; |
2351 | - } |
2352 | |
2353 | set_bit(flowid, msgbuf->txstatus_done_map); |
2354 | commonring = msgbuf->flowrings[flowid]; |
2355 | @@ -1157,6 +1153,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) |
2356 | |
2357 | skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev, |
2358 | msgbuf->rx_pktids, idx); |
2359 | + if (!skb) |
2360 | + return; |
2361 | |
2362 | if (data_offset) |
2363 | skb_pull(skb, data_offset); |
2364 | diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c |
2365 | index 14e8fd661889..fd5a0bb1493f 100644 |
2366 | --- a/drivers/net/wireless/iwlwifi/mvm/d3.c |
2367 | +++ b/drivers/net/wireless/iwlwifi/mvm/d3.c |
2368 | @@ -1742,8 +1742,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, |
2369 | int i, j, n_matches, ret; |
2370 | |
2371 | fw_status = iwl_mvm_get_wakeup_status(mvm, vif); |
2372 | - if (!IS_ERR_OR_NULL(fw_status)) |
2373 | + if (!IS_ERR_OR_NULL(fw_status)) { |
2374 | reasons = le32_to_cpu(fw_status->wakeup_reasons); |
2375 | + kfree(fw_status); |
2376 | + } |
2377 | |
2378 | if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED) |
2379 | wakeup.rfkill_release = true; |
2380 | @@ -1860,15 +1862,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) |
2381 | /* get the BSS vif pointer again */ |
2382 | vif = iwl_mvm_get_bss_vif(mvm); |
2383 | if (IS_ERR_OR_NULL(vif)) |
2384 | - goto out_unlock; |
2385 | + goto err; |
2386 | |
2387 | ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); |
2388 | if (ret) |
2389 | - goto out_unlock; |
2390 | + goto err; |
2391 | |
2392 | if (d3_status != IWL_D3_STATUS_ALIVE) { |
2393 | IWL_INFO(mvm, "Device was reset during suspend\n"); |
2394 | - goto out_unlock; |
2395 | + goto err; |
2396 | } |
2397 | |
2398 | /* query SRAM first in case we want event logging */ |
2399 | @@ -1886,7 +1888,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) |
2400 | /* has unlocked the mutex, so skip that */ |
2401 | goto out; |
2402 | |
2403 | - out_unlock: |
2404 | +err: |
2405 | + iwl_mvm_free_nd(mvm); |
2406 | mutex_unlock(&mvm->mutex); |
2407 | |
2408 | out: |
2409 | diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c |
2410 | index 69935aa5a1b3..cb72edb3d16a 100644 |
2411 | --- a/drivers/net/wireless/iwlwifi/pcie/trans.c |
2412 | +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c |
2413 | @@ -5,8 +5,8 @@ |
2414 | * |
2415 | * GPL LICENSE SUMMARY |
2416 | * |
2417 | - * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. |
2418 | - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
2419 | + * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved. |
2420 | + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
2421 | * |
2422 | * This program is free software; you can redistribute it and/or modify |
2423 | * it under the terms of version 2 of the GNU General Public License as |
2424 | @@ -31,8 +31,8 @@ |
2425 | * |
2426 | * BSD LICENSE |
2427 | * |
2428 | - * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
2429 | - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
2430 | + * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved. |
2431 | + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
2432 | * All rights reserved. |
2433 | * |
2434 | * Redistribution and use in source and binary forms, with or without |
2435 | @@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) |
2436 | static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) |
2437 | { |
2438 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
2439 | - struct page *page; |
2440 | + struct page *page = NULL; |
2441 | dma_addr_t phys; |
2442 | u32 size; |
2443 | u8 power; |
2444 | @@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans) |
2445 | DMA_FROM_DEVICE); |
2446 | if (dma_mapping_error(trans->dev, phys)) { |
2447 | __free_pages(page, order); |
2448 | + page = NULL; |
2449 | continue; |
2450 | } |
2451 | IWL_INFO(trans, |
2452 | diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c |
2453 | index 8444313eabe2..8694dddcce9a 100644 |
2454 | --- a/drivers/net/wireless/rt2x00/rt2800usb.c |
2455 | +++ b/drivers/net/wireless/rt2x00/rt2800usb.c |
2456 | @@ -1040,6 +1040,7 @@ static struct usb_device_id rt2800usb_device_table[] = { |
2457 | { USB_DEVICE(0x07d1, 0x3c17) }, |
2458 | { USB_DEVICE(0x2001, 0x3317) }, |
2459 | { USB_DEVICE(0x2001, 0x3c1b) }, |
2460 | + { USB_DEVICE(0x2001, 0x3c25) }, |
2461 | /* Draytek */ |
2462 | { USB_DEVICE(0x07fa, 0x7712) }, |
2463 | /* DVICO */ |
2464 | diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c |
2465 | index 46ee956d0235..27cd6cabf6c5 100644 |
2466 | --- a/drivers/net/wireless/rtlwifi/usb.c |
2467 | +++ b/drivers/net/wireless/rtlwifi/usb.c |
2468 | @@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request, |
2469 | |
2470 | do { |
2471 | status = usb_control_msg(udev, pipe, request, reqtype, value, |
2472 | - index, pdata, len, 0); /*max. timeout*/ |
2473 | + index, pdata, len, 1000); |
2474 | if (status < 0) { |
2475 | /* firmware download is checksumed, don't retry */ |
2476 | if ((value >= FW_8192C_START_ADDRESS && |
2477 | diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c |
2478 | index 13584e24736a..4d7d60e593b8 100644 |
2479 | --- a/drivers/power/reset/at91-reset.c |
2480 | +++ b/drivers/power/reset/at91-reset.c |
2481 | @@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev) |
2482 | res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 ); |
2483 | at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start, |
2484 | resource_size(res)); |
2485 | - if (IS_ERR(at91_ramc_base[idx])) { |
2486 | + if (!at91_ramc_base[idx]) { |
2487 | dev_err(&pdev->dev, "Could not map ram controller address\n"); |
2488 | - return PTR_ERR(at91_ramc_base[idx]); |
2489 | + return -ENOMEM; |
2490 | } |
2491 | } |
2492 | |
2493 | diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c |
2494 | index 476171a768d6..8a029f9bc18c 100644 |
2495 | --- a/drivers/pwm/pwm-img.c |
2496 | +++ b/drivers/pwm/pwm-img.c |
2497 | @@ -16,6 +16,7 @@ |
2498 | #include <linux/mfd/syscon.h> |
2499 | #include <linux/module.h> |
2500 | #include <linux/of.h> |
2501 | +#include <linux/of_device.h> |
2502 | #include <linux/platform_device.h> |
2503 | #include <linux/pwm.h> |
2504 | #include <linux/regmap.h> |
2505 | @@ -38,7 +39,22 @@ |
2506 | #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1 |
2507 | #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4) |
2508 | |
2509 | -#define MAX_TMBASE_STEPS 65536 |
2510 | +/* |
2511 | + * PWM period is specified with a timebase register, |
2512 | + * in number of step periods. The PWM duty cycle is also |
2513 | + * specified in step periods, in the [0, $timebase] range. |
2514 | + * In other words, the timebase imposes the duty cycle |
2515 | + * resolution. Therefore, let's constraint the timebase to |
2516 | + * a minimum value to allow a sane range of duty cycle values. |
2517 | + * Imposing a minimum timebase, will impose a maximum PWM frequency. |
2518 | + * |
2519 | + * The value chosen is completely arbitrary. |
2520 | + */ |
2521 | +#define MIN_TMBASE_STEPS 16 |
2522 | + |
2523 | +struct img_pwm_soc_data { |
2524 | + u32 max_timebase; |
2525 | +}; |
2526 | |
2527 | struct img_pwm_chip { |
2528 | struct device *dev; |
2529 | @@ -47,6 +63,9 @@ struct img_pwm_chip { |
2530 | struct clk *sys_clk; |
2531 | void __iomem *base; |
2532 | struct regmap *periph_regs; |
2533 | + int max_period_ns; |
2534 | + int min_period_ns; |
2535 | + const struct img_pwm_soc_data *data; |
2536 | }; |
2537 | |
2538 | static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip) |
2539 | @@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, |
2540 | u32 val, div, duty, timebase; |
2541 | unsigned long mul, output_clk_hz, input_clk_hz; |
2542 | struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); |
2543 | + unsigned int max_timebase = pwm_chip->data->max_timebase; |
2544 | + |
2545 | + if (period_ns < pwm_chip->min_period_ns || |
2546 | + period_ns > pwm_chip->max_period_ns) { |
2547 | + dev_err(chip->dev, "configured period not in range\n"); |
2548 | + return -ERANGE; |
2549 | + } |
2550 | |
2551 | input_clk_hz = clk_get_rate(pwm_chip->pwm_clk); |
2552 | output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns); |
2553 | |
2554 | mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz); |
2555 | - if (mul <= MAX_TMBASE_STEPS) { |
2556 | + if (mul <= max_timebase) { |
2557 | div = PWM_CTRL_CFG_NO_SUB_DIV; |
2558 | timebase = DIV_ROUND_UP(mul, 1); |
2559 | - } else if (mul <= MAX_TMBASE_STEPS * 8) { |
2560 | + } else if (mul <= max_timebase * 8) { |
2561 | div = PWM_CTRL_CFG_SUB_DIV0; |
2562 | timebase = DIV_ROUND_UP(mul, 8); |
2563 | - } else if (mul <= MAX_TMBASE_STEPS * 64) { |
2564 | + } else if (mul <= max_timebase * 64) { |
2565 | div = PWM_CTRL_CFG_SUB_DIV1; |
2566 | timebase = DIV_ROUND_UP(mul, 64); |
2567 | - } else if (mul <= MAX_TMBASE_STEPS * 512) { |
2568 | + } else if (mul <= max_timebase * 512) { |
2569 | div = PWM_CTRL_CFG_SUB_DIV0_DIV1; |
2570 | timebase = DIV_ROUND_UP(mul, 512); |
2571 | - } else if (mul > MAX_TMBASE_STEPS * 512) { |
2572 | + } else if (mul > max_timebase * 512) { |
2573 | dev_err(chip->dev, |
2574 | "failed to configure timebase steps/divider value\n"); |
2575 | return -EINVAL; |
2576 | @@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = { |
2577 | .owner = THIS_MODULE, |
2578 | }; |
2579 | |
2580 | +static const struct img_pwm_soc_data pistachio_pwm = { |
2581 | + .max_timebase = 255, |
2582 | +}; |
2583 | + |
2584 | +static const struct of_device_id img_pwm_of_match[] = { |
2585 | + { |
2586 | + .compatible = "img,pistachio-pwm", |
2587 | + .data = &pistachio_pwm, |
2588 | + }, |
2589 | + { } |
2590 | +}; |
2591 | +MODULE_DEVICE_TABLE(of, img_pwm_of_match); |
2592 | + |
2593 | static int img_pwm_probe(struct platform_device *pdev) |
2594 | { |
2595 | int ret; |
2596 | + u64 val; |
2597 | + unsigned long clk_rate; |
2598 | struct resource *res; |
2599 | struct img_pwm_chip *pwm; |
2600 | + const struct of_device_id *of_dev_id; |
2601 | |
2602 | pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); |
2603 | if (!pwm) |
2604 | @@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev) |
2605 | if (IS_ERR(pwm->base)) |
2606 | return PTR_ERR(pwm->base); |
2607 | |
2608 | + of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev); |
2609 | + if (!of_dev_id) |
2610 | + return -ENODEV; |
2611 | + pwm->data = of_dev_id->data; |
2612 | + |
2613 | pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, |
2614 | "img,cr-periph"); |
2615 | if (IS_ERR(pwm->periph_regs)) |
2616 | @@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev) |
2617 | goto disable_sysclk; |
2618 | } |
2619 | |
2620 | + clk_rate = clk_get_rate(pwm->pwm_clk); |
2621 | + |
2622 | + /* The maximum input clock divider is 512 */ |
2623 | + val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase; |
2624 | + do_div(val, clk_rate); |
2625 | + pwm->max_period_ns = val; |
2626 | + |
2627 | + val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS; |
2628 | + do_div(val, clk_rate); |
2629 | + pwm->min_period_ns = val; |
2630 | + |
2631 | pwm->chip.dev = &pdev->dev; |
2632 | pwm->chip.ops = &img_pwm_ops; |
2633 | pwm->chip.base = -1; |
2634 | @@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev) |
2635 | return pwmchip_remove(&pwm_chip->chip); |
2636 | } |
2637 | |
2638 | -static const struct of_device_id img_pwm_of_match[] = { |
2639 | - { .compatible = "img,pistachio-pwm", }, |
2640 | - { } |
2641 | -}; |
2642 | -MODULE_DEVICE_TABLE(of, img_pwm_of_match); |
2643 | - |
2644 | static struct platform_driver img_pwm_driver = { |
2645 | .driver = { |
2646 | .name = "img-pwm", |
2647 | diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c |
2648 | index 8a4df7a1f2ee..e628d4c2f2ae 100644 |
2649 | --- a/drivers/regulator/da9052-regulator.c |
2650 | +++ b/drivers/regulator/da9052-regulator.c |
2651 | @@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id, |
2652 | |
2653 | static int da9052_regulator_probe(struct platform_device *pdev) |
2654 | { |
2655 | + const struct mfd_cell *cell = mfd_get_cell(pdev); |
2656 | struct regulator_config config = { }; |
2657 | struct da9052_regulator *regulator; |
2658 | struct da9052 *da9052; |
2659 | @@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) |
2660 | regulator->da9052 = da9052; |
2661 | |
2662 | regulator->info = find_regulator_info(regulator->da9052->chip_id, |
2663 | - pdev->id); |
2664 | + cell->id); |
2665 | if (regulator->info == NULL) { |
2666 | dev_err(&pdev->dev, "invalid regulator ID specified\n"); |
2667 | return -EINVAL; |
2668 | @@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev) |
2669 | config.driver_data = regulator; |
2670 | config.regmap = da9052->regmap; |
2671 | if (pdata && pdata->regulators) { |
2672 | - config.init_data = pdata->regulators[pdev->id]; |
2673 | + config.init_data = pdata->regulators[cell->id]; |
2674 | } else { |
2675 | #ifdef CONFIG_OF |
2676 | struct device_node *nproot = da9052->dev->of_node; |
2677 | diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
2678 | index 3290a3ed5b31..a661d339adf7 100644 |
2679 | --- a/drivers/scsi/sd.c |
2680 | +++ b/drivers/scsi/sd.c |
2681 | @@ -1624,6 +1624,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) |
2682 | { |
2683 | u64 start_lba = blk_rq_pos(scmd->request); |
2684 | u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); |
2685 | + u64 factor = scmd->device->sector_size / 512; |
2686 | u64 bad_lba; |
2687 | int info_valid; |
2688 | /* |
2689 | @@ -1645,16 +1646,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) |
2690 | if (scsi_bufflen(scmd) <= scmd->device->sector_size) |
2691 | return 0; |
2692 | |
2693 | - if (scmd->device->sector_size < 512) { |
2694 | - /* only legitimate sector_size here is 256 */ |
2695 | - start_lba <<= 1; |
2696 | - end_lba <<= 1; |
2697 | - } else { |
2698 | - /* be careful ... don't want any overflows */ |
2699 | - unsigned int factor = scmd->device->sector_size / 512; |
2700 | - do_div(start_lba, factor); |
2701 | - do_div(end_lba, factor); |
2702 | - } |
2703 | + /* be careful ... don't want any overflows */ |
2704 | + do_div(start_lba, factor); |
2705 | + do_div(end_lba, factor); |
2706 | |
2707 | /* The bad lba was reported incorrectly, we have no idea where |
2708 | * the error is. |
2709 | @@ -2212,8 +2206,7 @@ got_data: |
2710 | if (sector_size != 512 && |
2711 | sector_size != 1024 && |
2712 | sector_size != 2048 && |
2713 | - sector_size != 4096 && |
2714 | - sector_size != 256) { |
2715 | + sector_size != 4096) { |
2716 | sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n", |
2717 | sector_size); |
2718 | /* |
2719 | @@ -2268,8 +2261,6 @@ got_data: |
2720 | sdkp->capacity <<= 2; |
2721 | else if (sector_size == 1024) |
2722 | sdkp->capacity <<= 1; |
2723 | - else if (sector_size == 256) |
2724 | - sdkp->capacity >>= 1; |
2725 | |
2726 | blk_queue_physical_block_size(sdp->request_queue, |
2727 | sdkp->physical_block_size); |
2728 | diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c |
2729 | index bf8c5c1e254e..75efaaeb0eca 100644 |
2730 | --- a/drivers/scsi/storvsc_drv.c |
2731 | +++ b/drivers/scsi/storvsc_drv.c |
2732 | @@ -1565,8 +1565,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) |
2733 | break; |
2734 | default: |
2735 | vm_srb->data_in = UNKNOWN_TYPE; |
2736 | - vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN | |
2737 | - SRB_FLAGS_DATA_OUT); |
2738 | + vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER; |
2739 | break; |
2740 | } |
2741 | |
2742 | diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c |
2743 | index d1ab996b3305..a21a51efaad0 100644 |
2744 | --- a/drivers/staging/gdm724x/gdm_mux.c |
2745 | +++ b/drivers/staging/gdm724x/gdm_mux.c |
2746 | @@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r) |
2747 | unsigned int start_flag; |
2748 | unsigned int payload_size; |
2749 | unsigned short packet_type; |
2750 | - int dummy_cnt; |
2751 | + int total_len; |
2752 | u32 packet_size_sum = r->offset; |
2753 | int index; |
2754 | int ret = TO_HOST_INVALID_PACKET; |
2755 | @@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r) |
2756 | break; |
2757 | } |
2758 | |
2759 | - dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4); |
2760 | + total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4); |
2761 | |
2762 | if (len - packet_size_sum < |
2763 | - MUX_HEADER_SIZE + payload_size + dummy_cnt) { |
2764 | + total_len) { |
2765 | pr_err("invalid payload : %d %d %04x\n", |
2766 | payload_size, len, packet_type); |
2767 | break; |
2768 | @@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r) |
2769 | break; |
2770 | } |
2771 | |
2772 | - packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt; |
2773 | + packet_size_sum += total_len; |
2774 | if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) { |
2775 | ret = r->callback(NULL, |
2776 | 0, |
2777 | @@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, |
2778 | struct mux_pkt_header *mux_header; |
2779 | struct mux_tx *t = NULL; |
2780 | static u32 seq_num = 1; |
2781 | - int dummy_cnt; |
2782 | int total_len; |
2783 | int ret; |
2784 | unsigned long flags; |
2785 | @@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, |
2786 | |
2787 | spin_lock_irqsave(&mux_dev->write_lock, flags); |
2788 | |
2789 | - dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4); |
2790 | - |
2791 | - total_len = len + MUX_HEADER_SIZE + dummy_cnt; |
2792 | + total_len = ALIGN(MUX_HEADER_SIZE + len, 4); |
2793 | |
2794 | t = alloc_mux_tx(total_len); |
2795 | if (!t) { |
2796 | @@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index, |
2797 | mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]); |
2798 | |
2799 | memcpy(t->buf+MUX_HEADER_SIZE, data, len); |
2800 | - memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt); |
2801 | + memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE - |
2802 | + len); |
2803 | |
2804 | t->len = total_len; |
2805 | t->callback = cb; |
2806 | diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c |
2807 | index 03b2a90b9ac0..992236f605d8 100644 |
2808 | --- a/drivers/staging/vt6655/device_main.c |
2809 | +++ b/drivers/staging/vt6655/device_main.c |
2810 | @@ -911,7 +911,11 @@ static int vnt_int_report_rate(struct vnt_private *priv, |
2811 | |
2812 | if (!(tsr1 & TSR1_TERR)) { |
2813 | info->status.rates[0].idx = idx; |
2814 | - info->flags |= IEEE80211_TX_STAT_ACK; |
2815 | + |
2816 | + if (info->flags & IEEE80211_TX_CTL_NO_ACK) |
2817 | + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; |
2818 | + else |
2819 | + info->flags |= IEEE80211_TX_STAT_ACK; |
2820 | } |
2821 | |
2822 | return 0; |
2823 | @@ -936,9 +940,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx) |
2824 | //Only the status of first TD in the chain is correct |
2825 | if (pTD->m_td1TD1.byTCR & TCR_STP) { |
2826 | if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) { |
2827 | - |
2828 | - vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1); |
2829 | - |
2830 | if (!(byTsr1 & TSR1_TERR)) { |
2831 | if (byTsr0 != 0) { |
2832 | pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n", |
2833 | @@ -957,6 +958,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx) |
2834 | (int)uIdx, byTsr1, byTsr0); |
2835 | } |
2836 | } |
2837 | + |
2838 | + vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1); |
2839 | + |
2840 | device_free_tx_buf(pDevice, pTD); |
2841 | pDevice->iTDUsed[uIdx]--; |
2842 | } |
2843 | @@ -988,10 +992,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc) |
2844 | PCI_DMA_TODEVICE); |
2845 | } |
2846 | |
2847 | - if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) |
2848 | + if (skb) |
2849 | ieee80211_tx_status_irqsafe(pDevice->hw, skb); |
2850 | - else |
2851 | - dev_kfree_skb_irq(skb); |
2852 | |
2853 | pTDInfo->skb_dma = 0; |
2854 | pTDInfo->skb = NULL; |
2855 | @@ -1201,14 +1203,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) |
2856 | if (dma_idx == TYPE_AC0DMA) |
2857 | head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; |
2858 | |
2859 | - priv->iTDUsed[dma_idx]++; |
2860 | - |
2861 | - /* Take ownership */ |
2862 | - wmb(); |
2863 | - head_td->m_td0TD0.f1Owner = OWNED_BY_NIC; |
2864 | - |
2865 | - /* get Next */ |
2866 | - wmb(); |
2867 | priv->apCurrTD[dma_idx] = head_td->next; |
2868 | |
2869 | spin_unlock_irqrestore(&priv->lock, flags); |
2870 | @@ -1229,11 +1223,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) |
2871 | |
2872 | head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma); |
2873 | |
2874 | + /* Poll Transmit the adapter */ |
2875 | + wmb(); |
2876 | + head_td->m_td0TD0.f1Owner = OWNED_BY_NIC; |
2877 | + wmb(); /* second memory barrier */ |
2878 | + |
2879 | if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) |
2880 | MACvTransmitAC0(priv->PortOffset); |
2881 | else |
2882 | MACvTransmit0(priv->PortOffset); |
2883 | |
2884 | + priv->iTDUsed[dma_idx]++; |
2885 | + |
2886 | spin_unlock_irqrestore(&priv->lock, flags); |
2887 | |
2888 | return 0; |
2889 | @@ -1413,9 +1414,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, |
2890 | |
2891 | priv->current_aid = conf->aid; |
2892 | |
2893 | - if (changed & BSS_CHANGED_BSSID) |
2894 | + if (changed & BSS_CHANGED_BSSID) { |
2895 | + unsigned long flags; |
2896 | + |
2897 | + spin_lock_irqsave(&priv->lock, flags); |
2898 | + |
2899 | MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid); |
2900 | |
2901 | + spin_unlock_irqrestore(&priv->lock, flags); |
2902 | + } |
2903 | + |
2904 | if (changed & BSS_CHANGED_BASIC_RATES) { |
2905 | priv->basic_rates = conf->basic_rates; |
2906 | |
2907 | diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c |
2908 | index 33baf26de4b5..ee9ce165dcde 100644 |
2909 | --- a/drivers/staging/vt6656/rxtx.c |
2910 | +++ b/drivers/staging/vt6656/rxtx.c |
2911 | @@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) |
2912 | vnt_schedule_command(priv, WLAN_CMD_SETPOWER); |
2913 | } |
2914 | |
2915 | - if (current_rate > RATE_11M) |
2916 | - pkt_type = priv->packet_type; |
2917 | - else |
2918 | + if (current_rate > RATE_11M) { |
2919 | + if (info->band == IEEE80211_BAND_5GHZ) { |
2920 | + pkt_type = PK_TYPE_11A; |
2921 | + } else { |
2922 | + if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) |
2923 | + pkt_type = PK_TYPE_11GB; |
2924 | + else |
2925 | + pkt_type = PK_TYPE_11GA; |
2926 | + } |
2927 | + } else { |
2928 | pkt_type = PK_TYPE_11B; |
2929 | + } |
2930 | |
2931 | spin_lock_irqsave(&priv->lock, flags); |
2932 | |
2933 | diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c |
2934 | index f6c954c4635f..4073869d2090 100644 |
2935 | --- a/drivers/target/target_core_pscsi.c |
2936 | +++ b/drivers/target/target_core_pscsi.c |
2937 | @@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) |
2938 | " pdv_host_id: %d\n", pdv->pdv_host_id); |
2939 | return -EINVAL; |
2940 | } |
2941 | + pdv->pdv_lld_host = sh; |
2942 | } |
2943 | } else { |
2944 | if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) { |
2945 | @@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev) |
2946 | if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) && |
2947 | (phv->phv_lld_host != NULL)) |
2948 | scsi_host_put(phv->phv_lld_host); |
2949 | + else if (pdv->pdv_lld_host) |
2950 | + scsi_host_put(pdv->pdv_lld_host); |
2951 | |
2952 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) |
2953 | scsi_device_put(sd); |
2954 | diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h |
2955 | index 1bd757dff8ee..820d3052b775 100644 |
2956 | --- a/drivers/target/target_core_pscsi.h |
2957 | +++ b/drivers/target/target_core_pscsi.h |
2958 | @@ -45,6 +45,7 @@ struct pscsi_dev_virt { |
2959 | int pdv_lun_id; |
2960 | struct block_device *pdv_bd; |
2961 | struct scsi_device *pdv_sd; |
2962 | + struct Scsi_Host *pdv_lld_host; |
2963 | } ____cacheline_aligned; |
2964 | |
2965 | typedef enum phv_modes { |
2966 | diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c |
2967 | index c2556cf5186b..01255fd65135 100644 |
2968 | --- a/drivers/thermal/armada_thermal.c |
2969 | +++ b/drivers/thermal/armada_thermal.c |
2970 | @@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = { |
2971 | .is_valid_shift = 10, |
2972 | .temp_shift = 0, |
2973 | .temp_mask = 0x3ff, |
2974 | - .coef_b = 1169498786UL, |
2975 | - .coef_m = 2000000UL, |
2976 | - .coef_div = 4289, |
2977 | + .coef_b = 2931108200UL, |
2978 | + .coef_m = 5000000UL, |
2979 | + .coef_div = 10502, |
2980 | .inverted = true, |
2981 | }; |
2982 | |
2983 | diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c |
2984 | index 5bab1c684bb1..7a3d146a5f0e 100644 |
2985 | --- a/drivers/tty/hvc/hvc_xen.c |
2986 | +++ b/drivers/tty/hvc/hvc_xen.c |
2987 | @@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void) |
2988 | return -ENOMEM; |
2989 | } |
2990 | |
2991 | - info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0); |
2992 | + info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false); |
2993 | info->vtermno = HVC_COOKIE; |
2994 | |
2995 | spin_lock(&xencons_lock); |
2996 | diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c |
2997 | index c4343764cc5b..bce16e405d59 100644 |
2998 | --- a/drivers/tty/n_gsm.c |
2999 | +++ b/drivers/tty/n_gsm.c |
3000 | @@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state) |
3001 | return gsmtty_modem_update(dlci, encode); |
3002 | } |
3003 | |
3004 | -static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty) |
3005 | +static void gsmtty_cleanup(struct tty_struct *tty) |
3006 | { |
3007 | struct gsm_dlci *dlci = tty->driver_data; |
3008 | struct gsm_mux *gsm = dlci->gsm; |
3009 | @@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty) |
3010 | dlci_put(dlci); |
3011 | dlci_put(gsm->dlci[0]); |
3012 | mux_put(gsm); |
3013 | - driver->ttys[tty->index] = NULL; |
3014 | } |
3015 | |
3016 | /* Virtual ttys for the demux */ |
3017 | @@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = { |
3018 | .tiocmget = gsmtty_tiocmget, |
3019 | .tiocmset = gsmtty_tiocmset, |
3020 | .break_ctl = gsmtty_break_ctl, |
3021 | - .remove = gsmtty_remove, |
3022 | + .cleanup = gsmtty_cleanup, |
3023 | }; |
3024 | |
3025 | |
3026 | diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c |
3027 | index 644ddb841d9f..bbc4ce66c2c1 100644 |
3028 | --- a/drivers/tty/n_hdlc.c |
3029 | +++ b/drivers/tty/n_hdlc.c |
3030 | @@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, |
3031 | add_wait_queue(&tty->read_wait, &wait); |
3032 | |
3033 | for (;;) { |
3034 | - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { |
3035 | + if (test_bit(TTY_OTHER_DONE, &tty->flags)) { |
3036 | ret = -EIO; |
3037 | break; |
3038 | } |
3039 | @@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp, |
3040 | /* set bits for operations that won't block */ |
3041 | if (n_hdlc->rx_buf_list.head) |
3042 | mask |= POLLIN | POLLRDNORM; /* readable */ |
3043 | - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) |
3044 | + if (test_bit(TTY_OTHER_DONE, &tty->flags)) |
3045 | mask |= POLLHUP; |
3046 | if (tty_hung_up_p(filp)) |
3047 | mask |= POLLHUP; |
3048 | diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c |
3049 | index cf6e0f2e1331..cc57a3a6b02b 100644 |
3050 | --- a/drivers/tty/n_tty.c |
3051 | +++ b/drivers/tty/n_tty.c |
3052 | @@ -1949,6 +1949,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll) |
3053 | return ldata->commit_head - ldata->read_tail >= amt; |
3054 | } |
3055 | |
3056 | +static inline int check_other_done(struct tty_struct *tty) |
3057 | +{ |
3058 | + int done = test_bit(TTY_OTHER_DONE, &tty->flags); |
3059 | + if (done) { |
3060 | + /* paired with cmpxchg() in check_other_closed(); ensures |
3061 | + * read buffer head index is not stale |
3062 | + */ |
3063 | + smp_mb__after_atomic(); |
3064 | + } |
3065 | + return done; |
3066 | +} |
3067 | + |
3068 | /** |
3069 | * copy_from_read_buf - copy read data directly |
3070 | * @tty: terminal device |
3071 | @@ -2167,7 +2179,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, |
3072 | struct n_tty_data *ldata = tty->disc_data; |
3073 | unsigned char __user *b = buf; |
3074 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
3075 | - int c; |
3076 | + int c, done; |
3077 | int minimum, time; |
3078 | ssize_t retval = 0; |
3079 | long timeout; |
3080 | @@ -2235,8 +2247,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, |
3081 | ((minimum - (b - buf)) >= 1)) |
3082 | ldata->minimum_to_wake = (minimum - (b - buf)); |
3083 | |
3084 | + done = check_other_done(tty); |
3085 | + |
3086 | if (!input_available_p(tty, 0)) { |
3087 | - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { |
3088 | + if (done) { |
3089 | retval = -EIO; |
3090 | break; |
3091 | } |
3092 | @@ -2443,12 +2457,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, |
3093 | |
3094 | poll_wait(file, &tty->read_wait, wait); |
3095 | poll_wait(file, &tty->write_wait, wait); |
3096 | + if (check_other_done(tty)) |
3097 | + mask |= POLLHUP; |
3098 | if (input_available_p(tty, 1)) |
3099 | mask |= POLLIN | POLLRDNORM; |
3100 | if (tty->packet && tty->link->ctrl_status) |
3101 | mask |= POLLPRI | POLLIN | POLLRDNORM; |
3102 | - if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) |
3103 | - mask |= POLLHUP; |
3104 | if (tty_hung_up_p(file)) |
3105 | mask |= POLLHUP; |
3106 | if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { |
3107 | diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c |
3108 | index e72ee629cead..4d5e8409769c 100644 |
3109 | --- a/drivers/tty/pty.c |
3110 | +++ b/drivers/tty/pty.c |
3111 | @@ -53,9 +53,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp) |
3112 | /* Review - krefs on tty_link ?? */ |
3113 | if (!tty->link) |
3114 | return; |
3115 | - tty_flush_to_ldisc(tty->link); |
3116 | set_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
3117 | - wake_up_interruptible(&tty->link->read_wait); |
3118 | + tty_flip_buffer_push(tty->link->port); |
3119 | wake_up_interruptible(&tty->link->write_wait); |
3120 | if (tty->driver->subtype == PTY_TYPE_MASTER) { |
3121 | set_bit(TTY_OTHER_CLOSED, &tty->flags); |
3122 | @@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp) |
3123 | goto out; |
3124 | |
3125 | clear_bit(TTY_IO_ERROR, &tty->flags); |
3126 | + /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */ |
3127 | clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); |
3128 | + clear_bit(TTY_OTHER_DONE, &tty->link->flags); |
3129 | set_bit(TTY_THROTTLED, &tty->flags); |
3130 | return 0; |
3131 | |
3132 | diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c |
3133 | index 75661641f5fe..2f78b77f0f81 100644 |
3134 | --- a/drivers/tty/tty_buffer.c |
3135 | +++ b/drivers/tty/tty_buffer.c |
3136 | @@ -37,6 +37,28 @@ |
3137 | |
3138 | #define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF) |
3139 | |
3140 | +/* |
3141 | + * If all tty flip buffers have been processed by flush_to_ldisc() or |
3142 | + * dropped by tty_buffer_flush(), check if the linked pty has been closed. |
3143 | + * If so, wake the reader/poll to process |
3144 | + */ |
3145 | +static inline void check_other_closed(struct tty_struct *tty) |
3146 | +{ |
3147 | + unsigned long flags, old; |
3148 | + |
3149 | + /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */ |
3150 | + for (flags = ACCESS_ONCE(tty->flags); |
3151 | + test_bit(TTY_OTHER_CLOSED, &flags); |
3152 | + ) { |
3153 | + old = flags; |
3154 | + __set_bit(TTY_OTHER_DONE, &flags); |
3155 | + flags = cmpxchg(&tty->flags, old, flags); |
3156 | + if (old == flags) { |
3157 | + wake_up_interruptible(&tty->read_wait); |
3158 | + break; |
3159 | + } |
3160 | + } |
3161 | +} |
3162 | |
3163 | /** |
3164 | * tty_buffer_lock_exclusive - gain exclusive access to buffer |
3165 | @@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld) |
3166 | if (ld && ld->ops->flush_buffer) |
3167 | ld->ops->flush_buffer(tty); |
3168 | |
3169 | + check_other_closed(tty); |
3170 | + |
3171 | atomic_dec(&buf->priority); |
3172 | mutex_unlock(&buf->lock); |
3173 | } |
3174 | @@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work) |
3175 | smp_rmb(); |
3176 | count = head->commit - head->read; |
3177 | if (!count) { |
3178 | - if (next == NULL) |
3179 | + if (next == NULL) { |
3180 | + check_other_closed(tty); |
3181 | break; |
3182 | + } |
3183 | buf->head = next; |
3184 | tty_buffer_free(port, head); |
3185 | continue; |
3186 | @@ -489,19 +515,6 @@ static void flush_to_ldisc(struct work_struct *work) |
3187 | } |
3188 | |
3189 | /** |
3190 | - * tty_flush_to_ldisc |
3191 | - * @tty: tty to push |
3192 | - * |
3193 | - * Push the terminal flip buffers to the line discipline. |
3194 | - * |
3195 | - * Must not be called from IRQ context. |
3196 | - */ |
3197 | -void tty_flush_to_ldisc(struct tty_struct *tty) |
3198 | -{ |
3199 | - flush_work(&tty->port->buf.work); |
3200 | -} |
3201 | - |
3202 | -/** |
3203 | * tty_flip_buffer_push - terminal |
3204 | * @port: tty port to push |
3205 | * |
3206 | diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c |
3207 | index c42765b3a060..0495c94a23d7 100644 |
3208 | --- a/drivers/usb/gadget/configfs.c |
3209 | +++ b/drivers/usb/gadget/configfs.c |
3210 | @@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi) |
3211 | } |
3212 | } |
3213 | c->next_interface_id = 0; |
3214 | + memset(c->interface, 0, sizeof(c->interface)); |
3215 | c->superspeed = 0; |
3216 | c->highspeed = 0; |
3217 | c->fullspeed = 0; |
3218 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
3219 | index eeedde8c435a..6994c99e58a6 100644 |
3220 | --- a/drivers/usb/host/xhci-ring.c |
3221 | +++ b/drivers/usb/host/xhci-ring.c |
3222 | @@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td, |
3223 | break; |
3224 | case COMP_DEV_ERR: |
3225 | case COMP_STALL: |
3226 | + frame->status = -EPROTO; |
3227 | + skip_td = true; |
3228 | + break; |
3229 | case COMP_TX_ERR: |
3230 | frame->status = -EPROTO; |
3231 | + if (event_trb != td->last_trb) |
3232 | + return 0; |
3233 | skip_td = true; |
3234 | break; |
3235 | case COMP_STOP: |
3236 | @@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) |
3237 | xhci_halt(xhci); |
3238 | hw_died: |
3239 | spin_unlock(&xhci->lock); |
3240 | - return -ESHUTDOWN; |
3241 | + return IRQ_HANDLED; |
3242 | } |
3243 | |
3244 | /* |
3245 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
3246 | index 8e421b89632d..ea75e8ccd3c1 100644 |
3247 | --- a/drivers/usb/host/xhci.h |
3248 | +++ b/drivers/usb/host/xhci.h |
3249 | @@ -1267,7 +1267,7 @@ union xhci_trb { |
3250 | * since the command ring is 64-byte aligned. |
3251 | * It must also be greater than 16. |
3252 | */ |
3253 | -#define TRBS_PER_SEGMENT 64 |
3254 | +#define TRBS_PER_SEGMENT 256 |
3255 | /* Allow two commands + a link TRB, along with any reserved command TRBs */ |
3256 | #define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3) |
3257 | #define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16) |
3258 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
3259 | index 84ce2d74894c..9031750e7404 100644 |
3260 | --- a/drivers/usb/serial/cp210x.c |
3261 | +++ b/drivers/usb/serial/cp210x.c |
3262 | @@ -127,6 +127,7 @@ static const struct usb_device_id id_table[] = { |
3263 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ |
3264 | { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ |
3265 | { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ |
3266 | + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ |
3267 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
3268 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
3269 | { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
3270 | diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c |
3271 | index 829604d11f3f..f5257af33ecf 100644 |
3272 | --- a/drivers/usb/serial/pl2303.c |
3273 | +++ b/drivers/usb/serial/pl2303.c |
3274 | @@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = { |
3275 | { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) }, |
3276 | { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) }, |
3277 | { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) }, |
3278 | - { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) }, |
3279 | { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1), |
3280 | .driver_info = PL2303_QUIRK_UART_STATE_IDX0 }, |
3281 | { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65), |
3282 | diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h |
3283 | index 71fd9da1d6e7..e3b7af8adfb7 100644 |
3284 | --- a/drivers/usb/serial/pl2303.h |
3285 | +++ b/drivers/usb/serial/pl2303.h |
3286 | @@ -62,10 +62,6 @@ |
3287 | #define ALCATEL_VENDOR_ID 0x11f7 |
3288 | #define ALCATEL_PRODUCT_ID 0x02df |
3289 | |
3290 | -/* Samsung I330 phone cradle */ |
3291 | -#define SAMSUNG_VENDOR_ID 0x04e8 |
3292 | -#define SAMSUNG_PRODUCT_ID 0x8001 |
3293 | - |
3294 | #define SIEMENS_VENDOR_ID 0x11f5 |
3295 | #define SIEMENS_PRODUCT_ID_SX1 0x0001 |
3296 | #define SIEMENS_PRODUCT_ID_X65 0x0003 |
3297 | diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c |
3298 | index bf2bd40e5f2a..60afb39eb73c 100644 |
3299 | --- a/drivers/usb/serial/visor.c |
3300 | +++ b/drivers/usb/serial/visor.c |
3301 | @@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = { |
3302 | .driver_info = (kernel_ulong_t)&palm_os_4_probe }, |
3303 | { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID), |
3304 | .driver_info = (kernel_ulong_t)&palm_os_4_probe }, |
3305 | - { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID), |
3306 | + { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff), |
3307 | .driver_info = (kernel_ulong_t)&palm_os_4_probe }, |
3308 | { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID), |
3309 | .driver_info = (kernel_ulong_t)&palm_os_4_probe }, |
3310 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h |
3311 | index d684b4b8108f..caf188800c67 100644 |
3312 | --- a/drivers/usb/storage/unusual_devs.h |
3313 | +++ b/drivers/usb/storage/unusual_devs.h |
3314 | @@ -766,6 +766,13 @@ UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, |
3315 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
3316 | US_FL_GO_SLOW ), |
3317 | |
3318 | +/* Reported by Christian Schaller <cschalle@redhat.com> */ |
3319 | +UNUSUAL_DEV( 0x059f, 0x0651, 0x0000, 0x0000, |
3320 | + "LaCie", |
3321 | + "External HDD", |
3322 | + USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
3323 | + US_FL_NO_WP_DETECT ), |
3324 | + |
3325 | /* Submitted by Joel Bourquard <numlock@freesurf.ch> |
3326 | * Some versions of this device need the SubClass and Protocol overrides |
3327 | * while others don't. |
3328 | diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
3329 | index 2b8553bd8715..38387950490e 100644 |
3330 | --- a/drivers/xen/events/events_base.c |
3331 | +++ b/drivers/xen/events/events_base.c |
3332 | @@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void) |
3333 | } |
3334 | EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels); |
3335 | |
3336 | -int bind_virq_to_irq(unsigned int virq, unsigned int cpu) |
3337 | +int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu) |
3338 | { |
3339 | struct evtchn_bind_virq bind_virq; |
3340 | int evtchn, irq, ret; |
3341 | @@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) |
3342 | if (irq < 0) |
3343 | goto out; |
3344 | |
3345 | - irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
3346 | - handle_percpu_irq, "virq"); |
3347 | + if (percpu) |
3348 | + irq_set_chip_and_handler_name(irq, &xen_percpu_chip, |
3349 | + handle_percpu_irq, "virq"); |
3350 | + else |
3351 | + irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, |
3352 | + handle_edge_irq, "virq"); |
3353 | |
3354 | bind_virq.virq = virq; |
3355 | bind_virq.vcpu = cpu; |
3356 | @@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, |
3357 | { |
3358 | int irq, retval; |
3359 | |
3360 | - irq = bind_virq_to_irq(virq, cpu); |
3361 | + irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU); |
3362 | if (irq < 0) |
3363 | return irq; |
3364 | retval = request_irq(irq, handler, irqflags, devname, dev_id); |
3365 | diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c |
3366 | index d925f55e4857..8081aba116a7 100644 |
3367 | --- a/fs/binfmt_elf.c |
3368 | +++ b/fs/binfmt_elf.c |
3369 | @@ -928,7 +928,7 @@ static int load_elf_binary(struct linux_binprm *bprm) |
3370 | total_size = total_mapping_size(elf_phdata, |
3371 | loc->elf_ex.e_phnum); |
3372 | if (!total_size) { |
3373 | - error = -EINVAL; |
3374 | + retval = -EINVAL; |
3375 | goto out_free_dentry; |
3376 | } |
3377 | } |
3378 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
3379 | index 0a795c969c78..8b33da6ec3dd 100644 |
3380 | --- a/fs/btrfs/extent-tree.c |
3381 | +++ b/fs/btrfs/extent-tree.c |
3382 | @@ -8548,7 +8548,9 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, |
3383 | out: |
3384 | if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { |
3385 | alloc_flags = update_block_group_flags(root, cache->flags); |
3386 | + lock_chunks(root->fs_info->chunk_root); |
3387 | check_system_chunk(trans, root, alloc_flags); |
3388 | + unlock_chunks(root->fs_info->chunk_root); |
3389 | } |
3390 | |
3391 | btrfs_end_transaction(trans, root); |
3392 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
3393 | index 8222f6f74147..44a7e0398d97 100644 |
3394 | --- a/fs/btrfs/volumes.c |
3395 | +++ b/fs/btrfs/volumes.c |
3396 | @@ -4626,6 +4626,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, |
3397 | { |
3398 | u64 chunk_offset; |
3399 | |
3400 | + ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex)); |
3401 | chunk_offset = find_next_chunk(extent_root->fs_info); |
3402 | return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type); |
3403 | } |
3404 | diff --git a/fs/dcache.c b/fs/dcache.c |
3405 | index c71e3732e53b..922f23ef6041 100644 |
3406 | --- a/fs/dcache.c |
3407 | +++ b/fs/dcache.c |
3408 | @@ -1205,13 +1205,13 @@ ascend: |
3409 | /* might go back up the wrong parent if we have had a rename. */ |
3410 | if (need_seqretry(&rename_lock, seq)) |
3411 | goto rename_retry; |
3412 | - next = child->d_child.next; |
3413 | - while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) { |
3414 | + /* go into the first sibling still alive */ |
3415 | + do { |
3416 | + next = child->d_child.next; |
3417 | if (next == &this_parent->d_subdirs) |
3418 | goto ascend; |
3419 | child = list_entry(next, struct dentry, d_child); |
3420 | - next = next->next; |
3421 | - } |
3422 | + } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)); |
3423 | rcu_read_unlock(); |
3424 | goto resume; |
3425 | } |
3426 | diff --git a/fs/exec.c b/fs/exec.c |
3427 | index 00400cf522dc..120244523647 100644 |
3428 | --- a/fs/exec.c |
3429 | +++ b/fs/exec.c |
3430 | @@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm, |
3431 | if (stack_base > STACK_SIZE_MAX) |
3432 | stack_base = STACK_SIZE_MAX; |
3433 | |
3434 | + /* Add space for stack randomization. */ |
3435 | + stack_base += (STACK_RND_MASK << PAGE_SHIFT); |
3436 | + |
3437 | /* Make sure we didn't let the argument array grow too large. */ |
3438 | if (vma->vm_end - vma->vm_start > stack_base) |
3439 | return -ENOMEM; |
3440 | diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c |
3441 | index 3445035c7e01..d41843181818 100644 |
3442 | --- a/fs/ext4/ext4_jbd2.c |
3443 | +++ b/fs/ext4/ext4_jbd2.c |
3444 | @@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) |
3445 | ext4_put_nojournal(handle); |
3446 | return 0; |
3447 | } |
3448 | + |
3449 | + if (!handle->h_transaction) { |
3450 | + err = jbd2_journal_stop(handle); |
3451 | + return handle->h_err ? handle->h_err : err; |
3452 | + } |
3453 | + |
3454 | sb = handle->h_transaction->t_journal->j_private; |
3455 | err = handle->h_err; |
3456 | rc = jbd2_journal_stop(handle); |
3457 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
3458 | index 16f6365f65e7..ea4ee1732143 100644 |
3459 | --- a/fs/ext4/extents.c |
3460 | +++ b/fs/ext4/extents.c |
3461 | @@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) |
3462 | ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); |
3463 | ext4_lblk_t last = lblock + len - 1; |
3464 | |
3465 | - if (lblock > last) |
3466 | + if (len == 0 || lblock > last) |
3467 | return 0; |
3468 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
3469 | } |
3470 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
3471 | index 852cc521f327..1f252b4e0f51 100644 |
3472 | --- a/fs/ext4/inode.c |
3473 | +++ b/fs/ext4/inode.c |
3474 | @@ -4233,7 +4233,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb, |
3475 | int inode_size = EXT4_INODE_SIZE(sb); |
3476 | |
3477 | oi.orig_ino = orig_ino; |
3478 | - ino = orig_ino & ~(inodes_per_block - 1); |
3479 | + ino = (orig_ino & ~(inodes_per_block - 1)) + 1; |
3480 | for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) { |
3481 | if (ino == orig_ino) |
3482 | continue; |
3483 | diff --git a/fs/fhandle.c b/fs/fhandle.c |
3484 | index 999ff5c3cab0..d59712dfa3e7 100644 |
3485 | --- a/fs/fhandle.c |
3486 | +++ b/fs/fhandle.c |
3487 | @@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, |
3488 | goto out_err; |
3489 | } |
3490 | /* copy the full handle */ |
3491 | - if (copy_from_user(handle, ufh, |
3492 | - sizeof(struct file_handle) + |
3493 | + *handle = f_handle; |
3494 | + if (copy_from_user(&handle->f_handle, |
3495 | + &ufh->f_handle, |
3496 | f_handle.handle_bytes)) { |
3497 | retval = -EFAULT; |
3498 | goto out_handle; |
3499 | diff --git a/fs/fs_pin.c b/fs/fs_pin.c |
3500 | index b06c98796afb..611b5408f6ec 100644 |
3501 | --- a/fs/fs_pin.c |
3502 | +++ b/fs/fs_pin.c |
3503 | @@ -9,8 +9,8 @@ static DEFINE_SPINLOCK(pin_lock); |
3504 | void pin_remove(struct fs_pin *pin) |
3505 | { |
3506 | spin_lock(&pin_lock); |
3507 | - hlist_del(&pin->m_list); |
3508 | - hlist_del(&pin->s_list); |
3509 | + hlist_del_init(&pin->m_list); |
3510 | + hlist_del_init(&pin->s_list); |
3511 | spin_unlock(&pin_lock); |
3512 | spin_lock_irq(&pin->wait.lock); |
3513 | pin->done = 1; |
3514 | diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c |
3515 | index b5128c6e63ad..a9079d035ae5 100644 |
3516 | --- a/fs/jbd2/recovery.c |
3517 | +++ b/fs/jbd2/recovery.c |
3518 | @@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh, |
3519 | { |
3520 | jbd2_journal_revoke_header_t *header; |
3521 | int offset, max; |
3522 | + int csum_size = 0; |
3523 | + __u32 rcount; |
3524 | int record_len = 4; |
3525 | |
3526 | header = (jbd2_journal_revoke_header_t *) bh->b_data; |
3527 | offset = sizeof(jbd2_journal_revoke_header_t); |
3528 | - max = be32_to_cpu(header->r_count); |
3529 | + rcount = be32_to_cpu(header->r_count); |
3530 | |
3531 | if (!jbd2_revoke_block_csum_verify(journal, header)) |
3532 | return -EINVAL; |
3533 | |
3534 | + if (jbd2_journal_has_csum_v2or3(journal)) |
3535 | + csum_size = sizeof(struct jbd2_journal_revoke_tail); |
3536 | + if (rcount > journal->j_blocksize - csum_size) |
3537 | + return -EINVAL; |
3538 | + max = rcount; |
3539 | + |
3540 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) |
3541 | record_len = 8; |
3542 | |
3543 | diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c |
3544 | index c6cbaef2bda1..14214da80eb8 100644 |
3545 | --- a/fs/jbd2/revoke.c |
3546 | +++ b/fs/jbd2/revoke.c |
3547 | @@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal, |
3548 | { |
3549 | int csum_size = 0; |
3550 | struct buffer_head *descriptor; |
3551 | - int offset; |
3552 | + int sz, offset; |
3553 | journal_header_t *header; |
3554 | |
3555 | /* If we are already aborting, this all becomes a noop. We |
3556 | @@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal, |
3557 | if (jbd2_journal_has_csum_v2or3(journal)) |
3558 | csum_size = sizeof(struct jbd2_journal_revoke_tail); |
3559 | |
3560 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) |
3561 | + sz = 8; |
3562 | + else |
3563 | + sz = 4; |
3564 | + |
3565 | /* Make sure we have a descriptor with space left for the record */ |
3566 | if (descriptor) { |
3567 | - if (offset >= journal->j_blocksize - csum_size) { |
3568 | + if (offset + sz > journal->j_blocksize - csum_size) { |
3569 | flush_descriptor(journal, descriptor, offset, write_op); |
3570 | descriptor = NULL; |
3571 | } |
3572 | @@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal, |
3573 | *descriptorp = descriptor; |
3574 | } |
3575 | |
3576 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) { |
3577 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) |
3578 | * ((__be64 *)(&descriptor->b_data[offset])) = |
3579 | cpu_to_be64(record->blocknr); |
3580 | - offset += 8; |
3581 | - |
3582 | - } else { |
3583 | + else |
3584 | * ((__be32 *)(&descriptor->b_data[offset])) = |
3585 | cpu_to_be32(record->blocknr); |
3586 | - offset += 4; |
3587 | - } |
3588 | + offset += sz; |
3589 | |
3590 | *offsetp = offset; |
3591 | } |
3592 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
3593 | index 5f09370c90a8..ff2f2e6ad311 100644 |
3594 | --- a/fs/jbd2/transaction.c |
3595 | +++ b/fs/jbd2/transaction.c |
3596 | @@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks) |
3597 | int result; |
3598 | int wanted; |
3599 | |
3600 | - WARN_ON(!transaction); |
3601 | if (is_handle_aborted(handle)) |
3602 | return -EROFS; |
3603 | journal = transaction->t_journal; |
3604 | @@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) |
3605 | tid_t tid; |
3606 | int need_to_start, ret; |
3607 | |
3608 | - WARN_ON(!transaction); |
3609 | /* If we've had an abort of any type, don't even think about |
3610 | * actually doing the restart! */ |
3611 | if (is_handle_aborted(handle)) |
3612 | @@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, |
3613 | int need_copy = 0; |
3614 | unsigned long start_lock, time_lock; |
3615 | |
3616 | - WARN_ON(!transaction); |
3617 | if (is_handle_aborted(handle)) |
3618 | return -EROFS; |
3619 | journal = transaction->t_journal; |
3620 | @@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) |
3621 | int err; |
3622 | |
3623 | jbd_debug(5, "journal_head %p\n", jh); |
3624 | - WARN_ON(!transaction); |
3625 | err = -EROFS; |
3626 | if (is_handle_aborted(handle)) |
3627 | goto out; |
3628 | @@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh) |
3629 | struct journal_head *jh; |
3630 | int ret = 0; |
3631 | |
3632 | - WARN_ON(!transaction); |
3633 | if (is_handle_aborted(handle)) |
3634 | return -EROFS; |
3635 | journal = transaction->t_journal; |
3636 | @@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) |
3637 | int err = 0; |
3638 | int was_modified = 0; |
3639 | |
3640 | - WARN_ON(!transaction); |
3641 | if (is_handle_aborted(handle)) |
3642 | return -EROFS; |
3643 | journal = transaction->t_journal; |
3644 | @@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle) |
3645 | tid_t tid; |
3646 | pid_t pid; |
3647 | |
3648 | - if (!transaction) |
3649 | - goto free_and_exit; |
3650 | + if (!transaction) { |
3651 | + /* |
3652 | + * Handle is already detached from the transaction so |
3653 | + * there is nothing to do other than decrease a refcount, |
3654 | + * or free the handle if refcount drops to zero |
3655 | + */ |
3656 | + if (--handle->h_ref > 0) { |
3657 | + jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, |
3658 | + handle->h_ref); |
3659 | + return err; |
3660 | + } else { |
3661 | + if (handle->h_rsv_handle) |
3662 | + jbd2_free_handle(handle->h_rsv_handle); |
3663 | + goto free_and_exit; |
3664 | + } |
3665 | + } |
3666 | journal = transaction->t_journal; |
3667 | |
3668 | J_ASSERT(journal_current_handle() == handle); |
3669 | @@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) |
3670 | transaction_t *transaction = handle->h_transaction; |
3671 | journal_t *journal; |
3672 | |
3673 | - WARN_ON(!transaction); |
3674 | if (is_handle_aborted(handle)) |
3675 | return -EROFS; |
3676 | journal = transaction->t_journal; |
3677 | diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c |
3678 | index 6acc9648f986..345b35fd329d 100644 |
3679 | --- a/fs/kernfs/dir.c |
3680 | +++ b/fs/kernfs/dir.c |
3681 | @@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, |
3682 | if (!kn) |
3683 | goto err_out1; |
3684 | |
3685 | - ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL); |
3686 | + /* |
3687 | + * If the ino of the sysfs entry created for a kmem cache gets |
3688 | + * allocated from an ida layer, which is accounted to the memcg that |
3689 | + * owns the cache, the memcg will get pinned forever. So do not account |
3690 | + * ino ida allocations. |
3691 | + */ |
3692 | + ret = ida_simple_get(&root->ino_ida, 1, 0, |
3693 | + GFP_KERNEL | __GFP_NOACCOUNT); |
3694 | if (ret < 0) |
3695 | goto err_out2; |
3696 | kn->ino = ret; |
3697 | diff --git a/fs/namespace.c b/fs/namespace.c |
3698 | index 38ed1e1bed41..13b0f7bfc096 100644 |
3699 | --- a/fs/namespace.c |
3700 | +++ b/fs/namespace.c |
3701 | @@ -1709,8 +1709,11 @@ struct vfsmount *collect_mounts(struct path *path) |
3702 | { |
3703 | struct mount *tree; |
3704 | namespace_lock(); |
3705 | - tree = copy_tree(real_mount(path->mnt), path->dentry, |
3706 | - CL_COPY_ALL | CL_PRIVATE); |
3707 | + if (!check_mnt(real_mount(path->mnt))) |
3708 | + tree = ERR_PTR(-EINVAL); |
3709 | + else |
3710 | + tree = copy_tree(real_mount(path->mnt), path->dentry, |
3711 | + CL_COPY_ALL | CL_PRIVATE); |
3712 | namespace_unlock(); |
3713 | if (IS_ERR(tree)) |
3714 | return ERR_CAST(tree); |
3715 | diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c |
3716 | index 03d647bf195d..cdefaa331a07 100644 |
3717 | --- a/fs/nfsd/blocklayout.c |
3718 | +++ b/fs/nfsd/blocklayout.c |
3719 | @@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode, |
3720 | } |
3721 | |
3722 | const struct nfsd4_layout_ops bl_layout_ops = { |
3723 | + /* |
3724 | + * Pretend that we send notification to the client. This is a blatant |
3725 | + * lie to force recent Linux clients to cache our device IDs. |
3726 | + * We rarely ever change the device ID, so the harm of leaking deviceids |
3727 | + * for a while isn't too bad. Unfortunately RFC5661 is a complete mess |
3728 | + * in this regard, but I filed errata 4119 for this a while ago, and |
3729 | + * hopefully the Linux client will eventually start caching deviceids |
3730 | + * without this again. |
3731 | + */ |
3732 | + .notify_types = |
3733 | + NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE, |
3734 | .proc_getdeviceinfo = nfsd4_block_proc_getdeviceinfo, |
3735 | .encode_getdeviceinfo = nfsd4_block_encode_getdeviceinfo, |
3736 | .proc_layoutget = nfsd4_block_proc_layoutget, |
3737 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
3738 | index ee1cccdb083a..b4541ede7cb8 100644 |
3739 | --- a/fs/nfsd/nfs4state.c |
3740 | +++ b/fs/nfsd/nfs4state.c |
3741 | @@ -4386,10 +4386,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s |
3742 | return nfserr_old_stateid; |
3743 | } |
3744 | |
3745 | +static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) |
3746 | +{ |
3747 | + if (ols->st_stateowner->so_is_open_owner && |
3748 | + !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) |
3749 | + return nfserr_bad_stateid; |
3750 | + return nfs_ok; |
3751 | +} |
3752 | + |
3753 | static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) |
3754 | { |
3755 | struct nfs4_stid *s; |
3756 | - struct nfs4_ol_stateid *ols; |
3757 | __be32 status = nfserr_bad_stateid; |
3758 | |
3759 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
3760 | @@ -4419,13 +4426,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) |
3761 | break; |
3762 | case NFS4_OPEN_STID: |
3763 | case NFS4_LOCK_STID: |
3764 | - ols = openlockstateid(s); |
3765 | - if (ols->st_stateowner->so_is_open_owner |
3766 | - && !(openowner(ols->st_stateowner)->oo_flags |
3767 | - & NFS4_OO_CONFIRMED)) |
3768 | - status = nfserr_bad_stateid; |
3769 | - else |
3770 | - status = nfs_ok; |
3771 | + status = nfsd4_check_openowner_confirmed(openlockstateid(s)); |
3772 | break; |
3773 | default: |
3774 | printk("unknown stateid type %x\n", s->sc_type); |
3775 | @@ -4517,8 +4518,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate, |
3776 | status = nfs4_check_fh(current_fh, stp); |
3777 | if (status) |
3778 | goto out; |
3779 | - if (stp->st_stateowner->so_is_open_owner |
3780 | - && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) |
3781 | + status = nfsd4_check_openowner_confirmed(stp); |
3782 | + if (status) |
3783 | goto out; |
3784 | status = nfs4_check_openmode(stp, flags); |
3785 | if (status) |
3786 | diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c |
3787 | index 138321b0c6c2..454111a3308e 100644 |
3788 | --- a/fs/omfs/inode.c |
3789 | +++ b/fs/omfs/inode.c |
3790 | @@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = { |
3791 | */ |
3792 | static int omfs_get_imap(struct super_block *sb) |
3793 | { |
3794 | - unsigned int bitmap_size, count, array_size; |
3795 | + unsigned int bitmap_size, array_size; |
3796 | + int count; |
3797 | struct omfs_sb_info *sbi = OMFS_SB(sb); |
3798 | struct buffer_head *bh; |
3799 | unsigned long **ptr; |
3800 | @@ -359,7 +360,7 @@ nomem: |
3801 | } |
3802 | |
3803 | enum { |
3804 | - Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask |
3805 | + Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err |
3806 | }; |
3807 | |
3808 | static const match_table_t tokens = { |
3809 | @@ -368,6 +369,7 @@ static const match_table_t tokens = { |
3810 | {Opt_umask, "umask=%o"}, |
3811 | {Opt_dmask, "dmask=%o"}, |
3812 | {Opt_fmask, "fmask=%o"}, |
3813 | + {Opt_err, NULL}, |
3814 | }; |
3815 | |
3816 | static int parse_options(char *options, struct omfs_sb_info *sbi) |
3817 | diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c |
3818 | index 24f640441bd9..84d693d37428 100644 |
3819 | --- a/fs/overlayfs/copy_up.c |
3820 | +++ b/fs/overlayfs/copy_up.c |
3821 | @@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, |
3822 | struct cred *override_cred; |
3823 | char *link = NULL; |
3824 | |
3825 | + if (WARN_ON(!workdir)) |
3826 | + return -EROFS; |
3827 | + |
3828 | ovl_path_upper(parent, &parentpath); |
3829 | upperdir = parentpath.dentry; |
3830 | |
3831 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
3832 | index d139405d2bfa..692ceda3bc21 100644 |
3833 | --- a/fs/overlayfs/dir.c |
3834 | +++ b/fs/overlayfs/dir.c |
3835 | @@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, |
3836 | struct kstat stat; |
3837 | int err; |
3838 | |
3839 | + if (WARN_ON(!workdir)) |
3840 | + return ERR_PTR(-EROFS); |
3841 | + |
3842 | err = ovl_lock_rename_workdir(workdir, upperdir); |
3843 | if (err) |
3844 | goto out; |
3845 | @@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, |
3846 | struct dentry *newdentry; |
3847 | int err; |
3848 | |
3849 | + if (WARN_ON(!workdir)) |
3850 | + return -EROFS; |
3851 | + |
3852 | err = ovl_lock_rename_workdir(workdir, upperdir); |
3853 | if (err) |
3854 | goto out; |
3855 | @@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) |
3856 | struct dentry *opaquedir = NULL; |
3857 | int err; |
3858 | |
3859 | - if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { |
3860 | - opaquedir = ovl_check_empty_and_clear(dentry); |
3861 | - err = PTR_ERR(opaquedir); |
3862 | - if (IS_ERR(opaquedir)) |
3863 | - goto out; |
3864 | + if (WARN_ON(!workdir)) |
3865 | + return -EROFS; |
3866 | + |
3867 | + if (is_dir) { |
3868 | + if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { |
3869 | + opaquedir = ovl_check_empty_and_clear(dentry); |
3870 | + err = PTR_ERR(opaquedir); |
3871 | + if (IS_ERR(opaquedir)) |
3872 | + goto out; |
3873 | + } else { |
3874 | + LIST_HEAD(list); |
3875 | + |
3876 | + /* |
3877 | + * When removing an empty opaque directory, then it |
3878 | + * makes no sense to replace it with an exact replica of |
3879 | + * itself. But emptiness still needs to be checked. |
3880 | + */ |
3881 | + err = ovl_check_empty_dir(dentry, &list); |
3882 | + ovl_cache_free(&list); |
3883 | + if (err) |
3884 | + goto out; |
3885 | + } |
3886 | } |
3887 | |
3888 | err = ovl_lock_rename_workdir(workdir, upperdir); |
3889 | diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c |
3890 | index 5f0d1993e6e3..bf8537c7f455 100644 |
3891 | --- a/fs/overlayfs/super.c |
3892 | +++ b/fs/overlayfs/super.c |
3893 | @@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data) |
3894 | { |
3895 | struct ovl_fs *ufs = sb->s_fs_info; |
3896 | |
3897 | - if (!(*flags & MS_RDONLY) && !ufs->upper_mnt) |
3898 | + if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir)) |
3899 | return -EROFS; |
3900 | |
3901 | return 0; |
3902 | @@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) |
3903 | ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); |
3904 | err = PTR_ERR(ufs->workdir); |
3905 | if (IS_ERR(ufs->workdir)) { |
3906 | - pr_err("overlayfs: failed to create directory %s/%s\n", |
3907 | - ufs->config.workdir, OVL_WORKDIR_NAME); |
3908 | - goto out_put_upper_mnt; |
3909 | + pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n", |
3910 | + ufs->config.workdir, OVL_WORKDIR_NAME, -err); |
3911 | + sb->s_flags |= MS_RDONLY; |
3912 | + ufs->workdir = NULL; |
3913 | } |
3914 | } |
3915 | |
3916 | @@ -997,7 +998,6 @@ out_put_lower_mnt: |
3917 | kfree(ufs->lower_mnt); |
3918 | out_put_workdir: |
3919 | dput(ufs->workdir); |
3920 | -out_put_upper_mnt: |
3921 | mntput(ufs->upper_mnt); |
3922 | out_put_lowerpath: |
3923 | for (i = 0; i < numlower; i++) |
3924 | diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c |
3925 | index 15105dbc9e28..0166e7e829a7 100644 |
3926 | --- a/fs/xfs/libxfs/xfs_attr_leaf.c |
3927 | +++ b/fs/xfs/libxfs/xfs_attr_leaf.c |
3928 | @@ -498,8 +498,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) |
3929 | * After the last attribute is removed revert to original inode format, |
3930 | * making all literal area available to the data fork once more. |
3931 | */ |
3932 | -STATIC void |
3933 | -xfs_attr_fork_reset( |
3934 | +void |
3935 | +xfs_attr_fork_remove( |
3936 | struct xfs_inode *ip, |
3937 | struct xfs_trans *tp) |
3938 | { |
3939 | @@ -565,7 +565,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) |
3940 | (mp->m_flags & XFS_MOUNT_ATTR2) && |
3941 | (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) && |
3942 | !(args->op_flags & XFS_DA_OP_ADDNAME)) { |
3943 | - xfs_attr_fork_reset(dp, args->trans); |
3944 | + xfs_attr_fork_remove(dp, args->trans); |
3945 | } else { |
3946 | xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); |
3947 | dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); |
3948 | @@ -828,7 +828,7 @@ xfs_attr3_leaf_to_shortform( |
3949 | if (forkoff == -1) { |
3950 | ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2); |
3951 | ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE); |
3952 | - xfs_attr_fork_reset(dp, args->trans); |
3953 | + xfs_attr_fork_remove(dp, args->trans); |
3954 | goto out; |
3955 | } |
3956 | |
3957 | diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h |
3958 | index e2929da7c3ba..4f3a60aa93d4 100644 |
3959 | --- a/fs/xfs/libxfs/xfs_attr_leaf.h |
3960 | +++ b/fs/xfs/libxfs/xfs_attr_leaf.h |
3961 | @@ -53,7 +53,7 @@ int xfs_attr_shortform_remove(struct xfs_da_args *args); |
3962 | int xfs_attr_shortform_list(struct xfs_attr_list_context *context); |
3963 | int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); |
3964 | int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes); |
3965 | - |
3966 | +void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp); |
3967 | |
3968 | /* |
3969 | * Internal routines when attribute fork size == XFS_LBSIZE(mp). |
3970 | diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c |
3971 | index 83af4c149635..487c8374a1e0 100644 |
3972 | --- a/fs/xfs/xfs_attr_inactive.c |
3973 | +++ b/fs/xfs/xfs_attr_inactive.c |
3974 | @@ -379,23 +379,31 @@ xfs_attr3_root_inactive( |
3975 | return error; |
3976 | } |
3977 | |
3978 | +/* |
3979 | + * xfs_attr_inactive kills all traces of an attribute fork on an inode. It |
3980 | + * removes both the on-disk and in-memory inode fork. Note that this also has to |
3981 | + * handle the condition of inodes without attributes but with an attribute fork |
3982 | + * configured, so we can't use xfs_inode_hasattr() here. |
3983 | + * |
3984 | + * The in-memory attribute fork is removed even on error. |
3985 | + */ |
3986 | int |
3987 | -xfs_attr_inactive(xfs_inode_t *dp) |
3988 | +xfs_attr_inactive( |
3989 | + struct xfs_inode *dp) |
3990 | { |
3991 | - xfs_trans_t *trans; |
3992 | - xfs_mount_t *mp; |
3993 | - int error; |
3994 | + struct xfs_trans *trans; |
3995 | + struct xfs_mount *mp; |
3996 | + int cancel_flags = 0; |
3997 | + int lock_mode = XFS_ILOCK_SHARED; |
3998 | + int error = 0; |
3999 | |
4000 | mp = dp->i_mount; |
4001 | ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); |
4002 | |
4003 | - xfs_ilock(dp, XFS_ILOCK_SHARED); |
4004 | - if (!xfs_inode_hasattr(dp) || |
4005 | - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { |
4006 | - xfs_iunlock(dp, XFS_ILOCK_SHARED); |
4007 | - return 0; |
4008 | - } |
4009 | - xfs_iunlock(dp, XFS_ILOCK_SHARED); |
4010 | + xfs_ilock(dp, lock_mode); |
4011 | + if (!XFS_IFORK_Q(dp)) |
4012 | + goto out_destroy_fork; |
4013 | + xfs_iunlock(dp, lock_mode); |
4014 | |
4015 | /* |
4016 | * Start our first transaction of the day. |
4017 | @@ -407,13 +415,18 @@ xfs_attr_inactive(xfs_inode_t *dp) |
4018 | * the inode in every transaction to let it float upward through |
4019 | * the log. |
4020 | */ |
4021 | + lock_mode = 0; |
4022 | trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); |
4023 | error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); |
4024 | - if (error) { |
4025 | - xfs_trans_cancel(trans, 0); |
4026 | - return error; |
4027 | - } |
4028 | - xfs_ilock(dp, XFS_ILOCK_EXCL); |
4029 | + if (error) |
4030 | + goto out_cancel; |
4031 | + |
4032 | + lock_mode = XFS_ILOCK_EXCL; |
4033 | + cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT; |
4034 | + xfs_ilock(dp, lock_mode); |
4035 | + |
4036 | + if (!XFS_IFORK_Q(dp)) |
4037 | + goto out_cancel; |
4038 | |
4039 | /* |
4040 | * No need to make quota reservations here. We expect to release some |
4041 | @@ -421,29 +434,31 @@ xfs_attr_inactive(xfs_inode_t *dp) |
4042 | */ |
4043 | xfs_trans_ijoin(trans, dp, 0); |
4044 | |
4045 | - /* |
4046 | - * Decide on what work routines to call based on the inode size. |
4047 | - */ |
4048 | - if (!xfs_inode_hasattr(dp) || |
4049 | - dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) { |
4050 | - error = 0; |
4051 | - goto out; |
4052 | + /* invalidate and truncate the attribute fork extents */ |
4053 | + if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { |
4054 | + error = xfs_attr3_root_inactive(&trans, dp); |
4055 | + if (error) |
4056 | + goto out_cancel; |
4057 | + |
4058 | + error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); |
4059 | + if (error) |
4060 | + goto out_cancel; |
4061 | } |
4062 | - error = xfs_attr3_root_inactive(&trans, dp); |
4063 | - if (error) |
4064 | - goto out; |
4065 | |
4066 | - error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); |
4067 | - if (error) |
4068 | - goto out; |
4069 | + /* Reset the attribute fork - this also destroys the in-core fork */ |
4070 | + xfs_attr_fork_remove(dp, trans); |
4071 | |
4072 | error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES); |
4073 | - xfs_iunlock(dp, XFS_ILOCK_EXCL); |
4074 | - |
4075 | + xfs_iunlock(dp, lock_mode); |
4076 | return error; |
4077 | |
4078 | -out: |
4079 | - xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); |
4080 | - xfs_iunlock(dp, XFS_ILOCK_EXCL); |
4081 | +out_cancel: |
4082 | + xfs_trans_cancel(trans, cancel_flags); |
4083 | +out_destroy_fork: |
4084 | + /* kill the in-core attr fork before we drop the inode lock */ |
4085 | + if (dp->i_afp) |
4086 | + xfs_idestroy_fork(dp, XFS_ATTR_FORK); |
4087 | + if (lock_mode) |
4088 | + xfs_iunlock(dp, lock_mode); |
4089 | return error; |
4090 | } |
4091 | diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c |
4092 | index a2e1cb8a568b..f3ba637a8ece 100644 |
4093 | --- a/fs/xfs/xfs_file.c |
4094 | +++ b/fs/xfs/xfs_file.c |
4095 | @@ -125,7 +125,7 @@ xfs_iozero( |
4096 | status = 0; |
4097 | } while (count); |
4098 | |
4099 | - return (-status); |
4100 | + return status; |
4101 | } |
4102 | |
4103 | int |
4104 | diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c |
4105 | index 6163767aa856..b1edda7890f4 100644 |
4106 | --- a/fs/xfs/xfs_inode.c |
4107 | +++ b/fs/xfs/xfs_inode.c |
4108 | @@ -1889,21 +1889,17 @@ xfs_inactive( |
4109 | /* |
4110 | * If there are attributes associated with the file then blow them away |
4111 | * now. The code calls a routine that recursively deconstructs the |
4112 | - * attribute fork. We need to just commit the current transaction |
4113 | - * because we can't use it for xfs_attr_inactive(). |
4114 | + * attribute fork. If also blows away the in-core attribute fork. |
4115 | */ |
4116 | - if (ip->i_d.di_anextents > 0) { |
4117 | - ASSERT(ip->i_d.di_forkoff != 0); |
4118 | - |
4119 | + if (XFS_IFORK_Q(ip)) { |
4120 | error = xfs_attr_inactive(ip); |
4121 | if (error) |
4122 | return; |
4123 | } |
4124 | |
4125 | - if (ip->i_afp) |
4126 | - xfs_idestroy_fork(ip, XFS_ATTR_FORK); |
4127 | - |
4128 | + ASSERT(!ip->i_afp); |
4129 | ASSERT(ip->i_d.di_anextents == 0); |
4130 | + ASSERT(ip->i_d.di_forkoff == 0); |
4131 | |
4132 | /* |
4133 | * Free the inode. |
4134 | diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h |
4135 | index 2dd405c9be78..45c39a37f924 100644 |
4136 | --- a/include/drm/drm_pciids.h |
4137 | +++ b/include/drm/drm_pciids.h |
4138 | @@ -186,6 +186,7 @@ |
4139 | {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
4140 | {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
4141 | {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
4142 | + {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
4143 | {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
4144 | {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
4145 | {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
4146 | diff --git a/include/linux/fs_pin.h b/include/linux/fs_pin.h |
4147 | index 9dc4e0384bfb..3886b3bffd7f 100644 |
4148 | --- a/include/linux/fs_pin.h |
4149 | +++ b/include/linux/fs_pin.h |
4150 | @@ -13,6 +13,8 @@ struct vfsmount; |
4151 | static inline void init_fs_pin(struct fs_pin *p, void (*kill)(struct fs_pin *)) |
4152 | { |
4153 | init_waitqueue_head(&p->wait); |
4154 | + INIT_HLIST_NODE(&p->s_list); |
4155 | + INIT_HLIST_NODE(&p->m_list); |
4156 | p->kill = kill; |
4157 | } |
4158 | |
4159 | diff --git a/include/linux/gfp.h b/include/linux/gfp.h |
4160 | index 51bd1e72a917..eb6fafe66bec 100644 |
4161 | --- a/include/linux/gfp.h |
4162 | +++ b/include/linux/gfp.h |
4163 | @@ -30,6 +30,7 @@ struct vm_area_struct; |
4164 | #define ___GFP_HARDWALL 0x20000u |
4165 | #define ___GFP_THISNODE 0x40000u |
4166 | #define ___GFP_RECLAIMABLE 0x80000u |
4167 | +#define ___GFP_NOACCOUNT 0x100000u |
4168 | #define ___GFP_NOTRACK 0x200000u |
4169 | #define ___GFP_NO_KSWAPD 0x400000u |
4170 | #define ___GFP_OTHER_NODE 0x800000u |
4171 | @@ -85,6 +86,7 @@ struct vm_area_struct; |
4172 | #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */ |
4173 | #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */ |
4174 | #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */ |
4175 | +#define __GFP_NOACCOUNT ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */ |
4176 | #define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK) /* Don't track with kmemcheck */ |
4177 | |
4178 | #define __GFP_NO_KSWAPD ((__force gfp_t)___GFP_NO_KSWAPD) |
4179 | diff --git a/include/linux/ktime.h b/include/linux/ktime.h |
4180 | index 5fc3d1083071..2b6a204bd8d4 100644 |
4181 | --- a/include/linux/ktime.h |
4182 | +++ b/include/linux/ktime.h |
4183 | @@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) |
4184 | } |
4185 | |
4186 | #if BITS_PER_LONG < 64 |
4187 | -extern u64 __ktime_divns(const ktime_t kt, s64 div); |
4188 | -static inline u64 ktime_divns(const ktime_t kt, s64 div) |
4189 | +extern s64 __ktime_divns(const ktime_t kt, s64 div); |
4190 | +static inline s64 ktime_divns(const ktime_t kt, s64 div) |
4191 | { |
4192 | + /* |
4193 | + * Negative divisors could cause an inf loop, |
4194 | + * so bug out here. |
4195 | + */ |
4196 | + BUG_ON(div < 0); |
4197 | if (__builtin_constant_p(div) && !(div >> 32)) { |
4198 | - u64 ns = kt.tv64; |
4199 | - do_div(ns, div); |
4200 | - return ns; |
4201 | + s64 ns = kt.tv64; |
4202 | + u64 tmp = ns < 0 ? -ns : ns; |
4203 | + |
4204 | + do_div(tmp, div); |
4205 | + return ns < 0 ? -tmp : tmp; |
4206 | } else { |
4207 | return __ktime_divns(kt, div); |
4208 | } |
4209 | } |
4210 | #else /* BITS_PER_LONG < 64 */ |
4211 | -# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) |
4212 | +static inline s64 ktime_divns(const ktime_t kt, s64 div) |
4213 | +{ |
4214 | + /* |
4215 | + * 32-bit implementation cannot handle negative divisors, |
4216 | + * so catch them on 64bit as well. |
4217 | + */ |
4218 | + WARN_ON(div < 0); |
4219 | + return kt.tv64 / div; |
4220 | +} |
4221 | #endif |
4222 | |
4223 | static inline s64 ktime_to_us(const ktime_t kt) |
4224 | diff --git a/include/linux/libata.h b/include/linux/libata.h |
4225 | index 6b08cc106c21..f8994b4b122c 100644 |
4226 | --- a/include/linux/libata.h |
4227 | +++ b/include/linux/libata.h |
4228 | @@ -205,6 +205,7 @@ enum { |
4229 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ |
4230 | ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ |
4231 | ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */ |
4232 | + ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */ |
4233 | |
4234 | /* struct ata_port flags */ |
4235 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ |
4236 | @@ -310,6 +311,12 @@ enum { |
4237 | */ |
4238 | ATA_TMOUT_PMP_SRST_WAIT = 5000, |
4239 | |
4240 | + /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might |
4241 | + * be a spurious PHY event, so ignore the first PHY event that |
4242 | + * occurs within 10s after the policy change. |
4243 | + */ |
4244 | + ATA_TMOUT_SPURIOUS_PHY = 10000, |
4245 | + |
4246 | /* ATA bus states */ |
4247 | BUS_UNKNOWN = 0, |
4248 | BUS_DMA = 1, |
4249 | @@ -789,6 +796,8 @@ struct ata_link { |
4250 | struct ata_eh_context eh_context; |
4251 | |
4252 | struct ata_device device[ATA_MAX_DEVICES]; |
4253 | + |
4254 | + unsigned long last_lpm_change; /* when last LPM change happened */ |
4255 | }; |
4256 | #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) |
4257 | #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) |
4258 | @@ -1202,6 +1211,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev); |
4259 | extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); |
4260 | extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); |
4261 | extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q); |
4262 | +extern bool sata_lpm_ignore_phy_events(struct ata_link *link); |
4263 | |
4264 | extern int ata_cable_40wire(struct ata_port *ap); |
4265 | extern int ata_cable_80wire(struct ata_port *ap); |
4266 | diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h |
4267 | index 72dff5fb0d0c..6c8918114804 100644 |
4268 | --- a/include/linux/memcontrol.h |
4269 | +++ b/include/linux/memcontrol.h |
4270 | @@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) |
4271 | if (!memcg_kmem_enabled()) |
4272 | return true; |
4273 | |
4274 | + if (gfp & __GFP_NOACCOUNT) |
4275 | + return true; |
4276 | /* |
4277 | * __GFP_NOFAIL allocations will move on even if charging is not |
4278 | * possible. Therefore we don't even try, and have this allocation |
4279 | @@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) |
4280 | { |
4281 | if (!memcg_kmem_enabled()) |
4282 | return cachep; |
4283 | + if (gfp & __GFP_NOACCOUNT) |
4284 | + return cachep; |
4285 | if (gfp & __GFP_NOFAIL) |
4286 | return cachep; |
4287 | if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) |
4288 | diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h |
4289 | index 6341f5be6e24..a30b172df6e1 100644 |
4290 | --- a/include/linux/sched/rt.h |
4291 | +++ b/include/linux/sched/rt.h |
4292 | @@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p) |
4293 | #ifdef CONFIG_RT_MUTEXES |
4294 | extern int rt_mutex_getprio(struct task_struct *p); |
4295 | extern void rt_mutex_setprio(struct task_struct *p, int prio); |
4296 | -extern int rt_mutex_check_prio(struct task_struct *task, int newprio); |
4297 | +extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio); |
4298 | extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); |
4299 | extern void rt_mutex_adjust_pi(struct task_struct *p); |
4300 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) |
4301 | @@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p) |
4302 | return p->normal_prio; |
4303 | } |
4304 | |
4305 | -static inline int rt_mutex_check_prio(struct task_struct *task, int newprio) |
4306 | +static inline int rt_mutex_get_effective_prio(struct task_struct *task, |
4307 | + int newprio) |
4308 | { |
4309 | - return 0; |
4310 | + return newprio; |
4311 | } |
4312 | |
4313 | static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) |
4314 | diff --git a/include/linux/tty.h b/include/linux/tty.h |
4315 | index 358a337af598..790752ac074a 100644 |
4316 | --- a/include/linux/tty.h |
4317 | +++ b/include/linux/tty.h |
4318 | @@ -339,6 +339,7 @@ struct tty_file_private { |
4319 | #define TTY_EXCLUSIVE 3 /* Exclusive open mode */ |
4320 | #define TTY_DEBUG 4 /* Debugging */ |
4321 | #define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ |
4322 | +#define TTY_OTHER_DONE 6 /* Closed pty has completed input processing */ |
4323 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ |
4324 | #define TTY_PTY_LOCK 16 /* pty private */ |
4325 | #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ |
4326 | @@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp); |
4327 | extern void do_SAK(struct tty_struct *tty); |
4328 | extern void __do_SAK(struct tty_struct *tty); |
4329 | extern void no_tty(void); |
4330 | -extern void tty_flush_to_ldisc(struct tty_struct *tty); |
4331 | extern void tty_buffer_free_all(struct tty_port *port); |
4332 | extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld); |
4333 | extern void tty_buffer_init(struct tty_port *port); |
4334 | diff --git a/include/xen/events.h b/include/xen/events.h |
4335 | index 5321cd9636e6..7d95fdf9cf3e 100644 |
4336 | --- a/include/xen/events.h |
4337 | +++ b/include/xen/events.h |
4338 | @@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn, |
4339 | irq_handler_t handler, |
4340 | unsigned long irqflags, const char *devname, |
4341 | void *dev_id); |
4342 | -int bind_virq_to_irq(unsigned int virq, unsigned int cpu); |
4343 | +int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu); |
4344 | int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu, |
4345 | irq_handler_t handler, |
4346 | unsigned long irqflags, const char *devname, |
4347 | diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c |
4348 | index 6357265a31ad..ce9108c059fb 100644 |
4349 | --- a/kernel/locking/rtmutex.c |
4350 | +++ b/kernel/locking/rtmutex.c |
4351 | @@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task) |
4352 | } |
4353 | |
4354 | /* |
4355 | - * Called by sched_setscheduler() to check whether the priority change |
4356 | - * is overruled by a possible priority boosting. |
4357 | + * Called by sched_setscheduler() to get the priority which will be |
4358 | + * effective after the change. |
4359 | */ |
4360 | -int rt_mutex_check_prio(struct task_struct *task, int newprio) |
4361 | +int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) |
4362 | { |
4363 | if (!task_has_pi_waiters(task)) |
4364 | - return 0; |
4365 | + return newprio; |
4366 | |
4367 | - return task_top_pi_waiter(task)->task->prio <= newprio; |
4368 | + if (task_top_pi_waiter(task)->task->prio <= newprio) |
4369 | + return task_top_pi_waiter(task)->task->prio; |
4370 | + return newprio; |
4371 | } |
4372 | |
4373 | /* |
4374 | diff --git a/kernel/module.c b/kernel/module.c |
4375 | index ec53f594e9c9..538794ce3cc7 100644 |
4376 | --- a/kernel/module.c |
4377 | +++ b/kernel/module.c |
4378 | @@ -3366,6 +3366,9 @@ static int load_module(struct load_info *info, const char __user *uargs, |
4379 | module_bug_cleanup(mod); |
4380 | mutex_unlock(&module_mutex); |
4381 | |
4382 | + blocking_notifier_call_chain(&module_notify_list, |
4383 | + MODULE_STATE_GOING, mod); |
4384 | + |
4385 | /* we can't deallocate the module until we clear memory protection */ |
4386 | unset_module_init_ro_nx(mod); |
4387 | unset_module_core_ro_nx(mod); |
4388 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
4389 | index 3d5f6f6d14c2..f4da2cbbfd7f 100644 |
4390 | --- a/kernel/sched/core.c |
4391 | +++ b/kernel/sched/core.c |
4392 | @@ -3295,15 +3295,18 @@ static void __setscheduler_params(struct task_struct *p, |
4393 | |
4394 | /* Actually do priority change: must hold pi & rq lock. */ |
4395 | static void __setscheduler(struct rq *rq, struct task_struct *p, |
4396 | - const struct sched_attr *attr) |
4397 | + const struct sched_attr *attr, bool keep_boost) |
4398 | { |
4399 | __setscheduler_params(p, attr); |
4400 | |
4401 | /* |
4402 | - * If we get here, there was no pi waiters boosting the |
4403 | - * task. It is safe to use the normal prio. |
4404 | + * Keep a potential priority boosting if called from |
4405 | + * sched_setscheduler(). |
4406 | */ |
4407 | - p->prio = normal_prio(p); |
4408 | + if (keep_boost) |
4409 | + p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); |
4410 | + else |
4411 | + p->prio = normal_prio(p); |
4412 | |
4413 | if (dl_prio(p->prio)) |
4414 | p->sched_class = &dl_sched_class; |
4415 | @@ -3403,7 +3406,7 @@ static int __sched_setscheduler(struct task_struct *p, |
4416 | int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 : |
4417 | MAX_RT_PRIO - 1 - attr->sched_priority; |
4418 | int retval, oldprio, oldpolicy = -1, queued, running; |
4419 | - int policy = attr->sched_policy; |
4420 | + int new_effective_prio, policy = attr->sched_policy; |
4421 | unsigned long flags; |
4422 | const struct sched_class *prev_class; |
4423 | struct rq *rq; |
4424 | @@ -3585,15 +3588,14 @@ change: |
4425 | oldprio = p->prio; |
4426 | |
4427 | /* |
4428 | - * Special case for priority boosted tasks. |
4429 | - * |
4430 | - * If the new priority is lower or equal (user space view) |
4431 | - * than the current (boosted) priority, we just store the new |
4432 | + * Take priority boosted tasks into account. If the new |
4433 | + * effective priority is unchanged, we just store the new |
4434 | * normal parameters and do not touch the scheduler class and |
4435 | * the runqueue. This will be done when the task deboost |
4436 | * itself. |
4437 | */ |
4438 | - if (rt_mutex_check_prio(p, newprio)) { |
4439 | + new_effective_prio = rt_mutex_get_effective_prio(p, newprio); |
4440 | + if (new_effective_prio == oldprio) { |
4441 | __setscheduler_params(p, attr); |
4442 | task_rq_unlock(rq, p, &flags); |
4443 | return 0; |
4444 | @@ -3607,7 +3609,7 @@ change: |
4445 | put_prev_task(rq, p); |
4446 | |
4447 | prev_class = p->sched_class; |
4448 | - __setscheduler(rq, p, attr); |
4449 | + __setscheduler(rq, p, attr, true); |
4450 | |
4451 | if (running) |
4452 | p->sched_class->set_curr_task(rq); |
4453 | @@ -4382,10 +4384,7 @@ long __sched io_schedule_timeout(long timeout) |
4454 | long ret; |
4455 | |
4456 | current->in_iowait = 1; |
4457 | - if (old_iowait) |
4458 | - blk_schedule_flush_plug(current); |
4459 | - else |
4460 | - blk_flush_plug(current); |
4461 | + blk_schedule_flush_plug(current); |
4462 | |
4463 | delayacct_blkio_start(); |
4464 | rq = raw_rq(); |
4465 | @@ -7357,7 +7356,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p) |
4466 | queued = task_on_rq_queued(p); |
4467 | if (queued) |
4468 | dequeue_task(rq, p, 0); |
4469 | - __setscheduler(rq, p, &attr); |
4470 | + __setscheduler(rq, p, &attr, false); |
4471 | if (queued) { |
4472 | enqueue_task(rq, p, 0); |
4473 | resched_curr(rq); |
4474 | diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c |
4475 | index bee0c1f78091..38f586c076fe 100644 |
4476 | --- a/kernel/time/hrtimer.c |
4477 | +++ b/kernel/time/hrtimer.c |
4478 | @@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) |
4479 | /* |
4480 | * Divide a ktime value by a nanosecond value |
4481 | */ |
4482 | -u64 __ktime_divns(const ktime_t kt, s64 div) |
4483 | +s64 __ktime_divns(const ktime_t kt, s64 div) |
4484 | { |
4485 | - u64 dclc; |
4486 | int sft = 0; |
4487 | + s64 dclc; |
4488 | + u64 tmp; |
4489 | |
4490 | dclc = ktime_to_ns(kt); |
4491 | + tmp = dclc < 0 ? -dclc : dclc; |
4492 | + |
4493 | /* Make sure the divisor is less than 2^32: */ |
4494 | while (div >> 32) { |
4495 | sft++; |
4496 | div >>= 1; |
4497 | } |
4498 | - dclc >>= sft; |
4499 | - do_div(dclc, (unsigned long) div); |
4500 | - |
4501 | - return dclc; |
4502 | + tmp >>= sft; |
4503 | + do_div(tmp, (unsigned long) div); |
4504 | + return dclc < 0 ? -tmp : tmp; |
4505 | } |
4506 | EXPORT_SYMBOL_GPL(__ktime_divns); |
4507 | #endif /* BITS_PER_LONG >= 64 */ |
4508 | diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c |
4509 | index a28df5206d95..11649615c505 100644 |
4510 | --- a/lib/strnlen_user.c |
4511 | +++ b/lib/strnlen_user.c |
4512 | @@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, |
4513 | return res + find_zero(data) + 1 - align; |
4514 | } |
4515 | res += sizeof(unsigned long); |
4516 | - if (unlikely(max < sizeof(unsigned long))) |
4517 | + /* We already handled 'unsigned long' bytes. Did we do it all ? */ |
4518 | + if (unlikely(max <= sizeof(unsigned long))) |
4519 | break; |
4520 | max -= sizeof(unsigned long); |
4521 | if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) |
4522 | diff --git a/mm/kmemleak.c b/mm/kmemleak.c |
4523 | index 5405aff5a590..f0fe4f2c1fa7 100644 |
4524 | --- a/mm/kmemleak.c |
4525 | +++ b/mm/kmemleak.c |
4526 | @@ -115,7 +115,8 @@ |
4527 | #define BYTES_PER_POINTER sizeof(void *) |
4528 | |
4529 | /* GFP bitmask for kmemleak internal allocations */ |
4530 | -#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \ |
4531 | +#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \ |
4532 | + __GFP_NOACCOUNT)) | \ |
4533 | __GFP_NORETRY | __GFP_NOMEMALLOC | \ |
4534 | __GFP_NOWARN) |
4535 | |
4536 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
4537 | index de5dc5e12691..0f7d73b3e4b1 100644 |
4538 | --- a/mm/mempolicy.c |
4539 | +++ b/mm/mempolicy.c |
4540 | @@ -2517,7 +2517,7 @@ static void __init check_numabalancing_enable(void) |
4541 | if (numabalancing_override) |
4542 | set_numabalancing_state(numabalancing_override == 1); |
4543 | |
4544 | - if (nr_node_ids > 1 && !numabalancing_override) { |
4545 | + if (num_online_nodes() > 1 && !numabalancing_override) { |
4546 | pr_info("%s automatic NUMA balancing. " |
4547 | "Configure with numa_balancing= or the " |
4548 | "kernel.numa_balancing sysctl", |
4549 | diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c |
4550 | index 41a4abc7e98e..c4ec9239249a 100644 |
4551 | --- a/net/ceph/osd_client.c |
4552 | +++ b/net/ceph/osd_client.c |
4553 | @@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, |
4554 | if (list_empty(&req->r_osd_item)) |
4555 | req->r_osd = NULL; |
4556 | } |
4557 | - |
4558 | - list_del_init(&req->r_req_lru_item); /* can be on notarget */ |
4559 | ceph_osdc_put_request(req); |
4560 | } |
4561 | |
4562 | @@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend, |
4563 | err = __map_request(osdc, req, |
4564 | force_resend || force_resend_writes); |
4565 | dout("__map_request returned %d\n", err); |
4566 | - if (err == 0) |
4567 | - continue; /* no change and no osd was specified */ |
4568 | if (err < 0) |
4569 | continue; /* hrm! */ |
4570 | - if (req->r_osd == NULL) { |
4571 | - dout("tid %llu maps to no valid osd\n", req->r_tid); |
4572 | - needmap++; /* request a newer map */ |
4573 | - continue; |
4574 | - } |
4575 | + if (req->r_osd == NULL || err > 0) { |
4576 | + if (req->r_osd == NULL) { |
4577 | + dout("lingering %p tid %llu maps to no osd\n", |
4578 | + req, req->r_tid); |
4579 | + /* |
4580 | + * A homeless lingering request makes |
4581 | + * no sense, as it's job is to keep |
4582 | + * a particular OSD connection open. |
4583 | + * Request a newer map and kick the |
4584 | + * request, knowing that it won't be |
4585 | + * resent until we actually get a map |
4586 | + * that can tell us where to send it. |
4587 | + */ |
4588 | + needmap++; |
4589 | + } |
4590 | |
4591 | - dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid, |
4592 | - req->r_osd ? req->r_osd->o_osd : -1); |
4593 | - __register_request(osdc, req); |
4594 | - __unregister_linger_request(osdc, req); |
4595 | + dout("kicking lingering %p tid %llu osd%d\n", req, |
4596 | + req->r_tid, req->r_osd ? req->r_osd->o_osd : -1); |
4597 | + __register_request(osdc, req); |
4598 | + __unregister_linger_request(osdc, req); |
4599 | + } |
4600 | } |
4601 | reset_changed_osds(osdc); |
4602 | mutex_unlock(&osdc->request_mutex); |
4603 | diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h |
4604 | index 8d53d65bd2ab..81e8dc5cb7f9 100644 |
4605 | --- a/net/mac80211/ieee80211_i.h |
4606 | +++ b/net/mac80211/ieee80211_i.h |
4607 | @@ -204,6 +204,8 @@ enum ieee80211_packet_rx_flags { |
4608 | * @IEEE80211_RX_CMNTR: received on cooked monitor already |
4609 | * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported |
4610 | * to cfg80211_report_obss_beacon(). |
4611 | + * @IEEE80211_RX_REORDER_TIMER: this frame is released by the |
4612 | + * reorder buffer timeout timer, not the normal RX path |
4613 | * |
4614 | * These flags are used across handling multiple interfaces |
4615 | * for a single frame. |
4616 | @@ -211,6 +213,7 @@ enum ieee80211_packet_rx_flags { |
4617 | enum ieee80211_rx_flags { |
4618 | IEEE80211_RX_CMNTR = BIT(0), |
4619 | IEEE80211_RX_BEACON_REPORTED = BIT(1), |
4620 | + IEEE80211_RX_REORDER_TIMER = BIT(2), |
4621 | }; |
4622 | |
4623 | struct ieee80211_rx_data { |
4624 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
4625 | index 1eb730bf8752..4c887d053333 100644 |
4626 | --- a/net/mac80211/rx.c |
4627 | +++ b/net/mac80211/rx.c |
4628 | @@ -2106,7 +2106,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx) |
4629 | /* deliver to local stack */ |
4630 | skb->protocol = eth_type_trans(skb, dev); |
4631 | memset(skb->cb, 0, sizeof(skb->cb)); |
4632 | - if (rx->local->napi) |
4633 | + if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) && |
4634 | + rx->local->napi) |
4635 | napi_gro_receive(rx->local->napi, skb); |
4636 | else |
4637 | netif_receive_skb(skb); |
4638 | @@ -3215,7 +3216,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid) |
4639 | /* This is OK -- must be QoS data frame */ |
4640 | .security_idx = tid, |
4641 | .seqno_idx = tid, |
4642 | - .flags = 0, |
4643 | + .flags = IEEE80211_RX_REORDER_TIMER, |
4644 | }; |
4645 | struct tid_ampdu_rx *tid_agg_rx; |
4646 | |
4647 | diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c |
4648 | index a4220e92f0cc..efa3f48f1ec5 100644 |
4649 | --- a/net/mac80211/wep.c |
4650 | +++ b/net/mac80211/wep.c |
4651 | @@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local, |
4652 | |
4653 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); |
4654 | |
4655 | - if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN || |
4656 | - skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) |
4657 | + if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN)) |
4658 | return NULL; |
4659 | |
4660 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
4661 | @@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, |
4662 | size_t len; |
4663 | u8 rc4key[3 + WLAN_KEY_LEN_WEP104]; |
4664 | |
4665 | + if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN)) |
4666 | + return -1; |
4667 | + |
4668 | iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx); |
4669 | if (!iv) |
4670 | return -1; |
4671 | diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c |
4672 | index 1ec19f6f0c2b..eeeba5adee6d 100644 |
4673 | --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c |
4674 | +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c |
4675 | @@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, |
4676 | { |
4677 | u32 value_follows; |
4678 | int err; |
4679 | + struct page *scratch; |
4680 | + |
4681 | + scratch = alloc_page(GFP_KERNEL); |
4682 | + if (!scratch) |
4683 | + return -ENOMEM; |
4684 | + xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE); |
4685 | |
4686 | /* res->status */ |
4687 | err = gssx_dec_status(xdr, &res->status); |
4688 | if (err) |
4689 | - return err; |
4690 | + goto out_free; |
4691 | |
4692 | /* res->context_handle */ |
4693 | err = gssx_dec_bool(xdr, &value_follows); |
4694 | if (err) |
4695 | - return err; |
4696 | + goto out_free; |
4697 | if (value_follows) { |
4698 | err = gssx_dec_ctx(xdr, res->context_handle); |
4699 | if (err) |
4700 | - return err; |
4701 | + goto out_free; |
4702 | } else { |
4703 | res->context_handle = NULL; |
4704 | } |
4705 | @@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, |
4706 | /* res->output_token */ |
4707 | err = gssx_dec_bool(xdr, &value_follows); |
4708 | if (err) |
4709 | - return err; |
4710 | + goto out_free; |
4711 | if (value_follows) { |
4712 | err = gssx_dec_buffer(xdr, res->output_token); |
4713 | if (err) |
4714 | - return err; |
4715 | + goto out_free; |
4716 | } else { |
4717 | res->output_token = NULL; |
4718 | } |
4719 | @@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, |
4720 | /* res->delegated_cred_handle */ |
4721 | err = gssx_dec_bool(xdr, &value_follows); |
4722 | if (err) |
4723 | - return err; |
4724 | + goto out_free; |
4725 | if (value_follows) { |
4726 | /* we do not support upcall servers sending this data. */ |
4727 | - return -EINVAL; |
4728 | + err = -EINVAL; |
4729 | + goto out_free; |
4730 | } |
4731 | |
4732 | /* res->options */ |
4733 | err = gssx_dec_option_array(xdr, &res->options); |
4734 | |
4735 | +out_free: |
4736 | + __free_page(scratch); |
4737 | return err; |
4738 | } |
4739 | diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c |
4740 | index a8a1e14272a1..a002a6d1e6da 100644 |
4741 | --- a/sound/pci/hda/hda_intel.c |
4742 | +++ b/sound/pci/hda/hda_intel.c |
4743 | @@ -2108,6 +2108,8 @@ static const struct pci_device_id azx_ids[] = { |
4744 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
4745 | { PCI_DEVICE(0x1002, 0xaab0), |
4746 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
4747 | + { PCI_DEVICE(0x1002, 0xaac8), |
4748 | + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
4749 | /* VIA VT8251/VT8237A */ |
4750 | { PCI_DEVICE(0x1106, 0x3288), |
4751 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, |
4752 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
4753 | index da67ea8645a6..e27298bdcd6d 100644 |
4754 | --- a/sound/pci/hda/patch_conexant.c |
4755 | +++ b/sound/pci/hda/patch_conexant.c |
4756 | @@ -973,6 +973,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = { |
4757 | .patch = patch_conexant_auto }, |
4758 | { .id = 0x14f150b9, .name = "CX20665", |
4759 | .patch = patch_conexant_auto }, |
4760 | + { .id = 0x14f150f1, .name = "CX20721", |
4761 | + .patch = patch_conexant_auto }, |
4762 | + { .id = 0x14f150f2, .name = "CX20722", |
4763 | + .patch = patch_conexant_auto }, |
4764 | + { .id = 0x14f150f3, .name = "CX20723", |
4765 | + .patch = patch_conexant_auto }, |
4766 | + { .id = 0x14f150f4, .name = "CX20724", |
4767 | + .patch = patch_conexant_auto }, |
4768 | { .id = 0x14f1510f, .name = "CX20751/2", |
4769 | .patch = patch_conexant_auto }, |
4770 | { .id = 0x14f15110, .name = "CX20751/2", |
4771 | @@ -1007,6 +1015,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab"); |
4772 | MODULE_ALIAS("snd-hda-codec-id:14f150ac"); |
4773 | MODULE_ALIAS("snd-hda-codec-id:14f150b8"); |
4774 | MODULE_ALIAS("snd-hda-codec-id:14f150b9"); |
4775 | +MODULE_ALIAS("snd-hda-codec-id:14f150f1"); |
4776 | +MODULE_ALIAS("snd-hda-codec-id:14f150f2"); |
4777 | +MODULE_ALIAS("snd-hda-codec-id:14f150f3"); |
4778 | +MODULE_ALIAS("snd-hda-codec-id:14f150f4"); |
4779 | MODULE_ALIAS("snd-hda-codec-id:14f1510f"); |
4780 | MODULE_ALIAS("snd-hda-codec-id:14f15110"); |
4781 | MODULE_ALIAS("snd-hda-codec-id:14f15111"); |
4782 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
4783 | index 2fd490b1764b..93c78c3c4b95 100644 |
4784 | --- a/sound/pci/hda/patch_realtek.c |
4785 | +++ b/sound/pci/hda/patch_realtek.c |
4786 | @@ -5027,6 +5027,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
4787 | SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), |
4788 | SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), |
4789 | SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
4790 | + SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), |
4791 | SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC), |
4792 | SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC), |
4793 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC), |
4794 | @@ -5056,6 +5057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
4795 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4796 | SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK), |
4797 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), |
4798 | + SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK), |
4799 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
4800 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
4801 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |
4802 | @@ -5246,6 +5248,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
4803 | {0x17, 0x40000000}, |
4804 | {0x1d, 0x40700001}, |
4805 | {0x21, 0x02211050}), |
4806 | + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
4807 | + ALC255_STANDARD_PINS, |
4808 | + {0x12, 0x90a60180}, |
4809 | + {0x14, 0x90170130}, |
4810 | + {0x17, 0x40000000}, |
4811 | + {0x1d, 0x40700001}, |
4812 | + {0x21, 0x02211040}), |
4813 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
4814 | ALC256_STANDARD_PINS, |
4815 | {0x13, 0x40000000}), |
4816 | diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c |
4817 | index 2341fc334163..6ba0b5517c40 100644 |
4818 | --- a/sound/pci/hda/thinkpad_helper.c |
4819 | +++ b/sound/pci/hda/thinkpad_helper.c |
4820 | @@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec, |
4821 | if (led_set_func(TPACPI_LED_MUTE, false) >= 0) { |
4822 | old_vmaster_hook = spec->vmaster_mute.hook; |
4823 | spec->vmaster_mute.hook = update_tpacpi_mute_led; |
4824 | - spec->vmaster_mute_enum = 1; |
4825 | removefunc = false; |
4826 | } |
4827 | if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) { |
4828 | diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c |
4829 | index 2ffb9a0570dc..3d44fc50e4d0 100644 |
4830 | --- a/sound/soc/codecs/mc13783.c |
4831 | +++ b/sound/soc/codecs/mc13783.c |
4832 | @@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec) |
4833 | AUDIO_SSI_SEL, 0); |
4834 | else |
4835 | mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC, |
4836 | - 0, AUDIO_SSI_SEL); |
4837 | + AUDIO_SSI_SEL, AUDIO_SSI_SEL); |
4838 | |
4839 | if (priv->dac_ssi_port == MC13783_SSI1_PORT) |
4840 | mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, |
4841 | AUDIO_SSI_SEL, 0); |
4842 | else |
4843 | mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC, |
4844 | - 0, AUDIO_SSI_SEL); |
4845 | + AUDIO_SSI_SEL, AUDIO_SSI_SEL); |
4846 | |
4847 | return 0; |
4848 | } |
4849 | diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c |
4850 | index dc7778b6dd7f..c3c33bd0df1c 100644 |
4851 | --- a/sound/soc/codecs/uda1380.c |
4852 | +++ b/sound/soc/codecs/uda1380.c |
4853 | @@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai, |
4854 | if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) |
4855 | return -EINVAL; |
4856 | |
4857 | - uda1380_write(codec, UDA1380_IFACE, iface); |
4858 | + uda1380_write_reg_cache(codec, UDA1380_IFACE, iface); |
4859 | |
4860 | return 0; |
4861 | } |
4862 | diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c |
4863 | index 3035d9856415..e97a7615df85 100644 |
4864 | --- a/sound/soc/codecs/wm8960.c |
4865 | +++ b/sound/soc/codecs/wm8960.c |
4866 | @@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = { |
4867 | { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", }, |
4868 | { "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */ |
4869 | { "Right Input Mixer", NULL, "RINPUT2" }, |
4870 | - { "Right Input Mixer", NULL, "LINPUT3" }, |
4871 | + { "Right Input Mixer", NULL, "RINPUT3" }, |
4872 | |
4873 | { "Left ADC", NULL, "Left Input Mixer" }, |
4874 | { "Right ADC", NULL, "Right Input Mixer" }, |
4875 | diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c |
4876 | index 4fbc7689339a..a1c04dab6684 100644 |
4877 | --- a/sound/soc/codecs/wm8994.c |
4878 | +++ b/sound/soc/codecs/wm8994.c |
4879 | @@ -2754,7 +2754,7 @@ static struct { |
4880 | }; |
4881 | |
4882 | static int fs_ratios[] = { |
4883 | - 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536 |
4884 | + 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536 |
4885 | }; |
4886 | |
4887 | static int bclk_divs[] = { |
4888 | diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c |
4889 | index b6f88202b8c9..e19a6765bd8a 100644 |
4890 | --- a/sound/soc/soc-dapm.c |
4891 | +++ b/sound/soc/soc-dapm.c |
4892 | @@ -3074,11 +3074,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm, |
4893 | } |
4894 | |
4895 | prefix = soc_dapm_prefix(dapm); |
4896 | - if (prefix) |
4897 | + if (prefix) { |
4898 | w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); |
4899 | - else |
4900 | + if (widget->sname) |
4901 | + w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix, |
4902 | + widget->sname); |
4903 | + } else { |
4904 | w->name = kasprintf(GFP_KERNEL, "%s", widget->name); |
4905 | - |
4906 | + if (widget->sname) |
4907 | + w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname); |
4908 | + } |
4909 | if (w->name == NULL) { |
4910 | kfree(w); |
4911 | return NULL; |
4912 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
4913 | index 32631a86078b..e21ec5abcc3a 100644 |
4914 | --- a/sound/usb/quirks.c |
4915 | +++ b/sound/usb/quirks.c |
4916 | @@ -1117,6 +1117,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) |
4917 | switch (chip->usb_id) { |
4918 | case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ |
4919 | case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ |
4920 | + case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ |
4921 | + case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ |
4922 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
4923 | return true; |
4924 | } |
4925 | diff --git a/tools/vm/Makefile b/tools/vm/Makefile |
4926 | index ac884b65a072..93aadaf7ff63 100644 |
4927 | --- a/tools/vm/Makefile |
4928 | +++ b/tools/vm/Makefile |
4929 | @@ -3,7 +3,7 @@ |
4930 | TARGETS=page-types slabinfo page_owner_sort |
4931 | |
4932 | LIB_DIR = ../lib/api |
4933 | -LIBS = $(LIB_DIR)/libapikfs.a |
4934 | +LIBS = $(LIB_DIR)/libapi.a |
4935 | |
4936 | CC = $(CROSS_COMPILE)gcc |
4937 | CFLAGS = -Wall -Wextra -I../lib/ |