Contents of /trunk/kernel-magellan/patches-4.2/0101-4.2.2-all-fixes.patch
Parent Directory | Revision Log
Revision 2707 -
(show annotations)
(download)
Mon Nov 2 12:49:12 2015 UTC (8 years, 10 months ago) by niro
File size: 168501 byte(s)
-linux-4.2.2
1 | diff --git a/Makefile b/Makefile |
2 | index a03efc18aa48..3578b4426ecf 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,6 +1,6 @@ |
6 | VERSION = 4 |
7 | PATCHLEVEL = 2 |
8 | -SUBLEVEL = 1 |
9 | +SUBLEVEL = 2 |
10 | EXTRAVERSION = |
11 | NAME = Hurr durr I'ma sheep |
12 | |
13 | diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c |
14 | index bd245d34952d..a0765e7ed6c7 100644 |
15 | --- a/arch/arm/boot/compressed/decompress.c |
16 | +++ b/arch/arm/boot/compressed/decompress.c |
17 | @@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2); |
18 | |
19 | int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) |
20 | { |
21 | - return decompress(input, len, NULL, NULL, output, NULL, error); |
22 | + return __decompress(input, len, NULL, NULL, output, 0, NULL, error); |
23 | } |
24 | diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c |
25 | index bc738d2b8392..f9c341c5ae78 100644 |
26 | --- a/arch/arm/kvm/arm.c |
27 | +++ b/arch/arm/kvm/arm.c |
28 | @@ -449,7 +449,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) |
29 | * Map the VGIC hardware resources before running a vcpu the first |
30 | * time on this VM. |
31 | */ |
32 | - if (unlikely(!vgic_ready(kvm))) { |
33 | + if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { |
34 | ret = kvm_vgic_map_resources(kvm); |
35 | if (ret) |
36 | return ret; |
37 | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig |
38 | index 318175f62c24..735456feb08e 100644 |
39 | --- a/arch/arm64/Kconfig |
40 | +++ b/arch/arm64/Kconfig |
41 | @@ -104,6 +104,10 @@ config NO_IOPORT_MAP |
42 | config STACKTRACE_SUPPORT |
43 | def_bool y |
44 | |
45 | +config ILLEGAL_POINTER_VALUE |
46 | + hex |
47 | + default 0xdead000000000000 |
48 | + |
49 | config LOCKDEP_SUPPORT |
50 | def_bool y |
51 | |
52 | @@ -417,6 +421,22 @@ config ARM64_ERRATUM_845719 |
53 | |
54 | If unsure, say Y. |
55 | |
56 | +config ARM64_ERRATUM_843419 |
57 | + bool "Cortex-A53: 843419: A load or store might access an incorrect address" |
58 | + depends on MODULES |
59 | + default y |
60 | + help |
61 | + This option builds kernel modules using the large memory model in |
62 | + order to avoid the use of the ADRP instruction, which can cause |
63 | + a subsequent memory access to use an incorrect address on Cortex-A53 |
64 | + parts up to r0p4. |
65 | + |
66 | + Note that the kernel itself must be linked with a version of ld |
67 | + which fixes potentially affected ADRP instructions through the |
68 | + use of veneers. |
69 | + |
70 | + If unsure, say Y. |
71 | + |
72 | endmenu |
73 | |
74 | |
75 | diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile |
76 | index 4d2a925998f9..81151663ef38 100644 |
77 | --- a/arch/arm64/Makefile |
78 | +++ b/arch/arm64/Makefile |
79 | @@ -30,6 +30,10 @@ endif |
80 | |
81 | CHECKFLAGS += -D__aarch64__ |
82 | |
83 | +ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) |
84 | +CFLAGS_MODULE += -mcmodel=large |
85 | +endif |
86 | + |
87 | # Default value |
88 | head-y := arch/arm64/kernel/head.o |
89 | |
90 | diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h |
91 | index f800d45ea226..44a59c20e773 100644 |
92 | --- a/arch/arm64/include/asm/memory.h |
93 | +++ b/arch/arm64/include/asm/memory.h |
94 | @@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr; |
95 | #define PHYS_OFFSET ({ memstart_addr; }) |
96 | |
97 | /* |
98 | + * The maximum physical address that the linear direct mapping |
99 | + * of system RAM can cover. (PAGE_OFFSET can be interpreted as |
100 | + * a 2's complement signed quantity and negated to derive the |
101 | + * maximum size of the linear mapping.) |
102 | + */ |
103 | +#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; }) |
104 | + |
105 | +/* |
106 | * PFNs are used to describe any physical page; this means |
107 | * PFN 0 == physical address 0. |
108 | * |
109 | diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
110 | index e16351819fed..8213ca15abd2 100644 |
111 | --- a/arch/arm64/kernel/entry.S |
112 | +++ b/arch/arm64/kernel/entry.S |
113 | @@ -116,7 +116,7 @@ |
114 | */ |
115 | .endm |
116 | |
117 | - .macro kernel_exit, el, ret = 0 |
118 | + .macro kernel_exit, el |
119 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR |
120 | .if \el == 0 |
121 | ct_user_enter |
122 | @@ -146,11 +146,7 @@ |
123 | .endif |
124 | msr elr_el1, x21 // set up the return data |
125 | msr spsr_el1, x22 |
126 | - .if \ret |
127 | - ldr x1, [sp, #S_X1] // preserve x0 (syscall return) |
128 | - .else |
129 | ldp x0, x1, [sp, #16 * 0] |
130 | - .endif |
131 | ldp x2, x3, [sp, #16 * 1] |
132 | ldp x4, x5, [sp, #16 * 2] |
133 | ldp x6, x7, [sp, #16 * 3] |
134 | @@ -613,22 +609,21 @@ ENDPROC(cpu_switch_to) |
135 | */ |
136 | ret_fast_syscall: |
137 | disable_irq // disable interrupts |
138 | + str x0, [sp, #S_X0] // returned x0 |
139 | ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing |
140 | and x2, x1, #_TIF_SYSCALL_WORK |
141 | cbnz x2, ret_fast_syscall_trace |
142 | and x2, x1, #_TIF_WORK_MASK |
143 | - cbnz x2, fast_work_pending |
144 | + cbnz x2, work_pending |
145 | enable_step_tsk x1, x2 |
146 | - kernel_exit 0, ret = 1 |
147 | + kernel_exit 0 |
148 | ret_fast_syscall_trace: |
149 | enable_irq // enable interrupts |
150 | - b __sys_trace_return |
151 | + b __sys_trace_return_skipped // we already saved x0 |
152 | |
153 | /* |
154 | * Ok, we need to do extra processing, enter the slow path. |
155 | */ |
156 | -fast_work_pending: |
157 | - str x0, [sp, #S_X0] // returned x0 |
158 | work_pending: |
159 | tbnz x1, #TIF_NEED_RESCHED, work_resched |
160 | /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ |
161 | @@ -652,7 +647,7 @@ ret_to_user: |
162 | cbnz x2, work_pending |
163 | enable_step_tsk x1, x2 |
164 | no_work_pending: |
165 | - kernel_exit 0, ret = 0 |
166 | + kernel_exit 0 |
167 | ENDPROC(ret_to_user) |
168 | |
169 | /* |
170 | diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c |
171 | index 44d6f7545505..c56956a16d3f 100644 |
172 | --- a/arch/arm64/kernel/fpsimd.c |
173 | +++ b/arch/arm64/kernel/fpsimd.c |
174 | @@ -158,6 +158,7 @@ void fpsimd_thread_switch(struct task_struct *next) |
175 | void fpsimd_flush_thread(void) |
176 | { |
177 | memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); |
178 | + fpsimd_flush_task_state(current); |
179 | set_thread_flag(TIF_FOREIGN_FPSTATE); |
180 | } |
181 | |
182 | diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S |
183 | index c0ff3ce4299e..370541162658 100644 |
184 | --- a/arch/arm64/kernel/head.S |
185 | +++ b/arch/arm64/kernel/head.S |
186 | @@ -528,6 +528,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems |
187 | msr hstr_el2, xzr // Disable CP15 traps to EL2 |
188 | #endif |
189 | |
190 | + /* EL2 debug */ |
191 | + mrs x0, pmcr_el0 // Disable debug access traps |
192 | + ubfx x0, x0, #11, #5 // to EL2 and allow access to |
193 | + msr mdcr_el2, x0 // all PMU counters from EL1 |
194 | + |
195 | /* Stage-2 translation */ |
196 | msr vttbr_el2, xzr |
197 | |
198 | diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c |
199 | index 67bf4107f6ef..876eb8df50bf 100644 |
200 | --- a/arch/arm64/kernel/module.c |
201 | +++ b/arch/arm64/kernel/module.c |
202 | @@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, |
203 | ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, |
204 | AARCH64_INSN_IMM_ADR); |
205 | break; |
206 | +#ifndef CONFIG_ARM64_ERRATUM_843419 |
207 | case R_AARCH64_ADR_PREL_PG_HI21_NC: |
208 | overflow_check = false; |
209 | case R_AARCH64_ADR_PREL_PG_HI21: |
210 | ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, |
211 | AARCH64_INSN_IMM_ADR); |
212 | break; |
213 | +#endif |
214 | case R_AARCH64_ADD_ABS_LO12_NC: |
215 | case R_AARCH64_LDST8_ABS_LO12_NC: |
216 | overflow_check = false; |
217 | diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c |
218 | index 948f0ad2de23..71ef6dc89ae5 100644 |
219 | --- a/arch/arm64/kernel/signal32.c |
220 | +++ b/arch/arm64/kernel/signal32.c |
221 | @@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) |
222 | |
223 | /* |
224 | * VFP save/restore code. |
225 | + * |
226 | + * We have to be careful with endianness, since the fpsimd context-switch |
227 | + * code operates on 128-bit (Q) register values whereas the compat ABI |
228 | + * uses an array of 64-bit (D) registers. Consequently, we need to swap |
229 | + * the two halves of each Q register when running on a big-endian CPU. |
230 | */ |
231 | +union __fpsimd_vreg { |
232 | + __uint128_t raw; |
233 | + struct { |
234 | +#ifdef __AARCH64EB__ |
235 | + u64 hi; |
236 | + u64 lo; |
237 | +#else |
238 | + u64 lo; |
239 | + u64 hi; |
240 | +#endif |
241 | + }; |
242 | +}; |
243 | + |
244 | static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) |
245 | { |
246 | struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; |
247 | compat_ulong_t magic = VFP_MAGIC; |
248 | compat_ulong_t size = VFP_STORAGE_SIZE; |
249 | compat_ulong_t fpscr, fpexc; |
250 | - int err = 0; |
251 | + int i, err = 0; |
252 | |
253 | /* |
254 | * Save the hardware registers to the fpsimd_state structure. |
255 | @@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) |
256 | /* |
257 | * Now copy the FP registers. Since the registers are packed, |
258 | * we can copy the prefix we want (V0-V15) as it is. |
259 | - * FIXME: Won't work if big endian. |
260 | */ |
261 | - err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, |
262 | - sizeof(frame->ufp.fpregs)); |
263 | + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { |
264 | + union __fpsimd_vreg vreg = { |
265 | + .raw = fpsimd->vregs[i >> 1], |
266 | + }; |
267 | + |
268 | + __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); |
269 | + __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); |
270 | + } |
271 | |
272 | /* Create an AArch32 fpscr from the fpsr and the fpcr. */ |
273 | fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | |
274 | @@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) |
275 | compat_ulong_t magic = VFP_MAGIC; |
276 | compat_ulong_t size = VFP_STORAGE_SIZE; |
277 | compat_ulong_t fpscr; |
278 | - int err = 0; |
279 | + int i, err = 0; |
280 | |
281 | __get_user_error(magic, &frame->magic, err); |
282 | __get_user_error(size, &frame->size, err); |
283 | @@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) |
284 | if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) |
285 | return -EINVAL; |
286 | |
287 | - /* |
288 | - * Copy the FP registers into the start of the fpsimd_state. |
289 | - * FIXME: Won't work if big endian. |
290 | - */ |
291 | - err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, |
292 | - sizeof(frame->ufp.fpregs)); |
293 | + /* Copy the FP registers into the start of the fpsimd_state. */ |
294 | + for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { |
295 | + union __fpsimd_vreg vreg; |
296 | + |
297 | + __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); |
298 | + __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); |
299 | + fpsimd.vregs[i >> 1] = vreg.raw; |
300 | + } |
301 | |
302 | /* Extract the fpsr and the fpcr from the fpscr */ |
303 | __get_user_error(fpscr, &frame->ufp.fpscr, err); |
304 | diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S |
305 | index 17a8fb14f428..3c6051cbf442 100644 |
306 | --- a/arch/arm64/kvm/hyp.S |
307 | +++ b/arch/arm64/kvm/hyp.S |
308 | @@ -840,8 +840,6 @@ |
309 | mrs x3, cntv_ctl_el0 |
310 | and x3, x3, #3 |
311 | str w3, [x0, #VCPU_TIMER_CNTV_CTL] |
312 | - bic x3, x3, #1 // Clear Enable |
313 | - msr cntv_ctl_el0, x3 |
314 | |
315 | isb |
316 | |
317 | @@ -849,6 +847,9 @@ |
318 | str x3, [x0, #VCPU_TIMER_CNTV_CVAL] |
319 | |
320 | 1: |
321 | + // Disable the virtual timer |
322 | + msr cntv_ctl_el0, xzr |
323 | + |
324 | // Allow physical timer/counter access for the host |
325 | mrs x2, cnthctl_el2 |
326 | orr x2, x2, #3 |
327 | @@ -943,13 +944,15 @@ ENTRY(__kvm_vcpu_run) |
328 | // Guest context |
329 | add x2, x0, #VCPU_CONTEXT |
330 | |
331 | + // We must restore the 32-bit state before the sysregs, thanks |
332 | + // to Cortex-A57 erratum #852523. |
333 | + restore_guest_32bit_state |
334 | bl __restore_sysregs |
335 | bl __restore_fpsimd |
336 | |
337 | skip_debug_state x3, 1f |
338 | bl __restore_debug |
339 | 1: |
340 | - restore_guest_32bit_state |
341 | restore_guest_regs |
342 | |
343 | // That's it, no more messing around. |
344 | diff --git a/arch/h8300/boot/compressed/misc.c b/arch/h8300/boot/compressed/misc.c |
345 | index 704274127c07..c4f2cfcb117b 100644 |
346 | --- a/arch/h8300/boot/compressed/misc.c |
347 | +++ b/arch/h8300/boot/compressed/misc.c |
348 | @@ -70,5 +70,5 @@ void decompress_kernel(void) |
349 | free_mem_ptr = (unsigned long)&_end; |
350 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
351 | |
352 | - decompress(input_data, input_len, NULL, NULL, output, NULL, error); |
353 | + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); |
354 | } |
355 | diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c |
356 | index 28a09529f206..3a7692745868 100644 |
357 | --- a/arch/m32r/boot/compressed/misc.c |
358 | +++ b/arch/m32r/boot/compressed/misc.c |
359 | @@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data, |
360 | free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE; |
361 | |
362 | puts("\nDecompressing Linux... "); |
363 | - decompress(input_data, input_len, NULL, NULL, output_data, NULL, error); |
364 | + __decompress(input_data, input_len, NULL, NULL, output_data, 0, |
365 | + NULL, error); |
366 | puts("done.\nBooting the kernel.\n"); |
367 | } |
368 | diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c |
369 | index 54831069a206..080cd53bac36 100644 |
370 | --- a/arch/mips/boot/compressed/decompress.c |
371 | +++ b/arch/mips/boot/compressed/decompress.c |
372 | @@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start) |
373 | puts("\n"); |
374 | |
375 | /* Decompress the kernel with according algorithm */ |
376 | - decompress((char *)zimage_start, zimage_size, 0, 0, |
377 | - (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error); |
378 | + __decompress((char *)zimage_start, zimage_size, 0, 0, |
379 | + (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); |
380 | |
381 | /* FIXME: should we flush cache here? */ |
382 | puts("Now, booting the kernel...\n"); |
383 | diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S |
384 | index 1b6ca634e646..9f71c06aebf6 100644 |
385 | --- a/arch/mips/kernel/cps-vec.S |
386 | +++ b/arch/mips/kernel/cps-vec.S |
387 | @@ -152,7 +152,7 @@ dcache_done: |
388 | |
389 | /* Enter the coherent domain */ |
390 | li t0, 0xff |
391 | - PTR_S t0, GCR_CL_COHERENCE_OFS(v1) |
392 | + sw t0, GCR_CL_COHERENCE_OFS(v1) |
393 | ehb |
394 | |
395 | /* Jump to kseg0 */ |
396 | @@ -302,7 +302,7 @@ LEAF(mips_cps_boot_vpes) |
397 | PTR_L t0, 0(t0) |
398 | |
399 | /* Calculate a pointer to this cores struct core_boot_config */ |
400 | - PTR_L t0, GCR_CL_ID_OFS(t0) |
401 | + lw t0, GCR_CL_ID_OFS(t0) |
402 | li t1, COREBOOTCFG_SIZE |
403 | mul t0, t0, t1 |
404 | PTR_LA t1, mips_cps_core_bootcfg |
405 | diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c |
406 | index 712f17a2ecf2..f0f1b98a5fde 100644 |
407 | --- a/arch/mips/math-emu/cp1emu.c |
408 | +++ b/arch/mips/math-emu/cp1emu.c |
409 | @@ -1137,7 +1137,7 @@ emul: |
410 | break; |
411 | |
412 | case mfhc_op: |
413 | - if (!cpu_has_mips_r2) |
414 | + if (!cpu_has_mips_r2_r6) |
415 | goto sigill; |
416 | |
417 | /* copregister rd -> gpr[rt] */ |
418 | @@ -1148,7 +1148,7 @@ emul: |
419 | break; |
420 | |
421 | case mthc_op: |
422 | - if (!cpu_has_mips_r2) |
423 | + if (!cpu_has_mips_r2_r6) |
424 | goto sigill; |
425 | |
426 | /* copregister rd <- gpr[rt] */ |
427 | @@ -1181,6 +1181,24 @@ emul: |
428 | } |
429 | break; |
430 | |
431 | + case bc1eqz_op: |
432 | + case bc1nez_op: |
433 | + if (!cpu_has_mips_r6 || delay_slot(xcp)) |
434 | + return SIGILL; |
435 | + |
436 | + cond = likely = 0; |
437 | + switch (MIPSInst_RS(ir)) { |
438 | + case bc1eqz_op: |
439 | + if (get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1) |
440 | + cond = 1; |
441 | + break; |
442 | + case bc1nez_op: |
443 | + if (!(get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)) |
444 | + cond = 1; |
445 | + break; |
446 | + } |
447 | + goto branch_common; |
448 | + |
449 | case bc_op: |
450 | if (delay_slot(xcp)) |
451 | return SIGILL; |
452 | @@ -1207,7 +1225,7 @@ emul: |
453 | case bct_op: |
454 | break; |
455 | } |
456 | - |
457 | +branch_common: |
458 | set_delay_slot(xcp); |
459 | if (cond) { |
460 | /* |
461 | diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c |
462 | index f3191db6e2e9..c0eab24f6a9e 100644 |
463 | --- a/arch/parisc/kernel/irq.c |
464 | +++ b/arch/parisc/kernel/irq.c |
465 | @@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs) |
466 | struct pt_regs *old_regs; |
467 | unsigned long eirr_val; |
468 | int irq, cpu = smp_processor_id(); |
469 | -#ifdef CONFIG_SMP |
470 | struct irq_desc *desc; |
471 | +#ifdef CONFIG_SMP |
472 | cpumask_t dest; |
473 | #endif |
474 | |
475 | @@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs) |
476 | goto set_out; |
477 | irq = eirr_to_irq(eirr_val); |
478 | |
479 | -#ifdef CONFIG_SMP |
480 | + /* Filter out spurious interrupts, mostly from serial port at bootup */ |
481 | desc = irq_to_desc(irq); |
482 | + if (unlikely(!desc->action)) |
483 | + goto set_out; |
484 | + |
485 | +#ifdef CONFIG_SMP |
486 | cpumask_copy(&dest, desc->irq_data.affinity); |
487 | if (irqd_is_per_cpu(&desc->irq_data) && |
488 | !cpumask_test_cpu(smp_processor_id(), &dest)) { |
489 | diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S |
490 | index 7ef22e3387e0..0b8d26d3ba43 100644 |
491 | --- a/arch/parisc/kernel/syscall.S |
492 | +++ b/arch/parisc/kernel/syscall.S |
493 | @@ -821,7 +821,7 @@ cas2_action: |
494 | /* 64bit CAS */ |
495 | #ifdef CONFIG_64BIT |
496 | 19: ldd,ma 0(%sr3,%r26), %r29 |
497 | - sub,= %r29, %r25, %r0 |
498 | + sub,*= %r29, %r25, %r0 |
499 | b,n cas2_end |
500 | 20: std,ma %r24, 0(%sr3,%r26) |
501 | copy %r0, %r28 |
502 | diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile |
503 | index 73eddda53b8e..4eec430d8fa8 100644 |
504 | --- a/arch/powerpc/boot/Makefile |
505 | +++ b/arch/powerpc/boot/Makefile |
506 | @@ -28,6 +28,9 @@ BOOTCFLAGS += -m64 |
507 | endif |
508 | ifdef CONFIG_CPU_BIG_ENDIAN |
509 | BOOTCFLAGS += -mbig-endian |
510 | +else |
511 | +BOOTCFLAGS += -mlittle-endian |
512 | +BOOTCFLAGS += $(call cc-option,-mabi=elfv2) |
513 | endif |
514 | |
515 | BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc |
516 | diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h |
517 | index 3bb7488bd24b..7ee2300ee392 100644 |
518 | --- a/arch/powerpc/include/asm/pgtable-ppc64.h |
519 | +++ b/arch/powerpc/include/asm/pgtable-ppc64.h |
520 | @@ -135,7 +135,19 @@ |
521 | #define pte_iterate_hashed_end() } while(0) |
522 | |
523 | #ifdef CONFIG_PPC_HAS_HASH_64K |
524 | -#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr) |
525 | +/* |
526 | + * We expect this to be called only for user addresses or kernel virtual |
527 | + * addresses other than the linear mapping. |
528 | + */ |
529 | +#define pte_pagesize_index(mm, addr, pte) \ |
530 | + ({ \ |
531 | + unsigned int psize; \ |
532 | + if (is_kernel_addr(addr)) \ |
533 | + psize = MMU_PAGE_4K; \ |
534 | + else \ |
535 | + psize = get_slice_psize(mm, addr); \ |
536 | + psize; \ |
537 | + }) |
538 | #else |
539 | #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K |
540 | #endif |
541 | diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h |
542 | index 7a4ede16b283..b77ef369c0f0 100644 |
543 | --- a/arch/powerpc/include/asm/rtas.h |
544 | +++ b/arch/powerpc/include/asm/rtas.h |
545 | @@ -343,6 +343,7 @@ extern void rtas_power_off(void); |
546 | extern void rtas_halt(void); |
547 | extern void rtas_os_term(char *str); |
548 | extern int rtas_get_sensor(int sensor, int index, int *state); |
549 | +extern int rtas_get_sensor_fast(int sensor, int index, int *state); |
550 | extern int rtas_get_power_level(int powerdomain, int *level); |
551 | extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); |
552 | extern bool rtas_indicator_present(int token, int *maxindex); |
553 | diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h |
554 | index 58abeda64cb7..15cca17cba4b 100644 |
555 | --- a/arch/powerpc/include/asm/switch_to.h |
556 | +++ b/arch/powerpc/include/asm/switch_to.h |
557 | @@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} |
558 | |
559 | extern void enable_kernel_fp(void); |
560 | extern void enable_kernel_altivec(void); |
561 | +extern void enable_kernel_vsx(void); |
562 | extern int emulate_altivec(struct pt_regs *); |
563 | extern void __giveup_vsx(struct task_struct *); |
564 | extern void giveup_vsx(struct task_struct *); |
565 | diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c |
566 | index af9b597b10af..01c961d5d2de 100644 |
567 | --- a/arch/powerpc/kernel/eeh.c |
568 | +++ b/arch/powerpc/kernel/eeh.c |
569 | @@ -308,11 +308,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) |
570 | if (!(pe->type & EEH_PE_PHB)) { |
571 | if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) |
572 | eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); |
573 | + |
574 | + /* |
575 | + * The config space of some PCI devices can't be accessed |
576 | + * when their PEs are in frozen state. Otherwise, fenced |
577 | + * PHB might be seen. Those PEs are identified with flag |
578 | + * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED |
579 | + * is set automatically when the PE is put to EEH_PE_ISOLATED. |
580 | + * |
581 | + * Restoring BARs possibly triggers PCI config access in |
582 | + * (OPAL) firmware and then causes fenced PHB. If the |
583 | + * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's |
584 | + * pointless to restore BARs and dump config space. |
585 | + */ |
586 | eeh_ops->configure_bridge(pe); |
587 | - eeh_pe_restore_bars(pe); |
588 | + if (!(pe->state & EEH_PE_CFG_BLOCKED)) { |
589 | + eeh_pe_restore_bars(pe); |
590 | |
591 | - pci_regs_buf[0] = 0; |
592 | - eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); |
593 | + pci_regs_buf[0] = 0; |
594 | + eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); |
595 | + } |
596 | } |
597 | |
598 | eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); |
599 | @@ -1116,9 +1131,6 @@ void eeh_add_device_late(struct pci_dev *dev) |
600 | return; |
601 | } |
602 | |
603 | - if (eeh_has_flag(EEH_PROBE_MODE_DEV)) |
604 | - eeh_ops->probe(pdn, NULL); |
605 | - |
606 | /* |
607 | * The EEH cache might not be removed correctly because of |
608 | * unbalanced kref to the device during unplug time, which |
609 | @@ -1142,6 +1154,9 @@ void eeh_add_device_late(struct pci_dev *dev) |
610 | dev->dev.archdata.edev = NULL; |
611 | } |
612 | |
613 | + if (eeh_has_flag(EEH_PROBE_MODE_DEV)) |
614 | + eeh_ops->probe(pdn, NULL); |
615 | + |
616 | edev->pdev = dev; |
617 | dev->dev.archdata.edev = edev; |
618 | |
619 | diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c |
620 | index 8005e18d1b40..64e6e9d9e656 100644 |
621 | --- a/arch/powerpc/kernel/process.c |
622 | +++ b/arch/powerpc/kernel/process.c |
623 | @@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); |
624 | #endif /* CONFIG_ALTIVEC */ |
625 | |
626 | #ifdef CONFIG_VSX |
627 | -#if 0 |
628 | -/* not currently used, but some crazy RAID module might want to later */ |
629 | void enable_kernel_vsx(void) |
630 | { |
631 | WARN_ON(preemptible()); |
632 | @@ -220,7 +218,6 @@ void enable_kernel_vsx(void) |
633 | #endif /* CONFIG_SMP */ |
634 | } |
635 | EXPORT_SYMBOL(enable_kernel_vsx); |
636 | -#endif |
637 | |
638 | void giveup_vsx(struct task_struct *tsk) |
639 | { |
640 | diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c |
641 | index 7a488c108410..caffb10e7aa3 100644 |
642 | --- a/arch/powerpc/kernel/rtas.c |
643 | +++ b/arch/powerpc/kernel/rtas.c |
644 | @@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state) |
645 | } |
646 | EXPORT_SYMBOL(rtas_get_sensor); |
647 | |
648 | +int rtas_get_sensor_fast(int sensor, int index, int *state) |
649 | +{ |
650 | + int token = rtas_token("get-sensor-state"); |
651 | + int rc; |
652 | + |
653 | + if (token == RTAS_UNKNOWN_SERVICE) |
654 | + return -ENOENT; |
655 | + |
656 | + rc = rtas_call(token, 2, 2, state, sensor, index); |
657 | + WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN && |
658 | + rc <= RTAS_EXTENDED_DELAY_MAX)); |
659 | + |
660 | + if (rc < 0) |
661 | + return rtas_error_rc(rc); |
662 | + return rc; |
663 | +} |
664 | + |
665 | bool rtas_indicator_present(int token, int *maxindex) |
666 | { |
667 | int proplen, count, i; |
668 | diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c |
669 | index 43dafb9d6a46..4d87122cf6a7 100644 |
670 | --- a/arch/powerpc/mm/hugepage-hash64.c |
671 | +++ b/arch/powerpc/mm/hugepage-hash64.c |
672 | @@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, |
673 | BUG_ON(index >= 4096); |
674 | |
675 | vpn = hpt_vpn(ea, vsid, ssize); |
676 | - hash = hpt_hash(vpn, shift, ssize); |
677 | hpte_slot_array = get_hpte_slot_array(pmdp); |
678 | if (psize == MMU_PAGE_4K) { |
679 | /* |
680 | @@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, |
681 | valid = hpte_valid(hpte_slot_array, index); |
682 | if (valid) { |
683 | /* update the hpte bits */ |
684 | + hash = hpt_hash(vpn, shift, ssize); |
685 | hidx = hpte_hash_index(hpte_slot_array, index); |
686 | if (hidx & _PTEIDX_SECONDARY) |
687 | hash = ~hash; |
688 | @@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, |
689 | if (!valid) { |
690 | unsigned long hpte_group; |
691 | |
692 | + hash = hpt_hash(vpn, shift, ssize); |
693 | /* insert new entry */ |
694 | pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; |
695 | new_pmd |= _PAGE_HASHPTE; |
696 | diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c |
697 | index 85cbc96eff6c..8b64f89e68c9 100644 |
698 | --- a/arch/powerpc/platforms/powernv/pci-ioda.c |
699 | +++ b/arch/powerpc/platforms/powernv/pci-ioda.c |
700 | @@ -2078,9 +2078,23 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) |
701 | struct iommu_table *tbl = NULL; |
702 | long rc; |
703 | |
704 | + /* |
705 | + * crashkernel= specifies the kdump kernel's maximum memory at |
706 | + * some offset and there is no guaranteed the result is a power |
707 | + * of 2, which will cause errors later. |
708 | + */ |
709 | + const u64 max_memory = __rounddown_pow_of_two(memory_hotplug_max()); |
710 | + |
711 | + /* |
712 | + * In memory constrained environments, e.g. kdump kernel, the |
713 | + * DMA window can be larger than available memory, which will |
714 | + * cause errors later. |
715 | + */ |
716 | + const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); |
717 | + |
718 | rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, |
719 | IOMMU_PAGE_SHIFT_4K, |
720 | - pe->table_group.tce32_size, |
721 | + window_size, |
722 | POWERNV_IOMMU_DEFAULT_LEVELS, &tbl); |
723 | if (rc) { |
724 | pe_err(pe, "Failed to create 32-bit TCE table, err %ld", |
725 | diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c |
726 | index 47d9cebe7159..db17827eb746 100644 |
727 | --- a/arch/powerpc/platforms/pseries/dlpar.c |
728 | +++ b/arch/powerpc/platforms/pseries/dlpar.c |
729 | @@ -422,8 +422,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count) |
730 | |
731 | dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent); |
732 | of_node_put(parent); |
733 | - if (!dn) |
734 | + if (!dn) { |
735 | + dlpar_release_drc(drc_index); |
736 | return -EINVAL; |
737 | + } |
738 | |
739 | rc = dlpar_attach_node(dn); |
740 | if (rc) { |
741 | diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c |
742 | index 02e4a1745516..3b6647e574b6 100644 |
743 | --- a/arch/powerpc/platforms/pseries/ras.c |
744 | +++ b/arch/powerpc/platforms/pseries/ras.c |
745 | @@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) |
746 | int state; |
747 | int critical; |
748 | |
749 | - status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state); |
750 | + status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, |
751 | + &state); |
752 | |
753 | if (state > 3) |
754 | critical = 1; /* Time Critical */ |
755 | diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c |
756 | index df6a7041922b..e6e8b241d717 100644 |
757 | --- a/arch/powerpc/platforms/pseries/setup.c |
758 | +++ b/arch/powerpc/platforms/pseries/setup.c |
759 | @@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act |
760 | eeh_dev_init(PCI_DN(np), pci->phb); |
761 | } |
762 | break; |
763 | + case OF_RECONFIG_DETACH_NODE: |
764 | + pci = PCI_DN(np); |
765 | + if (pci) |
766 | + list_del(&pci->list); |
767 | + break; |
768 | default: |
769 | err = NOTIFY_DONE; |
770 | break; |
771 | diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c |
772 | index 42506b371b74..4da604ebf6fd 100644 |
773 | --- a/arch/s390/boot/compressed/misc.c |
774 | +++ b/arch/s390/boot/compressed/misc.c |
775 | @@ -167,7 +167,7 @@ unsigned long decompress_kernel(void) |
776 | #endif |
777 | |
778 | puts("Uncompressing Linux... "); |
779 | - decompress(input_data, input_len, NULL, NULL, output, NULL, error); |
780 | + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); |
781 | puts("Ok, booting the kernel.\n"); |
782 | return (unsigned long) output; |
783 | } |
784 | diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c |
785 | index 95470a472d2c..208a9753ab38 100644 |
786 | --- a/arch/sh/boot/compressed/misc.c |
787 | +++ b/arch/sh/boot/compressed/misc.c |
788 | @@ -132,7 +132,7 @@ void decompress_kernel(void) |
789 | |
790 | puts("Uncompressing Linux... "); |
791 | cache_control(CACHE_ENABLE); |
792 | - decompress(input_data, input_len, NULL, NULL, output, NULL, error); |
793 | + __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); |
794 | cache_control(CACHE_DISABLE); |
795 | puts("Ok, booting the kernel.\n"); |
796 | } |
797 | diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c |
798 | index 176d5bda3559..5c65dfee278c 100644 |
799 | --- a/arch/unicore32/boot/compressed/misc.c |
800 | +++ b/arch/unicore32/boot/compressed/misc.c |
801 | @@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start, |
802 | output_ptr = get_unaligned_le32(tmp); |
803 | |
804 | arch_decomp_puts("Uncompressing Linux..."); |
805 | - decompress(input_data, input_data_end - input_data, NULL, NULL, |
806 | - output_data, NULL, error); |
807 | + __decompress(input_data, input_data_end - input_data, NULL, NULL, |
808 | + output_data, 0, NULL, error); |
809 | arch_decomp_puts(" done, booting the kernel.\n"); |
810 | return output_ptr; |
811 | } |
812 | diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c |
813 | index a107b935e22f..e28437e0f708 100644 |
814 | --- a/arch/x86/boot/compressed/misc.c |
815 | +++ b/arch/x86/boot/compressed/misc.c |
816 | @@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, |
817 | #endif |
818 | |
819 | debug_putstr("\nDecompressing Linux... "); |
820 | - decompress(input_data, input_len, NULL, NULL, output, NULL, error); |
821 | + __decompress(input_data, input_len, NULL, NULL, output, output_len, |
822 | + NULL, error); |
823 | parse_elf(output); |
824 | /* |
825 | * 32-bit always performs relocations. 64-bit relocations are only |
826 | diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c |
827 | index 8340e45c891a..68aec42545c2 100644 |
828 | --- a/arch/x86/mm/init_32.c |
829 | +++ b/arch/x86/mm/init_32.c |
830 | @@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end) |
831 | |
832 | vaddr = start; |
833 | pgd_idx = pgd_index(vaddr); |
834 | + pmd_idx = pmd_index(vaddr); |
835 | |
836 | for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) { |
837 | for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); |
838 | diff --git a/block/blk-flush.c b/block/blk-flush.c |
839 | index 20badd7b9d1b..9c423e53324a 100644 |
840 | --- a/block/blk-flush.c |
841 | +++ b/block/blk-flush.c |
842 | @@ -73,6 +73,7 @@ |
843 | |
844 | #include "blk.h" |
845 | #include "blk-mq.h" |
846 | +#include "blk-mq-tag.h" |
847 | |
848 | /* FLUSH/FUA sequences */ |
849 | enum { |
850 | @@ -226,7 +227,12 @@ static void flush_end_io(struct request *flush_rq, int error) |
851 | struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx); |
852 | |
853 | if (q->mq_ops) { |
854 | + struct blk_mq_hw_ctx *hctx; |
855 | + |
856 | + /* release the tag's ownership to the req cloned from */ |
857 | spin_lock_irqsave(&fq->mq_flush_lock, flags); |
858 | + hctx = q->mq_ops->map_queue(q, flush_rq->mq_ctx->cpu); |
859 | + blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq); |
860 | flush_rq->tag = -1; |
861 | } |
862 | |
863 | @@ -308,11 +314,18 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq) |
864 | |
865 | /* |
866 | * Borrow tag from the first request since they can't |
867 | - * be in flight at the same time. |
868 | + * be in flight at the same time. And acquire the tag's |
869 | + * ownership for flush req. |
870 | */ |
871 | if (q->mq_ops) { |
872 | + struct blk_mq_hw_ctx *hctx; |
873 | + |
874 | flush_rq->mq_ctx = first_rq->mq_ctx; |
875 | flush_rq->tag = first_rq->tag; |
876 | + fq->orig_rq = first_rq; |
877 | + |
878 | + hctx = q->mq_ops->map_queue(q, first_rq->mq_ctx->cpu); |
879 | + blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq); |
880 | } |
881 | |
882 | flush_rq->cmd_type = REQ_TYPE_FS; |
883 | diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c |
884 | index b79685e06b70..279c5d674edf 100644 |
885 | --- a/block/blk-mq-sysfs.c |
886 | +++ b/block/blk-mq-sysfs.c |
887 | @@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) |
888 | |
889 | static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) |
890 | { |
891 | - char *start_page = page; |
892 | struct request *rq; |
893 | + int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg); |
894 | + |
895 | + list_for_each_entry(rq, list, queuelist) { |
896 | + const int rq_len = 2 * sizeof(rq) + 2; |
897 | + |
898 | + /* if the output will be truncated */ |
899 | + if (PAGE_SIZE - 1 < len + rq_len) { |
900 | + /* backspacing if it can't hold '\t...\n' */ |
901 | + if (PAGE_SIZE - 1 < len + 5) |
902 | + len -= rq_len; |
903 | + len += snprintf(page + len, PAGE_SIZE - 1 - len, |
904 | + "\t...\n"); |
905 | + break; |
906 | + } |
907 | + len += snprintf(page + len, PAGE_SIZE - 1 - len, |
908 | + "\t%p\n", rq); |
909 | + } |
910 | |
911 | - page += sprintf(page, "%s:\n", msg); |
912 | - |
913 | - list_for_each_entry(rq, list, queuelist) |
914 | - page += sprintf(page, "\t%p\n", rq); |
915 | - |
916 | - return page - start_page; |
917 | + return len; |
918 | } |
919 | |
920 | static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) |
921 | diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c |
922 | index 9b6e28830b82..9115c6d59948 100644 |
923 | --- a/block/blk-mq-tag.c |
924 | +++ b/block/blk-mq-tag.c |
925 | @@ -429,7 +429,7 @@ static void bt_for_each(struct blk_mq_hw_ctx *hctx, |
926 | for (bit = find_first_bit(&bm->word, bm->depth); |
927 | bit < bm->depth; |
928 | bit = find_next_bit(&bm->word, bm->depth, bit + 1)) { |
929 | - rq = blk_mq_tag_to_rq(hctx->tags, off + bit); |
930 | + rq = hctx->tags->rqs[off + bit]; |
931 | if (rq->q == hctx->queue) |
932 | fn(hctx, rq, data, reserved); |
933 | } |
934 | @@ -453,7 +453,7 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, |
935 | for (bit = find_first_bit(&bm->word, bm->depth); |
936 | bit < bm->depth; |
937 | bit = find_next_bit(&bm->word, bm->depth, bit + 1)) { |
938 | - rq = blk_mq_tag_to_rq(tags, off + bit); |
939 | + rq = tags->rqs[off + bit]; |
940 | fn(rq, data, reserved); |
941 | } |
942 | |
943 | diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h |
944 | index 75893a34237d..9eb2cf4f01cb 100644 |
945 | --- a/block/blk-mq-tag.h |
946 | +++ b/block/blk-mq-tag.h |
947 | @@ -89,4 +89,16 @@ static inline void blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) |
948 | __blk_mq_tag_idle(hctx); |
949 | } |
950 | |
951 | +/* |
952 | + * This helper should only be used for flush request to share tag |
953 | + * with the request cloned from, and both the two requests can't be |
954 | + * in flight at the same time. The caller has to make sure the tag |
955 | + * can't be freed. |
956 | + */ |
957 | +static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx, |
958 | + unsigned int tag, struct request *rq) |
959 | +{ |
960 | + hctx->tags->rqs[tag] = rq; |
961 | +} |
962 | + |
963 | #endif |
964 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
965 | index 7d842db59699..176262ec3731 100644 |
966 | --- a/block/blk-mq.c |
967 | +++ b/block/blk-mq.c |
968 | @@ -559,23 +559,9 @@ void blk_mq_abort_requeue_list(struct request_queue *q) |
969 | } |
970 | EXPORT_SYMBOL(blk_mq_abort_requeue_list); |
971 | |
972 | -static inline bool is_flush_request(struct request *rq, |
973 | - struct blk_flush_queue *fq, unsigned int tag) |
974 | -{ |
975 | - return ((rq->cmd_flags & REQ_FLUSH_SEQ) && |
976 | - fq->flush_rq->tag == tag); |
977 | -} |
978 | - |
979 | struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag) |
980 | { |
981 | - struct request *rq = tags->rqs[tag]; |
982 | - /* mq_ctx of flush rq is always cloned from the corresponding req */ |
983 | - struct blk_flush_queue *fq = blk_get_flush_queue(rq->q, rq->mq_ctx); |
984 | - |
985 | - if (!is_flush_request(rq, fq, tag)) |
986 | - return rq; |
987 | - |
988 | - return fq->flush_rq; |
989 | + return tags->rqs[tag]; |
990 | } |
991 | EXPORT_SYMBOL(blk_mq_tag_to_rq); |
992 | |
993 | diff --git a/block/blk.h b/block/blk.h |
994 | index 026d9594142b..838188b35a83 100644 |
995 | --- a/block/blk.h |
996 | +++ b/block/blk.h |
997 | @@ -22,6 +22,12 @@ struct blk_flush_queue { |
998 | struct list_head flush_queue[2]; |
999 | struct list_head flush_data_in_flight; |
1000 | struct request *flush_rq; |
1001 | + |
1002 | + /* |
1003 | + * flush_rq shares tag with this rq, both can't be active |
1004 | + * at the same time |
1005 | + */ |
1006 | + struct request *orig_rq; |
1007 | spinlock_t mq_flush_lock; |
1008 | }; |
1009 | |
1010 | diff --git a/drivers/base/node.c b/drivers/base/node.c |
1011 | index 31df474d72f4..560751bad294 100644 |
1012 | --- a/drivers/base/node.c |
1013 | +++ b/drivers/base/node.c |
1014 | @@ -392,6 +392,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) |
1015 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
1016 | int page_nid; |
1017 | |
1018 | + /* |
1019 | + * memory block could have several absent sections from start. |
1020 | + * skip pfn range from absent section |
1021 | + */ |
1022 | + if (!pfn_present(pfn)) { |
1023 | + pfn = round_down(pfn + PAGES_PER_SECTION, |
1024 | + PAGES_PER_SECTION) - 1; |
1025 | + continue; |
1026 | + } |
1027 | + |
1028 | page_nid = get_nid_for_pfn(pfn); |
1029 | if (page_nid < 0) |
1030 | continue; |
1031 | diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c |
1032 | index e79e567e43aa..263af709e536 100644 |
1033 | --- a/drivers/crypto/vmx/aes.c |
1034 | +++ b/drivers/crypto/vmx/aes.c |
1035 | @@ -84,6 +84,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, |
1036 | preempt_disable(); |
1037 | pagefault_disable(); |
1038 | enable_kernel_altivec(); |
1039 | + enable_kernel_vsx(); |
1040 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
1041 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
1042 | pagefault_enable(); |
1043 | @@ -103,6 +104,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
1044 | preempt_disable(); |
1045 | pagefault_disable(); |
1046 | enable_kernel_altivec(); |
1047 | + enable_kernel_vsx(); |
1048 | aes_p8_encrypt(src, dst, &ctx->enc_key); |
1049 | pagefault_enable(); |
1050 | preempt_enable(); |
1051 | @@ -119,6 +121,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) |
1052 | preempt_disable(); |
1053 | pagefault_disable(); |
1054 | enable_kernel_altivec(); |
1055 | + enable_kernel_vsx(); |
1056 | aes_p8_decrypt(src, dst, &ctx->dec_key); |
1057 | pagefault_enable(); |
1058 | preempt_enable(); |
1059 | diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c |
1060 | index 7299995c78ec..0b8fe2ec5315 100644 |
1061 | --- a/drivers/crypto/vmx/aes_cbc.c |
1062 | +++ b/drivers/crypto/vmx/aes_cbc.c |
1063 | @@ -85,6 +85,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, |
1064 | preempt_disable(); |
1065 | pagefault_disable(); |
1066 | enable_kernel_altivec(); |
1067 | + enable_kernel_vsx(); |
1068 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
1069 | ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); |
1070 | pagefault_enable(); |
1071 | @@ -115,6 +116,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, |
1072 | preempt_disable(); |
1073 | pagefault_disable(); |
1074 | enable_kernel_altivec(); |
1075 | + enable_kernel_vsx(); |
1076 | |
1077 | blkcipher_walk_init(&walk, dst, src, nbytes); |
1078 | ret = blkcipher_walk_virt(desc, &walk); |
1079 | @@ -155,6 +157,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, |
1080 | preempt_disable(); |
1081 | pagefault_disable(); |
1082 | enable_kernel_altivec(); |
1083 | + enable_kernel_vsx(); |
1084 | |
1085 | blkcipher_walk_init(&walk, dst, src, nbytes); |
1086 | ret = blkcipher_walk_virt(desc, &walk); |
1087 | diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c |
1088 | index ed3838781b4c..ee1306cd8f59 100644 |
1089 | --- a/drivers/crypto/vmx/aes_ctr.c |
1090 | +++ b/drivers/crypto/vmx/aes_ctr.c |
1091 | @@ -82,6 +82,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, |
1092 | |
1093 | pagefault_disable(); |
1094 | enable_kernel_altivec(); |
1095 | + enable_kernel_vsx(); |
1096 | ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); |
1097 | pagefault_enable(); |
1098 | |
1099 | @@ -100,6 +101,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, |
1100 | |
1101 | pagefault_disable(); |
1102 | enable_kernel_altivec(); |
1103 | + enable_kernel_vsx(); |
1104 | aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); |
1105 | pagefault_enable(); |
1106 | |
1107 | @@ -132,6 +134,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, |
1108 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
1109 | pagefault_disable(); |
1110 | enable_kernel_altivec(); |
1111 | + enable_kernel_vsx(); |
1112 | aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, |
1113 | walk.dst.virt.addr, |
1114 | (nbytes & |
1115 | diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c |
1116 | index b5e29002b666..2183a2e77641 100644 |
1117 | --- a/drivers/crypto/vmx/ghash.c |
1118 | +++ b/drivers/crypto/vmx/ghash.c |
1119 | @@ -119,6 +119,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, |
1120 | preempt_disable(); |
1121 | pagefault_disable(); |
1122 | enable_kernel_altivec(); |
1123 | + enable_kernel_vsx(); |
1124 | enable_kernel_fp(); |
1125 | gcm_init_p8(ctx->htable, (const u64 *) key); |
1126 | pagefault_enable(); |
1127 | @@ -149,6 +150,7 @@ static int p8_ghash_update(struct shash_desc *desc, |
1128 | preempt_disable(); |
1129 | pagefault_disable(); |
1130 | enable_kernel_altivec(); |
1131 | + enable_kernel_vsx(); |
1132 | enable_kernel_fp(); |
1133 | gcm_ghash_p8(dctx->shash, ctx->htable, |
1134 | dctx->buffer, GHASH_DIGEST_SIZE); |
1135 | @@ -163,6 +165,7 @@ static int p8_ghash_update(struct shash_desc *desc, |
1136 | preempt_disable(); |
1137 | pagefault_disable(); |
1138 | enable_kernel_altivec(); |
1139 | + enable_kernel_vsx(); |
1140 | enable_kernel_fp(); |
1141 | gcm_ghash_p8(dctx->shash, ctx->htable, src, len); |
1142 | pagefault_enable(); |
1143 | @@ -193,6 +196,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) |
1144 | preempt_disable(); |
1145 | pagefault_disable(); |
1146 | enable_kernel_altivec(); |
1147 | + enable_kernel_vsx(); |
1148 | enable_kernel_fp(); |
1149 | gcm_ghash_p8(dctx->shash, ctx->htable, |
1150 | dctx->buffer, GHASH_DIGEST_SIZE); |
1151 | diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c |
1152 | index cacb07b7a8f1..32e7b4a686ef 100644 |
1153 | --- a/drivers/gpu/drm/i915/intel_ddi.c |
1154 | +++ b/drivers/gpu/drm/i915/intel_ddi.c |
1155 | @@ -1293,17 +1293,14 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, |
1156 | DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | |
1157 | wrpll_params.central_freq; |
1158 | } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { |
1159 | - struct drm_encoder *encoder = &intel_encoder->base; |
1160 | - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1161 | - |
1162 | - switch (intel_dp->link_bw) { |
1163 | - case DP_LINK_BW_1_62: |
1164 | + switch (crtc_state->port_clock / 2) { |
1165 | + case 81000: |
1166 | ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); |
1167 | break; |
1168 | - case DP_LINK_BW_2_7: |
1169 | + case 135000: |
1170 | ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); |
1171 | break; |
1172 | - case DP_LINK_BW_5_4: |
1173 | + case 270000: |
1174 | ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); |
1175 | break; |
1176 | } |
1177 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1178 | index bd8f8863eb0e..ca2d923101fc 100644 |
1179 | --- a/drivers/gpu/drm/i915/intel_dp.c |
1180 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
1181 | @@ -48,28 +48,28 @@ |
1182 | #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK) |
1183 | |
1184 | struct dp_link_dpll { |
1185 | - int link_bw; |
1186 | + int clock; |
1187 | struct dpll dpll; |
1188 | }; |
1189 | |
1190 | static const struct dp_link_dpll gen4_dpll[] = { |
1191 | - { DP_LINK_BW_1_62, |
1192 | + { 162000, |
1193 | { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } }, |
1194 | - { DP_LINK_BW_2_7, |
1195 | + { 270000, |
1196 | { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } } |
1197 | }; |
1198 | |
1199 | static const struct dp_link_dpll pch_dpll[] = { |
1200 | - { DP_LINK_BW_1_62, |
1201 | + { 162000, |
1202 | { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } }, |
1203 | - { DP_LINK_BW_2_7, |
1204 | + { 270000, |
1205 | { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } } |
1206 | }; |
1207 | |
1208 | static const struct dp_link_dpll vlv_dpll[] = { |
1209 | - { DP_LINK_BW_1_62, |
1210 | + { 162000, |
1211 | { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } }, |
1212 | - { DP_LINK_BW_2_7, |
1213 | + { 270000, |
1214 | { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } } |
1215 | }; |
1216 | |
1217 | @@ -83,11 +83,11 @@ static const struct dp_link_dpll chv_dpll[] = { |
1218 | * m2 is stored in fixed point format using formula below |
1219 | * (m2_int << 22) | m2_fraction |
1220 | */ |
1221 | - { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */ |
1222 | + { 162000, /* m2_int = 32, m2_fraction = 1677722 */ |
1223 | { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } }, |
1224 | - { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */ |
1225 | + { 270000, /* m2_int = 27, m2_fraction = 0 */ |
1226 | { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }, |
1227 | - { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */ |
1228 | + { 540000, /* m2_int = 27, m2_fraction = 0 */ |
1229 | { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } |
1230 | }; |
1231 | |
1232 | @@ -1089,7 +1089,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) |
1233 | } |
1234 | |
1235 | static void |
1236 | -skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) |
1237 | +skl_edp_set_pll_config(struct intel_crtc_state *pipe_config) |
1238 | { |
1239 | u32 ctrl1; |
1240 | |
1241 | @@ -1101,7 +1101,7 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) |
1242 | pipe_config->dpll_hw_state.cfgcr2 = 0; |
1243 | |
1244 | ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); |
1245 | - switch (link_clock / 2) { |
1246 | + switch (pipe_config->port_clock / 2) { |
1247 | case 81000: |
1248 | ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, |
1249 | SKL_DPLL0); |
1250 | @@ -1134,20 +1134,20 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) |
1251 | pipe_config->dpll_hw_state.ctrl1 = ctrl1; |
1252 | } |
1253 | |
1254 | -static void |
1255 | -hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) |
1256 | +void |
1257 | +hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config) |
1258 | { |
1259 | memset(&pipe_config->dpll_hw_state, 0, |
1260 | sizeof(pipe_config->dpll_hw_state)); |
1261 | |
1262 | - switch (link_bw) { |
1263 | - case DP_LINK_BW_1_62: |
1264 | + switch (pipe_config->port_clock / 2) { |
1265 | + case 81000: |
1266 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; |
1267 | break; |
1268 | - case DP_LINK_BW_2_7: |
1269 | + case 135000: |
1270 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; |
1271 | break; |
1272 | - case DP_LINK_BW_5_4: |
1273 | + case 270000: |
1274 | pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; |
1275 | break; |
1276 | } |
1277 | @@ -1198,7 +1198,7 @@ intel_dp_source_rates(struct drm_device *dev, const int **source_rates) |
1278 | |
1279 | static void |
1280 | intel_dp_set_clock(struct intel_encoder *encoder, |
1281 | - struct intel_crtc_state *pipe_config, int link_bw) |
1282 | + struct intel_crtc_state *pipe_config) |
1283 | { |
1284 | struct drm_device *dev = encoder->base.dev; |
1285 | const struct dp_link_dpll *divisor = NULL; |
1286 | @@ -1220,7 +1220,7 @@ intel_dp_set_clock(struct intel_encoder *encoder, |
1287 | |
1288 | if (divisor && count) { |
1289 | for (i = 0; i < count; i++) { |
1290 | - if (link_bw == divisor[i].link_bw) { |
1291 | + if (pipe_config->port_clock == divisor[i].clock) { |
1292 | pipe_config->dpll = divisor[i].dpll; |
1293 | pipe_config->clock_set = true; |
1294 | break; |
1295 | @@ -1494,13 +1494,13 @@ found: |
1296 | } |
1297 | |
1298 | if (IS_SKYLAKE(dev) && is_edp(intel_dp)) |
1299 | - skl_edp_set_pll_config(pipe_config, common_rates[clock]); |
1300 | + skl_edp_set_pll_config(pipe_config); |
1301 | else if (IS_BROXTON(dev)) |
1302 | /* handled in ddi */; |
1303 | else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1304 | - hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); |
1305 | + hsw_dp_set_ddi_pll_sel(pipe_config); |
1306 | else |
1307 | - intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); |
1308 | + intel_dp_set_clock(encoder, pipe_config); |
1309 | |
1310 | return true; |
1311 | } |
1312 | diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c |
1313 | index 600afdbef8c9..8c127201ab3c 100644 |
1314 | --- a/drivers/gpu/drm/i915/intel_dp_mst.c |
1315 | +++ b/drivers/gpu/drm/i915/intel_dp_mst.c |
1316 | @@ -33,6 +33,7 @@ |
1317 | static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, |
1318 | struct intel_crtc_state *pipe_config) |
1319 | { |
1320 | + struct drm_device *dev = encoder->base.dev; |
1321 | struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); |
1322 | struct intel_digital_port *intel_dig_port = intel_mst->primary; |
1323 | struct intel_dp *intel_dp = &intel_dig_port->dp; |
1324 | @@ -97,6 +98,10 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, |
1325 | &pipe_config->dp_m_n); |
1326 | |
1327 | pipe_config->dp_m_n.tu = slots; |
1328 | + |
1329 | + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) |
1330 | + hsw_dp_set_ddi_pll_sel(pipe_config); |
1331 | + |
1332 | return true; |
1333 | |
1334 | } |
1335 | diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h |
1336 | index 105928382e21..04d426156bdb 100644 |
1337 | --- a/drivers/gpu/drm/i915/intel_drv.h |
1338 | +++ b/drivers/gpu/drm/i915/intel_drv.h |
1339 | @@ -1194,6 +1194,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp); |
1340 | void intel_edp_drrs_invalidate(struct drm_device *dev, |
1341 | unsigned frontbuffer_bits); |
1342 | void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); |
1343 | +void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); |
1344 | |
1345 | /* intel_dp_mst.c */ |
1346 | int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); |
1347 | diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c |
1348 | index c097d3a82bda..a9b01bcf7d0a 100644 |
1349 | --- a/drivers/gpu/drm/radeon/radeon_combios.c |
1350 | +++ b/drivers/gpu/drm/radeon/radeon_combios.c |
1351 | @@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev) |
1352 | rdev->pdev->subsystem_device == 0x30ae) |
1353 | return; |
1354 | |
1355 | + /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume |
1356 | + * - it hangs on resume inside the dynclk 1 table. |
1357 | + */ |
1358 | + if (rdev->family == CHIP_RS480 && |
1359 | + rdev->pdev->subsystem_vendor == 0x103c && |
1360 | + rdev->pdev->subsystem_device == 0x280a) |
1361 | + return; |
1362 | + |
1363 | /* DYN CLK 1 */ |
1364 | table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); |
1365 | if (table) |
1366 | diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c |
1367 | index 1c9cb65ac4cf..4233f5695352 100644 |
1368 | --- a/drivers/i2c/busses/i2c-xgene-slimpro.c |
1369 | +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c |
1370 | @@ -198,10 +198,10 @@ static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, |
1371 | int rc; |
1372 | |
1373 | paddr = dma_map_single(ctx->dev, ctx->dma_buffer, readlen, DMA_FROM_DEVICE); |
1374 | - rc = dma_mapping_error(ctx->dev, paddr); |
1375 | - if (rc) { |
1376 | + if (dma_mapping_error(ctx->dev, paddr)) { |
1377 | dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n", |
1378 | ctx->dma_buffer); |
1379 | + rc = -ENOMEM; |
1380 | goto err; |
1381 | } |
1382 | |
1383 | @@ -241,10 +241,10 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip, |
1384 | memcpy(ctx->dma_buffer, data, writelen); |
1385 | paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen, |
1386 | DMA_TO_DEVICE); |
1387 | - rc = dma_mapping_error(ctx->dev, paddr); |
1388 | - if (rc) { |
1389 | + if (dma_mapping_error(ctx->dev, paddr)) { |
1390 | dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n", |
1391 | ctx->dma_buffer); |
1392 | + rc = -ENOMEM; |
1393 | goto err; |
1394 | } |
1395 | |
1396 | diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h |
1397 | index ba365b6d1e8d..65cbfcc92f11 100644 |
1398 | --- a/drivers/infiniband/core/uverbs.h |
1399 | +++ b/drivers/infiniband/core/uverbs.h |
1400 | @@ -85,7 +85,7 @@ |
1401 | */ |
1402 | |
1403 | struct ib_uverbs_device { |
1404 | - struct kref ref; |
1405 | + atomic_t refcount; |
1406 | int num_comp_vectors; |
1407 | struct completion comp; |
1408 | struct device *dev; |
1409 | @@ -94,6 +94,7 @@ struct ib_uverbs_device { |
1410 | struct cdev cdev; |
1411 | struct rb_root xrcd_tree; |
1412 | struct mutex xrcd_tree_mutex; |
1413 | + struct kobject kobj; |
1414 | }; |
1415 | |
1416 | struct ib_uverbs_event_file { |
1417 | diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c |
1418 | index bbb02ffe87df..a6ca83b3153f 100644 |
1419 | --- a/drivers/infiniband/core/uverbs_cmd.c |
1420 | +++ b/drivers/infiniband/core/uverbs_cmd.c |
1421 | @@ -2346,6 +2346,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, |
1422 | next->send_flags = user_wr->send_flags; |
1423 | |
1424 | if (is_ud) { |
1425 | + if (next->opcode != IB_WR_SEND && |
1426 | + next->opcode != IB_WR_SEND_WITH_IMM) { |
1427 | + ret = -EINVAL; |
1428 | + goto out_put; |
1429 | + } |
1430 | + |
1431 | next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, |
1432 | file->ucontext); |
1433 | if (!next->wr.ud.ah) { |
1434 | @@ -2385,9 +2391,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, |
1435 | user_wr->wr.atomic.compare_add; |
1436 | next->wr.atomic.swap = user_wr->wr.atomic.swap; |
1437 | next->wr.atomic.rkey = user_wr->wr.atomic.rkey; |
1438 | + case IB_WR_SEND: |
1439 | break; |
1440 | default: |
1441 | - break; |
1442 | + ret = -EINVAL; |
1443 | + goto out_put; |
1444 | } |
1445 | } |
1446 | |
1447 | diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c |
1448 | index f6eef2da7097..15f4126a577d 100644 |
1449 | --- a/drivers/infiniband/core/uverbs_main.c |
1450 | +++ b/drivers/infiniband/core/uverbs_main.c |
1451 | @@ -130,14 +130,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, |
1452 | static void ib_uverbs_add_one(struct ib_device *device); |
1453 | static void ib_uverbs_remove_one(struct ib_device *device); |
1454 | |
1455 | -static void ib_uverbs_release_dev(struct kref *ref) |
1456 | +static void ib_uverbs_release_dev(struct kobject *kobj) |
1457 | { |
1458 | struct ib_uverbs_device *dev = |
1459 | - container_of(ref, struct ib_uverbs_device, ref); |
1460 | + container_of(kobj, struct ib_uverbs_device, kobj); |
1461 | |
1462 | - complete(&dev->comp); |
1463 | + kfree(dev); |
1464 | } |
1465 | |
1466 | +static struct kobj_type ib_uverbs_dev_ktype = { |
1467 | + .release = ib_uverbs_release_dev, |
1468 | +}; |
1469 | + |
1470 | static void ib_uverbs_release_event_file(struct kref *ref) |
1471 | { |
1472 | struct ib_uverbs_event_file *file = |
1473 | @@ -303,13 +307,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, |
1474 | return context->device->dealloc_ucontext(context); |
1475 | } |
1476 | |
1477 | +static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) |
1478 | +{ |
1479 | + complete(&dev->comp); |
1480 | +} |
1481 | + |
1482 | static void ib_uverbs_release_file(struct kref *ref) |
1483 | { |
1484 | struct ib_uverbs_file *file = |
1485 | container_of(ref, struct ib_uverbs_file, ref); |
1486 | |
1487 | module_put(file->device->ib_dev->owner); |
1488 | - kref_put(&file->device->ref, ib_uverbs_release_dev); |
1489 | + if (atomic_dec_and_test(&file->device->refcount)) |
1490 | + ib_uverbs_comp_dev(file->device); |
1491 | |
1492 | kfree(file); |
1493 | } |
1494 | @@ -743,9 +753,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) |
1495 | int ret; |
1496 | |
1497 | dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); |
1498 | - if (dev) |
1499 | - kref_get(&dev->ref); |
1500 | - else |
1501 | + if (!atomic_inc_not_zero(&dev->refcount)) |
1502 | return -ENXIO; |
1503 | |
1504 | if (!try_module_get(dev->ib_dev->owner)) { |
1505 | @@ -766,6 +774,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) |
1506 | mutex_init(&file->mutex); |
1507 | |
1508 | filp->private_data = file; |
1509 | + kobject_get(&dev->kobj); |
1510 | |
1511 | return nonseekable_open(inode, filp); |
1512 | |
1513 | @@ -773,13 +782,16 @@ err_module: |
1514 | module_put(dev->ib_dev->owner); |
1515 | |
1516 | err: |
1517 | - kref_put(&dev->ref, ib_uverbs_release_dev); |
1518 | + if (atomic_dec_and_test(&dev->refcount)) |
1519 | + ib_uverbs_comp_dev(dev); |
1520 | + |
1521 | return ret; |
1522 | } |
1523 | |
1524 | static int ib_uverbs_close(struct inode *inode, struct file *filp) |
1525 | { |
1526 | struct ib_uverbs_file *file = filp->private_data; |
1527 | + struct ib_uverbs_device *dev = file->device; |
1528 | |
1529 | ib_uverbs_cleanup_ucontext(file, file->ucontext); |
1530 | |
1531 | @@ -787,6 +799,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) |
1532 | kref_put(&file->async_file->ref, ib_uverbs_release_event_file); |
1533 | |
1534 | kref_put(&file->ref, ib_uverbs_release_file); |
1535 | + kobject_put(&dev->kobj); |
1536 | |
1537 | return 0; |
1538 | } |
1539 | @@ -882,10 +895,11 @@ static void ib_uverbs_add_one(struct ib_device *device) |
1540 | if (!uverbs_dev) |
1541 | return; |
1542 | |
1543 | - kref_init(&uverbs_dev->ref); |
1544 | + atomic_set(&uverbs_dev->refcount, 1); |
1545 | init_completion(&uverbs_dev->comp); |
1546 | uverbs_dev->xrcd_tree = RB_ROOT; |
1547 | mutex_init(&uverbs_dev->xrcd_tree_mutex); |
1548 | + kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype); |
1549 | |
1550 | spin_lock(&map_lock); |
1551 | devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); |
1552 | @@ -912,6 +926,7 @@ static void ib_uverbs_add_one(struct ib_device *device) |
1553 | cdev_init(&uverbs_dev->cdev, NULL); |
1554 | uverbs_dev->cdev.owner = THIS_MODULE; |
1555 | uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; |
1556 | + uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj; |
1557 | kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); |
1558 | if (cdev_add(&uverbs_dev->cdev, base, 1)) |
1559 | goto err_cdev; |
1560 | @@ -942,9 +957,10 @@ err_cdev: |
1561 | clear_bit(devnum, overflow_map); |
1562 | |
1563 | err: |
1564 | - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
1565 | + if (atomic_dec_and_test(&uverbs_dev->refcount)) |
1566 | + ib_uverbs_comp_dev(uverbs_dev); |
1567 | wait_for_completion(&uverbs_dev->comp); |
1568 | - kfree(uverbs_dev); |
1569 | + kobject_put(&uverbs_dev->kobj); |
1570 | return; |
1571 | } |
1572 | |
1573 | @@ -964,9 +980,10 @@ static void ib_uverbs_remove_one(struct ib_device *device) |
1574 | else |
1575 | clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); |
1576 | |
1577 | - kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); |
1578 | + if (atomic_dec_and_test(&uverbs_dev->refcount)) |
1579 | + ib_uverbs_comp_dev(uverbs_dev); |
1580 | wait_for_completion(&uverbs_dev->comp); |
1581 | - kfree(uverbs_dev); |
1582 | + kobject_put(&uverbs_dev->kobj); |
1583 | } |
1584 | |
1585 | static char *uverbs_devnode(struct device *dev, umode_t *mode) |
1586 | diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c |
1587 | index f50a546224ad..33fdd50123f7 100644 |
1588 | --- a/drivers/infiniband/hw/mlx4/ah.c |
1589 | +++ b/drivers/infiniband/hw/mlx4/ah.c |
1590 | @@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) |
1591 | enum rdma_link_layer ll; |
1592 | |
1593 | memset(ah_attr, 0, sizeof *ah_attr); |
1594 | - ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; |
1595 | ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24; |
1596 | ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num); |
1597 | + if (ll == IB_LINK_LAYER_ETHERNET) |
1598 | + ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29; |
1599 | + else |
1600 | + ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; |
1601 | + |
1602 | ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0; |
1603 | if (ah->av.ib.stat_rate) |
1604 | ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET; |
1605 | diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c |
1606 | index 36eb3d012b6d..2f4259525bb1 100644 |
1607 | --- a/drivers/infiniband/hw/mlx4/cq.c |
1608 | +++ b/drivers/infiniband/hw/mlx4/cq.c |
1609 | @@ -638,7 +638,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, |
1610 | * simulated FLUSH_ERR completions |
1611 | */ |
1612 | list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { |
1613 | - mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1); |
1614 | + mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); |
1615 | if (*npolled >= num_entries) |
1616 | goto out; |
1617 | } |
1618 | diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c |
1619 | index ed327e6c8fdc..a0559a8af4f4 100644 |
1620 | --- a/drivers/infiniband/hw/mlx4/mcg.c |
1621 | +++ b/drivers/infiniband/hw/mlx4/mcg.c |
1622 | @@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) |
1623 | { |
1624 | struct mlx4_ib_dev *dev = ctx->dev; |
1625 | struct ib_ah_attr ah_attr; |
1626 | + unsigned long flags; |
1627 | |
1628 | - spin_lock(&dev->sm_lock); |
1629 | + spin_lock_irqsave(&dev->sm_lock, flags); |
1630 | if (!dev->sm_ah[ctx->port - 1]) { |
1631 | /* port is not yet Active, sm_ah not ready */ |
1632 | - spin_unlock(&dev->sm_lock); |
1633 | + spin_unlock_irqrestore(&dev->sm_lock, flags); |
1634 | return -EAGAIN; |
1635 | } |
1636 | mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); |
1637 | - spin_unlock(&dev->sm_lock); |
1638 | + spin_unlock_irqrestore(&dev->sm_lock, flags); |
1639 | return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), |
1640 | ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, |
1641 | &ah_attr, NULL, mad); |
1642 | diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c |
1643 | index 6797108ce873..69fb5ba94d0f 100644 |
1644 | --- a/drivers/infiniband/hw/mlx4/sysfs.c |
1645 | +++ b/drivers/infiniband/hw/mlx4/sysfs.c |
1646 | @@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) |
1647 | struct mlx4_port *p; |
1648 | int i; |
1649 | int ret; |
1650 | + int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) == |
1651 | + IB_LINK_LAYER_ETHERNET; |
1652 | |
1653 | p = kzalloc(sizeof *p, GFP_KERNEL); |
1654 | if (!p) |
1655 | @@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) |
1656 | |
1657 | p->pkey_group.name = "pkey_idx"; |
1658 | p->pkey_group.attrs = |
1659 | - alloc_group_attrs(show_port_pkey, store_port_pkey, |
1660 | + alloc_group_attrs(show_port_pkey, |
1661 | + is_eth ? NULL : store_port_pkey, |
1662 | dev->dev->caps.pkey_table_len[port_num]); |
1663 | if (!p->pkey_group.attrs) { |
1664 | ret = -ENOMEM; |
1665 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
1666 | index bc9a0de897cb..dbb75c0de848 100644 |
1667 | --- a/drivers/infiniband/hw/mlx5/mr.c |
1668 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
1669 | @@ -1118,19 +1118,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, |
1670 | return &mr->ibmr; |
1671 | |
1672 | error: |
1673 | - /* |
1674 | - * Destroy the umem *before* destroying the MR, to ensure we |
1675 | - * will not have any in-flight notifiers when destroying the |
1676 | - * MR. |
1677 | - * |
1678 | - * As the MR is completely invalid to begin with, and this |
1679 | - * error path is only taken if we can't push the mr entry into |
1680 | - * the pagefault tree, this is safe. |
1681 | - */ |
1682 | - |
1683 | ib_umem_release(umem); |
1684 | - /* Kill the MR, and return an error code. */ |
1685 | - clean_mr(mr); |
1686 | return ERR_PTR(err); |
1687 | } |
1688 | |
1689 | diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c |
1690 | index ad843c786e72..5afaa218508d 100644 |
1691 | --- a/drivers/infiniband/hw/qib/qib_keys.c |
1692 | +++ b/drivers/infiniband/hw/qib/qib_keys.c |
1693 | @@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) |
1694 | * unrestricted LKEY. |
1695 | */ |
1696 | rkt->gen++; |
1697 | + /* |
1698 | + * bits are capped in qib_verbs.c to insure enough bits |
1699 | + * for generation number |
1700 | + */ |
1701 | mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | |
1702 | ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) |
1703 | << 8); |
1704 | diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c |
1705 | index a05d1a372208..77e981abfce4 100644 |
1706 | --- a/drivers/infiniband/hw/qib/qib_verbs.c |
1707 | +++ b/drivers/infiniband/hw/qib/qib_verbs.c |
1708 | @@ -40,6 +40,7 @@ |
1709 | #include <linux/rculist.h> |
1710 | #include <linux/mm.h> |
1711 | #include <linux/random.h> |
1712 | +#include <linux/vmalloc.h> |
1713 | |
1714 | #include "qib.h" |
1715 | #include "qib_common.h" |
1716 | @@ -2109,10 +2110,16 @@ int qib_register_ib_device(struct qib_devdata *dd) |
1717 | * the LKEY). The remaining bits act as a generation number or tag. |
1718 | */ |
1719 | spin_lock_init(&dev->lk_table.lock); |
1720 | + /* insure generation is at least 4 bits see keys.c */ |
1721 | + if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) { |
1722 | + qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", |
1723 | + ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS); |
1724 | + ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS; |
1725 | + } |
1726 | dev->lk_table.max = 1 << ib_qib_lkey_table_size; |
1727 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); |
1728 | dev->lk_table.table = (struct qib_mregion __rcu **) |
1729 | - __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); |
1730 | + vmalloc(lk_tab_size); |
1731 | if (dev->lk_table.table == NULL) { |
1732 | ret = -ENOMEM; |
1733 | goto err_lk; |
1734 | @@ -2286,7 +2293,7 @@ err_tx: |
1735 | sizeof(struct qib_pio_header), |
1736 | dev->pio_hdrs, dev->pio_hdrs_phys); |
1737 | err_hdrs: |
1738 | - free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); |
1739 | + vfree(dev->lk_table.table); |
1740 | err_lk: |
1741 | kfree(dev->qp_table); |
1742 | err_qpt: |
1743 | @@ -2340,8 +2347,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd) |
1744 | sizeof(struct qib_pio_header), |
1745 | dev->pio_hdrs, dev->pio_hdrs_phys); |
1746 | lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); |
1747 | - free_pages((unsigned long) dev->lk_table.table, |
1748 | - get_order(lk_tab_size)); |
1749 | + vfree(dev->lk_table.table); |
1750 | kfree(dev->qp_table); |
1751 | } |
1752 | |
1753 | diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h |
1754 | index 1635572752ce..bce0fa596b4d 100644 |
1755 | --- a/drivers/infiniband/hw/qib/qib_verbs.h |
1756 | +++ b/drivers/infiniband/hw/qib/qib_verbs.h |
1757 | @@ -647,6 +647,8 @@ struct qib_qpn_table { |
1758 | struct qpn_map map[QPNMAP_ENTRIES]; |
1759 | }; |
1760 | |
1761 | +#define MAX_LKEY_TABLE_BITS 23 |
1762 | + |
1763 | struct qib_lkey_table { |
1764 | spinlock_t lock; /* protect changes in this struct */ |
1765 | u32 next; /* next unused index (speeds search) */ |
1766 | diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c |
1767 | index 6a594aac2290..c933d882c35c 100644 |
1768 | --- a/drivers/infiniband/ulp/iser/iscsi_iser.c |
1769 | +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c |
1770 | @@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task, |
1771 | goto out; |
1772 | } |
1773 | |
1774 | + tx_desc->mapped = true; |
1775 | tx_desc->dma_addr = dma_addr; |
1776 | tx_desc->tx_sg[0].addr = tx_desc->dma_addr; |
1777 | tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; |
1778 | @@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task) |
1779 | static void iscsi_iser_cleanup_task(struct iscsi_task *task) |
1780 | { |
1781 | struct iscsi_iser_task *iser_task = task->dd_data; |
1782 | - struct iser_tx_desc *tx_desc = &iser_task->desc; |
1783 | - struct iser_conn *iser_conn = task->conn->dd_data; |
1784 | + struct iser_tx_desc *tx_desc = &iser_task->desc; |
1785 | + struct iser_conn *iser_conn = task->conn->dd_data; |
1786 | struct iser_device *device = iser_conn->ib_conn.device; |
1787 | |
1788 | /* DEVICE_REMOVAL event might have already released the device */ |
1789 | if (!device) |
1790 | return; |
1791 | |
1792 | - ib_dma_unmap_single(device->ib_device, |
1793 | - tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); |
1794 | + if (likely(tx_desc->mapped)) { |
1795 | + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, |
1796 | + ISER_HEADERS_LEN, DMA_TO_DEVICE); |
1797 | + tx_desc->mapped = false; |
1798 | + } |
1799 | |
1800 | /* mgmt tasks do not need special cleanup */ |
1801 | if (!task->sc) |
1802 | diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h |
1803 | index 262ba1f8ee50..d2b6caf7694d 100644 |
1804 | --- a/drivers/infiniband/ulp/iser/iscsi_iser.h |
1805 | +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h |
1806 | @@ -270,6 +270,7 @@ enum iser_desc_type { |
1807 | * sg[1] optionally points to either of immediate data |
1808 | * unsolicited data-out or control |
1809 | * @num_sge: number sges used on this TX task |
1810 | + * @mapped: Is the task header mapped |
1811 | */ |
1812 | struct iser_tx_desc { |
1813 | struct iser_hdr iser_header; |
1814 | @@ -278,6 +279,7 @@ struct iser_tx_desc { |
1815 | u64 dma_addr; |
1816 | struct ib_sge tx_sg[2]; |
1817 | int num_sge; |
1818 | + bool mapped; |
1819 | }; |
1820 | |
1821 | #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ |
1822 | diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c |
1823 | index 3e2118e8ed87..0a47f42fec24 100644 |
1824 | --- a/drivers/infiniband/ulp/iser/iser_initiator.c |
1825 | +++ b/drivers/infiniband/ulp/iser/iser_initiator.c |
1826 | @@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn, |
1827 | unsigned long buf_offset; |
1828 | unsigned long data_seg_len; |
1829 | uint32_t itt; |
1830 | - int err = 0; |
1831 | + int err; |
1832 | struct ib_sge *tx_dsg; |
1833 | |
1834 | itt = (__force uint32_t)hdr->itt; |
1835 | @@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn, |
1836 | memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); |
1837 | |
1838 | /* build the tx desc */ |
1839 | - iser_initialize_task_headers(task, tx_desc); |
1840 | + err = iser_initialize_task_headers(task, tx_desc); |
1841 | + if (err) |
1842 | + goto send_data_out_error; |
1843 | |
1844 | mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; |
1845 | tx_dsg = &tx_desc->tx_sg[1]; |
1846 | @@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn, |
1847 | |
1848 | send_data_out_error: |
1849 | kmem_cache_free(ig.desc_cache, tx_desc); |
1850 | - iser_err("conn %p failed err %d\n",conn, err); |
1851 | + iser_err("conn %p failed err %d\n", conn, err); |
1852 | return err; |
1853 | } |
1854 | |
1855 | diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c |
1856 | index 31a20b462266..ffda44ff9375 100644 |
1857 | --- a/drivers/infiniband/ulp/srp/ib_srp.c |
1858 | +++ b/drivers/infiniband/ulp/srp/ib_srp.c |
1859 | @@ -2757,6 +2757,13 @@ static int srp_sdev_count(struct Scsi_Host *host) |
1860 | return c; |
1861 | } |
1862 | |
1863 | +/* |
1864 | + * Return values: |
1865 | + * < 0 upon failure. Caller is responsible for SRP target port cleanup. |
1866 | + * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port |
1867 | + * removal has been scheduled. |
1868 | + * 0 and target->state != SRP_TARGET_REMOVED upon success. |
1869 | + */ |
1870 | static int srp_add_target(struct srp_host *host, struct srp_target_port *target) |
1871 | { |
1872 | struct srp_rport_identifiers ids; |
1873 | @@ -3262,7 +3269,7 @@ static ssize_t srp_create_target(struct device *dev, |
1874 | srp_free_ch_ib(target, ch); |
1875 | srp_free_req_data(target, ch); |
1876 | target->ch_count = ch - target->ch; |
1877 | - break; |
1878 | + goto connected; |
1879 | } |
1880 | } |
1881 | |
1882 | @@ -3272,6 +3279,7 @@ static ssize_t srp_create_target(struct device *dev, |
1883 | node_idx++; |
1884 | } |
1885 | |
1886 | +connected: |
1887 | target->scsi_host->nr_hw_queues = target->ch_count; |
1888 | |
1889 | ret = srp_add_target(host, target); |
1890 | @@ -3294,6 +3302,8 @@ out: |
1891 | mutex_unlock(&host->add_target_mutex); |
1892 | |
1893 | scsi_host_put(target->scsi_host); |
1894 | + if (ret < 0) |
1895 | + scsi_host_put(target->scsi_host); |
1896 | |
1897 | return ret; |
1898 | |
1899 | diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c |
1900 | index 9d35499faca4..08d496411f75 100644 |
1901 | --- a/drivers/input/evdev.c |
1902 | +++ b/drivers/input/evdev.c |
1903 | @@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id) |
1904 | { |
1905 | struct evdev_client *client = file->private_data; |
1906 | struct evdev *evdev = client->evdev; |
1907 | - int retval; |
1908 | |
1909 | - retval = mutex_lock_interruptible(&evdev->mutex); |
1910 | - if (retval) |
1911 | - return retval; |
1912 | + mutex_lock(&evdev->mutex); |
1913 | |
1914 | - if (!evdev->exist || client->revoked) |
1915 | - retval = -ENODEV; |
1916 | - else |
1917 | - retval = input_flush_device(&evdev->handle, file); |
1918 | + if (evdev->exist && !client->revoked) |
1919 | + input_flush_device(&evdev->handle, file); |
1920 | |
1921 | mutex_unlock(&evdev->mutex); |
1922 | - return retval; |
1923 | + return 0; |
1924 | } |
1925 | |
1926 | static void evdev_free(struct device *dev) |
1927 | diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c |
1928 | index abeedc9a78c2..2570f2a25dc4 100644 |
1929 | --- a/drivers/iommu/fsl_pamu.c |
1930 | +++ b/drivers/iommu/fsl_pamu.c |
1931 | @@ -41,7 +41,6 @@ struct pamu_isr_data { |
1932 | |
1933 | static struct paace *ppaact; |
1934 | static struct paace *spaact; |
1935 | -static struct ome *omt __initdata; |
1936 | |
1937 | /* |
1938 | * Table for matching compatible strings, for device tree |
1939 | @@ -50,7 +49,7 @@ static struct ome *omt __initdata; |
1940 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" |
1941 | * string would be used. |
1942 | */ |
1943 | -static const struct of_device_id guts_device_ids[] __initconst = { |
1944 | +static const struct of_device_id guts_device_ids[] = { |
1945 | { .compatible = "fsl,qoriq-device-config-1.0", }, |
1946 | { .compatible = "fsl,qoriq-device-config-2.0", }, |
1947 | {} |
1948 | @@ -599,7 +598,7 @@ found_cpu_node: |
1949 | * Memory accesses to QMAN and BMAN private memory need not be coherent, so |
1950 | * clear the PAACE entry coherency attribute for them. |
1951 | */ |
1952 | -static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) |
1953 | +static void setup_qbman_paace(struct paace *ppaace, int paace_type) |
1954 | { |
1955 | switch (paace_type) { |
1956 | case QMAN_PAACE: |
1957 | @@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) |
1958 | * this table to translate device transaction to appropriate corenet |
1959 | * transaction. |
1960 | */ |
1961 | -static void __init setup_omt(struct ome *omt) |
1962 | +static void setup_omt(struct ome *omt) |
1963 | { |
1964 | struct ome *ome; |
1965 | |
1966 | @@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt) |
1967 | * Get the maximum number of PAACT table entries |
1968 | * and subwindows supported by PAMU |
1969 | */ |
1970 | -static void __init get_pamu_cap_values(unsigned long pamu_reg_base) |
1971 | +static void get_pamu_cap_values(unsigned long pamu_reg_base) |
1972 | { |
1973 | u32 pc_val; |
1974 | |
1975 | @@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base) |
1976 | } |
1977 | |
1978 | /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ |
1979 | -static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, |
1980 | - phys_addr_t ppaact_phys, phys_addr_t spaact_phys, |
1981 | - phys_addr_t omt_phys) |
1982 | +static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, |
1983 | + phys_addr_t ppaact_phys, phys_addr_t spaact_phys, |
1984 | + phys_addr_t omt_phys) |
1985 | { |
1986 | u32 *pc; |
1987 | struct pamu_mmap_regs *pamu_regs; |
1988 | @@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu |
1989 | } |
1990 | |
1991 | /* Enable all device LIODNS */ |
1992 | -static void __init setup_liodns(void) |
1993 | +static void setup_liodns(void) |
1994 | { |
1995 | int i, len; |
1996 | struct paace *ppaace; |
1997 | @@ -846,7 +845,7 @@ struct ccsr_law { |
1998 | /* |
1999 | * Create a coherence subdomain for a given memory block. |
2000 | */ |
2001 | -static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) |
2002 | +static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) |
2003 | { |
2004 | struct device_node *np; |
2005 | const __be32 *iprop; |
2006 | @@ -988,7 +987,7 @@ error: |
2007 | static const struct { |
2008 | u32 svr; |
2009 | u32 port_id; |
2010 | -} port_id_map[] __initconst = { |
2011 | +} port_id_map[] = { |
2012 | {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ |
2013 | {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ |
2014 | {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ |
2015 | @@ -1006,7 +1005,7 @@ static const struct { |
2016 | |
2017 | #define SVR_SECURITY 0x80000 /* The Security (E) bit */ |
2018 | |
2019 | -static int __init fsl_pamu_probe(struct platform_device *pdev) |
2020 | +static int fsl_pamu_probe(struct platform_device *pdev) |
2021 | { |
2022 | struct device *dev = &pdev->dev; |
2023 | void __iomem *pamu_regs = NULL; |
2024 | @@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) |
2025 | int irq; |
2026 | phys_addr_t ppaact_phys; |
2027 | phys_addr_t spaact_phys; |
2028 | + struct ome *omt; |
2029 | phys_addr_t omt_phys; |
2030 | size_t mem_size = 0; |
2031 | unsigned int order = 0; |
2032 | @@ -1200,7 +1200,7 @@ error: |
2033 | return ret; |
2034 | } |
2035 | |
2036 | -static struct platform_driver fsl_of_pamu_driver __initdata = { |
2037 | +static struct platform_driver fsl_of_pamu_driver = { |
2038 | .driver = { |
2039 | .name = "fsl-of-pamu", |
2040 | }, |
2041 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
2042 | index 0649b94f5958..7553cb90627f 100644 |
2043 | --- a/drivers/iommu/intel-iommu.c |
2044 | +++ b/drivers/iommu/intel-iommu.c |
2045 | @@ -755,6 +755,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu |
2046 | struct context_entry *context; |
2047 | u64 *entry; |
2048 | |
2049 | + entry = &root->lo; |
2050 | if (ecs_enabled(iommu)) { |
2051 | if (devfn >= 0x80) { |
2052 | devfn -= 0x80; |
2053 | @@ -762,7 +763,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu |
2054 | } |
2055 | devfn *= 2; |
2056 | } |
2057 | - entry = &root->lo; |
2058 | if (*entry & 1) |
2059 | context = phys_to_virt(*entry & VTD_PAGE_MASK); |
2060 | else { |
2061 | diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c |
2062 | index 4e460216bd16..e29d5d7fe220 100644 |
2063 | --- a/drivers/iommu/io-pgtable-arm.c |
2064 | +++ b/drivers/iommu/io-pgtable-arm.c |
2065 | @@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte; |
2066 | |
2067 | static bool selftest_running = false; |
2068 | |
2069 | +static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, |
2070 | + unsigned long iova, size_t size, int lvl, |
2071 | + arm_lpae_iopte *ptep); |
2072 | + |
2073 | static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, |
2074 | unsigned long iova, phys_addr_t paddr, |
2075 | arm_lpae_iopte prot, int lvl, |
2076 | @@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, |
2077 | { |
2078 | arm_lpae_iopte pte = prot; |
2079 | |
2080 | - /* We require an unmap first */ |
2081 | if (iopte_leaf(*ptep, lvl)) { |
2082 | + /* We require an unmap first */ |
2083 | WARN_ON(!selftest_running); |
2084 | return -EEXIST; |
2085 | + } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { |
2086 | + /* |
2087 | + * We need to unmap and free the old table before |
2088 | + * overwriting it with a block entry. |
2089 | + */ |
2090 | + arm_lpae_iopte *tblp; |
2091 | + size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); |
2092 | + |
2093 | + tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); |
2094 | + if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) |
2095 | + return -EINVAL; |
2096 | } |
2097 | |
2098 | if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) |
2099 | diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c |
2100 | index c1f2e521dc52..2cd439203d0f 100644 |
2101 | --- a/drivers/iommu/tegra-smmu.c |
2102 | +++ b/drivers/iommu/tegra-smmu.c |
2103 | @@ -27,6 +27,7 @@ struct tegra_smmu { |
2104 | const struct tegra_smmu_soc *soc; |
2105 | |
2106 | unsigned long pfn_mask; |
2107 | + unsigned long tlb_mask; |
2108 | |
2109 | unsigned long *asids; |
2110 | struct mutex lock; |
2111 | @@ -68,7 +69,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) |
2112 | #define SMMU_TLB_CONFIG 0x14 |
2113 | #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) |
2114 | #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) |
2115 | -#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) |
2116 | +#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ |
2117 | + ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask) |
2118 | |
2119 | #define SMMU_PTC_CONFIG 0x18 |
2120 | #define SMMU_PTC_CONFIG_ENABLE (1 << 29) |
2121 | @@ -816,6 +818,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
2122 | smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1; |
2123 | dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n", |
2124 | mc->soc->num_address_bits, smmu->pfn_mask); |
2125 | + smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1; |
2126 | + dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines, |
2127 | + smmu->tlb_mask); |
2128 | |
2129 | value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); |
2130 | |
2131 | @@ -825,7 +830,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, |
2132 | smmu_writel(smmu, value, SMMU_PTC_CONFIG); |
2133 | |
2134 | value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | |
2135 | - SMMU_TLB_CONFIG_ACTIVE_LINES(0x20); |
2136 | + SMMU_TLB_CONFIG_ACTIVE_LINES(smmu); |
2137 | |
2138 | if (soc->supports_round_robin_arbitration) |
2139 | value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; |
2140 | diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c |
2141 | index 1fba339cddc1..c8447fa3fd91 100644 |
2142 | --- a/drivers/media/platform/am437x/am437x-vpfe.c |
2143 | +++ b/drivers/media/platform/am437x/am437x-vpfe.c |
2144 | @@ -1186,14 +1186,24 @@ static int vpfe_initialize_device(struct vpfe_device *vpfe) |
2145 | static int vpfe_release(struct file *file) |
2146 | { |
2147 | struct vpfe_device *vpfe = video_drvdata(file); |
2148 | + bool fh_singular; |
2149 | int ret; |
2150 | |
2151 | mutex_lock(&vpfe->lock); |
2152 | |
2153 | - if (v4l2_fh_is_singular_file(file)) |
2154 | - vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); |
2155 | + /* Save the singular status before we call the clean-up helper */ |
2156 | + fh_singular = v4l2_fh_is_singular_file(file); |
2157 | + |
2158 | + /* the release helper will cleanup any on-going streaming */ |
2159 | ret = _vb2_fop_release(file, NULL); |
2160 | |
2161 | + /* |
2162 | + * If this was the last open file. |
2163 | + * Then de-initialize hw module. |
2164 | + */ |
2165 | + if (fh_singular) |
2166 | + vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); |
2167 | + |
2168 | mutex_unlock(&vpfe->lock); |
2169 | |
2170 | return ret; |
2171 | @@ -1565,7 +1575,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, |
2172 | return -EBUSY; |
2173 | } |
2174 | |
2175 | - ret = vpfe_try_fmt(file, priv, fmt); |
2176 | + ret = vpfe_try_fmt(file, priv, &format); |
2177 | if (ret) |
2178 | return ret; |
2179 | |
2180 | diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c |
2181 | index 18d0a871747f..12be830d704f 100644 |
2182 | --- a/drivers/media/platform/omap3isp/isp.c |
2183 | +++ b/drivers/media/platform/omap3isp/isp.c |
2184 | @@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags, |
2185 | int ret; |
2186 | |
2187 | if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && |
2188 | - !(link->flags & MEDIA_LNK_FL_ENABLED)) { |
2189 | + !(flags & MEDIA_LNK_FL_ENABLED)) { |
2190 | /* Powering off entities is assumed to never fail. */ |
2191 | isp_pipeline_pm_power(source, -sink_use); |
2192 | isp_pipeline_pm_power(sink, -source_use); |
2193 | return 0; |
2194 | } |
2195 | |
2196 | - if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && |
2197 | + if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && |
2198 | (flags & MEDIA_LNK_FL_ENABLED)) { |
2199 | |
2200 | ret = isp_pipeline_pm_power(source, sink_use); |
2201 | @@ -2000,10 +2000,8 @@ static int isp_register_entities(struct isp_device *isp) |
2202 | ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); |
2203 | |
2204 | done: |
2205 | - if (ret < 0) { |
2206 | + if (ret < 0) |
2207 | isp_unregister_entities(isp); |
2208 | - v4l2_async_notifier_unregister(&isp->notifier); |
2209 | - } |
2210 | |
2211 | return ret; |
2212 | } |
2213 | @@ -2423,10 +2421,6 @@ static int isp_probe(struct platform_device *pdev) |
2214 | ret = isp_of_parse_nodes(&pdev->dev, &isp->notifier); |
2215 | if (ret < 0) |
2216 | return ret; |
2217 | - ret = v4l2_async_notifier_register(&isp->v4l2_dev, |
2218 | - &isp->notifier); |
2219 | - if (ret) |
2220 | - return ret; |
2221 | } else { |
2222 | isp->pdata = pdev->dev.platform_data; |
2223 | isp->syscon = syscon_regmap_lookup_by_pdevname("syscon.0"); |
2224 | @@ -2557,18 +2551,27 @@ static int isp_probe(struct platform_device *pdev) |
2225 | if (ret < 0) |
2226 | goto error_iommu; |
2227 | |
2228 | - isp->notifier.bound = isp_subdev_notifier_bound; |
2229 | - isp->notifier.complete = isp_subdev_notifier_complete; |
2230 | - |
2231 | ret = isp_register_entities(isp); |
2232 | if (ret < 0) |
2233 | goto error_modules; |
2234 | |
2235 | + if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { |
2236 | + isp->notifier.bound = isp_subdev_notifier_bound; |
2237 | + isp->notifier.complete = isp_subdev_notifier_complete; |
2238 | + |
2239 | + ret = v4l2_async_notifier_register(&isp->v4l2_dev, |
2240 | + &isp->notifier); |
2241 | + if (ret) |
2242 | + goto error_register_entities; |
2243 | + } |
2244 | + |
2245 | isp_core_init(isp, 1); |
2246 | omap3isp_put(isp); |
2247 | |
2248 | return 0; |
2249 | |
2250 | +error_register_entities: |
2251 | + isp_unregister_entities(isp); |
2252 | error_modules: |
2253 | isp_cleanup_modules(isp); |
2254 | error_iommu: |
2255 | diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c |
2256 | index 98e50e446d57..e779c93cb015 100644 |
2257 | --- a/drivers/media/platform/xilinx/xilinx-dma.c |
2258 | +++ b/drivers/media/platform/xilinx/xilinx-dma.c |
2259 | @@ -699,8 +699,10 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma, |
2260 | |
2261 | /* ... and the buffers queue... */ |
2262 | dma->alloc_ctx = vb2_dma_contig_init_ctx(dma->xdev->dev); |
2263 | - if (IS_ERR(dma->alloc_ctx)) |
2264 | + if (IS_ERR(dma->alloc_ctx)) { |
2265 | + ret = PTR_ERR(dma->alloc_ctx); |
2266 | goto error; |
2267 | + } |
2268 | |
2269 | /* Don't enable VB2_READ and VB2_WRITE, as using the read() and write() |
2270 | * V4L2 APIs would be inefficient. Testing on the command line with a |
2271 | diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c |
2272 | index 0ff388a16168..f3b6b2caabf6 100644 |
2273 | --- a/drivers/media/rc/rc-main.c |
2274 | +++ b/drivers/media/rc/rc-main.c |
2275 | @@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) |
2276 | { |
2277 | struct rc_dev *dev = to_rc_dev(device); |
2278 | |
2279 | - if (!dev || !dev->input_dev) |
2280 | - return -ENODEV; |
2281 | - |
2282 | if (dev->rc_map.name) |
2283 | ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); |
2284 | if (dev->driver_name) |
2285 | diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c |
2286 | index 9f579589e800..9bf11ea90549 100644 |
2287 | --- a/drivers/memory/tegra/tegra114.c |
2288 | +++ b/drivers/memory/tegra/tegra114.c |
2289 | @@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = { |
2290 | .num_swgroups = ARRAY_SIZE(tegra114_swgroups), |
2291 | .supports_round_robin_arbitration = false, |
2292 | .supports_request_limit = false, |
2293 | + .num_tlb_lines = 32, |
2294 | .num_asids = 4, |
2295 | .ops = &tegra114_smmu_ops, |
2296 | }; |
2297 | diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c |
2298 | index 966e1557e6f4..70ed80d23431 100644 |
2299 | --- a/drivers/memory/tegra/tegra124.c |
2300 | +++ b/drivers/memory/tegra/tegra124.c |
2301 | @@ -1023,6 +1023,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = { |
2302 | .num_swgroups = ARRAY_SIZE(tegra124_swgroups), |
2303 | .supports_round_robin_arbitration = true, |
2304 | .supports_request_limit = true, |
2305 | + .num_tlb_lines = 32, |
2306 | .num_asids = 128, |
2307 | .ops = &tegra124_smmu_ops, |
2308 | }; |
2309 | diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c |
2310 | index 1abcd8f6f3ba..b2a34fefabef 100644 |
2311 | --- a/drivers/memory/tegra/tegra30.c |
2312 | +++ b/drivers/memory/tegra/tegra30.c |
2313 | @@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = { |
2314 | .num_swgroups = ARRAY_SIZE(tegra30_swgroups), |
2315 | .supports_round_robin_arbitration = false, |
2316 | .supports_request_limit = false, |
2317 | + .num_tlb_lines = 16, |
2318 | .num_asids = 4, |
2319 | .ops = &tegra30_smmu_ops, |
2320 | }; |
2321 | diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c |
2322 | index 729e0851167d..4224a6acf4c4 100644 |
2323 | --- a/drivers/misc/cxl/api.c |
2324 | +++ b/drivers/misc/cxl/api.c |
2325 | @@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(cxl_get_phys_dev); |
2326 | |
2327 | int cxl_release_context(struct cxl_context *ctx) |
2328 | { |
2329 | - if (ctx->status != CLOSED) |
2330 | + if (ctx->status >= STARTED) |
2331 | return -EBUSY; |
2332 | |
2333 | put_device(&ctx->afu->dev); |
2334 | diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c |
2335 | index 32ad09705949..dc836071c633 100644 |
2336 | --- a/drivers/misc/cxl/pci.c |
2337 | +++ b/drivers/misc/cxl/pci.c |
2338 | @@ -851,16 +851,9 @@ int cxl_reset(struct cxl *adapter) |
2339 | { |
2340 | struct pci_dev *dev = to_pci_dev(adapter->dev.parent); |
2341 | int rc; |
2342 | - int i; |
2343 | - u32 val; |
2344 | |
2345 | dev_info(&dev->dev, "CXL reset\n"); |
2346 | |
2347 | - for (i = 0; i < adapter->slices; i++) { |
2348 | - cxl_pci_vphb_remove(adapter->afu[i]); |
2349 | - cxl_remove_afu(adapter->afu[i]); |
2350 | - } |
2351 | - |
2352 | /* pcie_warm_reset requests a fundamental pci reset which includes a |
2353 | * PERST assert/deassert. PERST triggers a loading of the image |
2354 | * if "user" or "factory" is selected in sysfs */ |
2355 | @@ -869,20 +862,6 @@ int cxl_reset(struct cxl *adapter) |
2356 | return rc; |
2357 | } |
2358 | |
2359 | - /* the PERST done above fences the PHB. So, reset depends on EEH |
2360 | - * to unbind the driver, tell Sapphire to reinit the PHB, and rebind |
2361 | - * the driver. Do an mmio read explictly to ensure EEH notices the |
2362 | - * fenced PHB. Retry for a few seconds before giving up. */ |
2363 | - i = 0; |
2364 | - while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) && |
2365 | - (i < 5)) { |
2366 | - msleep(500); |
2367 | - i++; |
2368 | - } |
2369 | - |
2370 | - if (val != 0xffffffff) |
2371 | - dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n"); |
2372 | - |
2373 | return rc; |
2374 | } |
2375 | |
2376 | @@ -1140,8 +1119,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) |
2377 | int slice; |
2378 | int rc; |
2379 | |
2380 | - pci_dev_get(dev); |
2381 | - |
2382 | if (cxl_verbose) |
2383 | dump_cxl_config_space(dev); |
2384 | |
2385 | diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c |
2386 | index 9ad73f30f744..9e3fdbdc4037 100644 |
2387 | --- a/drivers/mmc/core/core.c |
2388 | +++ b/drivers/mmc/core/core.c |
2389 | @@ -358,8 +358,10 @@ EXPORT_SYMBOL(mmc_start_bkops); |
2390 | */ |
2391 | static void mmc_wait_data_done(struct mmc_request *mrq) |
2392 | { |
2393 | - mrq->host->context_info.is_done_rcv = true; |
2394 | - wake_up_interruptible(&mrq->host->context_info.wait); |
2395 | + struct mmc_context_info *context_info = &mrq->host->context_info; |
2396 | + |
2397 | + context_info->is_done_rcv = true; |
2398 | + wake_up_interruptible(&context_info->wait); |
2399 | } |
2400 | |
2401 | static void mmc_wait_done(struct mmc_request *mrq) |
2402 | diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c |
2403 | index 797be7549a15..653f335bef15 100644 |
2404 | --- a/drivers/mmc/host/sdhci-of-esdhc.c |
2405 | +++ b/drivers/mmc/host/sdhci-of-esdhc.c |
2406 | @@ -208,6 +208,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) |
2407 | if (clock == 0) |
2408 | return; |
2409 | |
2410 | + /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ |
2411 | + temp = esdhc_readw(host, SDHCI_HOST_VERSION); |
2412 | + temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; |
2413 | + if (temp < VENDOR_V_23) |
2414 | + pre_div = 2; |
2415 | + |
2416 | /* Workaround to reduce the clock frequency for p1010 esdhc */ |
2417 | if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { |
2418 | if (clock > 20000000) |
2419 | diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c |
2420 | index 94f54d2772e8..b3b0a3e4fca1 100644 |
2421 | --- a/drivers/mmc/host/sdhci-pci.c |
2422 | +++ b/drivers/mmc/host/sdhci-pci.c |
2423 | @@ -618,6 +618,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) |
2424 | static const struct sdhci_pci_fixes sdhci_o2 = { |
2425 | .probe = sdhci_pci_o2_probe, |
2426 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
2427 | + .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, |
2428 | .probe_slot = sdhci_pci_o2_probe_slot, |
2429 | .resume = sdhci_pci_o2_resume, |
2430 | }; |
2431 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
2432 | index 1dbe93232030..b0c915a35a9e 100644 |
2433 | --- a/drivers/mmc/host/sdhci.c |
2434 | +++ b/drivers/mmc/host/sdhci.c |
2435 | @@ -54,8 +54,7 @@ static void sdhci_finish_command(struct sdhci_host *); |
2436 | static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); |
2437 | static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); |
2438 | static int sdhci_pre_dma_transfer(struct sdhci_host *host, |
2439 | - struct mmc_data *data, |
2440 | - struct sdhci_host_next *next); |
2441 | + struct mmc_data *data); |
2442 | static int sdhci_do_get_cd(struct sdhci_host *host); |
2443 | |
2444 | #ifdef CONFIG_PM |
2445 | @@ -496,7 +495,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, |
2446 | goto fail; |
2447 | BUG_ON(host->align_addr & host->align_mask); |
2448 | |
2449 | - host->sg_count = sdhci_pre_dma_transfer(host, data, NULL); |
2450 | + host->sg_count = sdhci_pre_dma_transfer(host, data); |
2451 | if (host->sg_count < 0) |
2452 | goto unmap_align; |
2453 | |
2454 | @@ -635,9 +634,11 @@ static void sdhci_adma_table_post(struct sdhci_host *host, |
2455 | } |
2456 | } |
2457 | |
2458 | - if (!data->host_cookie) |
2459 | + if (data->host_cookie == COOKIE_MAPPED) { |
2460 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, |
2461 | data->sg_len, direction); |
2462 | + data->host_cookie = COOKIE_UNMAPPED; |
2463 | + } |
2464 | } |
2465 | |
2466 | static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) |
2467 | @@ -833,7 +834,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) |
2468 | } else { |
2469 | int sg_cnt; |
2470 | |
2471 | - sg_cnt = sdhci_pre_dma_transfer(host, data, NULL); |
2472 | + sg_cnt = sdhci_pre_dma_transfer(host, data); |
2473 | if (sg_cnt <= 0) { |
2474 | /* |
2475 | * This only happens when someone fed |
2476 | @@ -949,11 +950,13 @@ static void sdhci_finish_data(struct sdhci_host *host) |
2477 | if (host->flags & SDHCI_USE_ADMA) |
2478 | sdhci_adma_table_post(host, data); |
2479 | else { |
2480 | - if (!data->host_cookie) |
2481 | + if (data->host_cookie == COOKIE_MAPPED) { |
2482 | dma_unmap_sg(mmc_dev(host->mmc), |
2483 | data->sg, data->sg_len, |
2484 | (data->flags & MMC_DATA_READ) ? |
2485 | DMA_FROM_DEVICE : DMA_TO_DEVICE); |
2486 | + data->host_cookie = COOKIE_UNMAPPED; |
2487 | + } |
2488 | } |
2489 | } |
2490 | |
2491 | @@ -1132,6 +1135,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) |
2492 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); |
2493 | break; |
2494 | case MMC_TIMING_UHS_DDR50: |
2495 | + case MMC_TIMING_MMC_DDR52: |
2496 | preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); |
2497 | break; |
2498 | case MMC_TIMING_MMC_HS400: |
2499 | @@ -1559,7 +1563,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) |
2500 | (ios->timing == MMC_TIMING_UHS_SDR25) || |
2501 | (ios->timing == MMC_TIMING_UHS_SDR50) || |
2502 | (ios->timing == MMC_TIMING_UHS_SDR104) || |
2503 | - (ios->timing == MMC_TIMING_UHS_DDR50))) { |
2504 | + (ios->timing == MMC_TIMING_UHS_DDR50) || |
2505 | + (ios->timing == MMC_TIMING_MMC_DDR52))) { |
2506 | u16 preset; |
2507 | |
2508 | sdhci_enable_preset_value(host, true); |
2509 | @@ -2097,49 +2102,36 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, |
2510 | struct mmc_data *data = mrq->data; |
2511 | |
2512 | if (host->flags & SDHCI_REQ_USE_DMA) { |
2513 | - if (data->host_cookie) |
2514 | + if (data->host_cookie == COOKIE_GIVEN || |
2515 | + data->host_cookie == COOKIE_MAPPED) |
2516 | dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
2517 | data->flags & MMC_DATA_WRITE ? |
2518 | DMA_TO_DEVICE : DMA_FROM_DEVICE); |
2519 | - mrq->data->host_cookie = 0; |
2520 | + data->host_cookie = COOKIE_UNMAPPED; |
2521 | } |
2522 | } |
2523 | |
2524 | static int sdhci_pre_dma_transfer(struct sdhci_host *host, |
2525 | - struct mmc_data *data, |
2526 | - struct sdhci_host_next *next) |
2527 | + struct mmc_data *data) |
2528 | { |
2529 | int sg_count; |
2530 | |
2531 | - if (!next && data->host_cookie && |
2532 | - data->host_cookie != host->next_data.cookie) { |
2533 | - pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n", |
2534 | - __func__, data->host_cookie, host->next_data.cookie); |
2535 | - data->host_cookie = 0; |
2536 | + if (data->host_cookie == COOKIE_MAPPED) { |
2537 | + data->host_cookie = COOKIE_GIVEN; |
2538 | + return data->sg_count; |
2539 | } |
2540 | |
2541 | - /* Check if next job is already prepared */ |
2542 | - if (next || |
2543 | - (!next && data->host_cookie != host->next_data.cookie)) { |
2544 | - sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, |
2545 | - data->sg_len, |
2546 | - data->flags & MMC_DATA_WRITE ? |
2547 | - DMA_TO_DEVICE : DMA_FROM_DEVICE); |
2548 | - |
2549 | - } else { |
2550 | - sg_count = host->next_data.sg_count; |
2551 | - host->next_data.sg_count = 0; |
2552 | - } |
2553 | + WARN_ON(data->host_cookie == COOKIE_GIVEN); |
2554 | |
2555 | + sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, |
2556 | + data->flags & MMC_DATA_WRITE ? |
2557 | + DMA_TO_DEVICE : DMA_FROM_DEVICE); |
2558 | |
2559 | if (sg_count == 0) |
2560 | - return -EINVAL; |
2561 | + return -ENOSPC; |
2562 | |
2563 | - if (next) { |
2564 | - next->sg_count = sg_count; |
2565 | - data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie; |
2566 | - } else |
2567 | - host->sg_count = sg_count; |
2568 | + data->sg_count = sg_count; |
2569 | + data->host_cookie = COOKIE_MAPPED; |
2570 | |
2571 | return sg_count; |
2572 | } |
2573 | @@ -2149,16 +2141,10 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, |
2574 | { |
2575 | struct sdhci_host *host = mmc_priv(mmc); |
2576 | |
2577 | - if (mrq->data->host_cookie) { |
2578 | - mrq->data->host_cookie = 0; |
2579 | - return; |
2580 | - } |
2581 | + mrq->data->host_cookie = COOKIE_UNMAPPED; |
2582 | |
2583 | if (host->flags & SDHCI_REQ_USE_DMA) |
2584 | - if (sdhci_pre_dma_transfer(host, |
2585 | - mrq->data, |
2586 | - &host->next_data) < 0) |
2587 | - mrq->data->host_cookie = 0; |
2588 | + sdhci_pre_dma_transfer(host, mrq->data); |
2589 | } |
2590 | |
2591 | static void sdhci_card_event(struct mmc_host *mmc) |
2592 | @@ -3030,7 +3016,6 @@ int sdhci_add_host(struct sdhci_host *host) |
2593 | host->max_clk = host->ops->get_max_clock(host); |
2594 | } |
2595 | |
2596 | - host->next_data.cookie = 1; |
2597 | /* |
2598 | * In case of Host Controller v3.00, find out whether clock |
2599 | * multiplier is supported. |
2600 | diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h |
2601 | index 5521d29368e4..a9512a421f52 100644 |
2602 | --- a/drivers/mmc/host/sdhci.h |
2603 | +++ b/drivers/mmc/host/sdhci.h |
2604 | @@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc { |
2605 | */ |
2606 | #define SDHCI_MAX_SEGS 128 |
2607 | |
2608 | -struct sdhci_host_next { |
2609 | - unsigned int sg_count; |
2610 | - s32 cookie; |
2611 | +enum sdhci_cookie { |
2612 | + COOKIE_UNMAPPED, |
2613 | + COOKIE_MAPPED, |
2614 | + COOKIE_GIVEN, |
2615 | }; |
2616 | |
2617 | struct sdhci_host { |
2618 | @@ -503,7 +504,6 @@ struct sdhci_host { |
2619 | unsigned int tuning_mode; /* Re-tuning mode supported by host */ |
2620 | #define SDHCI_TUNING_MODE_1 0 |
2621 | |
2622 | - struct sdhci_host_next next_data; |
2623 | unsigned long private[0] ____cacheline_aligned; |
2624 | }; |
2625 | |
2626 | diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c |
2627 | index 73c934cf6c61..79789d8e52da 100644 |
2628 | --- a/drivers/net/ethernet/broadcom/tg3.c |
2629 | +++ b/drivers/net/ethernet/broadcom/tg3.c |
2630 | @@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev, |
2631 | tg3_ape_scratchpad_read(tp, &temperature, attr->index, |
2632 | sizeof(temperature)); |
2633 | spin_unlock_bh(&tp->lock); |
2634 | - return sprintf(buf, "%u\n", temperature); |
2635 | + return sprintf(buf, "%u\n", temperature * 1000); |
2636 | } |
2637 | |
2638 | |
2639 | diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h |
2640 | index c2bd4f98a837..212d668dabb3 100644 |
2641 | --- a/drivers/net/ethernet/intel/igb/igb.h |
2642 | +++ b/drivers/net/ethernet/intel/igb/igb.h |
2643 | @@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, |
2644 | struct sk_buff *skb); |
2645 | int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); |
2646 | int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); |
2647 | +void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); |
2648 | #ifdef CONFIG_IGB_HWMON |
2649 | void igb_sysfs_exit(struct igb_adapter *adapter); |
2650 | int igb_sysfs_init(struct igb_adapter *adapter); |
2651 | diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c |
2652 | index d5673eb90c54..0afc0913e5b9 100644 |
2653 | --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c |
2654 | +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c |
2655 | @@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev, |
2656 | { |
2657 | struct igb_adapter *adapter = netdev_priv(netdev); |
2658 | unsigned int count = ch->combined_count; |
2659 | + unsigned int max_combined = 0; |
2660 | |
2661 | /* Verify they are not requesting separate vectors */ |
2662 | if (!count || ch->rx_count || ch->tx_count) |
2663 | @@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev, |
2664 | return -EINVAL; |
2665 | |
2666 | /* Verify the number of channels doesn't exceed hw limits */ |
2667 | - if (count > igb_max_channels(adapter)) |
2668 | + max_combined = igb_max_channels(adapter); |
2669 | + if (count > max_combined) |
2670 | return -EINVAL; |
2671 | |
2672 | if (count != adapter->rss_queues) { |
2673 | adapter->rss_queues = count; |
2674 | + igb_set_flag_queue_pairs(adapter, max_combined); |
2675 | |
2676 | /* Hardware has to reinitialize queues and interrupts to |
2677 | * match the new configuration. |
2678 | diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c |
2679 | index 830466c49987..8d7b59689722 100644 |
2680 | --- a/drivers/net/ethernet/intel/igb/igb_main.c |
2681 | +++ b/drivers/net/ethernet/intel/igb/igb_main.c |
2682 | @@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, |
2683 | |
2684 | /* allocate q_vector and rings */ |
2685 | q_vector = adapter->q_vector[v_idx]; |
2686 | - if (!q_vector) |
2687 | + if (!q_vector) { |
2688 | q_vector = kzalloc(size, GFP_KERNEL); |
2689 | - else |
2690 | + } else if (size > ksize(q_vector)) { |
2691 | + kfree_rcu(q_vector, rcu); |
2692 | + q_vector = kzalloc(size, GFP_KERNEL); |
2693 | + } else { |
2694 | memset(q_vector, 0, size); |
2695 | + } |
2696 | if (!q_vector) |
2697 | return -ENOMEM; |
2698 | |
2699 | @@ -2888,6 +2892,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) |
2700 | |
2701 | adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); |
2702 | |
2703 | + igb_set_flag_queue_pairs(adapter, max_rss_queues); |
2704 | +} |
2705 | + |
2706 | +void igb_set_flag_queue_pairs(struct igb_adapter *adapter, |
2707 | + const u32 max_rss_queues) |
2708 | +{ |
2709 | + struct e1000_hw *hw = &adapter->hw; |
2710 | + |
2711 | /* Determine if we need to pair queues. */ |
2712 | switch (hw->mac.type) { |
2713 | case e1000_82575: |
2714 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2715 | index 864b476f7fd5..925f2f8659b8 100644 |
2716 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2717 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
2718 | @@ -837,8 +837,11 @@ static int stmmac_init_phy(struct net_device *dev) |
2719 | interface); |
2720 | } |
2721 | |
2722 | - if (IS_ERR(phydev)) { |
2723 | + if (IS_ERR_OR_NULL(phydev)) { |
2724 | pr_err("%s: Could not attach to PHY\n", dev->name); |
2725 | + if (!phydev) |
2726 | + return -ENODEV; |
2727 | + |
2728 | return PTR_ERR(phydev); |
2729 | } |
2730 | |
2731 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
2732 | index 23806c243a53..fd4a5353d216 100644 |
2733 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
2734 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
2735 | @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { |
2736 | {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ |
2737 | {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ |
2738 | {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ |
2739 | + {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/ |
2740 | {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ |
2741 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ |
2742 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
2743 | diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c |
2744 | index 3236d44b459d..b7f18e2155eb 100644 |
2745 | --- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c |
2746 | +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c |
2747 | @@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, |
2748 | |
2749 | rtl_write_byte(rtlpriv, MSR, bt_msr); |
2750 | rtlpriv->cfg->ops->led_control(hw, ledaction); |
2751 | - if ((bt_msr & 0xfc) == MSR_AP) |
2752 | + if ((bt_msr & MSR_MASK) == MSR_AP) |
2753 | rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); |
2754 | else |
2755 | rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); |
2756 | diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h |
2757 | index 53668fc8f23e..1d6110f9c1fb 100644 |
2758 | --- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h |
2759 | +++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h |
2760 | @@ -429,6 +429,7 @@ |
2761 | #define MSR_ADHOC 0x01 |
2762 | #define MSR_INFRA 0x02 |
2763 | #define MSR_AP 0x03 |
2764 | +#define MSR_MASK 0x03 |
2765 | |
2766 | #define RRSR_RSC_OFFSET 21 |
2767 | #define RRSR_SHORT_OFFSET 23 |
2768 | diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c |
2769 | index 06175ce769bb..707ed2eb5936 100644 |
2770 | --- a/drivers/nfc/st-nci/i2c.c |
2771 | +++ b/drivers/nfc/st-nci/i2c.c |
2772 | @@ -25,15 +25,15 @@ |
2773 | #include <linux/interrupt.h> |
2774 | #include <linux/delay.h> |
2775 | #include <linux/nfc.h> |
2776 | -#include <linux/platform_data/st_nci.h> |
2777 | +#include <linux/platform_data/st-nci.h> |
2778 | |
2779 | #include "ndlc.h" |
2780 | |
2781 | -#define DRIVER_DESC "NCI NFC driver for ST21NFCB" |
2782 | +#define DRIVER_DESC "NCI NFC driver for ST_NCI" |
2783 | |
2784 | /* ndlc header */ |
2785 | -#define ST21NFCB_FRAME_HEADROOM 1 |
2786 | -#define ST21NFCB_FRAME_TAILROOM 0 |
2787 | +#define ST_NCI_FRAME_HEADROOM 1 |
2788 | +#define ST_NCI_FRAME_TAILROOM 0 |
2789 | |
2790 | #define ST_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */ |
2791 | #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */ |
2792 | @@ -118,15 +118,10 @@ static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb) |
2793 | /* |
2794 | * Reads an ndlc frame and returns it in a newly allocated sk_buff. |
2795 | * returns: |
2796 | - * frame size : if received frame is complete (find ST21NFCB_SOF_EOF at |
2797 | - * end of read) |
2798 | - * -EAGAIN : if received frame is incomplete (not find ST21NFCB_SOF_EOF |
2799 | - * at end of read) |
2800 | + * 0 : if received frame is complete |
2801 | * -EREMOTEIO : i2c read error (fatal) |
2802 | * -EBADMSG : frame was incorrect and discarded |
2803 | - * (value returned from st_nci_i2c_repack) |
2804 | - * -EIO : if no ST21NFCB_SOF_EOF is found after reaching |
2805 | - * the read length end sequence |
2806 | + * -ENOMEM : cannot allocate skb, frame dropped |
2807 | */ |
2808 | static int st_nci_i2c_read(struct st_nci_i2c_phy *phy, |
2809 | struct sk_buff **skb) |
2810 | @@ -179,7 +174,7 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy, |
2811 | /* |
2812 | * Reads an ndlc frame from the chip. |
2813 | * |
2814 | - * On ST21NFCB, IRQ goes in idle state when read starts. |
2815 | + * On ST_NCI, IRQ goes in idle state when read starts. |
2816 | */ |
2817 | static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) |
2818 | { |
2819 | @@ -325,12 +320,12 @@ static int st_nci_i2c_probe(struct i2c_client *client, |
2820 | } |
2821 | } else { |
2822 | nfc_err(&client->dev, |
2823 | - "st21nfcb platform resources not available\n"); |
2824 | + "st_nci platform resources not available\n"); |
2825 | return -ENODEV; |
2826 | } |
2827 | |
2828 | r = ndlc_probe(phy, &i2c_phy_ops, &client->dev, |
2829 | - ST21NFCB_FRAME_HEADROOM, ST21NFCB_FRAME_TAILROOM, |
2830 | + ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM, |
2831 | &phy->ndlc); |
2832 | if (r < 0) { |
2833 | nfc_err(&client->dev, "Unable to register ndlc layer\n"); |
2834 | diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c |
2835 | index 56c6a4cb4c96..4f51649d0e75 100644 |
2836 | --- a/drivers/nfc/st-nci/ndlc.c |
2837 | +++ b/drivers/nfc/st-nci/ndlc.c |
2838 | @@ -171,6 +171,8 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc) |
2839 | if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) { |
2840 | switch (pcb & PCB_SYNC_MASK) { |
2841 | case PCB_SYNC_ACK: |
2842 | + skb = skb_dequeue(&ndlc->ack_pending_q); |
2843 | + kfree_skb(skb); |
2844 | del_timer_sync(&ndlc->t1_timer); |
2845 | del_timer_sync(&ndlc->t2_timer); |
2846 | ndlc->t2_active = false; |
2847 | @@ -196,8 +198,10 @@ static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc) |
2848 | kfree_skb(skb); |
2849 | break; |
2850 | } |
2851 | - } else { |
2852 | + } else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) { |
2853 | nci_recv_frame(ndlc->ndev, skb); |
2854 | + } else { |
2855 | + kfree_skb(skb); |
2856 | } |
2857 | } |
2858 | } |
2859 | diff --git a/drivers/nfc/st-nci/st-nci_se.c b/drivers/nfc/st-nci/st-nci_se.c |
2860 | index 97addfa96c6f..c742ef65a05a 100644 |
2861 | --- a/drivers/nfc/st-nci/st-nci_se.c |
2862 | +++ b/drivers/nfc/st-nci/st-nci_se.c |
2863 | @@ -189,14 +189,14 @@ int st_nci_hci_load_session(struct nci_dev *ndev) |
2864 | ST_NCI_DEVICE_MGNT_GATE, |
2865 | ST_NCI_DEVICE_MGNT_PIPE); |
2866 | if (r < 0) |
2867 | - goto free_info; |
2868 | + return r; |
2869 | |
2870 | /* Get pipe list */ |
2871 | r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, |
2872 | ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list), |
2873 | &skb_pipe_list); |
2874 | if (r < 0) |
2875 | - goto free_info; |
2876 | + return r; |
2877 | |
2878 | /* Complete the existing gate_pipe table */ |
2879 | for (i = 0; i < skb_pipe_list->len; i++) { |
2880 | @@ -222,6 +222,7 @@ int st_nci_hci_load_session(struct nci_dev *ndev) |
2881 | dm_pipe_info->src_host_id != ST_NCI_ESE_HOST_ID) { |
2882 | pr_err("Unexpected apdu_reader pipe on host %x\n", |
2883 | dm_pipe_info->src_host_id); |
2884 | + kfree_skb(skb_pipe_info); |
2885 | continue; |
2886 | } |
2887 | |
2888 | @@ -241,13 +242,12 @@ int st_nci_hci_load_session(struct nci_dev *ndev) |
2889 | ndev->hci_dev->pipes[st_nci_gates[j].pipe].host = |
2890 | dm_pipe_info->src_host_id; |
2891 | } |
2892 | + kfree_skb(skb_pipe_info); |
2893 | } |
2894 | |
2895 | memcpy(ndev->hci_dev->init_data.gates, st_nci_gates, |
2896 | sizeof(st_nci_gates)); |
2897 | |
2898 | -free_info: |
2899 | - kfree_skb(skb_pipe_info); |
2900 | kfree_skb(skb_pipe_list); |
2901 | return r; |
2902 | } |
2903 | diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c |
2904 | index d251f7229c4e..051286562fab 100644 |
2905 | --- a/drivers/nfc/st21nfca/st21nfca.c |
2906 | +++ b/drivers/nfc/st21nfca/st21nfca.c |
2907 | @@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) |
2908 | ST21NFCA_DEVICE_MGNT_GATE, |
2909 | ST21NFCA_DEVICE_MGNT_PIPE); |
2910 | if (r < 0) |
2911 | - goto free_info; |
2912 | + return r; |
2913 | |
2914 | /* Get pipe list */ |
2915 | r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, |
2916 | ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list), |
2917 | &skb_pipe_list); |
2918 | if (r < 0) |
2919 | - goto free_info; |
2920 | + return r; |
2921 | |
2922 | /* Complete the existing gate_pipe table */ |
2923 | for (i = 0; i < skb_pipe_list->len; i++) { |
2924 | @@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) |
2925 | info->src_host_id != ST21NFCA_ESE_HOST_ID) { |
2926 | pr_err("Unexpected apdu_reader pipe on host %x\n", |
2927 | info->src_host_id); |
2928 | + kfree_skb(skb_pipe_info); |
2929 | continue; |
2930 | } |
2931 | |
2932 | @@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) |
2933 | hdev->pipes[st21nfca_gates[j].pipe].dest_host = |
2934 | info->src_host_id; |
2935 | } |
2936 | + kfree_skb(skb_pipe_info); |
2937 | } |
2938 | |
2939 | /* |
2940 | @@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) |
2941 | st21nfca_gates[i].gate, |
2942 | st21nfca_gates[i].pipe); |
2943 | if (r < 0) |
2944 | - goto free_info; |
2945 | + goto free_list; |
2946 | } |
2947 | } |
2948 | |
2949 | memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates)); |
2950 | -free_info: |
2951 | - kfree_skb(skb_pipe_info); |
2952 | +free_list: |
2953 | kfree_skb(skb_pipe_list); |
2954 | return r; |
2955 | } |
2956 | diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c |
2957 | index 07496560e5b9..6e82bc42373b 100644 |
2958 | --- a/drivers/of/fdt.c |
2959 | +++ b/drivers/of/fdt.c |
2960 | @@ -967,7 +967,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, |
2961 | } |
2962 | |
2963 | #ifdef CONFIG_HAVE_MEMBLOCK |
2964 | -#define MAX_PHYS_ADDR ((phys_addr_t)~0) |
2965 | +#ifndef MAX_MEMBLOCK_ADDR |
2966 | +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) |
2967 | +#endif |
2968 | |
2969 | void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) |
2970 | { |
2971 | @@ -984,16 +986,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) |
2972 | } |
2973 | size &= PAGE_MASK; |
2974 | |
2975 | - if (base > MAX_PHYS_ADDR) { |
2976 | + if (base > MAX_MEMBLOCK_ADDR) { |
2977 | pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", |
2978 | base, base + size); |
2979 | return; |
2980 | } |
2981 | |
2982 | - if (base + size - 1 > MAX_PHYS_ADDR) { |
2983 | + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { |
2984 | pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", |
2985 | - ((u64)MAX_PHYS_ADDR) + 1, base + size); |
2986 | - size = MAX_PHYS_ADDR - base + 1; |
2987 | + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); |
2988 | + size = MAX_MEMBLOCK_ADDR - base + 1; |
2989 | } |
2990 | |
2991 | if (base + size < phys_offset) { |
2992 | diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c |
2993 | index dceb9ddfd99a..a32c1f6c252c 100644 |
2994 | --- a/drivers/parisc/lba_pci.c |
2995 | +++ b/drivers/parisc/lba_pci.c |
2996 | @@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev) |
2997 | if (lba_dev->hba.lmmio_space.flags) |
2998 | pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space, |
2999 | lba_dev->hba.lmmio_space_offset); |
3000 | - if (lba_dev->hba.gmmio_space.flags) |
3001 | - pci_add_resource(&resources, &lba_dev->hba.gmmio_space); |
3002 | + if (lba_dev->hba.gmmio_space.flags) { |
3003 | + /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */ |
3004 | + pr_warn("LBA: Not registering GMMIO space %pR\n", |
3005 | + &lba_dev->hba.gmmio_space); |
3006 | + } |
3007 | |
3008 | pci_add_resource(&resources, &lba_dev->hba.bus_num); |
3009 | |
3010 | diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig |
3011 | index 944f50015ed0..73de4efcbe6e 100644 |
3012 | --- a/drivers/pci/Kconfig |
3013 | +++ b/drivers/pci/Kconfig |
3014 | @@ -2,7 +2,7 @@ |
3015 | # PCI configuration |
3016 | # |
3017 | config PCI_BUS_ADDR_T_64BIT |
3018 | - def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC)) |
3019 | + def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) |
3020 | depends on PCI |
3021 | |
3022 | config PCI_MSI |
3023 | diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c |
3024 | index ad1ea1695b4a..4a52072d1d3f 100644 |
3025 | --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c |
3026 | +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c |
3027 | @@ -1202,12 +1202,6 @@ static int mtk_pctrl_build_state(struct platform_device *pdev) |
3028 | return 0; |
3029 | } |
3030 | |
3031 | -static struct pinctrl_desc mtk_pctrl_desc = { |
3032 | - .confops = &mtk_pconf_ops, |
3033 | - .pctlops = &mtk_pctrl_ops, |
3034 | - .pmxops = &mtk_pmx_ops, |
3035 | -}; |
3036 | - |
3037 | int mtk_pctrl_init(struct platform_device *pdev, |
3038 | const struct mtk_pinctrl_devdata *data, |
3039 | struct regmap *regmap) |
3040 | @@ -1265,12 +1259,17 @@ int mtk_pctrl_init(struct platform_device *pdev, |
3041 | |
3042 | for (i = 0; i < pctl->devdata->npins; i++) |
3043 | pins[i] = pctl->devdata->pins[i].pin; |
3044 | - mtk_pctrl_desc.name = dev_name(&pdev->dev); |
3045 | - mtk_pctrl_desc.owner = THIS_MODULE; |
3046 | - mtk_pctrl_desc.pins = pins; |
3047 | - mtk_pctrl_desc.npins = pctl->devdata->npins; |
3048 | + |
3049 | + pctl->pctl_desc.name = dev_name(&pdev->dev); |
3050 | + pctl->pctl_desc.owner = THIS_MODULE; |
3051 | + pctl->pctl_desc.pins = pins; |
3052 | + pctl->pctl_desc.npins = pctl->devdata->npins; |
3053 | + pctl->pctl_desc.confops = &mtk_pconf_ops; |
3054 | + pctl->pctl_desc.pctlops = &mtk_pctrl_ops; |
3055 | + pctl->pctl_desc.pmxops = &mtk_pmx_ops; |
3056 | pctl->dev = &pdev->dev; |
3057 | - pctl->pctl_dev = pinctrl_register(&mtk_pctrl_desc, &pdev->dev, pctl); |
3058 | + |
3059 | + pctl->pctl_dev = pinctrl_register(&pctl->pctl_desc, &pdev->dev, pctl); |
3060 | if (IS_ERR(pctl->pctl_dev)) { |
3061 | dev_err(&pdev->dev, "couldn't register pinctrl driver\n"); |
3062 | return PTR_ERR(pctl->pctl_dev); |
3063 | diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h |
3064 | index 30213e514c2f..c532c23c70b4 100644 |
3065 | --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.h |
3066 | +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.h |
3067 | @@ -256,6 +256,7 @@ struct mtk_pinctrl_devdata { |
3068 | struct mtk_pinctrl { |
3069 | struct regmap *regmap1; |
3070 | struct regmap *regmap2; |
3071 | + struct pinctrl_desc pctl_desc; |
3072 | struct device *dev; |
3073 | struct gpio_chip *chip; |
3074 | struct mtk_pinctrl_group *groups; |
3075 | diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c |
3076 | index a0824477072b..2deb1309fcac 100644 |
3077 | --- a/drivers/pinctrl/pinctrl-at91.c |
3078 | +++ b/drivers/pinctrl/pinctrl-at91.c |
3079 | @@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = { |
3080 | static void __iomem *pin_to_controller(struct at91_pinctrl *info, |
3081 | unsigned int bank) |
3082 | { |
3083 | + if (!gpio_chips[bank]) |
3084 | + return NULL; |
3085 | + |
3086 | return gpio_chips[bank]->regbase; |
3087 | } |
3088 | |
3089 | @@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, |
3090 | pin = &pins_conf[i]; |
3091 | at91_pin_dbg(info->dev, pin); |
3092 | pio = pin_to_controller(info, pin->bank); |
3093 | + |
3094 | + if (!pio) |
3095 | + continue; |
3096 | + |
3097 | mask = pin_to_mask(pin->pin); |
3098 | at91_mux_disable_interrupt(pio, mask); |
3099 | switch (pin->mux) { |
3100 | @@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, |
3101 | *config = 0; |
3102 | dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id); |
3103 | pio = pin_to_controller(info, pin_to_bank(pin_id)); |
3104 | + |
3105 | + if (!pio) |
3106 | + return -EINVAL; |
3107 | + |
3108 | pin = pin_id % MAX_NB_GPIO_PER_BANK; |
3109 | |
3110 | if (at91_mux_get_multidrive(pio, pin)) |
3111 | @@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, |
3112 | "%s:%d, pin_id=%d, config=0x%lx", |
3113 | __func__, __LINE__, pin_id, config); |
3114 | pio = pin_to_controller(info, pin_to_bank(pin_id)); |
3115 | + |
3116 | + if (!pio) |
3117 | + return -EINVAL; |
3118 | + |
3119 | pin = pin_id % MAX_NB_GPIO_PER_BANK; |
3120 | mask = pin_to_mask(pin); |
3121 | |
3122 | diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c |
3123 | index 76b57388d01b..81c3e582309a 100644 |
3124 | --- a/drivers/platform/x86/ideapad-laptop.c |
3125 | +++ b/drivers/platform/x86/ideapad-laptop.c |
3126 | @@ -853,6 +853,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { |
3127 | }, |
3128 | }, |
3129 | { |
3130 | + .ident = "Lenovo Yoga 3 14", |
3131 | + .matches = { |
3132 | + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
3133 | + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"), |
3134 | + }, |
3135 | + }, |
3136 | + { |
3137 | .ident = "Lenovo Yoga 3 Pro 1370", |
3138 | .matches = { |
3139 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
3140 | diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c |
3141 | index 4337c3bc6ace..afea84c7a155 100644 |
3142 | --- a/drivers/rtc/rtc-abx80x.c |
3143 | +++ b/drivers/rtc/rtc-abx80x.c |
3144 | @@ -28,7 +28,7 @@ |
3145 | #define ABX8XX_REG_WD 0x07 |
3146 | |
3147 | #define ABX8XX_REG_CTRL1 0x10 |
3148 | -#define ABX8XX_CTRL_WRITE BIT(1) |
3149 | +#define ABX8XX_CTRL_WRITE BIT(0) |
3150 | #define ABX8XX_CTRL_12_24 BIT(6) |
3151 | |
3152 | #define ABX8XX_REG_CFG_KEY 0x1f |
3153 | diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c |
3154 | index a0f832362199..2e709e239dbc 100644 |
3155 | --- a/drivers/rtc/rtc-s3c.c |
3156 | +++ b/drivers/rtc/rtc-s3c.c |
3157 | @@ -39,6 +39,7 @@ struct s3c_rtc { |
3158 | void __iomem *base; |
3159 | struct clk *rtc_clk; |
3160 | struct clk *rtc_src_clk; |
3161 | + bool clk_disabled; |
3162 | |
3163 | struct s3c_rtc_data *data; |
3164 | |
3165 | @@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info) |
3166 | unsigned long irq_flags; |
3167 | |
3168 | spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); |
3169 | - clk_enable(info->rtc_clk); |
3170 | - if (info->data->needs_src_clk) |
3171 | - clk_enable(info->rtc_src_clk); |
3172 | + if (info->clk_disabled) { |
3173 | + clk_enable(info->rtc_clk); |
3174 | + if (info->data->needs_src_clk) |
3175 | + clk_enable(info->rtc_src_clk); |
3176 | + info->clk_disabled = false; |
3177 | + } |
3178 | spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); |
3179 | } |
3180 | |
3181 | @@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info) |
3182 | unsigned long irq_flags; |
3183 | |
3184 | spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); |
3185 | - if (info->data->needs_src_clk) |
3186 | - clk_disable(info->rtc_src_clk); |
3187 | - clk_disable(info->rtc_clk); |
3188 | + if (!info->clk_disabled) { |
3189 | + if (info->data->needs_src_clk) |
3190 | + clk_disable(info->rtc_src_clk); |
3191 | + clk_disable(info->rtc_clk); |
3192 | + info->clk_disabled = true; |
3193 | + } |
3194 | spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); |
3195 | } |
3196 | |
3197 | @@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) |
3198 | |
3199 | s3c_rtc_disable_clk(info); |
3200 | |
3201 | + if (enabled) |
3202 | + s3c_rtc_enable_clk(info); |
3203 | + else |
3204 | + s3c_rtc_disable_clk(info); |
3205 | + |
3206 | return 0; |
3207 | } |
3208 | |
3209 | diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c |
3210 | index 8c70d785ba73..ab60287ee72d 100644 |
3211 | --- a/drivers/rtc/rtc-s5m.c |
3212 | +++ b/drivers/rtc/rtc-s5m.c |
3213 | @@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) |
3214 | case S2MPS13X: |
3215 | data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); |
3216 | ret = regmap_write(info->regmap, info->regs->ctrl, data[0]); |
3217 | + if (ret < 0) |
3218 | + break; |
3219 | + |
3220 | + /* |
3221 | + * Should set WUDR & (RUDR or AUDR) bits to high after writing |
3222 | + * RTC_CTRL register like writing Alarm registers. We can't find |
3223 | + * the description from datasheet but vendor code does that |
3224 | + * really. |
3225 | + */ |
3226 | + ret = s5m8767_rtc_set_alarm_reg(info); |
3227 | break; |
3228 | |
3229 | default: |
3230 | diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c |
3231 | index f5021fcb154e..089e7f8543a5 100644 |
3232 | --- a/fs/btrfs/transaction.c |
3233 | +++ b/fs/btrfs/transaction.c |
3234 | @@ -1893,8 +1893,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
3235 | spin_unlock(&root->fs_info->trans_lock); |
3236 | |
3237 | wait_for_commit(root, prev_trans); |
3238 | + ret = prev_trans->aborted; |
3239 | |
3240 | btrfs_put_transaction(prev_trans); |
3241 | + if (ret) |
3242 | + goto cleanup_transaction; |
3243 | } else { |
3244 | spin_unlock(&root->fs_info->trans_lock); |
3245 | } |
3246 | diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c |
3247 | index 49b8b6e41a18..c7b84f3bf6ad 100644 |
3248 | --- a/fs/cifs/ioctl.c |
3249 | +++ b/fs/cifs/ioctl.c |
3250 | @@ -70,6 +70,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, |
3251 | goto out_drop_write; |
3252 | } |
3253 | |
3254 | + if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { |
3255 | + rc = -EBADF; |
3256 | + cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); |
3257 | + goto out_fput; |
3258 | + } |
3259 | + |
3260 | if ((!src_file.file->private_data) || (!dst_file->private_data)) { |
3261 | rc = -EBADF; |
3262 | cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); |
3263 | diff --git a/fs/coredump.c b/fs/coredump.c |
3264 | index c5ecde6f3eed..a8f75640ac86 100644 |
3265 | --- a/fs/coredump.c |
3266 | +++ b/fs/coredump.c |
3267 | @@ -513,10 +513,10 @@ void do_coredump(const siginfo_t *siginfo) |
3268 | const struct cred *old_cred; |
3269 | struct cred *cred; |
3270 | int retval = 0; |
3271 | - int flag = 0; |
3272 | int ispipe; |
3273 | struct files_struct *displaced; |
3274 | - bool need_nonrelative = false; |
3275 | + /* require nonrelative corefile path and be extra careful */ |
3276 | + bool need_suid_safe = false; |
3277 | bool core_dumped = false; |
3278 | static atomic_t core_dump_count = ATOMIC_INIT(0); |
3279 | struct coredump_params cprm = { |
3280 | @@ -550,9 +550,8 @@ void do_coredump(const siginfo_t *siginfo) |
3281 | */ |
3282 | if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) { |
3283 | /* Setuid core dump mode */ |
3284 | - flag = O_EXCL; /* Stop rewrite attacks */ |
3285 | cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ |
3286 | - need_nonrelative = true; |
3287 | + need_suid_safe = true; |
3288 | } |
3289 | |
3290 | retval = coredump_wait(siginfo->si_signo, &core_state); |
3291 | @@ -633,7 +632,7 @@ void do_coredump(const siginfo_t *siginfo) |
3292 | if (cprm.limit < binfmt->min_coredump) |
3293 | goto fail_unlock; |
3294 | |
3295 | - if (need_nonrelative && cn.corename[0] != '/') { |
3296 | + if (need_suid_safe && cn.corename[0] != '/') { |
3297 | printk(KERN_WARNING "Pid %d(%s) can only dump core "\ |
3298 | "to fully qualified path!\n", |
3299 | task_tgid_vnr(current), current->comm); |
3300 | @@ -641,8 +640,35 @@ void do_coredump(const siginfo_t *siginfo) |
3301 | goto fail_unlock; |
3302 | } |
3303 | |
3304 | + /* |
3305 | + * Unlink the file if it exists unless this is a SUID |
3306 | + * binary - in that case, we're running around with root |
3307 | + * privs and don't want to unlink another user's coredump. |
3308 | + */ |
3309 | + if (!need_suid_safe) { |
3310 | + mm_segment_t old_fs; |
3311 | + |
3312 | + old_fs = get_fs(); |
3313 | + set_fs(KERNEL_DS); |
3314 | + /* |
3315 | + * If it doesn't exist, that's fine. If there's some |
3316 | + * other problem, we'll catch it at the filp_open(). |
3317 | + */ |
3318 | + (void) sys_unlink((const char __user *)cn.corename); |
3319 | + set_fs(old_fs); |
3320 | + } |
3321 | + |
3322 | + /* |
3323 | + * There is a race between unlinking and creating the |
3324 | + * file, but if that causes an EEXIST here, that's |
3325 | + * fine - another process raced with us while creating |
3326 | + * the corefile, and the other process won. To userspace, |
3327 | + * what matters is that at least one of the two processes |
3328 | + * writes its coredump successfully, not which one. |
3329 | + */ |
3330 | cprm.file = filp_open(cn.corename, |
3331 | - O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, |
3332 | + O_CREAT | 2 | O_NOFOLLOW | |
3333 | + O_LARGEFILE | O_EXCL, |
3334 | 0600); |
3335 | if (IS_ERR(cprm.file)) |
3336 | goto fail_unlock; |
3337 | @@ -659,11 +685,15 @@ void do_coredump(const siginfo_t *siginfo) |
3338 | if (!S_ISREG(inode->i_mode)) |
3339 | goto close_fail; |
3340 | /* |
3341 | - * Dont allow local users get cute and trick others to coredump |
3342 | - * into their pre-created files. |
3343 | + * Don't dump core if the filesystem changed owner or mode |
3344 | + * of the file during file creation. This is an issue when |
3345 | + * a process dumps core while its cwd is e.g. on a vfat |
3346 | + * filesystem. |
3347 | */ |
3348 | if (!uid_eq(inode->i_uid, current_fsuid())) |
3349 | goto close_fail; |
3350 | + if ((inode->i_mode & 0677) != 0600) |
3351 | + goto close_fail; |
3352 | if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) |
3353 | goto close_fail; |
3354 | if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) |
3355 | diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c |
3356 | index 8db0b464483f..63cd2c147221 100644 |
3357 | --- a/fs/ecryptfs/dentry.c |
3358 | +++ b/fs/ecryptfs/dentry.c |
3359 | @@ -45,20 +45,20 @@ |
3360 | static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) |
3361 | { |
3362 | struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); |
3363 | - int rc; |
3364 | - |
3365 | - if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) |
3366 | - return 1; |
3367 | + int rc = 1; |
3368 | |
3369 | if (flags & LOOKUP_RCU) |
3370 | return -ECHILD; |
3371 | |
3372 | - rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); |
3373 | + if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) |
3374 | + rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); |
3375 | + |
3376 | if (d_really_is_positive(dentry)) { |
3377 | - struct inode *lower_inode = |
3378 | - ecryptfs_inode_to_lower(d_inode(dentry)); |
3379 | + struct inode *inode = d_inode(dentry); |
3380 | |
3381 | - fsstack_copy_attr_all(d_inode(dentry), lower_inode); |
3382 | + fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode)); |
3383 | + if (!inode->i_nlink) |
3384 | + return 0; |
3385 | } |
3386 | return rc; |
3387 | } |
3388 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
3389 | index 9981064c4a54..a5e8c744e962 100644 |
3390 | --- a/fs/ext4/super.c |
3391 | +++ b/fs/ext4/super.c |
3392 | @@ -325,6 +325,22 @@ static void save_error_info(struct super_block *sb, const char *func, |
3393 | ext4_commit_super(sb, 1); |
3394 | } |
3395 | |
3396 | +/* |
3397 | + * The del_gendisk() function uninitializes the disk-specific data |
3398 | + * structures, including the bdi structure, without telling anyone |
3399 | + * else. Once this happens, any attempt to call mark_buffer_dirty() |
3400 | + * (for example, by ext4_commit_super), will cause a kernel OOPS. |
3401 | + * This is a kludge to prevent these oops until we can put in a proper |
3402 | + * hook in del_gendisk() to inform the VFS and file system layers. |
3403 | + */ |
3404 | +static int block_device_ejected(struct super_block *sb) |
3405 | +{ |
3406 | + struct inode *bd_inode = sb->s_bdev->bd_inode; |
3407 | + struct backing_dev_info *bdi = inode_to_bdi(bd_inode); |
3408 | + |
3409 | + return bdi->dev == NULL; |
3410 | +} |
3411 | + |
3412 | static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) |
3413 | { |
3414 | struct super_block *sb = journal->j_private; |
3415 | @@ -4617,7 +4633,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) |
3416 | struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; |
3417 | int error = 0; |
3418 | |
3419 | - if (!sbh) |
3420 | + if (!sbh || block_device_ejected(sb)) |
3421 | return error; |
3422 | if (buffer_write_io_error(sbh)) { |
3423 | /* |
3424 | @@ -4833,10 +4849,11 @@ static int ext4_freeze(struct super_block *sb) |
3425 | error = jbd2_journal_flush(journal); |
3426 | if (error < 0) |
3427 | goto out; |
3428 | + |
3429 | + /* Journal blocked and flushed, clear needs_recovery flag. */ |
3430 | + EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); |
3431 | } |
3432 | |
3433 | - /* Journal blocked and flushed, clear needs_recovery flag. */ |
3434 | - EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); |
3435 | error = ext4_commit_super(sb, 1); |
3436 | out: |
3437 | if (journal) |
3438 | @@ -4854,8 +4871,11 @@ static int ext4_unfreeze(struct super_block *sb) |
3439 | if (sb->s_flags & MS_RDONLY) |
3440 | return 0; |
3441 | |
3442 | - /* Reset the needs_recovery flag before the fs is unlocked. */ |
3443 | - EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); |
3444 | + if (EXT4_SB(sb)->s_journal) { |
3445 | + /* Reset the needs_recovery flag before the fs is unlocked. */ |
3446 | + EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); |
3447 | + } |
3448 | + |
3449 | ext4_commit_super(sb, 1); |
3450 | return 0; |
3451 | } |
3452 | diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c |
3453 | index d3fa6bd9503e..221719eac5de 100644 |
3454 | --- a/fs/hfs/bnode.c |
3455 | +++ b/fs/hfs/bnode.c |
3456 | @@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) |
3457 | page_cache_release(page); |
3458 | goto fail; |
3459 | } |
3460 | - page_cache_release(page); |
3461 | node->page[i] = page; |
3462 | } |
3463 | |
3464 | @@ -398,11 +397,11 @@ node_error: |
3465 | |
3466 | void hfs_bnode_free(struct hfs_bnode *node) |
3467 | { |
3468 | - //int i; |
3469 | + int i; |
3470 | |
3471 | - //for (i = 0; i < node->tree->pages_per_bnode; i++) |
3472 | - // if (node->page[i]) |
3473 | - // page_cache_release(node->page[i]); |
3474 | + for (i = 0; i < node->tree->pages_per_bnode; i++) |
3475 | + if (node->page[i]) |
3476 | + page_cache_release(node->page[i]); |
3477 | kfree(node); |
3478 | } |
3479 | |
3480 | diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c |
3481 | index 9f4ee7f52026..6fc766df0461 100644 |
3482 | --- a/fs/hfs/brec.c |
3483 | +++ b/fs/hfs/brec.c |
3484 | @@ -131,13 +131,16 @@ skip: |
3485 | hfs_bnode_write(node, entry, data_off + key_len, entry_len); |
3486 | hfs_bnode_dump(node); |
3487 | |
3488 | - if (new_node) { |
3489 | - /* update parent key if we inserted a key |
3490 | - * at the start of the first node |
3491 | - */ |
3492 | - if (!rec && new_node != node) |
3493 | - hfs_brec_update_parent(fd); |
3494 | + /* |
3495 | + * update parent key if we inserted a key |
3496 | + * at the start of the node and it is not the new node |
3497 | + */ |
3498 | + if (!rec && new_node != node) { |
3499 | + hfs_bnode_read_key(node, fd->search_key, data_off + size); |
3500 | + hfs_brec_update_parent(fd); |
3501 | + } |
3502 | |
3503 | + if (new_node) { |
3504 | hfs_bnode_put(fd->bnode); |
3505 | if (!new_node->parent) { |
3506 | hfs_btree_inc_height(tree); |
3507 | @@ -166,9 +169,6 @@ skip: |
3508 | goto again; |
3509 | } |
3510 | |
3511 | - if (!rec) |
3512 | - hfs_brec_update_parent(fd); |
3513 | - |
3514 | return 0; |
3515 | } |
3516 | |
3517 | @@ -366,6 +366,8 @@ again: |
3518 | if (IS_ERR(parent)) |
3519 | return PTR_ERR(parent); |
3520 | __hfs_brec_find(parent, fd); |
3521 | + if (fd->record < 0) |
3522 | + return -ENOENT; |
3523 | hfs_bnode_dump(parent); |
3524 | rec = fd->record; |
3525 | |
3526 | diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c |
3527 | index 759708fd9331..63924662aaf3 100644 |
3528 | --- a/fs/hfsplus/bnode.c |
3529 | +++ b/fs/hfsplus/bnode.c |
3530 | @@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) |
3531 | page_cache_release(page); |
3532 | goto fail; |
3533 | } |
3534 | - page_cache_release(page); |
3535 | node->page[i] = page; |
3536 | } |
3537 | |
3538 | @@ -566,13 +565,11 @@ node_error: |
3539 | |
3540 | void hfs_bnode_free(struct hfs_bnode *node) |
3541 | { |
3542 | -#if 0 |
3543 | int i; |
3544 | |
3545 | for (i = 0; i < node->tree->pages_per_bnode; i++) |
3546 | if (node->page[i]) |
3547 | page_cache_release(node->page[i]); |
3548 | -#endif |
3549 | kfree(node); |
3550 | } |
3551 | |
3552 | diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c |
3553 | index 4227dc4f7437..8c44654ce274 100644 |
3554 | --- a/fs/jbd2/checkpoint.c |
3555 | +++ b/fs/jbd2/checkpoint.c |
3556 | @@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal) |
3557 | * journal_clean_one_cp_list |
3558 | * |
3559 | * Find all the written-back checkpoint buffers in the given list and |
3560 | - * release them. |
3561 | + * release them. If 'destroy' is set, clean all buffers unconditionally. |
3562 | * |
3563 | * Called with j_list_lock held. |
3564 | * Returns 1 if we freed the transaction, 0 otherwise. |
3565 | */ |
3566 | -static int journal_clean_one_cp_list(struct journal_head *jh) |
3567 | +static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) |
3568 | { |
3569 | struct journal_head *last_jh; |
3570 | struct journal_head *next_jh = jh; |
3571 | @@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh) |
3572 | do { |
3573 | jh = next_jh; |
3574 | next_jh = jh->b_cpnext; |
3575 | - ret = __try_to_free_cp_buf(jh); |
3576 | + if (!destroy) |
3577 | + ret = __try_to_free_cp_buf(jh); |
3578 | + else |
3579 | + ret = __jbd2_journal_remove_checkpoint(jh) + 1; |
3580 | if (!ret) |
3581 | return freed; |
3582 | if (ret == 2) |
3583 | @@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh) |
3584 | * journal_clean_checkpoint_list |
3585 | * |
3586 | * Find all the written-back checkpoint buffers in the journal and release them. |
3587 | + * If 'destroy' is set, release all buffers unconditionally. |
3588 | * |
3589 | * Called with j_list_lock held. |
3590 | */ |
3591 | -void __jbd2_journal_clean_checkpoint_list(journal_t *journal) |
3592 | +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) |
3593 | { |
3594 | transaction_t *transaction, *last_transaction, *next_transaction; |
3595 | int ret; |
3596 | @@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) |
3597 | do { |
3598 | transaction = next_transaction; |
3599 | next_transaction = transaction->t_cpnext; |
3600 | - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list); |
3601 | + ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, |
3602 | + destroy); |
3603 | /* |
3604 | * This function only frees up some memory if possible so we |
3605 | * dont have an obligation to finish processing. Bail out if |
3606 | @@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) |
3607 | * we can possibly see not yet submitted buffers on io_list |
3608 | */ |
3609 | ret = journal_clean_one_cp_list(transaction-> |
3610 | - t_checkpoint_io_list); |
3611 | + t_checkpoint_io_list, destroy); |
3612 | if (need_resched()) |
3613 | return; |
3614 | /* |
3615 | @@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) |
3616 | } |
3617 | |
3618 | /* |
3619 | + * Remove buffers from all checkpoint lists as journal is aborted and we just |
3620 | + * need to free memory |
3621 | + */ |
3622 | +void jbd2_journal_destroy_checkpoint(journal_t *journal) |
3623 | +{ |
3624 | + /* |
3625 | + * We loop because __jbd2_journal_clean_checkpoint_list() may abort |
3626 | + * early due to a need of rescheduling. |
3627 | + */ |
3628 | + while (1) { |
3629 | + spin_lock(&journal->j_list_lock); |
3630 | + if (!journal->j_checkpoint_transactions) { |
3631 | + spin_unlock(&journal->j_list_lock); |
3632 | + break; |
3633 | + } |
3634 | + __jbd2_journal_clean_checkpoint_list(journal, true); |
3635 | + spin_unlock(&journal->j_list_lock); |
3636 | + cond_resched(); |
3637 | + } |
3638 | +} |
3639 | + |
3640 | +/* |
3641 | * journal_remove_checkpoint: called after a buffer has been committed |
3642 | * to disk (either by being write-back flushed to disk, or being |
3643 | * committed to the log). |
3644 | diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c |
3645 | index b73e0215baa7..362e5f614450 100644 |
3646 | --- a/fs/jbd2/commit.c |
3647 | +++ b/fs/jbd2/commit.c |
3648 | @@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) |
3649 | * frees some memory |
3650 | */ |
3651 | spin_lock(&journal->j_list_lock); |
3652 | - __jbd2_journal_clean_checkpoint_list(journal); |
3653 | + __jbd2_journal_clean_checkpoint_list(journal, false); |
3654 | spin_unlock(&journal->j_list_lock); |
3655 | |
3656 | jbd_debug(3, "JBD2: commit phase 1\n"); |
3657 | diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c |
3658 | index 4ff3fad4e9e3..2721513adb1f 100644 |
3659 | --- a/fs/jbd2/journal.c |
3660 | +++ b/fs/jbd2/journal.c |
3661 | @@ -1693,8 +1693,17 @@ int jbd2_journal_destroy(journal_t *journal) |
3662 | while (journal->j_checkpoint_transactions != NULL) { |
3663 | spin_unlock(&journal->j_list_lock); |
3664 | mutex_lock(&journal->j_checkpoint_mutex); |
3665 | - jbd2_log_do_checkpoint(journal); |
3666 | + err = jbd2_log_do_checkpoint(journal); |
3667 | mutex_unlock(&journal->j_checkpoint_mutex); |
3668 | + /* |
3669 | + * If checkpointing failed, just free the buffers to avoid |
3670 | + * looping forever |
3671 | + */ |
3672 | + if (err) { |
3673 | + jbd2_journal_destroy_checkpoint(journal); |
3674 | + spin_lock(&journal->j_list_lock); |
3675 | + break; |
3676 | + } |
3677 | spin_lock(&journal->j_list_lock); |
3678 | } |
3679 | |
3680 | diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c |
3681 | index b3289d701eea..14e3b1e1b17d 100644 |
3682 | --- a/fs/nfs/flexfilelayout/flexfilelayout.c |
3683 | +++ b/fs/nfs/flexfilelayout/flexfilelayout.c |
3684 | @@ -1199,6 +1199,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task, |
3685 | hdr->res.verf->committed == NFS_DATA_SYNC) |
3686 | ff_layout_set_layoutcommit(hdr); |
3687 | |
3688 | + /* zero out fattr since we don't care DS attr at all */ |
3689 | + hdr->fattr.valid = 0; |
3690 | + if (task->tk_status >= 0) |
3691 | + nfs_writeback_update_inode(hdr); |
3692 | + |
3693 | return 0; |
3694 | } |
3695 | |
3696 | diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
3697 | index f13e1969eedd..b28fa4cbea52 100644 |
3698 | --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
3699 | +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c |
3700 | @@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo, |
3701 | range->offset, range->length)) |
3702 | continue; |
3703 | /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) |
3704 | - * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4) |
3705 | + * + array length + deviceid(NFS4_DEVICEID4_SIZE) |
3706 | + * + status(4) + opnum(4) |
3707 | */ |
3708 | p = xdr_reserve_space(xdr, |
3709 | - 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); |
3710 | + 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); |
3711 | if (unlikely(!p)) |
3712 | return -ENOBUFS; |
3713 | p = xdr_encode_hyper(p, err->offset); |
3714 | p = xdr_encode_hyper(p, err->length); |
3715 | p = xdr_encode_opaque_fixed(p, &err->stateid, |
3716 | NFS4_STATEID_SIZE); |
3717 | + /* Encode 1 error */ |
3718 | + *p++ = cpu_to_be32(1); |
3719 | p = xdr_encode_opaque_fixed(p, &err->deviceid, |
3720 | NFS4_DEVICEID4_SIZE); |
3721 | *p++ = cpu_to_be32(err->status); |
3722 | diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c |
3723 | index 0adc7d245b3d..4afbe13321cb 100644 |
3724 | --- a/fs/nfs/inode.c |
3725 | +++ b/fs/nfs/inode.c |
3726 | @@ -1273,13 +1273,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat |
3727 | return 0; |
3728 | } |
3729 | |
3730 | -static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr) |
3731 | -{ |
3732 | - if (!(fattr->valid & NFS_ATTR_FATTR_CTIME)) |
3733 | - return 0; |
3734 | - return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; |
3735 | -} |
3736 | - |
3737 | static atomic_long_t nfs_attr_generation_counter; |
3738 | |
3739 | static unsigned long nfs_read_attr_generation_counter(void) |
3740 | @@ -1428,7 +1421,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n |
3741 | const struct nfs_inode *nfsi = NFS_I(inode); |
3742 | |
3743 | return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || |
3744 | - nfs_ctime_need_update(inode, fattr) || |
3745 | ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); |
3746 | } |
3747 | |
3748 | @@ -1491,6 +1483,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr |
3749 | { |
3750 | unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; |
3751 | |
3752 | + /* |
3753 | + * Don't revalidate the pagecache if we hold a delegation, but do |
3754 | + * force an attribute update |
3755 | + */ |
3756 | + if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) |
3757 | + invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED; |
3758 | + |
3759 | if (S_ISDIR(inode->i_mode)) |
3760 | invalid |= NFS_INO_INVALID_DATA; |
3761 | nfs_set_cache_invalid(inode, invalid); |
3762 | diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h |
3763 | index 9b372b845f6a..1dad18105ed0 100644 |
3764 | --- a/fs/nfs/internal.h |
3765 | +++ b/fs/nfs/internal.h |
3766 | @@ -490,6 +490,9 @@ void nfs_retry_commit(struct list_head *page_list, |
3767 | void nfs_commitdata_release(struct nfs_commit_data *data); |
3768 | void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, |
3769 | struct nfs_commit_info *cinfo); |
3770 | +void nfs_request_add_commit_list_locked(struct nfs_page *req, |
3771 | + struct list_head *dst, |
3772 | + struct nfs_commit_info *cinfo); |
3773 | void nfs_request_remove_commit_list(struct nfs_page *req, |
3774 | struct nfs_commit_info *cinfo); |
3775 | void nfs_init_cinfo(struct nfs_commit_info *cinfo, |
3776 | @@ -623,13 +626,15 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) |
3777 | * Record the page as unstable and mark its inode as dirty. |
3778 | */ |
3779 | static inline |
3780 | -void nfs_mark_page_unstable(struct page *page) |
3781 | +void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo) |
3782 | { |
3783 | - struct inode *inode = page_file_mapping(page)->host; |
3784 | + if (!cinfo->dreq) { |
3785 | + struct inode *inode = page_file_mapping(page)->host; |
3786 | |
3787 | - inc_zone_page_state(page, NR_UNSTABLE_NFS); |
3788 | - inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); |
3789 | - __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
3790 | + inc_zone_page_state(page, NR_UNSTABLE_NFS); |
3791 | + inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE); |
3792 | + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
3793 | + } |
3794 | } |
3795 | |
3796 | /* |
3797 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
3798 | index 3acb1eb72930..73c8204ad463 100644 |
3799 | --- a/fs/nfs/nfs4proc.c |
3800 | +++ b/fs/nfs/nfs4proc.c |
3801 | @@ -1156,6 +1156,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) |
3802 | return 0; |
3803 | if ((delegation->type & fmode) != fmode) |
3804 | return 0; |
3805 | + if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) |
3806 | + return 0; |
3807 | if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) |
3808 | return 0; |
3809 | nfs_mark_delegation_referenced(delegation); |
3810 | @@ -1220,6 +1222,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) |
3811 | } |
3812 | |
3813 | static void nfs_clear_open_stateid_locked(struct nfs4_state *state, |
3814 | + nfs4_stateid *arg_stateid, |
3815 | nfs4_stateid *stateid, fmode_t fmode) |
3816 | { |
3817 | clear_bit(NFS_O_RDWR_STATE, &state->flags); |
3818 | @@ -1238,8 +1241,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, |
3819 | if (stateid == NULL) |
3820 | return; |
3821 | /* Handle races with OPEN */ |
3822 | - if (!nfs4_stateid_match_other(stateid, &state->open_stateid) || |
3823 | - !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { |
3824 | + if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || |
3825 | + (nfs4_stateid_match_other(stateid, &state->open_stateid) && |
3826 | + !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { |
3827 | nfs_resync_open_stateid_locked(state); |
3828 | return; |
3829 | } |
3830 | @@ -1248,10 +1252,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, |
3831 | nfs4_stateid_copy(&state->open_stateid, stateid); |
3832 | } |
3833 | |
3834 | -static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) |
3835 | +static void nfs_clear_open_stateid(struct nfs4_state *state, |
3836 | + nfs4_stateid *arg_stateid, |
3837 | + nfs4_stateid *stateid, fmode_t fmode) |
3838 | { |
3839 | write_seqlock(&state->seqlock); |
3840 | - nfs_clear_open_stateid_locked(state, stateid, fmode); |
3841 | + nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); |
3842 | write_sequnlock(&state->seqlock); |
3843 | if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) |
3844 | nfs4_schedule_state_manager(state->owner->so_server->nfs_client); |
3845 | @@ -2425,7 +2431,7 @@ static int _nfs4_do_open(struct inode *dir, |
3846 | goto err_free_label; |
3847 | state = ctx->state; |
3848 | |
3849 | - if ((opendata->o_arg.open_flags & O_EXCL) && |
3850 | + if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && |
3851 | (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { |
3852 | nfs4_exclusive_attrset(opendata, sattr); |
3853 | |
3854 | @@ -2684,7 +2690,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data) |
3855 | goto out_release; |
3856 | } |
3857 | } |
3858 | - nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); |
3859 | + nfs_clear_open_stateid(state, &calldata->arg.stateid, |
3860 | + res_stateid, calldata->arg.fmode); |
3861 | out_release: |
3862 | nfs_release_seqid(calldata->arg.seqid); |
3863 | nfs_refresh_inode(calldata->inode, calldata->res.fattr); |
3864 | @@ -4984,7 +4991,7 @@ nfs4_init_nonuniform_client_string(struct nfs_client *clp) |
3865 | return 0; |
3866 | retry: |
3867 | rcu_read_lock(); |
3868 | - len = 10 + strlen(clp->cl_ipaddr) + 1 + |
3869 | + len = 14 + strlen(clp->cl_ipaddr) + 1 + |
3870 | strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + |
3871 | 1 + |
3872 | strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + |
3873 | @@ -8661,6 +8668,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { |
3874 | .reboot_recovery_ops = &nfs41_reboot_recovery_ops, |
3875 | .nograce_recovery_ops = &nfs41_nograce_recovery_ops, |
3876 | .state_renewal_ops = &nfs41_state_renewal_ops, |
3877 | + .mig_recovery_ops = &nfs41_mig_recovery_ops, |
3878 | }; |
3879 | #endif |
3880 | |
3881 | diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c |
3882 | index 4984bbe55ff1..7c5718ba625e 100644 |
3883 | --- a/fs/nfs/pagelist.c |
3884 | +++ b/fs/nfs/pagelist.c |
3885 | @@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init); |
3886 | void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) |
3887 | { |
3888 | spin_lock(&hdr->lock); |
3889 | - if (pos < hdr->io_start + hdr->good_bytes) { |
3890 | - set_bit(NFS_IOHDR_ERROR, &hdr->flags); |
3891 | + if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags) |
3892 | + || pos < hdr->io_start + hdr->good_bytes) { |
3893 | clear_bit(NFS_IOHDR_EOF, &hdr->flags); |
3894 | hdr->good_bytes = pos - hdr->io_start; |
3895 | hdr->error = error; |
3896 | diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c |
3897 | index f37e25b6311c..e5c679f04099 100644 |
3898 | --- a/fs/nfs/pnfs_nfs.c |
3899 | +++ b/fs/nfs/pnfs_nfs.c |
3900 | @@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) |
3901 | return false; |
3902 | } |
3903 | |
3904 | +/* |
3905 | + * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does, |
3906 | + * declare a match. |
3907 | + */ |
3908 | static bool |
3909 | _same_data_server_addrs_locked(const struct list_head *dsaddrs1, |
3910 | const struct list_head *dsaddrs2) |
3911 | { |
3912 | struct nfs4_pnfs_ds_addr *da1, *da2; |
3913 | - |
3914 | - /* step through both lists, comparing as we go */ |
3915 | - for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), |
3916 | - da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); |
3917 | - da1 != NULL && da2 != NULL; |
3918 | - da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), |
3919 | - da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { |
3920 | - if (!same_sockaddr((struct sockaddr *)&da1->da_addr, |
3921 | - (struct sockaddr *)&da2->da_addr)) |
3922 | - return false; |
3923 | + struct sockaddr *sa1, *sa2; |
3924 | + bool match = false; |
3925 | + |
3926 | + list_for_each_entry(da1, dsaddrs1, da_node) { |
3927 | + sa1 = (struct sockaddr *)&da1->da_addr; |
3928 | + match = false; |
3929 | + list_for_each_entry(da2, dsaddrs2, da_node) { |
3930 | + sa2 = (struct sockaddr *)&da2->da_addr; |
3931 | + match = same_sockaddr(sa1, sa2); |
3932 | + if (match) |
3933 | + break; |
3934 | + } |
3935 | + if (!match) |
3936 | + break; |
3937 | } |
3938 | - if (da1 == NULL && da2 == NULL) |
3939 | - return true; |
3940 | - |
3941 | - return false; |
3942 | + return match; |
3943 | } |
3944 | |
3945 | /* |
3946 | @@ -863,9 +868,10 @@ pnfs_layout_mark_request_commit(struct nfs_page *req, |
3947 | } |
3948 | set_bit(PG_COMMIT_TO_DS, &req->wb_flags); |
3949 | cinfo->ds->nwritten++; |
3950 | - spin_unlock(cinfo->lock); |
3951 | |
3952 | - nfs_request_add_commit_list(req, list, cinfo); |
3953 | + nfs_request_add_commit_list_locked(req, list, cinfo); |
3954 | + spin_unlock(cinfo->lock); |
3955 | + nfs_mark_page_unstable(req->wb_page, cinfo); |
3956 | } |
3957 | EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); |
3958 | |
3959 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
3960 | index 75a35a1afa79..fdee9270ca15 100644 |
3961 | --- a/fs/nfs/write.c |
3962 | +++ b/fs/nfs/write.c |
3963 | @@ -768,6 +768,28 @@ nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi, |
3964 | } |
3965 | |
3966 | /** |
3967 | + * nfs_request_add_commit_list_locked - add request to a commit list |
3968 | + * @req: pointer to a struct nfs_page |
3969 | + * @dst: commit list head |
3970 | + * @cinfo: holds list lock and accounting info |
3971 | + * |
3972 | + * This sets the PG_CLEAN bit, updates the cinfo count of |
3973 | + * number of outstanding requests requiring a commit as well as |
3974 | + * the MM page stats. |
3975 | + * |
3976 | + * The caller must hold the cinfo->lock, and the nfs_page lock. |
3977 | + */ |
3978 | +void |
3979 | +nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, |
3980 | + struct nfs_commit_info *cinfo) |
3981 | +{ |
3982 | + set_bit(PG_CLEAN, &req->wb_flags); |
3983 | + nfs_list_add_request(req, dst); |
3984 | + cinfo->mds->ncommit++; |
3985 | +} |
3986 | +EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); |
3987 | + |
3988 | +/** |
3989 | * nfs_request_add_commit_list - add request to a commit list |
3990 | * @req: pointer to a struct nfs_page |
3991 | * @dst: commit list head |
3992 | @@ -784,13 +806,10 @@ void |
3993 | nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, |
3994 | struct nfs_commit_info *cinfo) |
3995 | { |
3996 | - set_bit(PG_CLEAN, &(req)->wb_flags); |
3997 | spin_lock(cinfo->lock); |
3998 | - nfs_list_add_request(req, dst); |
3999 | - cinfo->mds->ncommit++; |
4000 | + nfs_request_add_commit_list_locked(req, dst, cinfo); |
4001 | spin_unlock(cinfo->lock); |
4002 | - if (!cinfo->dreq) |
4003 | - nfs_mark_page_unstable(req->wb_page); |
4004 | + nfs_mark_page_unstable(req->wb_page, cinfo); |
4005 | } |
4006 | EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); |
4007 | |
4008 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
4009 | index 95202719a1fd..75189cd34583 100644 |
4010 | --- a/fs/nfsd/nfs4state.c |
4011 | +++ b/fs/nfsd/nfs4state.c |
4012 | @@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) |
4013 | list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); |
4014 | } |
4015 | |
4016 | -static void |
4017 | +static bool |
4018 | unhash_delegation_locked(struct nfs4_delegation *dp) |
4019 | { |
4020 | struct nfs4_file *fp = dp->dl_stid.sc_file; |
4021 | |
4022 | lockdep_assert_held(&state_lock); |
4023 | |
4024 | + if (list_empty(&dp->dl_perfile)) |
4025 | + return false; |
4026 | + |
4027 | dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; |
4028 | /* Ensure that deleg break won't try to requeue it */ |
4029 | ++dp->dl_time; |
4030 | @@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp) |
4031 | list_del_init(&dp->dl_recall_lru); |
4032 | list_del_init(&dp->dl_perfile); |
4033 | spin_unlock(&fp->fi_lock); |
4034 | + return true; |
4035 | } |
4036 | |
4037 | static void destroy_delegation(struct nfs4_delegation *dp) |
4038 | { |
4039 | + bool unhashed; |
4040 | + |
4041 | spin_lock(&state_lock); |
4042 | - unhash_delegation_locked(dp); |
4043 | + unhashed = unhash_delegation_locked(dp); |
4044 | spin_unlock(&state_lock); |
4045 | - put_clnt_odstate(dp->dl_clnt_odstate); |
4046 | - nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
4047 | - nfs4_put_stid(&dp->dl_stid); |
4048 | + if (unhashed) { |
4049 | + put_clnt_odstate(dp->dl_clnt_odstate); |
4050 | + nfs4_put_deleg_lease(dp->dl_stid.sc_file); |
4051 | + nfs4_put_stid(&dp->dl_stid); |
4052 | + } |
4053 | } |
4054 | |
4055 | static void revoke_delegation(struct nfs4_delegation *dp) |
4056 | @@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) |
4057 | sop->so_ops->so_free(sop); |
4058 | } |
4059 | |
4060 | -static void unhash_ol_stateid(struct nfs4_ol_stateid *stp) |
4061 | +static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) |
4062 | { |
4063 | struct nfs4_file *fp = stp->st_stid.sc_file; |
4064 | |
4065 | lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); |
4066 | |
4067 | + if (list_empty(&stp->st_perfile)) |
4068 | + return false; |
4069 | + |
4070 | spin_lock(&fp->fi_lock); |
4071 | - list_del(&stp->st_perfile); |
4072 | + list_del_init(&stp->st_perfile); |
4073 | spin_unlock(&fp->fi_lock); |
4074 | list_del(&stp->st_perstateowner); |
4075 | + return true; |
4076 | } |
4077 | |
4078 | static void nfs4_free_ol_stateid(struct nfs4_stid *stid) |
4079 | @@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, |
4080 | list_add(&stp->st_locks, reaplist); |
4081 | } |
4082 | |
4083 | -static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) |
4084 | +static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) |
4085 | { |
4086 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); |
4087 | |
4088 | lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); |
4089 | |
4090 | list_del_init(&stp->st_locks); |
4091 | - unhash_ol_stateid(stp); |
4092 | nfs4_unhash_stid(&stp->st_stid); |
4093 | + return unhash_ol_stateid(stp); |
4094 | } |
4095 | |
4096 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) |
4097 | { |
4098 | struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); |
4099 | + bool unhashed; |
4100 | |
4101 | spin_lock(&oo->oo_owner.so_client->cl_lock); |
4102 | - unhash_lock_stateid(stp); |
4103 | + unhashed = unhash_lock_stateid(stp); |
4104 | spin_unlock(&oo->oo_owner.so_client->cl_lock); |
4105 | - nfs4_put_stid(&stp->st_stid); |
4106 | + if (unhashed) |
4107 | + nfs4_put_stid(&stp->st_stid); |
4108 | } |
4109 | |
4110 | static void unhash_lockowner_locked(struct nfs4_lockowner *lo) |
4111 | @@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo) |
4112 | while (!list_empty(&lo->lo_owner.so_stateids)) { |
4113 | stp = list_first_entry(&lo->lo_owner.so_stateids, |
4114 | struct nfs4_ol_stateid, st_perstateowner); |
4115 | - unhash_lock_stateid(stp); |
4116 | + WARN_ON(!unhash_lock_stateid(stp)); |
4117 | put_ol_stateid_locked(stp, &reaplist); |
4118 | } |
4119 | spin_unlock(&clp->cl_lock); |
4120 | @@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, |
4121 | { |
4122 | struct nfs4_ol_stateid *stp; |
4123 | |
4124 | + lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); |
4125 | + |
4126 | while (!list_empty(&open_stp->st_locks)) { |
4127 | stp = list_entry(open_stp->st_locks.next, |
4128 | struct nfs4_ol_stateid, st_locks); |
4129 | - unhash_lock_stateid(stp); |
4130 | + WARN_ON(!unhash_lock_stateid(stp)); |
4131 | put_ol_stateid_locked(stp, reaplist); |
4132 | } |
4133 | } |
4134 | |
4135 | -static void unhash_open_stateid(struct nfs4_ol_stateid *stp, |
4136 | +static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, |
4137 | struct list_head *reaplist) |
4138 | { |
4139 | + bool unhashed; |
4140 | + |
4141 | lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); |
4142 | |
4143 | - unhash_ol_stateid(stp); |
4144 | + unhashed = unhash_ol_stateid(stp); |
4145 | release_open_stateid_locks(stp, reaplist); |
4146 | + return unhashed; |
4147 | } |
4148 | |
4149 | static void release_open_stateid(struct nfs4_ol_stateid *stp) |
4150 | @@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp) |
4151 | LIST_HEAD(reaplist); |
4152 | |
4153 | spin_lock(&stp->st_stid.sc_client->cl_lock); |
4154 | - unhash_open_stateid(stp, &reaplist); |
4155 | - put_ol_stateid_locked(stp, &reaplist); |
4156 | + if (unhash_open_stateid(stp, &reaplist)) |
4157 | + put_ol_stateid_locked(stp, &reaplist); |
4158 | spin_unlock(&stp->st_stid.sc_client->cl_lock); |
4159 | free_ol_stateid_reaplist(&reaplist); |
4160 | } |
4161 | @@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo) |
4162 | while (!list_empty(&oo->oo_owner.so_stateids)) { |
4163 | stp = list_first_entry(&oo->oo_owner.so_stateids, |
4164 | struct nfs4_ol_stateid, st_perstateowner); |
4165 | - unhash_open_stateid(stp, &reaplist); |
4166 | - put_ol_stateid_locked(stp, &reaplist); |
4167 | + if (unhash_open_stateid(stp, &reaplist)) |
4168 | + put_ol_stateid_locked(stp, &reaplist); |
4169 | } |
4170 | spin_unlock(&clp->cl_lock); |
4171 | free_ol_stateid_reaplist(&reaplist); |
4172 | @@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp) |
4173 | spin_lock(&state_lock); |
4174 | while (!list_empty(&clp->cl_delegations)) { |
4175 | dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); |
4176 | - unhash_delegation_locked(dp); |
4177 | + WARN_ON(!unhash_delegation_locked(dp)); |
4178 | list_add(&dp->dl_recall_lru, &reaplist); |
4179 | } |
4180 | spin_unlock(&state_lock); |
4181 | @@ -4345,7 +4364,7 @@ nfs4_laundromat(struct nfsd_net *nn) |
4182 | new_timeo = min(new_timeo, t); |
4183 | break; |
4184 | } |
4185 | - unhash_delegation_locked(dp); |
4186 | + WARN_ON(!unhash_delegation_locked(dp)); |
4187 | list_add(&dp->dl_recall_lru, &reaplist); |
4188 | } |
4189 | spin_unlock(&state_lock); |
4190 | @@ -4751,7 +4770,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4191 | if (check_for_locks(stp->st_stid.sc_file, |
4192 | lockowner(stp->st_stateowner))) |
4193 | break; |
4194 | - unhash_lock_stateid(stp); |
4195 | + WARN_ON(!unhash_lock_stateid(stp)); |
4196 | spin_unlock(&cl->cl_lock); |
4197 | nfs4_put_stid(s); |
4198 | ret = nfs_ok; |
4199 | @@ -4967,20 +4986,23 @@ out: |
4200 | static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) |
4201 | { |
4202 | struct nfs4_client *clp = s->st_stid.sc_client; |
4203 | + bool unhashed; |
4204 | LIST_HEAD(reaplist); |
4205 | |
4206 | s->st_stid.sc_type = NFS4_CLOSED_STID; |
4207 | spin_lock(&clp->cl_lock); |
4208 | - unhash_open_stateid(s, &reaplist); |
4209 | + unhashed = unhash_open_stateid(s, &reaplist); |
4210 | |
4211 | if (clp->cl_minorversion) { |
4212 | - put_ol_stateid_locked(s, &reaplist); |
4213 | + if (unhashed) |
4214 | + put_ol_stateid_locked(s, &reaplist); |
4215 | spin_unlock(&clp->cl_lock); |
4216 | free_ol_stateid_reaplist(&reaplist); |
4217 | } else { |
4218 | spin_unlock(&clp->cl_lock); |
4219 | free_ol_stateid_reaplist(&reaplist); |
4220 | - move_to_close_lru(s, clp->net); |
4221 | + if (unhashed) |
4222 | + move_to_close_lru(s, clp->net); |
4223 | } |
4224 | } |
4225 | |
4226 | @@ -6019,7 +6041,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, |
4227 | |
4228 | static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, |
4229 | struct list_head *collect, |
4230 | - void (*func)(struct nfs4_ol_stateid *)) |
4231 | + bool (*func)(struct nfs4_ol_stateid *)) |
4232 | { |
4233 | struct nfs4_openowner *oop; |
4234 | struct nfs4_ol_stateid *stp, *st_next; |
4235 | @@ -6033,9 +6055,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, |
4236 | list_for_each_entry_safe(lst, lst_next, |
4237 | &stp->st_locks, st_locks) { |
4238 | if (func) { |
4239 | - func(lst); |
4240 | - nfsd_inject_add_lock_to_list(lst, |
4241 | - collect); |
4242 | + if (func(lst)) |
4243 | + nfsd_inject_add_lock_to_list(lst, |
4244 | + collect); |
4245 | } |
4246 | ++count; |
4247 | /* |
4248 | @@ -6305,7 +6327,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, |
4249 | continue; |
4250 | |
4251 | atomic_inc(&clp->cl_refcount); |
4252 | - unhash_delegation_locked(dp); |
4253 | + WARN_ON(!unhash_delegation_locked(dp)); |
4254 | list_add(&dp->dl_recall_lru, victims); |
4255 | } |
4256 | ++count; |
4257 | @@ -6635,7 +6657,7 @@ nfs4_state_shutdown_net(struct net *net) |
4258 | spin_lock(&state_lock); |
4259 | list_for_each_safe(pos, next, &nn->del_recall_lru) { |
4260 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
4261 | - unhash_delegation_locked(dp); |
4262 | + WARN_ON(!unhash_delegation_locked(dp)); |
4263 | list_add(&dp->dl_recall_lru, &reaplist); |
4264 | } |
4265 | spin_unlock(&state_lock); |
4266 | diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c |
4267 | index 75e0563c09d1..b81f725ee21d 100644 |
4268 | --- a/fs/nfsd/nfs4xdr.c |
4269 | +++ b/fs/nfsd/nfs4xdr.c |
4270 | @@ -2140,6 +2140,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, |
4271 | return nfsd4_encode_user(xdr, rqstp, ace->who_uid); |
4272 | } |
4273 | |
4274 | +static inline __be32 |
4275 | +nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type) |
4276 | +{ |
4277 | + __be32 *p; |
4278 | + |
4279 | + if (layout_type) { |
4280 | + p = xdr_reserve_space(xdr, 8); |
4281 | + if (!p) |
4282 | + return nfserr_resource; |
4283 | + *p++ = cpu_to_be32(1); |
4284 | + *p++ = cpu_to_be32(layout_type); |
4285 | + } else { |
4286 | + p = xdr_reserve_space(xdr, 4); |
4287 | + if (!p) |
4288 | + return nfserr_resource; |
4289 | + *p++ = cpu_to_be32(0); |
4290 | + } |
4291 | + |
4292 | + return 0; |
4293 | +} |
4294 | + |
4295 | #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ |
4296 | FATTR4_WORD0_RDATTR_ERROR) |
4297 | #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID |
4298 | @@ -2688,20 +2709,16 @@ out_acl: |
4299 | p = xdr_encode_hyper(p, stat.ino); |
4300 | } |
4301 | #ifdef CONFIG_NFSD_PNFS |
4302 | - if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) || |
4303 | - (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) { |
4304 | - if (exp->ex_layout_type) { |
4305 | - p = xdr_reserve_space(xdr, 8); |
4306 | - if (!p) |
4307 | - goto out_resource; |
4308 | - *p++ = cpu_to_be32(1); |
4309 | - *p++ = cpu_to_be32(exp->ex_layout_type); |
4310 | - } else { |
4311 | - p = xdr_reserve_space(xdr, 4); |
4312 | - if (!p) |
4313 | - goto out_resource; |
4314 | - *p++ = cpu_to_be32(0); |
4315 | - } |
4316 | + if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) { |
4317 | + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); |
4318 | + if (status) |
4319 | + goto out; |
4320 | + } |
4321 | + |
4322 | + if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) { |
4323 | + status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); |
4324 | + if (status) |
4325 | + goto out; |
4326 | } |
4327 | |
4328 | if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) { |
4329 | diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h |
4330 | index edb640ae9a94..eb1cebed3f36 100644 |
4331 | --- a/include/linux/jbd2.h |
4332 | +++ b/include/linux/jbd2.h |
4333 | @@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); |
4334 | extern void jbd2_journal_commit_transaction(journal_t *); |
4335 | |
4336 | /* Checkpoint list management */ |
4337 | -void __jbd2_journal_clean_checkpoint_list(journal_t *journal); |
4338 | +void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); |
4339 | int __jbd2_journal_remove_checkpoint(struct journal_head *); |
4340 | +void jbd2_journal_destroy_checkpoint(journal_t *journal); |
4341 | void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); |
4342 | |
4343 | |
4344 | diff --git a/include/linux/platform_data/st_nci.h b/include/linux/platform_data/st_nci.h |
4345 | deleted file mode 100644 |
4346 | index d9d400a297bd..000000000000 |
4347 | --- a/include/linux/platform_data/st_nci.h |
4348 | +++ /dev/null |
4349 | @@ -1,29 +0,0 @@ |
4350 | -/* |
4351 | - * Driver include for ST NCI NFC chip family. |
4352 | - * |
4353 | - * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. |
4354 | - * |
4355 | - * This program is free software; you can redistribute it and/or modify it |
4356 | - * under the terms and conditions of the GNU General Public License, |
4357 | - * version 2, as published by the Free Software Foundation. |
4358 | - * |
4359 | - * This program is distributed in the hope that it will be useful, |
4360 | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
4361 | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
4362 | - * GNU General Public License for more details. |
4363 | - * |
4364 | - * You should have received a copy of the GNU General Public License |
4365 | - * along with this program; if not, see <http://www.gnu.org/licenses/>. |
4366 | - */ |
4367 | - |
4368 | -#ifndef _ST_NCI_H_ |
4369 | -#define _ST_NCI_H_ |
4370 | - |
4371 | -#define ST_NCI_DRIVER_NAME "st_nci" |
4372 | - |
4373 | -struct st_nci_nfc_platform_data { |
4374 | - unsigned int gpio_reset; |
4375 | - unsigned int irq_polarity; |
4376 | -}; |
4377 | - |
4378 | -#endif /* _ST_NCI_H_ */ |
4379 | diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h |
4380 | index cb94ee4181d4..4929a8a9fd52 100644 |
4381 | --- a/include/linux/sunrpc/svc_rdma.h |
4382 | +++ b/include/linux/sunrpc/svc_rdma.h |
4383 | @@ -172,13 +172,6 @@ struct svcxprt_rdma { |
4384 | #define RDMAXPRT_SQ_PENDING 2 |
4385 | #define RDMAXPRT_CONN_PENDING 3 |
4386 | |
4387 | -#define RPCRDMA_MAX_SVC_SEGS (64) /* server max scatter/gather */ |
4388 | -#if RPCSVC_MAXPAYLOAD < (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT) |
4389 | -#define RPCRDMA_MAXPAYLOAD RPCSVC_MAXPAYLOAD |
4390 | -#else |
4391 | -#define RPCRDMA_MAXPAYLOAD (RPCRDMA_MAX_SVC_SEGS << PAGE_SHIFT) |
4392 | -#endif |
4393 | - |
4394 | #define RPCRDMA_LISTEN_BACKLOG 10 |
4395 | /* The default ORD value is based on two outstanding full-size writes with a |
4396 | * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READ. */ |
4397 | @@ -187,6 +180,8 @@ struct svcxprt_rdma { |
4398 | #define RPCRDMA_MAX_REQUESTS 32 |
4399 | #define RPCRDMA_MAX_REQ_SIZE 4096 |
4400 | |
4401 | +#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD |
4402 | + |
4403 | /* svc_rdma_marshal.c */ |
4404 | extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *); |
4405 | extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *, |
4406 | diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h |
4407 | index 7591788e9fbf..357e44c1a46b 100644 |
4408 | --- a/include/linux/sunrpc/xprtsock.h |
4409 | +++ b/include/linux/sunrpc/xprtsock.h |
4410 | @@ -42,6 +42,7 @@ struct sock_xprt { |
4411 | /* |
4412 | * Connection of transports |
4413 | */ |
4414 | + unsigned long sock_state; |
4415 | struct delayed_work connect_worker; |
4416 | struct sockaddr_storage srcaddr; |
4417 | unsigned short srcport; |
4418 | @@ -76,6 +77,8 @@ struct sock_xprt { |
4419 | */ |
4420 | #define TCP_RPC_REPLY (1UL << 6) |
4421 | |
4422 | +#define XPRT_SOCK_CONNECTING 1U |
4423 | + |
4424 | #endif /* __KERNEL__ */ |
4425 | |
4426 | #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ |
4427 | diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h |
4428 | index 1ab2813273cd..bf2058690ceb 100644 |
4429 | --- a/include/soc/tegra/mc.h |
4430 | +++ b/include/soc/tegra/mc.h |
4431 | @@ -66,6 +66,7 @@ struct tegra_smmu_soc { |
4432 | bool supports_round_robin_arbitration; |
4433 | bool supports_request_limit; |
4434 | |
4435 | + unsigned int num_tlb_lines; |
4436 | unsigned int num_asids; |
4437 | |
4438 | const struct tegra_smmu_ops *ops; |
4439 | diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h |
4440 | index adb5ba5cbd9d..ff99140831ba 100644 |
4441 | --- a/include/sound/hda_i915.h |
4442 | +++ b/include/sound/hda_i915.h |
4443 | @@ -11,7 +11,7 @@ int snd_hdac_get_display_clk(struct hdac_bus *bus); |
4444 | int snd_hdac_i915_init(struct hdac_bus *bus); |
4445 | int snd_hdac_i915_exit(struct hdac_bus *bus); |
4446 | #else |
4447 | -static int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable) |
4448 | +static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable) |
4449 | { |
4450 | return 0; |
4451 | } |
4452 | diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h |
4453 | index fd1a02cb3c82..003dca933803 100644 |
4454 | --- a/include/trace/events/sunrpc.h |
4455 | +++ b/include/trace/events/sunrpc.h |
4456 | @@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue, |
4457 | |
4458 | TP_STRUCT__entry( |
4459 | __field(struct svc_xprt *, xprt) |
4460 | - __field(struct svc_rqst *, rqst) |
4461 | + __field_struct(struct sockaddr_storage, ss) |
4462 | + __field(int, pid) |
4463 | + __field(unsigned long, flags) |
4464 | ), |
4465 | |
4466 | TP_fast_assign( |
4467 | __entry->xprt = xprt; |
4468 | - __entry->rqst = rqst; |
4469 | + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); |
4470 | + __entry->pid = rqst? rqst->rq_task->pid : 0; |
4471 | + __entry->flags = xprt ? xprt->xpt_flags : 0; |
4472 | ), |
4473 | |
4474 | TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, |
4475 | - (struct sockaddr *)&__entry->xprt->xpt_remote, |
4476 | - __entry->rqst ? __entry->rqst->rq_task->pid : 0, |
4477 | - show_svc_xprt_flags(__entry->xprt->xpt_flags)) |
4478 | + (struct sockaddr *)&__entry->ss, |
4479 | + __entry->pid, show_svc_xprt_flags(__entry->flags)) |
4480 | ); |
4481 | |
4482 | TRACE_EVENT(svc_xprt_dequeue, |
4483 | @@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt, |
4484 | TP_STRUCT__entry( |
4485 | __field(struct svc_xprt *, xprt) |
4486 | __field(int, len) |
4487 | + __field_struct(struct sockaddr_storage, ss) |
4488 | + __field(unsigned long, flags) |
4489 | ), |
4490 | |
4491 | TP_fast_assign( |
4492 | __entry->xprt = xprt; |
4493 | + xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); |
4494 | __entry->len = len; |
4495 | + __entry->flags = xprt ? xprt->xpt_flags : 0; |
4496 | ), |
4497 | |
4498 | TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, |
4499 | - (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, |
4500 | - show_svc_xprt_flags(__entry->xprt->xpt_flags)) |
4501 | + (struct sockaddr *)&__entry->ss, |
4502 | + __entry->len, show_svc_xprt_flags(__entry->flags)) |
4503 | ); |
4504 | #endif /* _TRACE_SUNRPC_H */ |
4505 | |
4506 | diff --git a/kernel/fork.c b/kernel/fork.c |
4507 | index dbd9b8d7b7cc..26a70dc7a915 100644 |
4508 | --- a/kernel/fork.c |
4509 | +++ b/kernel/fork.c |
4510 | @@ -1871,13 +1871,21 @@ static int check_unshare_flags(unsigned long unshare_flags) |
4511 | CLONE_NEWUSER|CLONE_NEWPID)) |
4512 | return -EINVAL; |
4513 | /* |
4514 | - * Not implemented, but pretend it works if there is nothing to |
4515 | - * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND |
4516 | - * needs to unshare vm. |
4517 | + * Not implemented, but pretend it works if there is nothing |
4518 | + * to unshare. Note that unsharing the address space or the |
4519 | + * signal handlers also need to unshare the signal queues (aka |
4520 | + * CLONE_THREAD). |
4521 | */ |
4522 | if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { |
4523 | - /* FIXME: get_task_mm() increments ->mm_users */ |
4524 | - if (atomic_read(¤t->mm->mm_users) > 1) |
4525 | + if (!thread_group_empty(current)) |
4526 | + return -EINVAL; |
4527 | + } |
4528 | + if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { |
4529 | + if (atomic_read(¤t->sighand->count) > 1) |
4530 | + return -EINVAL; |
4531 | + } |
4532 | + if (unshare_flags & CLONE_VM) { |
4533 | + if (!current_is_single_threaded()) |
4534 | return -EINVAL; |
4535 | } |
4536 | |
4537 | @@ -1946,16 +1954,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) |
4538 | if (unshare_flags & CLONE_NEWUSER) |
4539 | unshare_flags |= CLONE_THREAD | CLONE_FS; |
4540 | /* |
4541 | - * If unsharing a thread from a thread group, must also unshare vm. |
4542 | - */ |
4543 | - if (unshare_flags & CLONE_THREAD) |
4544 | - unshare_flags |= CLONE_VM; |
4545 | - /* |
4546 | * If unsharing vm, must also unshare signal handlers. |
4547 | */ |
4548 | if (unshare_flags & CLONE_VM) |
4549 | unshare_flags |= CLONE_SIGHAND; |
4550 | /* |
4551 | + * If unsharing a signal handlers, must also unshare the signal queues. |
4552 | + */ |
4553 | + if (unshare_flags & CLONE_SIGHAND) |
4554 | + unshare_flags |= CLONE_THREAD; |
4555 | + /* |
4556 | * If unsharing namespace, must also unshare filesystem information. |
4557 | */ |
4558 | if (unshare_flags & CLONE_NEWNS) |
4559 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c |
4560 | index 4c4f06176f74..a413acb59a07 100644 |
4561 | --- a/kernel/workqueue.c |
4562 | +++ b/kernel/workqueue.c |
4563 | @@ -2614,7 +2614,7 @@ void flush_workqueue(struct workqueue_struct *wq) |
4564 | out_unlock: |
4565 | mutex_unlock(&wq->mutex); |
4566 | } |
4567 | -EXPORT_SYMBOL_GPL(flush_workqueue); |
4568 | +EXPORT_SYMBOL(flush_workqueue); |
4569 | |
4570 | /** |
4571 | * drain_workqueue - drain a workqueue |
4572 | diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c |
4573 | index 6dd0335ea61b..0234361b24b8 100644 |
4574 | --- a/lib/decompress_bunzip2.c |
4575 | +++ b/lib/decompress_bunzip2.c |
4576 | @@ -743,12 +743,12 @@ exit_0: |
4577 | } |
4578 | |
4579 | #ifdef PREBOOT |
4580 | -STATIC int INIT decompress(unsigned char *buf, long len, |
4581 | +STATIC int INIT __decompress(unsigned char *buf, long len, |
4582 | long (*fill)(void*, unsigned long), |
4583 | long (*flush)(void*, unsigned long), |
4584 | - unsigned char *outbuf, |
4585 | + unsigned char *outbuf, long olen, |
4586 | long *pos, |
4587 | - void(*error)(char *x)) |
4588 | + void (*error)(char *x)) |
4589 | { |
4590 | return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); |
4591 | } |
4592 | diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c |
4593 | index d4c7891635ec..555c06bf20da 100644 |
4594 | --- a/lib/decompress_inflate.c |
4595 | +++ b/lib/decompress_inflate.c |
4596 | @@ -1,4 +1,5 @@ |
4597 | #ifdef STATIC |
4598 | +#define PREBOOT |
4599 | /* Pre-boot environment: included */ |
4600 | |
4601 | /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots |
4602 | @@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len) |
4603 | } |
4604 | |
4605 | /* Included from initramfs et al code */ |
4606 | -STATIC int INIT gunzip(unsigned char *buf, long len, |
4607 | +STATIC int INIT __gunzip(unsigned char *buf, long len, |
4608 | long (*fill)(void*, unsigned long), |
4609 | long (*flush)(void*, unsigned long), |
4610 | - unsigned char *out_buf, |
4611 | + unsigned char *out_buf, long out_len, |
4612 | long *pos, |
4613 | void(*error)(char *x)) { |
4614 | u8 *zbuf; |
4615 | struct z_stream_s *strm; |
4616 | int rc; |
4617 | - size_t out_len; |
4618 | |
4619 | rc = -1; |
4620 | if (flush) { |
4621 | out_len = 0x8000; /* 32 K */ |
4622 | out_buf = malloc(out_len); |
4623 | } else { |
4624 | - out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ |
4625 | + if (!out_len) |
4626 | + out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ |
4627 | } |
4628 | if (!out_buf) { |
4629 | error("Out of memory while allocating output buffer"); |
4630 | @@ -181,4 +182,24 @@ gunzip_nomem1: |
4631 | return rc; /* returns Z_OK (0) if successful */ |
4632 | } |
4633 | |
4634 | -#define decompress gunzip |
4635 | +#ifndef PREBOOT |
4636 | +STATIC int INIT gunzip(unsigned char *buf, long len, |
4637 | + long (*fill)(void*, unsigned long), |
4638 | + long (*flush)(void*, unsigned long), |
4639 | + unsigned char *out_buf, |
4640 | + long *pos, |
4641 | + void (*error)(char *x)) |
4642 | +{ |
4643 | + return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error); |
4644 | +} |
4645 | +#else |
4646 | +STATIC int INIT __decompress(unsigned char *buf, long len, |
4647 | + long (*fill)(void*, unsigned long), |
4648 | + long (*flush)(void*, unsigned long), |
4649 | + unsigned char *out_buf, long out_len, |
4650 | + long *pos, |
4651 | + void (*error)(char *x)) |
4652 | +{ |
4653 | + return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error); |
4654 | +} |
4655 | +#endif |
4656 | diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c |
4657 | index 40f66ebe57b7..036fc882cd72 100644 |
4658 | --- a/lib/decompress_unlz4.c |
4659 | +++ b/lib/decompress_unlz4.c |
4660 | @@ -196,12 +196,12 @@ exit_0: |
4661 | } |
4662 | |
4663 | #ifdef PREBOOT |
4664 | -STATIC int INIT decompress(unsigned char *buf, long in_len, |
4665 | +STATIC int INIT __decompress(unsigned char *buf, long in_len, |
4666 | long (*fill)(void*, unsigned long), |
4667 | long (*flush)(void*, unsigned long), |
4668 | - unsigned char *output, |
4669 | + unsigned char *output, long out_len, |
4670 | long *posp, |
4671 | - void(*error)(char *x) |
4672 | + void (*error)(char *x) |
4673 | ) |
4674 | { |
4675 | return unlz4(buf, in_len - 4, fill, flush, output, posp, error); |
4676 | diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c |
4677 | index 0be83af62b88..decb64629c14 100644 |
4678 | --- a/lib/decompress_unlzma.c |
4679 | +++ b/lib/decompress_unlzma.c |
4680 | @@ -667,13 +667,12 @@ exit_0: |
4681 | } |
4682 | |
4683 | #ifdef PREBOOT |
4684 | -STATIC int INIT decompress(unsigned char *buf, long in_len, |
4685 | +STATIC int INIT __decompress(unsigned char *buf, long in_len, |
4686 | long (*fill)(void*, unsigned long), |
4687 | long (*flush)(void*, unsigned long), |
4688 | - unsigned char *output, |
4689 | + unsigned char *output, long out_len, |
4690 | long *posp, |
4691 | - void(*error)(char *x) |
4692 | - ) |
4693 | + void (*error)(char *x)) |
4694 | { |
4695 | return unlzma(buf, in_len - 4, fill, flush, output, posp, error); |
4696 | } |
4697 | diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c |
4698 | index b94a31bdd87d..f4c158e3a022 100644 |
4699 | --- a/lib/decompress_unlzo.c |
4700 | +++ b/lib/decompress_unlzo.c |
4701 | @@ -31,6 +31,7 @@ |
4702 | */ |
4703 | |
4704 | #ifdef STATIC |
4705 | +#define PREBOOT |
4706 | #include "lzo/lzo1x_decompress_safe.c" |
4707 | #else |
4708 | #include <linux/decompress/unlzo.h> |
4709 | @@ -287,4 +288,14 @@ exit: |
4710 | return ret; |
4711 | } |
4712 | |
4713 | -#define decompress unlzo |
4714 | +#ifdef PREBOOT |
4715 | +STATIC int INIT __decompress(unsigned char *buf, long len, |
4716 | + long (*fill)(void*, unsigned long), |
4717 | + long (*flush)(void*, unsigned long), |
4718 | + unsigned char *out_buf, long olen, |
4719 | + long *pos, |
4720 | + void (*error)(char *x)) |
4721 | +{ |
4722 | + return unlzo(buf, len, fill, flush, out_buf, pos, error); |
4723 | +} |
4724 | +#endif |
4725 | diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c |
4726 | index b07a78340e9d..25d59a95bd66 100644 |
4727 | --- a/lib/decompress_unxz.c |
4728 | +++ b/lib/decompress_unxz.c |
4729 | @@ -394,4 +394,14 @@ error_alloc_state: |
4730 | * This macro is used by architecture-specific files to decompress |
4731 | * the kernel image. |
4732 | */ |
4733 | -#define decompress unxz |
4734 | +#ifdef XZ_PREBOOT |
4735 | +STATIC int INIT __decompress(unsigned char *buf, long len, |
4736 | + long (*fill)(void*, unsigned long), |
4737 | + long (*flush)(void*, unsigned long), |
4738 | + unsigned char *out_buf, long olen, |
4739 | + long *pos, |
4740 | + void (*error)(char *x)) |
4741 | +{ |
4742 | + return unxz(buf, len, fill, flush, out_buf, pos, error); |
4743 | +} |
4744 | +#endif |
4745 | diff --git a/mm/vmscan.c b/mm/vmscan.c |
4746 | index 8286938c70de..26c86e2fb5af 100644 |
4747 | --- a/mm/vmscan.c |
4748 | +++ b/mm/vmscan.c |
4749 | @@ -1190,7 +1190,7 @@ cull_mlocked: |
4750 | if (PageSwapCache(page)) |
4751 | try_to_free_swap(page); |
4752 | unlock_page(page); |
4753 | - putback_lru_page(page); |
4754 | + list_add(&page->lru, &ret_pages); |
4755 | continue; |
4756 | |
4757 | activate_locked: |
4758 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
4759 | index b8233505bf9f..8f1df6793650 100644 |
4760 | --- a/net/mac80211/tx.c |
4761 | +++ b/net/mac80211/tx.c |
4762 | @@ -311,9 +311,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) |
4763 | if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) |
4764 | return TX_CONTINUE; |
4765 | |
4766 | - if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) |
4767 | - return TX_CONTINUE; |
4768 | - |
4769 | if (tx->flags & IEEE80211_TX_PS_BUFFERED) |
4770 | return TX_CONTINUE; |
4771 | |
4772 | diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c |
4773 | index af002df640c7..609f92283d1b 100644 |
4774 | --- a/net/nfc/nci/hci.c |
4775 | +++ b/net/nfc/nci/hci.c |
4776 | @@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd, |
4777 | r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data, |
4778 | msecs_to_jiffies(NCI_DATA_TIMEOUT)); |
4779 | |
4780 | - if (r == NCI_STATUS_OK) |
4781 | + if (r == NCI_STATUS_OK && skb) |
4782 | *skb = conn_info->rx_skb; |
4783 | |
4784 | return r; |
4785 | diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c |
4786 | index f85f37ed19b2..73d1ca7c546c 100644 |
4787 | --- a/net/nfc/netlink.c |
4788 | +++ b/net/nfc/netlink.c |
4789 | @@ -1518,12 +1518,13 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb, |
4790 | if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds) |
4791 | return -ENODEV; |
4792 | |
4793 | - data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); |
4794 | - if (data) { |
4795 | + if (info->attrs[NFC_ATTR_VENDOR_DATA]) { |
4796 | + data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]); |
4797 | data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]); |
4798 | if (data_len == 0) |
4799 | return -EINVAL; |
4800 | } else { |
4801 | + data = NULL; |
4802 | data_len = 0; |
4803 | } |
4804 | |
4805 | diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c |
4806 | index ab5dd621ae0c..2e98f4a243e5 100644 |
4807 | --- a/net/sunrpc/xprt.c |
4808 | +++ b/net/sunrpc/xprt.c |
4809 | @@ -614,6 +614,7 @@ static void xprt_autoclose(struct work_struct *work) |
4810 | clear_bit(XPRT_CLOSE_WAIT, &xprt->state); |
4811 | xprt->ops->close(xprt); |
4812 | xprt_release_write(xprt, NULL); |
4813 | + wake_up_bit(&xprt->state, XPRT_LOCKED); |
4814 | } |
4815 | |
4816 | /** |
4817 | @@ -723,6 +724,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie) |
4818 | xprt->ops->release_xprt(xprt, NULL); |
4819 | out: |
4820 | spin_unlock_bh(&xprt->transport_lock); |
4821 | + wake_up_bit(&xprt->state, XPRT_LOCKED); |
4822 | } |
4823 | |
4824 | /** |
4825 | @@ -1394,6 +1396,10 @@ out: |
4826 | static void xprt_destroy(struct rpc_xprt *xprt) |
4827 | { |
4828 | dprintk("RPC: destroying transport %p\n", xprt); |
4829 | + |
4830 | + /* Exclude transport connect/disconnect handlers */ |
4831 | + wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); |
4832 | + |
4833 | del_timer_sync(&xprt->timer); |
4834 | |
4835 | rpc_xprt_debugfs_unregister(xprt); |
4836 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
4837 | index 6b36279e4288..48f6de912f78 100644 |
4838 | --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c |
4839 | +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
4840 | @@ -91,7 +91,7 @@ struct svc_xprt_class svc_rdma_class = { |
4841 | .xcl_name = "rdma", |
4842 | .xcl_owner = THIS_MODULE, |
4843 | .xcl_ops = &svc_rdma_ops, |
4844 | - .xcl_max_payload = RPCRDMA_MAXPAYLOAD, |
4845 | + .xcl_max_payload = RPCSVC_MAXPAYLOAD_RDMA, |
4846 | .xcl_ident = XPRT_TRANSPORT_RDMA, |
4847 | }; |
4848 | |
4849 | diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h |
4850 | index f49dd8b38122..e718d0959af3 100644 |
4851 | --- a/net/sunrpc/xprtrdma/xprt_rdma.h |
4852 | +++ b/net/sunrpc/xprtrdma/xprt_rdma.h |
4853 | @@ -51,7 +51,6 @@ |
4854 | #include <linux/sunrpc/clnt.h> /* rpc_xprt */ |
4855 | #include <linux/sunrpc/rpc_rdma.h> /* RPC/RDMA protocol */ |
4856 | #include <linux/sunrpc/xprtrdma.h> /* xprt parameters */ |
4857 | -#include <linux/sunrpc/svc.h> /* RPCSVC_MAXPAYLOAD */ |
4858 | |
4859 | #define RDMA_RESOLVE_TIMEOUT (5000) /* 5 seconds */ |
4860 | #define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */ |
4861 | diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
4862 | index 0030376327b7..8a39b1e48bc4 100644 |
4863 | --- a/net/sunrpc/xprtsock.c |
4864 | +++ b/net/sunrpc/xprtsock.c |
4865 | @@ -829,6 +829,7 @@ static void xs_reset_transport(struct sock_xprt *transport) |
4866 | sk->sk_user_data = NULL; |
4867 | |
4868 | xs_restore_old_callbacks(transport, sk); |
4869 | + xprt_clear_connected(xprt); |
4870 | write_unlock_bh(&sk->sk_callback_lock); |
4871 | xs_sock_reset_connection_flags(xprt); |
4872 | |
4873 | @@ -1432,6 +1433,7 @@ out: |
4874 | static void xs_tcp_state_change(struct sock *sk) |
4875 | { |
4876 | struct rpc_xprt *xprt; |
4877 | + struct sock_xprt *transport; |
4878 | |
4879 | read_lock_bh(&sk->sk_callback_lock); |
4880 | if (!(xprt = xprt_from_sock(sk))) |
4881 | @@ -1443,13 +1445,12 @@ static void xs_tcp_state_change(struct sock *sk) |
4882 | sock_flag(sk, SOCK_ZAPPED), |
4883 | sk->sk_shutdown); |
4884 | |
4885 | + transport = container_of(xprt, struct sock_xprt, xprt); |
4886 | trace_rpc_socket_state_change(xprt, sk->sk_socket); |
4887 | switch (sk->sk_state) { |
4888 | case TCP_ESTABLISHED: |
4889 | spin_lock(&xprt->transport_lock); |
4890 | if (!xprt_test_and_set_connected(xprt)) { |
4891 | - struct sock_xprt *transport = container_of(xprt, |
4892 | - struct sock_xprt, xprt); |
4893 | |
4894 | /* Reset TCP record info */ |
4895 | transport->tcp_offset = 0; |
4896 | @@ -1458,6 +1459,8 @@ static void xs_tcp_state_change(struct sock *sk) |
4897 | transport->tcp_flags = |
4898 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; |
4899 | xprt->connect_cookie++; |
4900 | + clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); |
4901 | + xprt_clear_connecting(xprt); |
4902 | |
4903 | xprt_wake_pending_tasks(xprt, -EAGAIN); |
4904 | } |
4905 | @@ -1493,6 +1496,9 @@ static void xs_tcp_state_change(struct sock *sk) |
4906 | smp_mb__after_atomic(); |
4907 | break; |
4908 | case TCP_CLOSE: |
4909 | + if (test_and_clear_bit(XPRT_SOCK_CONNECTING, |
4910 | + &transport->sock_state)) |
4911 | + xprt_clear_connecting(xprt); |
4912 | xs_sock_mark_closed(xprt); |
4913 | } |
4914 | out: |
4915 | @@ -2176,6 +2182,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) |
4916 | /* Tell the socket layer to start connecting... */ |
4917 | xprt->stat.connect_count++; |
4918 | xprt->stat.connect_start = jiffies; |
4919 | + set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state); |
4920 | ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); |
4921 | switch (ret) { |
4922 | case 0: |
4923 | @@ -2237,7 +2244,6 @@ static void xs_tcp_setup_socket(struct work_struct *work) |
4924 | case -EINPROGRESS: |
4925 | case -EALREADY: |
4926 | xprt_unlock_connect(xprt, transport); |
4927 | - xprt_clear_connecting(xprt); |
4928 | return; |
4929 | case -EINVAL: |
4930 | /* Happens, for instance, if the user specified a link |
4931 | @@ -2279,13 +2285,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task) |
4932 | |
4933 | WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport)); |
4934 | |
4935 | - /* Start by resetting any existing state */ |
4936 | - xs_reset_transport(transport); |
4937 | - |
4938 | - if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) { |
4939 | + if (transport->sock != NULL) { |
4940 | dprintk("RPC: xs_connect delayed xprt %p for %lu " |
4941 | "seconds\n", |
4942 | xprt, xprt->reestablish_timeout / HZ); |
4943 | + |
4944 | + /* Start by resetting any existing state */ |
4945 | + xs_reset_transport(transport); |
4946 | + |
4947 | queue_delayed_work(rpciod_workqueue, |
4948 | &transport->connect_worker, |
4949 | xprt->reestablish_timeout); |
4950 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
4951 | index 374ea53288ca..c8f01ccc2513 100644 |
4952 | --- a/sound/pci/hda/patch_realtek.c |
4953 | +++ b/sound/pci/hda/patch_realtek.c |
4954 | @@ -1135,7 +1135,7 @@ static const struct hda_fixup alc880_fixups[] = { |
4955 | /* override all pins as BIOS on old Amilo is broken */ |
4956 | .type = HDA_FIXUP_PINS, |
4957 | .v.pins = (const struct hda_pintbl[]) { |
4958 | - { 0x14, 0x0121411f }, /* HP */ |
4959 | + { 0x14, 0x0121401f }, /* HP */ |
4960 | { 0x15, 0x99030120 }, /* speaker */ |
4961 | { 0x16, 0x99030130 }, /* bass speaker */ |
4962 | { 0x17, 0x411111f0 }, /* N/A */ |
4963 | @@ -1155,7 +1155,7 @@ static const struct hda_fixup alc880_fixups[] = { |
4964 | /* almost compatible with FUJITSU, but no bass and SPDIF */ |
4965 | .type = HDA_FIXUP_PINS, |
4966 | .v.pins = (const struct hda_pintbl[]) { |
4967 | - { 0x14, 0x0121411f }, /* HP */ |
4968 | + { 0x14, 0x0121401f }, /* HP */ |
4969 | { 0x15, 0x99030120 }, /* speaker */ |
4970 | { 0x16, 0x411111f0 }, /* N/A */ |
4971 | { 0x17, 0x411111f0 }, /* N/A */ |
4972 | @@ -1364,7 +1364,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { |
4973 | SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810), |
4974 | SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM), |
4975 | SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE), |
4976 | - SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734), |
4977 | + SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU), |
4978 | SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU), |
4979 | SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734), |
4980 | SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU), |
4981 | @@ -5189,8 +5189,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
4982 | SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), |
4983 | SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4984 | SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4985 | - SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4986 | SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4987 | + SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4988 | + SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4989 | + SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4990 | + SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX), |
4991 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4992 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
4993 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
4994 | @@ -6579,6 +6582,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { |
4995 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
4996 | SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13), |
4997 | SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13), |
4998 | + SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13), |
4999 | SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5000 | SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5001 | SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
5002 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
5003 | index 6b3acba5da7a..83d6e76435b4 100644 |
5004 | --- a/sound/usb/mixer.c |
5005 | +++ b/sound/usb/mixer.c |
5006 | @@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list) |
5007 | for (c = 0; c < MAX_CHANNELS; c++) { |
5008 | if (!(cval->cmask & (1 << c))) |
5009 | continue; |
5010 | - if (cval->cached & (1 << c)) { |
5011 | + if (cval->cached & (1 << (c + 1))) { |
5012 | err = snd_usb_set_cur_mix_value(cval, c + 1, idx, |
5013 | cval->cache_val[idx]); |
5014 | if (err < 0) |