Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.8/0106-4.8.7-all-fixes.patch

Parent Directory | Revision Log


Revision 2846 - (show annotations) (download)
Tue Nov 22 13:19:39 2016 UTC (7 years, 5 months ago) by niro
File size: 145065 byte(s)
-linux-4.8.7
1 diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
2 index e5b6497116f4..c75b64a85859 100644
3 --- a/Documentation/device-mapper/dm-raid.txt
4 +++ b/Documentation/device-mapper/dm-raid.txt
5 @@ -309,3 +309,4 @@ Version History
6 with a reshape in progress.
7 1.9.0 Add support for RAID level takeover/reshape/region size
8 and set size reduction.
9 +1.9.1 Fix activation of existing RAID 4/10 mapped devices
10 diff --git a/Makefile b/Makefile
11 index b249529204cd..4d0f28cb481d 100644
12 --- a/Makefile
13 +++ b/Makefile
14 @@ -1,6 +1,6 @@
15 VERSION = 4
16 PATCHLEVEL = 8
17 -SUBLEVEL = 6
18 +SUBLEVEL = 7
19 EXTRAVERSION =
20 NAME = Psychotic Stoned Sheep
21
22 diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
23 index b3df1c60d465..386eee6de232 100644
24 --- a/arch/arm/boot/dts/ste-snowball.dts
25 +++ b/arch/arm/boot/dts/ste-snowball.dts
26 @@ -239,14 +239,25 @@
27 arm,primecell-periphid = <0x10480180>;
28 max-frequency = <100000000>;
29 bus-width = <4>;
30 + cap-sd-highspeed;
31 cap-mmc-highspeed;
32 + sd-uhs-sdr12;
33 + sd-uhs-sdr25;
34 + /* All direction control is used */
35 + st,sig-dir-cmd;
36 + st,sig-dir-dat0;
37 + st,sig-dir-dat2;
38 + st,sig-dir-dat31;
39 + st,sig-pin-fbclk;
40 + full-pwr-cycle;
41 vmmc-supply = <&ab8500_ldo_aux3_reg>;
42 vqmmc-supply = <&vmmci>;
43 pinctrl-names = "default", "sleep";
44 pinctrl-0 = <&sdi0_default_mode>;
45 pinctrl-1 = <&sdi0_sleep_mode>;
46
47 - cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>; // 218
48 + /* GPIO218 MMC_CD */
49 + cd-gpios = <&gpio6 26 GPIO_ACTIVE_LOW>;
50
51 status = "okay";
52 };
53 @@ -549,7 +560,7 @@
54 /* VMMCI level-shifter enable */
55 snowball_cfg3 {
56 pins = "GPIO217_AH12";
57 - ste,config = <&gpio_out_lo>;
58 + ste,config = <&gpio_out_hi>;
59 };
60 /* VMMCI level-shifter voltage select */
61 snowball_cfg4 {
62 diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
63 index f9b6bd306cfe..541647f57192 100644
64 --- a/arch/arm/mach-mvebu/Kconfig
65 +++ b/arch/arm/mach-mvebu/Kconfig
66 @@ -23,6 +23,7 @@ config MACH_MVEBU_V7
67 select CACHE_L2X0
68 select ARM_CPU_SUSPEND
69 select MACH_MVEBU_ANY
70 + select MVEBU_CLK_COREDIV
71
72 config MACH_ARMADA_370
73 bool "Marvell Armada 370 boards"
74 @@ -32,7 +33,6 @@ config MACH_ARMADA_370
75 select CPU_PJ4B
76 select MACH_MVEBU_V7
77 select PINCTRL_ARMADA_370
78 - select MVEBU_CLK_COREDIV
79 help
80 Say 'Y' here if you want your kernel to support boards based
81 on the Marvell Armada 370 SoC with device tree.
82 @@ -50,7 +50,6 @@ config MACH_ARMADA_375
83 select HAVE_SMP
84 select MACH_MVEBU_V7
85 select PINCTRL_ARMADA_375
86 - select MVEBU_CLK_COREDIV
87 help
88 Say 'Y' here if you want your kernel to support boards based
89 on the Marvell Armada 375 SoC with device tree.
90 @@ -68,7 +67,6 @@ config MACH_ARMADA_38X
91 select HAVE_SMP
92 select MACH_MVEBU_V7
93 select PINCTRL_ARMADA_38X
94 - select MVEBU_CLK_COREDIV
95 help
96 Say 'Y' here if you want your kernel to support boards based
97 on the Marvell Armada 380/385 SoC with device tree.
98 diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
99 index 6d8e8e3365d1..4cdfab31a0b6 100644
100 --- a/arch/arm/mm/abort-lv4t.S
101 +++ b/arch/arm/mm/abort-lv4t.S
102 @@ -7,7 +7,7 @@
103 * : r4 = aborted context pc
104 * : r5 = aborted context psr
105 *
106 - * Returns : r4-r5, r10-r11, r13 preserved
107 + * Returns : r4-r5, r9-r11, r13 preserved
108 *
109 * Purpose : obtain information about current aborted instruction.
110 * Note: we read user space. This means we might cause a data
111 @@ -48,7 +48,10 @@ ENTRY(v4t_late_abort)
112 /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
113 /* d */ b do_DataAbort @ ldc rd, [rn, #m]
114 /* e */ b .data_unknown
115 -/* f */
116 +/* f */ b .data_unknown
117 +
118 +.data_unknown_r9:
119 + ldr r9, [sp], #4
120 .data_unknown: @ Part of jumptable
121 mov r0, r4
122 mov r1, r8
123 @@ -57,6 +60,7 @@ ENTRY(v4t_late_abort)
124 .data_arm_ldmstm:
125 tst r8, #1 << 21 @ check writeback bit
126 beq do_DataAbort @ no writeback -> no fixup
127 + str r9, [sp, #-4]!
128 mov r7, #0x11
129 orr r7, r7, #0x1100
130 and r6, r8, r7
131 @@ -75,12 +79,14 @@ ENTRY(v4t_late_abort)
132 subne r7, r7, r6, lsl #2 @ Undo increment
133 addeq r7, r7, r6, lsl #2 @ Undo decrement
134 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
135 + ldr r9, [sp], #4
136 b do_DataAbort
137
138 .data_arm_lateldrhpre:
139 tst r8, #1 << 21 @ Check writeback bit
140 beq do_DataAbort @ No writeback -> no fixup
141 .data_arm_lateldrhpost:
142 + str r9, [sp, #-4]!
143 and r9, r8, #0x00f @ get Rm / low nibble of immediate value
144 tst r8, #1 << 22 @ if (immediate offset)
145 andne r6, r8, #0xf00 @ { immediate high nibble
146 @@ -93,6 +99,7 @@ ENTRY(v4t_late_abort)
147 subne r7, r7, r6 @ Undo incrmenet
148 addeq r7, r7, r6 @ Undo decrement
149 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
150 + ldr r9, [sp], #4
151 b do_DataAbort
152
153 .data_arm_lateldrpreconst:
154 @@ -101,12 +108,14 @@ ENTRY(v4t_late_abort)
155 .data_arm_lateldrpostconst:
156 movs r6, r8, lsl #20 @ Get offset
157 beq do_DataAbort @ zero -> no fixup
158 + str r9, [sp, #-4]!
159 and r9, r8, #15 << 16 @ Extract 'n' from instruction
160 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
161 tst r8, #1 << 23 @ Check U bit
162 subne r7, r7, r6, lsr #20 @ Undo increment
163 addeq r7, r7, r6, lsr #20 @ Undo decrement
164 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
165 + ldr r9, [sp], #4
166 b do_DataAbort
167
168 .data_arm_lateldrprereg:
169 @@ -115,6 +124,7 @@ ENTRY(v4t_late_abort)
170 .data_arm_lateldrpostreg:
171 and r7, r8, #15 @ Extract 'm' from instruction
172 ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
173 + str r9, [sp, #-4]!
174 mov r9, r8, lsr #7 @ get shift count
175 ands r9, r9, #31
176 and r7, r8, #0x70 @ get shift type
177 @@ -126,33 +136,33 @@ ENTRY(v4t_late_abort)
178 b .data_arm_apply_r6_and_rn
179 b .data_arm_apply_r6_and_rn @ 1: LSL #0
180 nop
181 - b .data_unknown @ 2: MUL?
182 + b .data_unknown_r9 @ 2: MUL?
183 nop
184 - b .data_unknown @ 3: MUL?
185 + b .data_unknown_r9 @ 3: MUL?
186 nop
187 mov r6, r6, lsr r9 @ 4: LSR #!0
188 b .data_arm_apply_r6_and_rn
189 mov r6, r6, lsr #32 @ 5: LSR #32
190 b .data_arm_apply_r6_and_rn
191 - b .data_unknown @ 6: MUL?
192 + b .data_unknown_r9 @ 6: MUL?
193 nop
194 - b .data_unknown @ 7: MUL?
195 + b .data_unknown_r9 @ 7: MUL?
196 nop
197 mov r6, r6, asr r9 @ 8: ASR #!0
198 b .data_arm_apply_r6_and_rn
199 mov r6, r6, asr #32 @ 9: ASR #32
200 b .data_arm_apply_r6_and_rn
201 - b .data_unknown @ A: MUL?
202 + b .data_unknown_r9 @ A: MUL?
203 nop
204 - b .data_unknown @ B: MUL?
205 + b .data_unknown_r9 @ B: MUL?
206 nop
207 mov r6, r6, ror r9 @ C: ROR #!0
208 b .data_arm_apply_r6_and_rn
209 mov r6, r6, rrx @ D: RRX
210 b .data_arm_apply_r6_and_rn
211 - b .data_unknown @ E: MUL?
212 + b .data_unknown_r9 @ E: MUL?
213 nop
214 - b .data_unknown @ F: MUL?
215 + b .data_unknown_r9 @ F: MUL?
216
217 .data_thumb_abort:
218 ldrh r8, [r4] @ read instruction
219 @@ -190,6 +200,7 @@ ENTRY(v4t_late_abort)
220 .data_thumb_pushpop:
221 tst r8, #1 << 10
222 beq .data_unknown
223 + str r9, [sp, #-4]!
224 and r6, r8, #0x55 @ hweight8(r8) + R bit
225 and r9, r8, #0xaa
226 add r6, r6, r9, lsr #1
227 @@ -204,9 +215,11 @@ ENTRY(v4t_late_abort)
228 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
229 subne r7, r7, r6, lsl #2 @ decrement SP if POP
230 str r7, [r2, #13 << 2]
231 + ldr r9, [sp], #4
232 b do_DataAbort
233
234 .data_thumb_ldmstm:
235 + str r9, [sp, #-4]!
236 and r6, r8, #0x55 @ hweight8(r8)
237 and r9, r8, #0xaa
238 add r6, r6, r9, lsr #1
239 @@ -219,4 +232,5 @@ ENTRY(v4t_late_abort)
240 and r6, r6, #15 @ number of regs to transfer
241 sub r7, r7, r6, lsl #2 @ always decrement
242 str r7, [r2, r9, lsr #6]
243 + ldr r9, [sp], #4
244 b do_DataAbort
245 diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
246 index da31bbbbb59e..399271853aad 100644
247 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
248 +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi
249 @@ -131,7 +131,7 @@
250 #address-cells = <0x1>;
251 #size-cells = <0x0>;
252 cell-index = <1>;
253 - clocks = <&cpm_syscon0 0 3>;
254 + clocks = <&cpm_syscon0 1 21>;
255 status = "disabled";
256 };
257
258 diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
259 index b408fe660cf8..3cef06875f5c 100644
260 --- a/arch/h8300/include/asm/thread_info.h
261 +++ b/arch/h8300/include/asm/thread_info.h
262 @@ -31,7 +31,6 @@ struct thread_info {
263 int cpu; /* cpu we're on */
264 int preempt_count; /* 0 => preemptable, <0 => BUG */
265 mm_segment_t addr_limit;
266 - struct restart_block restart_block;
267 };
268
269 /*
270 @@ -44,9 +43,6 @@ struct thread_info {
271 .cpu = 0, \
272 .preempt_count = INIT_PREEMPT_COUNT, \
273 .addr_limit = KERNEL_DS, \
274 - .restart_block = { \
275 - .fn = do_no_restart_syscall, \
276 - }, \
277 }
278
279 #define init_thread_info (init_thread_union.thread_info)
280 diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
281 index ad1f81f574e5..7138303cbbf2 100644
282 --- a/arch/h8300/kernel/signal.c
283 +++ b/arch/h8300/kernel/signal.c
284 @@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
285 unsigned int er0;
286
287 /* Always make any pending restarted system calls return -EINTR */
288 - current_thread_info()->restart_block.fn = do_no_restart_syscall;
289 + current->restart_block.fn = do_no_restart_syscall;
290
291 /* restore passed registers */
292 #define COPY(r) do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
293 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
294 index b54bcadd8aec..45799ef232df 100644
295 --- a/arch/mips/include/asm/kvm_host.h
296 +++ b/arch/mips/include/asm/kvm_host.h
297 @@ -279,7 +279,10 @@ struct kvm_vcpu_arch {
298 /* Host KSEG0 address of the EI/DI offset */
299 void *kseg0_commpage;
300
301 - u32 io_gpr; /* GPR used as IO source/target */
302 + /* Resume PC after MMIO completion */
303 + unsigned long io_pc;
304 + /* GPR used as IO source/target */
305 + u32 io_gpr;
306
307 struct hrtimer comparecount_timer;
308 /* Count timer control KVM register */
309 @@ -301,8 +304,6 @@ struct kvm_vcpu_arch {
310 /* Bitmask of pending exceptions to be cleared */
311 unsigned long pending_exceptions_clr;
312
313 - u32 pending_load_cause;
314 -
315 /* Save/Restore the entryhi register when are are preempted/scheduled back in */
316 unsigned long preempt_entryhi;
317
318 diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
319 index ca1cc30c0891..1958910b75c0 100644
320 --- a/arch/mips/kernel/relocate.c
321 +++ b/arch/mips/kernel/relocate.c
322 @@ -200,7 +200,7 @@ static inline __init unsigned long get_random_boot(void)
323
324 #if defined(CONFIG_USE_OF)
325 /* Get any additional entropy passed in device tree */
326 - {
327 + if (initial_boot_params) {
328 int node, len;
329 u64 *prop;
330
331 diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
332 index 43853ec6e160..4d65285ca418 100644
333 --- a/arch/mips/kvm/emulate.c
334 +++ b/arch/mips/kvm/emulate.c
335 @@ -791,15 +791,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
336 struct mips_coproc *cop0 = vcpu->arch.cop0;
337 enum emulation_result er = EMULATE_DONE;
338
339 - if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
340 + if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
341 + kvm_clear_c0_guest_status(cop0, ST0_ERL);
342 + vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
343 + } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
344 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
345 kvm_read_c0_guest_epc(cop0));
346 kvm_clear_c0_guest_status(cop0, ST0_EXL);
347 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
348
349 - } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
350 - kvm_clear_c0_guest_status(cop0, ST0_ERL);
351 - vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
352 } else {
353 kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
354 vcpu->arch.pc);
355 @@ -1522,13 +1522,25 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
356 struct kvm_vcpu *vcpu)
357 {
358 enum emulation_result er = EMULATE_DO_MMIO;
359 + unsigned long curr_pc;
360 u32 op, rt;
361 u32 bytes;
362
363 rt = inst.i_format.rt;
364 op = inst.i_format.opcode;
365
366 - vcpu->arch.pending_load_cause = cause;
367 + /*
368 + * Find the resume PC now while we have safe and easy access to the
369 + * prior branch instruction, and save it for
370 + * kvm_mips_complete_mmio_load() to restore later.
371 + */
372 + curr_pc = vcpu->arch.pc;
373 + er = update_pc(vcpu, cause);
374 + if (er == EMULATE_FAIL)
375 + return er;
376 + vcpu->arch.io_pc = vcpu->arch.pc;
377 + vcpu->arch.pc = curr_pc;
378 +
379 vcpu->arch.io_gpr = rt;
380
381 switch (op) {
382 @@ -2488,9 +2500,8 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
383 goto done;
384 }
385
386 - er = update_pc(vcpu, vcpu->arch.pending_load_cause);
387 - if (er == EMULATE_FAIL)
388 - return er;
389 + /* Restore saved resume PC */
390 + vcpu->arch.pc = vcpu->arch.io_pc;
391
392 switch (run->mmio.len) {
393 case 4:
394 @@ -2512,11 +2523,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
395 break;
396 }
397
398 - if (vcpu->arch.pending_load_cause & CAUSEF_BD)
399 - kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
400 - vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
401 - vcpu->mmio_needed);
402 -
403 done:
404 return er;
405 }
406 diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
407 index d03422e5f188..7ed036c57d00 100644
408 --- a/arch/parisc/kernel/syscall.S
409 +++ b/arch/parisc/kernel/syscall.S
410 @@ -106,8 +106,6 @@ linux_gateway_entry:
411 mtsp %r0,%sr4 /* get kernel space into sr4 */
412 mtsp %r0,%sr5 /* get kernel space into sr5 */
413 mtsp %r0,%sr6 /* get kernel space into sr6 */
414 - mfsp %sr7,%r1 /* save user sr7 */
415 - mtsp %r1,%sr3 /* and store it in sr3 */
416
417 #ifdef CONFIG_64BIT
418 /* for now we can *always* set the W bit on entry to the syscall
419 @@ -133,6 +131,14 @@ linux_gateway_entry:
420 depdi 0, 31, 32, %r21
421 1:
422 #endif
423 +
424 + /* We use a rsm/ssm pair to prevent sr3 from being clobbered
425 + * by external interrupts.
426 + */
427 + mfsp %sr7,%r1 /* save user sr7 */
428 + rsm PSW_SM_I, %r0 /* disable interrupts */
429 + mtsp %r1,%sr3 /* and store it in sr3 */
430 +
431 mfctl %cr30,%r1
432 xor %r1,%r30,%r30 /* ye olde xor trick */
433 xor %r1,%r30,%r1
434 @@ -147,6 +153,7 @@ linux_gateway_entry:
435 */
436
437 mtsp %r0,%sr7 /* get kernel space into sr7 */
438 + ssm PSW_SM_I, %r0 /* enable interrupts */
439 STREGM %r1,FRAME_SIZE(%r30) /* save r1 (usp) here for now */
440 mfctl %cr30,%r1 /* get task ptr in %r1 */
441 LDREG TI_TASK(%r1),%r1
442 diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
443 index 01b8a13f0224..3919332965af 100644
444 --- a/arch/powerpc/include/asm/cpuidle.h
445 +++ b/arch/powerpc/include/asm/cpuidle.h
446 @@ -26,7 +26,7 @@ extern u64 pnv_first_deep_stop_state;
447 std r0,0(r1); \
448 ptesync; \
449 ld r0,0(r1); \
450 -1: cmp cr0,r0,r0; \
451 +1: cmpd cr0,r0,r0; \
452 bne 1b; \
453 IDLE_INST; \
454 b .
455 diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
456 index f6f68f73e858..99e1397b71da 100644
457 --- a/arch/powerpc/include/asm/tlb.h
458 +++ b/arch/powerpc/include/asm/tlb.h
459 @@ -52,11 +52,23 @@ static inline int mm_is_core_local(struct mm_struct *mm)
460 return cpumask_subset(mm_cpumask(mm),
461 topology_sibling_cpumask(smp_processor_id()));
462 }
463 +
464 +static inline int mm_is_thread_local(struct mm_struct *mm)
465 +{
466 + return cpumask_equal(mm_cpumask(mm),
467 + cpumask_of(smp_processor_id()));
468 +}
469 +
470 #else
471 static inline int mm_is_core_local(struct mm_struct *mm)
472 {
473 return 1;
474 }
475 +
476 +static inline int mm_is_thread_local(struct mm_struct *mm)
477 +{
478 + return 1;
479 +}
480 #endif
481
482 #endif /* __KERNEL__ */
483 diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
484 index bd739fed26e3..72dac0b58061 100644
485 --- a/arch/powerpc/kernel/idle_book3s.S
486 +++ b/arch/powerpc/kernel/idle_book3s.S
487 @@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
488 * Threads will spin in HMT_LOW until the lock bit is cleared.
489 * r14 - pointer to core_idle_state
490 * r15 - used to load contents of core_idle_state
491 + * r9 - used as a temporary variable
492 */
493
494 core_idle_lock_held:
495 @@ -99,6 +100,8 @@ core_idle_lock_held:
496 bne 3b
497 HMT_MEDIUM
498 lwarx r15,0,r14
499 + andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
500 + bne core_idle_lock_held
501 blr
502
503 /*
504 @@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
505 std r9,_MSR(r1)
506 std r1,PACAR1(r13)
507
508 -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
509 - /* Tell KVM we're entering idle */
510 - li r4,KVM_HWTHREAD_IN_IDLE
511 - stb r4,HSTATE_HWTHREAD_STATE(r13)
512 -#endif
513 -
514 /*
515 * Go to real mode to do the nap, as required by the architecture.
516 * Also, we need to be in real mode before setting hwthread_state,
517 @@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
518
519 .globl pnv_enter_arch207_idle_mode
520 pnv_enter_arch207_idle_mode:
521 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
522 + /* Tell KVM we're entering idle */
523 + li r4,KVM_HWTHREAD_IN_IDLE
524 + /******************************************************/
525 + /* N O T E W E L L ! ! ! N O T E W E L L */
526 + /* The following store to HSTATE_HWTHREAD_STATE(r13) */
527 + /* MUST occur in real mode, i.e. with the MMU off, */
528 + /* and the MMU must stay off until we clear this flag */
529 + /* and test HSTATE_HWTHREAD_REQ(r13) in the system */
530 + /* reset interrupt vector in exceptions-64s.S. */
531 + /* The reason is that another thread can switch the */
532 + /* MMU to a guest context whenever this flag is set */
533 + /* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on, */
534 + /* that would potentially cause this thread to start */
535 + /* executing instructions from guest memory in */
536 + /* hypervisor mode, leading to a host crash or data */
537 + /* corruption, or worse. */
538 + /******************************************************/
539 + stb r4,HSTATE_HWTHREAD_STATE(r13)
540 +#endif
541 stb r3,PACA_THREAD_IDLE_STATE(r13)
542 cmpwi cr3,r3,PNV_THREAD_SLEEP
543 bge cr3,2f
544 @@ -250,6 +267,12 @@ enter_winkle:
545 * r3 - requested stop state
546 */
547 power_enter_stop:
548 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
549 + /* Tell KVM we're entering idle */
550 + li r4,KVM_HWTHREAD_IN_IDLE
551 + /* DO THIS IN REAL MODE! See comment above. */
552 + stb r4,HSTATE_HWTHREAD_STATE(r13)
553 +#endif
554 /*
555 * Check if the requested state is a deep idle state.
556 */
557 diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
558 index 48df05ef5231..d6960681939a 100644
559 --- a/arch/powerpc/mm/tlb-radix.c
560 +++ b/arch/powerpc/mm/tlb-radix.c
561 @@ -175,7 +175,7 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
562 if (unlikely(pid == MMU_NO_CONTEXT))
563 goto no_context;
564
565 - if (!mm_is_core_local(mm)) {
566 + if (!mm_is_thread_local(mm)) {
567 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
568
569 if (lock_tlbie)
570 @@ -201,7 +201,7 @@ void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
571 if (unlikely(pid == MMU_NO_CONTEXT))
572 goto no_context;
573
574 - if (!mm_is_core_local(mm)) {
575 + if (!mm_is_thread_local(mm)) {
576 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
577
578 if (lock_tlbie)
579 @@ -226,7 +226,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
580 pid = mm ? mm->context.id : 0;
581 if (unlikely(pid == MMU_NO_CONTEXT))
582 goto bail;
583 - if (!mm_is_core_local(mm)) {
584 + if (!mm_is_thread_local(mm)) {
585 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
586
587 if (lock_tlbie)
588 @@ -321,7 +321,7 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
589 {
590 unsigned long pid;
591 unsigned long addr;
592 - int local = mm_is_core_local(mm);
593 + int local = mm_is_thread_local(mm);
594 unsigned long ap = mmu_get_ap(psize);
595 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
596 unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;
597 diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
598 index bd98b7d25200..05c98bb853cf 100644
599 --- a/arch/s390/kvm/sthyi.c
600 +++ b/arch/s390/kvm/sthyi.c
601 @@ -315,7 +315,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
602 if (r < 0)
603 goto out;
604
605 - diag224_buf = kmalloc(PAGE_SIZE, GFP_KERNEL | GFP_DMA);
606 + diag224_buf = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
607 if (!diag224_buf || diag224(diag224_buf))
608 goto out;
609
610 @@ -378,7 +378,7 @@ static void fill_diag(struct sthyi_sctns *sctns)
611 sctns->par.infpval1 |= PAR_WGHT_VLD;
612
613 out:
614 - kfree(diag224_buf);
615 + free_page((unsigned long)diag224_buf);
616 vfree(diag204_buf);
617 }
618
619 diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
620 index 620ab06bcf45..017bda12caae 100644
621 --- a/arch/x86/kernel/cpu/microcode/amd.c
622 +++ b/arch/x86/kernel/cpu/microcode/amd.c
623 @@ -429,7 +429,7 @@ int __init save_microcode_in_initrd_amd(void)
624 * We need the physical address of the container for both bitness since
625 * boot_params.hdr.ramdisk_image is a physical address.
626 */
627 - cont = __pa(container);
628 + cont = __pa_nodebug(container);
629 cont_va = container;
630 #endif
631
632 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
633 index 98c9cd6f3b5d..d5219b1c8775 100644
634 --- a/arch/x86/kernel/setup.c
635 +++ b/arch/x86/kernel/setup.c
636 @@ -1222,11 +1222,16 @@ void __init setup_arch(char **cmdline_p)
637 if (smp_found_config)
638 get_smp_config();
639
640 + /*
641 + * Systems w/o ACPI and mptables might not have it mapped the local
642 + * APIC yet, but prefill_possible_map() might need to access it.
643 + */
644 + init_apic_mappings();
645 +
646 prefill_possible_map();
647
648 init_cpu_to_node();
649
650 - init_apic_mappings();
651 io_apic_init_mappings();
652
653 kvm_guest_init();
654 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
655 index 4e95d3eb2955..cbd7b92585bb 100644
656 --- a/arch/x86/kvm/emulate.c
657 +++ b/arch/x86/kvm/emulate.c
658 @@ -5045,7 +5045,7 @@ done_prefixes:
659 /* Decode and fetch the destination operand: register or memory. */
660 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
661
662 - if (ctxt->rip_relative)
663 + if (ctxt->rip_relative && likely(ctxt->memopp))
664 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
665 ctxt->memopp->addr.mem.ea + ctxt->_eip);
666
667 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
668 index 699f8726539a..46f74d461f3f 100644
669 --- a/arch/x86/kvm/x86.c
670 +++ b/arch/x86/kvm/x86.c
671 @@ -7372,10 +7372,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
672
673 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
674 {
675 + void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
676 +
677 kvmclock_reset(vcpu);
678
679 - free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
680 kvm_x86_ops->vcpu_free(vcpu);
681 + free_cpumask_var(wbinvd_dirty_mask);
682 }
683
684 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
685 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
686 index 16288e777ec3..4b1e4eabeba4 100644
687 --- a/drivers/android/binder.c
688 +++ b/drivers/android/binder.c
689 @@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
690
691
692 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
693 - uint32_t desc)
694 + u32 desc, bool need_strong_ref)
695 {
696 struct rb_node *n = proc->refs_by_desc.rb_node;
697 struct binder_ref *ref;
698 @@ -1011,12 +1011,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
699 while (n) {
700 ref = rb_entry(n, struct binder_ref, rb_node_desc);
701
702 - if (desc < ref->desc)
703 + if (desc < ref->desc) {
704 n = n->rb_left;
705 - else if (desc > ref->desc)
706 + } else if (desc > ref->desc) {
707 n = n->rb_right;
708 - else
709 + } else if (need_strong_ref && !ref->strong) {
710 + binder_user_error("tried to use weak ref as strong ref\n");
711 + return NULL;
712 + } else {
713 return ref;
714 + }
715 }
716 return NULL;
717 }
718 @@ -1286,7 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
719 } break;
720 case BINDER_TYPE_HANDLE:
721 case BINDER_TYPE_WEAK_HANDLE: {
722 - struct binder_ref *ref = binder_get_ref(proc, fp->handle);
723 + struct binder_ref *ref;
724 +
725 + ref = binder_get_ref(proc, fp->handle,
726 + fp->type == BINDER_TYPE_HANDLE);
727
728 if (ref == NULL) {
729 pr_err("transaction release %d bad handle %d\n",
730 @@ -1381,7 +1388,7 @@ static void binder_transaction(struct binder_proc *proc,
731 if (tr->target.handle) {
732 struct binder_ref *ref;
733
734 - ref = binder_get_ref(proc, tr->target.handle);
735 + ref = binder_get_ref(proc, tr->target.handle, true);
736 if (ref == NULL) {
737 binder_user_error("%d:%d got transaction to invalid handle\n",
738 proc->pid, thread->pid);
739 @@ -1578,7 +1585,9 @@ static void binder_transaction(struct binder_proc *proc,
740 fp->type = BINDER_TYPE_HANDLE;
741 else
742 fp->type = BINDER_TYPE_WEAK_HANDLE;
743 + fp->binder = 0;
744 fp->handle = ref->desc;
745 + fp->cookie = 0;
746 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
747 &thread->todo);
748
749 @@ -1590,7 +1599,10 @@ static void binder_transaction(struct binder_proc *proc,
750 } break;
751 case BINDER_TYPE_HANDLE:
752 case BINDER_TYPE_WEAK_HANDLE: {
753 - struct binder_ref *ref = binder_get_ref(proc, fp->handle);
754 + struct binder_ref *ref;
755 +
756 + ref = binder_get_ref(proc, fp->handle,
757 + fp->type == BINDER_TYPE_HANDLE);
758
759 if (ref == NULL) {
760 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
761 @@ -1625,7 +1637,9 @@ static void binder_transaction(struct binder_proc *proc,
762 return_error = BR_FAILED_REPLY;
763 goto err_binder_get_ref_for_node_failed;
764 }
765 + fp->binder = 0;
766 fp->handle = new_ref->desc;
767 + fp->cookie = 0;
768 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
769 trace_binder_transaction_ref_to_ref(t, ref,
770 new_ref);
771 @@ -1679,6 +1693,7 @@ static void binder_transaction(struct binder_proc *proc,
772 binder_debug(BINDER_DEBUG_TRANSACTION,
773 " fd %d -> %d\n", fp->handle, target_fd);
774 /* TODO: fput? */
775 + fp->binder = 0;
776 fp->handle = target_fd;
777 } break;
778
779 @@ -1801,7 +1816,9 @@ static int binder_thread_write(struct binder_proc *proc,
780 ref->desc);
781 }
782 } else
783 - ref = binder_get_ref(proc, target);
784 + ref = binder_get_ref(proc, target,
785 + cmd == BC_ACQUIRE ||
786 + cmd == BC_RELEASE);
787 if (ref == NULL) {
788 binder_user_error("%d:%d refcount change on invalid ref %d\n",
789 proc->pid, thread->pid, target);
790 @@ -1997,7 +2014,7 @@ static int binder_thread_write(struct binder_proc *proc,
791 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
792 return -EFAULT;
793 ptr += sizeof(binder_uintptr_t);
794 - ref = binder_get_ref(proc, target);
795 + ref = binder_get_ref(proc, target, false);
796 if (ref == NULL) {
797 binder_user_error("%d:%d %s invalid ref %d\n",
798 proc->pid, thread->pid,
799 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
800 index 5da47e26a012..4aae0d27e382 100644
801 --- a/drivers/char/virtio_console.c
802 +++ b/drivers/char/virtio_console.c
803 @@ -1540,19 +1540,29 @@ static void remove_port_data(struct port *port)
804 spin_lock_irq(&port->inbuf_lock);
805 /* Remove unused data this port might have received. */
806 discard_port_data(port);
807 + spin_unlock_irq(&port->inbuf_lock);
808
809 /* Remove buffers we queued up for the Host to send us data in. */
810 - while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
811 - free_buf(buf, true);
812 - spin_unlock_irq(&port->inbuf_lock);
813 + do {
814 + spin_lock_irq(&port->inbuf_lock);
815 + buf = virtqueue_detach_unused_buf(port->in_vq);
816 + spin_unlock_irq(&port->inbuf_lock);
817 + if (buf)
818 + free_buf(buf, true);
819 + } while (buf);
820
821 spin_lock_irq(&port->outvq_lock);
822 reclaim_consumed_buffers(port);
823 + spin_unlock_irq(&port->outvq_lock);
824
825 /* Free pending buffers from the out-queue. */
826 - while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
827 - free_buf(buf, true);
828 - spin_unlock_irq(&port->outvq_lock);
829 + do {
830 + spin_lock_irq(&port->outvq_lock);
831 + buf = virtqueue_detach_unused_buf(port->out_vq);
832 + spin_unlock_irq(&port->outvq_lock);
833 + if (buf)
834 + free_buf(buf, true);
835 + } while (buf);
836 }
837
838 /*
839 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
840 index b46547e907be..8c347f5c2562 100644
841 --- a/drivers/cpufreq/intel_pstate.c
842 +++ b/drivers/cpufreq/intel_pstate.c
843 @@ -1133,10 +1133,8 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
844 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
845 }
846
847 -static void intel_pstate_set_min_pstate(struct cpudata *cpu)
848 +static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
849 {
850 - int pstate = cpu->pstate.min_pstate;
851 -
852 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
853 cpu->pstate.current_pstate = pstate;
854 /*
855 @@ -1148,6 +1146,20 @@ static void intel_pstate_set_min_pstate(struct cpudata *cpu)
856 pstate_funcs.get_val(cpu, pstate));
857 }
858
859 +static void intel_pstate_set_min_pstate(struct cpudata *cpu)
860 +{
861 + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
862 +}
863 +
864 +static void intel_pstate_max_within_limits(struct cpudata *cpu)
865 +{
866 + int min_pstate, max_pstate;
867 +
868 + update_turbo_state();
869 + intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
870 + intel_pstate_set_pstate(cpu, max_pstate);
871 +}
872 +
873 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
874 {
875 cpu->pstate.min_pstate = pstate_funcs.get_min();
876 @@ -1465,7 +1477,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
877 pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
878 policy->cpuinfo.max_freq, policy->max);
879
880 - cpu = all_cpu_data[0];
881 + cpu = all_cpu_data[policy->cpu];
882 if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
883 policy->max < policy->cpuinfo.max_freq &&
884 policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
885 @@ -1509,6 +1521,15 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
886 limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
887
888 out:
889 + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
890 + /*
891 + * NOHZ_FULL CPUs need this as the governor callback may not
892 + * be invoked on them.
893 + */
894 + intel_pstate_clear_update_util_hook(policy->cpu);
895 + intel_pstate_max_within_limits(cpu);
896 + }
897 +
898 intel_pstate_set_update_util_hook(policy->cpu);
899
900 intel_pstate_hwp_set_policy(policy);
901 diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
902 index 1f01e98c83c7..73ae849f5170 100644
903 --- a/drivers/dax/pmem.c
904 +++ b/drivers/dax/pmem.c
905 @@ -44,7 +44,6 @@ static void dax_pmem_percpu_exit(void *data)
906
907 dev_dbg(dax_pmem->dev, "%s\n", __func__);
908 percpu_ref_exit(ref);
909 - wait_for_completion(&dax_pmem->cmp);
910 }
911
912 static void dax_pmem_percpu_kill(void *data)
913 @@ -54,6 +53,7 @@ static void dax_pmem_percpu_kill(void *data)
914
915 dev_dbg(dax_pmem->dev, "%s\n", __func__);
916 percpu_ref_kill(ref);
917 + wait_for_completion(&dax_pmem->cmp);
918 }
919
920 static int dax_pmem_probe(struct device *dev)
921 diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
922 index 309311b1faae..15475892af0c 100644
923 --- a/drivers/firewire/net.c
924 +++ b/drivers/firewire/net.c
925 @@ -73,13 +73,13 @@ struct rfc2734_header {
926
927 #define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
928 #define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
929 -#define fwnet_get_hdr_dg_size(h) (((h)->w0 & 0x0fff0000) >> 16)
930 +#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
931 #define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
932 #define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
933
934 -#define fwnet_set_hdr_lf(lf) ((lf) << 30)
935 +#define fwnet_set_hdr_lf(lf) ((lf) << 30)
936 #define fwnet_set_hdr_ether_type(et) (et)
937 -#define fwnet_set_hdr_dg_size(dgs) ((dgs) << 16)
938 +#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
939 #define fwnet_set_hdr_fg_off(fgo) (fgo)
940
941 #define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
942 @@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
943 int retval;
944 u16 ether_type;
945
946 + if (len <= RFC2374_UNFRAG_HDR_SIZE)
947 + return 0;
948 +
949 hdr.w0 = be32_to_cpu(buf[0]);
950 lf = fwnet_get_hdr_lf(&hdr);
951 if (lf == RFC2374_HDR_UNFRAG) {
952 @@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
953 return fwnet_finish_incoming_packet(net, skb, source_node_id,
954 is_broadcast, ether_type);
955 }
956 +
957 /* A datagram fragment has been received, now the fun begins. */
958 +
959 + if (len <= RFC2374_FRAG_HDR_SIZE)
960 + return 0;
961 +
962 hdr.w1 = ntohl(buf[1]);
963 buf += 2;
964 len -= RFC2374_FRAG_HDR_SIZE;
965 @@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
966 fg_off = fwnet_get_hdr_fg_off(&hdr);
967 }
968 datagram_label = fwnet_get_hdr_dgl(&hdr);
969 - dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
970 + dg_size = fwnet_get_hdr_dg_size(&hdr);
971 +
972 + if (fg_off + len > dg_size)
973 + return 0;
974
975 spin_lock_irqsave(&dev->lock, flags);
976
977 @@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
978 fw_send_response(card, r, rcode);
979 }
980
981 +static int gasp_source_id(__be32 *p)
982 +{
983 + return be32_to_cpu(p[0]) >> 16;
984 +}
985 +
986 +static u32 gasp_specifier_id(__be32 *p)
987 +{
988 + return (be32_to_cpu(p[0]) & 0xffff) << 8 |
989 + (be32_to_cpu(p[1]) & 0xff000000) >> 24;
990 +}
991 +
992 +static u32 gasp_version(__be32 *p)
993 +{
994 + return be32_to_cpu(p[1]) & 0xffffff;
995 +}
996 +
997 static void fwnet_receive_broadcast(struct fw_iso_context *context,
998 u32 cycle, size_t header_length, void *header, void *data)
999 {
1000 @@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1001 __be32 *buf_ptr;
1002 int retval;
1003 u32 length;
1004 - u16 source_node_id;
1005 - u32 specifier_id;
1006 - u32 ver;
1007 unsigned long offset;
1008 unsigned long flags;
1009
1010 @@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
1011
1012 spin_unlock_irqrestore(&dev->lock, flags);
1013
1014 - specifier_id = (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
1015 - | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
1016 - ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
1017 - source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
1018 -
1019 - if (specifier_id == IANA_SPECIFIER_ID &&
1020 - (ver == RFC2734_SW_VERSION
1021 + if (length > IEEE1394_GASP_HDR_SIZE &&
1022 + gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
1023 + (gasp_version(buf_ptr) == RFC2734_SW_VERSION
1024 #if IS_ENABLED(CONFIG_IPV6)
1025 - || ver == RFC3146_SW_VERSION
1026 + || gasp_version(buf_ptr) == RFC3146_SW_VERSION
1027 #endif
1028 - )) {
1029 - buf_ptr += 2;
1030 - length -= IEEE1394_GASP_HDR_SIZE;
1031 - fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
1032 + ))
1033 + fwnet_incoming_packet(dev, buf_ptr + 2,
1034 + length - IEEE1394_GASP_HDR_SIZE,
1035 + gasp_source_id(buf_ptr),
1036 context->card->generation, true);
1037 - }
1038
1039 packet.payload_length = dev->rcv_buffer_size;
1040 packet.interrupt = 1;
1041 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
1042 index af514618d7fb..14f2d9835723 100644
1043 --- a/drivers/gpio/gpiolib-acpi.c
1044 +++ b/drivers/gpio/gpiolib-acpi.c
1045 @@ -602,14 +602,17 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
1046 {
1047 int idx, i;
1048 unsigned int irq_flags;
1049 + int ret = -ENOENT;
1050
1051 for (i = 0, idx = 0; idx <= index; i++) {
1052 struct acpi_gpio_info info;
1053 struct gpio_desc *desc;
1054
1055 desc = acpi_get_gpiod_by_index(adev, NULL, i, &info);
1056 - if (IS_ERR(desc))
1057 + if (IS_ERR(desc)) {
1058 + ret = PTR_ERR(desc);
1059 break;
1060 + }
1061 if (info.gpioint && idx++ == index) {
1062 int irq = gpiod_to_irq(desc);
1063
1064 @@ -628,7 +631,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
1065 }
1066
1067 }
1068 - return -ENOENT;
1069 + return ret;
1070 }
1071 EXPORT_SYMBOL_GPL(acpi_dev_gpio_irq_get);
1072
1073 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1074 index 53ff25ac66d8..b2dee1024166 100644
1075 --- a/drivers/gpio/gpiolib.c
1076 +++ b/drivers/gpio/gpiolib.c
1077 @@ -21,6 +21,7 @@
1078 #include <linux/uaccess.h>
1079 #include <linux/compat.h>
1080 #include <linux/anon_inodes.h>
1081 +#include <linux/file.h>
1082 #include <linux/kfifo.h>
1083 #include <linux/poll.h>
1084 #include <linux/timekeeping.h>
1085 @@ -331,6 +332,13 @@ struct linehandle_state {
1086 u32 numdescs;
1087 };
1088
1089 +#define GPIOHANDLE_REQUEST_VALID_FLAGS \
1090 + (GPIOHANDLE_REQUEST_INPUT | \
1091 + GPIOHANDLE_REQUEST_OUTPUT | \
1092 + GPIOHANDLE_REQUEST_ACTIVE_LOW | \
1093 + GPIOHANDLE_REQUEST_OPEN_DRAIN | \
1094 + GPIOHANDLE_REQUEST_OPEN_SOURCE)
1095 +
1096 static long linehandle_ioctl(struct file *filep, unsigned int cmd,
1097 unsigned long arg)
1098 {
1099 @@ -342,6 +350,8 @@ static long linehandle_ioctl(struct file *filep, unsigned int cmd,
1100 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1101 int val;
1102
1103 + memset(&ghd, 0, sizeof(ghd));
1104 +
1105 /* TODO: check if descriptors are really input */
1106 for (i = 0; i < lh->numdescs; i++) {
1107 val = gpiod_get_value_cansleep(lh->descs[i]);
1108 @@ -412,6 +422,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
1109 {
1110 struct gpiohandle_request handlereq;
1111 struct linehandle_state *lh;
1112 + struct file *file;
1113 int fd, i, ret;
1114
1115 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
1116 @@ -442,6 +453,17 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
1117 u32 lflags = handlereq.flags;
1118 struct gpio_desc *desc;
1119
1120 + if (offset >= gdev->ngpio) {
1121 + ret = -EINVAL;
1122 + goto out_free_descs;
1123 + }
1124 +
1125 + /* Return an error if an unknown flag is set */
1126 + if (lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) {
1127 + ret = -EINVAL;
1128 + goto out_free_descs;
1129 + }
1130 +
1131 desc = &gdev->descs[offset];
1132 ret = gpiod_request(desc, lh->label);
1133 if (ret)
1134 @@ -477,26 +499,41 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
1135 i--;
1136 lh->numdescs = handlereq.lines;
1137
1138 - fd = anon_inode_getfd("gpio-linehandle",
1139 - &linehandle_fileops,
1140 - lh,
1141 - O_RDONLY | O_CLOEXEC);
1142 + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1143 if (fd < 0) {
1144 ret = fd;
1145 goto out_free_descs;
1146 }
1147
1148 + file = anon_inode_getfile("gpio-linehandle",
1149 + &linehandle_fileops,
1150 + lh,
1151 + O_RDONLY | O_CLOEXEC);
1152 + if (IS_ERR(file)) {
1153 + ret = PTR_ERR(file);
1154 + goto out_put_unused_fd;
1155 + }
1156 +
1157 handlereq.fd = fd;
1158 if (copy_to_user(ip, &handlereq, sizeof(handlereq))) {
1159 - ret = -EFAULT;
1160 - goto out_free_descs;
1161 + /*
1162 + * fput() will trigger the release() callback, so do not go onto
1163 + * the regular error cleanup path here.
1164 + */
1165 + fput(file);
1166 + put_unused_fd(fd);
1167 + return -EFAULT;
1168 }
1169
1170 + fd_install(fd, file);
1171 +
1172 dev_dbg(&gdev->dev, "registered chardev handle for %d lines\n",
1173 lh->numdescs);
1174
1175 return 0;
1176
1177 +out_put_unused_fd:
1178 + put_unused_fd(fd);
1179 out_free_descs:
1180 for (; i >= 0; i--)
1181 gpiod_free(lh->descs[i]);
1182 @@ -534,6 +571,10 @@ struct lineevent_state {
1183 struct mutex read_lock;
1184 };
1185
1186 +#define GPIOEVENT_REQUEST_VALID_FLAGS \
1187 + (GPIOEVENT_REQUEST_RISING_EDGE | \
1188 + GPIOEVENT_REQUEST_FALLING_EDGE)
1189 +
1190 static unsigned int lineevent_poll(struct file *filep,
1191 struct poll_table_struct *wait)
1192 {
1193 @@ -621,6 +662,8 @@ static long lineevent_ioctl(struct file *filep, unsigned int cmd,
1194 if (cmd == GPIOHANDLE_GET_LINE_VALUES_IOCTL) {
1195 int val;
1196
1197 + memset(&ghd, 0, sizeof(ghd));
1198 +
1199 val = gpiod_get_value_cansleep(le->desc);
1200 if (val < 0)
1201 return val;
1202 @@ -693,6 +736,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1203 struct gpioevent_request eventreq;
1204 struct lineevent_state *le;
1205 struct gpio_desc *desc;
1206 + struct file *file;
1207 u32 offset;
1208 u32 lflags;
1209 u32 eflags;
1210 @@ -724,6 +768,18 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1211 lflags = eventreq.handleflags;
1212 eflags = eventreq.eventflags;
1213
1214 + if (offset >= gdev->ngpio) {
1215 + ret = -EINVAL;
1216 + goto out_free_label;
1217 + }
1218 +
1219 + /* Return an error if an unknown flag is set */
1220 + if ((lflags & ~GPIOHANDLE_REQUEST_VALID_FLAGS) ||
1221 + (eflags & ~GPIOEVENT_REQUEST_VALID_FLAGS)) {
1222 + ret = -EINVAL;
1223 + goto out_free_label;
1224 + }
1225 +
1226 /* This is just wrong: we don't look for events on output lines */
1227 if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
1228 ret = -EINVAL;
1229 @@ -775,23 +831,38 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
1230 if (ret)
1231 goto out_free_desc;
1232
1233 - fd = anon_inode_getfd("gpio-event",
1234 - &lineevent_fileops,
1235 - le,
1236 - O_RDONLY | O_CLOEXEC);
1237 + fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
1238 if (fd < 0) {
1239 ret = fd;
1240 goto out_free_irq;
1241 }
1242
1243 + file = anon_inode_getfile("gpio-event",
1244 + &lineevent_fileops,
1245 + le,
1246 + O_RDONLY | O_CLOEXEC);
1247 + if (IS_ERR(file)) {
1248 + ret = PTR_ERR(file);
1249 + goto out_put_unused_fd;
1250 + }
1251 +
1252 eventreq.fd = fd;
1253 if (copy_to_user(ip, &eventreq, sizeof(eventreq))) {
1254 - ret = -EFAULT;
1255 - goto out_free_irq;
1256 + /*
1257 + * fput() will trigger the release() callback, so do not go onto
1258 + * the regular error cleanup path here.
1259 + */
1260 + fput(file);
1261 + put_unused_fd(fd);
1262 + return -EFAULT;
1263 }
1264
1265 + fd_install(fd, file);
1266 +
1267 return 0;
1268
1269 +out_put_unused_fd:
1270 + put_unused_fd(fd);
1271 out_free_irq:
1272 free_irq(le->irq, le);
1273 out_free_desc:
1274 @@ -821,6 +892,8 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1275 if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
1276 struct gpiochip_info chipinfo;
1277
1278 + memset(&chipinfo, 0, sizeof(chipinfo));
1279 +
1280 strncpy(chipinfo.name, dev_name(&gdev->dev),
1281 sizeof(chipinfo.name));
1282 chipinfo.name[sizeof(chipinfo.name)-1] = '\0';
1283 @@ -837,7 +910,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1284
1285 if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
1286 return -EFAULT;
1287 - if (lineinfo.line_offset > gdev->ngpio)
1288 + if (lineinfo.line_offset >= gdev->ngpio)
1289 return -EINVAL;
1290
1291 desc = &gdev->descs[lineinfo.line_offset];
1292 diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1293 index 2a3ded44cf2a..7c8c185c90ea 100644
1294 --- a/drivers/gpu/drm/drm_atomic.c
1295 +++ b/drivers/gpu/drm/drm_atomic.c
1296 @@ -420,18 +420,21 @@ drm_atomic_replace_property_blob_from_id(struct drm_crtc *crtc,
1297 ssize_t expected_size,
1298 bool *replaced)
1299 {
1300 - struct drm_device *dev = crtc->dev;
1301 struct drm_property_blob *new_blob = NULL;
1302
1303 if (blob_id != 0) {
1304 - new_blob = drm_property_lookup_blob(dev, blob_id);
1305 + new_blob = drm_property_lookup_blob(crtc->dev, blob_id);
1306 if (new_blob == NULL)
1307 return -EINVAL;
1308 - if (expected_size > 0 && expected_size != new_blob->length)
1309 +
1310 + if (expected_size > 0 && expected_size != new_blob->length) {
1311 + drm_property_unreference_blob(new_blob);
1312 return -EINVAL;
1313 + }
1314 }
1315
1316 drm_atomic_replace_property_blob(blob, new_blob, replaced);
1317 + drm_property_unreference_blob(new_blob);
1318
1319 return 0;
1320 }
1321 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1322 index 04e457117980..aa644487749c 100644
1323 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1324 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1325 @@ -914,6 +914,7 @@ static void drm_dp_destroy_port(struct kref *kref)
1326 /* no need to clean up vcpi
1327 * as if we have no connector we never setup a vcpi */
1328 drm_dp_port_teardown_pdt(port, port->pdt);
1329 + port->pdt = DP_PEER_DEVICE_NONE;
1330 }
1331 kfree(port);
1332 }
1333 @@ -1159,7 +1160,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1334 drm_dp_put_port(port);
1335 goto out;
1336 }
1337 - if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1338 + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1339 + port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1340 + port->port_num >= DP_MST_LOGICAL_PORT_0) {
1341 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1342 drm_mode_connector_set_tile_property(port->connector);
1343 }
1344 @@ -2919,6 +2922,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1345 mgr->cbs->destroy_connector(mgr, port->connector);
1346
1347 drm_dp_port_teardown_pdt(port, port->pdt);
1348 + port->pdt = DP_PEER_DEVICE_NONE;
1349
1350 if (!port->input && port->vcpi.vcpi > 0) {
1351 drm_dp_mst_reset_vcpi_slots(mgr, port);
1352 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1353 index 0a06f9120b5a..337c55597ccd 100644
1354 --- a/drivers/gpu/drm/drm_fb_helper.c
1355 +++ b/drivers/gpu/drm/drm_fb_helper.c
1356 @@ -129,7 +129,12 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
1357 return 0;
1358 fail:
1359 for (i = 0; i < fb_helper->connector_count; i++) {
1360 - kfree(fb_helper->connector_info[i]);
1361 + struct drm_fb_helper_connector *fb_helper_connector =
1362 + fb_helper->connector_info[i];
1363 +
1364 + drm_connector_unreference(fb_helper_connector->connector);
1365 +
1366 + kfree(fb_helper_connector);
1367 fb_helper->connector_info[i] = NULL;
1368 }
1369 fb_helper->connector_count = 0;
1370 @@ -601,6 +606,24 @@ int drm_fb_helper_blank(int blank, struct fb_info *info)
1371 }
1372 EXPORT_SYMBOL(drm_fb_helper_blank);
1373
1374 +static void drm_fb_helper_modeset_release(struct drm_fb_helper *helper,
1375 + struct drm_mode_set *modeset)
1376 +{
1377 + int i;
1378 +
1379 + for (i = 0; i < modeset->num_connectors; i++) {
1380 + drm_connector_unreference(modeset->connectors[i]);
1381 + modeset->connectors[i] = NULL;
1382 + }
1383 + modeset->num_connectors = 0;
1384 +
1385 + drm_mode_destroy(helper->dev, modeset->mode);
1386 + modeset->mode = NULL;
1387 +
1388 + /* FIXME should hold a ref? */
1389 + modeset->fb = NULL;
1390 +}
1391 +
1392 static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
1393 {
1394 int i;
1395 @@ -610,10 +633,12 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
1396 kfree(helper->connector_info[i]);
1397 }
1398 kfree(helper->connector_info);
1399 +
1400 for (i = 0; i < helper->crtc_count; i++) {
1401 - kfree(helper->crtc_info[i].mode_set.connectors);
1402 - if (helper->crtc_info[i].mode_set.mode)
1403 - drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
1404 + struct drm_mode_set *modeset = &helper->crtc_info[i].mode_set;
1405 +
1406 + drm_fb_helper_modeset_release(helper, modeset);
1407 + kfree(modeset->connectors);
1408 }
1409 kfree(helper->crtc_info);
1410 }
1411 @@ -632,7 +657,9 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
1412 clip->x2 = clip->y2 = 0;
1413 spin_unlock_irqrestore(&helper->dirty_lock, flags);
1414
1415 - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
1416 + /* call dirty callback only when it has been really touched */
1417 + if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)
1418 + helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
1419 }
1420
1421 /**
1422 @@ -2027,7 +2054,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1423 struct drm_fb_helper_crtc **crtcs;
1424 struct drm_display_mode **modes;
1425 struct drm_fb_offset *offsets;
1426 - struct drm_mode_set *modeset;
1427 bool *enabled;
1428 int width, height;
1429 int i;
1430 @@ -2075,45 +2101,35 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1431
1432 /* need to set the modesets up here for use later */
1433 /* fill out the connector<->crtc mappings into the modesets */
1434 - for (i = 0; i < fb_helper->crtc_count; i++) {
1435 - modeset = &fb_helper->crtc_info[i].mode_set;
1436 - modeset->num_connectors = 0;
1437 - modeset->fb = NULL;
1438 - }
1439 + for (i = 0; i < fb_helper->crtc_count; i++)
1440 + drm_fb_helper_modeset_release(fb_helper,
1441 + &fb_helper->crtc_info[i].mode_set);
1442
1443 for (i = 0; i < fb_helper->connector_count; i++) {
1444 struct drm_display_mode *mode = modes[i];
1445 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
1446 struct drm_fb_offset *offset = &offsets[i];
1447 - modeset = &fb_crtc->mode_set;
1448 + struct drm_mode_set *modeset = &fb_crtc->mode_set;
1449
1450 if (mode && fb_crtc) {
1451 + struct drm_connector *connector =
1452 + fb_helper->connector_info[i]->connector;
1453 +
1454 DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
1455 mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
1456 +
1457 fb_crtc->desired_mode = mode;
1458 fb_crtc->x = offset->x;
1459 fb_crtc->y = offset->y;
1460 - if (modeset->mode)
1461 - drm_mode_destroy(dev, modeset->mode);
1462 modeset->mode = drm_mode_duplicate(dev,
1463 fb_crtc->desired_mode);
1464 - modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1465 + drm_connector_reference(connector);
1466 + modeset->connectors[modeset->num_connectors++] = connector;
1467 modeset->fb = fb_helper->fb;
1468 modeset->x = offset->x;
1469 modeset->y = offset->y;
1470 }
1471 }
1472 -
1473 - /* Clear out any old modes if there are no more connected outputs. */
1474 - for (i = 0; i < fb_helper->crtc_count; i++) {
1475 - modeset = &fb_helper->crtc_info[i].mode_set;
1476 - if (modeset->num_connectors == 0) {
1477 - BUG_ON(modeset->fb);
1478 - if (modeset->mode)
1479 - drm_mode_destroy(dev, modeset->mode);
1480 - modeset->mode = NULL;
1481 - }
1482 - }
1483 out:
1484 kfree(crtcs);
1485 kfree(modes);
1486 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
1487 index c6e69e4cfa83..1f8af87c6294 100644
1488 --- a/drivers/gpu/drm/i915/intel_bios.c
1489 +++ b/drivers/gpu/drm/i915/intel_bios.c
1490 @@ -1031,6 +1031,77 @@ static u8 translate_iboost(u8 val)
1491 return mapping[val];
1492 }
1493
1494 +static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
1495 + enum port port)
1496 +{
1497 + const struct ddi_vbt_port_info *info =
1498 + &dev_priv->vbt.ddi_port_info[port];
1499 + enum port p;
1500 +
1501 + if (!info->alternate_ddc_pin)
1502 + return;
1503 +
1504 + for_each_port_masked(p, (1 << port) - 1) {
1505 + struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
1506 +
1507 + if (info->alternate_ddc_pin != i->alternate_ddc_pin)
1508 + continue;
1509 +
1510 + DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
1511 + "disabling port %c DVI/HDMI support\n",
1512 + port_name(p), i->alternate_ddc_pin,
1513 + port_name(port), port_name(p));
1514 +
1515 + /*
1516 + * If we have multiple ports supposedly sharing the
1517 + * pin, then dvi/hdmi couldn't exist on the shared
1518 + * port. Otherwise they share the same DDC pin and
1519 + * system couldn't communicate with them separately.
1520 + *
1521 + * Due to parsing the ports in alphabetical order,
1522 + * a higher port will always clobber a lower one.
1523 + */
1524 + i->supports_dvi = false;
1525 + i->supports_hdmi = false;
1526 + i->alternate_ddc_pin = 0;
1527 + }
1528 +}
1529 +
1530 +static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
1531 + enum port port)
1532 +{
1533 + const struct ddi_vbt_port_info *info =
1534 + &dev_priv->vbt.ddi_port_info[port];
1535 + enum port p;
1536 +
1537 + if (!info->alternate_aux_channel)
1538 + return;
1539 +
1540 + for_each_port_masked(p, (1 << port) - 1) {
1541 + struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
1542 +
1543 + if (info->alternate_aux_channel != i->alternate_aux_channel)
1544 + continue;
1545 +
1546 + DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
1547 + "disabling port %c DP support\n",
1548 + port_name(p), i->alternate_aux_channel,
1549 + port_name(port), port_name(p));
1550 +
1551 + /*
1552 + * If we have multiple ports supposedly sharing the
1553 + * aux channel, then DP couldn't exist on the shared
1554 + * port. Otherwise they share the same aux channel
1555 + * and system couldn't communicate with them separately.
1556 + *
1557 + * Due to parsing the ports in alphabetical order,
1558 + * a higher port will always clobber a lower one.
1559 + */
1560 + i->supports_dp = false;
1561 + i->alternate_aux_channel = 0;
1562 + }
1563 +}
1564 +
1565 static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1566 const struct bdb_header *bdb)
1567 {
1568 @@ -1105,54 +1176,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1569 DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port));
1570
1571 if (is_dvi) {
1572 - if (port == PORT_E) {
1573 - info->alternate_ddc_pin = ddc_pin;
1574 - /* if DDIE share ddc pin with other port, then
1575 - * dvi/hdmi couldn't exist on the shared port.
1576 - * Otherwise they share the same ddc bin and system
1577 - * couldn't communicate with them seperately. */
1578 - if (ddc_pin == DDC_PIN_B) {
1579 - dev_priv->vbt.ddi_port_info[PORT_B].supports_dvi = 0;
1580 - dev_priv->vbt.ddi_port_info[PORT_B].supports_hdmi = 0;
1581 - } else if (ddc_pin == DDC_PIN_C) {
1582 - dev_priv->vbt.ddi_port_info[PORT_C].supports_dvi = 0;
1583 - dev_priv->vbt.ddi_port_info[PORT_C].supports_hdmi = 0;
1584 - } else if (ddc_pin == DDC_PIN_D) {
1585 - dev_priv->vbt.ddi_port_info[PORT_D].supports_dvi = 0;
1586 - dev_priv->vbt.ddi_port_info[PORT_D].supports_hdmi = 0;
1587 - }
1588 - } else if (ddc_pin == DDC_PIN_B && port != PORT_B)
1589 - DRM_DEBUG_KMS("Unexpected DDC pin for port B\n");
1590 - else if (ddc_pin == DDC_PIN_C && port != PORT_C)
1591 - DRM_DEBUG_KMS("Unexpected DDC pin for port C\n");
1592 - else if (ddc_pin == DDC_PIN_D && port != PORT_D)
1593 - DRM_DEBUG_KMS("Unexpected DDC pin for port D\n");
1594 + info->alternate_ddc_pin = ddc_pin;
1595 +
1596 + sanitize_ddc_pin(dev_priv, port);
1597 }
1598
1599 if (is_dp) {
1600 - if (port == PORT_E) {
1601 - info->alternate_aux_channel = aux_channel;
1602 - /* if DDIE share aux channel with other port, then
1603 - * DP couldn't exist on the shared port. Otherwise
1604 - * they share the same aux channel and system
1605 - * couldn't communicate with them seperately. */
1606 - if (aux_channel == DP_AUX_A)
1607 - dev_priv->vbt.ddi_port_info[PORT_A].supports_dp = 0;
1608 - else if (aux_channel == DP_AUX_B)
1609 - dev_priv->vbt.ddi_port_info[PORT_B].supports_dp = 0;
1610 - else if (aux_channel == DP_AUX_C)
1611 - dev_priv->vbt.ddi_port_info[PORT_C].supports_dp = 0;
1612 - else if (aux_channel == DP_AUX_D)
1613 - dev_priv->vbt.ddi_port_info[PORT_D].supports_dp = 0;
1614 - }
1615 - else if (aux_channel == DP_AUX_A && port != PORT_A)
1616 - DRM_DEBUG_KMS("Unexpected AUX channel for port A\n");
1617 - else if (aux_channel == DP_AUX_B && port != PORT_B)
1618 - DRM_DEBUG_KMS("Unexpected AUX channel for port B\n");
1619 - else if (aux_channel == DP_AUX_C && port != PORT_C)
1620 - DRM_DEBUG_KMS("Unexpected AUX channel for port C\n");
1621 - else if (aux_channel == DP_AUX_D && port != PORT_D)
1622 - DRM_DEBUG_KMS("Unexpected AUX channel for port D\n");
1623 + info->alternate_aux_channel = aux_channel;
1624 +
1625 + sanitize_aux_ch(dev_priv, port);
1626 }
1627
1628 if (bdb->version >= 158) {
1629 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1630 index e9a64fba6333..63462f279187 100644
1631 --- a/drivers/gpu/drm/i915/intel_display.c
1632 +++ b/drivers/gpu/drm/i915/intel_display.c
1633 @@ -13834,7 +13834,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
1634
1635 for_each_plane_in_state(state, plane, plane_state, i) {
1636 struct intel_plane_state *intel_plane_state =
1637 - to_intel_plane_state(plane_state);
1638 + to_intel_plane_state(plane->state);
1639
1640 if (!intel_plane_state->wait_req)
1641 continue;
1642 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1643 index 1ca155f4d368..3051182cf483 100644
1644 --- a/drivers/gpu/drm/i915/intel_dp.c
1645 +++ b/drivers/gpu/drm/i915/intel_dp.c
1646 @@ -1090,6 +1090,44 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
1647 return ret;
1648 }
1649
1650 +static enum port intel_aux_port(struct drm_i915_private *dev_priv,
1651 + enum port port)
1652 +{
1653 + const struct ddi_vbt_port_info *info =
1654 + &dev_priv->vbt.ddi_port_info[port];
1655 + enum port aux_port;
1656 +
1657 + if (!info->alternate_aux_channel) {
1658 + DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
1659 + port_name(port), port_name(port));
1660 + return port;
1661 + }
1662 +
1663 + switch (info->alternate_aux_channel) {
1664 + case DP_AUX_A:
1665 + aux_port = PORT_A;
1666 + break;
1667 + case DP_AUX_B:
1668 + aux_port = PORT_B;
1669 + break;
1670 + case DP_AUX_C:
1671 + aux_port = PORT_C;
1672 + break;
1673 + case DP_AUX_D:
1674 + aux_port = PORT_D;
1675 + break;
1676 + default:
1677 + MISSING_CASE(info->alternate_aux_channel);
1678 + aux_port = PORT_A;
1679 + break;
1680 + }
1681 +
1682 + DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
1683 + port_name(aux_port), port_name(port));
1684 +
1685 + return aux_port;
1686 +}
1687 +
1688 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1689 enum port port)
1690 {
1691 @@ -1150,36 +1188,9 @@ static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1692 }
1693 }
1694
1695 -/*
1696 - * On SKL we don't have Aux for port E so we rely
1697 - * on VBT to set a proper alternate aux channel.
1698 - */
1699 -static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1700 -{
1701 - const struct ddi_vbt_port_info *info =
1702 - &dev_priv->vbt.ddi_port_info[PORT_E];
1703 -
1704 - switch (info->alternate_aux_channel) {
1705 - case DP_AUX_A:
1706 - return PORT_A;
1707 - case DP_AUX_B:
1708 - return PORT_B;
1709 - case DP_AUX_C:
1710 - return PORT_C;
1711 - case DP_AUX_D:
1712 - return PORT_D;
1713 - default:
1714 - MISSING_CASE(info->alternate_aux_channel);
1715 - return PORT_A;
1716 - }
1717 -}
1718 -
1719 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1720 enum port port)
1721 {
1722 - if (port == PORT_E)
1723 - port = skl_porte_aux_port(dev_priv);
1724 -
1725 switch (port) {
1726 case PORT_A:
1727 case PORT_B:
1728 @@ -1195,9 +1206,6 @@ static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1729 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1730 enum port port, int index)
1731 {
1732 - if (port == PORT_E)
1733 - port = skl_porte_aux_port(dev_priv);
1734 -
1735 switch (port) {
1736 case PORT_A:
1737 case PORT_B:
1738 @@ -1235,7 +1243,8 @@ static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1739 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1740 {
1741 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1742 - enum port port = dp_to_dig_port(intel_dp)->port;
1743 + enum port port = intel_aux_port(dev_priv,
1744 + dp_to_dig_port(intel_dp)->port);
1745 int i;
1746
1747 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1748 diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
1749 index 3836a1c79714..ad483376bdfa 100644
1750 --- a/drivers/gpu/drm/i915/intel_fbc.c
1751 +++ b/drivers/gpu/drm/i915/intel_fbc.c
1752 @@ -104,8 +104,10 @@ static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
1753 int lines;
1754
1755 intel_fbc_get_plane_source_size(cache, NULL, &lines);
1756 - if (INTEL_INFO(dev_priv)->gen >= 7)
1757 + if (INTEL_GEN(dev_priv) == 7)
1758 lines = min(lines, 2048);
1759 + else if (INTEL_GEN(dev_priv) >= 8)
1760 + lines = min(lines, 2560);
1761
1762 /* Hardware needs the full buffer stride, not just the active area. */
1763 return lines * cache->fb.stride;
1764 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1765 index e59a28cb3158..a69160568254 100644
1766 --- a/drivers/gpu/drm/i915/intel_pm.c
1767 +++ b/drivers/gpu/drm/i915/intel_pm.c
1768 @@ -3363,13 +3363,15 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
1769 int num_active;
1770 int id, i;
1771
1772 + /* Clear the partitioning for disabled planes. */
1773 + memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
1774 + memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
1775 +
1776 if (WARN_ON(!state))
1777 return 0;
1778
1779 if (!cstate->base.active) {
1780 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
1781 - memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
1782 - memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
1783 return 0;
1784 }
1785
1786 @@ -3469,12 +3471,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
1787 return 0;
1788 }
1789
1790 -static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
1791 -{
1792 - /* TODO: Take into account the scalers once we support them */
1793 - return config->base.adjusted_mode.crtc_clock;
1794 -}
1795 -
1796 /*
1797 * The max latency should be 257 (max the punit can code is 255 and we add 2us
1798 * for the read latency) and cpp should always be <= 8, so that
1799 @@ -3525,7 +3521,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
1800 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
1801 * with additional adjustments for plane-specific scaling.
1802 */
1803 - adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
1804 + adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
1805 downscale_amount = skl_plane_downscale_amount(pstate);
1806
1807 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
1808 @@ -3737,11 +3733,11 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
1809 if (!cstate->base.active)
1810 return 0;
1811
1812 - if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
1813 + if (WARN_ON(ilk_pipe_pixel_rate(cstate) == 0))
1814 return 0;
1815
1816 return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
1817 - skl_pipe_pixel_rate(cstate));
1818 + ilk_pipe_pixel_rate(cstate));
1819 }
1820
1821 static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
1822 @@ -4051,6 +4047,12 @@ skl_compute_ddb(struct drm_atomic_state *state)
1823 intel_state->wm_results.dirty_pipes = ~0;
1824 }
1825
1826 + /*
1827 + * We're not recomputing for the pipes not included in the commit, so
1828 + * make sure we start with the current state.
1829 + */
1830 + memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
1831 +
1832 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
1833 struct intel_crtc_state *cstate;
1834
1835 diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
1836 index 29423e757d36..927c51e8abc6 100644
1837 --- a/drivers/gpu/drm/imx/ipuv3-plane.c
1838 +++ b/drivers/gpu/drm/imx/ipuv3-plane.c
1839 @@ -108,6 +108,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
1840 {
1841 struct drm_plane *plane = &ipu_plane->base;
1842 struct drm_plane_state *state = plane->state;
1843 + struct drm_crtc_state *crtc_state = state->crtc->state;
1844 struct drm_framebuffer *fb = state->fb;
1845 unsigned long eba, ubo, vbo;
1846 int active;
1847 @@ -149,7 +150,7 @@ static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
1848 break;
1849 }
1850
1851 - if (old_state->fb) {
1852 + if (!drm_atomic_crtc_needs_modeset(crtc_state)) {
1853 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
1854 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
1855 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
1856 @@ -359,7 +360,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
1857 if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
1858 return -EINVAL;
1859
1860 - if (old_fb) {
1861 + if (old_fb &&
1862 + (old_fb->pixel_format == DRM_FORMAT_YUV420 ||
1863 + old_fb->pixel_format == DRM_FORMAT_YVU420)) {
1864 old_ubo = drm_plane_state_to_ubo(old_state);
1865 old_vbo = drm_plane_state_to_vbo(old_state);
1866 if (ubo != old_ubo || vbo != old_vbo)
1867 diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1868 index dc57b628e074..193573d191e5 100644
1869 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
1870 +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
1871 @@ -240,7 +240,8 @@ static bool nouveau_pr3_present(struct pci_dev *pdev)
1872 if (!parent_adev)
1873 return false;
1874
1875 - return acpi_has_method(parent_adev->handle, "_PR3");
1876 + return parent_adev->power.flags.power_resources &&
1877 + acpi_has_method(parent_adev->handle, "_PR3");
1878 }
1879
1880 static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
1881 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1882 index 4a3d7cab83f7..4b9c2d5ff6a1 100644
1883 --- a/drivers/gpu/drm/radeon/ni.c
1884 +++ b/drivers/gpu/drm/radeon/ni.c
1885 @@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
1886 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
1887 int ring, u32 cp_int_cntl)
1888 {
1889 - u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
1890 -
1891 - WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
1892 + WREG32(SRBM_GFX_CNTL, RINGID(ring));
1893 WREG32(CP_INT_CNTL, cp_int_cntl);
1894 }
1895
1896 diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1897 index db64e0062689..3b0c229d7dcd 100644
1898 --- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1899 +++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
1900 @@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
1901
1902 tmp &= AUX_HPD_SEL(0x7);
1903 tmp |= AUX_HPD_SEL(chan->rec.hpd);
1904 - tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
1905 + tmp |= AUX_EN | AUX_LS_READ_EN;
1906
1907 WREG32(AUX_CONTROL + aux_offset[instance], tmp);
1908
1909 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1910 index 89bdf20344ae..c49934527a87 100644
1911 --- a/drivers/gpu/drm/radeon/si_dpm.c
1912 +++ b/drivers/gpu/drm/radeon/si_dpm.c
1913 @@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1914 int i;
1915 struct si_dpm_quirk *p = si_dpm_quirk_list;
1916
1917 + /* limit all SI kickers */
1918 + if (rdev->family == CHIP_PITCAIRN) {
1919 + if ((rdev->pdev->revision == 0x81) ||
1920 + (rdev->pdev->device == 0x6810) ||
1921 + (rdev->pdev->device == 0x6811) ||
1922 + (rdev->pdev->device == 0x6816) ||
1923 + (rdev->pdev->device == 0x6817) ||
1924 + (rdev->pdev->device == 0x6806))
1925 + max_mclk = 120000;
1926 + } else if (rdev->family == CHIP_VERDE) {
1927 + if ((rdev->pdev->revision == 0x81) ||
1928 + (rdev->pdev->revision == 0x83) ||
1929 + (rdev->pdev->revision == 0x87) ||
1930 + (rdev->pdev->device == 0x6820) ||
1931 + (rdev->pdev->device == 0x6821) ||
1932 + (rdev->pdev->device == 0x6822) ||
1933 + (rdev->pdev->device == 0x6823) ||
1934 + (rdev->pdev->device == 0x682A) ||
1935 + (rdev->pdev->device == 0x682B)) {
1936 + max_sclk = 75000;
1937 + max_mclk = 80000;
1938 + }
1939 + } else if (rdev->family == CHIP_OLAND) {
1940 + if ((rdev->pdev->revision == 0xC7) ||
1941 + (rdev->pdev->revision == 0x80) ||
1942 + (rdev->pdev->revision == 0x81) ||
1943 + (rdev->pdev->revision == 0x83) ||
1944 + (rdev->pdev->device == 0x6604) ||
1945 + (rdev->pdev->device == 0x6605)) {
1946 + max_sclk = 75000;
1947 + max_mclk = 80000;
1948 + }
1949 + } else if (rdev->family == CHIP_HAINAN) {
1950 + if ((rdev->pdev->revision == 0x81) ||
1951 + (rdev->pdev->revision == 0x83) ||
1952 + (rdev->pdev->revision == 0xC3) ||
1953 + (rdev->pdev->device == 0x6664) ||
1954 + (rdev->pdev->device == 0x6665) ||
1955 + (rdev->pdev->device == 0x6667)) {
1956 + max_sclk = 75000;
1957 + max_mclk = 80000;
1958 + }
1959 + }
1960 /* Apply dpm quirks */
1961 while (p && p->chip_device != 0) {
1962 if (rdev->pdev->vendor == p->chip_vendor &&
1963 @@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
1964 }
1965 ++p;
1966 }
1967 - /* limit mclk on all R7 370 parts for stability */
1968 - if (rdev->pdev->device == 0x6811 &&
1969 - rdev->pdev->revision == 0x81)
1970 - max_mclk = 120000;
1971 - /* limit sclk/mclk on Jet parts for stability */
1972 - if (rdev->pdev->device == 0x6665 &&
1973 - rdev->pdev->revision == 0xc3) {
1974 - max_sclk = 75000;
1975 - max_mclk = 80000;
1976 - }
1977
1978 if (rps->vce_active) {
1979 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
1980 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1981 index e92b09d32605..9ab703c1042e 100644
1982 --- a/drivers/hid/hid-ids.h
1983 +++ b/drivers/hid/hid-ids.h
1984 @@ -179,6 +179,7 @@
1985 #define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
1986 #define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
1987 #define USB_DEVICE_ID_ATEN_CS682 0x2213
1988 +#define USB_DEVICE_ID_ATEN_CS692 0x8021
1989
1990 #define USB_VENDOR_ID_ATMEL 0x03eb
1991 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
1992 diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1993 index bb400081efe4..85fcf60f3bba 100644
1994 --- a/drivers/hid/usbhid/hid-quirks.c
1995 +++ b/drivers/hid/usbhid/hid-quirks.c
1996 @@ -63,6 +63,7 @@ static const struct hid_blacklist {
1997 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
1998 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
1999 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
2000 + { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
2001 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
2002 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
2003 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
2004 diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
2005 index d5acaa2d8e61..9dc63725363d 100644
2006 --- a/drivers/hv/hv_util.c
2007 +++ b/drivers/hv/hv_util.c
2008 @@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
2009 u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
2010 struct icmsg_negotiate *negop = NULL;
2011
2012 - vmbus_recvpacket(channel, hbeat_txf_buf,
2013 - PAGE_SIZE, &recvlen, &requestid);
2014 + while (1) {
2015 +
2016 + vmbus_recvpacket(channel, hbeat_txf_buf,
2017 + PAGE_SIZE, &recvlen, &requestid);
2018 +
2019 + if (!recvlen)
2020 + break;
2021
2022 - if (recvlen > 0) {
2023 icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
2024 sizeof(struct vmbuspipe_hdr)];
2025
2026 diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
2027 index 5c5b7cada8be..dfae43523d34 100644
2028 --- a/drivers/i2c/busses/i2c-rk3x.c
2029 +++ b/drivers/i2c/busses/i2c-rk3x.c
2030 @@ -694,6 +694,8 @@ static int rk3x_i2c_v0_calc_timings(unsigned long clk_rate,
2031 t_calc->div_low--;
2032 t_calc->div_high--;
2033
2034 + /* Give the tuning value 0, that would not update con register */
2035 + t_calc->tuning = 0;
2036 /* Maximum divider supported by hw is 0xffff */
2037 if (t_calc->div_low > 0xffff) {
2038 t_calc->div_low = 0xffff;
2039 diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
2040 index 4233f5695352..3c38029e3fe9 100644
2041 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c
2042 +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
2043 @@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
2044 struct mbox_chan *mbox_chan;
2045 struct mbox_client mbox_client;
2046 struct completion rd_complete;
2047 - u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
2048 + u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
2049 u32 *resp_msg;
2050 };
2051
2052 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
2053 index da3a02ef4a31..a9a9f66947e8 100644
2054 --- a/drivers/i2c/i2c-core.c
2055 +++ b/drivers/i2c/i2c-core.c
2056 @@ -1592,6 +1592,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
2057 static void of_i2c_register_devices(struct i2c_adapter *adap)
2058 {
2059 struct device_node *node;
2060 + struct i2c_client *client;
2061
2062 /* Only register child devices if the adapter has a node pointer set */
2063 if (!adap->dev.of_node)
2064 @@ -1602,7 +1603,14 @@ static void of_i2c_register_devices(struct i2c_adapter *adap)
2065 for_each_available_child_of_node(adap->dev.of_node, node) {
2066 if (of_node_test_and_set_flag(node, OF_POPULATED))
2067 continue;
2068 - of_i2c_register_device(adap, node);
2069 +
2070 + client = of_i2c_register_device(adap, node);
2071 + if (IS_ERR(client)) {
2072 + dev_warn(&adap->dev,
2073 + "Failed to create I2C device for %s\n",
2074 + node->full_name);
2075 + of_node_clear_flag(node, OF_POPULATED);
2076 + }
2077 }
2078 }
2079
2080 @@ -2073,6 +2081,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
2081 /* add the driver to the list of i2c drivers in the driver core */
2082 driver->driver.owner = owner;
2083 driver->driver.bus = &i2c_bus_type;
2084 + INIT_LIST_HEAD(&driver->clients);
2085
2086 /* When registration returns, the driver core
2087 * will have called probe() for all matching-but-unbound devices.
2088 @@ -2083,7 +2092,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
2089
2090 pr_debug("driver [%s] registered\n", driver->driver.name);
2091
2092 - INIT_LIST_HEAD(&driver->clients);
2093 /* Walk the adapters that are already present */
2094 i2c_for_each_dev(driver, __process_new_driver);
2095
2096 @@ -2201,6 +2209,7 @@ static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
2097 if (IS_ERR(client)) {
2098 dev_err(&adap->dev, "failed to create client for '%s'\n",
2099 rd->dn->full_name);
2100 + of_node_clear_flag(rd->dn, OF_POPULATED);
2101 return notifier_from_errno(PTR_ERR(client));
2102 }
2103 break;
2104 diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
2105 index 407f141a1eee..a3fbdb761b5f 100644
2106 --- a/drivers/iio/chemical/atlas-ph-sensor.c
2107 +++ b/drivers/iio/chemical/atlas-ph-sensor.c
2108 @@ -207,13 +207,14 @@ static int atlas_check_ec_calibration(struct atlas_data *data)
2109 struct device *dev = &data->client->dev;
2110 int ret;
2111 unsigned int val;
2112 + __be16 rval;
2113
2114 - ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
2115 + ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &rval, 2);
2116 if (ret)
2117 return ret;
2118
2119 - dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
2120 - be16_to_cpu(val) % 100);
2121 + val = be16_to_cpu(rval);
2122 + dev_info(dev, "probe set to K = %d.%.2d", val / 100, val % 100);
2123
2124 ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
2125 if (ret)
2126 diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
2127 index f4bfb4b2d50a..073246c7d163 100644
2128 --- a/drivers/input/serio/i8042-x86ia64io.h
2129 +++ b/drivers/input/serio/i8042-x86ia64io.h
2130 @@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
2131 DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
2132 },
2133 },
2134 + {
2135 + /* Schenker XMG C504 - Elantech touchpad */
2136 + .matches = {
2137 + DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
2138 + DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
2139 + },
2140 + },
2141 { }
2142 };
2143
2144 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
2145 index 8abde6b8cedc..6d53810963f7 100644
2146 --- a/drivers/md/dm-raid.c
2147 +++ b/drivers/md/dm-raid.c
2148 @@ -266,7 +266,7 @@ static struct raid_type {
2149 {"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
2150 {"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
2151 {"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
2152 - {"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
2153 + {"raid4", "raid4 (dedicated first parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
2154 {"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
2155 {"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
2156 {"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
2157 @@ -2087,11 +2087,11 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2158 /*
2159 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata
2160 */
2161 - if (le32_to_cpu(sb->level) != mddev->level) {
2162 + if (le32_to_cpu(sb->level) != mddev->new_level) {
2163 DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)");
2164 return -EINVAL;
2165 }
2166 - if (le32_to_cpu(sb->layout) != mddev->layout) {
2167 + if (le32_to_cpu(sb->layout) != mddev->new_layout) {
2168 DMERR("Reshaping raid sets not yet supported. (raid layout change)");
2169 DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
2170 DMERR(" Old layout: %s w/ %d copies",
2171 @@ -2102,7 +2102,7 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2172 raid10_md_layout_to_copies(mddev->layout));
2173 return -EINVAL;
2174 }
2175 - if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
2176 + if (le32_to_cpu(sb->stripe_sectors) != mddev->new_chunk_sectors) {
2177 DMERR("Reshaping raid sets not yet supported. (stripe sectors change)");
2178 return -EINVAL;
2179 }
2180 @@ -2115,6 +2115,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
2181 return -EINVAL;
2182 }
2183
2184 + DMINFO("Discovered old metadata format; upgrading to extended metadata format");
2185 +
2186 /* Table line is checked vs. authoritative superblock */
2187 rs_set_new(rs);
2188 }
2189 @@ -2258,7 +2260,8 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
2190 if (!mddev->events && super_init_validation(rs, rdev))
2191 return -EINVAL;
2192
2193 - if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2194 + if (le32_to_cpu(sb->compat_features) &&
2195 + le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) {
2196 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags";
2197 return -EINVAL;
2198 }
2199 @@ -3646,7 +3649,7 @@ static void raid_resume(struct dm_target *ti)
2200
2201 static struct target_type raid_target = {
2202 .name = "raid",
2203 - .version = {1, 9, 0},
2204 + .version = {1, 9, 1},
2205 .module = THIS_MODULE,
2206 .ctr = raid_ctr,
2207 .dtr = raid_dtr,
2208 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
2209 index bdf1606f67bc..7a6254d54baf 100644
2210 --- a/drivers/md/dm-raid1.c
2211 +++ b/drivers/md/dm-raid1.c
2212 @@ -1292,6 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
2213
2214 dm_bio_restore(bd, bio);
2215 bio_record->details.bi_bdev = NULL;
2216 + bio->bi_error = 0;
2217
2218 queue_bio(ms, bio, rw);
2219 return DM_ENDIO_INCOMPLETE;
2220 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
2221 index 5da86c8b6545..2154596eedf3 100644
2222 --- a/drivers/md/dm-rq.c
2223 +++ b/drivers/md/dm-rq.c
2224 @@ -835,8 +835,11 @@ int dm_old_init_request_queue(struct mapped_device *md)
2225 init_kthread_worker(&md->kworker);
2226 md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
2227 "kdmwork-%s", dm_device_name(md));
2228 - if (IS_ERR(md->kworker_task))
2229 - return PTR_ERR(md->kworker_task);
2230 + if (IS_ERR(md->kworker_task)) {
2231 + int error = PTR_ERR(md->kworker_task);
2232 + md->kworker_task = NULL;
2233 + return error;
2234 + }
2235
2236 elv_register_queue(md->queue);
2237
2238 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
2239 index 3e407a9cde1f..c4b53b332607 100644
2240 --- a/drivers/md/dm-table.c
2241 +++ b/drivers/md/dm-table.c
2242 @@ -695,37 +695,32 @@ int dm_table_add_target(struct dm_table *t, const char *type,
2243
2244 tgt->type = dm_get_target_type(type);
2245 if (!tgt->type) {
2246 - DMERR("%s: %s: unknown target type", dm_device_name(t->md),
2247 - type);
2248 + DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
2249 return -EINVAL;
2250 }
2251
2252 if (dm_target_needs_singleton(tgt->type)) {
2253 if (t->num_targets) {
2254 - DMERR("%s: target type %s must appear alone in table",
2255 - dm_device_name(t->md), type);
2256 - return -EINVAL;
2257 + tgt->error = "singleton target type must appear alone in table";
2258 + goto bad;
2259 }
2260 t->singleton = true;
2261 }
2262
2263 if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
2264 - DMERR("%s: target type %s may not be included in read-only tables",
2265 - dm_device_name(t->md), type);
2266 - return -EINVAL;
2267 + tgt->error = "target type may not be included in a read-only table";
2268 + goto bad;
2269 }
2270
2271 if (t->immutable_target_type) {
2272 if (t->immutable_target_type != tgt->type) {
2273 - DMERR("%s: immutable target type %s cannot be mixed with other target types",
2274 - dm_device_name(t->md), t->immutable_target_type->name);
2275 - return -EINVAL;
2276 + tgt->error = "immutable target type cannot be mixed with other target types";
2277 + goto bad;
2278 }
2279 } else if (dm_target_is_immutable(tgt->type)) {
2280 if (t->num_targets) {
2281 - DMERR("%s: immutable target type %s cannot be mixed with other target types",
2282 - dm_device_name(t->md), tgt->type->name);
2283 - return -EINVAL;
2284 + tgt->error = "immutable target type cannot be mixed with other target types";
2285 + goto bad;
2286 }
2287 t->immutable_target_type = tgt->type;
2288 }
2289 @@ -740,7 +735,6 @@ int dm_table_add_target(struct dm_table *t, const char *type,
2290 */
2291 if (!adjoin(t, tgt)) {
2292 tgt->error = "Gap in table";
2293 - r = -EINVAL;
2294 goto bad;
2295 }
2296
2297 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
2298 index 0f2928b3136b..eeef575fb54b 100644
2299 --- a/drivers/md/dm.c
2300 +++ b/drivers/md/dm.c
2301 @@ -1423,8 +1423,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
2302 if (md->bs)
2303 bioset_free(md->bs);
2304
2305 - cleanup_srcu_struct(&md->io_barrier);
2306 -
2307 if (md->disk) {
2308 spin_lock(&_minor_lock);
2309 md->disk->private_data = NULL;
2310 @@ -1436,6 +1434,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
2311 if (md->queue)
2312 blk_cleanup_queue(md->queue);
2313
2314 + cleanup_srcu_struct(&md->io_barrier);
2315 +
2316 if (md->bdev) {
2317 bdput(md->bdev);
2318 md->bdev = NULL;
2319 diff --git a/drivers/md/md.c b/drivers/md/md.c
2320 index 915e84d631a2..db0aa6c058e7 100644
2321 --- a/drivers/md/md.c
2322 +++ b/drivers/md/md.c
2323 @@ -8120,14 +8120,14 @@ void md_do_sync(struct md_thread *thread)
2324
2325 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
2326 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
2327 - mddev->curr_resync > 2) {
2328 + mddev->curr_resync > 3) {
2329 mddev->curr_resync_completed = mddev->curr_resync;
2330 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2331 }
2332 mddev->pers->sync_request(mddev, max_sectors, &skipped);
2333
2334 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
2335 - mddev->curr_resync > 2) {
2336 + mddev->curr_resync > 3) {
2337 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2338 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
2339 if (mddev->curr_resync >= mddev->recovery_cp) {
2340 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2341 index 21dc00eb1989..95bf4cd8fcc0 100644
2342 --- a/drivers/md/raid1.c
2343 +++ b/drivers/md/raid1.c
2344 @@ -407,11 +407,14 @@ static void raid1_end_write_request(struct bio *bio)
2345 struct bio *to_put = NULL;
2346 int mirror = find_bio_disk(r1_bio, bio);
2347 struct md_rdev *rdev = conf->mirrors[mirror].rdev;
2348 + bool discard_error;
2349 +
2350 + discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
2351
2352 /*
2353 * 'one mirror IO has finished' event handler:
2354 */
2355 - if (bio->bi_error) {
2356 + if (bio->bi_error && !discard_error) {
2357 set_bit(WriteErrorSeen, &rdev->flags);
2358 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2359 set_bit(MD_RECOVERY_NEEDED, &
2360 @@ -448,7 +451,7 @@ static void raid1_end_write_request(struct bio *bio)
2361
2362 /* Maybe we can clear some bad blocks. */
2363 if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
2364 - &first_bad, &bad_sectors)) {
2365 + &first_bad, &bad_sectors) && !discard_error) {
2366 r1_bio->bios[mirror] = IO_MADE_GOOD;
2367 set_bit(R1BIO_MadeGood, &r1_bio->state);
2368 }
2369 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
2370 index be1a9fca3b2d..39fddda2fef2 100644
2371 --- a/drivers/md/raid10.c
2372 +++ b/drivers/md/raid10.c
2373 @@ -447,6 +447,9 @@ static void raid10_end_write_request(struct bio *bio)
2374 struct r10conf *conf = r10_bio->mddev->private;
2375 int slot, repl;
2376 struct md_rdev *rdev = NULL;
2377 + bool discard_error;
2378 +
2379 + discard_error = bio->bi_error && bio_op(bio) == REQ_OP_DISCARD;
2380
2381 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2382
2383 @@ -460,7 +463,7 @@ static void raid10_end_write_request(struct bio *bio)
2384 /*
2385 * this branch is our 'one mirror IO has finished' event handler:
2386 */
2387 - if (bio->bi_error) {
2388 + if (bio->bi_error && !discard_error) {
2389 if (repl)
2390 /* Never record new bad blocks to replacement,
2391 * just fail it.
2392 @@ -503,7 +506,7 @@ static void raid10_end_write_request(struct bio *bio)
2393 if (is_badblock(rdev,
2394 r10_bio->devs[slot].addr,
2395 r10_bio->sectors,
2396 - &first_bad, &bad_sectors)) {
2397 + &first_bad, &bad_sectors) && !discard_error) {
2398 bio_put(bio);
2399 if (repl)
2400 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
2401 diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
2402 index 9fb4fc26a359..ed9759e8a6fc 100644
2403 --- a/drivers/media/platform/vsp1/vsp1_video.c
2404 +++ b/drivers/media/platform/vsp1/vsp1_video.c
2405 @@ -675,6 +675,13 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
2406 unsigned long flags;
2407 int ret;
2408
2409 + /* Clear the buffers ready flag to make sure the device won't be started
2410 + * by a QBUF on the video node on the other side of the pipeline.
2411 + */
2412 + spin_lock_irqsave(&video->irqlock, flags);
2413 + pipe->buffers_ready &= ~(1 << video->pipe_index);
2414 + spin_unlock_irqrestore(&video->irqlock, flags);
2415 +
2416 mutex_lock(&pipe->lock);
2417 if (--pipe->stream_count == pipe->num_inputs) {
2418 /* Stop the pipeline. */
2419 diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
2420 index af23d7dfe752..2e5233b60971 100644
2421 --- a/drivers/misc/cxl/api.c
2422 +++ b/drivers/misc/cxl/api.c
2423 @@ -247,7 +247,9 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
2424 cxl_ctx_get();
2425
2426 if ((rc = cxl_ops->attach_process(ctx, kernel, wed, 0))) {
2427 + put_pid(ctx->glpid);
2428 put_pid(ctx->pid);
2429 + ctx->glpid = ctx->pid = NULL;
2430 cxl_adapter_context_put(ctx->afu->adapter);
2431 cxl_ctx_put();
2432 goto out;
2433 diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
2434 index d0b421f49b39..77080cc5fa0a 100644
2435 --- a/drivers/misc/cxl/file.c
2436 +++ b/drivers/misc/cxl/file.c
2437 @@ -194,6 +194,16 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
2438 ctx->mmio_err_ff = !!(work.flags & CXL_START_WORK_ERR_FF);
2439
2440 /*
2441 + * Increment the mapped context count for adapter. This also checks
2442 + * if adapter_context_lock is taken.
2443 + */
2444 + rc = cxl_adapter_context_get(ctx->afu->adapter);
2445 + if (rc) {
2446 + afu_release_irqs(ctx, ctx);
2447 + goto out;
2448 + }
2449 +
2450 + /*
2451 * We grab the PID here and not in the file open to allow for the case
2452 * where a process (master, some daemon, etc) has opened the chardev on
2453 * behalf of another process, so the AFU's mm gets bound to the process
2454 @@ -205,15 +215,6 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
2455 ctx->pid = get_task_pid(current, PIDTYPE_PID);
2456 ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
2457
2458 - /*
2459 - * Increment the mapped context count for adapter. This also checks
2460 - * if adapter_context_lock is taken.
2461 - */
2462 - rc = cxl_adapter_context_get(ctx->afu->adapter);
2463 - if (rc) {
2464 - afu_release_irqs(ctx, ctx);
2465 - goto out;
2466 - }
2467
2468 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
2469
2470 @@ -221,6 +222,9 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
2471 amr))) {
2472 afu_release_irqs(ctx, ctx);
2473 cxl_adapter_context_put(ctx->afu->adapter);
2474 + put_pid(ctx->glpid);
2475 + put_pid(ctx->pid);
2476 + ctx->glpid = ctx->pid = NULL;
2477 goto out;
2478 }
2479
2480 diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
2481 index 222367cc8c81..524660510599 100644
2482 --- a/drivers/misc/genwqe/card_utils.c
2483 +++ b/drivers/misc/genwqe/card_utils.c
2484 @@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
2485 if (copy_from_user(sgl->lpage, user_addr + user_size -
2486 sgl->lpage_size, sgl->lpage_size)) {
2487 rc = -EFAULT;
2488 - goto err_out1;
2489 + goto err_out2;
2490 }
2491 }
2492 return 0;
2493
2494 + err_out2:
2495 + __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
2496 + sgl->lpage_dma_addr);
2497 + sgl->lpage = NULL;
2498 + sgl->lpage_dma_addr = 0;
2499 err_out1:
2500 __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
2501 sgl->fpage_dma_addr);
2502 + sgl->fpage = NULL;
2503 + sgl->fpage_dma_addr = 0;
2504 err_out:
2505 __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
2506 sgl->sgl_dma_addr);
2507 + sgl->sgl = NULL;
2508 + sgl->sgl_dma_addr = 0;
2509 + sgl->sgl_size = 0;
2510 return -ENOMEM;
2511 }
2512
2513 diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
2514 index 4a6c1b85f11e..2d23cdf8a734 100644
2515 --- a/drivers/misc/mei/hw-txe.c
2516 +++ b/drivers/misc/mei/hw-txe.c
2517 @@ -978,11 +978,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
2518 hisr = mei_txe_br_reg_read(hw, HISR_REG);
2519
2520 aliveness = mei_txe_aliveness_get(dev);
2521 - if (hhisr & IPC_HHIER_SEC && aliveness)
2522 + if (hhisr & IPC_HHIER_SEC && aliveness) {
2523 ipc_isr = mei_txe_sec_reg_read_silent(hw,
2524 SEC_IPC_HOST_INT_STATUS_REG);
2525 - else
2526 + } else {
2527 ipc_isr = 0;
2528 + hhisr &= ~IPC_HHIER_SEC;
2529 + }
2530
2531 generated = generated ||
2532 (hisr & HISR_INT_STS_MSK) ||
2533 diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c
2534 index c0bb0c793e84..dbbc4303bdd0 100644
2535 --- a/drivers/mmc/host/dw_mmc-pltfm.c
2536 +++ b/drivers/mmc/host/dw_mmc-pltfm.c
2537 @@ -46,12 +46,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
2538 host->pdata = pdev->dev.platform_data;
2539
2540 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2541 - /* Get registers' physical base address */
2542 - host->phy_regs = regs->start;
2543 host->regs = devm_ioremap_resource(&pdev->dev, regs);
2544 if (IS_ERR(host->regs))
2545 return PTR_ERR(host->regs);
2546
2547 + /* Get registers' physical base address */
2548 + host->phy_regs = regs->start;
2549 +
2550 platform_set_drvdata(pdev, host);
2551 return dw_mci_probe(host);
2552 }
2553 diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
2554 index 48eb55f344eb..a01a70a8fd3b 100644
2555 --- a/drivers/mtd/ubi/fastmap.c
2556 +++ b/drivers/mtd/ubi/fastmap.c
2557 @@ -515,10 +515,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
2558 unsigned long long ec = be64_to_cpu(ech->ec);
2559 unmap_peb(ai, pnum);
2560 dbg_bld("Adding PEB to free: %i", pnum);
2561 +
2562 if (err == UBI_IO_FF_BITFLIPS)
2563 - add_aeb(ai, free, pnum, ec, 1);
2564 - else
2565 - add_aeb(ai, free, pnum, ec, 0);
2566 + scrub = 1;
2567 +
2568 + add_aeb(ai, free, pnum, ec, scrub);
2569 continue;
2570 } else if (err == 0 || err == UBI_IO_BITFLIPS) {
2571 dbg_bld("Found non empty PEB:%i in pool", pnum);
2572 @@ -750,11 +751,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
2573 fmvhdr->vol_type,
2574 be32_to_cpu(fmvhdr->last_eb_bytes));
2575
2576 - if (!av)
2577 - goto fail_bad;
2578 - if (PTR_ERR(av) == -EINVAL) {
2579 - ubi_err(ubi, "volume (ID %i) already exists",
2580 - fmvhdr->vol_id);
2581 + if (IS_ERR(av)) {
2582 + if (PTR_ERR(av) == -EEXIST)
2583 + ubi_err(ubi, "volume (ID %i) already exists",
2584 + fmvhdr->vol_id);
2585 +
2586 goto fail_bad;
2587 }
2588
2589 diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
2590 index 30ae5bf81611..76ad825a823d 100644
2591 --- a/drivers/net/wireless/ath/ath10k/core.h
2592 +++ b/drivers/net/wireless/ath/ath10k/core.h
2593 @@ -445,6 +445,7 @@ struct ath10k_debug {
2594 u32 pktlog_filter;
2595 u32 reg_addr;
2596 u32 nf_cal_period;
2597 + void *cal_data;
2598
2599 struct ath10k_fw_crash_data *fw_crash_data;
2600 };
2601 diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
2602 index 8f0fd41dfd4b..8c6a5dd7e178 100644
2603 --- a/drivers/net/wireless/ath/ath10k/debug.c
2604 +++ b/drivers/net/wireless/ath/ath10k/debug.c
2605 @@ -30,6 +30,8 @@
2606 /* ms */
2607 #define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
2608
2609 +#define ATH10K_DEBUG_CAL_DATA_LEN 12064
2610 +
2611 #define ATH10K_FW_CRASH_DUMP_VERSION 1
2612
2613 /**
2614 @@ -1450,56 +1452,51 @@ static const struct file_operations fops_fw_dbglog = {
2615 .llseek = default_llseek,
2616 };
2617
2618 -static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
2619 +static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
2620 {
2621 - struct ath10k *ar = inode->i_private;
2622 - void *buf;
2623 u32 hi_addr;
2624 __le32 addr;
2625 int ret;
2626
2627 - mutex_lock(&ar->conf_mutex);
2628 -
2629 - if (ar->state != ATH10K_STATE_ON &&
2630 - ar->state != ATH10K_STATE_UTF) {
2631 - ret = -ENETDOWN;
2632 - goto err;
2633 - }
2634 + lockdep_assert_held(&ar->conf_mutex);
2635
2636 - buf = vmalloc(ar->hw_params.cal_data_len);
2637 - if (!buf) {
2638 - ret = -ENOMEM;
2639 - goto err;
2640 - }
2641 + if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
2642 + return -EINVAL;
2643
2644 hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
2645
2646 ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
2647 if (ret) {
2648 - ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
2649 - goto err_vfree;
2650 + ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
2651 + ret);
2652 + return ret;
2653 }
2654
2655 - ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
2656 + ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
2657 ar->hw_params.cal_data_len);
2658 if (ret) {
2659 ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
2660 - goto err_vfree;
2661 + return ret;
2662 }
2663
2664 - file->private_data = buf;
2665 + return 0;
2666 +}
2667
2668 - mutex_unlock(&ar->conf_mutex);
2669 +static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
2670 +{
2671 + struct ath10k *ar = inode->i_private;
2672
2673 - return 0;
2674 + mutex_lock(&ar->conf_mutex);
2675
2676 -err_vfree:
2677 - vfree(buf);
2678 + if (ar->state == ATH10K_STATE_ON ||
2679 + ar->state == ATH10K_STATE_UTF) {
2680 + ath10k_debug_cal_data_fetch(ar);
2681 + }
2682
2683 -err:
2684 + file->private_data = ar;
2685 mutex_unlock(&ar->conf_mutex);
2686
2687 - return ret;
2688 + return 0;
2689 }
2690
2691 static ssize_t ath10k_debug_cal_data_read(struct file *file,
2692 @@ -1507,18 +1504,16 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
2693 size_t count, loff_t *ppos)
2694 {
2695 struct ath10k *ar = file->private_data;
2696 - void *buf = file->private_data;
2697
2698 - return simple_read_from_buffer(user_buf, count, ppos,
2699 - buf, ar->hw_params.cal_data_len);
2700 -}
2701 + mutex_lock(&ar->conf_mutex);
2702
2703 -static int ath10k_debug_cal_data_release(struct inode *inode,
2704 - struct file *file)
2705 -{
2706 - vfree(file->private_data);
2707 + count = simple_read_from_buffer(user_buf, count, ppos,
2708 + ar->debug.cal_data,
2709 + ar->hw_params.cal_data_len);
2710
2711 - return 0;
2712 + mutex_unlock(&ar->conf_mutex);
2713 +
2714 + return count;
2715 }
2716
2717 static ssize_t ath10k_write_ani_enable(struct file *file,
2718 @@ -1579,7 +1574,6 @@ static const struct file_operations fops_ani_enable = {
2719 static const struct file_operations fops_cal_data = {
2720 .open = ath10k_debug_cal_data_open,
2721 .read = ath10k_debug_cal_data_read,
2722 - .release = ath10k_debug_cal_data_release,
2723 .owner = THIS_MODULE,
2724 .llseek = default_llseek,
2725 };
2726 @@ -1931,6 +1925,8 @@ void ath10k_debug_stop(struct ath10k *ar)
2727 {
2728 lockdep_assert_held(&ar->conf_mutex);
2729
2730 + ath10k_debug_cal_data_fetch(ar);
2731 +
2732 /* Must not use _sync to avoid deadlock, we do that in
2733 * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
2734 * warning from del_timer(). */
2735 @@ -2343,6 +2339,10 @@ int ath10k_debug_create(struct ath10k *ar)
2736 if (!ar->debug.fw_crash_data)
2737 return -ENOMEM;
2738
2739 + ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
2740 + if (!ar->debug.cal_data)
2741 + return -ENOMEM;
2742 +
2743 INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
2744 INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
2745 INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
2746 @@ -2356,6 +2356,9 @@ void ath10k_debug_destroy(struct ath10k *ar)
2747 vfree(ar->debug.fw_crash_data);
2748 ar->debug.fw_crash_data = NULL;
2749
2750 + vfree(ar->debug.cal_data);
2751 + ar->debug.cal_data = NULL;
2752 +
2753 ath10k_debug_fw_stats_reset(ar);
2754
2755 kfree(ar->debug.tpc_stats);
2756 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
2757 index b6f064a8d264..7e27a06e5df1 100644
2758 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
2759 +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
2760 @@ -33,7 +33,6 @@ struct coeff {
2761
2762 enum ar9003_cal_types {
2763 IQ_MISMATCH_CAL = BIT(0),
2764 - TEMP_COMP_CAL = BIT(1),
2765 };
2766
2767 static void ar9003_hw_setup_calibration(struct ath_hw *ah,
2768 @@ -59,12 +58,6 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
2769 /* Kick-off cal */
2770 REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
2771 break;
2772 - case TEMP_COMP_CAL:
2773 - ath_dbg(common, CALIBRATE,
2774 - "starting Temperature Compensation Calibration\n");
2775 - REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_LOCAL);
2776 - REG_SET_BIT(ah, AR_CH0_THERM, AR_CH0_THERM_START);
2777 - break;
2778 default:
2779 ath_err(common, "Invalid calibration type\n");
2780 break;
2781 @@ -93,8 +86,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
2782 /*
2783 * Accumulate cal measures for active chains
2784 */
2785 - if (cur_caldata->calCollect)
2786 - cur_caldata->calCollect(ah);
2787 + cur_caldata->calCollect(ah);
2788 ah->cal_samples++;
2789
2790 if (ah->cal_samples >= cur_caldata->calNumSamples) {
2791 @@ -107,8 +99,7 @@ static bool ar9003_hw_per_calibration(struct ath_hw *ah,
2792 /*
2793 * Process accumulated data
2794 */
2795 - if (cur_caldata->calPostProc)
2796 - cur_caldata->calPostProc(ah, numChains);
2797 + cur_caldata->calPostProc(ah, numChains);
2798
2799 /* Calibration has finished. */
2800 caldata->CalValid |= cur_caldata->calType;
2801 @@ -323,16 +314,9 @@ static const struct ath9k_percal_data iq_cal_single_sample = {
2802 ar9003_hw_iqcalibrate
2803 };
2804
2805 -static const struct ath9k_percal_data temp_cal_single_sample = {
2806 - TEMP_COMP_CAL,
2807 - MIN_CAL_SAMPLES,
2808 - PER_MAX_LOG_COUNT,
2809 -};
2810 -
2811 static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
2812 {
2813 ah->iq_caldata.calData = &iq_cal_single_sample;
2814 - ah->temp_caldata.calData = &temp_cal_single_sample;
2815
2816 if (AR_SREV_9300_20_OR_LATER(ah)) {
2817 ah->enabled_cals |= TX_IQ_CAL;
2818 @@ -340,7 +324,7 @@ static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
2819 ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
2820 }
2821
2822 - ah->supp_cals = IQ_MISMATCH_CAL | TEMP_COMP_CAL;
2823 + ah->supp_cals = IQ_MISMATCH_CAL;
2824 }
2825
2826 #define OFF_UPPER_LT 24
2827 @@ -1399,9 +1383,6 @@ static void ar9003_hw_init_cal_common(struct ath_hw *ah)
2828 INIT_CAL(&ah->iq_caldata);
2829 INSERT_CAL(ah, &ah->iq_caldata);
2830
2831 - INIT_CAL(&ah->temp_caldata);
2832 - INSERT_CAL(ah, &ah->temp_caldata);
2833 -
2834 /* Initialize current pointer to first element in list */
2835 ah->cal_list_curr = ah->cal_list;
2836
2837 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
2838 index 2a5d3ad1169c..9cbca1229bac 100644
2839 --- a/drivers/net/wireless/ath/ath9k/hw.h
2840 +++ b/drivers/net/wireless/ath/ath9k/hw.h
2841 @@ -830,7 +830,6 @@ struct ath_hw {
2842 /* Calibration */
2843 u32 supp_cals;
2844 struct ath9k_cal_list iq_caldata;
2845 - struct ath9k_cal_list temp_caldata;
2846 struct ath9k_cal_list adcgain_caldata;
2847 struct ath9k_cal_list adcdc_caldata;
2848 struct ath9k_cal_list *cal_list;
2849 diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
2850 index 4341d56805f8..a28093235ee0 100644
2851 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
2852 +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
2853 @@ -231,7 +231,7 @@ struct rtl8xxxu_rxdesc16 {
2854 u32 pattern1match:1;
2855 u32 pattern0match:1;
2856 #endif
2857 - __le32 tsfl;
2858 + u32 tsfl;
2859 #if 0
2860 u32 bassn:12;
2861 u32 bavld:1;
2862 @@ -361,7 +361,7 @@ struct rtl8xxxu_rxdesc24 {
2863 u32 ldcp:1;
2864 u32 splcp:1;
2865 #endif
2866 - __le32 tsfl;
2867 + u32 tsfl;
2868 };
2869
2870 struct rtl8xxxu_txdesc32 {
2871 diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
2872 index 9d45afb0e3fd..c831a586766c 100644
2873 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
2874 +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
2875 @@ -1498,6 +1498,10 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
2876 u32 val32;
2877 u8 val8;
2878
2879 + val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
2880 + val32 |= (BIT(22) | BIT(23));
2881 + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
2882 +
2883 /*
2884 * No indication anywhere as to what 0x0790 does. The 2 antenna
2885 * vendor code preserves bits 6-7 here.
2886 diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2887 index 77048db3b32a..c6b246aa2419 100644
2888 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2889 +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
2890 @@ -5201,7 +5201,12 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
2891 pkt_offset = roundup(pkt_len + drvinfo_sz + desc_shift +
2892 sizeof(struct rtl8xxxu_rxdesc16), 128);
2893
2894 - if (pkt_cnt > 1)
2895 + /*
2896 + * Only clone the skb if there's enough data at the end to
2897 + * at least cover the rx descriptor
2898 + */
2899 + if (pkt_cnt > 1 &&
2900 + urb_len > (pkt_offset + sizeof(struct rtl8xxxu_rxdesc16)))
2901 next_skb = skb_clone(skb, GFP_ATOMIC);
2902
2903 rx_status = IEEE80211_SKB_RXCB(skb);
2904 @@ -5219,7 +5224,7 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
2905 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
2906 rx_desc->rxmcs);
2907
2908 - rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
2909 + rx_status->mactime = rx_desc->tsfl;
2910 rx_status->flag |= RX_FLAG_MACTIME_START;
2911
2912 if (!rx_desc->swdec)
2913 @@ -5289,7 +5294,7 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
2914 rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
2915 rx_desc->rxmcs);
2916
2917 - rx_status->mactime = le32_to_cpu(rx_desc->tsfl);
2918 + rx_status->mactime = rx_desc->tsfl;
2919 rx_status->flag |= RX_FLAG_MACTIME_START;
2920
2921 if (!rx_desc->swdec)
2922 diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
2923 index 0dbd29e287db..172ef8245811 100644
2924 --- a/drivers/pwm/core.c
2925 +++ b/drivers/pwm/core.c
2926 @@ -339,6 +339,8 @@ int pwmchip_remove(struct pwm_chip *chip)
2927 unsigned int i;
2928 int ret = 0;
2929
2930 + pwmchip_sysfs_unexport_children(chip);
2931 +
2932 mutex_lock(&pwm_lock);
2933
2934 for (i = 0; i < chip->npwm; i++) {
2935 diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
2936 index 18ed725594c3..0296d8178ae2 100644
2937 --- a/drivers/pwm/sysfs.c
2938 +++ b/drivers/pwm/sysfs.c
2939 @@ -409,6 +409,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
2940 }
2941 }
2942
2943 +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
2944 +{
2945 + struct device *parent;
2946 + unsigned int i;
2947 +
2948 + parent = class_find_device(&pwm_class, NULL, chip,
2949 + pwmchip_sysfs_match);
2950 + if (!parent)
2951 + return;
2952 +
2953 + for (i = 0; i < chip->npwm; i++) {
2954 + struct pwm_device *pwm = &chip->pwms[i];
2955 +
2956 + if (test_bit(PWMF_EXPORTED, &pwm->flags))
2957 + pwm_unexport_child(parent, pwm);
2958 + }
2959 +}
2960 +
2961 static int __init pwm_sysfs_init(void)
2962 {
2963 return class_register(&pwm_class);
2964 diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
2965 index 3d53d636b17b..f0cfb0451757 100644
2966 --- a/drivers/scsi/arcmsr/arcmsr_hba.c
2967 +++ b/drivers/scsi/arcmsr/arcmsr_hba.c
2968 @@ -2636,18 +2636,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
2969 struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2970 struct CommandControlBlock *ccb;
2971 int target = cmd->device->id;
2972 - int lun = cmd->device->lun;
2973 - uint8_t scsicmd = cmd->cmnd[0];
2974 cmd->scsi_done = done;
2975 cmd->host_scribble = NULL;
2976 cmd->result = 0;
2977 - if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
2978 - if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2979 - cmd->result = (DID_NO_CONNECT << 16);
2980 - }
2981 - cmd->scsi_done(cmd);
2982 - return 0;
2983 - }
2984 if (target == 16) {
2985 /* virtual device for iop message transfer */
2986 arcmsr_handle_virtual_command(acb, cmd);
2987 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
2988 index 6a219a0844d3..05e892a231a5 100644
2989 --- a/drivers/scsi/scsi_debug.c
2990 +++ b/drivers/scsi/scsi_debug.c
2991 @@ -5134,6 +5134,7 @@ static void __exit scsi_debug_exit(void)
2992 bus_unregister(&pseudo_lld_bus);
2993 root_device_unregister(pseudo_primary);
2994
2995 + vfree(map_storep);
2996 vfree(dif_storep);
2997 vfree(fake_storep);
2998 kfree(sdebug_q_arr);
2999 diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
3000 index 8d85a3c343da..3f3561371410 100644
3001 --- a/drivers/spi/spi-fsl-espi.c
3002 +++ b/drivers/spi/spi-fsl-espi.c
3003 @@ -581,7 +581,7 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
3004
3005 mspi->len -= rx_nr_bytes;
3006
3007 - if (mspi->rx)
3008 + if (rx_nr_bytes && mspi->rx)
3009 mspi->get_rx(rx_data, mspi);
3010 }
3011
3012 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3013 index 200ca228d885..935f1a511856 100644
3014 --- a/drivers/spi/spi.c
3015 +++ b/drivers/spi/spi.c
3016 @@ -1607,9 +1607,11 @@ static void of_register_spi_devices(struct spi_master *master)
3017 if (of_node_test_and_set_flag(nc, OF_POPULATED))
3018 continue;
3019 spi = of_register_spi_device(master, nc);
3020 - if (IS_ERR(spi))
3021 + if (IS_ERR(spi)) {
3022 dev_warn(&master->dev, "Failed to create SPI device for %s\n",
3023 nc->full_name);
3024 + of_node_clear_flag(nc, OF_POPULATED);
3025 + }
3026 }
3027 }
3028 #else
3029 @@ -3120,6 +3122,7 @@ static int of_spi_notify(struct notifier_block *nb, unsigned long action,
3030 if (IS_ERR(spi)) {
3031 pr_err("%s: failed to create for '%s'\n",
3032 __func__, rd->dn->full_name);
3033 + of_node_clear_flag(rd->dn, OF_POPULATED);
3034 return notifier_from_errno(PTR_ERR(spi));
3035 }
3036 break;
3037 diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
3038 index 78f524fcd214..f4dbcb19d7c5 100644
3039 --- a/drivers/staging/wilc1000/host_interface.c
3040 +++ b/drivers/staging/wilc1000/host_interface.c
3041 @@ -3391,7 +3391,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
3042
3043 clients_count++;
3044
3045 - destroy_workqueue(hif_workqueue);
3046 _fail_:
3047 return result;
3048 }
3049 diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
3050 index 0e4dc0afcfd2..7a223074df3d 100644
3051 --- a/drivers/thermal/intel_powerclamp.c
3052 +++ b/drivers/thermal/intel_powerclamp.c
3053 @@ -669,20 +669,10 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
3054 .set_cur_state = powerclamp_set_cur_state,
3055 };
3056
3057 -static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
3058 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
3059 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
3060 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
3061 - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
3062 - {}
3063 -};
3064 -MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
3065 -
3066 static int __init powerclamp_probe(void)
3067 {
3068 - if (!x86_match_cpu(intel_powerclamp_ids)) {
3069 - pr_err("Intel powerclamp does not run on family %d model %d\n",
3070 - boot_cpu_data.x86, boot_cpu_data.x86_model);
3071 + if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
3072 + pr_err("CPU does not support MWAIT");
3073 return -ENODEV;
3074 }
3075
3076 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3077 index 2705ca960e92..fd375f15bbbd 100644
3078 --- a/drivers/tty/vt/vt.c
3079 +++ b/drivers/tty/vt/vt.c
3080 @@ -870,10 +870,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
3081 if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
3082 return 0;
3083
3084 + if (new_screen_size > (4 << 20))
3085 + return -EINVAL;
3086 newscreen = kmalloc(new_screen_size, GFP_USER);
3087 if (!newscreen)
3088 return -ENOMEM;
3089
3090 + if (vc == sel_cons)
3091 + clear_selection();
3092 +
3093 old_rows = vc->vc_rows;
3094 old_row_size = vc->vc_size_row;
3095
3096 @@ -1176,7 +1181,7 @@ static void csi_J(struct vc_data *vc, int vpar)
3097 break;
3098 case 3: /* erase scroll-back buffer (and whole display) */
3099 scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
3100 - vc->vc_screenbuf_size >> 1);
3101 + vc->vc_screenbuf_size);
3102 set_origin(vc);
3103 if (con_is_visible(vc))
3104 update_screen(vc);
3105 diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
3106 index 053bac9d983c..887be343fcd4 100644
3107 --- a/drivers/usb/chipidea/host.c
3108 +++ b/drivers/usb/chipidea/host.c
3109 @@ -185,6 +185,8 @@ static void host_stop(struct ci_hdrc *ci)
3110
3111 if (hcd) {
3112 usb_remove_hcd(hcd);
3113 + ci->role = CI_ROLE_END;
3114 + synchronize_irq(ci->irq);
3115 usb_put_hcd(hcd);
3116 if (ci->platdata->reg_vbus && !ci_otg_is_fsm_mode(ci) &&
3117 (ci->platdata->flags & CI_HDRC_TURN_VBUS_EARLY_ON))
3118 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3119 index 68544618982e..6443cfba7b55 100644
3120 --- a/drivers/usb/dwc3/gadget.c
3121 +++ b/drivers/usb/dwc3/gadget.c
3122 @@ -3055,7 +3055,7 @@ err3:
3123 kfree(dwc->setup_buf);
3124
3125 err2:
3126 - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3127 + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3128 dwc->ep0_trb, dwc->ep0_trb_addr);
3129
3130 err1:
3131 @@ -3080,7 +3080,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
3132 kfree(dwc->setup_buf);
3133 kfree(dwc->zlp_buf);
3134
3135 - dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
3136 + dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
3137 dwc->ep0_trb, dwc->ep0_trb_addr);
3138
3139 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
3140 diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
3141 index 5f562c1ec795..9b9e71f2c66e 100644
3142 --- a/drivers/usb/gadget/function/u_ether.c
3143 +++ b/drivers/usb/gadget/function/u_ether.c
3144 @@ -587,8 +587,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
3145
3146 /* throttle high/super speed IRQ rate back slightly */
3147 if (gadget_is_dualspeed(dev->gadget))
3148 - req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
3149 - dev->gadget->speed == USB_SPEED_SUPER)
3150 + req->no_interrupt = (((dev->gadget->speed == USB_SPEED_HIGH ||
3151 + dev->gadget->speed == USB_SPEED_SUPER)) &&
3152 + !list_empty(&dev->tx_reqs))
3153 ? ((atomic_read(&dev->tx_qlen) % dev->qmult) != 0)
3154 : 0;
3155
3156 diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
3157 index bb1f6c8f0f01..45bc997d0711 100644
3158 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c
3159 +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
3160 @@ -1978,7 +1978,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
3161 dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
3162 goto err;
3163 }
3164 - ep->ep.name = name;
3165 + ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
3166
3167 ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
3168 ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
3169 diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
3170 index 1700908b84ef..86612ac3fda2 100644
3171 --- a/drivers/usb/host/ohci-hcd.c
3172 +++ b/drivers/usb/host/ohci-hcd.c
3173 @@ -72,7 +72,7 @@
3174 static const char hcd_name [] = "ohci_hcd";
3175
3176 #define STATECHANGE_DELAY msecs_to_jiffies(300)
3177 -#define IO_WATCHDOG_DELAY msecs_to_jiffies(250)
3178 +#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
3179
3180 #include "ohci.h"
3181 #include "pci-quirks.h"
3182 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
3183 index 730b9fd26685..0ef16900efed 100644
3184 --- a/drivers/usb/host/xhci-hub.c
3185 +++ b/drivers/usb/host/xhci-hub.c
3186 @@ -1166,7 +1166,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
3187 xhci_set_link_state(xhci, port_array, wIndex,
3188 XDEV_RESUME);
3189 spin_unlock_irqrestore(&xhci->lock, flags);
3190 - msleep(20);
3191 + msleep(USB_RESUME_TIMEOUT);
3192 spin_lock_irqsave(&xhci->lock, flags);
3193 xhci_set_link_state(xhci, port_array, wIndex,
3194 XDEV_U0);
3195 @@ -1355,6 +1355,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
3196 return 0;
3197 }
3198
3199 +/*
3200 + * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
3201 + * warm reset a USB3 device stuck in polling or compliance mode after resume.
3202 + * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8
3203 + */
3204 +static bool xhci_port_missing_cas_quirk(int port_index,
3205 + __le32 __iomem **port_array)
3206 +{
3207 + u32 portsc;
3208 +
3209 + portsc = readl(port_array[port_index]);
3210 +
3211 + /* if any of these are set we are not stuck */
3212 + if (portsc & (PORT_CONNECT | PORT_CAS))
3213 + return false;
3214 +
3215 + if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) &&
3216 + ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE))
3217 + return false;
3218 +
3219 + /* clear wakeup/change bits, and do a warm port reset */
3220 + portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
3221 + portsc |= PORT_WR;
3222 + writel(portsc, port_array[port_index]);
3223 + /* flush write */
3224 + readl(port_array[port_index]);
3225 + return true;
3226 +}
3227 +
3228 int xhci_bus_resume(struct usb_hcd *hcd)
3229 {
3230 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3231 @@ -1392,6 +1421,14 @@ int xhci_bus_resume(struct usb_hcd *hcd)
3232 u32 temp;
3233
3234 temp = readl(port_array[port_index]);
3235 +
3236 + /* warm reset CAS limited ports stuck in polling/compliance */
3237 + if ((xhci->quirks & XHCI_MISSING_CAS) &&
3238 + (hcd->speed >= HCD_USB3) &&
3239 + xhci_port_missing_cas_quirk(port_index, port_array)) {
3240 + xhci_dbg(xhci, "reset stuck port %d\n", port_index);
3241 + continue;
3242 + }
3243 if (DEV_SUPERSPEED_ANY(temp))
3244 temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS);
3245 else
3246 @@ -1410,7 +1447,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
3247
3248 if (need_usb2_u3_exit) {
3249 spin_unlock_irqrestore(&xhci->lock, flags);
3250 - msleep(20);
3251 + msleep(USB_RESUME_TIMEOUT);
3252 spin_lock_irqsave(&xhci->lock, flags);
3253 }
3254
3255 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3256 index d7b0f97abbad..e96ae80d107e 100644
3257 --- a/drivers/usb/host/xhci-pci.c
3258 +++ b/drivers/usb/host/xhci-pci.c
3259 @@ -45,11 +45,13 @@
3260
3261 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
3262 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
3263 +#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
3264 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5
3265 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
3266 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
3267 #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
3268 #define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
3269 +#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
3270
3271 static const char hcd_name[] = "xhci_hcd";
3272
3273 @@ -153,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3274 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
3275 }
3276 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3277 - pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
3278 + (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
3279 + pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
3280 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
3281 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
3282 }
3283 @@ -169,6 +172,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3284 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
3285 xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
3286 }
3287 + if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3288 + (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
3289 + pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
3290 + xhci->quirks |= XHCI_MISSING_CAS;
3291 +
3292 if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3293 pdev->device == PCI_DEVICE_ID_EJ168) {
3294 xhci->quirks |= XHCI_RESET_ON_RESUME;
3295 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3296 index b2c1dc5dc0f3..f945380035d0 100644
3297 --- a/drivers/usb/host/xhci.h
3298 +++ b/drivers/usb/host/xhci.h
3299 @@ -314,6 +314,8 @@ struct xhci_op_regs {
3300 #define XDEV_U2 (0x2 << 5)
3301 #define XDEV_U3 (0x3 << 5)
3302 #define XDEV_INACTIVE (0x6 << 5)
3303 +#define XDEV_POLLING (0x7 << 5)
3304 +#define XDEV_COMP_MODE (0xa << 5)
3305 #define XDEV_RESUME (0xf << 5)
3306 /* true: port has power (see HCC_PPC) */
3307 #define PORT_POWER (1 << 9)
3308 @@ -1653,6 +1655,7 @@ struct xhci_hcd {
3309 #define XHCI_MTK_HOST (1 << 21)
3310 #define XHCI_SSIC_PORT_UNUSED (1 << 22)
3311 #define XHCI_NO_64BIT_SUPPORT (1 << 23)
3312 +#define XHCI_MISSING_CAS (1 << 24)
3313 unsigned int num_active_eps;
3314 unsigned int limit_active_eps;
3315 /* There are two roothubs to keep track of bus suspend info for */
3316 diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
3317 index 0b4cec940386..dae92de92592 100644
3318 --- a/drivers/usb/musb/omap2430.c
3319 +++ b/drivers/usb/musb/omap2430.c
3320 @@ -337,6 +337,7 @@ static int omap2430_musb_init(struct musb *musb)
3321 }
3322 musb->isr = omap2430_musb_interrupt;
3323 phy_init(musb->phy);
3324 + phy_power_on(musb->phy);
3325
3326 l = musb_readl(musb->mregs, OTG_INTERFSEL);
3327
3328 @@ -373,8 +374,6 @@ static void omap2430_musb_enable(struct musb *musb)
3329 struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
3330 struct omap_musb_board_data *data = pdata->board_data;
3331
3332 - if (!WARN_ON(!musb->phy))
3333 - phy_power_on(musb->phy);
3334
3335 omap2430_set_power(musb, true, glue->cable_connected);
3336
3337 @@ -413,9 +412,6 @@ static void omap2430_musb_disable(struct musb *musb)
3338 struct device *dev = musb->controller;
3339 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
3340
3341 - if (!WARN_ON(!musb->phy))
3342 - phy_power_off(musb->phy);
3343 -
3344 if (glue->status != MUSB_UNKNOWN)
3345 omap_control_usb_set_mode(glue->control_otghs,
3346 USB_MODE_DISCONNECT);
3347 @@ -429,6 +425,7 @@ static int omap2430_musb_exit(struct musb *musb)
3348 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
3349
3350 omap2430_low_level_exit(musb);
3351 + phy_power_off(musb->phy);
3352 phy_exit(musb->phy);
3353 musb->phy = NULL;
3354 cancel_work_sync(&glue->omap_musb_mailbox_work);
3355 diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
3356 index 1d70add926f0..d544b331c9f2 100644
3357 --- a/drivers/usb/renesas_usbhs/rcar3.c
3358 +++ b/drivers/usb/renesas_usbhs/rcar3.c
3359 @@ -9,6 +9,7 @@
3360 *
3361 */
3362
3363 +#include <linux/delay.h>
3364 #include <linux/io.h>
3365 #include "common.h"
3366 #include "rcar3.h"
3367 @@ -35,10 +36,13 @@ static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
3368
3369 usbhs_write32(priv, UGCTRL2, UGCTRL2_RESERVED_3 | UGCTRL2_USB0SEL_OTG);
3370
3371 - if (enable)
3372 + if (enable) {
3373 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
3374 - else
3375 + /* The controller on R-Car Gen3 needs to wait up to 45 usec */
3376 + udelay(45);
3377 + } else {
3378 usbhs_bset(priv, LPSTS, LPSTS_SUSPM, 0);
3379 + }
3380
3381 return 0;
3382 }
3383 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
3384 index 54a4de0efdba..f61477bed3a8 100644
3385 --- a/drivers/usb/serial/cp210x.c
3386 +++ b/drivers/usb/serial/cp210x.c
3387 @@ -1077,7 +1077,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
3388 u8 control;
3389 int result;
3390
3391 - cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
3392 + result = cp210x_read_u8_reg(port, CP210X_GET_MDMSTS, &control);
3393 + if (result)
3394 + return result;
3395
3396 result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
3397 |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
3398 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
3399 index b2d767e743fc..0ff7f38d7800 100644
3400 --- a/drivers/usb/serial/ftdi_sio.c
3401 +++ b/drivers/usb/serial/ftdi_sio.c
3402 @@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
3403 /* ekey Devices */
3404 { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
3405 /* Infineon Devices */
3406 - { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
3407 + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
3408 + { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
3409 /* GE Healthcare devices */
3410 { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
3411 /* Active Research (Actisense) devices */
3412 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
3413 index f87a938cf005..21011c0a4c64 100644
3414 --- a/drivers/usb/serial/ftdi_sio_ids.h
3415 +++ b/drivers/usb/serial/ftdi_sio_ids.h
3416 @@ -626,8 +626,9 @@
3417 /*
3418 * Infineon Technologies
3419 */
3420 -#define INFINEON_VID 0x058b
3421 -#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
3422 +#define INFINEON_VID 0x058b
3423 +#define INFINEON_TRIBOARD_TC1798_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
3424 +#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
3425
3426 /*
3427 * Acton Research Corp.
3428 diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
3429 index d213cf44a7e4..4a037b4a79cf 100644
3430 --- a/drivers/usb/serial/usb-serial.c
3431 +++ b/drivers/usb/serial/usb-serial.c
3432 @@ -1078,7 +1078,8 @@ static int usb_serial_probe(struct usb_interface *interface,
3433
3434 serial->disconnected = 0;
3435
3436 - usb_serial_console_init(serial->port[0]->minor);
3437 + if (num_ports > 0)
3438 + usb_serial_console_init(serial->port[0]->minor);
3439 exit:
3440 module_put(type->driver.owner);
3441 return 0;
3442 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
3443 index 9e4800a4e3d1..951dd93f89b2 100644
3444 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
3445 +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
3446 @@ -5348,7 +5348,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
3447
3448 dsi->phy_base = devm_ioremap(&dsidev->dev, res->start,
3449 resource_size(res));
3450 - if (!dsi->proto_base) {
3451 + if (!dsi->phy_base) {
3452 DSSERR("can't ioremap DSI PHY\n");
3453 return -ENOMEM;
3454 }
3455 @@ -5368,7 +5368,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
3456
3457 dsi->pll_base = devm_ioremap(&dsidev->dev, res->start,
3458 resource_size(res));
3459 - if (!dsi->proto_base) {
3460 + if (!dsi->pll_base) {
3461 DSSERR("can't ioremap DSI PLL\n");
3462 return -ENOMEM;
3463 }
3464 diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
3465 index 2c0487f4f805..ed41fdb42d13 100644
3466 --- a/drivers/video/fbdev/pxafb.c
3467 +++ b/drivers/video/fbdev/pxafb.c
3468 @@ -2125,7 +2125,7 @@ static int of_get_pxafb_display(struct device *dev, struct device_node *disp,
3469
3470 timings = of_get_display_timings(disp);
3471 if (!timings)
3472 - goto out;
3473 + return -EINVAL;
3474
3475 ret = -ENOMEM;
3476 info->modes = kmalloc_array(timings->num_timings,
3477 diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
3478 index 8c4e61783441..6d9e5173d5fa 100644
3479 --- a/drivers/virtio/virtio_pci_legacy.c
3480 +++ b/drivers/virtio/virtio_pci_legacy.c
3481 @@ -212,10 +212,18 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
3482 return -ENODEV;
3483 }
3484
3485 - rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
3486 - if (rc)
3487 - rc = dma_set_mask_and_coherent(&pci_dev->dev,
3488 - DMA_BIT_MASK(32));
3489 + rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
3490 + if (rc) {
3491 + rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
3492 + } else {
3493 + /*
3494 + * The virtio ring base address is expressed as a 32-bit PFN,
3495 + * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
3496 + */
3497 + dma_set_coherent_mask(&pci_dev->dev,
3498 + DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
3499 + }
3500 +
3501 if (rc)
3502 dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
3503
3504 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
3505 index ed9c9eeedfe5..6b2cd922d322 100644
3506 --- a/drivers/virtio/virtio_ring.c
3507 +++ b/drivers/virtio/virtio_ring.c
3508 @@ -732,7 +732,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
3509
3510 if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
3511 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
3512 - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3513 + if (!vq->event)
3514 + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3515 }
3516
3517 }
3518 @@ -764,7 +765,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
3519 * entry. Always do both to keep code simple. */
3520 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
3521 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
3522 - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3523 + if (!vq->event)
3524 + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3525 }
3526 vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
3527 END_USE(vq);
3528 @@ -832,10 +834,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
3529 * more to do. */
3530 /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
3531 * either clear the flags bit or point the event index at the next
3532 - * entry. Always do both to keep code simple. */
3533 + * entry. Always update the event index to keep code simple. */
3534 if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
3535 vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
3536 - vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3537 + if (!vq->event)
3538 + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
3539 }
3540 /* TODO: tune this threshold */
3541 bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
3542 @@ -953,7 +956,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
3543 /* No callback? Tell other side not to bother us. */
3544 if (!callback) {
3545 vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
3546 - vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
3547 + if (!vq->event)
3548 + vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
3549 }
3550
3551 /* Put everything in free lists. */
3552 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3553 index e6811c42e41e..bc1a004d4264 100644
3554 --- a/fs/btrfs/inode.c
3555 +++ b/fs/btrfs/inode.c
3556 @@ -8915,9 +8915,14 @@ again:
3557 * So even we call qgroup_free_data(), it won't decrease reserved
3558 * space.
3559 * 2) Not written to disk
3560 - * This means the reserved space should be freed here.
3561 + * This means the reserved space should be freed here. However,
3562 + * if a truncate invalidates the page (by clearing PageDirty)
3563 + * and the page is accounted for while allocating extent
3564 + * in btrfs_check_data_free_space() we let delayed_ref to
3565 + * free the entire extent.
3566 */
3567 - btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
3568 + if (PageDirty(page))
3569 + btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
3570 if (!inode_evicting) {
3571 clear_extent_bit(tree, page_start, page_end,
3572 EXTENT_LOCKED | EXTENT_DIRTY |
3573 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
3574 index ef9c55bc7907..90e1198bc63d 100644
3575 --- a/fs/btrfs/tree-log.c
3576 +++ b/fs/btrfs/tree-log.c
3577 @@ -2713,14 +2713,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3578 int index, int error)
3579 {
3580 struct btrfs_log_ctx *ctx;
3581 + struct btrfs_log_ctx *safe;
3582
3583 - if (!error) {
3584 - INIT_LIST_HEAD(&root->log_ctxs[index]);
3585 - return;
3586 - }
3587 -
3588 - list_for_each_entry(ctx, &root->log_ctxs[index], list)
3589 + list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3590 + list_del_init(&ctx->list);
3591 ctx->log_ret = error;
3592 + }
3593
3594 INIT_LIST_HEAD(&root->log_ctxs[index]);
3595 }
3596 @@ -2961,13 +2959,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
3597 mutex_unlock(&root->log_mutex);
3598
3599 out_wake_log_root:
3600 - /*
3601 - * We needn't get log_mutex here because we are sure all
3602 - * the other tasks are blocked.
3603 - */
3604 + mutex_lock(&log_root_tree->log_mutex);
3605 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3606
3607 - mutex_lock(&log_root_tree->log_mutex);
3608 log_root_tree->log_transid_committed++;
3609 atomic_set(&log_root_tree->log_commit[index2], 0);
3610 mutex_unlock(&log_root_tree->log_mutex);
3611 @@ -2978,10 +2972,8 @@ out_wake_log_root:
3612 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
3613 wake_up(&log_root_tree->log_commit_wait[index2]);
3614 out:
3615 - /* See above. */
3616 - btrfs_remove_all_log_ctxs(root, index1, ret);
3617 -
3618 mutex_lock(&root->log_mutex);
3619 + btrfs_remove_all_log_ctxs(root, index1, ret);
3620 root->log_transid_committed++;
3621 atomic_set(&root->log_commit[index1], 0);
3622 mutex_unlock(&root->log_mutex);
3623 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3624 index a204d7e109d4..0fe31b4b110d 100644
3625 --- a/fs/nfsd/nfs4state.c
3626 +++ b/fs/nfsd/nfs4state.c
3627 @@ -1147,9 +1147,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
3628
3629 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3630 {
3631 - struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3632 -
3633 - lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
3634 + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
3635
3636 list_del_init(&stp->st_locks);
3637 nfs4_unhash_stid(&stp->st_stid);
3638 @@ -1158,12 +1156,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
3639
3640 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
3641 {
3642 - struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
3643 + struct nfs4_client *clp = stp->st_stid.sc_client;
3644 bool unhashed;
3645
3646 - spin_lock(&oo->oo_owner.so_client->cl_lock);
3647 + spin_lock(&clp->cl_lock);
3648 unhashed = unhash_lock_stateid(stp);
3649 - spin_unlock(&oo->oo_owner.so_client->cl_lock);
3650 + spin_unlock(&clp->cl_lock);
3651 if (unhashed)
3652 nfs4_put_stid(&stp->st_stid);
3653 }
3654 diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
3655 index abadbc30e013..767377e522c6 100644
3656 --- a/fs/overlayfs/copy_up.c
3657 +++ b/fs/overlayfs/copy_up.c
3658 @@ -171,6 +171,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
3659 len -= bytes;
3660 }
3661
3662 + if (!error)
3663 + error = vfs_fsync(new_file, 0);
3664 fput(new_file);
3665 out_fput:
3666 fput(old_file);
3667 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
3668 index c75625c1efa3..cf2bfeb1b385 100644
3669 --- a/fs/overlayfs/inode.c
3670 +++ b/fs/overlayfs/inode.c
3671 @@ -294,9 +294,6 @@ struct posix_acl *ovl_get_acl(struct inode *inode, int type)
3672 if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !IS_POSIXACL(realinode))
3673 return NULL;
3674
3675 - if (!realinode->i_op->get_acl)
3676 - return NULL;
3677 -
3678 old_cred = ovl_override_creds(inode->i_sb);
3679 acl = get_acl(realinode, type);
3680 revert_creds(old_cred);
3681 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
3682 index e2a94a26767b..a78415d77434 100644
3683 --- a/fs/overlayfs/super.c
3684 +++ b/fs/overlayfs/super.c
3685 @@ -1026,6 +1026,21 @@ ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
3686
3687 posix_acl_release(acl);
3688
3689 + /*
3690 + * Check if sgid bit needs to be cleared (actual setacl operation will
3691 + * be done with mounter's capabilities and so that won't do it for us).
3692 + */
3693 + if (unlikely(inode->i_mode & S_ISGID) &&
3694 + handler->flags == ACL_TYPE_ACCESS &&
3695 + !in_group_p(inode->i_gid) &&
3696 + !capable_wrt_inode_uidgid(inode, CAP_FSETID)) {
3697 + struct iattr iattr = { .ia_valid = ATTR_KILL_SGID };
3698 +
3699 + err = ovl_setattr(dentry, &iattr);
3700 + if (err)
3701 + return err;
3702 + }
3703 +
3704 err = ovl_xattr_set(dentry, handler->name, value, size, flags);
3705 if (!err)
3706 ovl_copyattr(ovl_inode_real(inode, NULL), inode);
3707 diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
3708 index 4b86d3a738e1..3b27145f985f 100644
3709 --- a/fs/ubifs/dir.c
3710 +++ b/fs/ubifs/dir.c
3711 @@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
3712 */
3713 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
3714 {
3715 - int err;
3716 + int err = 0;
3717 struct qstr nm;
3718 union ubifs_key key;
3719 struct ubifs_dent_node *dent;
3720 @@ -452,14 +452,20 @@ out:
3721 kfree(file->private_data);
3722 file->private_data = NULL;
3723
3724 - if (err != -ENOENT) {
3725 + if (err != -ENOENT)
3726 ubifs_err(c, "cannot find next direntry, error %d", err);
3727 - return err;
3728 - }
3729 + else
3730 + /*
3731 + * -ENOENT is a non-fatal error in this context, the TNC uses
3732 + * it to indicate that the cursor moved past the current directory
3733 + * and readdir() has to stop.
3734 + */
3735 + err = 0;
3736 +
3737
3738 /* 2 is a special value indicating that there are no more direntries */
3739 ctx->pos = 2;
3740 - return 0;
3741 + return err;
3742 }
3743
3744 /* Free saved readdir() state when the directory is closed */
3745 diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
3746 index 3cc3cf767474..ac9a003dd29a 100644
3747 --- a/fs/xfs/libxfs/xfs_dquot_buf.c
3748 +++ b/fs/xfs/libxfs/xfs_dquot_buf.c
3749 @@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
3750 if (mp->m_quotainfo)
3751 ndquots = mp->m_quotainfo->qi_dqperchunk;
3752 else
3753 - ndquots = xfs_calc_dquots_per_chunk(
3754 - XFS_BB_TO_FSB(mp, bp->b_length));
3755 + ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
3756
3757 for (i = 0; i < ndquots; i++, d++) {
3758 if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
3759 diff --git a/include/linux/pwm.h b/include/linux/pwm.h
3760 index f1bbae014889..2c6c5114c089 100644
3761 --- a/include/linux/pwm.h
3762 +++ b/include/linux/pwm.h
3763 @@ -641,6 +641,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
3764 #ifdef CONFIG_PWM_SYSFS
3765 void pwmchip_sysfs_export(struct pwm_chip *chip);
3766 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
3767 +void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
3768 #else
3769 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
3770 {
3771 @@ -649,6 +650,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
3772 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
3773 {
3774 }
3775 +
3776 +static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
3777 +{
3778 +}
3779 #endif /* CONFIG_PWM_SYSFS */
3780
3781 #endif /* __LINUX_PWM_H */
3782 diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
3783 index 185f8ea2702f..407ca0d7a938 100644
3784 --- a/include/uapi/linux/Kbuild
3785 +++ b/include/uapi/linux/Kbuild
3786 @@ -396,6 +396,7 @@ header-y += string.h
3787 header-y += suspend_ioctls.h
3788 header-y += swab.h
3789 header-y += synclink.h
3790 +header-y += sync_file.h
3791 header-y += sysctl.h
3792 header-y += sysinfo.h
3793 header-y += target_core_user.h
3794 diff --git a/kernel/time/timer.c b/kernel/time/timer.c
3795 index 32bf6f75a8fe..96db64bdedbb 100644
3796 --- a/kernel/time/timer.c
3797 +++ b/kernel/time/timer.c
3798 @@ -878,7 +878,7 @@ static inline struct timer_base *get_timer_base(u32 tflags)
3799
3800 #ifdef CONFIG_NO_HZ_COMMON
3801 static inline struct timer_base *
3802 -__get_target_base(struct timer_base *base, unsigned tflags)
3803 +get_target_base(struct timer_base *base, unsigned tflags)
3804 {
3805 #ifdef CONFIG_SMP
3806 if ((tflags & TIMER_PINNED) || !base->migration_enabled)
3807 @@ -891,25 +891,27 @@ __get_target_base(struct timer_base *base, unsigned tflags)
3808
3809 static inline void forward_timer_base(struct timer_base *base)
3810 {
3811 + unsigned long jnow = READ_ONCE(jiffies);
3812 +
3813 /*
3814 * We only forward the base when it's idle and we have a delta between
3815 * base clock and jiffies.
3816 */
3817 - if (!base->is_idle || (long) (jiffies - base->clk) < 2)
3818 + if (!base->is_idle || (long) (jnow - base->clk) < 2)
3819 return;
3820
3821 /*
3822 * If the next expiry value is > jiffies, then we fast forward to
3823 * jiffies otherwise we forward to the next expiry value.
3824 */
3825 - if (time_after(base->next_expiry, jiffies))
3826 - base->clk = jiffies;
3827 + if (time_after(base->next_expiry, jnow))
3828 + base->clk = jnow;
3829 else
3830 base->clk = base->next_expiry;
3831 }
3832 #else
3833 static inline struct timer_base *
3834 -__get_target_base(struct timer_base *base, unsigned tflags)
3835 +get_target_base(struct timer_base *base, unsigned tflags)
3836 {
3837 return get_timer_this_cpu_base(tflags);
3838 }
3839 @@ -917,14 +919,6 @@ __get_target_base(struct timer_base *base, unsigned tflags)
3840 static inline void forward_timer_base(struct timer_base *base) { }
3841 #endif
3842
3843 -static inline struct timer_base *
3844 -get_target_base(struct timer_base *base, unsigned tflags)
3845 -{
3846 - struct timer_base *target = __get_target_base(base, tflags);
3847 -
3848 - forward_timer_base(target);
3849 - return target;
3850 -}
3851
3852 /*
3853 * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
3854 @@ -943,7 +937,14 @@ static struct timer_base *lock_timer_base(struct timer_list *timer,
3855 {
3856 for (;;) {
3857 struct timer_base *base;
3858 - u32 tf = timer->flags;
3859 + u32 tf;
3860 +
3861 + /*
3862 + * We need to use READ_ONCE() here, otherwise the compiler
3863 + * might re-read @tf between the check for TIMER_MIGRATING
3864 + * and spin_lock().
3865 + */
3866 + tf = READ_ONCE(timer->flags);
3867
3868 if (!(tf & TIMER_MIGRATING)) {
3869 base = get_timer_base(tf);
3870 @@ -964,6 +965,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
3871 unsigned long clk = 0, flags;
3872 int ret = 0;
3873
3874 + BUG_ON(!timer->function);
3875 +
3876 /*
3877 * This is a common optimization triggered by the networking code - if
3878 * the timer is re-modified to have the same timeout or ends up in the
3879 @@ -972,13 +975,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
3880 if (timer_pending(timer)) {
3881 if (timer->expires == expires)
3882 return 1;
3883 +
3884 /*
3885 - * Take the current timer_jiffies of base, but without holding
3886 - * the lock!
3887 + * We lock timer base and calculate the bucket index right
3888 + * here. If the timer ends up in the same bucket, then we
3889 + * just update the expiry time and avoid the whole
3890 + * dequeue/enqueue dance.
3891 */
3892 - base = get_timer_base(timer->flags);
3893 - clk = base->clk;
3894 + base = lock_timer_base(timer, &flags);
3895
3896 + clk = base->clk;
3897 idx = calc_wheel_index(expires, clk);
3898
3899 /*
3900 @@ -988,14 +994,14 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
3901 */
3902 if (idx == timer_get_idx(timer)) {
3903 timer->expires = expires;
3904 - return 1;
3905 + ret = 1;
3906 + goto out_unlock;
3907 }
3908 + } else {
3909 + base = lock_timer_base(timer, &flags);
3910 }
3911
3912 timer_stats_timer_set_start_info(timer);
3913 - BUG_ON(!timer->function);
3914 -
3915 - base = lock_timer_base(timer, &flags);
3916
3917 ret = detach_if_pending(timer, base, false);
3918 if (!ret && pending_only)
3919 @@ -1025,12 +1031,16 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
3920 }
3921 }
3922
3923 + /* Try to forward a stale timer base clock */
3924 + forward_timer_base(base);
3925 +
3926 timer->expires = expires;
3927 /*
3928 * If 'idx' was calculated above and the base time did not advance
3929 - * between calculating 'idx' and taking the lock, only enqueue_timer()
3930 - * and trigger_dyntick_cpu() is required. Otherwise we need to
3931 - * (re)calculate the wheel index via internal_add_timer().
3932 + * between calculating 'idx' and possibly switching the base, only
3933 + * enqueue_timer() and trigger_dyntick_cpu() is required. Otherwise
3934 + * we need to (re)calculate the wheel index via
3935 + * internal_add_timer().
3936 */
3937 if (idx != UINT_MAX && clk == base->clk) {
3938 enqueue_timer(base, timer, idx);
3939 @@ -1510,12 +1520,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
3940 is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
3941 base->next_expiry = nextevt;
3942 /*
3943 - * We have a fresh next event. Check whether we can forward the base:
3944 + * We have a fresh next event. Check whether we can forward the
3945 + * base. We can only do that when @basej is past base->clk
3946 + * otherwise we might rewind base->clk.
3947 */
3948 - if (time_after(nextevt, jiffies))
3949 - base->clk = jiffies;
3950 - else if (time_after(nextevt, base->clk))
3951 - base->clk = nextevt;
3952 + if (time_after(basej, base->clk)) {
3953 + if (time_after(nextevt, basej))
3954 + base->clk = basej;
3955 + else if (time_after(nextevt, base->clk))
3956 + base->clk = nextevt;
3957 + }
3958
3959 if (time_before_eq(nextevt, basej)) {
3960 expires = basem;
3961 diff --git a/mm/list_lru.c b/mm/list_lru.c
3962 index 1d05cb9d363d..234676e31edd 100644
3963 --- a/mm/list_lru.c
3964 +++ b/mm/list_lru.c
3965 @@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
3966 err = memcg_init_list_lru(lru, memcg_aware);
3967 if (err) {
3968 kfree(lru->node);
3969 + /* Do this so a list_lru_destroy() doesn't crash: */
3970 + lru->node = NULL;
3971 goto out;
3972 }
3973
3974 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
3975 index 4be518d4e68a..dddead146459 100644
3976 --- a/mm/memcontrol.c
3977 +++ b/mm/memcontrol.c
3978 @@ -1947,6 +1947,15 @@ retry:
3979 current->flags & PF_EXITING))
3980 goto force;
3981
3982 + /*
3983 + * Prevent unbounded recursion when reclaim operations need to
3984 + * allocate memory. This might exceed the limits temporarily,
3985 + * but we prefer facilitating memory reclaim and getting back
3986 + * under the limit over triggering OOM kills in these cases.
3987 + */
3988 + if (unlikely(current->flags & PF_MEMALLOC))
3989 + goto force;
3990 +
3991 if (unlikely(task_in_memcg_oom(current)))
3992 goto nomem;
3993
3994 diff --git a/mm/slab.c b/mm/slab.c
3995 index b67271024135..525a911985a2 100644
3996 --- a/mm/slab.c
3997 +++ b/mm/slab.c
3998 @@ -964,7 +964,7 @@ static int setup_kmem_cache_node(struct kmem_cache *cachep,
3999 * guaranteed to be valid until irq is re-enabled, because it will be
4000 * freed after synchronize_sched().
4001 */
4002 - if (force_change)
4003 + if (old_shared && force_change)
4004 synchronize_sched();
4005
4006 fail:
4007 diff --git a/mm/vmscan.c b/mm/vmscan.c
4008 index 0fe8b7113868..ba0fad78e5d4 100644
4009 --- a/mm/vmscan.c
4010 +++ b/mm/vmscan.c
4011 @@ -3048,7 +3048,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
4012 sc.gfp_mask,
4013 sc.reclaim_idx);
4014
4015 + current->flags |= PF_MEMALLOC;
4016 nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
4017 + current->flags &= ~PF_MEMALLOC;
4018
4019 trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
4020
4021 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4022 index 9dce3b157908..59a96034979b 100644
4023 --- a/net/mac80211/rx.c
4024 +++ b/net/mac80211/rx.c
4025 @@ -2253,16 +2253,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
4026 if (!(status->rx_flags & IEEE80211_RX_AMSDU))
4027 return RX_CONTINUE;
4028
4029 - if (ieee80211_has_a4(hdr->frame_control) &&
4030 - rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4031 - !rx->sdata->u.vlan.sta)
4032 - return RX_DROP_UNUSABLE;
4033 + if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
4034 + switch (rx->sdata->vif.type) {
4035 + case NL80211_IFTYPE_AP_VLAN:
4036 + if (!rx->sdata->u.vlan.sta)
4037 + return RX_DROP_UNUSABLE;
4038 + break;
4039 + case NL80211_IFTYPE_STATION:
4040 + if (!rx->sdata->u.mgd.use_4addr)
4041 + return RX_DROP_UNUSABLE;
4042 + break;
4043 + default:
4044 + return RX_DROP_UNUSABLE;
4045 + }
4046 + }
4047
4048 - if (is_multicast_ether_addr(hdr->addr1) &&
4049 - ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
4050 - rx->sdata->u.vlan.sta) ||
4051 - (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
4052 - rx->sdata->u.mgd.use_4addr)))
4053 + if (is_multicast_ether_addr(hdr->addr1))
4054 return RX_DROP_UNUSABLE;
4055
4056 skb->dev = dev;
4057 diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
4058 index 018eed7e1ff1..8668a5c18dc3 100644
4059 --- a/net/netfilter/xt_NFLOG.c
4060 +++ b/net/netfilter/xt_NFLOG.c
4061 @@ -32,6 +32,7 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
4062 li.u.ulog.copy_len = info->len;
4063 li.u.ulog.group = info->group;
4064 li.u.ulog.qthreshold = info->threshold;
4065 + li.u.ulog.flags = 0;
4066
4067 if (info->flags & XT_NFLOG_F_COPY_LEN)
4068 li.u.ulog.flags |= NF_LOG_F_COPY_LEN;
4069 diff --git a/security/keys/Kconfig b/security/keys/Kconfig
4070 index f826e8739023..d942c7c2bc0a 100644
4071 --- a/security/keys/Kconfig
4072 +++ b/security/keys/Kconfig
4073 @@ -41,7 +41,7 @@ config BIG_KEYS
4074 bool "Large payload keys"
4075 depends on KEYS
4076 depends on TMPFS
4077 - select CRYPTO
4078 + depends on (CRYPTO_ANSI_CPRNG = y || CRYPTO_DRBG = y)
4079 select CRYPTO_AES
4080 select CRYPTO_ECB
4081 select CRYPTO_RNG
4082 diff --git a/security/keys/big_key.c b/security/keys/big_key.c
4083 index c0b3030b5634..835c1ab30d01 100644
4084 --- a/security/keys/big_key.c
4085 +++ b/security/keys/big_key.c
4086 @@ -9,6 +9,7 @@
4087 * 2 of the Licence, or (at your option) any later version.
4088 */
4089
4090 +#define pr_fmt(fmt) "big_key: "fmt
4091 #include <linux/init.h>
4092 #include <linux/seq_file.h>
4093 #include <linux/file.h>
4094 @@ -341,44 +342,48 @@ error:
4095 */
4096 static int __init big_key_init(void)
4097 {
4098 - return register_key_type(&key_type_big_key);
4099 -}
4100 -
4101 -/*
4102 - * Initialize big_key crypto and RNG algorithms
4103 - */
4104 -static int __init big_key_crypto_init(void)
4105 -{
4106 - int ret = -EINVAL;
4107 + struct crypto_skcipher *cipher;
4108 + struct crypto_rng *rng;
4109 + int ret;
4110
4111 - /* init RNG */
4112 - big_key_rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
4113 - if (IS_ERR(big_key_rng)) {
4114 - big_key_rng = NULL;
4115 - return -EFAULT;
4116 + rng = crypto_alloc_rng(big_key_rng_name, 0, 0);
4117 + if (IS_ERR(rng)) {
4118 + pr_err("Can't alloc rng: %ld\n", PTR_ERR(rng));
4119 + return PTR_ERR(rng);
4120 }
4121
4122 + big_key_rng = rng;
4123 +
4124 /* seed RNG */
4125 - ret = crypto_rng_reset(big_key_rng, NULL, crypto_rng_seedsize(big_key_rng));
4126 - if (ret)
4127 - goto error;
4128 + ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
4129 + if (ret) {
4130 + pr_err("Can't reset rng: %d\n", ret);
4131 + goto error_rng;
4132 + }
4133
4134 /* init block cipher */
4135 - big_key_skcipher = crypto_alloc_skcipher(big_key_alg_name,
4136 - 0, CRYPTO_ALG_ASYNC);
4137 - if (IS_ERR(big_key_skcipher)) {
4138 - big_key_skcipher = NULL;
4139 - ret = -EFAULT;
4140 - goto error;
4141 + cipher = crypto_alloc_skcipher(big_key_alg_name, 0, CRYPTO_ALG_ASYNC);
4142 + if (IS_ERR(cipher)) {
4143 + ret = PTR_ERR(cipher);
4144 + pr_err("Can't alloc crypto: %d\n", ret);
4145 + goto error_rng;
4146 + }
4147 +
4148 + big_key_skcipher = cipher;
4149 +
4150 + ret = register_key_type(&key_type_big_key);
4151 + if (ret < 0) {
4152 + pr_err("Can't register type: %d\n", ret);
4153 + goto error_cipher;
4154 }
4155
4156 return 0;
4157
4158 -error:
4159 +error_cipher:
4160 + crypto_free_skcipher(big_key_skcipher);
4161 +error_rng:
4162 crypto_free_rng(big_key_rng);
4163 - big_key_rng = NULL;
4164 return ret;
4165 }
4166
4167 -device_initcall(big_key_init);
4168 -late_initcall(big_key_crypto_init);
4169 +late_initcall(big_key_init);
4170 diff --git a/security/keys/proc.c b/security/keys/proc.c
4171 index f0611a6368cd..b9f531c9e4fa 100644
4172 --- a/security/keys/proc.c
4173 +++ b/security/keys/proc.c
4174 @@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
4175 struct timespec now;
4176 unsigned long timo;
4177 key_ref_t key_ref, skey_ref;
4178 - char xbuf[12];
4179 + char xbuf[16];
4180 int rc;
4181
4182 struct keyring_search_context ctx = {
4183 diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
4184 index dcc102813aef..37d9cfbc29f9 100644
4185 --- a/sound/core/seq/seq_timer.c
4186 +++ b/sound/core/seq/seq_timer.c
4187 @@ -448,8 +448,8 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
4188
4189 ktime_get_ts64(&tm);
4190 tm = timespec64_sub(tm, tmr->last_update);
4191 - cur_time.tv_nsec = tm.tv_nsec;
4192 - cur_time.tv_sec = tm.tv_sec;
4193 + cur_time.tv_nsec += tm.tv_nsec;
4194 + cur_time.tv_sec += tm.tv_sec;
4195 snd_seq_sanity_real_time(&cur_time);
4196 }
4197 spin_unlock_irqrestore(&tmr->lock, flags);
4198 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4199 index 160c7f713722..487fcbf9473e 100644
4200 --- a/sound/pci/hda/hda_intel.c
4201 +++ b/sound/pci/hda/hda_intel.c
4202 @@ -340,8 +340,7 @@ enum {
4203
4204 /* quirks for Nvidia */
4205 #define AZX_DCAPS_PRESET_NVIDIA \
4206 - (AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
4207 - AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
4208 + (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
4209 AZX_DCAPS_SNOOP_TYPE(NVIDIA))
4210
4211 #define AZX_DCAPS_PRESET_CTHDA \
4212 @@ -1699,6 +1698,10 @@ static int azx_first_init(struct azx *chip)
4213 }
4214 }
4215
4216 + /* NVidia hardware normally only supports up to 40 bits of DMA */
4217 + if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
4218 + dma_bits = 40;
4219 +
4220 /* disable 64bit DMA address on some devices */
4221 if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
4222 dev_dbg(card->dev, "Disabling 64bit DMA\n");
4223 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4224 index bd481ac23faf..26e866f65314 100644
4225 --- a/sound/pci/hda/patch_realtek.c
4226 +++ b/sound/pci/hda/patch_realtek.c
4227 @@ -5809,8 +5809,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4228 #define ALC295_STANDARD_PINS \
4229 {0x12, 0xb7a60130}, \
4230 {0x14, 0x90170110}, \
4231 - {0x17, 0x21014020}, \
4232 - {0x18, 0x21a19030}, \
4233 {0x21, 0x04211020}
4234
4235 #define ALC298_STANDARD_PINS \
4236 @@ -5857,11 +5855,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4237 {0x1b, 0x02011020},
4238 {0x21, 0x0221101f}),
4239 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4240 + {0x14, 0x90170110},
4241 + {0x1b, 0x01011020},
4242 + {0x21, 0x0221101f}),
4243 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4244 {0x14, 0x90170130},
4245 {0x1b, 0x01014020},
4246 {0x21, 0x0221103f}),
4247 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4248 {0x14, 0x90170130},
4249 + {0x1b, 0x01011020},
4250 + {0x21, 0x0221103f}),
4251 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4252 + {0x14, 0x90170130},
4253 {0x1b, 0x02011020},
4254 {0x21, 0x0221103f}),
4255 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4256 @@ -6037,7 +6043,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4257 ALC292_STANDARD_PINS,
4258 {0x13, 0x90a60140}),
4259 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
4260 - ALC295_STANDARD_PINS),
4261 + ALC295_STANDARD_PINS,
4262 + {0x17, 0x21014020},
4263 + {0x18, 0x21a19030}),
4264 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
4265 + ALC295_STANDARD_PINS,
4266 + {0x17, 0x21014040},
4267 + {0x18, 0x21a19050}),
4268 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
4269 ALC298_STANDARD_PINS,
4270 {0x17, 0x90170110}),
4271 @@ -6611,6 +6623,7 @@ enum {
4272 ALC891_FIXUP_HEADSET_MODE,
4273 ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
4274 ALC662_FIXUP_ACER_VERITON,
4275 + ALC892_FIXUP_ASROCK_MOBO,
4276 };
4277
4278 static const struct hda_fixup alc662_fixups[] = {
4279 @@ -6887,6 +6900,16 @@ static const struct hda_fixup alc662_fixups[] = {
4280 { }
4281 }
4282 },
4283 + [ALC892_FIXUP_ASROCK_MOBO] = {
4284 + .type = HDA_FIXUP_PINS,
4285 + .v.pins = (const struct hda_pintbl[]) {
4286 + { 0x15, 0x40f000f0 }, /* disabled */
4287 + { 0x16, 0x40f000f0 }, /* disabled */
4288 + { 0x18, 0x01014011 }, /* LO */
4289 + { 0x1a, 0x01014012 }, /* LO */
4290 + { }
4291 + }
4292 + },
4293 };
4294
4295 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4296 @@ -6924,6 +6947,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4297 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
4298 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
4299 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
4300 + SND_PCI_QUIRK(0x1849, 0x5892, "ASRock B150M", ALC892_FIXUP_ASROCK_MOBO),
4301 SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
4302 SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
4303 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
4304 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
4305 index c60a776e815d..8a59d4782a0f 100644
4306 --- a/sound/usb/quirks-table.h
4307 +++ b/sound/usb/quirks-table.h
4308 @@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
4309 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
4310 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
4311
4312 +/* Syntek STK1160 */
4313 +{
4314 + .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
4315 + USB_DEVICE_ID_MATCH_INT_CLASS |
4316 + USB_DEVICE_ID_MATCH_INT_SUBCLASS,
4317 + .idVendor = 0x05e1,
4318 + .idProduct = 0x0408,
4319 + .bInterfaceClass = USB_CLASS_AUDIO,
4320 + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
4321 + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
4322 + .vendor_name = "Syntek",
4323 + .product_name = "STK1160",
4324 + .ifnum = QUIRK_ANY_INTERFACE,
4325 + .type = QUIRK_AUDIO_ALIGN_TRANSFER
4326 + }
4327 +},
4328 +
4329 /* Digidesign Mbox */
4330 {
4331 /* Thanks to Clemens Ladisch <clemens@ladisch.de> */