Contents of /trunk/kernel-magellan/patches-3.17/0104-3.17.5-all-fixes.patch
Parent Directory | Revision Log
Revision 2535 -
(show annotations)
(download)
Fri Jan 30 10:05:27 2015 UTC (9 years, 7 months ago) by niro
File size: 147520 byte(s)
-linux-3.17.5
1 | diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt |
2 | index ce6a1a072028..8a3c40829899 100644 |
3 | --- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt |
4 | +++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt |
5 | @@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents. |
6 | Example: |
7 | interrupts-extended = <&intc1 5 1>, <&intc2 1 0>; |
8 | |
9 | -A device node may contain either "interrupts" or "interrupts-extended", but not |
10 | -both. If both properties are present, then the operating system should log an |
11 | -error and use only the data in "interrupts". |
12 | - |
13 | 2) Interrupt controller nodes |
14 | ----------------------------- |
15 | |
16 | diff --git a/Makefile b/Makefile |
17 | index b60b64d65416..42585f6a819a 100644 |
18 | --- a/Makefile |
19 | +++ b/Makefile |
20 | @@ -1,6 +1,6 @@ |
21 | VERSION = 3 |
22 | PATCHLEVEL = 17 |
23 | -SUBLEVEL = 4 |
24 | +SUBLEVEL = 5 |
25 | EXTRAVERSION = |
26 | NAME = Shuffling Zombie Juror |
27 | |
28 | diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h |
29 | index fc44d3761f9e..ce73ab635414 100644 |
30 | --- a/arch/arm/include/asm/thread_info.h |
31 | +++ b/arch/arm/include/asm/thread_info.h |
32 | @@ -44,16 +44,6 @@ struct cpu_context_save { |
33 | __u32 extra[2]; /* Xscale 'acc' register, etc */ |
34 | }; |
35 | |
36 | -struct arm_restart_block { |
37 | - union { |
38 | - /* For user cache flushing */ |
39 | - struct { |
40 | - unsigned long start; |
41 | - unsigned long end; |
42 | - } cache; |
43 | - }; |
44 | -}; |
45 | - |
46 | /* |
47 | * low level task data that entry.S needs immediate access to. |
48 | * __switch_to() assumes cpu_context follows immediately after cpu_domain. |
49 | @@ -79,7 +69,6 @@ struct thread_info { |
50 | unsigned long thumbee_state; /* ThumbEE Handler Base register */ |
51 | #endif |
52 | struct restart_block restart_block; |
53 | - struct arm_restart_block arm_restart_block; |
54 | }; |
55 | |
56 | #define INIT_THREAD_INFO(tsk) \ |
57 | diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c |
58 | index a964c9f40f87..bea63f5fdbbd 100644 |
59 | --- a/arch/arm/kernel/traps.c |
60 | +++ b/arch/arm/kernel/traps.c |
61 | @@ -513,8 +513,6 @@ static int bad_syscall(int n, struct pt_regs *regs) |
62 | return regs->ARM_r0; |
63 | } |
64 | |
65 | -static long do_cache_op_restart(struct restart_block *); |
66 | - |
67 | static inline int |
68 | __do_cache_op(unsigned long start, unsigned long end) |
69 | { |
70 | @@ -523,24 +521,8 @@ __do_cache_op(unsigned long start, unsigned long end) |
71 | do { |
72 | unsigned long chunk = min(PAGE_SIZE, end - start); |
73 | |
74 | - if (signal_pending(current)) { |
75 | - struct thread_info *ti = current_thread_info(); |
76 | - |
77 | - ti->restart_block = (struct restart_block) { |
78 | - .fn = do_cache_op_restart, |
79 | - }; |
80 | - |
81 | - ti->arm_restart_block = (struct arm_restart_block) { |
82 | - { |
83 | - .cache = { |
84 | - .start = start, |
85 | - .end = end, |
86 | - }, |
87 | - }, |
88 | - }; |
89 | - |
90 | - return -ERESTART_RESTARTBLOCK; |
91 | - } |
92 | + if (fatal_signal_pending(current)) |
93 | + return 0; |
94 | |
95 | ret = flush_cache_user_range(start, start + chunk); |
96 | if (ret) |
97 | @@ -553,15 +535,6 @@ __do_cache_op(unsigned long start, unsigned long end) |
98 | return 0; |
99 | } |
100 | |
101 | -static long do_cache_op_restart(struct restart_block *unused) |
102 | -{ |
103 | - struct arm_restart_block *restart_block; |
104 | - |
105 | - restart_block = ¤t_thread_info()->arm_restart_block; |
106 | - return __do_cache_op(restart_block->cache.start, |
107 | - restart_block->cache.end); |
108 | -} |
109 | - |
110 | static inline int |
111 | do_cache_op(unsigned long start, unsigned long end, int flags) |
112 | { |
113 | diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c |
114 | index 2bdc3233abe2..044b51185fcc 100644 |
115 | --- a/arch/arm/mach-mvebu/coherency.c |
116 | +++ b/arch/arm/mach-mvebu/coherency.c |
117 | @@ -400,6 +400,8 @@ int __init coherency_init(void) |
118 | type == COHERENCY_FABRIC_TYPE_ARMADA_380) |
119 | armada_375_380_coherency_init(np); |
120 | |
121 | + of_node_put(np); |
122 | + |
123 | return 0; |
124 | } |
125 | |
126 | diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S |
127 | index b5d67db20897..197ac5f22535 100644 |
128 | --- a/arch/arm/mm/proc-v7.S |
129 | +++ b/arch/arm/mm/proc-v7.S |
130 | @@ -270,7 +270,6 @@ __v7_pj4b_setup: |
131 | /* Auxiliary Debug Modes Control 1 Register */ |
132 | #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ |
133 | #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ |
134 | -#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ |
135 | #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ |
136 | |
137 | /* Auxiliary Debug Modes Control 2 Register */ |
138 | @@ -293,7 +292,6 @@ __v7_pj4b_setup: |
139 | /* Auxiliary Debug Modes Control 1 Register */ |
140 | mrc p15, 1, r0, c15, c1, 1 |
141 | orr r0, r0, #PJ4B_CLEAN_LINE |
142 | - orr r0, r0, #PJ4B_BCK_OFF_STREX |
143 | orr r0, r0, #PJ4B_INTER_PARITY |
144 | bic r0, r0, #PJ4B_STATIC_BP |
145 | mcr p15, 1, r0, c15, c1, 1 |
146 | diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S |
147 | index 23259f104c66..afa2b3c4df4a 100644 |
148 | --- a/arch/arm/mm/proc-xscale.S |
149 | +++ b/arch/arm/mm/proc-xscale.S |
150 | @@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend) |
151 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg |
152 | mrc p15, 0, r6, c13, c0, 0 @ PID |
153 | mrc p15, 0, r7, c3, c0, 0 @ domain ID |
154 | - mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg |
155 | + mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg |
156 | mrc p15, 0, r9, c1, c0, 0 @ control reg |
157 | bic r4, r4, #2 @ clear frequency change bit |
158 | stmia r0, {r4 - r9} @ store cp regs |
159 | @@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume) |
160 | mcr p15, 0, r6, c13, c0, 0 @ PID |
161 | mcr p15, 0, r7, c3, c0, 0 @ domain ID |
162 | mcr p15, 0, r1, c2, c0, 0 @ translation table base addr |
163 | - mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg |
164 | + mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg |
165 | mov r0, r9 @ control register |
166 | b cpu_resume_mmu |
167 | ENDPROC(cpu_xscale_do_resume) |
168 | diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h |
169 | index cf3b580c3df6..e7b80c352d21 100644 |
170 | --- a/arch/mips/include/asm/mipsregs.h |
171 | +++ b/arch/mips/include/asm/mipsregs.h |
172 | @@ -661,6 +661,8 @@ |
173 | #define MIPS_CONF6_SYND (_ULCAST_(1) << 13) |
174 | /* proAptiv FTLB on/off bit */ |
175 | #define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) |
176 | +/* FTLB probability bits */ |
177 | +#define MIPS_CONF6_FTLBP_SHIFT (16) |
178 | |
179 | #define MIPS_CONF7_WII (_ULCAST_(1) << 31) |
180 | |
181 | diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h |
182 | index 4520adc8699b..cd6e0afc6833 100644 |
183 | --- a/arch/mips/include/asm/r4kcache.h |
184 | +++ b/arch/mips/include/asm/r4kcache.h |
185 | @@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr) |
186 | */ |
187 | static inline void protected_writeback_dcache_line(unsigned long addr) |
188 | { |
189 | +#ifdef CONFIG_EVA |
190 | + protected_cachee_op(Hit_Writeback_Inv_D, addr); |
191 | +#else |
192 | protected_cache_op(Hit_Writeback_Inv_D, addr); |
193 | +#endif |
194 | } |
195 | |
196 | static inline void protected_writeback_scache_line(unsigned long addr) |
197 | diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h |
198 | index a10951090234..b9ab717e3619 100644 |
199 | --- a/arch/mips/include/asm/uaccess.h |
200 | +++ b/arch/mips/include/asm/uaccess.h |
201 | @@ -773,10 +773,11 @@ extern void __put_user_unaligned_unknown(void); |
202 | "jal\t" #destination "\n\t" |
203 | #endif |
204 | |
205 | -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS |
206 | -#define DADDI_SCRATCH "$0" |
207 | -#else |
208 | +#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \ |
209 | + defined(CONFIG_CPU_HAS_PREFETCH)) |
210 | #define DADDI_SCRATCH "$3" |
211 | +#else |
212 | +#define DADDI_SCRATCH "$0" |
213 | #endif |
214 | |
215 | extern size_t __copy_user(void *__to, const void *__from, size_t __n); |
216 | diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c |
217 | index e34b10be782e..052665447dab 100644 |
218 | --- a/arch/mips/kernel/cpu-probe.c |
219 | +++ b/arch/mips/kernel/cpu-probe.c |
220 | @@ -192,6 +192,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) |
221 | static char unknown_isa[] = KERN_ERR \ |
222 | "Unsupported ISA type, c0.config0: %d."; |
223 | |
224 | +static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) |
225 | +{ |
226 | + |
227 | + unsigned int probability = c->tlbsize / c->tlbsizevtlb; |
228 | + |
229 | + /* |
230 | + * 0 = All TLBWR instructions go to FTLB |
231 | + * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the |
232 | + * FTLB and 1 goes to the VTLB. |
233 | + * 2 = 7:1: As above with 7:1 ratio. |
234 | + * 3 = 3:1: As above with 3:1 ratio. |
235 | + * |
236 | + * Use the linear midpoint as the probability threshold. |
237 | + */ |
238 | + if (probability >= 12) |
239 | + return 1; |
240 | + else if (probability >= 6) |
241 | + return 2; |
242 | + else |
243 | + /* |
244 | + * So FTLB is less than 4 times bigger than VTLB. |
245 | + * A 3:1 ratio can still be useful though. |
246 | + */ |
247 | + return 3; |
248 | +} |
249 | + |
250 | static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) |
251 | { |
252 | unsigned int config6; |
253 | @@ -202,9 +228,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) |
254 | case CPU_P5600: |
255 | /* proAptiv & related cores use Config6 to enable the FTLB */ |
256 | config6 = read_c0_config6(); |
257 | + /* Clear the old probability value */ |
258 | + config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); |
259 | if (enable) |
260 | /* Enable FTLB */ |
261 | - write_c0_config6(config6 | MIPS_CONF6_FTLBEN); |
262 | + write_c0_config6(config6 | |
263 | + (calculate_ftlb_probability(c) |
264 | + << MIPS_CONF6_FTLBP_SHIFT) |
265 | + | MIPS_CONF6_FTLBEN); |
266 | else |
267 | /* Disable FTLB */ |
268 | write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); |
269 | diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c |
270 | index 1d57605e4615..16f1e4f2bf3c 100644 |
271 | --- a/arch/mips/kernel/signal.c |
272 | +++ b/arch/mips/kernel/signal.c |
273 | @@ -658,13 +658,13 @@ static int signal_setup(void) |
274 | save_fp_context = _save_fp_context; |
275 | restore_fp_context = _restore_fp_context; |
276 | } else { |
277 | - save_fp_context = copy_fp_from_sigcontext; |
278 | - restore_fp_context = copy_fp_to_sigcontext; |
279 | + save_fp_context = copy_fp_to_sigcontext; |
280 | + restore_fp_context = copy_fp_from_sigcontext; |
281 | } |
282 | #endif /* CONFIG_SMP */ |
283 | #else |
284 | - save_fp_context = copy_fp_from_sigcontext;; |
285 | - restore_fp_context = copy_fp_to_sigcontext; |
286 | + save_fp_context = copy_fp_to_sigcontext; |
287 | + restore_fp_context = copy_fp_from_sigcontext; |
288 | #endif |
289 | |
290 | return 0; |
291 | diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S |
292 | index c17ef80cf65a..5d3238af9b5c 100644 |
293 | --- a/arch/mips/lib/memcpy.S |
294 | +++ b/arch/mips/lib/memcpy.S |
295 | @@ -503,6 +503,7 @@ |
296 | STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) |
297 | .Ldone\@: |
298 | jr ra |
299 | + nop |
300 | .if __memcpy == 1 |
301 | END(memcpy) |
302 | .set __memcpy, 0 |
303 | diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile |
304 | index 0bb9cc9dc621..d87e03330b29 100644 |
305 | --- a/arch/mips/loongson/common/Makefile |
306 | +++ b/arch/mips/loongson/common/Makefile |
307 | @@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o |
308 | # Serial port support |
309 | # |
310 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
311 | -obj-$(CONFIG_SERIAL_8250) += serial.o |
312 | +loongson-serial-$(CONFIG_SERIAL_8250) := serial.o |
313 | +obj-y += $(loongson-serial-m) $(loongson-serial-y) |
314 | obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o |
315 | obj-$(CONFIG_LOONGSON_MC146818) += rtc.o |
316 | |
317 | diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c |
318 | index 37ed184398c6..42323bcc5d28 100644 |
319 | --- a/arch/mips/loongson/loongson-3/numa.c |
320 | +++ b/arch/mips/loongson/loongson-3/numa.c |
321 | @@ -33,6 +33,7 @@ |
322 | |
323 | static struct node_data prealloc__node_data[MAX_NUMNODES]; |
324 | unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; |
325 | +EXPORT_SYMBOL(__node_distances); |
326 | struct node_data *__node_data[MAX_NUMNODES]; |
327 | EXPORT_SYMBOL(__node_data); |
328 | |
329 | diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c |
330 | index fa6ebd4bc9e9..c3917e251f59 100644 |
331 | --- a/arch/mips/mm/tlb-r4k.c |
332 | +++ b/arch/mips/mm/tlb-r4k.c |
333 | @@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) |
334 | |
335 | local_irq_save(flags); |
336 | |
337 | + htw_stop(); |
338 | pid = read_c0_entryhi() & ASID_MASK; |
339 | address &= (PAGE_MASK << 1); |
340 | write_c0_entryhi(address | pid); |
341 | @@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) |
342 | tlb_write_indexed(); |
343 | } |
344 | tlbw_use_hazard(); |
345 | + htw_start(); |
346 | flush_itlb_vm(vma); |
347 | local_irq_restore(flags); |
348 | } |
349 | @@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, |
350 | |
351 | local_irq_save(flags); |
352 | /* Save old context and create impossible VPN2 value */ |
353 | + htw_stop(); |
354 | old_ctx = read_c0_entryhi(); |
355 | old_pagemask = read_c0_pagemask(); |
356 | wired = read_c0_wired(); |
357 | @@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, |
358 | |
359 | write_c0_entryhi(old_ctx); |
360 | write_c0_pagemask(old_pagemask); |
361 | + htw_start(); |
362 | out: |
363 | local_irq_restore(flags); |
364 | return ret; |
365 | diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c |
366 | index b5f228e7eae6..e3328a96e809 100644 |
367 | --- a/arch/mips/mm/tlbex.c |
368 | +++ b/arch/mips/mm/tlbex.c |
369 | @@ -1872,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, |
370 | uasm_l_smp_pgtable_change(l, *p); |
371 | #endif |
372 | iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ |
373 | - if (!m4kc_tlbp_war()) |
374 | + if (!m4kc_tlbp_war()) { |
375 | build_tlb_probe_entry(p); |
376 | + if (cpu_has_htw) { |
377 | + /* race condition happens, leaving */ |
378 | + uasm_i_ehb(p); |
379 | + uasm_i_mfc0(p, wr.r3, C0_INDEX); |
380 | + uasm_il_bltz(p, r, wr.r3, label_leave); |
381 | + uasm_i_nop(p); |
382 | + } |
383 | + } |
384 | return wr; |
385 | } |
386 | |
387 | diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c |
388 | index 6854ed5097d2..83a1dfd8f0e3 100644 |
389 | --- a/arch/mips/oprofile/backtrace.c |
390 | +++ b/arch/mips/oprofile/backtrace.c |
391 | @@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame, |
392 | /* This marks the end of the previous function, |
393 | which means we overran. */ |
394 | break; |
395 | - stack_size = (unsigned) stack_adjustment; |
396 | + stack_size = (unsigned long) stack_adjustment; |
397 | } else if (is_ra_save_ins(&ip)) { |
398 | int ra_slot = ip.i_format.simmediate; |
399 | if (ra_slot < 0) |
400 | diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c |
401 | index a95c00f5fb96..a304bcc37e4f 100644 |
402 | --- a/arch/mips/sgi-ip27/ip27-memory.c |
403 | +++ b/arch/mips/sgi-ip27/ip27-memory.c |
404 | @@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth) |
405 | } |
406 | |
407 | unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; |
408 | +EXPORT_SYMBOL(__node_distances); |
409 | |
410 | static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) |
411 | { |
412 | diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S |
413 | index 23eb9a9441bd..c62be60c7274 100644 |
414 | --- a/arch/powerpc/kernel/vdso32/getcpu.S |
415 | +++ b/arch/powerpc/kernel/vdso32/getcpu.S |
416 | @@ -30,8 +30,8 @@ |
417 | V_FUNCTION_BEGIN(__kernel_getcpu) |
418 | .cfi_startproc |
419 | mfspr r5,SPRN_SPRG_VDSO_READ |
420 | - cmpdi cr0,r3,0 |
421 | - cmpdi cr1,r4,0 |
422 | + cmpwi cr0,r3,0 |
423 | + cmpwi cr1,r4,0 |
424 | clrlwi r6,r5,16 |
425 | rlwinm r7,r5,16,31-15,31-0 |
426 | beq cr0,1f |
427 | diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c |
428 | index 5e1ed1575aab..b322bfb51343 100644 |
429 | --- a/arch/powerpc/platforms/powernv/opal-hmi.c |
430 | +++ b/arch/powerpc/platforms/powernv/opal-hmi.c |
431 | @@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt) |
432 | }; |
433 | |
434 | /* Print things out */ |
435 | - if (hmi_evt->version != OpalHMIEvt_V1) { |
436 | + if (hmi_evt->version < OpalHMIEvt_V1) { |
437 | pr_err("HMI Interrupt, Unknown event version %d !\n", |
438 | hmi_evt->version); |
439 | return; |
440 | diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c |
441 | index df241b11d4f7..e65092842db1 100644 |
442 | --- a/arch/powerpc/platforms/powernv/pci-ioda.c |
443 | +++ b/arch/powerpc/platforms/powernv/pci-ioda.c |
444 | @@ -1311,7 +1311,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, |
445 | unsigned int is_64, struct msi_msg *msg) |
446 | { |
447 | struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); |
448 | - struct pci_dn *pdn = pci_get_pdn(dev); |
449 | struct irq_data *idata; |
450 | struct irq_chip *ichip; |
451 | unsigned int xive_num = hwirq - phb->msi_base; |
452 | @@ -1327,7 +1326,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, |
453 | return -ENXIO; |
454 | |
455 | /* Force 32-bit MSI on some broken devices */ |
456 | - if (pdn && pdn->force_32bit_msi) |
457 | + if (dev->no_64bit_msi) |
458 | is_64 = 0; |
459 | |
460 | /* Assign XIVE to PE */ |
461 | @@ -1815,7 +1814,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, |
462 | if (is_kdump_kernel()) { |
463 | pr_info(" Issue PHB reset ...\n"); |
464 | ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); |
465 | - ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); |
466 | + ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE); |
467 | } |
468 | |
469 | /* Configure M64 window */ |
470 | diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c |
471 | index b854b57ed5e1..e7cf49c2dd05 100644 |
472 | --- a/arch/powerpc/platforms/powernv/pci.c |
473 | +++ b/arch/powerpc/platforms/powernv/pci.c |
474 | @@ -1,3 +1,4 @@ |
475 | + |
476 | /* |
477 | * Support PCI/PCIe on PowerNV platforms |
478 | * |
479 | @@ -50,9 +51,8 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type) |
480 | { |
481 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
482 | struct pnv_phb *phb = hose->private_data; |
483 | - struct pci_dn *pdn = pci_get_pdn(pdev); |
484 | |
485 | - if (pdn && pdn->force_32bit_msi && !phb->msi32_support) |
486 | + if (pdev->no_64bit_msi && !phb->msi32_support) |
487 | return -ENODEV; |
488 | |
489 | return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; |
490 | diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c |
491 | index 18ff4626d74e..6fd96d828a27 100644 |
492 | --- a/arch/powerpc/platforms/pseries/msi.c |
493 | +++ b/arch/powerpc/platforms/pseries/msi.c |
494 | @@ -429,7 +429,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) |
495 | */ |
496 | again: |
497 | if (type == PCI_CAP_ID_MSI) { |
498 | - if (pdn->force_32bit_msi) { |
499 | + if (pdev->no_64bit_msi) { |
500 | rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); |
501 | if (rc < 0) { |
502 | /* |
503 | diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c |
504 | index b988b5addf86..c8efbb37d6e0 100644 |
505 | --- a/arch/powerpc/xmon/xmon.c |
506 | +++ b/arch/powerpc/xmon/xmon.c |
507 | @@ -293,10 +293,10 @@ static inline void disable_surveillance(void) |
508 | args.token = rtas_token("set-indicator"); |
509 | if (args.token == RTAS_UNKNOWN_SERVICE) |
510 | return; |
511 | - args.nargs = 3; |
512 | - args.nret = 1; |
513 | + args.nargs = cpu_to_be32(3); |
514 | + args.nret = cpu_to_be32(1); |
515 | args.rets = &args.args[3]; |
516 | - args.args[0] = SURVEILLANCE_TOKEN; |
517 | + args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN); |
518 | args.args[1] = 0; |
519 | args.args[2] = 0; |
520 | enter_rtas(__pa(&args)); |
521 | diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h |
522 | index a34ad079487e..4c7c12d69bea 100644 |
523 | --- a/arch/sparc/include/uapi/asm/swab.h |
524 | +++ b/arch/sparc/include/uapi/asm/swab.h |
525 | @@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr) |
526 | { |
527 | __u16 ret; |
528 | |
529 | - __asm__ __volatile__ ("lduha [%1] %2, %0" |
530 | + __asm__ __volatile__ ("lduha [%2] %3, %0" |
531 | : "=r" (ret) |
532 | - : "r" (addr), "i" (ASI_PL)); |
533 | + : "m" (*addr), "r" (addr), "i" (ASI_PL)); |
534 | return ret; |
535 | } |
536 | #define __arch_swab16p __arch_swab16p |
537 | @@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr) |
538 | { |
539 | __u32 ret; |
540 | |
541 | - __asm__ __volatile__ ("lduwa [%1] %2, %0" |
542 | + __asm__ __volatile__ ("lduwa [%2] %3, %0" |
543 | : "=r" (ret) |
544 | - : "r" (addr), "i" (ASI_PL)); |
545 | + : "m" (*addr), "r" (addr), "i" (ASI_PL)); |
546 | return ret; |
547 | } |
548 | #define __arch_swab32p __arch_swab32p |
549 | @@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr) |
550 | { |
551 | __u64 ret; |
552 | |
553 | - __asm__ __volatile__ ("ldxa [%1] %2, %0" |
554 | + __asm__ __volatile__ ("ldxa [%2] %3, %0" |
555 | : "=r" (ret) |
556 | - : "r" (addr), "i" (ASI_PL)); |
557 | + : "m" (*addr), "r" (addr), "i" (ASI_PL)); |
558 | return ret; |
559 | } |
560 | #define __arch_swab64p __arch_swab64p |
561 | diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h |
562 | index bb9b258d60e7..2075e6c34c78 100644 |
563 | --- a/arch/x86/include/asm/cpufeature.h |
564 | +++ b/arch/x86/include/asm/cpufeature.h |
565 | @@ -202,6 +202,7 @@ |
566 | #define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ |
567 | #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ |
568 | #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ |
569 | +#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */ |
570 | |
571 | |
572 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
573 | diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h |
574 | index c7678e43465b..e62cf897f781 100644 |
575 | --- a/arch/x86/include/asm/kvm_para.h |
576 | +++ b/arch/x86/include/asm/kvm_para.h |
577 | @@ -2,6 +2,7 @@ |
578 | #define _ASM_X86_KVM_PARA_H |
579 | |
580 | #include <asm/processor.h> |
581 | +#include <asm/alternative.h> |
582 | #include <uapi/asm/kvm_para.h> |
583 | |
584 | extern void kvmclock_init(void); |
585 | @@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void) |
586 | } |
587 | #endif /* CONFIG_KVM_GUEST */ |
588 | |
589 | -/* This instruction is vmcall. On non-VT architectures, it will generate a |
590 | - * trap that we will then rewrite to the appropriate instruction. |
591 | +#ifdef CONFIG_DEBUG_RODATA |
592 | +#define KVM_HYPERCALL \ |
593 | + ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL) |
594 | +#else |
595 | +/* On AMD processors, vmcall will generate a trap that we will |
596 | + * then rewrite to the appropriate instruction. |
597 | */ |
598 | #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" |
599 | +#endif |
600 | |
601 | /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall |
602 | * instruction. The hypervisor may replace it with something else but only the |
603 | diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h |
604 | index f48b17df4224..3a52ee0e726d 100644 |
605 | --- a/arch/x86/include/asm/page_32_types.h |
606 | +++ b/arch/x86/include/asm/page_32_types.h |
607 | @@ -20,7 +20,6 @@ |
608 | #define THREAD_SIZE_ORDER 1 |
609 | #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) |
610 | |
611 | -#define STACKFAULT_STACK 0 |
612 | #define DOUBLEFAULT_STACK 1 |
613 | #define NMI_STACK 0 |
614 | #define DEBUG_STACK 0 |
615 | diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h |
616 | index 678205195ae1..75450b2c7be4 100644 |
617 | --- a/arch/x86/include/asm/page_64_types.h |
618 | +++ b/arch/x86/include/asm/page_64_types.h |
619 | @@ -14,12 +14,11 @@ |
620 | #define IRQ_STACK_ORDER 2 |
621 | #define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) |
622 | |
623 | -#define STACKFAULT_STACK 1 |
624 | -#define DOUBLEFAULT_STACK 2 |
625 | -#define NMI_STACK 3 |
626 | -#define DEBUG_STACK 4 |
627 | -#define MCE_STACK 5 |
628 | -#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */ |
629 | +#define DOUBLEFAULT_STACK 1 |
630 | +#define NMI_STACK 2 |
631 | +#define DEBUG_STACK 3 |
632 | +#define MCE_STACK 4 |
633 | +#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */ |
634 | |
635 | #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) |
636 | #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) |
637 | diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h |
638 | index 854053889d4d..547e344a6dc6 100644 |
639 | --- a/arch/x86/include/asm/thread_info.h |
640 | +++ b/arch/x86/include/asm/thread_info.h |
641 | @@ -141,7 +141,7 @@ struct thread_info { |
642 | /* Only used for 64 bit */ |
643 | #define _TIF_DO_NOTIFY_MASK \ |
644 | (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ |
645 | - _TIF_USER_RETURN_NOTIFY) |
646 | + _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE) |
647 | |
648 | /* flags to check in __switch_to() */ |
649 | #define _TIF_WORK_CTXSW \ |
650 | diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h |
651 | index bc8352e7010a..707adc6549d8 100644 |
652 | --- a/arch/x86/include/asm/traps.h |
653 | +++ b/arch/x86/include/asm/traps.h |
654 | @@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void); |
655 | |
656 | #ifdef CONFIG_TRACING |
657 | asmlinkage void trace_page_fault(void); |
658 | +#define trace_stack_segment stack_segment |
659 | #define trace_divide_error divide_error |
660 | #define trace_bounds bounds |
661 | #define trace_invalid_op invalid_op |
662 | diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c |
663 | index 60e5497681f5..813d29d00a17 100644 |
664 | --- a/arch/x86/kernel/cpu/amd.c |
665 | +++ b/arch/x86/kernel/cpu/amd.c |
666 | @@ -525,6 +525,13 @@ static void early_init_amd(struct cpuinfo_x86 *c) |
667 | } |
668 | #endif |
669 | |
670 | + /* |
671 | + * This is only needed to tell the kernel whether to use VMCALL |
672 | + * and VMMCALL. VMMCALL is never executed except under virt, so |
673 | + * we can set it unconditionally. |
674 | + */ |
675 | + set_cpu_cap(c, X86_FEATURE_VMMCALL); |
676 | + |
677 | /* F16h erratum 793, CVE-2013-6885 */ |
678 | if (c->x86 == 0x16 && c->x86_model <= 0xf) |
679 | msr_set_bit(MSR_AMD64_LS_CFG, 15); |
680 | diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c |
681 | index 31265580c38a..35db56b41317 100644 |
682 | --- a/arch/x86/kernel/cpu/common.c |
683 | +++ b/arch/x86/kernel/cpu/common.c |
684 | @@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); |
685 | |
686 | static int __init x86_xsave_setup(char *s) |
687 | { |
688 | + if (strlen(s)) |
689 | + return 0; |
690 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); |
691 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
692 | setup_clear_cpu_cap(X86_FEATURE_XSAVES); |
693 | diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c |
694 | index 1abcb50b48ae..ff86f19b5758 100644 |
695 | --- a/arch/x86/kernel/dumpstack_64.c |
696 | +++ b/arch/x86/kernel/dumpstack_64.c |
697 | @@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = { |
698 | [ DEBUG_STACK-1 ] = "#DB", |
699 | [ NMI_STACK-1 ] = "NMI", |
700 | [ DOUBLEFAULT_STACK-1 ] = "#DF", |
701 | - [ STACKFAULT_STACK-1 ] = "#SS", |
702 | [ MCE_STACK-1 ] = "#MC", |
703 | #if DEBUG_STKSZ > EXCEPTION_STKSZ |
704 | [ N_EXCEPTION_STACKS ... |
705 | diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S |
706 | index 2fac1343a90b..b9dde27d8ad4 100644 |
707 | --- a/arch/x86/kernel/entry_64.S |
708 | +++ b/arch/x86/kernel/entry_64.S |
709 | @@ -841,9 +841,15 @@ ENTRY(native_iret) |
710 | jnz native_irq_return_ldt |
711 | #endif |
712 | |
713 | +.global native_irq_return_iret |
714 | native_irq_return_iret: |
715 | + /* |
716 | + * This may fault. Non-paranoid faults on return to userspace are |
717 | + * handled by fixup_bad_iret. These include #SS, #GP, and #NP. |
718 | + * Double-faults due to espfix64 are handled in do_double_fault. |
719 | + * Other faults here are fatal. |
720 | + */ |
721 | iretq |
722 | - _ASM_EXTABLE(native_irq_return_iret, bad_iret) |
723 | |
724 | #ifdef CONFIG_X86_ESPFIX64 |
725 | native_irq_return_ldt: |
726 | @@ -871,25 +877,6 @@ native_irq_return_ldt: |
727 | jmp native_irq_return_iret |
728 | #endif |
729 | |
730 | - .section .fixup,"ax" |
731 | -bad_iret: |
732 | - /* |
733 | - * The iret traps when the %cs or %ss being restored is bogus. |
734 | - * We've lost the original trap vector and error code. |
735 | - * #GPF is the most likely one to get for an invalid selector. |
736 | - * So pretend we completed the iret and took the #GPF in user mode. |
737 | - * |
738 | - * We are now running with the kernel GS after exception recovery. |
739 | - * But error_entry expects us to have user GS to match the user %cs, |
740 | - * so swap back. |
741 | - */ |
742 | - pushq $0 |
743 | - |
744 | - SWAPGS |
745 | - jmp general_protection |
746 | - |
747 | - .previous |
748 | - |
749 | /* edi: workmask, edx: work */ |
750 | retint_careful: |
751 | CFI_RESTORE_STATE |
752 | @@ -935,37 +922,6 @@ ENTRY(retint_kernel) |
753 | CFI_ENDPROC |
754 | END(common_interrupt) |
755 | |
756 | - /* |
757 | - * If IRET takes a fault on the espfix stack, then we |
758 | - * end up promoting it to a doublefault. In that case, |
759 | - * modify the stack to make it look like we just entered |
760 | - * the #GP handler from user space, similar to bad_iret. |
761 | - */ |
762 | -#ifdef CONFIG_X86_ESPFIX64 |
763 | - ALIGN |
764 | -__do_double_fault: |
765 | - XCPT_FRAME 1 RDI+8 |
766 | - movq RSP(%rdi),%rax /* Trap on the espfix stack? */ |
767 | - sarq $PGDIR_SHIFT,%rax |
768 | - cmpl $ESPFIX_PGD_ENTRY,%eax |
769 | - jne do_double_fault /* No, just deliver the fault */ |
770 | - cmpl $__KERNEL_CS,CS(%rdi) |
771 | - jne do_double_fault |
772 | - movq RIP(%rdi),%rax |
773 | - cmpq $native_irq_return_iret,%rax |
774 | - jne do_double_fault /* This shouldn't happen... */ |
775 | - movq PER_CPU_VAR(kernel_stack),%rax |
776 | - subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ |
777 | - movq %rax,RSP(%rdi) |
778 | - movq $0,(%rax) /* Missing (lost) #GP error code */ |
779 | - movq $general_protection,RIP(%rdi) |
780 | - retq |
781 | - CFI_ENDPROC |
782 | -END(__do_double_fault) |
783 | -#else |
784 | -# define __do_double_fault do_double_fault |
785 | -#endif |
786 | - |
787 | /* |
788 | * APIC interrupts. |
789 | */ |
790 | @@ -1137,7 +1093,7 @@ idtentry overflow do_overflow has_error_code=0 |
791 | idtentry bounds do_bounds has_error_code=0 |
792 | idtentry invalid_op do_invalid_op has_error_code=0 |
793 | idtentry device_not_available do_device_not_available has_error_code=0 |
794 | -idtentry double_fault __do_double_fault has_error_code=1 paranoid=1 |
795 | +idtentry double_fault do_double_fault has_error_code=1 paranoid=1 |
796 | idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 |
797 | idtentry invalid_TSS do_invalid_TSS has_error_code=1 |
798 | idtentry segment_not_present do_segment_not_present has_error_code=1 |
799 | @@ -1302,7 +1258,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
800 | |
801 | idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
802 | idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK |
803 | -idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1 |
804 | +idtentry stack_segment do_stack_segment has_error_code=1 |
805 | #ifdef CONFIG_XEN |
806 | idtentry xen_debug do_debug has_error_code=0 |
807 | idtentry xen_int3 do_int3 has_error_code=0 |
808 | @@ -1412,17 +1368,16 @@ error_sti: |
809 | |
810 | /* |
811 | * There are two places in the kernel that can potentially fault with |
812 | - * usergs. Handle them here. The exception handlers after iret run with |
813 | - * kernel gs again, so don't set the user space flag. B stepping K8s |
814 | - * sometimes report an truncated RIP for IRET exceptions returning to |
815 | - * compat mode. Check for these here too. |
816 | + * usergs. Handle them here. B stepping K8s sometimes report a |
817 | + * truncated RIP for IRET exceptions returning to compat mode. Check |
818 | + * for these here too. |
819 | */ |
820 | error_kernelspace: |
821 | CFI_REL_OFFSET rcx, RCX+8 |
822 | incl %ebx |
823 | leaq native_irq_return_iret(%rip),%rcx |
824 | cmpq %rcx,RIP+8(%rsp) |
825 | - je error_swapgs |
826 | + je error_bad_iret |
827 | movl %ecx,%eax /* zero extend */ |
828 | cmpq %rax,RIP+8(%rsp) |
829 | je bstep_iret |
830 | @@ -1433,7 +1388,15 @@ error_kernelspace: |
831 | bstep_iret: |
832 | /* Fix truncated RIP */ |
833 | movq %rcx,RIP+8(%rsp) |
834 | - jmp error_swapgs |
835 | + /* fall through */ |
836 | + |
837 | +error_bad_iret: |
838 | + SWAPGS |
839 | + mov %rsp,%rdi |
840 | + call fixup_bad_iret |
841 | + mov %rax,%rsp |
842 | + decl %ebx /* Return to usergs */ |
843 | + jmp error_sti |
844 | CFI_ENDPROC |
845 | END(error_entry) |
846 | |
847 | diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c |
848 | index 0d0e922fafc1..de801f22128a 100644 |
849 | --- a/arch/x86/kernel/traps.c |
850 | +++ b/arch/x86/kernel/traps.c |
851 | @@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) |
852 | DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) |
853 | DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) |
854 | DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) |
855 | -#ifdef CONFIG_X86_32 |
856 | DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) |
857 | -#endif |
858 | DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) |
859 | |
860 | #ifdef CONFIG_X86_64 |
861 | /* Runs on IST stack */ |
862 | -dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) |
863 | -{ |
864 | - enum ctx_state prev_state; |
865 | - |
866 | - prev_state = exception_enter(); |
867 | - if (notify_die(DIE_TRAP, "stack segment", regs, error_code, |
868 | - X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { |
869 | - preempt_conditional_sti(regs); |
870 | - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); |
871 | - preempt_conditional_cli(regs); |
872 | - } |
873 | - exception_exit(prev_state); |
874 | -} |
875 | - |
876 | dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) |
877 | { |
878 | static const char str[] = "double fault"; |
879 | struct task_struct *tsk = current; |
880 | |
881 | +#ifdef CONFIG_X86_ESPFIX64 |
882 | + extern unsigned char native_irq_return_iret[]; |
883 | + |
884 | + /* |
885 | + * If IRET takes a non-IST fault on the espfix64 stack, then we |
886 | + * end up promoting it to a doublefault. In that case, modify |
887 | + * the stack to make it look like we just entered the #GP |
888 | + * handler from user space, similar to bad_iret. |
889 | + */ |
890 | + if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY && |
891 | + regs->cs == __KERNEL_CS && |
892 | + regs->ip == (unsigned long)native_irq_return_iret) |
893 | + { |
894 | + struct pt_regs *normal_regs = task_pt_regs(current); |
895 | + |
896 | + /* Fake a #GP(0) from userspace. */ |
897 | + memmove(&normal_regs->ip, (void *)regs->sp, 5*8); |
898 | + normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */ |
899 | + regs->ip = (unsigned long)general_protection; |
900 | + regs->sp = (unsigned long)&normal_regs->orig_ax; |
901 | + return; |
902 | + } |
903 | +#endif |
904 | + |
905 | exception_enter(); |
906 | /* Return not checked because double check cannot be ignored */ |
907 | notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); |
908 | @@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs) |
909 | return regs; |
910 | } |
911 | NOKPROBE_SYMBOL(sync_regs); |
912 | + |
913 | +struct bad_iret_stack { |
914 | + void *error_entry_ret; |
915 | + struct pt_regs regs; |
916 | +}; |
917 | + |
918 | +asmlinkage __visible |
919 | +struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s) |
920 | +{ |
921 | + /* |
922 | + * This is called from entry_64.S early in handling a fault |
923 | + * caused by a bad iret to user mode. To handle the fault |
924 | + * correctly, we want move our stack frame to task_pt_regs |
925 | + * and we want to pretend that the exception came from the |
926 | + * iret target. |
927 | + */ |
928 | + struct bad_iret_stack *new_stack = |
929 | + container_of(task_pt_regs(current), |
930 | + struct bad_iret_stack, regs); |
931 | + |
932 | + /* Copy the IRET target to the new stack. */ |
933 | + memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8); |
934 | + |
935 | + /* Copy the remainder of the stack from the current stack. */ |
936 | + memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip)); |
937 | + |
938 | + BUG_ON(!user_mode_vm(&new_stack->regs)); |
939 | + return new_stack; |
940 | +} |
941 | #endif |
942 | |
943 | /* |
944 | @@ -778,7 +815,7 @@ void __init trap_init(void) |
945 | set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); |
946 | set_intr_gate(X86_TRAP_TS, invalid_TSS); |
947 | set_intr_gate(X86_TRAP_NP, segment_not_present); |
948 | - set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); |
949 | + set_intr_gate(X86_TRAP_SS, stack_segment); |
950 | set_intr_gate(X86_TRAP_GP, general_protection); |
951 | set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); |
952 | set_intr_gate(X86_TRAP_MF, coprocessor_error); |
953 | diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c |
954 | index 5621c47d7a1a..ac7de5f0fba7 100644 |
955 | --- a/arch/x86/mm/init_64.c |
956 | +++ b/arch/x86/mm/init_64.c |
957 | @@ -1111,7 +1111,7 @@ void mark_rodata_ro(void) |
958 | unsigned long end = (unsigned long) &__end_rodata_hpage_align; |
959 | unsigned long text_end = PFN_ALIGN(&__stop___ex_table); |
960 | unsigned long rodata_end = PFN_ALIGN(&__end_rodata); |
961 | - unsigned long all_end = PFN_ALIGN(&_end); |
962 | + unsigned long all_end; |
963 | |
964 | printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", |
965 | (end - start) >> 10); |
966 | @@ -1122,7 +1122,16 @@ void mark_rodata_ro(void) |
967 | /* |
968 | * The rodata/data/bss/brk section (but not the kernel text!) |
969 | * should also be not-executable. |
970 | + * |
971 | + * We align all_end to PMD_SIZE because the existing mapping |
972 | + * is a full PMD. If we would align _brk_end to PAGE_SIZE we |
973 | + * split the PMD and the reminder between _brk_end and the end |
974 | + * of the PMD will remain mapped executable. |
975 | + * |
976 | + * Any PMD which was setup after the one which covers _brk_end |
977 | + * has been zapped already via cleanup_highmem(). |
978 | */ |
979 | + all_end = roundup((unsigned long)_brk_end, PMD_SIZE); |
980 | set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); |
981 | |
982 | rodata_test(); |
983 | diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl |
984 | index 0b0b124d3ece..23210baade2d 100644 |
985 | --- a/arch/x86/tools/calc_run_size.pl |
986 | +++ b/arch/x86/tools/calc_run_size.pl |
987 | @@ -19,7 +19,16 @@ while (<>) { |
988 | if ($file_offset == 0) { |
989 | $file_offset = $offset; |
990 | } elsif ($file_offset != $offset) { |
991 | - die ".bss and .brk lack common file offset\n"; |
992 | + # BFD linker shows the same file offset in ELF. |
993 | + # Gold linker shows them as consecutive. |
994 | + next if ($file_offset + $mem_size == $offset + $size); |
995 | + |
996 | + printf STDERR "file_offset: 0x%lx\n", $file_offset; |
997 | + printf STDERR "mem_size: 0x%lx\n", $mem_size; |
998 | + printf STDERR "offset: 0x%lx\n", $offset; |
999 | + printf STDERR "size: 0x%lx\n", $size; |
1000 | + |
1001 | + die ".bss and .brk are non-contiguous\n"; |
1002 | } |
1003 | } |
1004 | } |
1005 | diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c |
1006 | index 5e9cbd664286..7462ad1675c0 100644 |
1007 | --- a/drivers/acpi/device_pm.c |
1008 | +++ b/drivers/acpi/device_pm.c |
1009 | @@ -877,7 +877,7 @@ int acpi_dev_suspend_late(struct device *dev) |
1010 | return 0; |
1011 | |
1012 | target_state = acpi_target_system_state(); |
1013 | - wakeup = device_may_wakeup(dev); |
1014 | + wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev); |
1015 | error = acpi_device_wakeup(adev, target_state, wakeup); |
1016 | if (wakeup && error) |
1017 | return error; |
1018 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
1019 | index c874859b4565..cf79c4cdf955 100644 |
1020 | --- a/drivers/acpi/ec.c |
1021 | +++ b/drivers/acpi/ec.c |
1022 | @@ -299,11 +299,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, |
1023 | /* following two actions should be kept atomic */ |
1024 | ec->curr = t; |
1025 | start_transaction(ec); |
1026 | + if (ec->curr->command == ACPI_EC_COMMAND_QUERY) |
1027 | + clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); |
1028 | spin_unlock_irqrestore(&ec->lock, tmp); |
1029 | ret = ec_poll(ec); |
1030 | spin_lock_irqsave(&ec->lock, tmp); |
1031 | - if (ec->curr->command == ACPI_EC_COMMAND_QUERY) |
1032 | - clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); |
1033 | ec->curr = NULL; |
1034 | spin_unlock_irqrestore(&ec->lock, tmp); |
1035 | return ret; |
1036 | diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c |
1037 | index 18a9de29df0e..c0a842b335c5 100644 |
1038 | --- a/drivers/clk/clk-divider.c |
1039 | +++ b/drivers/clk/clk-divider.c |
1040 | @@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, |
1041 | if (!rate) |
1042 | rate = 1; |
1043 | |
1044 | + /* if read only, just return current value */ |
1045 | + if (divider->flags & CLK_DIVIDER_READ_ONLY) { |
1046 | + bestdiv = readl(divider->reg) >> divider->shift; |
1047 | + bestdiv &= div_mask(divider); |
1048 | + bestdiv = _get_div(divider, bestdiv); |
1049 | + return bestdiv; |
1050 | + } |
1051 | + |
1052 | maxdiv = _get_maxdiv(divider); |
1053 | |
1054 | if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { |
1055 | @@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = { |
1056 | }; |
1057 | EXPORT_SYMBOL_GPL(clk_divider_ops); |
1058 | |
1059 | -const struct clk_ops clk_divider_ro_ops = { |
1060 | - .recalc_rate = clk_divider_recalc_rate, |
1061 | -}; |
1062 | -EXPORT_SYMBOL_GPL(clk_divider_ro_ops); |
1063 | - |
1064 | static struct clk *_register_divider(struct device *dev, const char *name, |
1065 | const char *parent_name, unsigned long flags, |
1066 | void __iomem *reg, u8 shift, u8 width, |
1067 | @@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name, |
1068 | } |
1069 | |
1070 | init.name = name; |
1071 | - if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) |
1072 | - init.ops = &clk_divider_ro_ops; |
1073 | - else |
1074 | - init.ops = &clk_divider_ops; |
1075 | + init.ops = &clk_divider_ops; |
1076 | init.flags = flags | CLK_IS_BASIC; |
1077 | init.parent_names = (parent_name ? &parent_name: NULL); |
1078 | init.num_parents = (parent_name ? 1 : 0); |
1079 | diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c |
1080 | index 751eea376a2b..2e0526acbd4c 100644 |
1081 | --- a/drivers/clk/qcom/mmcc-apq8084.c |
1082 | +++ b/drivers/clk/qcom/mmcc-apq8084.c |
1083 | @@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = { |
1084 | [ESC1_CLK_SRC] = &esc1_clk_src.clkr, |
1085 | [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, |
1086 | [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, |
1087 | - [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, |
1088 | + [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, |
1089 | [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, |
1090 | [MAPLE_CLK_SRC] = &maple_clk_src.clkr, |
1091 | [VDP_CLK_SRC] = &vdp_clk_src.clkr, |
1092 | diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c |
1093 | index 278cf9dd1e23..a8b3db5cf641 100644 |
1094 | --- a/drivers/clk/rockchip/clk.c |
1095 | +++ b/drivers/clk/rockchip/clk.c |
1096 | @@ -89,9 +89,7 @@ struct clk *rockchip_clk_register_branch(const char *name, |
1097 | div->width = div_width; |
1098 | div->lock = lock; |
1099 | div->table = div_table; |
1100 | - div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) |
1101 | - ? &clk_divider_ro_ops |
1102 | - : &clk_divider_ops; |
1103 | + div_ops = &clk_divider_ops; |
1104 | } |
1105 | |
1106 | clk = clk_register_composite(NULL, name, parent_names, num_parents, |
1107 | diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c |
1108 | index efb17c3ee120..f4a9c0058b4d 100644 |
1109 | --- a/drivers/clocksource/sun4i_timer.c |
1110 | +++ b/drivers/clocksource/sun4i_timer.c |
1111 | @@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node) |
1112 | /* Make sure timer is stopped before playing with interrupts */ |
1113 | sun4i_clkevt_time_stop(0); |
1114 | |
1115 | + sun4i_clockevent.cpumask = cpu_possible_mask; |
1116 | + sun4i_clockevent.irq = irq; |
1117 | + |
1118 | + clockevents_config_and_register(&sun4i_clockevent, rate, |
1119 | + TIMER_SYNC_TICKS, 0xffffffff); |
1120 | + |
1121 | ret = setup_irq(irq, &sun4i_timer_irq); |
1122 | if (ret) |
1123 | pr_warn("failed to setup irq %d\n", irq); |
1124 | @@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node) |
1125 | /* Enable timer0 interrupt */ |
1126 | val = readl(timer_base + TIMER_IRQ_EN_REG); |
1127 | writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); |
1128 | - |
1129 | - sun4i_clockevent.cpumask = cpu_possible_mask; |
1130 | - sun4i_clockevent.irq = irq; |
1131 | - |
1132 | - clockevents_config_and_register(&sun4i_clockevent, rate, |
1133 | - TIMER_SYNC_TICKS, 0xffffffff); |
1134 | } |
1135 | CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", |
1136 | sun4i_timer_init); |
1137 | diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c |
1138 | index 1f92a56fd2b6..08ebf289a7fc 100644 |
1139 | --- a/drivers/dma/sun6i-dma.c |
1140 | +++ b/drivers/dma/sun6i-dma.c |
1141 | @@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, |
1142 | readl(pchan->base + DMA_CHAN_CUR_PARA)); |
1143 | } |
1144 | |
1145 | -static inline int convert_burst(u32 maxburst, u8 *burst) |
1146 | +static inline s8 convert_burst(u32 maxburst) |
1147 | { |
1148 | switch (maxburst) { |
1149 | case 1: |
1150 | - *burst = 0; |
1151 | - break; |
1152 | + return 0; |
1153 | case 8: |
1154 | - *burst = 2; |
1155 | - break; |
1156 | + return 2; |
1157 | default: |
1158 | return -EINVAL; |
1159 | } |
1160 | - |
1161 | - return 0; |
1162 | } |
1163 | |
1164 | -static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) |
1165 | +static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width) |
1166 | { |
1167 | if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || |
1168 | (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) |
1169 | return -EINVAL; |
1170 | |
1171 | - *width = addr_width >> 1; |
1172 | - return 0; |
1173 | + return addr_width >> 1; |
1174 | } |
1175 | |
1176 | static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, |
1177 | @@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, |
1178 | struct dma_slave_config *config) |
1179 | { |
1180 | u8 src_width, dst_width, src_burst, dst_burst; |
1181 | - int ret; |
1182 | |
1183 | if (!config) |
1184 | return -EINVAL; |
1185 | |
1186 | - ret = convert_burst(config->src_maxburst, &src_burst); |
1187 | - if (ret) |
1188 | - return ret; |
1189 | + src_burst = convert_burst(config->src_maxburst); |
1190 | + if (src_burst) |
1191 | + return src_burst; |
1192 | |
1193 | - ret = convert_burst(config->dst_maxburst, &dst_burst); |
1194 | - if (ret) |
1195 | - return ret; |
1196 | + dst_burst = convert_burst(config->dst_maxburst); |
1197 | + if (dst_burst) |
1198 | + return dst_burst; |
1199 | |
1200 | - ret = convert_buswidth(config->src_addr_width, &src_width); |
1201 | - if (ret) |
1202 | - return ret; |
1203 | + src_width = convert_buswidth(config->src_addr_width); |
1204 | + if (src_width) |
1205 | + return src_width; |
1206 | |
1207 | - ret = convert_buswidth(config->dst_addr_width, &dst_width); |
1208 | - if (ret) |
1209 | - return ret; |
1210 | + dst_width = convert_buswidth(config->dst_addr_width); |
1211 | + if (dst_width) |
1212 | + return dst_width; |
1213 | |
1214 | lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | |
1215 | DMA_CHAN_CFG_SRC_WIDTH(src_width) | |
1216 | @@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( |
1217 | { |
1218 | struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); |
1219 | struct sun6i_vchan *vchan = to_sun6i_vchan(chan); |
1220 | - struct dma_slave_config *sconfig = &vchan->cfg; |
1221 | struct sun6i_dma_lli *v_lli; |
1222 | struct sun6i_desc *txd; |
1223 | dma_addr_t p_lli; |
1224 | - int ret; |
1225 | + s8 burst, width; |
1226 | |
1227 | dev_dbg(chan2dev(chan), |
1228 | "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", |
1229 | @@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( |
1230 | goto err_txd_free; |
1231 | } |
1232 | |
1233 | - ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); |
1234 | - if (ret) |
1235 | - goto err_dma_free; |
1236 | + v_lli->src = src; |
1237 | + v_lli->dst = dest; |
1238 | + v_lli->len = len; |
1239 | + v_lli->para = NORMAL_WAIT; |
1240 | |
1241 | + burst = convert_burst(8); |
1242 | + width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); |
1243 | v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | |
1244 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | |
1245 | DMA_CHAN_CFG_DST_LINEAR_MODE | |
1246 | - DMA_CHAN_CFG_SRC_LINEAR_MODE; |
1247 | + DMA_CHAN_CFG_SRC_LINEAR_MODE | |
1248 | + DMA_CHAN_CFG_SRC_BURST(burst) | |
1249 | + DMA_CHAN_CFG_SRC_WIDTH(width) | |
1250 | + DMA_CHAN_CFG_DST_BURST(burst) | |
1251 | + DMA_CHAN_CFG_DST_WIDTH(width); |
1252 | |
1253 | sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); |
1254 | |
1255 | @@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( |
1256 | |
1257 | return vchan_tx_prep(&vchan->vc, &txd->vd, flags); |
1258 | |
1259 | -err_dma_free: |
1260 | - dma_pool_free(sdev->pool, v_lli, p_lli); |
1261 | err_txd_free: |
1262 | kfree(txd); |
1263 | return NULL; |
1264 | @@ -938,6 +936,7 @@ static int sun6i_dma_probe(struct platform_device *pdev) |
1265 | sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; |
1266 | sdc->slave.device_control = sun6i_dma_control; |
1267 | sdc->slave.chancnt = NR_MAX_VCHANS; |
1268 | + sdc->slave.copy_align = 4; |
1269 | |
1270 | sdc->slave.dev = &pdev->dev; |
1271 | |
1272 | diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c |
1273 | index 9933c26017ed..2d23e578edd1 100644 |
1274 | --- a/drivers/gpu/drm/i915/i915_dma.c |
1275 | +++ b/drivers/gpu/drm/i915/i915_dma.c |
1276 | @@ -1670,15 +1670,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) |
1277 | goto out_regs; |
1278 | |
1279 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
1280 | - ret = i915_kick_out_vgacon(dev_priv); |
1281 | + /* WARNING: Apparently we must kick fbdev drivers before vgacon, |
1282 | + * otherwise the vga fbdev driver falls over. */ |
1283 | + ret = i915_kick_out_firmware_fb(dev_priv); |
1284 | if (ret) { |
1285 | - DRM_ERROR("failed to remove conflicting VGA console\n"); |
1286 | + DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); |
1287 | goto out_gtt; |
1288 | } |
1289 | |
1290 | - ret = i915_kick_out_firmware_fb(dev_priv); |
1291 | + ret = i915_kick_out_vgacon(dev_priv); |
1292 | if (ret) { |
1293 | - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); |
1294 | + DRM_ERROR("failed to remove conflicting VGA console\n"); |
1295 | goto out_gtt; |
1296 | } |
1297 | } |
1298 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
1299 | index b71a02663bae..7bd17b3ee95c 100644 |
1300 | --- a/drivers/gpu/drm/i915/intel_display.c |
1301 | +++ b/drivers/gpu/drm/i915/intel_display.c |
1302 | @@ -9217,6 +9217,10 @@ static bool page_flip_finished(struct intel_crtc *crtc) |
1303 | struct drm_device *dev = crtc->base.dev; |
1304 | struct drm_i915_private *dev_priv = dev->dev_private; |
1305 | |
1306 | + if (i915_reset_in_progress(&dev_priv->gpu_error) || |
1307 | + crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) |
1308 | + return true; |
1309 | + |
1310 | /* |
1311 | * The relevant registers doen't exist on pre-ctg. |
1312 | * As the flip done interrupt doesn't trigger for mmio |
1313 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
1314 | index 4b3c09636990..1b7375efc670 100644 |
1315 | --- a/drivers/gpu/drm/i915/intel_dp.c |
1316 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
1317 | @@ -4084,6 +4084,18 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) |
1318 | if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) |
1319 | intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; |
1320 | |
1321 | + if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { |
1322 | + /* |
1323 | + * vdd off can generate a long pulse on eDP which |
1324 | + * would require vdd on to handle it, and thus we |
1325 | + * would end up in an endless cycle of |
1326 | + * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." |
1327 | + */ |
1328 | + DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", |
1329 | + port_name(intel_dig_port->port)); |
1330 | + return false; |
1331 | + } |
1332 | + |
1333 | DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port, |
1334 | long_hpd ? "long" : "short"); |
1335 | |
1336 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
1337 | index 40c12295c0bd..1924b5632c6f 100644 |
1338 | --- a/drivers/gpu/drm/i915/intel_pm.c |
1339 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
1340 | @@ -5247,11 +5247,6 @@ static void gen6_init_clock_gating(struct drm_device *dev) |
1341 | I915_WRITE(_3D_CHICKEN, |
1342 | _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); |
1343 | |
1344 | - /* WaSetupGtModeTdRowDispatch:snb */ |
1345 | - if (IS_SNB_GT1(dev)) |
1346 | - I915_WRITE(GEN6_GT_MODE, |
1347 | - _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE)); |
1348 | - |
1349 | /* WaDisable_RenderCache_OperationalFlush:snb */ |
1350 | I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); |
1351 | |
1352 | diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c |
1353 | index 71f4d26669cd..13a27d971d35 100644 |
1354 | --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c |
1355 | +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c |
1356 | @@ -32,7 +32,7 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
1357 | struct drm_connector *connector; |
1358 | struct radeon_connector *radeon_connector = NULL; |
1359 | u32 tmp; |
1360 | - u8 *sadb; |
1361 | + u8 *sadb = NULL; |
1362 | int sad_count; |
1363 | |
1364 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
1365 | diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c |
1366 | index 790d8cafdb87..88dd17955db3 100644 |
1367 | --- a/drivers/gpu/drm/radeon/dce6_afmt.c |
1368 | +++ b/drivers/gpu/drm/radeon/dce6_afmt.c |
1369 | @@ -155,7 +155,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
1370 | struct drm_connector *connector; |
1371 | struct radeon_connector *radeon_connector = NULL; |
1372 | u32 offset, tmp; |
1373 | - u8 *sadb; |
1374 | + u8 *sadb = NULL; |
1375 | int sad_count; |
1376 | |
1377 | if (!dig || !dig->afmt || !dig->afmt->pin) |
1378 | diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1379 | index 71ebdf89fd76..993978051ede 100644 |
1380 | --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1381 | +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c |
1382 | @@ -102,7 +102,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder) |
1383 | struct drm_connector *connector; |
1384 | struct radeon_connector *radeon_connector = NULL; |
1385 | u32 tmp; |
1386 | - u8 *sadb; |
1387 | + u8 *sadb = NULL; |
1388 | int sad_count; |
1389 | |
1390 | list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { |
1391 | diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c |
1392 | index 9c61b74ef441..e961be55348a 100644 |
1393 | --- a/drivers/gpu/drm/radeon/r600_dpm.c |
1394 | +++ b/drivers/gpu/drm/radeon/r600_dpm.c |
1395 | @@ -1255,7 +1255,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev) |
1396 | (mode_info->atom_context->bios + data_offset + |
1397 | le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); |
1398 | rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = |
1399 | - ppt->usMaximumPowerDeliveryLimit; |
1400 | + le16_to_cpu(ppt->usMaximumPowerDeliveryLimit); |
1401 | pt = &ppt->power_tune_table; |
1402 | } else { |
1403 | ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) |
1404 | diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c |
1405 | index 300c4b3d4669..26baa9c05f6c 100644 |
1406 | --- a/drivers/gpu/drm/radeon/radeon_connectors.c |
1407 | +++ b/drivers/gpu/drm/radeon/radeon_connectors.c |
1408 | @@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector) |
1409 | } |
1410 | |
1411 | if (!radeon_connector->edid) { |
1412 | + /* don't fetch the edid from the vbios if ddc fails and runpm is |
1413 | + * enabled so we report disconnected. |
1414 | + */ |
1415 | + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
1416 | + return; |
1417 | + |
1418 | if (rdev->is_atom_bios) { |
1419 | /* some laptops provide a hardcoded edid in rom for LCDs */ |
1420 | if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || |
1421 | @@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector, |
1422 | static enum drm_connector_status |
1423 | radeon_lvds_detect(struct drm_connector *connector, bool force) |
1424 | { |
1425 | + struct drm_device *dev = connector->dev; |
1426 | + struct radeon_device *rdev = dev->dev_private; |
1427 | struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
1428 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); |
1429 | enum drm_connector_status ret = connector_status_disconnected; |
1430 | @@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) |
1431 | /* check if panel is valid */ |
1432 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
1433 | ret = connector_status_connected; |
1434 | - |
1435 | + /* don't fetch the edid from the vbios if ddc fails and runpm is |
1436 | + * enabled so we report disconnected. |
1437 | + */ |
1438 | + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
1439 | + ret = connector_status_disconnected; |
1440 | } |
1441 | |
1442 | /* check for edid as well */ |
1443 | @@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) |
1444 | /* check if panel is valid */ |
1445 | if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) |
1446 | ret = connector_status_connected; |
1447 | + /* don't fetch the edid from the vbios if ddc fails and runpm is |
1448 | + * enabled so we report disconnected. |
1449 | + */ |
1450 | + if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0)) |
1451 | + ret = connector_status_disconnected; |
1452 | } |
1453 | /* eDP is always DP */ |
1454 | radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; |
1455 | diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c |
1456 | index 15edf23b465c..1ca64b615fc0 100644 |
1457 | --- a/drivers/gpu/drm/radeon/radeon_encoders.c |
1458 | +++ b/drivers/gpu/drm/radeon/radeon_encoders.c |
1459 | @@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, |
1460 | (rdev->pdev->subsystem_vendor == 0x1734) && |
1461 | (rdev->pdev->subsystem_device == 0x1107)) |
1462 | use_bl = false; |
1463 | + /* disable native backlight control on older asics */ |
1464 | + else if (rdev->family < CHIP_R600) |
1465 | + use_bl = false; |
1466 | else |
1467 | use_bl = true; |
1468 | } |
1469 | diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1470 | index 16807afab362..c74f12d125f7 100644 |
1471 | --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1472 | +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1473 | @@ -202,6 +202,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev) |
1474 | if (rdev->flags & RADEON_IS_AGP) |
1475 | return false; |
1476 | |
1477 | + /* |
1478 | + * Older chips have a HW limitation, they can only generate 40 bits |
1479 | + * of address for "64-bit" MSIs which breaks on some platforms, notably |
1480 | + * IBM POWER servers, so we limit them |
1481 | + */ |
1482 | + if (rdev->family < CHIP_BONAIRE) { |
1483 | + dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n"); |
1484 | + rdev->pdev->no_64bit_msi = 1; |
1485 | + } |
1486 | + |
1487 | /* force MSI on */ |
1488 | if (radeon_msi == 1) |
1489 | return true; |
1490 | diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c |
1491 | index 6aac695b1688..9b55e673b67c 100644 |
1492 | --- a/drivers/hwmon/g762.c |
1493 | +++ b/drivers/hwmon/g762.c |
1494 | @@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id) |
1495 | if (ret) |
1496 | goto clock_dis; |
1497 | |
1498 | - data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, |
1499 | - client->name, |
1500 | - data, |
1501 | - g762_groups); |
1502 | + data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name, |
1503 | + data, g762_groups); |
1504 | if (IS_ERR(data->hwmon_dev)) { |
1505 | ret = PTR_ERR(data->hwmon_dev); |
1506 | goto clock_dis; |
1507 | diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c |
1508 | index b58d6302521f..d095efe1ba14 100644 |
1509 | --- a/drivers/iio/adc/men_z188_adc.c |
1510 | +++ b/drivers/iio/adc/men_z188_adc.c |
1511 | @@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev) |
1512 | |
1513 | static const struct mcb_device_id men_z188_ids[] = { |
1514 | { .device = 0xbc }, |
1515 | + { } |
1516 | }; |
1517 | MODULE_DEVICE_TABLE(mcb, men_z188_ids); |
1518 | |
1519 | diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c |
1520 | index 4d35bc71e2d6..ed41a19907d3 100644 |
1521 | --- a/drivers/infiniband/ulp/isert/ib_isert.c |
1522 | +++ b/drivers/infiniband/ulp/isert/ib_isert.c |
1523 | @@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id, |
1524 | attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; |
1525 | /* |
1526 | * FIXME: Use devattr.max_sge - 2 for max_send_sge as |
1527 | - * work-around for RDMA_READ.. |
1528 | + * work-around for RDMA_READs with ConnectX-2. |
1529 | + * |
1530 | + * Also, still make sure to have at least two SGEs for |
1531 | + * outgoing control PDU responses. |
1532 | */ |
1533 | - attr.cap.max_send_sge = device->dev_attr.max_sge - 2; |
1534 | + attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2); |
1535 | isert_conn->max_sge = attr.cap.max_send_sge; |
1536 | |
1537 | attr.cap.max_recv_sge = 1; |
1538 | @@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device) |
1539 | struct isert_cq_desc *cq_desc; |
1540 | struct ib_device_attr *dev_attr; |
1541 | int ret = 0, i, j; |
1542 | + int max_rx_cqe, max_tx_cqe; |
1543 | |
1544 | dev_attr = &device->dev_attr; |
1545 | ret = isert_query_device(ib_dev, dev_attr); |
1546 | if (ret) |
1547 | return ret; |
1548 | |
1549 | + max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe); |
1550 | + max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe); |
1551 | + |
1552 | /* asign function handlers */ |
1553 | if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && |
1554 | dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { |
1555 | @@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device) |
1556 | isert_cq_rx_callback, |
1557 | isert_cq_event_callback, |
1558 | (void *)&cq_desc[i], |
1559 | - ISER_MAX_RX_CQ_LEN, i); |
1560 | + max_rx_cqe, i); |
1561 | if (IS_ERR(device->dev_rx_cq[i])) { |
1562 | ret = PTR_ERR(device->dev_rx_cq[i]); |
1563 | device->dev_rx_cq[i] = NULL; |
1564 | @@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device) |
1565 | isert_cq_tx_callback, |
1566 | isert_cq_event_callback, |
1567 | (void *)&cq_desc[i], |
1568 | - ISER_MAX_TX_CQ_LEN, i); |
1569 | + max_tx_cqe, i); |
1570 | if (IS_ERR(device->dev_tx_cq[i])) { |
1571 | ret = PTR_ERR(device->dev_tx_cq[i]); |
1572 | device->dev_tx_cq[i] = NULL; |
1573 | @@ -803,14 +810,25 @@ wake_up: |
1574 | complete(&isert_conn->conn_wait); |
1575 | } |
1576 | |
1577 | -static void |
1578 | +static int |
1579 | isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) |
1580 | { |
1581 | - struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; |
1582 | + struct isert_conn *isert_conn; |
1583 | + |
1584 | + if (!cma_id->qp) { |
1585 | + struct isert_np *isert_np = cma_id->context; |
1586 | + |
1587 | + isert_np->np_cm_id = NULL; |
1588 | + return -1; |
1589 | + } |
1590 | + |
1591 | + isert_conn = (struct isert_conn *)cma_id->context; |
1592 | |
1593 | isert_conn->disconnect = disconnect; |
1594 | INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); |
1595 | schedule_work(&isert_conn->conn_logout_work); |
1596 | + |
1597 | + return 0; |
1598 | } |
1599 | |
1600 | static int |
1601 | @@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
1602 | switch (event->event) { |
1603 | case RDMA_CM_EVENT_CONNECT_REQUEST: |
1604 | ret = isert_connect_request(cma_id, event); |
1605 | + if (ret) |
1606 | + pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", |
1607 | + event->event, ret); |
1608 | break; |
1609 | case RDMA_CM_EVENT_ESTABLISHED: |
1610 | isert_connected_handler(cma_id); |
1611 | @@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
1612 | case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ |
1613 | disconnect = true; |
1614 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ |
1615 | - isert_disconnected_handler(cma_id, disconnect); |
1616 | + ret = isert_disconnected_handler(cma_id, disconnect); |
1617 | break; |
1618 | case RDMA_CM_EVENT_CONNECT_ERROR: |
1619 | default: |
1620 | @@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) |
1621 | break; |
1622 | } |
1623 | |
1624 | - if (ret != 0) { |
1625 | - pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n", |
1626 | - event->event, ret); |
1627 | - dump_stack(); |
1628 | - } |
1629 | - |
1630 | return ret; |
1631 | } |
1632 | |
1633 | @@ -3203,7 +3218,8 @@ isert_free_np(struct iscsi_np *np) |
1634 | { |
1635 | struct isert_np *isert_np = (struct isert_np *)np->np_context; |
1636 | |
1637 | - rdma_destroy_id(isert_np->np_cm_id); |
1638 | + if (isert_np->np_cm_id) |
1639 | + rdma_destroy_id(isert_np->np_cm_id); |
1640 | |
1641 | np->np_context = NULL; |
1642 | kfree(isert_np); |
1643 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1644 | index d28a8c284da9..d1042ebf43bc 100644 |
1645 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
1646 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
1647 | @@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) |
1648 | if (!qp_init) |
1649 | goto out; |
1650 | |
1651 | +retry: |
1652 | ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, |
1653 | ch->rq_size + srp_sq_size, 0); |
1654 | if (IS_ERR(ch->cq)) { |
1655 | @@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch) |
1656 | ch->qp = ib_create_qp(sdev->pd, qp_init); |
1657 | if (IS_ERR(ch->qp)) { |
1658 | ret = PTR_ERR(ch->qp); |
1659 | + if (ret == -ENOMEM) { |
1660 | + srp_sq_size /= 2; |
1661 | + if (srp_sq_size >= MIN_SRPT_SQ_SIZE) { |
1662 | + ib_destroy_cq(ch->cq); |
1663 | + goto retry; |
1664 | + } |
1665 | + } |
1666 | printk(KERN_ERR "failed to create_qp ret= %d\n", ret); |
1667 | goto err_destroy_cq; |
1668 | } |
1669 | diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c |
1670 | index 177602cf7079..e65d9c0241a9 100644 |
1671 | --- a/drivers/input/joystick/xpad.c |
1672 | +++ b/drivers/input/joystick/xpad.c |
1673 | @@ -1143,9 +1143,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id |
1674 | } |
1675 | |
1676 | ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; |
1677 | - usb_fill_bulk_urb(xpad->bulk_out, udev, |
1678 | - usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), |
1679 | - xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); |
1680 | + if (usb_endpoint_is_bulk_out(ep_irq_in)) { |
1681 | + usb_fill_bulk_urb(xpad->bulk_out, udev, |
1682 | + usb_sndbulkpipe(udev, |
1683 | + ep_irq_in->bEndpointAddress), |
1684 | + xpad->bdata, XPAD_PKT_LEN, |
1685 | + xpad_bulk_out, xpad); |
1686 | + } else { |
1687 | + usb_fill_int_urb(xpad->bulk_out, udev, |
1688 | + usb_sndintpipe(udev, |
1689 | + ep_irq_in->bEndpointAddress), |
1690 | + xpad->bdata, XPAD_PKT_LEN, |
1691 | + xpad_bulk_out, xpad, 0); |
1692 | + } |
1693 | |
1694 | /* |
1695 | * Submit the int URB immediately rather than waiting for open |
1696 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
1697 | index 2e8f3ba7b2bd..3ebfb0386300 100644 |
1698 | --- a/drivers/input/mouse/synaptics.c |
1699 | +++ b/drivers/input/mouse/synaptics.c |
1700 | @@ -143,6 +143,10 @@ static const struct min_max_quirk min_max_pnpid_table[] = { |
1701 | (const char * const []){"LEN2001", NULL}, |
1702 | 1024, 5022, 2508, 4832 |
1703 | }, |
1704 | + { |
1705 | + (const char * const []){"LEN2006", NULL}, |
1706 | + 1264, 5675, 1171, 4688 |
1707 | + }, |
1708 | { } |
1709 | }; |
1710 | |
1711 | diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c |
1712 | index 6ae3cdee0681..cc4f9d80122e 100644 |
1713 | --- a/drivers/irqchip/irq-atmel-aic-common.c |
1714 | +++ b/drivers/irqchip/irq-atmel-aic-common.c |
1715 | @@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, |
1716 | } |
1717 | |
1718 | ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, |
1719 | - handle_level_irq, 0, 0, |
1720 | - IRQCHIP_SKIP_SET_WAKE); |
1721 | + handle_fasteoi_irq, |
1722 | + IRQ_NOREQUEST | IRQ_NOPROBE | |
1723 | + IRQ_NOAUTOEN, 0, 0); |
1724 | if (ret) |
1725 | goto err_domain_remove; |
1726 | |
1727 | @@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node, |
1728 | gc->unused = 0; |
1729 | gc->wake_enabled = ~0; |
1730 | gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; |
1731 | - gc->chip_types[0].handler = handle_fasteoi_irq; |
1732 | gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; |
1733 | gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; |
1734 | gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; |
1735 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
1736 | index 798ae69fb63c..5d2add761e30 100644 |
1737 | --- a/drivers/net/bonding/bond_main.c |
1738 | +++ b/drivers/net/bonding/bond_main.c |
1739 | @@ -2498,9 +2498,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) |
1740 | if (!rtnl_trylock()) |
1741 | goto re_arm; |
1742 | |
1743 | - if (slave_state_changed) { |
1744 | + if (slave_state_changed) |
1745 | bond_slave_state_change(bond); |
1746 | - } else if (do_failover) { |
1747 | + if (do_failover) { |
1748 | /* the bond_select_active_slave must hold RTNL |
1749 | * and curr_slave_lock for write. |
1750 | */ |
1751 | diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c |
1752 | index 9f91fcba43f8..6403503cf767 100644 |
1753 | --- a/drivers/net/can/dev.c |
1754 | +++ b/drivers/net/can/dev.c |
1755 | @@ -383,7 +383,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx) |
1756 | BUG_ON(idx >= priv->echo_skb_max); |
1757 | |
1758 | if (priv->echo_skb[idx]) { |
1759 | - kfree_skb(priv->echo_skb[idx]); |
1760 | + dev_kfree_skb_any(priv->echo_skb[idx]); |
1761 | priv->echo_skb[idx] = NULL; |
1762 | } |
1763 | } |
1764 | diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c |
1765 | index b7c9e8b11460..7a90075529c3 100644 |
1766 | --- a/drivers/net/can/usb/esd_usb2.c |
1767 | +++ b/drivers/net/can/usb/esd_usb2.c |
1768 | @@ -1143,6 +1143,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf) |
1769 | } |
1770 | } |
1771 | unlink_all_urbs(dev); |
1772 | + kfree(dev); |
1773 | } |
1774 | } |
1775 | |
1776 | diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1777 | index 87bd53fdd209..e82821fdf905 100644 |
1778 | --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1779 | +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c |
1780 | @@ -4059,8 +4059,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev) |
1781 | * if SR-IOV and VMDQ are disabled - otherwise ensure |
1782 | * that hardware VLAN filters remain enabled. |
1783 | */ |
1784 | - if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | |
1785 | - IXGBE_FLAG_SRIOV_ENABLED))) |
1786 | + if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | |
1787 | + IXGBE_FLAG_SRIOV_ENABLED)) |
1788 | vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); |
1789 | } else { |
1790 | if (netdev->flags & IFF_ALLMULTI) { |
1791 | @@ -8084,6 +8084,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1792 | int i, err, pci_using_dac, expected_gts; |
1793 | unsigned int indices = MAX_TX_QUEUES; |
1794 | u8 part_str[IXGBE_PBANUM_LENGTH]; |
1795 | + bool disable_dev = false; |
1796 | #ifdef IXGBE_FCOE |
1797 | u16 device_caps; |
1798 | #endif |
1799 | @@ -8477,13 +8478,14 @@ err_sw_init: |
1800 | iounmap(adapter->io_addr); |
1801 | kfree(adapter->mac_table); |
1802 | err_ioremap: |
1803 | + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); |
1804 | free_netdev(netdev); |
1805 | err_alloc_etherdev: |
1806 | pci_release_selected_regions(pdev, |
1807 | pci_select_bars(pdev, IORESOURCE_MEM)); |
1808 | err_pci_reg: |
1809 | err_dma: |
1810 | - if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) |
1811 | + if (!adapter || disable_dev) |
1812 | pci_disable_device(pdev); |
1813 | return err; |
1814 | } |
1815 | @@ -8501,6 +8503,7 @@ static void ixgbe_remove(struct pci_dev *pdev) |
1816 | { |
1817 | struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); |
1818 | struct net_device *netdev = adapter->netdev; |
1819 | + bool disable_dev; |
1820 | |
1821 | ixgbe_dbg_adapter_exit(adapter); |
1822 | |
1823 | @@ -8550,11 +8553,12 @@ static void ixgbe_remove(struct pci_dev *pdev) |
1824 | e_dev_info("complete\n"); |
1825 | |
1826 | kfree(adapter->mac_table); |
1827 | + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); |
1828 | free_netdev(netdev); |
1829 | |
1830 | pci_disable_pcie_error_reporting(pdev); |
1831 | |
1832 | - if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) |
1833 | + if (disable_dev) |
1834 | pci_disable_device(pdev); |
1835 | } |
1836 | |
1837 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
1838 | index abddcf8c40aa..8c4dc05388d6 100644 |
1839 | --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
1840 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
1841 | @@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev) |
1842 | mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); |
1843 | |
1844 | #ifdef CONFIG_MLX4_EN_VXLAN |
1845 | - if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) |
1846 | + if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
1847 | vxlan_get_rx_port(dev); |
1848 | #endif |
1849 | priv->port_up = true; |
1850 | @@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work) |
1851 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
1852 | VXLAN_STEER_BY_OUTER_MAC, 1); |
1853 | out: |
1854 | - if (ret) |
1855 | + if (ret) { |
1856 | en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); |
1857 | + return; |
1858 | + } |
1859 | + |
1860 | + /* set offloads */ |
1861 | + priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | |
1862 | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; |
1863 | + priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; |
1864 | + priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL; |
1865 | } |
1866 | |
1867 | static void mlx4_en_del_vxlan_offloads(struct work_struct *work) |
1868 | @@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work) |
1869 | int ret; |
1870 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, |
1871 | vxlan_del_task); |
1872 | + /* unset offloads */ |
1873 | + priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | |
1874 | + NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); |
1875 | + priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL; |
1876 | + priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL; |
1877 | |
1878 | ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, |
1879 | VXLAN_STEER_BY_OUTER_MAC, 0); |
1880 | @@ -2403,6 +2416,10 @@ static const struct net_device_ops mlx4_netdev_ops_master = { |
1881 | .ndo_rx_flow_steer = mlx4_en_filter_rfs, |
1882 | #endif |
1883 | .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, |
1884 | +#ifdef CONFIG_MLX4_EN_VXLAN |
1885 | + .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, |
1886 | + .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, |
1887 | +#endif |
1888 | }; |
1889 | |
1890 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
1891 | @@ -2567,13 +2584,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
1892 | if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) |
1893 | dev->priv_flags |= IFF_UNICAST_FLT; |
1894 | |
1895 | - if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { |
1896 | - dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | |
1897 | - NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; |
1898 | - dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; |
1899 | - dev->features |= NETIF_F_GSO_UDP_TUNNEL; |
1900 | - } |
1901 | - |
1902 | mdev->pndev[port] = dev; |
1903 | |
1904 | netif_carrier_off(dev); |
1905 | diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c |
1906 | index 9ce854f43917..6cbc56ad9ff4 100644 |
1907 | --- a/drivers/net/ieee802154/fakehard.c |
1908 | +++ b/drivers/net/ieee802154/fakehard.c |
1909 | @@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) |
1910 | |
1911 | err = wpan_phy_register(phy); |
1912 | if (err) |
1913 | - goto out; |
1914 | + goto err_phy_reg; |
1915 | |
1916 | err = register_netdev(dev); |
1917 | - if (err < 0) |
1918 | - goto out; |
1919 | + if (err) |
1920 | + goto err_netdev_reg; |
1921 | |
1922 | dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); |
1923 | return 0; |
1924 | |
1925 | -out: |
1926 | - unregister_netdev(dev); |
1927 | +err_netdev_reg: |
1928 | + wpan_phy_unregister(phy); |
1929 | +err_phy_reg: |
1930 | + free_netdev(dev); |
1931 | + wpan_phy_free(phy); |
1932 | return err; |
1933 | } |
1934 | |
1935 | diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c |
1936 | index 1aff970be33e..1dc628ffce2b 100644 |
1937 | --- a/drivers/net/ppp/pptp.c |
1938 | +++ b/drivers/net/ppp/pptp.c |
1939 | @@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, |
1940 | int len = sizeof(struct sockaddr_pppox); |
1941 | struct sockaddr_pppox sp; |
1942 | |
1943 | - sp.sa_family = AF_PPPOX; |
1944 | + memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); |
1945 | + |
1946 | + sp.sa_family = AF_PPPOX; |
1947 | sp.sa_protocol = PX_PROTO_PPTP; |
1948 | sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; |
1949 | |
1950 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
1951 | index 22756db53dca..b8a82b86f909 100644 |
1952 | --- a/drivers/net/usb/qmi_wwan.c |
1953 | +++ b/drivers/net/usb/qmi_wwan.c |
1954 | @@ -780,6 +780,7 @@ static const struct usb_device_id products[] = { |
1955 | {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ |
1956 | {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ |
1957 | {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
1958 | + {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ |
1959 | |
1960 | /* 4. Gobi 1000 devices */ |
1961 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
1962 | diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c |
1963 | index 542a8d51d3b0..5d1ac1b0103d 100644 |
1964 | --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c |
1965 | +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c |
1966 | @@ -647,6 +647,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah) |
1967 | ah->enabled_cals |= TX_CL_CAL; |
1968 | else |
1969 | ah->enabled_cals &= ~TX_CL_CAL; |
1970 | + |
1971 | + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) { |
1972 | + if (ah->is_clk_25mhz) { |
1973 | + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); |
1974 | + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); |
1975 | + REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); |
1976 | + } else { |
1977 | + REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); |
1978 | + REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); |
1979 | + REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); |
1980 | + } |
1981 | + udelay(100); |
1982 | + } |
1983 | } |
1984 | |
1985 | static void ar9003_hw_prog_ini(struct ath_hw *ah, |
1986 | diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c |
1987 | index 69bbea1184d2..97569cdb3a8b 100644 |
1988 | --- a/drivers/net/wireless/ath/ath9k/hw.c |
1989 | +++ b/drivers/net/wireless/ath/ath9k/hw.c |
1990 | @@ -859,19 +859,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah, |
1991 | udelay(RTC_PLL_SETTLE_DELAY); |
1992 | |
1993 | REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); |
1994 | - |
1995 | - if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) { |
1996 | - if (ah->is_clk_25mhz) { |
1997 | - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1); |
1998 | - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7); |
1999 | - REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae); |
2000 | - } else { |
2001 | - REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1); |
2002 | - REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400); |
2003 | - REG_WRITE(ah, AR_SLP32_INC, 0x0001e800); |
2004 | - } |
2005 | - udelay(100); |
2006 | - } |
2007 | } |
2008 | |
2009 | static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, |
2010 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c |
2011 | index f05f5270fec1..927bffd5be64 100644 |
2012 | --- a/drivers/net/wireless/brcm80211/brcmfmac/of.c |
2013 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c |
2014 | @@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) |
2015 | return; |
2016 | |
2017 | irq = irq_of_parse_and_map(np, 0); |
2018 | - if (irq < 0) { |
2019 | - brcmf_err("interrupt could not be mapped: err=%d\n", irq); |
2020 | + if (!irq) { |
2021 | + brcmf_err("interrupt could not be mapped\n"); |
2022 | devm_kfree(dev, sdiodev->pdata); |
2023 | return; |
2024 | } |
2025 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c |
2026 | index e5101b287e4e..fb30a09b3b0d 100644 |
2027 | --- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c |
2028 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c |
2029 | @@ -19,10 +19,10 @@ |
2030 | #include <linux/pci.h> |
2031 | #include <linux/vmalloc.h> |
2032 | #include <linux/delay.h> |
2033 | -#include <linux/unaligned/access_ok.h> |
2034 | #include <linux/interrupt.h> |
2035 | #include <linux/bcma/bcma.h> |
2036 | #include <linux/sched.h> |
2037 | +#include <asm/unaligned.h> |
2038 | |
2039 | #include <soc.h> |
2040 | #include <chipcommon.h> |
2041 | diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c |
2042 | index 16a246bfc343..952c7e15a6e6 100644 |
2043 | --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c |
2044 | +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c |
2045 | @@ -298,6 +298,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, |
2046 | primary_offset = ch->center_freq1 - ch->chan->center_freq; |
2047 | switch (ch->width) { |
2048 | case NL80211_CHAN_WIDTH_20: |
2049 | + case NL80211_CHAN_WIDTH_20_NOHT: |
2050 | ch_inf.bw = BRCMU_CHAN_BW_20; |
2051 | WARN_ON(primary_offset != 0); |
2052 | break; |
2053 | @@ -322,6 +323,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, |
2054 | ch_inf.sb = BRCMU_CHAN_SB_LU; |
2055 | } |
2056 | break; |
2057 | + case NL80211_CHAN_WIDTH_80P80: |
2058 | + case NL80211_CHAN_WIDTH_160: |
2059 | + case NL80211_CHAN_WIDTH_5: |
2060 | + case NL80211_CHAN_WIDTH_10: |
2061 | default: |
2062 | WARN_ON_ONCE(1); |
2063 | } |
2064 | @@ -332,6 +337,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, |
2065 | case IEEE80211_BAND_5GHZ: |
2066 | ch_inf.band = BRCMU_CHAN_BAND_5G; |
2067 | break; |
2068 | + case IEEE80211_BAND_60GHZ: |
2069 | default: |
2070 | WARN_ON_ONCE(1); |
2071 | } |
2072 | diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h |
2073 | index 1bb5193c5b1b..f03fe0ce9883 100644 |
2074 | --- a/drivers/net/wireless/iwlwifi/iwl-fw.h |
2075 | +++ b/drivers/net/wireless/iwlwifi/iwl-fw.h |
2076 | @@ -138,9 +138,11 @@ enum iwl_ucode_tlv_api { |
2077 | /** |
2078 | * enum iwl_ucode_tlv_capa - ucode capabilities |
2079 | * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3 |
2080 | + * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command |
2081 | */ |
2082 | enum iwl_ucode_tlv_capa { |
2083 | - IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), |
2084 | + IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), |
2085 | + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), |
2086 | }; |
2087 | |
2088 | /* The default calibrate table size if not specified by firmware file */ |
2089 | diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2090 | index 26de13bb78a8..f8cafc783d56 100644 |
2091 | --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2092 | +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c |
2093 | @@ -515,7 +515,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, |
2094 | } |
2095 | |
2096 | if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && |
2097 | - !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) |
2098 | + !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && |
2099 | + !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) |
2100 | goto drop; |
2101 | |
2102 | /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ |
2103 | @@ -2403,14 +2404,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, |
2104 | /* Set the node address */ |
2105 | memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); |
2106 | |
2107 | + lockdep_assert_held(&mvm->mutex); |
2108 | + |
2109 | + spin_lock_bh(&mvm->time_event_lock); |
2110 | + |
2111 | + if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { |
2112 | + spin_unlock_bh(&mvm->time_event_lock); |
2113 | + return -EIO; |
2114 | + } |
2115 | + |
2116 | te_data->vif = vif; |
2117 | te_data->duration = duration; |
2118 | te_data->id = HOT_SPOT_CMD; |
2119 | |
2120 | - lockdep_assert_held(&mvm->mutex); |
2121 | - |
2122 | - spin_lock_bh(&mvm->time_event_lock); |
2123 | - list_add_tail(&te_data->list, &mvm->time_event_list); |
2124 | spin_unlock_bh(&mvm->time_event_lock); |
2125 | |
2126 | /* |
2127 | @@ -2466,22 +2472,29 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw, |
2128 | IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, |
2129 | duration, type); |
2130 | |
2131 | + mutex_lock(&mvm->mutex); |
2132 | + |
2133 | switch (vif->type) { |
2134 | case NL80211_IFTYPE_STATION: |
2135 | - /* Use aux roc framework (HS20) */ |
2136 | - ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, |
2137 | - vif, duration); |
2138 | - return ret; |
2139 | + if (mvm->fw->ucode_capa.capa[0] & |
2140 | + IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) { |
2141 | + /* Use aux roc framework (HS20) */ |
2142 | + ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, |
2143 | + vif, duration); |
2144 | + goto out_unlock; |
2145 | + } |
2146 | + IWL_ERR(mvm, "hotspot not supported\n"); |
2147 | + ret = -EINVAL; |
2148 | + goto out_unlock; |
2149 | case NL80211_IFTYPE_P2P_DEVICE: |
2150 | /* handle below */ |
2151 | break; |
2152 | default: |
2153 | IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); |
2154 | - return -EINVAL; |
2155 | + ret = -EINVAL; |
2156 | + goto out_unlock; |
2157 | } |
2158 | |
2159 | - mutex_lock(&mvm->mutex); |
2160 | - |
2161 | for (i = 0; i < NUM_PHY_CTX; i++) { |
2162 | phy_ctxt = &mvm->phy_ctxts[i]; |
2163 | if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) |
2164 | diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c |
2165 | index 33e5041f1efc..7229db54df7f 100644 |
2166 | --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c |
2167 | +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c |
2168 | @@ -303,8 +303,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, |
2169 | te_data->running = false; |
2170 | te_data->vif = NULL; |
2171 | te_data->uid = 0; |
2172 | + te_data->id = TE_MAX; |
2173 | } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { |
2174 | - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); |
2175 | set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); |
2176 | te_data->running = true; |
2177 | ieee80211_ready_on_channel(mvm->hw); /* Start TE */ |
2178 | diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c |
2179 | index 6c02467a37c8..bb36d67a7ae9 100644 |
2180 | --- a/drivers/net/wireless/iwlwifi/pcie/trans.c |
2181 | +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c |
2182 | @@ -1891,8 +1891,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans, |
2183 | int reg; |
2184 | __le32 *val; |
2185 | |
2186 | - prph_len += sizeof(*data) + sizeof(*prph) + |
2187 | - num_bytes_in_chunk; |
2188 | + prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk; |
2189 | |
2190 | (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); |
2191 | (*data)->len = cpu_to_le32(sizeof(*prph) + |
2192 | diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c |
2193 | index 8e68f87ab13c..66ff36447b94 100644 |
2194 | --- a/drivers/net/wireless/rt2x00/rt2x00queue.c |
2195 | +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c |
2196 | @@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb) |
2197 | skb_trim(skb, frame_length); |
2198 | } |
2199 | |
2200 | -void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) |
2201 | +/* |
2202 | + * H/W needs L2 padding between the header and the paylod if header size |
2203 | + * is not 4 bytes aligned. |
2204 | + */ |
2205 | +void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) |
2206 | { |
2207 | - unsigned int payload_length = skb->len - header_length; |
2208 | - unsigned int header_align = ALIGN_SIZE(skb, 0); |
2209 | - unsigned int payload_align = ALIGN_SIZE(skb, header_length); |
2210 | - unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0; |
2211 | + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; |
2212 | |
2213 | - /* |
2214 | - * Adjust the header alignment if the payload needs to be moved more |
2215 | - * than the header. |
2216 | - */ |
2217 | - if (payload_align > header_align) |
2218 | - header_align += 4; |
2219 | - |
2220 | - /* There is nothing to do if no alignment is needed */ |
2221 | - if (!header_align) |
2222 | + if (!l2pad) |
2223 | return; |
2224 | |
2225 | - /* Reserve the amount of space needed in front of the frame */ |
2226 | - skb_push(skb, header_align); |
2227 | - |
2228 | - /* |
2229 | - * Move the header. |
2230 | - */ |
2231 | - memmove(skb->data, skb->data + header_align, header_length); |
2232 | - |
2233 | - /* Move the payload, if present and if required */ |
2234 | - if (payload_length && payload_align) |
2235 | - memmove(skb->data + header_length + l2pad, |
2236 | - skb->data + header_length + l2pad + payload_align, |
2237 | - payload_length); |
2238 | - |
2239 | - /* Trim the skb to the correct size */ |
2240 | - skb_trim(skb, header_length + l2pad + payload_length); |
2241 | + skb_push(skb, l2pad); |
2242 | + memmove(skb->data, skb->data + l2pad, hdr_len); |
2243 | } |
2244 | |
2245 | -void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) |
2246 | +void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) |
2247 | { |
2248 | - /* |
2249 | - * L2 padding is only present if the skb contains more than just the |
2250 | - * IEEE 802.11 header. |
2251 | - */ |
2252 | - unsigned int l2pad = (skb->len > header_length) ? |
2253 | - L2PAD_SIZE(header_length) : 0; |
2254 | + unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; |
2255 | |
2256 | if (!l2pad) |
2257 | return; |
2258 | |
2259 | - memmove(skb->data + l2pad, skb->data, header_length); |
2260 | + memmove(skb->data + l2pad, skb->data, hdr_len); |
2261 | skb_pull(skb, l2pad); |
2262 | } |
2263 | |
2264 | diff --git a/drivers/of/address.c b/drivers/of/address.c |
2265 | index e3718250d66e..f0541fd6865d 100644 |
2266 | --- a/drivers/of/address.c |
2267 | +++ b/drivers/of/address.c |
2268 | @@ -403,6 +403,21 @@ static struct of_bus *of_match_bus(struct device_node *np) |
2269 | return NULL; |
2270 | } |
2271 | |
2272 | +static int of_empty_ranges_quirk(void) |
2273 | +{ |
2274 | + if (IS_ENABLED(CONFIG_PPC)) { |
2275 | + /* To save cycles, we cache the result */ |
2276 | + static int quirk_state = -1; |
2277 | + |
2278 | + if (quirk_state < 0) |
2279 | + quirk_state = |
2280 | + of_machine_is_compatible("Power Macintosh") || |
2281 | + of_machine_is_compatible("MacRISC"); |
2282 | + return quirk_state; |
2283 | + } |
2284 | + return false; |
2285 | +} |
2286 | + |
2287 | static int of_translate_one(struct device_node *parent, struct of_bus *bus, |
2288 | struct of_bus *pbus, __be32 *addr, |
2289 | int na, int ns, int pna, const char *rprop) |
2290 | @@ -428,12 +443,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, |
2291 | * This code is only enabled on powerpc. --gcl |
2292 | */ |
2293 | ranges = of_get_property(parent, rprop, &rlen); |
2294 | -#if !defined(CONFIG_PPC) |
2295 | - if (ranges == NULL) { |
2296 | + if (ranges == NULL && !of_empty_ranges_quirk()) { |
2297 | pr_err("OF: no ranges; cannot translate\n"); |
2298 | return 1; |
2299 | } |
2300 | -#endif /* !defined(CONFIG_PPC) */ |
2301 | if (ranges == NULL || rlen == 0) { |
2302 | offset = of_read_number(addr, na); |
2303 | memset(addr, 0, pna * 4); |
2304 | diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c |
2305 | index d1ffca8b34ea..30e97bcc4f88 100644 |
2306 | --- a/drivers/of/fdt.c |
2307 | +++ b/drivers/of/fdt.c |
2308 | @@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void) |
2309 | if (offset < 0) |
2310 | return -ENODEV; |
2311 | |
2312 | - while (match->compatible) { |
2313 | + while (match->compatible[0]) { |
2314 | unsigned long addr; |
2315 | if (fdt_node_check_compatible(fdt, offset, match->compatible)) { |
2316 | match++; |
2317 | diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c |
2318 | index c92de69fcf7f..612a51214a66 100644 |
2319 | --- a/drivers/of/selftest.c |
2320 | +++ b/drivers/of/selftest.c |
2321 | @@ -799,7 +799,7 @@ static void selftest_data_remove(void) |
2322 | return; |
2323 | } |
2324 | |
2325 | - while (last_node_index >= 0) { |
2326 | + while (last_node_index-- > 0) { |
2327 | if (nodes[last_node_index]) { |
2328 | np = of_find_node_by_path(nodes[last_node_index]->full_name); |
2329 | if (strcmp(np->full_name, "/aliases") != 0) { |
2330 | @@ -812,7 +812,6 @@ static void selftest_data_remove(void) |
2331 | } |
2332 | } |
2333 | } |
2334 | - last_node_index--; |
2335 | } |
2336 | } |
2337 | |
2338 | diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c |
2339 | index 5a40516444f3..6807eddfeb4c 100644 |
2340 | --- a/drivers/pci/msi.c |
2341 | +++ b/drivers/pci/msi.c |
2342 | @@ -610,6 +610,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) |
2343 | return entry; |
2344 | } |
2345 | |
2346 | +static int msi_verify_entries(struct pci_dev *dev) |
2347 | +{ |
2348 | + struct msi_desc *entry; |
2349 | + |
2350 | + list_for_each_entry(entry, &dev->msi_list, list) { |
2351 | + if (!dev->no_64bit_msi || !entry->msg.address_hi) |
2352 | + continue; |
2353 | + dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" |
2354 | + " tried to assign one above 4G\n"); |
2355 | + return -EIO; |
2356 | + } |
2357 | + return 0; |
2358 | +} |
2359 | + |
2360 | /** |
2361 | * msi_capability_init - configure device's MSI capability structure |
2362 | * @dev: pointer to the pci_dev data structure of MSI device function |
2363 | @@ -647,6 +661,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) |
2364 | return ret; |
2365 | } |
2366 | |
2367 | + ret = msi_verify_entries(dev); |
2368 | + if (ret) { |
2369 | + msi_mask_irq(entry, mask, ~mask); |
2370 | + free_msi_irqs(dev); |
2371 | + return ret; |
2372 | + } |
2373 | + |
2374 | ret = populate_msi_sysfs(dev); |
2375 | if (ret) { |
2376 | msi_mask_irq(entry, mask, ~mask); |
2377 | @@ -760,6 +781,11 @@ static int msix_capability_init(struct pci_dev *dev, |
2378 | if (ret) |
2379 | goto out_avail; |
2380 | |
2381 | + /* Check if all MSI entries honor device restrictions */ |
2382 | + ret = msi_verify_entries(dev); |
2383 | + if (ret) |
2384 | + goto out_free; |
2385 | + |
2386 | /* |
2387 | * Some devices require MSI-X to be enabled before we can touch the |
2388 | * MSI-X registers. We need to mask all the vectors to prevent |
2389 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
2390 | index 4170113cde61..9cce960029aa 100644 |
2391 | --- a/drivers/pci/probe.c |
2392 | +++ b/drivers/pci/probe.c |
2393 | @@ -406,15 +406,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) |
2394 | { |
2395 | struct pci_dev *dev = child->self; |
2396 | u16 mem_base_lo, mem_limit_lo; |
2397 | - unsigned long base, limit; |
2398 | + u64 base64, limit64; |
2399 | + dma_addr_t base, limit; |
2400 | struct pci_bus_region region; |
2401 | struct resource *res; |
2402 | |
2403 | res = child->resource[2]; |
2404 | pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); |
2405 | pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); |
2406 | - base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; |
2407 | - limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; |
2408 | + base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; |
2409 | + limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; |
2410 | |
2411 | if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { |
2412 | u32 mem_base_hi, mem_limit_hi; |
2413 | @@ -428,17 +429,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) |
2414 | * this, just assume they are not being used. |
2415 | */ |
2416 | if (mem_base_hi <= mem_limit_hi) { |
2417 | -#if BITS_PER_LONG == 64 |
2418 | - base |= ((unsigned long) mem_base_hi) << 32; |
2419 | - limit |= ((unsigned long) mem_limit_hi) << 32; |
2420 | -#else |
2421 | - if (mem_base_hi || mem_limit_hi) { |
2422 | - dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n"); |
2423 | - return; |
2424 | - } |
2425 | -#endif |
2426 | + base64 |= (u64) mem_base_hi << 32; |
2427 | + limit64 |= (u64) mem_limit_hi << 32; |
2428 | } |
2429 | } |
2430 | + |
2431 | + base = (dma_addr_t) base64; |
2432 | + limit = (dma_addr_t) limit64; |
2433 | + |
2434 | + if (base != base64) { |
2435 | + dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", |
2436 | + (unsigned long long) base64); |
2437 | + return; |
2438 | + } |
2439 | + |
2440 | if (base <= limit) { |
2441 | res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | |
2442 | IORESOURCE_MEM | IORESOURCE_PREFETCH; |
2443 | diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
2444 | index 79e5c94107a9..72533c58c1f3 100644 |
2445 | --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
2446 | +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
2447 | @@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, |
2448 | struct fc_frame_header *fh; |
2449 | struct fcoe_rcv_info *fr; |
2450 | struct fcoe_percpu_s *bg; |
2451 | + struct sk_buff *tmp_skb; |
2452 | unsigned short oxid; |
2453 | |
2454 | interface = container_of(ptype, struct bnx2fc_interface, |
2455 | @@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, |
2456 | goto err; |
2457 | } |
2458 | |
2459 | + tmp_skb = skb_share_check(skb, GFP_ATOMIC); |
2460 | + if (!tmp_skb) |
2461 | + goto err; |
2462 | + |
2463 | + skb = tmp_skb; |
2464 | + |
2465 | if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { |
2466 | printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); |
2467 | goto err; |
2468 | diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c |
2469 | index 49014a143c6a..c1d04d4d3c6c 100644 |
2470 | --- a/drivers/scsi/scsi_devinfo.c |
2471 | +++ b/drivers/scsi/scsi_devinfo.c |
2472 | @@ -202,6 +202,7 @@ static struct { |
2473 | {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, |
2474 | {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, |
2475 | {"INSITE", "I325VM", NULL, BLIST_KEY}, |
2476 | + {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, |
2477 | {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, |
2478 | {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, |
2479 | {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
2480 | diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c |
2481 | index 0dd0623319b0..080e0a343675 100644 |
2482 | --- a/drivers/spi/spi-dw.c |
2483 | +++ b/drivers/spi/spi-dw.c |
2484 | @@ -382,9 +382,6 @@ static void pump_transfers(unsigned long data) |
2485 | chip = dws->cur_chip; |
2486 | spi = message->spi; |
2487 | |
2488 | - if (unlikely(!chip->clk_div)) |
2489 | - chip->clk_div = dws->max_freq / chip->speed_hz; |
2490 | - |
2491 | if (message->state == ERROR_STATE) { |
2492 | message->status = -EIO; |
2493 | goto early_exit; |
2494 | @@ -425,7 +422,7 @@ static void pump_transfers(unsigned long data) |
2495 | if (transfer->speed_hz) { |
2496 | speed = chip->speed_hz; |
2497 | |
2498 | - if (transfer->speed_hz != speed) { |
2499 | + if ((transfer->speed_hz != speed) || (!chip->clk_div)) { |
2500 | speed = transfer->speed_hz; |
2501 | |
2502 | /* clk_div doesn't support odd number */ |
2503 | @@ -586,7 +583,6 @@ static int dw_spi_setup(struct spi_device *spi) |
2504 | dev_err(&spi->dev, "No max speed HZ parameter\n"); |
2505 | return -EINVAL; |
2506 | } |
2507 | - chip->speed_hz = spi->max_speed_hz; |
2508 | |
2509 | chip->tmode = 0; /* Tx & Rx */ |
2510 | /* Default SPI mode is SCPOL = 0, SCPH = 0 */ |
2511 | diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c |
2512 | index 6f0602fd7401..ca67bcf9475a 100644 |
2513 | --- a/drivers/spi/spi-sirf.c |
2514 | +++ b/drivers/spi/spi-sirf.c |
2515 | @@ -565,9 +565,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) |
2516 | |
2517 | sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); |
2518 | txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | |
2519 | - sspi->word_width; |
2520 | + (sspi->word_width >> 1); |
2521 | rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | |
2522 | - sspi->word_width; |
2523 | + (sspi->word_width >> 1); |
2524 | |
2525 | if (!(spi->mode & SPI_CS_HIGH)) |
2526 | regval |= SIRFSOC_SPI_CS_IDLE_STAT; |
2527 | diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c |
2528 | index ca935df80c88..2bf2dfa609dd 100644 |
2529 | --- a/drivers/spi/spi.c |
2530 | +++ b/drivers/spi/spi.c |
2531 | @@ -609,13 +609,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev, |
2532 | sg_free_table(sgt); |
2533 | return -ENOMEM; |
2534 | } |
2535 | - sg_buf = page_address(vm_page) + |
2536 | - ((size_t)buf & ~PAGE_MASK); |
2537 | + sg_set_page(&sgt->sgl[i], vm_page, |
2538 | + min, offset_in_page(buf)); |
2539 | } else { |
2540 | sg_buf = buf; |
2541 | + sg_set_buf(&sgt->sgl[i], sg_buf, min); |
2542 | } |
2543 | |
2544 | - sg_set_buf(&sgt->sgl[i], sg_buf, min); |
2545 | |
2546 | buf += min; |
2547 | len -= min; |
2548 | diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2549 | index 407a318b09db..2f87150a21b7 100644 |
2550 | --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2551 | +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2552 | @@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { |
2553 | {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ |
2554 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ |
2555 | {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ |
2556 | + {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ |
2557 | {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ |
2558 | {} /* Terminating entry */ |
2559 | }; |
2560 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
2561 | index ab610146681d..a9c77b5116e3 100644 |
2562 | --- a/drivers/target/target_core_transport.c |
2563 | +++ b/drivers/target/target_core_transport.c |
2564 | @@ -2295,7 +2295,7 @@ transport_generic_new_cmd(struct se_cmd *cmd) |
2565 | * and let it call back once the write buffers are ready. |
2566 | */ |
2567 | target_add_to_state_list(cmd); |
2568 | - if (cmd->data_direction != DMA_TO_DEVICE) { |
2569 | + if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { |
2570 | target_execute_cmd(cmd); |
2571 | return 0; |
2572 | } |
2573 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
2574 | index 39b4081b632d..96fafed92b76 100644 |
2575 | --- a/drivers/usb/core/quirks.c |
2576 | +++ b/drivers/usb/core/quirks.c |
2577 | @@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
2578 | /* Creative SB Audigy 2 NX */ |
2579 | { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, |
2580 | |
2581 | + /* Microsoft Wireless Laser Mouse 6000 Receiver */ |
2582 | + { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME }, |
2583 | + |
2584 | /* Microsoft LifeCam-VX700 v2.0 */ |
2585 | { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, |
2586 | |
2587 | diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c |
2588 | index 69aece31143a..64f0ddac957f 100644 |
2589 | --- a/drivers/usb/host/xhci-hub.c |
2590 | +++ b/drivers/usb/host/xhci-hub.c |
2591 | @@ -22,7 +22,6 @@ |
2592 | |
2593 | |
2594 | #include <linux/slab.h> |
2595 | -#include <linux/device.h> |
2596 | #include <asm/unaligned.h> |
2597 | |
2598 | #include "xhci.h" |
2599 | @@ -1142,9 +1141,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) |
2600 | * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME |
2601 | * is enabled, so also enable remote wake here. |
2602 | */ |
2603 | - if (hcd->self.root_hub->do_remote_wakeup |
2604 | - && device_may_wakeup(hcd->self.controller)) { |
2605 | - |
2606 | + if (hcd->self.root_hub->do_remote_wakeup) { |
2607 | if (t1 & PORT_CONNECT) { |
2608 | t2 |= PORT_WKOC_E | PORT_WKDISC_E; |
2609 | t2 &= ~PORT_WKCONN_E; |
2610 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
2611 | index d125568d73ff..d41ce7aaa97d 100644 |
2612 | --- a/drivers/usb/host/xhci-pci.c |
2613 | +++ b/drivers/usb/host/xhci-pci.c |
2614 | @@ -279,7 +279,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) |
2615 | if (xhci_compliance_mode_recovery_timer_quirk_check()) |
2616 | pdev->no_d3cold = true; |
2617 | |
2618 | - return xhci_suspend(xhci); |
2619 | + return xhci_suspend(xhci, do_wakeup); |
2620 | } |
2621 | |
2622 | static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) |
2623 | diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c |
2624 | index 1a0cf9f31e43..dcfddeb6c56a 100644 |
2625 | --- a/drivers/usb/host/xhci-plat.c |
2626 | +++ b/drivers/usb/host/xhci-plat.c |
2627 | @@ -255,7 +255,15 @@ static int xhci_plat_suspend(struct device *dev) |
2628 | struct usb_hcd *hcd = dev_get_drvdata(dev); |
2629 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
2630 | |
2631 | - return xhci_suspend(xhci); |
2632 | + /* |
2633 | + * xhci_suspend() needs `do_wakeup` to know whether host is allowed |
2634 | + * to do wakeup during suspend. Since xhci_plat_suspend is currently |
2635 | + * only designed for system suspend, device_may_wakeup() is enough |
2636 | + * to dertermine whether host is allowed to do wakeup. Need to |
2637 | + * reconsider this when xhci_plat_suspend enlarges its scope, e.g., |
2638 | + * also applies to runtime suspend. |
2639 | + */ |
2640 | + return xhci_suspend(xhci, device_may_wakeup(dev)); |
2641 | } |
2642 | |
2643 | static int xhci_plat_resume(struct device *dev) |
2644 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
2645 | index abed30b82905..08f00fe6dcf2 100644 |
2646 | --- a/drivers/usb/host/xhci-ring.c |
2647 | +++ b/drivers/usb/host/xhci-ring.c |
2648 | @@ -1106,9 +1106,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, |
2649 | false); |
2650 | xhci_ring_cmd_db(xhci); |
2651 | } else { |
2652 | - /* Clear our internal halted state and restart the ring(s) */ |
2653 | + /* Clear our internal halted state */ |
2654 | xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; |
2655 | - ring_doorbell_for_active_rings(xhci, slot_id, ep_index); |
2656 | } |
2657 | } |
2658 | |
2659 | @@ -1851,22 +1850,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td, |
2660 | ep->stopped_td = td; |
2661 | return 0; |
2662 | } else { |
2663 | - if (trb_comp_code == COMP_STALL) { |
2664 | - /* The transfer is completed from the driver's |
2665 | - * perspective, but we need to issue a set dequeue |
2666 | - * command for this stalled endpoint to move the dequeue |
2667 | - * pointer past the TD. We can't do that here because |
2668 | - * the halt condition must be cleared first. Let the |
2669 | - * USB class driver clear the stall later. |
2670 | - */ |
2671 | - ep->stopped_td = td; |
2672 | - ep->stopped_stream = ep_ring->stream_id; |
2673 | - } else if (xhci_requires_manual_halt_cleanup(xhci, |
2674 | - ep_ctx, trb_comp_code)) { |
2675 | - /* Other types of errors halt the endpoint, but the |
2676 | - * class driver doesn't call usb_reset_endpoint() unless |
2677 | - * the error is -EPIPE. Clear the halted status in the |
2678 | - * xHCI hardware manually. |
2679 | + if (trb_comp_code == COMP_STALL || |
2680 | + xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2681 | + trb_comp_code)) { |
2682 | + /* Issue a reset endpoint command to clear the host side |
2683 | + * halt, followed by a set dequeue command to move the |
2684 | + * dequeue pointer past the TD. |
2685 | + * The class driver clears the device side halt later. |
2686 | */ |
2687 | xhci_cleanup_halted_endpoint(xhci, |
2688 | slot_id, ep_index, ep_ring->stream_id, |
2689 | @@ -1986,9 +1976,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, |
2690 | else |
2691 | td->urb->actual_length = 0; |
2692 | |
2693 | - xhci_cleanup_halted_endpoint(xhci, |
2694 | - slot_id, ep_index, 0, td, event_trb); |
2695 | - return finish_td(xhci, td, event_trb, event, ep, status, true); |
2696 | + return finish_td(xhci, td, event_trb, event, ep, status, false); |
2697 | } |
2698 | /* |
2699 | * Did we transfer any data, despite the errors that might have |
2700 | @@ -2542,17 +2530,8 @@ cleanup: |
2701 | if (ret) { |
2702 | urb = td->urb; |
2703 | urb_priv = urb->hcpriv; |
2704 | - /* Leave the TD around for the reset endpoint function |
2705 | - * to use(but only if it's not a control endpoint, |
2706 | - * since we already queued the Set TR dequeue pointer |
2707 | - * command for stalled control endpoints). |
2708 | - */ |
2709 | - if (usb_endpoint_xfer_control(&urb->ep->desc) || |
2710 | - (trb_comp_code != COMP_STALL && |
2711 | - trb_comp_code != COMP_BABBLE)) |
2712 | - xhci_urb_free_priv(xhci, urb_priv); |
2713 | - else |
2714 | - kfree(urb_priv); |
2715 | + |
2716 | + xhci_urb_free_priv(xhci, urb_priv); |
2717 | |
2718 | usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); |
2719 | if ((urb->actual_length != urb->transfer_buffer_length && |
2720 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
2721 | index c4a8fca8ae93..48eec36edfa5 100644 |
2722 | --- a/drivers/usb/host/xhci.c |
2723 | +++ b/drivers/usb/host/xhci.c |
2724 | @@ -35,6 +35,8 @@ |
2725 | #define DRIVER_AUTHOR "Sarah Sharp" |
2726 | #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" |
2727 | |
2728 | +#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E) |
2729 | + |
2730 | /* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ |
2731 | static int link_quirk; |
2732 | module_param(link_quirk, int, S_IRUGO | S_IWUSR); |
2733 | @@ -850,13 +852,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) |
2734 | xhci_set_cmd_ring_deq(xhci); |
2735 | } |
2736 | |
2737 | +static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci) |
2738 | +{ |
2739 | + int port_index; |
2740 | + __le32 __iomem **port_array; |
2741 | + unsigned long flags; |
2742 | + u32 t1, t2; |
2743 | + |
2744 | + spin_lock_irqsave(&xhci->lock, flags); |
2745 | + |
2746 | + /* disble usb3 ports Wake bits*/ |
2747 | + port_index = xhci->num_usb3_ports; |
2748 | + port_array = xhci->usb3_ports; |
2749 | + while (port_index--) { |
2750 | + t1 = readl(port_array[port_index]); |
2751 | + t1 = xhci_port_state_to_neutral(t1); |
2752 | + t2 = t1 & ~PORT_WAKE_BITS; |
2753 | + if (t1 != t2) |
2754 | + writel(t2, port_array[port_index]); |
2755 | + } |
2756 | + |
2757 | + /* disble usb2 ports Wake bits*/ |
2758 | + port_index = xhci->num_usb2_ports; |
2759 | + port_array = xhci->usb2_ports; |
2760 | + while (port_index--) { |
2761 | + t1 = readl(port_array[port_index]); |
2762 | + t1 = xhci_port_state_to_neutral(t1); |
2763 | + t2 = t1 & ~PORT_WAKE_BITS; |
2764 | + if (t1 != t2) |
2765 | + writel(t2, port_array[port_index]); |
2766 | + } |
2767 | + |
2768 | + spin_unlock_irqrestore(&xhci->lock, flags); |
2769 | +} |
2770 | + |
2771 | /* |
2772 | * Stop HC (not bus-specific) |
2773 | * |
2774 | * This is called when the machine transition into S3/S4 mode. |
2775 | * |
2776 | */ |
2777 | -int xhci_suspend(struct xhci_hcd *xhci) |
2778 | +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) |
2779 | { |
2780 | int rc = 0; |
2781 | unsigned int delay = XHCI_MAX_HALT_USEC; |
2782 | @@ -867,6 +903,10 @@ int xhci_suspend(struct xhci_hcd *xhci) |
2783 | xhci->shared_hcd->state != HC_STATE_SUSPENDED) |
2784 | return -EINVAL; |
2785 | |
2786 | + /* Clear root port wake on bits if wakeup not allowed. */ |
2787 | + if (!do_wakeup) |
2788 | + xhci_disable_port_wake_on_bits(xhci); |
2789 | + |
2790 | /* Don't poll the roothubs on bus suspend. */ |
2791 | xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); |
2792 | clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); |
2793 | @@ -2910,68 +2950,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
2794 | } |
2795 | } |
2796 | |
2797 | -/* Deal with stalled endpoints. The core should have sent the control message |
2798 | - * to clear the halt condition. However, we need to make the xHCI hardware |
2799 | - * reset its sequence number, since a device will expect a sequence number of |
2800 | - * zero after the halt condition is cleared. |
2801 | +/* Called when clearing halted device. The core should have sent the control |
2802 | + * message to clear the device halt condition. The host side of the halt should |
2803 | + * already be cleared with a reset endpoint command issued when the STALL tx |
2804 | + * event was received. |
2805 | + * |
2806 | * Context: in_interrupt |
2807 | */ |
2808 | + |
2809 | void xhci_endpoint_reset(struct usb_hcd *hcd, |
2810 | struct usb_host_endpoint *ep) |
2811 | { |
2812 | struct xhci_hcd *xhci; |
2813 | - struct usb_device *udev; |
2814 | - unsigned int ep_index; |
2815 | - unsigned long flags; |
2816 | - int ret; |
2817 | - struct xhci_virt_ep *virt_ep; |
2818 | - struct xhci_command *command; |
2819 | |
2820 | xhci = hcd_to_xhci(hcd); |
2821 | - udev = (struct usb_device *) ep->hcpriv; |
2822 | - /* Called with a root hub endpoint (or an endpoint that wasn't added |
2823 | - * with xhci_add_endpoint() |
2824 | - */ |
2825 | - if (!ep->hcpriv) |
2826 | - return; |
2827 | - ep_index = xhci_get_endpoint_index(&ep->desc); |
2828 | - virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index]; |
2829 | - if (!virt_ep->stopped_td) { |
2830 | - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
2831 | - "Endpoint 0x%x not halted, refusing to reset.", |
2832 | - ep->desc.bEndpointAddress); |
2833 | - return; |
2834 | - } |
2835 | - if (usb_endpoint_xfer_control(&ep->desc)) { |
2836 | - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
2837 | - "Control endpoint stall already handled."); |
2838 | - return; |
2839 | - } |
2840 | |
2841 | - command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC); |
2842 | - if (!command) |
2843 | - return; |
2844 | - |
2845 | - xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep, |
2846 | - "Queueing reset endpoint command"); |
2847 | - spin_lock_irqsave(&xhci->lock, flags); |
2848 | - ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index); |
2849 | /* |
2850 | - * Can't change the ring dequeue pointer until it's transitioned to the |
2851 | - * stopped state, which is only upon a successful reset endpoint |
2852 | - * command. Better hope that last command worked! |
2853 | + * We might need to implement the config ep cmd in xhci 4.8.1 note: |
2854 | + * The Reset Endpoint Command may only be issued to endpoints in the |
2855 | + * Halted state. If software wishes reset the Data Toggle or Sequence |
2856 | + * Number of an endpoint that isn't in the Halted state, then software |
2857 | + * may issue a Configure Endpoint Command with the Drop and Add bits set |
2858 | + * for the target endpoint. that is in the Stopped state. |
2859 | */ |
2860 | - if (!ret) { |
2861 | - xhci_cleanup_stalled_ring(xhci, udev, ep_index); |
2862 | - kfree(virt_ep->stopped_td); |
2863 | - xhci_ring_cmd_db(xhci); |
2864 | - } |
2865 | - virt_ep->stopped_td = NULL; |
2866 | - virt_ep->stopped_stream = 0; |
2867 | - spin_unlock_irqrestore(&xhci->lock, flags); |
2868 | |
2869 | - if (ret) |
2870 | - xhci_warn(xhci, "FIXME allocate a new ring segment\n"); |
2871 | + /* For now just print debug to follow the situation */ |
2872 | + xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n", |
2873 | + ep->desc.bEndpointAddress); |
2874 | } |
2875 | |
2876 | static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, |
2877 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
2878 | index dace5152e179..00ed780c0563 100644 |
2879 | --- a/drivers/usb/host/xhci.h |
2880 | +++ b/drivers/usb/host/xhci.h |
2881 | @@ -1764,7 +1764,7 @@ void xhci_shutdown(struct usb_hcd *hcd); |
2882 | int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); |
2883 | |
2884 | #ifdef CONFIG_PM |
2885 | -int xhci_suspend(struct xhci_hcd *xhci); |
2886 | +int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup); |
2887 | int xhci_resume(struct xhci_hcd *xhci, bool hibernated); |
2888 | #else |
2889 | #define xhci_suspend NULL |
2890 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
2891 | index cfd009dc4018..6c4eb3cf5efd 100644 |
2892 | --- a/drivers/usb/serial/cp210x.c |
2893 | +++ b/drivers/usb/serial/cp210x.c |
2894 | @@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = { |
2895 | { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ |
2896 | { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ |
2897 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ |
2898 | + { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ |
2899 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ |
2900 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ |
2901 | { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ |
2902 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
2903 | index 0dad8ce5a609..1ebb351b9e9a 100644 |
2904 | --- a/drivers/usb/serial/ftdi_sio.c |
2905 | +++ b/drivers/usb/serial/ftdi_sio.c |
2906 | @@ -470,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = { |
2907 | { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, |
2908 | { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, |
2909 | { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, |
2910 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) }, |
2911 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) }, |
2912 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) }, |
2913 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) }, |
2914 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) }, |
2915 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) }, |
2916 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) }, |
2917 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) }, |
2918 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) }, |
2919 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) }, |
2920 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) }, |
2921 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) }, |
2922 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) }, |
2923 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) }, |
2924 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) }, |
2925 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) }, |
2926 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) }, |
2927 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) }, |
2928 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) }, |
2929 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) }, |
2930 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) }, |
2931 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) }, |
2932 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) }, |
2933 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) }, |
2934 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) }, |
2935 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) }, |
2936 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) }, |
2937 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) }, |
2938 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) }, |
2939 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) }, |
2940 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) }, |
2941 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) }, |
2942 | + { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) }, |
2943 | { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, |
2944 | { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, |
2945 | { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, |
2946 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
2947 | index 6786b705ccf6..e52409c9be99 100644 |
2948 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
2949 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
2950 | @@ -926,8 +926,8 @@ |
2951 | #define BAYER_CONTOUR_CABLE_PID 0x6001 |
2952 | |
2953 | /* |
2954 | - * The following are the values for the Matrix Orbital FTDI Range |
2955 | - * Anything in this range will use an FT232RL. |
2956 | + * Matrix Orbital Intelligent USB displays. |
2957 | + * http://www.matrixorbital.com |
2958 | */ |
2959 | #define MTXORB_VID 0x1B3D |
2960 | #define MTXORB_FTDI_RANGE_0100_PID 0x0100 |
2961 | @@ -1186,8 +1186,39 @@ |
2962 | #define MTXORB_FTDI_RANGE_01FD_PID 0x01FD |
2963 | #define MTXORB_FTDI_RANGE_01FE_PID 0x01FE |
2964 | #define MTXORB_FTDI_RANGE_01FF_PID 0x01FF |
2965 | - |
2966 | - |
2967 | +#define MTXORB_FTDI_RANGE_4701_PID 0x4701 |
2968 | +#define MTXORB_FTDI_RANGE_9300_PID 0x9300 |
2969 | +#define MTXORB_FTDI_RANGE_9301_PID 0x9301 |
2970 | +#define MTXORB_FTDI_RANGE_9302_PID 0x9302 |
2971 | +#define MTXORB_FTDI_RANGE_9303_PID 0x9303 |
2972 | +#define MTXORB_FTDI_RANGE_9304_PID 0x9304 |
2973 | +#define MTXORB_FTDI_RANGE_9305_PID 0x9305 |
2974 | +#define MTXORB_FTDI_RANGE_9306_PID 0x9306 |
2975 | +#define MTXORB_FTDI_RANGE_9307_PID 0x9307 |
2976 | +#define MTXORB_FTDI_RANGE_9308_PID 0x9308 |
2977 | +#define MTXORB_FTDI_RANGE_9309_PID 0x9309 |
2978 | +#define MTXORB_FTDI_RANGE_930A_PID 0x930A |
2979 | +#define MTXORB_FTDI_RANGE_930B_PID 0x930B |
2980 | +#define MTXORB_FTDI_RANGE_930C_PID 0x930C |
2981 | +#define MTXORB_FTDI_RANGE_930D_PID 0x930D |
2982 | +#define MTXORB_FTDI_RANGE_930E_PID 0x930E |
2983 | +#define MTXORB_FTDI_RANGE_930F_PID 0x930F |
2984 | +#define MTXORB_FTDI_RANGE_9310_PID 0x9310 |
2985 | +#define MTXORB_FTDI_RANGE_9311_PID 0x9311 |
2986 | +#define MTXORB_FTDI_RANGE_9312_PID 0x9312 |
2987 | +#define MTXORB_FTDI_RANGE_9313_PID 0x9313 |
2988 | +#define MTXORB_FTDI_RANGE_9314_PID 0x9314 |
2989 | +#define MTXORB_FTDI_RANGE_9315_PID 0x9315 |
2990 | +#define MTXORB_FTDI_RANGE_9316_PID 0x9316 |
2991 | +#define MTXORB_FTDI_RANGE_9317_PID 0x9317 |
2992 | +#define MTXORB_FTDI_RANGE_9318_PID 0x9318 |
2993 | +#define MTXORB_FTDI_RANGE_9319_PID 0x9319 |
2994 | +#define MTXORB_FTDI_RANGE_931A_PID 0x931A |
2995 | +#define MTXORB_FTDI_RANGE_931B_PID 0x931B |
2996 | +#define MTXORB_FTDI_RANGE_931C_PID 0x931C |
2997 | +#define MTXORB_FTDI_RANGE_931D_PID 0x931D |
2998 | +#define MTXORB_FTDI_RANGE_931E_PID 0x931E |
2999 | +#define MTXORB_FTDI_RANGE_931F_PID 0x931F |
3000 | |
3001 | /* |
3002 | * The Mobility Lab (TML) |
3003 | diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c |
3004 | index 93cb7cebda62..077c714f1285 100644 |
3005 | --- a/drivers/usb/serial/keyspan.c |
3006 | +++ b/drivers/usb/serial/keyspan.c |
3007 | @@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb) |
3008 | if ((data[0] & 0x80) == 0) { |
3009 | /* no errors on individual bytes, only |
3010 | possible overrun err */ |
3011 | - if (data[0] & RXERROR_OVERRUN) |
3012 | - err = TTY_OVERRUN; |
3013 | - else |
3014 | - err = 0; |
3015 | + if (data[0] & RXERROR_OVERRUN) { |
3016 | + tty_insert_flip_char(&port->port, 0, |
3017 | + TTY_OVERRUN); |
3018 | + } |
3019 | for (i = 1; i < urb->actual_length ; ++i) |
3020 | - tty_insert_flip_char(&port->port, data[i], err); |
3021 | + tty_insert_flip_char(&port->port, data[i], |
3022 | + TTY_NORMAL); |
3023 | } else { |
3024 | /* some bytes had errors, every byte has status */ |
3025 | dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); |
3026 | for (i = 0; i + 1 < urb->actual_length; i += 2) { |
3027 | - int stat = data[i], flag = 0; |
3028 | - if (stat & RXERROR_OVERRUN) |
3029 | - flag |= TTY_OVERRUN; |
3030 | - if (stat & RXERROR_FRAMING) |
3031 | - flag |= TTY_FRAME; |
3032 | - if (stat & RXERROR_PARITY) |
3033 | - flag |= TTY_PARITY; |
3034 | + int stat = data[i]; |
3035 | + int flag = TTY_NORMAL; |
3036 | + |
3037 | + if (stat & RXERROR_OVERRUN) { |
3038 | + tty_insert_flip_char(&port->port, 0, |
3039 | + TTY_OVERRUN); |
3040 | + } |
3041 | /* XXX should handle break (0x10) */ |
3042 | + if (stat & RXERROR_PARITY) |
3043 | + flag = TTY_PARITY; |
3044 | + else if (stat & RXERROR_FRAMING) |
3045 | + flag = TTY_FRAME; |
3046 | + |
3047 | tty_insert_flip_char(&port->port, data[i+1], |
3048 | flag); |
3049 | } |
3050 | @@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb) |
3051 | } else { |
3052 | /* some bytes had errors, every byte has status */ |
3053 | for (i = 0; i + 1 < urb->actual_length; i += 2) { |
3054 | - int stat = data[i], flag = 0; |
3055 | - if (stat & RXERROR_OVERRUN) |
3056 | - flag |= TTY_OVERRUN; |
3057 | - if (stat & RXERROR_FRAMING) |
3058 | - flag |= TTY_FRAME; |
3059 | - if (stat & RXERROR_PARITY) |
3060 | - flag |= TTY_PARITY; |
3061 | + int stat = data[i]; |
3062 | + int flag = TTY_NORMAL; |
3063 | + |
3064 | + if (stat & RXERROR_OVERRUN) { |
3065 | + tty_insert_flip_char(&port->port, 0, |
3066 | + TTY_OVERRUN); |
3067 | + } |
3068 | /* XXX should handle break (0x10) */ |
3069 | + if (stat & RXERROR_PARITY) |
3070 | + flag = TTY_PARITY; |
3071 | + else if (stat & RXERROR_FRAMING) |
3072 | + flag = TTY_FRAME; |
3073 | + |
3074 | tty_insert_flip_char(&port->port, data[i+1], |
3075 | flag); |
3076 | } |
3077 | @@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb) |
3078 | */ |
3079 | for (x = 0; x + 1 < len && |
3080 | i + 1 < urb->actual_length; x += 2) { |
3081 | - int stat = data[i], flag = 0; |
3082 | + int stat = data[i]; |
3083 | + int flag = TTY_NORMAL; |
3084 | |
3085 | - if (stat & RXERROR_OVERRUN) |
3086 | - flag |= TTY_OVERRUN; |
3087 | - if (stat & RXERROR_FRAMING) |
3088 | - flag |= TTY_FRAME; |
3089 | - if (stat & RXERROR_PARITY) |
3090 | - flag |= TTY_PARITY; |
3091 | + if (stat & RXERROR_OVERRUN) { |
3092 | + tty_insert_flip_char(&port->port, 0, |
3093 | + TTY_OVERRUN); |
3094 | + } |
3095 | /* XXX should handle break (0x10) */ |
3096 | + if (stat & RXERROR_PARITY) |
3097 | + flag = TTY_PARITY; |
3098 | + else if (stat & RXERROR_FRAMING) |
3099 | + flag = TTY_FRAME; |
3100 | + |
3101 | tty_insert_flip_char(&port->port, data[i+1], |
3102 | flag); |
3103 | i += 2; |
3104 | @@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb) |
3105 | if ((data[0] & 0x80) == 0) { |
3106 | /* no errors on individual bytes, only |
3107 | possible overrun err*/ |
3108 | - if (data[0] & RXERROR_OVERRUN) |
3109 | - err = TTY_OVERRUN; |
3110 | - else |
3111 | - err = 0; |
3112 | + if (data[0] & RXERROR_OVERRUN) { |
3113 | + tty_insert_flip_char(&port->port, 0, |
3114 | + TTY_OVERRUN); |
3115 | + } |
3116 | for (i = 1; i < urb->actual_length ; ++i) |
3117 | tty_insert_flip_char(&port->port, |
3118 | - data[i], err); |
3119 | + data[i], TTY_NORMAL); |
3120 | } else { |
3121 | /* some bytes had errors, every byte has status */ |
3122 | dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); |
3123 | for (i = 0; i + 1 < urb->actual_length; i += 2) { |
3124 | - int stat = data[i], flag = 0; |
3125 | - if (stat & RXERROR_OVERRUN) |
3126 | - flag |= TTY_OVERRUN; |
3127 | - if (stat & RXERROR_FRAMING) |
3128 | - flag |= TTY_FRAME; |
3129 | - if (stat & RXERROR_PARITY) |
3130 | - flag |= TTY_PARITY; |
3131 | + int stat = data[i]; |
3132 | + int flag = TTY_NORMAL; |
3133 | + |
3134 | + if (stat & RXERROR_OVERRUN) { |
3135 | + tty_insert_flip_char( |
3136 | + &port->port, 0, |
3137 | + TTY_OVERRUN); |
3138 | + } |
3139 | /* XXX should handle break (0x10) */ |
3140 | + if (stat & RXERROR_PARITY) |
3141 | + flag = TTY_PARITY; |
3142 | + else if (stat & RXERROR_FRAMING) |
3143 | + flag = TTY_FRAME; |
3144 | + |
3145 | tty_insert_flip_char(&port->port, |
3146 | data[i+1], flag); |
3147 | } |
3148 | diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c |
3149 | index a7fe664b6b7d..70a098de429f 100644 |
3150 | --- a/drivers/usb/serial/ssu100.c |
3151 | +++ b/drivers/usb/serial/ssu100.c |
3152 | @@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr, |
3153 | if (*tty_flag == TTY_NORMAL) |
3154 | *tty_flag = TTY_FRAME; |
3155 | } |
3156 | - if (lsr & UART_LSR_OE){ |
3157 | + if (lsr & UART_LSR_OE) { |
3158 | port->icount.overrun++; |
3159 | - if (*tty_flag == TTY_NORMAL) |
3160 | - *tty_flag = TTY_OVERRUN; |
3161 | + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); |
3162 | } |
3163 | } |
3164 | |
3165 | @@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb) |
3166 | if ((len >= 4) && |
3167 | (packet[0] == 0x1b) && (packet[1] == 0x1b) && |
3168 | ((packet[2] == 0x00) || (packet[2] == 0x01))) { |
3169 | - if (packet[2] == 0x00) { |
3170 | + if (packet[2] == 0x00) |
3171 | ssu100_update_lsr(port, packet[3], &flag); |
3172 | - if (flag == TTY_OVERRUN) |
3173 | - tty_insert_flip_char(&port->port, 0, |
3174 | - TTY_OVERRUN); |
3175 | - } |
3176 | if (packet[2] == 0x01) |
3177 | ssu100_update_msr(port, packet[3]); |
3178 | |
3179 | diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h |
3180 | index 2fefaf923e4a..18a283d6de1c 100644 |
3181 | --- a/drivers/usb/storage/unusual_uas.h |
3182 | +++ b/drivers/usb/storage/unusual_uas.h |
3183 | @@ -103,3 +103,10 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, |
3184 | "VL711", |
3185 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
3186 | US_FL_NO_ATA_1X), |
3187 | + |
3188 | +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ |
3189 | +UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, |
3190 | + "Hitachi", |
3191 | + "External HDD", |
3192 | + USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
3193 | + US_FL_IGNORE_UAS), |
3194 | diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c |
3195 | index 69906cacd04f..a17f11850669 100644 |
3196 | --- a/drivers/vhost/scsi.c |
3197 | +++ b/drivers/vhost/scsi.c |
3198 | @@ -1312,6 +1312,7 @@ static int |
3199 | vhost_scsi_set_endpoint(struct vhost_scsi *vs, |
3200 | struct vhost_scsi_target *t) |
3201 | { |
3202 | + struct se_portal_group *se_tpg; |
3203 | struct tcm_vhost_tport *tv_tport; |
3204 | struct tcm_vhost_tpg *tpg; |
3205 | struct tcm_vhost_tpg **vs_tpg; |
3206 | @@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, |
3207 | ret = -EEXIST; |
3208 | goto out; |
3209 | } |
3210 | + /* |
3211 | + * In order to ensure individual vhost-scsi configfs |
3212 | + * groups cannot be removed while in use by vhost ioctl, |
3213 | + * go ahead and take an explicit se_tpg->tpg_group.cg_item |
3214 | + * dependency now. |
3215 | + */ |
3216 | + se_tpg = &tpg->se_tpg; |
3217 | + ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys, |
3218 | + &se_tpg->tpg_group.cg_item); |
3219 | + if (ret) { |
3220 | + pr_warn("configfs_depend_item() failed: %d\n", ret); |
3221 | + kfree(vs_tpg); |
3222 | + mutex_unlock(&tpg->tv_tpg_mutex); |
3223 | + goto out; |
3224 | + } |
3225 | tpg->tv_tpg_vhost_count++; |
3226 | tpg->vhost_scsi = vs; |
3227 | vs_tpg[tpg->tport_tpgt] = tpg; |
3228 | @@ -1401,6 +1417,7 @@ static int |
3229 | vhost_scsi_clear_endpoint(struct vhost_scsi *vs, |
3230 | struct vhost_scsi_target *t) |
3231 | { |
3232 | + struct se_portal_group *se_tpg; |
3233 | struct tcm_vhost_tport *tv_tport; |
3234 | struct tcm_vhost_tpg *tpg; |
3235 | struct vhost_virtqueue *vq; |
3236 | @@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, |
3237 | vs->vs_tpg[target] = NULL; |
3238 | match = true; |
3239 | mutex_unlock(&tpg->tv_tpg_mutex); |
3240 | + /* |
3241 | + * Release se_tpg->tpg_group.cg_item configfs dependency now |
3242 | + * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur. |
3243 | + */ |
3244 | + se_tpg = &tpg->se_tpg; |
3245 | + configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys, |
3246 | + &se_tpg->tpg_group.cg_item); |
3247 | } |
3248 | if (match) { |
3249 | for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { |
3250 | diff --git a/fs/aio.c b/fs/aio.c |
3251 | index 733750096b71..0ff7c464a478 100644 |
3252 | --- a/fs/aio.c |
3253 | +++ b/fs/aio.c |
3254 | @@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt; |
3255 | static const struct file_operations aio_ring_fops; |
3256 | static const struct address_space_operations aio_ctx_aops; |
3257 | |
3258 | +/* Backing dev info for aio fs. |
3259 | + * -no dirty page accounting or writeback happens |
3260 | + */ |
3261 | +static struct backing_dev_info aio_fs_backing_dev_info = { |
3262 | + .name = "aiofs", |
3263 | + .state = 0, |
3264 | + .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY, |
3265 | +}; |
3266 | + |
3267 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) |
3268 | { |
3269 | struct qstr this = QSTR_INIT("[aio]", 5); |
3270 | @@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) |
3271 | |
3272 | inode->i_mapping->a_ops = &aio_ctx_aops; |
3273 | inode->i_mapping->private_data = ctx; |
3274 | + inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info; |
3275 | inode->i_size = PAGE_SIZE * nr_pages; |
3276 | |
3277 | path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); |
3278 | @@ -220,6 +230,9 @@ static int __init aio_setup(void) |
3279 | if (IS_ERR(aio_mnt)) |
3280 | panic("Failed to create aio fs mount."); |
3281 | |
3282 | + if (bdi_init(&aio_fs_backing_dev_info)) |
3283 | + panic("Failed to init aio fs backing dev info."); |
3284 | + |
3285 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
3286 | kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
3287 | |
3288 | @@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = { |
3289 | .mmap = aio_ring_mmap, |
3290 | }; |
3291 | |
3292 | -static int aio_set_page_dirty(struct page *page) |
3293 | -{ |
3294 | - return 0; |
3295 | -} |
3296 | - |
3297 | #if IS_ENABLED(CONFIG_MIGRATION) |
3298 | static int aio_migratepage(struct address_space *mapping, struct page *new, |
3299 | struct page *old, enum migrate_mode mode) |
3300 | @@ -357,7 +365,7 @@ out: |
3301 | #endif |
3302 | |
3303 | static const struct address_space_operations aio_ctx_aops = { |
3304 | - .set_page_dirty = aio_set_page_dirty, |
3305 | + .set_page_dirty = __set_page_dirty_no_writeback, |
3306 | #if IS_ENABLED(CONFIG_MIGRATION) |
3307 | .migratepage = aio_migratepage, |
3308 | #endif |
3309 | @@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx) |
3310 | pr_debug("pid(%d) page[%d]->count=%d\n", |
3311 | current->pid, i, page_count(page)); |
3312 | SetPageUptodate(page); |
3313 | - SetPageDirty(page); |
3314 | unlock_page(page); |
3315 | |
3316 | ctx->ring_pages[i] = page; |
3317 | diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
3318 | index 44ee5d2e52a4..8bbcc24fd429 100644 |
3319 | --- a/fs/btrfs/ctree.c |
3320 | +++ b/fs/btrfs/ctree.c |
3321 | @@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, |
3322 | { |
3323 | int i; |
3324 | |
3325 | -#ifdef CONFIG_DEBUG_LOCK_ALLOC |
3326 | - /* lockdep really cares that we take all of these spinlocks |
3327 | - * in the right order. If any of the locks in the path are not |
3328 | - * currently blocking, it is going to complain. So, make really |
3329 | - * really sure by forcing the path to blocking before we clear |
3330 | - * the path blocking. |
3331 | - */ |
3332 | if (held) { |
3333 | btrfs_set_lock_blocking_rw(held, held_rw); |
3334 | if (held_rw == BTRFS_WRITE_LOCK) |
3335 | @@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, |
3336 | held_rw = BTRFS_READ_LOCK_BLOCKING; |
3337 | } |
3338 | btrfs_set_path_blocking(p); |
3339 | -#endif |
3340 | |
3341 | for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { |
3342 | if (p->nodes[i] && p->locks[i]) { |
3343 | @@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p, |
3344 | } |
3345 | } |
3346 | |
3347 | -#ifdef CONFIG_DEBUG_LOCK_ALLOC |
3348 | if (held) |
3349 | btrfs_clear_lock_blocking_rw(held, held_rw); |
3350 | -#endif |
3351 | } |
3352 | |
3353 | /* this also releases the path */ |
3354 | @@ -2902,7 +2892,7 @@ cow_done: |
3355 | } |
3356 | p->locks[level] = BTRFS_WRITE_LOCK; |
3357 | } else { |
3358 | - err = btrfs_try_tree_read_lock(b); |
3359 | + err = btrfs_tree_read_lock_atomic(b); |
3360 | if (!err) { |
3361 | btrfs_set_path_blocking(p); |
3362 | btrfs_tree_read_lock(b); |
3363 | @@ -3034,7 +3024,7 @@ again: |
3364 | } |
3365 | |
3366 | level = btrfs_header_level(b); |
3367 | - err = btrfs_try_tree_read_lock(b); |
3368 | + err = btrfs_tree_read_lock_atomic(b); |
3369 | if (!err) { |
3370 | btrfs_set_path_blocking(p); |
3371 | btrfs_tree_read_lock(b); |
3372 | diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c |
3373 | index 5665d2149249..f8229ef1b46d 100644 |
3374 | --- a/fs/btrfs/locking.c |
3375 | +++ b/fs/btrfs/locking.c |
3376 | @@ -128,6 +128,26 @@ again: |
3377 | } |
3378 | |
3379 | /* |
3380 | + * take a spinning read lock. |
3381 | + * returns 1 if we get the read lock and 0 if we don't |
3382 | + * this won't wait for blocking writers |
3383 | + */ |
3384 | +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb) |
3385 | +{ |
3386 | + if (atomic_read(&eb->blocking_writers)) |
3387 | + return 0; |
3388 | + |
3389 | + read_lock(&eb->lock); |
3390 | + if (atomic_read(&eb->blocking_writers)) { |
3391 | + read_unlock(&eb->lock); |
3392 | + return 0; |
3393 | + } |
3394 | + atomic_inc(&eb->read_locks); |
3395 | + atomic_inc(&eb->spinning_readers); |
3396 | + return 1; |
3397 | +} |
3398 | + |
3399 | +/* |
3400 | * returns 1 if we get the read lock and 0 if we don't |
3401 | * this won't wait for blocking writers |
3402 | */ |
3403 | @@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb) |
3404 | atomic_read(&eb->blocking_readers)) |
3405 | return 0; |
3406 | |
3407 | - if (!write_trylock(&eb->lock)) |
3408 | - return 0; |
3409 | - |
3410 | + write_lock(&eb->lock); |
3411 | if (atomic_read(&eb->blocking_writers) || |
3412 | atomic_read(&eb->blocking_readers)) { |
3413 | write_unlock(&eb->lock); |
3414 | diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h |
3415 | index b81e0e9a4894..c44a9d5f5362 100644 |
3416 | --- a/fs/btrfs/locking.h |
3417 | +++ b/fs/btrfs/locking.h |
3418 | @@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw); |
3419 | void btrfs_assert_tree_locked(struct extent_buffer *eb); |
3420 | int btrfs_try_tree_read_lock(struct extent_buffer *eb); |
3421 | int btrfs_try_tree_write_lock(struct extent_buffer *eb); |
3422 | +int btrfs_tree_read_lock_atomic(struct extent_buffer *eb); |
3423 | + |
3424 | |
3425 | static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) |
3426 | { |
3427 | diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c |
3428 | index e0be57b0f79b..c6d7c879867e 100644 |
3429 | --- a/fs/nfsd/nfs4callback.c |
3430 | +++ b/fs/nfsd/nfs4callback.c |
3431 | @@ -801,8 +801,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) |
3432 | { |
3433 | if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { |
3434 | rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); |
3435 | - dprintk("%s slot is busy\n", __func__); |
3436 | - return false; |
3437 | + /* Race breaker */ |
3438 | + if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { |
3439 | + dprintk("%s slot is busy\n", __func__); |
3440 | + return false; |
3441 | + } |
3442 | + rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); |
3443 | } |
3444 | return true; |
3445 | } |
3446 | diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h |
3447 | index 847daf37e566..1579b2171fcc 100644 |
3448 | --- a/fs/nfsd/nfsd.h |
3449 | +++ b/fs/nfsd/nfsd.h |
3450 | @@ -335,12 +335,15 @@ void nfsd_lockd_shutdown(void); |
3451 | (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) |
3452 | |
3453 | #ifdef CONFIG_NFSD_V4_SECURITY_LABEL |
3454 | -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ |
3455 | - (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL) |
3456 | +#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL |
3457 | #else |
3458 | -#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 |
3459 | +#define NFSD4_2_SECURITY_ATTRS 0 |
3460 | #endif |
3461 | |
3462 | +#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ |
3463 | + (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ |
3464 | + NFSD4_2_SECURITY_ATTRS) |
3465 | + |
3466 | static inline u32 nfsd_suppattrs0(u32 minorversion) |
3467 | { |
3468 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 |
3469 | diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h |
3470 | index a929f86d0ddd..d72b5b35f15e 100644 |
3471 | --- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h |
3472 | +++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h |
3473 | @@ -60,7 +60,7 @@ |
3474 | #define ESC1_CLK_SRC 43 |
3475 | #define HDMI_CLK_SRC 44 |
3476 | #define VSYNC_CLK_SRC 45 |
3477 | -#define RBCPR_CLK_SRC 46 |
3478 | +#define MMSS_RBCPR_CLK_SRC 46 |
3479 | #define RBBMTIMER_CLK_SRC 47 |
3480 | #define MAPLE_CLK_SRC 48 |
3481 | #define VDP_CLK_SRC 49 |
3482 | diff --git a/include/linux/bitops.h b/include/linux/bitops.h |
3483 | index cbc5833fb221..38b5f5c88c18 100644 |
3484 | --- a/include/linux/bitops.h |
3485 | +++ b/include/linux/bitops.h |
3486 | @@ -18,8 +18,11 @@ |
3487 | * position @h. For example |
3488 | * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. |
3489 | */ |
3490 | -#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
3491 | -#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) |
3492 | +#define GENMASK(h, l) \ |
3493 | + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) |
3494 | + |
3495 | +#define GENMASK_ULL(h, l) \ |
3496 | + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) |
3497 | |
3498 | extern unsigned int __sw_hweight8(unsigned int w); |
3499 | extern unsigned int __sw_hweight16(unsigned int w); |
3500 | diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h |
3501 | index 411dd7eb2653..da6996efbf83 100644 |
3502 | --- a/include/linux/clk-provider.h |
3503 | +++ b/include/linux/clk-provider.h |
3504 | @@ -341,7 +341,6 @@ struct clk_divider { |
3505 | #define CLK_DIVIDER_READ_ONLY BIT(5) |
3506 | |
3507 | extern const struct clk_ops clk_divider_ops; |
3508 | -extern const struct clk_ops clk_divider_ro_ops; |
3509 | struct clk *clk_register_divider(struct device *dev, const char *name, |
3510 | const char *parent_name, unsigned long flags, |
3511 | void __iomem *reg, u8 shift, u8 width, |
3512 | diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h |
3513 | index 8bbd7bc1043d..03fa332ad2a8 100644 |
3514 | --- a/include/linux/iio/events.h |
3515 | +++ b/include/linux/iio/events.h |
3516 | @@ -72,7 +72,7 @@ struct iio_event_data { |
3517 | |
3518 | #define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) |
3519 | |
3520 | -#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) |
3521 | +#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F) |
3522 | |
3523 | #define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) |
3524 | |
3525 | diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h |
3526 | index 0068708161ff..0a21fbefdfbe 100644 |
3527 | --- a/include/linux/inetdevice.h |
3528 | +++ b/include/linux/inetdevice.h |
3529 | @@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev) |
3530 | static __inline__ __be32 inet_make_mask(int logmask) |
3531 | { |
3532 | if (logmask) |
3533 | - return htonl(~((1<<(32-logmask))-1)); |
3534 | + return htonl(~((1U<<(32-logmask))-1)); |
3535 | return 0; |
3536 | } |
3537 | |
3538 | diff --git a/include/linux/pci.h b/include/linux/pci.h |
3539 | index 96453f9bc8ba..6b6da8f539b6 100644 |
3540 | --- a/include/linux/pci.h |
3541 | +++ b/include/linux/pci.h |
3542 | @@ -331,6 +331,7 @@ struct pci_dev { |
3543 | unsigned int is_added:1; |
3544 | unsigned int is_busmaster:1; /* device is busmaster */ |
3545 | unsigned int no_msi:1; /* device may not use msi */ |
3546 | + unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ |
3547 | unsigned int block_cfg_access:1; /* config space access is blocked */ |
3548 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ |
3549 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ |
3550 | diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h |
3551 | index 2883a7a6f9f3..98f2ade0266e 100644 |
3552 | --- a/include/sound/soc-dpcm.h |
3553 | +++ b/include/sound/soc-dpcm.h |
3554 | @@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime { |
3555 | /* state and update */ |
3556 | enum snd_soc_dpcm_update runtime_update; |
3557 | enum snd_soc_dpcm_state state; |
3558 | + |
3559 | + int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */ |
3560 | }; |
3561 | |
3562 | /* can this BE stop and free */ |
3563 | diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c |
3564 | index 1d0af8a2c646..ed8f2cde34c5 100644 |
3565 | --- a/kernel/events/uprobes.c |
3566 | +++ b/kernel/events/uprobes.c |
3567 | @@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void) |
3568 | if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { |
3569 | utask->state = UTASK_SSTEP_TRAPPED; |
3570 | set_tsk_thread_flag(t, TIF_UPROBE); |
3571 | - set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); |
3572 | } |
3573 | } |
3574 | |
3575 | diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c |
3576 | index f2e15738534d..8f7bd56955b0 100644 |
3577 | --- a/net/ipv4/fib_rules.c |
3578 | +++ b/net/ipv4/fib_rules.c |
3579 | @@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) |
3580 | else |
3581 | res->tclassid = 0; |
3582 | #endif |
3583 | + |
3584 | + if (err == -ESRCH) |
3585 | + err = -ENETUNREACH; |
3586 | + |
3587 | return err; |
3588 | } |
3589 | EXPORT_SYMBOL_GPL(__fib_lookup); |
3590 | diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c |
3591 | index a3c59a077a5f..352476233608 100644 |
3592 | --- a/net/ipv4/ping.c |
3593 | +++ b/net/ipv4/ping.c |
3594 | @@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident) |
3595 | &ipv6_hdr(skb)->daddr)) |
3596 | continue; |
3597 | #endif |
3598 | + } else { |
3599 | + continue; |
3600 | } |
3601 | |
3602 | if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) |
3603 | diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c |
3604 | index 91729b807c7d..1b095ca37aa4 100644 |
3605 | --- a/net/ipx/af_ipx.c |
3606 | +++ b/net/ipx/af_ipx.c |
3607 | @@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, |
3608 | struct ipxhdr *ipx = NULL; |
3609 | struct sk_buff *skb; |
3610 | int copied, rc; |
3611 | + bool locked = true; |
3612 | |
3613 | lock_sock(sk); |
3614 | /* put the autobinding in */ |
3615 | @@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, |
3616 | if (sock_flag(sk, SOCK_ZAPPED)) |
3617 | goto out; |
3618 | |
3619 | + release_sock(sk); |
3620 | + locked = false; |
3621 | skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, |
3622 | flags & MSG_DONTWAIT, &rc); |
3623 | if (!skb) { |
3624 | @@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, |
3625 | out_free: |
3626 | skb_free_datagram(sk, skb); |
3627 | out: |
3628 | - release_sock(sk); |
3629 | + if (locked) |
3630 | + release_sock(sk); |
3631 | return rc; |
3632 | } |
3633 | |
3634 | diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c |
3635 | index ec24378caaaf..09d9caaec591 100644 |
3636 | --- a/net/mac80211/aes_ccm.c |
3637 | +++ b/net/mac80211/aes_ccm.c |
3638 | @@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, |
3639 | __aligned(__alignof__(struct aead_request)); |
3640 | struct aead_request *aead_req = (void *) aead_req_data; |
3641 | |
3642 | + if (data_len == 0) |
3643 | + return -EINVAL; |
3644 | + |
3645 | memset(aead_req, 0, sizeof(aead_req_data)); |
3646 | |
3647 | sg_init_one(&pt, data, data_len); |
3648 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c |
3649 | index de88c4ab5146..ec94ba94079f 100644 |
3650 | --- a/net/netfilter/nf_conntrack_core.c |
3651 | +++ b/net/netfilter/nf_conntrack_core.c |
3652 | @@ -611,12 +611,16 @@ __nf_conntrack_confirm(struct sk_buff *skb) |
3653 | */ |
3654 | NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); |
3655 | pr_debug("Confirming conntrack %p\n", ct); |
3656 | - /* We have to check the DYING flag inside the lock to prevent |
3657 | - a race against nf_ct_get_next_corpse() possibly called from |
3658 | - user context, else we insert an already 'dead' hash, blocking |
3659 | - further use of that particular connection -JM */ |
3660 | + |
3661 | + /* We have to check the DYING flag after unlink to prevent |
3662 | + * a race against nf_ct_get_next_corpse() possibly called from |
3663 | + * user context, else we insert an already 'dead' hash, blocking |
3664 | + * further use of that particular connection -JM. |
3665 | + */ |
3666 | + nf_ct_del_from_dying_or_unconfirmed_list(ct); |
3667 | |
3668 | if (unlikely(nf_ct_is_dying(ct))) { |
3669 | + nf_ct_add_to_dying_list(ct); |
3670 | nf_conntrack_double_unlock(hash, reply_hash); |
3671 | local_bh_enable(); |
3672 | return NF_ACCEPT; |
3673 | @@ -636,8 +640,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) |
3674 | zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) |
3675 | goto out; |
3676 | |
3677 | - nf_ct_del_from_dying_or_unconfirmed_list(ct); |
3678 | - |
3679 | /* Timer relative to confirmation time, not original |
3680 | setting time, otherwise we'd get timer wrap in |
3681 | weird delay cases. */ |
3682 | diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c |
3683 | index 0a7f848590d2..aae9b84ae603 100644 |
3684 | --- a/sound/pci/hda/hda_intel.c |
3685 | +++ b/sound/pci/hda/hda_intel.c |
3686 | @@ -296,7 +296,8 @@ enum { |
3687 | |
3688 | /* quirks for ATI/AMD HDMI */ |
3689 | #define AZX_DCAPS_PRESET_ATI_HDMI \ |
3690 | - (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) |
3691 | + (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\ |
3692 | + AZX_DCAPS_NO_MSI64) |
3693 | |
3694 | /* quirks for Nvidia */ |
3695 | #define AZX_DCAPS_PRESET_NVIDIA \ |
3696 | @@ -1484,6 +1485,7 @@ static int azx_first_init(struct azx *chip) |
3697 | struct snd_card *card = chip->card; |
3698 | int err; |
3699 | unsigned short gcap; |
3700 | + unsigned int dma_bits = 64; |
3701 | |
3702 | #if BITS_PER_LONG != 64 |
3703 | /* Fix up base address on ULI M5461 */ |
3704 | @@ -1507,9 +1509,14 @@ static int azx_first_init(struct azx *chip) |
3705 | return -ENXIO; |
3706 | } |
3707 | |
3708 | - if (chip->msi) |
3709 | + if (chip->msi) { |
3710 | + if (chip->driver_caps & AZX_DCAPS_NO_MSI64) { |
3711 | + dev_dbg(card->dev, "Disabling 64bit MSI\n"); |
3712 | + pci->no_64bit_msi = true; |
3713 | + } |
3714 | if (pci_enable_msi(pci) < 0) |
3715 | chip->msi = 0; |
3716 | + } |
3717 | |
3718 | if (azx_acquire_irq(chip, 0) < 0) |
3719 | return -EBUSY; |
3720 | @@ -1520,9 +1527,14 @@ static int azx_first_init(struct azx *chip) |
3721 | gcap = azx_readw(chip, GCAP); |
3722 | dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); |
3723 | |
3724 | + /* AMD devices support 40 or 48bit DMA, take the safe one */ |
3725 | + if (chip->pci->vendor == PCI_VENDOR_ID_AMD) |
3726 | + dma_bits = 40; |
3727 | + |
3728 | /* disable SB600 64bit support for safety */ |
3729 | if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { |
3730 | struct pci_dev *p_smbus; |
3731 | + dma_bits = 40; |
3732 | p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, |
3733 | PCI_DEVICE_ID_ATI_SBX00_SMBUS, |
3734 | NULL); |
3735 | @@ -1552,9 +1564,11 @@ static int azx_first_init(struct azx *chip) |
3736 | } |
3737 | |
3738 | /* allow 64bit DMA address if supported by H/W */ |
3739 | - if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) |
3740 | - pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); |
3741 | - else { |
3742 | + if (!(gcap & AZX_GCAP_64OK)) |
3743 | + dma_bits = 32; |
3744 | + if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) { |
3745 | + pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits)); |
3746 | + } else { |
3747 | pci_set_dma_mask(pci, DMA_BIT_MASK(32)); |
3748 | pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); |
3749 | } |
3750 | diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h |
3751 | index 949cd437eeb2..5016014e57f2 100644 |
3752 | --- a/sound/pci/hda/hda_priv.h |
3753 | +++ b/sound/pci/hda/hda_priv.h |
3754 | @@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; |
3755 | #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ |
3756 | #define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ |
3757 | #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ |
3758 | +#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ |
3759 | |
3760 | /* HD Audio class code */ |
3761 | #define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 |
3762 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
3763 | index 0c9d5880859a..623a9d0c8d52 100644 |
3764 | --- a/sound/pci/hda/patch_realtek.c |
3765 | +++ b/sound/pci/hda/patch_realtek.c |
3766 | @@ -5007,7 +5007,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
3767 | SND_PCI_QUIRK(0x103c, 0x2223, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3768 | SND_PCI_QUIRK(0x103c, 0x2224, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3769 | SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3770 | - SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3771 | SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3772 | SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3773 | SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
3774 | diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c |
3775 | index cee51ae177c1..c40428f25ba5 100644 |
3776 | --- a/sound/soc/codecs/cs42l51-i2c.c |
3777 | +++ b/sound/soc/codecs/cs42l51-i2c.c |
3778 | @@ -46,6 +46,7 @@ static struct i2c_driver cs42l51_i2c_driver = { |
3779 | .driver = { |
3780 | .name = "cs42l51", |
3781 | .owner = THIS_MODULE, |
3782 | + .of_match_table = cs42l51_of_match, |
3783 | }, |
3784 | .probe = cs42l51_i2c_probe, |
3785 | .remove = cs42l51_i2c_remove, |
3786 | diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c |
3787 | index 09488d97de60..669c38fc3034 100644 |
3788 | --- a/sound/soc/codecs/cs42l51.c |
3789 | +++ b/sound/soc/codecs/cs42l51.c |
3790 | @@ -558,11 +558,13 @@ error: |
3791 | } |
3792 | EXPORT_SYMBOL_GPL(cs42l51_probe); |
3793 | |
3794 | -static const struct of_device_id cs42l51_of_match[] = { |
3795 | +const struct of_device_id cs42l51_of_match[] = { |
3796 | { .compatible = "cirrus,cs42l51", }, |
3797 | { } |
3798 | }; |
3799 | MODULE_DEVICE_TABLE(of, cs42l51_of_match); |
3800 | +EXPORT_SYMBOL_GPL(cs42l51_of_match); |
3801 | + |
3802 | MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); |
3803 | MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver"); |
3804 | MODULE_LICENSE("GPL"); |
3805 | diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h |
3806 | index 8c55bf384bc6..0ca805492ac4 100644 |
3807 | --- a/sound/soc/codecs/cs42l51.h |
3808 | +++ b/sound/soc/codecs/cs42l51.h |
3809 | @@ -22,6 +22,7 @@ struct device; |
3810 | |
3811 | extern const struct regmap_config cs42l51_regmap; |
3812 | int cs42l51_probe(struct device *dev, struct regmap *regmap); |
3813 | +extern const struct of_device_id cs42l51_of_match[]; |
3814 | |
3815 | #define CS42L51_CHIP_ID 0x1B |
3816 | #define CS42L51_CHIP_REV_A 0x00 |
3817 | diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c |
3818 | index ba9d9b4d4857..57ab19dbaaf3 100644 |
3819 | --- a/sound/soc/codecs/rt5670.c |
3820 | +++ b/sound/soc/codecs/rt5670.c |
3821 | @@ -100,18 +100,18 @@ static const struct reg_default rt5670_reg[] = { |
3822 | { 0x4c, 0x5380 }, |
3823 | { 0x4f, 0x0073 }, |
3824 | { 0x52, 0x00d3 }, |
3825 | - { 0x53, 0xf0f0 }, |
3826 | + { 0x53, 0xf000 }, |
3827 | { 0x61, 0x0000 }, |
3828 | { 0x62, 0x0001 }, |
3829 | { 0x63, 0x00c3 }, |
3830 | { 0x64, 0x0000 }, |
3831 | - { 0x65, 0x0000 }, |
3832 | + { 0x65, 0x0001 }, |
3833 | { 0x66, 0x0000 }, |
3834 | { 0x6f, 0x8000 }, |
3835 | { 0x70, 0x8000 }, |
3836 | { 0x71, 0x8000 }, |
3837 | { 0x72, 0x8000 }, |
3838 | - { 0x73, 0x1110 }, |
3839 | + { 0x73, 0x7770 }, |
3840 | { 0x74, 0x0e00 }, |
3841 | { 0x75, 0x1505 }, |
3842 | { 0x76, 0x0015 }, |
3843 | @@ -125,21 +125,21 @@ static const struct reg_default rt5670_reg[] = { |
3844 | { 0x83, 0x0000 }, |
3845 | { 0x84, 0x0000 }, |
3846 | { 0x85, 0x0000 }, |
3847 | - { 0x86, 0x0008 }, |
3848 | + { 0x86, 0x0004 }, |
3849 | { 0x87, 0x0000 }, |
3850 | { 0x88, 0x0000 }, |
3851 | { 0x89, 0x0000 }, |
3852 | { 0x8a, 0x0000 }, |
3853 | { 0x8b, 0x0000 }, |
3854 | - { 0x8c, 0x0007 }, |
3855 | + { 0x8c, 0x0003 }, |
3856 | { 0x8d, 0x0000 }, |
3857 | { 0x8e, 0x0004 }, |
3858 | { 0x8f, 0x1100 }, |
3859 | { 0x90, 0x0646 }, |
3860 | { 0x91, 0x0c06 }, |
3861 | { 0x93, 0x0000 }, |
3862 | - { 0x94, 0x0000 }, |
3863 | - { 0x95, 0x0000 }, |
3864 | + { 0x94, 0x1270 }, |
3865 | + { 0x95, 0x1000 }, |
3866 | { 0x97, 0x0000 }, |
3867 | { 0x98, 0x0000 }, |
3868 | { 0x99, 0x0000 }, |
3869 | @@ -150,11 +150,11 @@ static const struct reg_default rt5670_reg[] = { |
3870 | { 0x9e, 0x0400 }, |
3871 | { 0xae, 0x7000 }, |
3872 | { 0xaf, 0x0000 }, |
3873 | - { 0xb0, 0x6000 }, |
3874 | + { 0xb0, 0x7000 }, |
3875 | { 0xb1, 0x0000 }, |
3876 | { 0xb2, 0x0000 }, |
3877 | { 0xb3, 0x001f }, |
3878 | - { 0xb4, 0x2206 }, |
3879 | + { 0xb4, 0x220c }, |
3880 | { 0xb5, 0x1f00 }, |
3881 | { 0xb6, 0x0000 }, |
3882 | { 0xb7, 0x0000 }, |
3883 | @@ -171,25 +171,25 @@ static const struct reg_default rt5670_reg[] = { |
3884 | { 0xcf, 0x1813 }, |
3885 | { 0xd0, 0x0690 }, |
3886 | { 0xd1, 0x1c17 }, |
3887 | - { 0xd3, 0xb320 }, |
3888 | + { 0xd3, 0xa220 }, |
3889 | { 0xd4, 0x0000 }, |
3890 | { 0xd6, 0x0400 }, |
3891 | { 0xd9, 0x0809 }, |
3892 | { 0xda, 0x0000 }, |
3893 | { 0xdb, 0x0001 }, |
3894 | { 0xdc, 0x0049 }, |
3895 | - { 0xdd, 0x0009 }, |
3896 | + { 0xdd, 0x0024 }, |
3897 | { 0xe6, 0x8000 }, |
3898 | { 0xe7, 0x0000 }, |
3899 | - { 0xec, 0xb300 }, |
3900 | + { 0xec, 0xa200 }, |
3901 | { 0xed, 0x0000 }, |
3902 | - { 0xee, 0xb300 }, |
3903 | + { 0xee, 0xa200 }, |
3904 | { 0xef, 0x0000 }, |
3905 | { 0xf8, 0x0000 }, |
3906 | { 0xf9, 0x0000 }, |
3907 | { 0xfa, 0x8010 }, |
3908 | { 0xfb, 0x0033 }, |
3909 | - { 0xfc, 0x0080 }, |
3910 | + { 0xfc, 0x0100 }, |
3911 | }; |
3912 | |
3913 | static bool rt5670_volatile_register(struct device *dev, unsigned int reg) |
3914 | diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c |
3915 | index e997d271728d..46ba6ac07442 100644 |
3916 | --- a/sound/soc/codecs/sgtl5000.c |
3917 | +++ b/sound/soc/codecs/sgtl5000.c |
3918 | @@ -1316,8 +1316,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) |
3919 | |
3920 | /* enable small pop, introduce 400ms delay in turning off */ |
3921 | snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL, |
3922 | - SGTL5000_SMALL_POP, |
3923 | - SGTL5000_SMALL_POP); |
3924 | + SGTL5000_SMALL_POP, 1); |
3925 | |
3926 | /* disable short cut detector */ |
3927 | snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0); |
3928 | diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h |
3929 | index 2f8c88931f69..bd7a344bf8c5 100644 |
3930 | --- a/sound/soc/codecs/sgtl5000.h |
3931 | +++ b/sound/soc/codecs/sgtl5000.h |
3932 | @@ -275,7 +275,7 @@ |
3933 | #define SGTL5000_BIAS_CTRL_MASK 0x000e |
3934 | #define SGTL5000_BIAS_CTRL_SHIFT 1 |
3935 | #define SGTL5000_BIAS_CTRL_WIDTH 3 |
3936 | -#define SGTL5000_SMALL_POP 0x0001 |
3937 | +#define SGTL5000_SMALL_POP 0 |
3938 | |
3939 | /* |
3940 | * SGTL5000_CHIP_MIC_CTRL |
3941 | diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c |
3942 | index f412a9911a75..67124783558a 100644 |
3943 | --- a/sound/soc/codecs/wm_adsp.c |
3944 | +++ b/sound/soc/codecs/wm_adsp.c |
3945 | @@ -1355,6 +1355,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp) |
3946 | file, blocks, pos - firmware->size); |
3947 | |
3948 | out_fw: |
3949 | + regmap_async_complete(regmap); |
3950 | release_firmware(firmware); |
3951 | wm_adsp_buf_free(&buf_list); |
3952 | out: |
3953 | diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c |
3954 | index fb9e05c9f471..244fb1cd1795 100644 |
3955 | --- a/sound/soc/rockchip/rockchip_i2s.c |
3956 | +++ b/sound/soc/rockchip/rockchip_i2s.c |
3957 | @@ -152,8 +152,10 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on) |
3958 | while (val) { |
3959 | regmap_read(i2s->regmap, I2S_CLR, &val); |
3960 | retry--; |
3961 | - if (!retry) |
3962 | + if (!retry) { |
3963 | dev_warn(i2s->dev, "fail to clear\n"); |
3964 | + break; |
3965 | + } |
3966 | } |
3967 | } |
3968 | } |
3969 | diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c |
3970 | index 0acf5d0eed53..72118a77dd5b 100644 |
3971 | --- a/sound/soc/samsung/snow.c |
3972 | +++ b/sound/soc/samsung/snow.c |
3973 | @@ -110,6 +110,7 @@ static const struct of_device_id snow_of_match[] = { |
3974 | { .compatible = "google,snow-audio-max98095", }, |
3975 | {}, |
3976 | }; |
3977 | +MODULE_DEVICE_TABLE(of, snow_of_match); |
3978 | |
3979 | static struct platform_driver snow_driver = { |
3980 | .driver = { |
3981 | diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c |
3982 | index c76344350e44..199f9223346c 100644 |
3983 | --- a/sound/soc/sh/fsi.c |
3984 | +++ b/sound/soc/sh/fsi.c |
3985 | @@ -1706,8 +1706,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = { |
3986 | static struct snd_pcm_hardware fsi_pcm_hardware = { |
3987 | .info = SNDRV_PCM_INFO_INTERLEAVED | |
3988 | SNDRV_PCM_INFO_MMAP | |
3989 | - SNDRV_PCM_INFO_MMAP_VALID | |
3990 | - SNDRV_PCM_INFO_PAUSE, |
3991 | + SNDRV_PCM_INFO_MMAP_VALID, |
3992 | .buffer_bytes_max = 64 * 1024, |
3993 | .period_bytes_min = 32, |
3994 | .period_bytes_max = 8192, |
3995 | diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c |
3996 | index 19f78963e8b9..b67becb8d3dc 100644 |
3997 | --- a/sound/soc/sh/rcar/core.c |
3998 | +++ b/sound/soc/sh/rcar/core.c |
3999 | @@ -888,8 +888,7 @@ static int rsnd_dai_probe(struct platform_device *pdev, |
4000 | static struct snd_pcm_hardware rsnd_pcm_hardware = { |
4001 | .info = SNDRV_PCM_INFO_INTERLEAVED | |
4002 | SNDRV_PCM_INFO_MMAP | |
4003 | - SNDRV_PCM_INFO_MMAP_VALID | |
4004 | - SNDRV_PCM_INFO_PAUSE, |
4005 | + SNDRV_PCM_INFO_MMAP_VALID, |
4006 | .buffer_bytes_max = 64 * 1024, |
4007 | .period_bytes_min = 32, |
4008 | .period_bytes_max = 8192, |
4009 | diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c |
4010 | index 002311afdeaa..57277dd79e11 100644 |
4011 | --- a/sound/soc/soc-pcm.c |
4012 | +++ b/sound/soc/soc-pcm.c |
4013 | @@ -1522,13 +1522,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream) |
4014 | dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture); |
4015 | } |
4016 | |
4017 | +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd); |
4018 | + |
4019 | +/* Set FE's runtime_update state; the state is protected via PCM stream lock |
4020 | + * for avoiding the race with trigger callback. |
4021 | + * If the state is unset and a trigger is pending while the previous operation, |
4022 | + * process the pending trigger action here. |
4023 | + */ |
4024 | +static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe, |
4025 | + int stream, enum snd_soc_dpcm_update state) |
4026 | +{ |
4027 | + struct snd_pcm_substream *substream = |
4028 | + snd_soc_dpcm_get_substream(fe, stream); |
4029 | + |
4030 | + snd_pcm_stream_lock_irq(substream); |
4031 | + if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) { |
4032 | + dpcm_fe_dai_do_trigger(substream, |
4033 | + fe->dpcm[stream].trigger_pending - 1); |
4034 | + fe->dpcm[stream].trigger_pending = 0; |
4035 | + } |
4036 | + fe->dpcm[stream].runtime_update = state; |
4037 | + snd_pcm_stream_unlock_irq(substream); |
4038 | +} |
4039 | + |
4040 | static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) |
4041 | { |
4042 | struct snd_soc_pcm_runtime *fe = fe_substream->private_data; |
4043 | struct snd_pcm_runtime *runtime = fe_substream->runtime; |
4044 | int stream = fe_substream->stream, ret = 0; |
4045 | |
4046 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
4047 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); |
4048 | |
4049 | ret = dpcm_be_dai_startup(fe, fe_substream->stream); |
4050 | if (ret < 0) { |
4051 | @@ -1550,13 +1573,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) |
4052 | dpcm_set_fe_runtime(fe_substream); |
4053 | snd_pcm_limit_hw_rates(runtime); |
4054 | |
4055 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4056 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4057 | return 0; |
4058 | |
4059 | unwind: |
4060 | dpcm_be_dai_startup_unwind(fe, fe_substream->stream); |
4061 | be_err: |
4062 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4063 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4064 | return ret; |
4065 | } |
4066 | |
4067 | @@ -1603,7 +1626,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) |
4068 | struct snd_soc_pcm_runtime *fe = substream->private_data; |
4069 | int stream = substream->stream; |
4070 | |
4071 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
4072 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); |
4073 | |
4074 | /* shutdown the BEs */ |
4075 | dpcm_be_dai_shutdown(fe, substream->stream); |
4076 | @@ -1617,7 +1640,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream) |
4077 | dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP); |
4078 | |
4079 | fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; |
4080 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4081 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4082 | return 0; |
4083 | } |
4084 | |
4085 | @@ -1665,7 +1688,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) |
4086 | int err, stream = substream->stream; |
4087 | |
4088 | mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); |
4089 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
4090 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); |
4091 | |
4092 | dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name); |
4093 | |
4094 | @@ -1680,7 +1703,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream) |
4095 | err = dpcm_be_dai_hw_free(fe, stream); |
4096 | |
4097 | fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; |
4098 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4099 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4100 | |
4101 | mutex_unlock(&fe->card->mutex); |
4102 | return 0; |
4103 | @@ -1773,7 +1796,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, |
4104 | int ret, stream = substream->stream; |
4105 | |
4106 | mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); |
4107 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
4108 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); |
4109 | |
4110 | memcpy(&fe->dpcm[substream->stream].hw_params, params, |
4111 | sizeof(struct snd_pcm_hw_params)); |
4112 | @@ -1796,7 +1819,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream, |
4113 | fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; |
4114 | |
4115 | out: |
4116 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4117 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4118 | mutex_unlock(&fe->card->mutex); |
4119 | return ret; |
4120 | } |
4121 | @@ -1910,7 +1933,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, |
4122 | } |
4123 | EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); |
4124 | |
4125 | -static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) |
4126 | +static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd) |
4127 | { |
4128 | struct snd_soc_pcm_runtime *fe = substream->private_data; |
4129 | int stream = substream->stream, ret; |
4130 | @@ -1984,6 +2007,23 @@ out: |
4131 | return ret; |
4132 | } |
4133 | |
4134 | +static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) |
4135 | +{ |
4136 | + struct snd_soc_pcm_runtime *fe = substream->private_data; |
4137 | + int stream = substream->stream; |
4138 | + |
4139 | + /* if FE's runtime_update is already set, we're in race; |
4140 | + * process this trigger later at exit |
4141 | + */ |
4142 | + if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) { |
4143 | + fe->dpcm[stream].trigger_pending = cmd + 1; |
4144 | + return 0; /* delayed, assuming it's successful */ |
4145 | + } |
4146 | + |
4147 | + /* we're alone, let's trigger */ |
4148 | + return dpcm_fe_dai_do_trigger(substream, cmd); |
4149 | +} |
4150 | + |
4151 | int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) |
4152 | { |
4153 | struct snd_soc_dpcm *dpcm; |
4154 | @@ -2027,7 +2067,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) |
4155 | |
4156 | dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name); |
4157 | |
4158 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; |
4159 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE); |
4160 | |
4161 | /* there is no point preparing this FE if there are no BEs */ |
4162 | if (list_empty(&fe->dpcm[stream].be_clients)) { |
4163 | @@ -2054,7 +2094,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) |
4164 | fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; |
4165 | |
4166 | out: |
4167 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4168 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4169 | mutex_unlock(&fe->card->mutex); |
4170 | |
4171 | return ret; |
4172 | @@ -2201,11 +2241,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream) |
4173 | { |
4174 | int ret; |
4175 | |
4176 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; |
4177 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); |
4178 | ret = dpcm_run_update_startup(fe, stream); |
4179 | if (ret < 0) |
4180 | dev_err(fe->dev, "ASoC: failed to startup some BEs\n"); |
4181 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4182 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4183 | |
4184 | return ret; |
4185 | } |
4186 | @@ -2214,11 +2254,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream) |
4187 | { |
4188 | int ret; |
4189 | |
4190 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; |
4191 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE); |
4192 | ret = dpcm_run_update_shutdown(fe, stream); |
4193 | if (ret < 0) |
4194 | dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n"); |
4195 | - fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; |
4196 | + dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); |
4197 | |
4198 | return ret; |
4199 | } |
4200 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
4201 | index 19a921eb75f1..337067cce251 100644 |
4202 | --- a/sound/usb/quirks.c |
4203 | +++ b/sound/usb/quirks.c |
4204 | @@ -1146,6 +1146,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, |
4205 | if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) && |
4206 | (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) |
4207 | mdelay(20); |
4208 | + |
4209 | + /* Marantz/Denon devices with USB DAC functionality need a delay |
4210 | + * after each class compliant request |
4211 | + */ |
4212 | + if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) && |
4213 | + (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) { |
4214 | + |
4215 | + switch (le16_to_cpu(dev->descriptor.idProduct)) { |
4216 | + case 0x3005: /* Marantz HD-DAC1 */ |
4217 | + case 0x3006: /* Marantz SA-14S1 */ |
4218 | + mdelay(20); |
4219 | + break; |
4220 | + } |
4221 | + } |
4222 | } |
4223 | |
4224 | /* |