Annotation of /trunk/kernel-alx/patches-4.19/0159-4.19.60-all-fixes.patch
Parent Directory | Revision Log
Revision 3438 -
(hide annotations)
(download)
Fri Aug 2 11:48:05 2019 UTC (4 years, 9 months ago) by niro
File size: 71201 byte(s)
-linux-4.19.60
1 | niro | 3438 | diff --git a/Makefile b/Makefile |
2 | index 38f2150457fd..5fb79d493012 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,7 +1,7 @@ | ||
6 | # SPDX-License-Identifier: GPL-2.0 | ||
7 | VERSION = 4 | ||
8 | PATCHLEVEL = 19 | ||
9 | -SUBLEVEL = 59 | ||
10 | +SUBLEVEL = 60 | ||
11 | EXTRAVERSION = | ||
12 | NAME = "People's Front" | ||
13 | |||
14 | diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c | ||
15 | index 183391d4d33a..9cf2ee8b4349 100644 | ||
16 | --- a/arch/arc/kernel/unwind.c | ||
17 | +++ b/arch/arc/kernel/unwind.c | ||
18 | @@ -185,11 +185,6 @@ static void *__init unw_hdr_alloc_early(unsigned long sz) | ||
19 | MAX_DMA_ADDRESS); | ||
20 | } | ||
21 | |||
22 | -static void *unw_hdr_alloc(unsigned long sz) | ||
23 | -{ | ||
24 | - return kmalloc(sz, GFP_KERNEL); | ||
25 | -} | ||
26 | - | ||
27 | static void init_unwind_table(struct unwind_table *table, const char *name, | ||
28 | const void *core_start, unsigned long core_size, | ||
29 | const void *init_start, unsigned long init_size, | ||
30 | @@ -370,6 +365,10 @@ ret_err: | ||
31 | } | ||
32 | |||
33 | #ifdef CONFIG_MODULES | ||
34 | +static void *unw_hdr_alloc(unsigned long sz) | ||
35 | +{ | ||
36 | + return kmalloc(sz, GFP_KERNEL); | ||
37 | +} | ||
38 | |||
39 | static struct unwind_table *last_table; | ||
40 | |||
41 | diff --git a/arch/arm/boot/dts/gemini-dlink-dns-313.dts b/arch/arm/boot/dts/gemini-dlink-dns-313.dts | ||
42 | index d1329322b968..361dccd6c7ee 100644 | ||
43 | --- a/arch/arm/boot/dts/gemini-dlink-dns-313.dts | ||
44 | +++ b/arch/arm/boot/dts/gemini-dlink-dns-313.dts | ||
45 | @@ -11,7 +11,7 @@ | ||
46 | |||
47 | / { | ||
48 | model = "D-Link DNS-313 1-Bay Network Storage Enclosure"; | ||
49 | - compatible = "dlink,dir-313", "cortina,gemini"; | ||
50 | + compatible = "dlink,dns-313", "cortina,gemini"; | ||
51 | #address-cells = <1>; | ||
52 | #size-cells = <1>; | ||
53 | |||
54 | diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi | ||
55 | index 2366f093cc76..336cdead3da5 100644 | ||
56 | --- a/arch/arm/boot/dts/imx6ul.dtsi | ||
57 | +++ b/arch/arm/boot/dts/imx6ul.dtsi | ||
58 | @@ -359,7 +359,7 @@ | ||
59 | pwm1: pwm@2080000 { | ||
60 | compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; | ||
61 | reg = <0x02080000 0x4000>; | ||
62 | - interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>; | ||
63 | + interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; | ||
64 | clocks = <&clks IMX6UL_CLK_PWM1>, | ||
65 | <&clks IMX6UL_CLK_PWM1>; | ||
66 | clock-names = "ipg", "per"; | ||
67 | @@ -370,7 +370,7 @@ | ||
68 | pwm2: pwm@2084000 { | ||
69 | compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; | ||
70 | reg = <0x02084000 0x4000>; | ||
71 | - interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>; | ||
72 | + interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>; | ||
73 | clocks = <&clks IMX6UL_CLK_PWM2>, | ||
74 | <&clks IMX6UL_CLK_PWM2>; | ||
75 | clock-names = "ipg", "per"; | ||
76 | @@ -381,7 +381,7 @@ | ||
77 | pwm3: pwm@2088000 { | ||
78 | compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; | ||
79 | reg = <0x02088000 0x4000>; | ||
80 | - interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>; | ||
81 | + interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>; | ||
82 | clocks = <&clks IMX6UL_CLK_PWM3>, | ||
83 | <&clks IMX6UL_CLK_PWM3>; | ||
84 | clock-names = "ipg", "per"; | ||
85 | @@ -392,7 +392,7 @@ | ||
86 | pwm4: pwm@208c000 { | ||
87 | compatible = "fsl,imx6ul-pwm", "fsl,imx27-pwm"; | ||
88 | reg = <0x0208c000 0x4000>; | ||
89 | - interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>; | ||
90 | + interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; | ||
91 | clocks = <&clks IMX6UL_CLK_PWM4>, | ||
92 | <&clks IMX6UL_CLK_PWM4>; | ||
93 | clock-names = "ipg", "per"; | ||
94 | diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c | ||
95 | index 05858f966f7d..dfa65fc2c82b 100644 | ||
96 | --- a/arch/arm/mach-omap2/prm3xxx.c | ||
97 | +++ b/arch/arm/mach-omap2/prm3xxx.c | ||
98 | @@ -433,7 +433,7 @@ static void omap3_prm_reconfigure_io_chain(void) | ||
99 | * registers, and omap3xxx_prm_reconfigure_io_chain() must be called. | ||
100 | * No return value. | ||
101 | */ | ||
102 | -static void __init omap3xxx_prm_enable_io_wakeup(void) | ||
103 | +static void omap3xxx_prm_enable_io_wakeup(void) | ||
104 | { | ||
105 | if (prm_features & PRM_HAS_IO_WAKEUP) | ||
106 | omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, | ||
107 | diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h | ||
108 | index 99c8ce30b3cd..7ffbc5d7ccf3 100644 | ||
109 | --- a/arch/s390/include/asm/facility.h | ||
110 | +++ b/arch/s390/include/asm/facility.h | ||
111 | @@ -59,6 +59,18 @@ static inline int test_facility(unsigned long nr) | ||
112 | return __test_facility(nr, &S390_lowcore.stfle_fac_list); | ||
113 | } | ||
114 | |||
115 | +static inline unsigned long __stfle_asm(u64 *stfle_fac_list, int size) | ||
116 | +{ | ||
117 | + register unsigned long reg0 asm("0") = size - 1; | ||
118 | + | ||
119 | + asm volatile( | ||
120 | + ".insn s,0xb2b00000,0(%1)" /* stfle */ | ||
121 | + : "+d" (reg0) | ||
122 | + : "a" (stfle_fac_list) | ||
123 | + : "memory", "cc"); | ||
124 | + return reg0; | ||
125 | +} | ||
126 | + | ||
127 | /** | ||
128 | * stfle - Store facility list extended | ||
129 | * @stfle_fac_list: array where facility list can be stored | ||
130 | @@ -76,13 +88,8 @@ static inline void stfle(u64 *stfle_fac_list, int size) | ||
131 | memcpy(stfle_fac_list, &S390_lowcore.stfl_fac_list, 4); | ||
132 | if (S390_lowcore.stfl_fac_list & 0x01000000) { | ||
133 | /* More facility bits available with stfle */ | ||
134 | - register unsigned long reg0 asm("0") = size - 1; | ||
135 | - | ||
136 | - asm volatile(".insn s,0xb2b00000,0(%1)" /* stfle */ | ||
137 | - : "+d" (reg0) | ||
138 | - : "a" (stfle_fac_list) | ||
139 | - : "memory", "cc"); | ||
140 | - nr = (reg0 + 1) * 8; /* # bytes stored by stfle */ | ||
141 | + nr = __stfle_asm(stfle_fac_list, size); | ||
142 | + nr = min_t(unsigned long, (nr + 1) * 8, size * 8); | ||
143 | } | ||
144 | memset((char *) stfle_fac_list + nr, 0, size * 8 - nr); | ||
145 | preempt_enable(); | ||
146 | diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S | ||
147 | index b5c2b1091b18..8059d4fd915c 100644 | ||
148 | --- a/arch/x86/entry/entry_32.S | ||
149 | +++ b/arch/x86/entry/entry_32.S | ||
150 | @@ -1098,6 +1098,30 @@ ENTRY(irq_entries_start) | ||
151 | .endr | ||
152 | END(irq_entries_start) | ||
153 | |||
154 | +#ifdef CONFIG_X86_LOCAL_APIC | ||
155 | + .align 8 | ||
156 | +ENTRY(spurious_entries_start) | ||
157 | + vector=FIRST_SYSTEM_VECTOR | ||
158 | + .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) | ||
159 | + pushl $(~vector+0x80) /* Note: always in signed byte range */ | ||
160 | + vector=vector+1 | ||
161 | + jmp common_spurious | ||
162 | + .align 8 | ||
163 | + .endr | ||
164 | +END(spurious_entries_start) | ||
165 | + | ||
166 | +common_spurious: | ||
167 | + ASM_CLAC | ||
168 | + addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ | ||
169 | + SAVE_ALL switch_stacks=1 | ||
170 | + ENCODE_FRAME_POINTER | ||
171 | + TRACE_IRQS_OFF | ||
172 | + movl %esp, %eax | ||
173 | + call smp_spurious_interrupt | ||
174 | + jmp ret_from_intr | ||
175 | +ENDPROC(common_spurious) | ||
176 | +#endif | ||
177 | + | ||
178 | /* | ||
179 | * the CPU automatically disables interrupts when executing an IRQ vector, | ||
180 | * so IRQ-flags tracing has to follow that: | ||
181 | diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S | ||
182 | index c90e00db5c13..206df099950e 100644 | ||
183 | --- a/arch/x86/entry/entry_64.S | ||
184 | +++ b/arch/x86/entry/entry_64.S | ||
185 | @@ -438,6 +438,18 @@ ENTRY(irq_entries_start) | ||
186 | .endr | ||
187 | END(irq_entries_start) | ||
188 | |||
189 | + .align 8 | ||
190 | +ENTRY(spurious_entries_start) | ||
191 | + vector=FIRST_SYSTEM_VECTOR | ||
192 | + .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) | ||
193 | + UNWIND_HINT_IRET_REGS | ||
194 | + pushq $(~vector+0x80) /* Note: always in signed byte range */ | ||
195 | + jmp common_spurious | ||
196 | + .align 8 | ||
197 | + vector=vector+1 | ||
198 | + .endr | ||
199 | +END(spurious_entries_start) | ||
200 | + | ||
201 | .macro DEBUG_ENTRY_ASSERT_IRQS_OFF | ||
202 | #ifdef CONFIG_DEBUG_ENTRY | ||
203 | pushq %rax | ||
204 | @@ -634,10 +646,20 @@ _ASM_NOKPROBE(interrupt_entry) | ||
205 | |||
206 | /* Interrupt entry/exit. */ | ||
207 | |||
208 | - /* | ||
209 | - * The interrupt stubs push (~vector+0x80) onto the stack and | ||
210 | - * then jump to common_interrupt. | ||
211 | - */ | ||
212 | +/* | ||
213 | + * The interrupt stubs push (~vector+0x80) onto the stack and | ||
214 | + * then jump to common_spurious/interrupt. | ||
215 | + */ | ||
216 | +common_spurious: | ||
217 | + addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ | ||
218 | + call interrupt_entry | ||
219 | + UNWIND_HINT_REGS indirect=1 | ||
220 | + call smp_spurious_interrupt /* rdi points to pt_regs */ | ||
221 | + jmp ret_from_intr | ||
222 | +END(common_spurious) | ||
223 | +_ASM_NOKPROBE(common_spurious) | ||
224 | + | ||
225 | +/* common_interrupt is a hotpath. Align it */ | ||
226 | .p2align CONFIG_X86_L1_CACHE_SHIFT | ||
227 | common_interrupt: | ||
228 | addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */ | ||
229 | diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h | ||
230 | index 32e666e1231e..cbd97e22d2f3 100644 | ||
231 | --- a/arch/x86/include/asm/hw_irq.h | ||
232 | +++ b/arch/x86/include/asm/hw_irq.h | ||
233 | @@ -150,8 +150,11 @@ extern char irq_entries_start[]; | ||
234 | #define trace_irq_entries_start irq_entries_start | ||
235 | #endif | ||
236 | |||
237 | +extern char spurious_entries_start[]; | ||
238 | + | ||
239 | #define VECTOR_UNUSED NULL | ||
240 | -#define VECTOR_RETRIGGERED ((void *)~0UL) | ||
241 | +#define VECTOR_SHUTDOWN ((void *)~0UL) | ||
242 | +#define VECTOR_RETRIGGERED ((void *)~1UL) | ||
243 | |||
244 | typedef struct irq_desc* vector_irq_t[NR_VECTORS]; | ||
245 | DECLARE_PER_CPU(vector_irq_t, vector_irq); | ||
246 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c | ||
247 | index 84132eddb5a8..02020f2e0080 100644 | ||
248 | --- a/arch/x86/kernel/apic/apic.c | ||
249 | +++ b/arch/x86/kernel/apic/apic.c | ||
250 | @@ -1452,7 +1452,8 @@ static void apic_pending_intr_clear(void) | ||
251 | if (queued) { | ||
252 | if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { | ||
253 | ntsc = rdtsc(); | ||
254 | - max_loops = (cpu_khz << 10) - (ntsc - tsc); | ||
255 | + max_loops = (long long)cpu_khz << 10; | ||
256 | + max_loops -= ntsc - tsc; | ||
257 | } else { | ||
258 | max_loops--; | ||
259 | } | ||
260 | @@ -2026,21 +2027,32 @@ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs) | ||
261 | entering_irq(); | ||
262 | trace_spurious_apic_entry(vector); | ||
263 | |||
264 | + inc_irq_stat(irq_spurious_count); | ||
265 | + | ||
266 | + /* | ||
267 | + * If this is a spurious interrupt then do not acknowledge | ||
268 | + */ | ||
269 | + if (vector == SPURIOUS_APIC_VECTOR) { | ||
270 | + /* See SDM vol 3 */ | ||
271 | + pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n", | ||
272 | + smp_processor_id()); | ||
273 | + goto out; | ||
274 | + } | ||
275 | + | ||
276 | /* | ||
277 | - * Check if this really is a spurious interrupt and ACK it | ||
278 | - * if it is a vectored one. Just in case... | ||
279 | - * Spurious interrupts should not be ACKed. | ||
280 | + * If it is a vectored one, verify it's set in the ISR. If set, | ||
281 | + * acknowledge it. | ||
282 | */ | ||
283 | v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1)); | ||
284 | - if (v & (1 << (vector & 0x1f))) | ||
285 | + if (v & (1 << (vector & 0x1f))) { | ||
286 | + pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n", | ||
287 | + vector, smp_processor_id()); | ||
288 | ack_APIC_irq(); | ||
289 | - | ||
290 | - inc_irq_stat(irq_spurious_count); | ||
291 | - | ||
292 | - /* see sw-dev-man vol 3, chapter 7.4.13.5 */ | ||
293 | - pr_info("spurious APIC interrupt through vector %02x on CPU#%d, " | ||
294 | - "should never happen.\n", vector, smp_processor_id()); | ||
295 | - | ||
296 | + } else { | ||
297 | + pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n", | ||
298 | + vector, smp_processor_id()); | ||
299 | + } | ||
300 | +out: | ||
301 | trace_spurious_apic_exit(vector); | ||
302 | exiting_irq(); | ||
303 | } | ||
304 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c | ||
305 | index ff0d14cd9e82..4077e309e5c4 100644 | ||
306 | --- a/arch/x86/kernel/apic/io_apic.c | ||
307 | +++ b/arch/x86/kernel/apic/io_apic.c | ||
308 | @@ -1891,6 +1891,50 @@ static int ioapic_set_affinity(struct irq_data *irq_data, | ||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | +/* | ||
313 | + * Interrupt shutdown masks the ioapic pin, but the interrupt might already | ||
314 | + * be in flight, but not yet serviced by the target CPU. That means | ||
315 | + * __synchronize_hardirq() would return and claim that everything is calmed | ||
316 | + * down. So free_irq() would proceed and deactivate the interrupt and free | ||
317 | + * resources. | ||
318 | + * | ||
319 | + * Once the target CPU comes around to service it it will find a cleared | ||
320 | + * vector and complain. While the spurious interrupt is harmless, the full | ||
321 | + * release of resources might prevent the interrupt from being acknowledged | ||
322 | + * which keeps the hardware in a weird state. | ||
323 | + * | ||
324 | + * Verify that the corresponding Remote-IRR bits are clear. | ||
325 | + */ | ||
326 | +static int ioapic_irq_get_chip_state(struct irq_data *irqd, | ||
327 | + enum irqchip_irq_state which, | ||
328 | + bool *state) | ||
329 | +{ | ||
330 | + struct mp_chip_data *mcd = irqd->chip_data; | ||
331 | + struct IO_APIC_route_entry rentry; | ||
332 | + struct irq_pin_list *p; | ||
333 | + | ||
334 | + if (which != IRQCHIP_STATE_ACTIVE) | ||
335 | + return -EINVAL; | ||
336 | + | ||
337 | + *state = false; | ||
338 | + raw_spin_lock(&ioapic_lock); | ||
339 | + for_each_irq_pin(p, mcd->irq_2_pin) { | ||
340 | + rentry = __ioapic_read_entry(p->apic, p->pin); | ||
341 | + /* | ||
342 | + * The remote IRR is only valid in level trigger mode. It's | ||
343 | + * meaning is undefined for edge triggered interrupts and | ||
344 | + * irrelevant because the IO-APIC treats them as fire and | ||
345 | + * forget. | ||
346 | + */ | ||
347 | + if (rentry.irr && rentry.trigger) { | ||
348 | + *state = true; | ||
349 | + break; | ||
350 | + } | ||
351 | + } | ||
352 | + raw_spin_unlock(&ioapic_lock); | ||
353 | + return 0; | ||
354 | +} | ||
355 | + | ||
356 | static struct irq_chip ioapic_chip __read_mostly = { | ||
357 | .name = "IO-APIC", | ||
358 | .irq_startup = startup_ioapic_irq, | ||
359 | @@ -1900,6 +1944,7 @@ static struct irq_chip ioapic_chip __read_mostly = { | ||
360 | .irq_eoi = ioapic_ack_level, | ||
361 | .irq_set_affinity = ioapic_set_affinity, | ||
362 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
363 | + .irq_get_irqchip_state = ioapic_irq_get_chip_state, | ||
364 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
365 | }; | ||
366 | |||
367 | @@ -1912,6 +1957,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { | ||
368 | .irq_eoi = ioapic_ir_ack_level, | ||
369 | .irq_set_affinity = ioapic_set_affinity, | ||
370 | .irq_retrigger = irq_chip_retrigger_hierarchy, | ||
371 | + .irq_get_irqchip_state = ioapic_irq_get_chip_state, | ||
372 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
373 | }; | ||
374 | |||
375 | diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c | ||
376 | index 652e7ffa9b9d..10e1d17aa060 100644 | ||
377 | --- a/arch/x86/kernel/apic/vector.c | ||
378 | +++ b/arch/x86/kernel/apic/vector.c | ||
379 | @@ -342,7 +342,7 @@ static void clear_irq_vector(struct irq_data *irqd) | ||
380 | trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, | ||
381 | apicd->prev_cpu); | ||
382 | |||
383 | - per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED; | ||
384 | + per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; | ||
385 | irq_matrix_free(vector_matrix, apicd->cpu, vector, managed); | ||
386 | apicd->vector = 0; | ||
387 | |||
388 | @@ -351,7 +351,7 @@ static void clear_irq_vector(struct irq_data *irqd) | ||
389 | if (!vector) | ||
390 | return; | ||
391 | |||
392 | - per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED; | ||
393 | + per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; | ||
394 | irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed); | ||
395 | apicd->prev_vector = 0; | ||
396 | apicd->move_in_progress = 0; | ||
397 | diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c | ||
398 | index ddee1f0870c4..250cfa85b633 100644 | ||
399 | --- a/arch/x86/kernel/head64.c | ||
400 | +++ b/arch/x86/kernel/head64.c | ||
401 | @@ -184,24 +184,25 @@ unsigned long __head __startup_64(unsigned long physaddr, | ||
402 | pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); | ||
403 | |||
404 | if (la57) { | ||
405 | - p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); | ||
406 | + p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], | ||
407 | + physaddr); | ||
408 | |||
409 | i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; | ||
410 | pgd[i + 0] = (pgdval_t)p4d + pgtable_flags; | ||
411 | pgd[i + 1] = (pgdval_t)p4d + pgtable_flags; | ||
412 | |||
413 | - i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D; | ||
414 | - p4d[i + 0] = (pgdval_t)pud + pgtable_flags; | ||
415 | - p4d[i + 1] = (pgdval_t)pud + pgtable_flags; | ||
416 | + i = physaddr >> P4D_SHIFT; | ||
417 | + p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; | ||
418 | + p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags; | ||
419 | } else { | ||
420 | i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; | ||
421 | pgd[i + 0] = (pgdval_t)pud + pgtable_flags; | ||
422 | pgd[i + 1] = (pgdval_t)pud + pgtable_flags; | ||
423 | } | ||
424 | |||
425 | - i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD; | ||
426 | - pud[i + 0] = (pudval_t)pmd + pgtable_flags; | ||
427 | - pud[i + 1] = (pudval_t)pmd + pgtable_flags; | ||
428 | + i = physaddr >> PUD_SHIFT; | ||
429 | + pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; | ||
430 | + pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags; | ||
431 | |||
432 | pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL; | ||
433 | /* Filter out unsupported __PAGE_KERNEL_* bits: */ | ||
434 | @@ -211,8 +212,9 @@ unsigned long __head __startup_64(unsigned long physaddr, | ||
435 | pmd_entry += physaddr; | ||
436 | |||
437 | for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) { | ||
438 | - int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD; | ||
439 | - pmd[idx] = pmd_entry + i * PMD_SIZE; | ||
440 | + int idx = i + (physaddr >> PMD_SHIFT); | ||
441 | + | ||
442 | + pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE; | ||
443 | } | ||
444 | |||
445 | /* | ||
446 | diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c | ||
447 | index 01adea278a71..a7e0e975043f 100644 | ||
448 | --- a/arch/x86/kernel/idt.c | ||
449 | +++ b/arch/x86/kernel/idt.c | ||
450 | @@ -321,7 +321,8 @@ void __init idt_setup_apic_and_irq_gates(void) | ||
451 | #ifdef CONFIG_X86_LOCAL_APIC | ||
452 | for_each_clear_bit_from(i, system_vectors, NR_VECTORS) { | ||
453 | set_bit(i, system_vectors); | ||
454 | - set_intr_gate(i, spurious_interrupt); | ||
455 | + entry = spurious_entries_start + 8 * (i - FIRST_SYSTEM_VECTOR); | ||
456 | + set_intr_gate(i, entry); | ||
457 | } | ||
458 | #endif | ||
459 | } | ||
460 | diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c | ||
461 | index 59b5f2ea7c2f..a975246074b5 100644 | ||
462 | --- a/arch/x86/kernel/irq.c | ||
463 | +++ b/arch/x86/kernel/irq.c | ||
464 | @@ -246,7 +246,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs) | ||
465 | if (!handle_irq(desc, regs)) { | ||
466 | ack_APIC_irq(); | ||
467 | |||
468 | - if (desc != VECTOR_RETRIGGERED) { | ||
469 | + if (desc != VECTOR_RETRIGGERED && desc != VECTOR_SHUTDOWN) { | ||
470 | pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n", | ||
471 | __func__, smp_processor_id(), | ||
472 | vector); | ||
473 | diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c | ||
474 | index dd6a6850cb45..ce015ce2977c 100644 | ||
475 | --- a/drivers/base/cacheinfo.c | ||
476 | +++ b/drivers/base/cacheinfo.c | ||
477 | @@ -653,7 +653,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu) | ||
478 | |||
479 | static int __init cacheinfo_sysfs_init(void) | ||
480 | { | ||
481 | - return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online", | ||
482 | + return cpuhp_setup_state(CPUHP_AP_BASE_CACHEINFO_ONLINE, | ||
483 | + "base/cacheinfo:online", | ||
484 | cacheinfo_cpu_online, cacheinfo_cpu_pre_down); | ||
485 | } | ||
486 | device_initcall(cacheinfo_sysfs_init); | ||
487 | diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c | ||
488 | index b5c865fe263b..818d8c37d70a 100644 | ||
489 | --- a/drivers/base/firmware_loader/fallback.c | ||
490 | +++ b/drivers/base/firmware_loader/fallback.c | ||
491 | @@ -659,7 +659,7 @@ static bool fw_run_sysfs_fallback(enum fw_opt opt_flags) | ||
492 | /* Also permit LSMs and IMA to fail firmware sysfs fallback */ | ||
493 | ret = security_kernel_load_data(LOADING_FIRMWARE); | ||
494 | if (ret < 0) | ||
495 | - return ret; | ||
496 | + return false; | ||
497 | |||
498 | return fw_force_sysfs_fallback(opt_flags); | ||
499 | } | ||
500 | diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c | ||
501 | index 429ca8ed7e51..982c7ac311b8 100644 | ||
502 | --- a/drivers/base/regmap/regmap-irq.c | ||
503 | +++ b/drivers/base/regmap/regmap-irq.c | ||
504 | @@ -91,6 +91,9 @@ static void regmap_irq_sync_unlock(struct irq_data *data) | ||
505 | * suppress pointless writes. | ||
506 | */ | ||
507 | for (i = 0; i < d->chip->num_regs; i++) { | ||
508 | + if (!d->chip->mask_base) | ||
509 | + continue; | ||
510 | + | ||
511 | reg = d->chip->mask_base + | ||
512 | (i * map->reg_stride * d->irq_reg_stride); | ||
513 | if (d->chip->mask_invert) { | ||
514 | @@ -526,6 +529,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | ||
515 | /* Mask all the interrupts by default */ | ||
516 | for (i = 0; i < chip->num_regs; i++) { | ||
517 | d->mask_buf[i] = d->mask_buf_def[i]; | ||
518 | + if (!chip->mask_base) | ||
519 | + continue; | ||
520 | + | ||
521 | reg = chip->mask_base + | ||
522 | (i * map->reg_stride * d->irq_reg_stride); | ||
523 | if (chip->mask_invert) | ||
524 | diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c | ||
525 | index ca3218337fd7..dfaa5aad0692 100644 | ||
526 | --- a/drivers/clk/ti/clkctrl.c | ||
527 | +++ b/drivers/clk/ti/clkctrl.c | ||
528 | @@ -229,6 +229,7 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, | ||
529 | { | ||
530 | struct omap_clkctrl_provider *provider = data; | ||
531 | struct omap_clkctrl_clk *entry; | ||
532 | + bool found = false; | ||
533 | |||
534 | if (clkspec->args_count != 2) | ||
535 | return ERR_PTR(-EINVAL); | ||
536 | @@ -238,11 +239,13 @@ static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec, | ||
537 | |||
538 | list_for_each_entry(entry, &provider->clocks, node) { | ||
539 | if (entry->reg_offset == clkspec->args[0] && | ||
540 | - entry->bit_offset == clkspec->args[1]) | ||
541 | + entry->bit_offset == clkspec->args[1]) { | ||
542 | + found = true; | ||
543 | break; | ||
544 | + } | ||
545 | } | ||
546 | |||
547 | - if (!entry) | ||
548 | + if (!found) | ||
549 | return ERR_PTR(-EINVAL); | ||
550 | |||
551 | return entry->clk; | ||
552 | diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c | ||
553 | index c68df7e8bee1..7ce2467c771e 100644 | ||
554 | --- a/drivers/crypto/nx/nx-842-powernv.c | ||
555 | +++ b/drivers/crypto/nx/nx-842-powernv.c | ||
556 | @@ -36,8 +36,6 @@ MODULE_ALIAS_CRYPTO("842-nx"); | ||
557 | #define WORKMEM_ALIGN (CRB_ALIGN) | ||
558 | #define CSB_WAIT_MAX (5000) /* ms */ | ||
559 | #define VAS_RETRIES (10) | ||
560 | -/* # of requests allowed per RxFIFO at a time. 0 for unlimited */ | ||
561 | -#define MAX_CREDITS_PER_RXFIFO (1024) | ||
562 | |||
563 | struct nx842_workmem { | ||
564 | /* Below fields must be properly aligned */ | ||
565 | @@ -821,7 +819,11 @@ static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, | ||
566 | rxattr.lnotify_lpid = lpid; | ||
567 | rxattr.lnotify_pid = pid; | ||
568 | rxattr.lnotify_tid = tid; | ||
569 | - rxattr.wcreds_max = MAX_CREDITS_PER_RXFIFO; | ||
570 | + /* | ||
571 | + * Maximum RX window credits can not be more than #CRBs in | ||
572 | + * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns. | ||
573 | + */ | ||
574 | + rxattr.wcreds_max = fifo_size / CRB_SIZE; | ||
575 | |||
576 | /* | ||
577 | * Open a VAS receice window which is used to configure RxFIFO | ||
578 | diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c | ||
579 | index c5859d3cb825..5849075d54c7 100644 | ||
580 | --- a/drivers/crypto/talitos.c | ||
581 | +++ b/drivers/crypto/talitos.c | ||
582 | @@ -334,6 +334,21 @@ int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, | ||
583 | } | ||
584 | EXPORT_SYMBOL(talitos_submit); | ||
585 | |||
586 | +static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1) | ||
587 | +{ | ||
588 | + struct talitos_edesc *edesc; | ||
589 | + | ||
590 | + if (!is_sec1) | ||
591 | + return request->desc->hdr; | ||
592 | + | ||
593 | + if (!request->desc->next_desc) | ||
594 | + return request->desc->hdr1; | ||
595 | + | ||
596 | + edesc = container_of(request->desc, struct talitos_edesc, desc); | ||
597 | + | ||
598 | + return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1; | ||
599 | +} | ||
600 | + | ||
601 | /* | ||
602 | * process what was done, notify callback of error if not | ||
603 | */ | ||
604 | @@ -355,12 +370,7 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch) | ||
605 | |||
606 | /* descriptors with their done bits set don't get the error */ | ||
607 | rmb(); | ||
608 | - if (!is_sec1) | ||
609 | - hdr = request->desc->hdr; | ||
610 | - else if (request->desc->next_desc) | ||
611 | - hdr = (request->desc + 1)->hdr1; | ||
612 | - else | ||
613 | - hdr = request->desc->hdr1; | ||
614 | + hdr = get_request_hdr(request, is_sec1); | ||
615 | |||
616 | if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) | ||
617 | status = 0; | ||
618 | @@ -490,8 +500,14 @@ static u32 current_desc_hdr(struct device *dev, int ch) | ||
619 | } | ||
620 | } | ||
621 | |||
622 | - if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) | ||
623 | - return (priv->chan[ch].fifo[iter].desc + 1)->hdr; | ||
624 | + if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { | ||
625 | + struct talitos_edesc *edesc; | ||
626 | + | ||
627 | + edesc = container_of(priv->chan[ch].fifo[iter].desc, | ||
628 | + struct talitos_edesc, desc); | ||
629 | + return ((struct talitos_desc *) | ||
630 | + (edesc->buf + edesc->dma_len))->hdr; | ||
631 | + } | ||
632 | |||
633 | return priv->chan[ch].fifo[iter].desc->hdr; | ||
634 | } | ||
635 | @@ -913,36 +929,6 @@ badkey: | ||
636 | return -EINVAL; | ||
637 | } | ||
638 | |||
639 | -/* | ||
640 | - * talitos_edesc - s/w-extended descriptor | ||
641 | - * @src_nents: number of segments in input scatterlist | ||
642 | - * @dst_nents: number of segments in output scatterlist | ||
643 | - * @icv_ool: whether ICV is out-of-line | ||
644 | - * @iv_dma: dma address of iv for checking continuity and link table | ||
645 | - * @dma_len: length of dma mapped link_tbl space | ||
646 | - * @dma_link_tbl: bus physical address of link_tbl/buf | ||
647 | - * @desc: h/w descriptor | ||
648 | - * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) | ||
649 | - * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) | ||
650 | - * | ||
651 | - * if decrypting (with authcheck), or either one of src_nents or dst_nents | ||
652 | - * is greater than 1, an integrity check value is concatenated to the end | ||
653 | - * of link_tbl data | ||
654 | - */ | ||
655 | -struct talitos_edesc { | ||
656 | - int src_nents; | ||
657 | - int dst_nents; | ||
658 | - bool icv_ool; | ||
659 | - dma_addr_t iv_dma; | ||
660 | - int dma_len; | ||
661 | - dma_addr_t dma_link_tbl; | ||
662 | - struct talitos_desc desc; | ||
663 | - union { | ||
664 | - struct talitos_ptr link_tbl[0]; | ||
665 | - u8 buf[0]; | ||
666 | - }; | ||
667 | -}; | ||
668 | - | ||
669 | static void talitos_sg_unmap(struct device *dev, | ||
670 | struct talitos_edesc *edesc, | ||
671 | struct scatterlist *src, | ||
672 | @@ -1431,15 +1417,11 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, | ||
673 | edesc->dst_nents = dst_nents; | ||
674 | edesc->iv_dma = iv_dma; | ||
675 | edesc->dma_len = dma_len; | ||
676 | - if (dma_len) { | ||
677 | - void *addr = &edesc->link_tbl[0]; | ||
678 | - | ||
679 | - if (is_sec1 && !dst) | ||
680 | - addr += sizeof(struct talitos_desc); | ||
681 | - edesc->dma_link_tbl = dma_map_single(dev, addr, | ||
682 | + if (dma_len) | ||
683 | + edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], | ||
684 | edesc->dma_len, | ||
685 | DMA_BIDIRECTIONAL); | ||
686 | - } | ||
687 | + | ||
688 | return edesc; | ||
689 | } | ||
690 | |||
691 | @@ -1706,14 +1688,16 @@ static void common_nonsnoop_hash_unmap(struct device *dev, | ||
692 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
693 | bool is_sec1 = has_ftr_sec1(priv); | ||
694 | struct talitos_desc *desc = &edesc->desc; | ||
695 | - struct talitos_desc *desc2 = desc + 1; | ||
696 | + struct talitos_desc *desc2 = (struct talitos_desc *) | ||
697 | + (edesc->buf + edesc->dma_len); | ||
698 | |||
699 | unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); | ||
700 | if (desc->next_desc && | ||
701 | desc->ptr[5].ptr != desc2->ptr[5].ptr) | ||
702 | unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); | ||
703 | |||
704 | - talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); | ||
705 | + if (req_ctx->psrc) | ||
706 | + talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); | ||
707 | |||
708 | /* When using hashctx-in, must unmap it. */ | ||
709 | if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) | ||
710 | @@ -1780,7 +1764,6 @@ static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, | ||
711 | |||
712 | static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
713 | struct ahash_request *areq, unsigned int length, | ||
714 | - unsigned int offset, | ||
715 | void (*callback) (struct device *dev, | ||
716 | struct talitos_desc *desc, | ||
717 | void *context, int error)) | ||
718 | @@ -1819,9 +1802,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
719 | |||
720 | sg_count = edesc->src_nents ?: 1; | ||
721 | if (is_sec1 && sg_count > 1) | ||
722 | - sg_pcopy_to_buffer(req_ctx->psrc, sg_count, | ||
723 | - edesc->buf + sizeof(struct talitos_desc), | ||
724 | - length, req_ctx->nbuf); | ||
725 | + sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); | ||
726 | else if (length) | ||
727 | sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, | ||
728 | DMA_TO_DEVICE); | ||
729 | @@ -1834,7 +1815,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
730 | DMA_TO_DEVICE); | ||
731 | } else { | ||
732 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | ||
733 | - &desc->ptr[3], sg_count, offset, 0); | ||
734 | + &desc->ptr[3], sg_count, 0, 0); | ||
735 | if (sg_count > 1) | ||
736 | sync_needed = true; | ||
737 | } | ||
738 | @@ -1858,7 +1839,8 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
739 | talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); | ||
740 | |||
741 | if (is_sec1 && req_ctx->nbuf && length) { | ||
742 | - struct talitos_desc *desc2 = desc + 1; | ||
743 | + struct talitos_desc *desc2 = (struct talitos_desc *) | ||
744 | + (edesc->buf + edesc->dma_len); | ||
745 | dma_addr_t next_desc; | ||
746 | |||
747 | memset(desc2, 0, sizeof(*desc2)); | ||
748 | @@ -1879,7 +1861,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc, | ||
749 | DMA_TO_DEVICE); | ||
750 | copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); | ||
751 | sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, | ||
752 | - &desc2->ptr[3], sg_count, offset, 0); | ||
753 | + &desc2->ptr[3], sg_count, 0, 0); | ||
754 | if (sg_count > 1) | ||
755 | sync_needed = true; | ||
756 | copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); | ||
757 | @@ -1990,7 +1972,6 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
758 | struct device *dev = ctx->dev; | ||
759 | struct talitos_private *priv = dev_get_drvdata(dev); | ||
760 | bool is_sec1 = has_ftr_sec1(priv); | ||
761 | - int offset = 0; | ||
762 | u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; | ||
763 | |||
764 | if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { | ||
765 | @@ -2030,6 +2011,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
766 | sg_chain(req_ctx->bufsl, 2, areq->src); | ||
767 | req_ctx->psrc = req_ctx->bufsl; | ||
768 | } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { | ||
769 | + int offset; | ||
770 | + | ||
771 | if (nbytes_to_hash > blocksize) | ||
772 | offset = blocksize - req_ctx->nbuf; | ||
773 | else | ||
774 | @@ -2042,7 +2025,8 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
775 | sg_copy_to_buffer(areq->src, nents, | ||
776 | ctx_buf + req_ctx->nbuf, offset); | ||
777 | req_ctx->nbuf += offset; | ||
778 | - req_ctx->psrc = areq->src; | ||
779 | + req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, | ||
780 | + offset); | ||
781 | } else | ||
782 | req_ctx->psrc = areq->src; | ||
783 | |||
784 | @@ -2082,8 +2066,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) | ||
785 | if (ctx->keylen && (req_ctx->first || req_ctx->last)) | ||
786 | edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; | ||
787 | |||
788 | - return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset, | ||
789 | - ahash_done); | ||
790 | + return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); | ||
791 | } | ||
792 | |||
793 | static int ahash_update(struct ahash_request *areq) | ||
794 | diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h | ||
795 | index a65a63e0d6c1..979f6a61e545 100644 | ||
796 | --- a/drivers/crypto/talitos.h | ||
797 | +++ b/drivers/crypto/talitos.h | ||
798 | @@ -65,6 +65,36 @@ struct talitos_desc { | ||
799 | |||
800 | #define TALITOS_DESC_SIZE (sizeof(struct talitos_desc) - sizeof(__be32)) | ||
801 | |||
802 | +/* | ||
803 | + * talitos_edesc - s/w-extended descriptor | ||
804 | + * @src_nents: number of segments in input scatterlist | ||
805 | + * @dst_nents: number of segments in output scatterlist | ||
806 | + * @icv_ool: whether ICV is out-of-line | ||
807 | + * @iv_dma: dma address of iv for checking continuity and link table | ||
808 | + * @dma_len: length of dma mapped link_tbl space | ||
809 | + * @dma_link_tbl: bus physical address of link_tbl/buf | ||
810 | + * @desc: h/w descriptor | ||
811 | + * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2) | ||
812 | + * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1) | ||
813 | + * | ||
814 | + * if decrypting (with authcheck), or either one of src_nents or dst_nents | ||
815 | + * is greater than 1, an integrity check value is concatenated to the end | ||
816 | + * of link_tbl data | ||
817 | + */ | ||
818 | +struct talitos_edesc { | ||
819 | + int src_nents; | ||
820 | + int dst_nents; | ||
821 | + bool icv_ool; | ||
822 | + dma_addr_t iv_dma; | ||
823 | + int dma_len; | ||
824 | + dma_addr_t dma_link_tbl; | ||
825 | + struct talitos_desc desc; | ||
826 | + union { | ||
827 | + struct talitos_ptr link_tbl[0]; | ||
828 | + u8 buf[0]; | ||
829 | + }; | ||
830 | +}; | ||
831 | + | ||
832 | /** | ||
833 | * talitos_request - descriptor submission request | ||
834 | * @desc: descriptor pointer (kernel virtual) | ||
835 | diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c | ||
836 | index b22ccfb0c991..2bf4d31f4967 100644 | ||
837 | --- a/drivers/firmware/efi/efi-bgrt.c | ||
838 | +++ b/drivers/firmware/efi/efi-bgrt.c | ||
839 | @@ -50,11 +50,6 @@ void __init efi_bgrt_init(struct acpi_table_header *table) | ||
840 | bgrt->version); | ||
841 | goto out; | ||
842 | } | ||
843 | - if (bgrt->status & 0xfe) { | ||
844 | - pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n", | ||
845 | - bgrt->status); | ||
846 | - goto out; | ||
847 | - } | ||
848 | if (bgrt->image_type != 0) { | ||
849 | pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n", | ||
850 | bgrt->image_type); | ||
851 | diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c | ||
852 | index 54e767bd5ddb..f28703db8dbd 100644 | ||
853 | --- a/drivers/gpu/drm/udl/udl_drv.c | ||
854 | +++ b/drivers/gpu/drm/udl/udl_drv.c | ||
855 | @@ -47,10 +47,16 @@ static const struct file_operations udl_driver_fops = { | ||
856 | .llseek = noop_llseek, | ||
857 | }; | ||
858 | |||
859 | +static void udl_driver_release(struct drm_device *dev) | ||
860 | +{ | ||
861 | + udl_fini(dev); | ||
862 | + udl_modeset_cleanup(dev); | ||
863 | + drm_dev_fini(dev); | ||
864 | + kfree(dev); | ||
865 | +} | ||
866 | + | ||
867 | static struct drm_driver driver = { | ||
868 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, | ||
869 | - .load = udl_driver_load, | ||
870 | - .unload = udl_driver_unload, | ||
871 | .release = udl_driver_release, | ||
872 | |||
873 | /* gem hooks */ | ||
874 | @@ -74,28 +80,56 @@ static struct drm_driver driver = { | ||
875 | .patchlevel = DRIVER_PATCHLEVEL, | ||
876 | }; | ||
877 | |||
878 | +static struct udl_device *udl_driver_create(struct usb_interface *interface) | ||
879 | +{ | ||
880 | + struct usb_device *udev = interface_to_usbdev(interface); | ||
881 | + struct udl_device *udl; | ||
882 | + int r; | ||
883 | + | ||
884 | + udl = kzalloc(sizeof(*udl), GFP_KERNEL); | ||
885 | + if (!udl) | ||
886 | + return ERR_PTR(-ENOMEM); | ||
887 | + | ||
888 | + r = drm_dev_init(&udl->drm, &driver, &interface->dev); | ||
889 | + if (r) { | ||
890 | + kfree(udl); | ||
891 | + return ERR_PTR(r); | ||
892 | + } | ||
893 | + | ||
894 | + udl->udev = udev; | ||
895 | + udl->drm.dev_private = udl; | ||
896 | + | ||
897 | + r = udl_init(udl); | ||
898 | + if (r) { | ||
899 | + drm_dev_fini(&udl->drm); | ||
900 | + kfree(udl); | ||
901 | + return ERR_PTR(r); | ||
902 | + } | ||
903 | + | ||
904 | + usb_set_intfdata(interface, udl); | ||
905 | + return udl; | ||
906 | +} | ||
907 | + | ||
908 | static int udl_usb_probe(struct usb_interface *interface, | ||
909 | const struct usb_device_id *id) | ||
910 | { | ||
911 | - struct usb_device *udev = interface_to_usbdev(interface); | ||
912 | - struct drm_device *dev; | ||
913 | int r; | ||
914 | + struct udl_device *udl; | ||
915 | |||
916 | - dev = drm_dev_alloc(&driver, &interface->dev); | ||
917 | - if (IS_ERR(dev)) | ||
918 | - return PTR_ERR(dev); | ||
919 | + udl = udl_driver_create(interface); | ||
920 | + if (IS_ERR(udl)) | ||
921 | + return PTR_ERR(udl); | ||
922 | |||
923 | - r = drm_dev_register(dev, (unsigned long)udev); | ||
924 | + r = drm_dev_register(&udl->drm, 0); | ||
925 | if (r) | ||
926 | goto err_free; | ||
927 | |||
928 | - usb_set_intfdata(interface, dev); | ||
929 | - DRM_INFO("Initialized udl on minor %d\n", dev->primary->index); | ||
930 | + DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index); | ||
931 | |||
932 | return 0; | ||
933 | |||
934 | err_free: | ||
935 | - drm_dev_unref(dev); | ||
936 | + drm_dev_put(&udl->drm); | ||
937 | return r; | ||
938 | } | ||
939 | |||
940 | diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h | ||
941 | index 4ae67d882eae..35c1f33fbc1a 100644 | ||
942 | --- a/drivers/gpu/drm/udl/udl_drv.h | ||
943 | +++ b/drivers/gpu/drm/udl/udl_drv.h | ||
944 | @@ -50,8 +50,8 @@ struct urb_list { | ||
945 | struct udl_fbdev; | ||
946 | |||
947 | struct udl_device { | ||
948 | + struct drm_device drm; | ||
949 | struct device *dev; | ||
950 | - struct drm_device *ddev; | ||
951 | struct usb_device *udev; | ||
952 | struct drm_crtc *crtc; | ||
953 | |||
954 | @@ -71,6 +71,8 @@ struct udl_device { | ||
955 | atomic_t cpu_kcycles_used; /* transpired during pixel processing */ | ||
956 | }; | ||
957 | |||
958 | +#define to_udl(x) container_of(x, struct udl_device, drm) | ||
959 | + | ||
960 | struct udl_gem_object { | ||
961 | struct drm_gem_object base; | ||
962 | struct page **pages; | ||
963 | @@ -102,9 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev); | ||
964 | int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len); | ||
965 | void udl_urb_completion(struct urb *urb); | ||
966 | |||
967 | -int udl_driver_load(struct drm_device *dev, unsigned long flags); | ||
968 | -void udl_driver_unload(struct drm_device *dev); | ||
969 | -void udl_driver_release(struct drm_device *dev); | ||
970 | +int udl_init(struct udl_device *udl); | ||
971 | +void udl_fini(struct drm_device *dev); | ||
972 | |||
973 | int udl_fbdev_init(struct drm_device *dev); | ||
974 | void udl_fbdev_cleanup(struct drm_device *dev); | ||
975 | diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c | ||
976 | index dd9ffded223b..4ab101bf1df0 100644 | ||
977 | --- a/drivers/gpu/drm/udl/udl_fb.c | ||
978 | +++ b/drivers/gpu/drm/udl/udl_fb.c | ||
979 | @@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, | ||
980 | int width, int height) | ||
981 | { | ||
982 | struct drm_device *dev = fb->base.dev; | ||
983 | - struct udl_device *udl = dev->dev_private; | ||
984 | + struct udl_device *udl = to_udl(dev); | ||
985 | int i, ret; | ||
986 | char *cmd; | ||
987 | cycles_t start_cycles, end_cycles; | ||
988 | @@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user) | ||
989 | { | ||
990 | struct udl_fbdev *ufbdev = info->par; | ||
991 | struct drm_device *dev = ufbdev->ufb.base.dev; | ||
992 | - struct udl_device *udl = dev->dev_private; | ||
993 | + struct udl_device *udl = to_udl(dev); | ||
994 | |||
995 | /* If the USB device is gone, we don't accept new opens */ | ||
996 | - if (drm_dev_is_unplugged(udl->ddev)) | ||
997 | + if (drm_dev_is_unplugged(&udl->drm)) | ||
998 | return -ENODEV; | ||
999 | |||
1000 | ufbdev->fb_count++; | ||
1001 | @@ -441,7 +441,7 @@ static void udl_fbdev_destroy(struct drm_device *dev, | ||
1002 | |||
1003 | int udl_fbdev_init(struct drm_device *dev) | ||
1004 | { | ||
1005 | - struct udl_device *udl = dev->dev_private; | ||
1006 | + struct udl_device *udl = to_udl(dev); | ||
1007 | int bpp_sel = fb_bpp; | ||
1008 | struct udl_fbdev *ufbdev; | ||
1009 | int ret; | ||
1010 | @@ -480,7 +480,7 @@ free: | ||
1011 | |||
1012 | void udl_fbdev_cleanup(struct drm_device *dev) | ||
1013 | { | ||
1014 | - struct udl_device *udl = dev->dev_private; | ||
1015 | + struct udl_device *udl = to_udl(dev); | ||
1016 | if (!udl->fbdev) | ||
1017 | return; | ||
1018 | |||
1019 | @@ -491,7 +491,7 @@ void udl_fbdev_cleanup(struct drm_device *dev) | ||
1020 | |||
1021 | void udl_fbdev_unplug(struct drm_device *dev) | ||
1022 | { | ||
1023 | - struct udl_device *udl = dev->dev_private; | ||
1024 | + struct udl_device *udl = to_udl(dev); | ||
1025 | struct udl_fbdev *ufbdev; | ||
1026 | if (!udl->fbdev) | ||
1027 | return; | ||
1028 | diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c | ||
1029 | index bb7b58407039..3b3e17652bb2 100644 | ||
1030 | --- a/drivers/gpu/drm/udl/udl_gem.c | ||
1031 | +++ b/drivers/gpu/drm/udl/udl_gem.c | ||
1032 | @@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev, | ||
1033 | { | ||
1034 | struct udl_gem_object *gobj; | ||
1035 | struct drm_gem_object *obj; | ||
1036 | - struct udl_device *udl = dev->dev_private; | ||
1037 | + struct udl_device *udl = to_udl(dev); | ||
1038 | int ret = 0; | ||
1039 | |||
1040 | mutex_lock(&udl->gem_lock); | ||
1041 | diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c | ||
1042 | index 19055dda3140..8d22b6cd5241 100644 | ||
1043 | --- a/drivers/gpu/drm/udl/udl_main.c | ||
1044 | +++ b/drivers/gpu/drm/udl/udl_main.c | ||
1045 | @@ -29,7 +29,7 @@ | ||
1046 | static int udl_parse_vendor_descriptor(struct drm_device *dev, | ||
1047 | struct usb_device *usbdev) | ||
1048 | { | ||
1049 | - struct udl_device *udl = dev->dev_private; | ||
1050 | + struct udl_device *udl = to_udl(dev); | ||
1051 | char *desc; | ||
1052 | char *buf; | ||
1053 | char *desc_end; | ||
1054 | @@ -165,7 +165,7 @@ void udl_urb_completion(struct urb *urb) | ||
1055 | |||
1056 | static void udl_free_urb_list(struct drm_device *dev) | ||
1057 | { | ||
1058 | - struct udl_device *udl = dev->dev_private; | ||
1059 | + struct udl_device *udl = to_udl(dev); | ||
1060 | int count = udl->urbs.count; | ||
1061 | struct list_head *node; | ||
1062 | struct urb_node *unode; | ||
1063 | @@ -198,7 +198,7 @@ static void udl_free_urb_list(struct drm_device *dev) | ||
1064 | |||
1065 | static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size) | ||
1066 | { | ||
1067 | - struct udl_device *udl = dev->dev_private; | ||
1068 | + struct udl_device *udl = to_udl(dev); | ||
1069 | struct urb *urb; | ||
1070 | struct urb_node *unode; | ||
1071 | char *buf; | ||
1072 | @@ -262,7 +262,7 @@ retry: | ||
1073 | |||
1074 | struct urb *udl_get_urb(struct drm_device *dev) | ||
1075 | { | ||
1076 | - struct udl_device *udl = dev->dev_private; | ||
1077 | + struct udl_device *udl = to_udl(dev); | ||
1078 | int ret = 0; | ||
1079 | struct list_head *entry; | ||
1080 | struct urb_node *unode; | ||
1081 | @@ -295,7 +295,7 @@ error: | ||
1082 | |||
1083 | int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) | ||
1084 | { | ||
1085 | - struct udl_device *udl = dev->dev_private; | ||
1086 | + struct udl_device *udl = to_udl(dev); | ||
1087 | int ret; | ||
1088 | |||
1089 | BUG_ON(len > udl->urbs.size); | ||
1090 | @@ -310,20 +310,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len) | ||
1091 | return ret; | ||
1092 | } | ||
1093 | |||
1094 | -int udl_driver_load(struct drm_device *dev, unsigned long flags) | ||
1095 | +int udl_init(struct udl_device *udl) | ||
1096 | { | ||
1097 | - struct usb_device *udev = (void*)flags; | ||
1098 | - struct udl_device *udl; | ||
1099 | + struct drm_device *dev = &udl->drm; | ||
1100 | int ret = -ENOMEM; | ||
1101 | |||
1102 | DRM_DEBUG("\n"); | ||
1103 | - udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL); | ||
1104 | - if (!udl) | ||
1105 | - return -ENOMEM; | ||
1106 | - | ||
1107 | - udl->udev = udev; | ||
1108 | - udl->ddev = dev; | ||
1109 | - dev->dev_private = udl; | ||
1110 | |||
1111 | mutex_init(&udl->gem_lock); | ||
1112 | |||
1113 | @@ -357,7 +349,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags) | ||
1114 | err: | ||
1115 | if (udl->urbs.count) | ||
1116 | udl_free_urb_list(dev); | ||
1117 | - kfree(udl); | ||
1118 | DRM_ERROR("%d\n", ret); | ||
1119 | return ret; | ||
1120 | } | ||
1121 | @@ -368,9 +359,9 @@ int udl_drop_usb(struct drm_device *dev) | ||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | -void udl_driver_unload(struct drm_device *dev) | ||
1126 | +void udl_fini(struct drm_device *dev) | ||
1127 | { | ||
1128 | - struct udl_device *udl = dev->dev_private; | ||
1129 | + struct udl_device *udl = to_udl(dev); | ||
1130 | |||
1131 | drm_kms_helper_poll_fini(dev); | ||
1132 | |||
1133 | @@ -378,12 +369,4 @@ void udl_driver_unload(struct drm_device *dev) | ||
1134 | udl_free_urb_list(dev); | ||
1135 | |||
1136 | udl_fbdev_cleanup(dev); | ||
1137 | - kfree(udl); | ||
1138 | -} | ||
1139 | - | ||
1140 | -void udl_driver_release(struct drm_device *dev) | ||
1141 | -{ | ||
1142 | - udl_modeset_cleanup(dev); | ||
1143 | - drm_dev_fini(dev); | ||
1144 | - kfree(dev); | ||
1145 | } | ||
1146 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h | ||
1147 | index 92452992b368..50b3c0d89c9c 100644 | ||
1148 | --- a/drivers/hid/hid-ids.h | ||
1149 | +++ b/drivers/hid/hid-ids.h | ||
1150 | @@ -82,6 +82,7 @@ | ||
1151 | #define HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP 0x1220 | ||
1152 | #define HID_DEVICE_ID_ALPS_U1 0x1215 | ||
1153 | #define HID_DEVICE_ID_ALPS_T4_BTNLESS 0x120C | ||
1154 | +#define HID_DEVICE_ID_ALPS_1222 0x1222 | ||
1155 | |||
1156 | |||
1157 | #define USB_VENDOR_ID_AMI 0x046b | ||
1158 | @@ -265,6 +266,7 @@ | ||
1159 | #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d | ||
1160 | #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 | ||
1161 | #define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE 0x1053 | ||
1162 | +#define USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2 0x0939 | ||
1163 | #define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 | ||
1164 | #define USB_DEVICE_ID_ASUS_AK1D 0x1125 | ||
1165 | #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421 | ||
1166 | diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c | ||
1167 | index 184e49036e1d..f9167d0e095c 100644 | ||
1168 | --- a/drivers/hid/hid-multitouch.c | ||
1169 | +++ b/drivers/hid/hid-multitouch.c | ||
1170 | @@ -1788,6 +1788,10 @@ static const struct hid_device_id mt_devices[] = { | ||
1171 | HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, | ||
1172 | USB_VENDOR_ID_ALPS_JP, | ||
1173 | HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, | ||
1174 | + { .driver_data = MT_CLS_WIN_8_DUAL, | ||
1175 | + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, | ||
1176 | + USB_VENDOR_ID_ALPS_JP, | ||
1177 | + HID_DEVICE_ID_ALPS_1222) }, | ||
1178 | |||
1179 | /* Lenovo X1 TAB Gen 2 */ | ||
1180 | { .driver_data = MT_CLS_WIN_8_DUAL, | ||
1181 | diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c | ||
1182 | index 5892f1bd037e..91e86af44a04 100644 | ||
1183 | --- a/drivers/hid/hid-quirks.c | ||
1184 | +++ b/drivers/hid/hid-quirks.c | ||
1185 | @@ -45,6 +45,7 @@ static const struct hid_device_id hid_quirks[] = { | ||
1186 | { HID_USB_DEVICE(USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM), HID_QUIRK_NOGET }, | ||
1187 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_MULTI_TOUCH), HID_QUIRK_MULTI_INPUT }, | ||
1188 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, | ||
1189 | + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE2), HID_QUIRK_ALWAYS_POLL }, | ||
1190 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS), HID_QUIRK_MULTI_INPUT }, | ||
1191 | { HID_USB_DEVICE(USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD), HID_QUIRK_BADPAD }, | ||
1192 | { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK), HID_QUIRK_NOGET }, | ||
1193 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c | ||
1194 | index 8e6077d8e434..68fd8232d44c 100644 | ||
1195 | --- a/drivers/input/mouse/synaptics.c | ||
1196 | +++ b/drivers/input/mouse/synaptics.c | ||
1197 | @@ -176,6 +176,7 @@ static const char * const smbus_pnp_ids[] = { | ||
1198 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ | ||
1199 | "LEN0073", /* X1 Carbon G5 (Elantech) */ | ||
1200 | "LEN0092", /* X1 Carbon 6 */ | ||
1201 | + "LEN0093", /* T480 */ | ||
1202 | "LEN0096", /* X280 */ | ||
1203 | "LEN0097", /* X280 -> ALPS trackpoint */ | ||
1204 | "LEN200f", /* T450s */ | ||
1205 | diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c | ||
1206 | index 65ab2c80529c..ee30e8965d1b 100644 | ||
1207 | --- a/drivers/irqchip/irq-gic-v3-its.c | ||
1208 | +++ b/drivers/irqchip/irq-gic-v3-its.c | ||
1209 | @@ -740,32 +740,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) | ||
1210 | } | ||
1211 | |||
1212 | static int its_wait_for_range_completion(struct its_node *its, | ||
1213 | - struct its_cmd_block *from, | ||
1214 | + u64 prev_idx, | ||
1215 | struct its_cmd_block *to) | ||
1216 | { | ||
1217 | - u64 rd_idx, from_idx, to_idx; | ||
1218 | + u64 rd_idx, to_idx, linear_idx; | ||
1219 | u32 count = 1000000; /* 1s! */ | ||
1220 | |||
1221 | - from_idx = its_cmd_ptr_to_offset(its, from); | ||
1222 | + /* Linearize to_idx if the command set has wrapped around */ | ||
1223 | to_idx = its_cmd_ptr_to_offset(its, to); | ||
1224 | + if (to_idx < prev_idx) | ||
1225 | + to_idx += ITS_CMD_QUEUE_SZ; | ||
1226 | + | ||
1227 | + linear_idx = prev_idx; | ||
1228 | |||
1229 | while (1) { | ||
1230 | + s64 delta; | ||
1231 | + | ||
1232 | rd_idx = readl_relaxed(its->base + GITS_CREADR); | ||
1233 | |||
1234 | - /* Direct case */ | ||
1235 | - if (from_idx < to_idx && rd_idx >= to_idx) | ||
1236 | - break; | ||
1237 | + /* | ||
1238 | + * Compute the read pointer progress, taking the | ||
1239 | + * potential wrap-around into account. | ||
1240 | + */ | ||
1241 | + delta = rd_idx - prev_idx; | ||
1242 | + if (rd_idx < prev_idx) | ||
1243 | + delta += ITS_CMD_QUEUE_SZ; | ||
1244 | |||
1245 | - /* Wrapped case */ | ||
1246 | - if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx) | ||
1247 | + linear_idx += delta; | ||
1248 | + if (linear_idx >= to_idx) | ||
1249 | break; | ||
1250 | |||
1251 | count--; | ||
1252 | if (!count) { | ||
1253 | - pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", | ||
1254 | - from_idx, to_idx, rd_idx); | ||
1255 | + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", | ||
1256 | + to_idx, linear_idx); | ||
1257 | return -1; | ||
1258 | } | ||
1259 | + prev_idx = rd_idx; | ||
1260 | cpu_relax(); | ||
1261 | udelay(1); | ||
1262 | } | ||
1263 | @@ -782,6 +793,7 @@ void name(struct its_node *its, \ | ||
1264 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ | ||
1265 | synctype *sync_obj; \ | ||
1266 | unsigned long flags; \ | ||
1267 | + u64 rd_idx; \ | ||
1268 | \ | ||
1269 | raw_spin_lock_irqsave(&its->lock, flags); \ | ||
1270 | \ | ||
1271 | @@ -803,10 +815,11 @@ void name(struct its_node *its, \ | ||
1272 | } \ | ||
1273 | \ | ||
1274 | post: \ | ||
1275 | + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ | ||
1276 | next_cmd = its_post_commands(its); \ | ||
1277 | raw_spin_unlock_irqrestore(&its->lock, flags); \ | ||
1278 | \ | ||
1279 | - if (its_wait_for_range_completion(its, cmd, next_cmd)) \ | ||
1280 | + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ | ||
1281 | pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ | ||
1282 | } | ||
1283 | |||
1284 | diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c | ||
1285 | index c7fe4789c40e..34ab30dd5de9 100644 | ||
1286 | --- a/drivers/md/dm-table.c | ||
1287 | +++ b/drivers/md/dm-table.c | ||
1288 | @@ -562,7 +562,7 @@ static char **realloc_argv(unsigned *size, char **old_argv) | ||
1289 | gfp = GFP_NOIO; | ||
1290 | } | ||
1291 | argv = kmalloc_array(new_size, sizeof(*argv), gfp); | ||
1292 | - if (argv) { | ||
1293 | + if (argv && old_argv) { | ||
1294 | memcpy(argv, old_argv, *size * sizeof(*argv)); | ||
1295 | *size = new_size; | ||
1296 | } | ||
1297 | diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c | ||
1298 | index fc65f0dedf7f..e3599b43f9eb 100644 | ||
1299 | --- a/drivers/md/dm-verity-target.c | ||
1300 | +++ b/drivers/md/dm-verity-target.c | ||
1301 | @@ -236,8 +236,8 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, | ||
1302 | BUG(); | ||
1303 | } | ||
1304 | |||
1305 | - DMERR("%s: %s block %llu is corrupted", v->data_dev->name, type_str, | ||
1306 | - block); | ||
1307 | + DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name, | ||
1308 | + type_str, block); | ||
1309 | |||
1310 | if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) | ||
1311 | DMERR("%s: reached maximum errors", v->data_dev->name); | ||
1312 | diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c | ||
1313 | index bfb16a474490..d1905d50c26c 100644 | ||
1314 | --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c | ||
1315 | +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c | ||
1316 | @@ -895,7 +895,7 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, | ||
1317 | u64 *data) | ||
1318 | { | ||
1319 | struct be_adapter *adapter = netdev_priv(netdev); | ||
1320 | - int status; | ||
1321 | + int status, cnt; | ||
1322 | u8 link_status = 0; | ||
1323 | |||
1324 | if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { | ||
1325 | @@ -906,6 +906,9 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, | ||
1326 | |||
1327 | memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); | ||
1328 | |||
1329 | + /* check link status before offline tests */ | ||
1330 | + link_status = netif_carrier_ok(netdev); | ||
1331 | + | ||
1332 | if (test->flags & ETH_TEST_FL_OFFLINE) { | ||
1333 | if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) | ||
1334 | test->flags |= ETH_TEST_FL_FAILED; | ||
1335 | @@ -926,13 +929,26 @@ static void be_self_test(struct net_device *netdev, struct ethtool_test *test, | ||
1336 | test->flags |= ETH_TEST_FL_FAILED; | ||
1337 | } | ||
1338 | |||
1339 | - status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); | ||
1340 | - if (status) { | ||
1341 | - test->flags |= ETH_TEST_FL_FAILED; | ||
1342 | - data[4] = -1; | ||
1343 | - } else if (!link_status) { | ||
1344 | + /* link status was down prior to test */ | ||
1345 | + if (!link_status) { | ||
1346 | test->flags |= ETH_TEST_FL_FAILED; | ||
1347 | data[4] = 1; | ||
1348 | + return; | ||
1349 | + } | ||
1350 | + | ||
1351 | + for (cnt = 10; cnt; cnt--) { | ||
1352 | + status = be_cmd_link_status_query(adapter, NULL, &link_status, | ||
1353 | + 0); | ||
1354 | + if (status) { | ||
1355 | + test->flags |= ETH_TEST_FL_FAILED; | ||
1356 | + data[4] = -1; | ||
1357 | + break; | ||
1358 | + } | ||
1359 | + | ||
1360 | + if (link_status) | ||
1361 | + break; | ||
1362 | + | ||
1363 | + msleep_interruptible(500); | ||
1364 | } | ||
1365 | } | ||
1366 | |||
1367 | diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c | ||
1368 | index 8cd339c92c1a..a7b5a47ab83d 100644 | ||
1369 | --- a/drivers/net/ethernet/intel/e1000e/netdev.c | ||
1370 | +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | ||
1371 | @@ -4208,7 +4208,7 @@ void e1000e_up(struct e1000_adapter *adapter) | ||
1372 | e1000_configure_msix(adapter); | ||
1373 | e1000_irq_enable(adapter); | ||
1374 | |||
1375 | - netif_start_queue(adapter->netdev); | ||
1376 | + /* Tx queue started by watchdog timer when link is up */ | ||
1377 | |||
1378 | e1000e_trigger_lsc(adapter); | ||
1379 | } | ||
1380 | @@ -4584,6 +4584,7 @@ int e1000e_open(struct net_device *netdev) | ||
1381 | pm_runtime_get_sync(&pdev->dev); | ||
1382 | |||
1383 | netif_carrier_off(netdev); | ||
1384 | + netif_stop_queue(netdev); | ||
1385 | |||
1386 | /* allocate transmit descriptors */ | ||
1387 | err = e1000e_setup_tx_resources(adapter->tx_ring); | ||
1388 | @@ -4644,7 +4645,6 @@ int e1000e_open(struct net_device *netdev) | ||
1389 | e1000_irq_enable(adapter); | ||
1390 | |||
1391 | adapter->tx_hang_recheck = false; | ||
1392 | - netif_start_queue(netdev); | ||
1393 | |||
1394 | hw->mac.get_link_status = true; | ||
1395 | pm_runtime_put(&pdev->dev); | ||
1396 | @@ -5266,6 +5266,7 @@ static void e1000_watchdog_task(struct work_struct *work) | ||
1397 | if (phy->ops.cfg_on_link_up) | ||
1398 | phy->ops.cfg_on_link_up(hw); | ||
1399 | |||
1400 | + netif_wake_queue(netdev); | ||
1401 | netif_carrier_on(netdev); | ||
1402 | |||
1403 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1404 | @@ -5279,6 +5280,7 @@ static void e1000_watchdog_task(struct work_struct *work) | ||
1405 | /* Link status message must follow this format */ | ||
1406 | pr_info("%s NIC Link is Down\n", adapter->netdev->name); | ||
1407 | netif_carrier_off(netdev); | ||
1408 | + netif_stop_queue(netdev); | ||
1409 | if (!test_bit(__E1000_DOWN, &adapter->state)) | ||
1410 | mod_timer(&adapter->phy_info_timer, | ||
1411 | round_jiffies(jiffies + 2 * HZ)); | ||
1412 | @@ -5286,13 +5288,8 @@ static void e1000_watchdog_task(struct work_struct *work) | ||
1413 | /* 8000ES2LAN requires a Rx packet buffer work-around | ||
1414 | * on link down event; reset the controller to flush | ||
1415 | * the Rx packet buffer. | ||
1416 | - * | ||
1417 | - * If the link is lost the controller stops DMA, but | ||
1418 | - * if there is queued Tx work it cannot be done. So | ||
1419 | - * reset the controller to flush the Tx packet buffers. | ||
1420 | */ | ||
1421 | - if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || | ||
1422 | - e1000_desc_unused(tx_ring) + 1 < tx_ring->count) | ||
1423 | + if (adapter->flags & FLAG_RX_NEEDS_RESTART) | ||
1424 | adapter->flags |= FLAG_RESTART_NOW; | ||
1425 | else | ||
1426 | pm_schedule_suspend(netdev->dev.parent, | ||
1427 | @@ -5315,6 +5312,14 @@ link_up: | ||
1428 | adapter->gotc_old = adapter->stats.gotc; | ||
1429 | spin_unlock(&adapter->stats64_lock); | ||
1430 | |||
1431 | + /* If the link is lost the controller stops DMA, but | ||
1432 | + * if there is queued Tx work it cannot be done. So | ||
1433 | + * reset the controller to flush the Tx packet buffers. | ||
1434 | + */ | ||
1435 | + if (!netif_carrier_ok(netdev) && | ||
1436 | + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) | ||
1437 | + adapter->flags |= FLAG_RESTART_NOW; | ||
1438 | + | ||
1439 | /* If reset is necessary, do it outside of interrupt context. */ | ||
1440 | if (adapter->flags & FLAG_RESTART_NOW) { | ||
1441 | schedule_work(&adapter->reset_task); | ||
1442 | diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c | ||
1443 | index 4bb89f74742c..d5bcbc40a55f 100644 | ||
1444 | --- a/drivers/net/ethernet/sis/sis900.c | ||
1445 | +++ b/drivers/net/ethernet/sis/sis900.c | ||
1446 | @@ -1057,7 +1057,7 @@ sis900_open(struct net_device *net_dev) | ||
1447 | sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); | ||
1448 | |||
1449 | /* Enable all known interrupts by setting the interrupt mask. */ | ||
1450 | - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); | ||
1451 | + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); | ||
1452 | sw32(cr, RxENA | sr32(cr)); | ||
1453 | sw32(ier, IE); | ||
1454 | |||
1455 | @@ -1578,7 +1578,7 @@ static void sis900_tx_timeout(struct net_device *net_dev) | ||
1456 | sw32(txdp, sis_priv->tx_ring_dma); | ||
1457 | |||
1458 | /* Enable all known interrupts by setting the interrupt mask. */ | ||
1459 | - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); | ||
1460 | + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); | ||
1461 | } | ||
1462 | |||
1463 | /** | ||
1464 | @@ -1618,7 +1618,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | ||
1465 | spin_unlock_irqrestore(&sis_priv->lock, flags); | ||
1466 | return NETDEV_TX_OK; | ||
1467 | } | ||
1468 | - sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); | ||
1469 | + sis_priv->tx_ring[entry].cmdsts = (OWN | INTR | skb->len); | ||
1470 | sw32(cr, TxENA | sr32(cr)); | ||
1471 | |||
1472 | sis_priv->cur_tx ++; | ||
1473 | @@ -1674,7 +1674,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) | ||
1474 | do { | ||
1475 | status = sr32(isr); | ||
1476 | |||
1477 | - if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) | ||
1478 | + if ((status & (HIBERR|TxURN|TxERR|TxIDLE|TxDESC|RxORN|RxERR|RxOK)) == 0) | ||
1479 | /* nothing intresting happened */ | ||
1480 | break; | ||
1481 | handled = 1; | ||
1482 | @@ -1684,7 +1684,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance) | ||
1483 | /* Rx interrupt */ | ||
1484 | sis900_rx(net_dev); | ||
1485 | |||
1486 | - if (status & (TxURN | TxERR | TxIDLE)) | ||
1487 | + if (status & (TxURN | TxERR | TxIDLE | TxDESC)) | ||
1488 | /* Tx interrupt */ | ||
1489 | sis900_finish_xmit(net_dev); | ||
1490 | |||
1491 | @@ -1896,8 +1896,8 @@ static void sis900_finish_xmit (struct net_device *net_dev) | ||
1492 | |||
1493 | if (tx_status & OWN) { | ||
1494 | /* The packet is not transmitted yet (owned by hardware) ! | ||
1495 | - * Note: the interrupt is generated only when Tx Machine | ||
1496 | - * is idle, so this is an almost impossible case */ | ||
1497 | + * Note: this is an almost impossible condition | ||
1498 | + * in case of TxDESC ('descriptor interrupt') */ | ||
1499 | break; | ||
1500 | } | ||
1501 | |||
1502 | @@ -2473,7 +2473,7 @@ static int sis900_resume(struct pci_dev *pci_dev) | ||
1503 | sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); | ||
1504 | |||
1505 | /* Enable all known interrupts by setting the interrupt mask. */ | ||
1506 | - sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE); | ||
1507 | + sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE | TxDESC); | ||
1508 | sw32(cr, RxENA | sr32(cr)); | ||
1509 | sw32(ier, IE); | ||
1510 | |||
1511 | diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c | ||
1512 | index a205750b431b..8609c1a0777b 100644 | ||
1513 | --- a/drivers/net/ppp/ppp_mppe.c | ||
1514 | +++ b/drivers/net/ppp/ppp_mppe.c | ||
1515 | @@ -63,6 +63,7 @@ MODULE_AUTHOR("Frank Cusack <fcusack@fcusack.com>"); | ||
1516 | MODULE_DESCRIPTION("Point-to-Point Protocol Microsoft Point-to-Point Encryption support"); | ||
1517 | MODULE_LICENSE("Dual BSD/GPL"); | ||
1518 | MODULE_ALIAS("ppp-compress-" __stringify(CI_MPPE)); | ||
1519 | +MODULE_SOFTDEP("pre: arc4"); | ||
1520 | MODULE_VERSION("1.0.2"); | ||
1521 | |||
1522 | static unsigned int | ||
1523 | diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c | ||
1524 | index a613e546717a..564cfaee129d 100644 | ||
1525 | --- a/drivers/pinctrl/mediatek/mtk-eint.c | ||
1526 | +++ b/drivers/pinctrl/mediatek/mtk-eint.c | ||
1527 | @@ -113,6 +113,8 @@ static void mtk_eint_mask(struct irq_data *d) | ||
1528 | void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, | ||
1529 | eint->regs->mask_set); | ||
1530 | |||
1531 | + eint->cur_mask[d->hwirq >> 5] &= ~mask; | ||
1532 | + | ||
1533 | writel(mask, reg); | ||
1534 | } | ||
1535 | |||
1536 | @@ -123,6 +125,8 @@ static void mtk_eint_unmask(struct irq_data *d) | ||
1537 | void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq, | ||
1538 | eint->regs->mask_clr); | ||
1539 | |||
1540 | + eint->cur_mask[d->hwirq >> 5] |= mask; | ||
1541 | + | ||
1542 | writel(mask, reg); | ||
1543 | |||
1544 | if (eint->dual_edge[d->hwirq]) | ||
1545 | @@ -217,19 +221,6 @@ static void mtk_eint_chip_write_mask(const struct mtk_eint *eint, | ||
1546 | } | ||
1547 | } | ||
1548 | |||
1549 | -static void mtk_eint_chip_read_mask(const struct mtk_eint *eint, | ||
1550 | - void __iomem *base, u32 *buf) | ||
1551 | -{ | ||
1552 | - int port; | ||
1553 | - void __iomem *reg; | ||
1554 | - | ||
1555 | - for (port = 0; port < eint->hw->ports; port++) { | ||
1556 | - reg = base + eint->regs->mask + (port << 2); | ||
1557 | - buf[port] = ~readl_relaxed(reg); | ||
1558 | - /* Mask is 0 when irq is enabled, and 1 when disabled. */ | ||
1559 | - } | ||
1560 | -} | ||
1561 | - | ||
1562 | static int mtk_eint_irq_request_resources(struct irq_data *d) | ||
1563 | { | ||
1564 | struct mtk_eint *eint = irq_data_get_irq_chip_data(d); | ||
1565 | @@ -318,7 +309,7 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | ||
1566 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
1567 | struct mtk_eint *eint = irq_desc_get_handler_data(desc); | ||
1568 | unsigned int status, eint_num; | ||
1569 | - int offset, index, virq; | ||
1570 | + int offset, mask_offset, index, virq; | ||
1571 | void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat); | ||
1572 | int dual_edge, start_level, curr_level; | ||
1573 | |||
1574 | @@ -328,10 +319,24 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | ||
1575 | status = readl(reg); | ||
1576 | while (status) { | ||
1577 | offset = __ffs(status); | ||
1578 | + mask_offset = eint_num >> 5; | ||
1579 | index = eint_num + offset; | ||
1580 | virq = irq_find_mapping(eint->domain, index); | ||
1581 | status &= ~BIT(offset); | ||
1582 | |||
1583 | + /* | ||
1584 | + * If we get an interrupt on pin that was only required | ||
1585 | + * for wake (but no real interrupt requested), mask the | ||
1586 | + * interrupt (as would mtk_eint_resume do anyway later | ||
1587 | + * in the resume sequence). | ||
1588 | + */ | ||
1589 | + if (eint->wake_mask[mask_offset] & BIT(offset) && | ||
1590 | + !(eint->cur_mask[mask_offset] & BIT(offset))) { | ||
1591 | + writel_relaxed(BIT(offset), reg - | ||
1592 | + eint->regs->stat + | ||
1593 | + eint->regs->mask_set); | ||
1594 | + } | ||
1595 | + | ||
1596 | dual_edge = eint->dual_edge[index]; | ||
1597 | if (dual_edge) { | ||
1598 | /* | ||
1599 | @@ -370,7 +375,6 @@ static void mtk_eint_irq_handler(struct irq_desc *desc) | ||
1600 | |||
1601 | int mtk_eint_do_suspend(struct mtk_eint *eint) | ||
1602 | { | ||
1603 | - mtk_eint_chip_read_mask(eint, eint->base, eint->cur_mask); | ||
1604 | mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask); | ||
1605 | |||
1606 | return 0; | ||
1607 | diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c | ||
1608 | index cecbce21d01f..33c3eca0ece9 100644 | ||
1609 | --- a/drivers/pinctrl/pinctrl-mcp23s08.c | ||
1610 | +++ b/drivers/pinctrl/pinctrl-mcp23s08.c | ||
1611 | @@ -889,6 +889,10 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | ||
1612 | if (ret < 0) | ||
1613 | goto fail; | ||
1614 | |||
1615 | + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); | ||
1616 | + if (ret < 0) | ||
1617 | + goto fail; | ||
1618 | + | ||
1619 | mcp->irq_controller = | ||
1620 | device_property_read_bool(dev, "interrupt-controller"); | ||
1621 | if (mcp->irq && mcp->irq_controller) { | ||
1622 | @@ -930,10 +934,6 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | ||
1623 | goto fail; | ||
1624 | } | ||
1625 | |||
1626 | - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); | ||
1627 | - if (ret < 0) | ||
1628 | - goto fail; | ||
1629 | - | ||
1630 | if (one_regmap_config) { | ||
1631 | mcp->pinctrl_desc.name = devm_kasprintf(dev, GFP_KERNEL, | ||
1632 | "mcp23xxx-pinctrl.%d", raw_chip_address); | ||
1633 | diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c | ||
1634 | index 78f1be41b05e..034528a5453e 100644 | ||
1635 | --- a/drivers/s390/cio/qdio_setup.c | ||
1636 | +++ b/drivers/s390/cio/qdio_setup.c | ||
1637 | @@ -151,6 +151,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues) | ||
1638 | return -ENOMEM; | ||
1639 | } | ||
1640 | irq_ptr_qs[i] = q; | ||
1641 | + INIT_LIST_HEAD(&q->entry); | ||
1642 | } | ||
1643 | return 0; | ||
1644 | } | ||
1645 | @@ -179,6 +180,7 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, | ||
1646 | q->mask = 1 << (31 - i); | ||
1647 | q->nr = i; | ||
1648 | q->handler = handler; | ||
1649 | + INIT_LIST_HEAD(&q->entry); | ||
1650 | } | ||
1651 | |||
1652 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | ||
1653 | diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c | ||
1654 | index 07dea602205b..6628e0c9e70e 100644 | ||
1655 | --- a/drivers/s390/cio/qdio_thinint.c | ||
1656 | +++ b/drivers/s390/cio/qdio_thinint.c | ||
1657 | @@ -79,7 +79,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) | ||
1658 | mutex_lock(&tiq_list_lock); | ||
1659 | list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list); | ||
1660 | mutex_unlock(&tiq_list_lock); | ||
1661 | - xchg(irq_ptr->dsci, 1 << 7); | ||
1662 | } | ||
1663 | |||
1664 | void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | ||
1665 | @@ -87,14 +86,14 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | ||
1666 | struct qdio_q *q; | ||
1667 | |||
1668 | q = irq_ptr->input_qs[0]; | ||
1669 | - /* if establish triggered an error */ | ||
1670 | - if (!q || !q->entry.prev || !q->entry.next) | ||
1671 | + if (!q) | ||
1672 | return; | ||
1673 | |||
1674 | mutex_lock(&tiq_list_lock); | ||
1675 | list_del_rcu(&q->entry); | ||
1676 | mutex_unlock(&tiq_list_lock); | ||
1677 | synchronize_rcu(); | ||
1678 | + INIT_LIST_HEAD(&q->entry); | ||
1679 | } | ||
1680 | |||
1681 | static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr) | ||
1682 | diff --git a/fs/afs/callback.c b/fs/afs/callback.c | ||
1683 | index 5f261fbf2182..4ad701250299 100644 | ||
1684 | --- a/fs/afs/callback.c | ||
1685 | +++ b/fs/afs/callback.c | ||
1686 | @@ -276,9 +276,9 @@ static void afs_break_one_callback(struct afs_server *server, | ||
1687 | struct afs_super_info *as = AFS_FS_S(cbi->sb); | ||
1688 | struct afs_volume *volume = as->volume; | ||
1689 | |||
1690 | - write_lock(&volume->cb_break_lock); | ||
1691 | + write_lock(&volume->cb_v_break_lock); | ||
1692 | volume->cb_v_break++; | ||
1693 | - write_unlock(&volume->cb_break_lock); | ||
1694 | + write_unlock(&volume->cb_v_break_lock); | ||
1695 | } else { | ||
1696 | data.volume = NULL; | ||
1697 | data.fid = *fid; | ||
1698 | diff --git a/fs/afs/internal.h b/fs/afs/internal.h | ||
1699 | index 34c02fdcc25f..aea19614c082 100644 | ||
1700 | --- a/fs/afs/internal.h | ||
1701 | +++ b/fs/afs/internal.h | ||
1702 | @@ -477,7 +477,7 @@ struct afs_volume { | ||
1703 | unsigned int servers_seq; /* Incremented each time ->servers changes */ | ||
1704 | |||
1705 | unsigned cb_v_break; /* Break-everything counter. */ | ||
1706 | - rwlock_t cb_break_lock; | ||
1707 | + rwlock_t cb_v_break_lock; | ||
1708 | |||
1709 | afs_voltype_t type; /* type of volume */ | ||
1710 | short error; | ||
1711 | diff --git a/fs/afs/volume.c b/fs/afs/volume.c | ||
1712 | index 3037bd01f617..5ec186ec5651 100644 | ||
1713 | --- a/fs/afs/volume.c | ||
1714 | +++ b/fs/afs/volume.c | ||
1715 | @@ -47,6 +47,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_mount_params *params, | ||
1716 | atomic_set(&volume->usage, 1); | ||
1717 | INIT_LIST_HEAD(&volume->proc_link); | ||
1718 | rwlock_init(&volume->servers_lock); | ||
1719 | + rwlock_init(&volume->cb_v_break_lock); | ||
1720 | memcpy(volume->name, vldb->name, vldb->name_len + 1); | ||
1721 | |||
1722 | slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask); | ||
1723 | diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h | ||
1724 | index d64d8c2bbdab..dec0372efe2e 100644 | ||
1725 | --- a/include/linux/cpuhotplug.h | ||
1726 | +++ b/include/linux/cpuhotplug.h | ||
1727 | @@ -170,6 +170,7 @@ enum cpuhp_state { | ||
1728 | CPUHP_AP_WATCHDOG_ONLINE, | ||
1729 | CPUHP_AP_WORKQUEUE_ONLINE, | ||
1730 | CPUHP_AP_RCUTREE_ONLINE, | ||
1731 | + CPUHP_AP_BASE_CACHEINFO_ONLINE, | ||
1732 | CPUHP_AP_ONLINE_DYN, | ||
1733 | CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, | ||
1734 | CPUHP_AP_X86_HPET_ONLINE, | ||
1735 | diff --git a/include/linux/kernel.h b/include/linux/kernel.h | ||
1736 | index 3d83ebb302cf..f6f94e54ab96 100644 | ||
1737 | --- a/include/linux/kernel.h | ||
1738 | +++ b/include/linux/kernel.h | ||
1739 | @@ -118,7 +118,8 @@ | ||
1740 | #define DIV_ROUND_DOWN_ULL(ll, d) \ | ||
1741 | ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) | ||
1742 | |||
1743 | -#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d)) | ||
1744 | +#define DIV_ROUND_UP_ULL(ll, d) \ | ||
1745 | + DIV_ROUND_DOWN_ULL((unsigned long long)(ll) + (d) - 1, (d)) | ||
1746 | |||
1747 | #if BITS_PER_LONG == 32 | ||
1748 | # define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) | ||
1749 | diff --git a/include/uapi/linux/nilfs2_ondisk.h b/include/uapi/linux/nilfs2_ondisk.h | ||
1750 | index a7e66ab11d1d..c23f91ae5fe8 100644 | ||
1751 | --- a/include/uapi/linux/nilfs2_ondisk.h | ||
1752 | +++ b/include/uapi/linux/nilfs2_ondisk.h | ||
1753 | @@ -29,7 +29,7 @@ | ||
1754 | |||
1755 | #include <linux/types.h> | ||
1756 | #include <linux/magic.h> | ||
1757 | - | ||
1758 | +#include <asm/byteorder.h> | ||
1759 | |||
1760 | #define NILFS_INODE_BMAP_SIZE 7 | ||
1761 | |||
1762 | @@ -533,19 +533,19 @@ enum { | ||
1763 | static inline void \ | ||
1764 | nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ | ||
1765 | { \ | ||
1766 | - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ | ||
1767 | - (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
1768 | + cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) | \ | ||
1769 | + (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
1770 | } \ | ||
1771 | static inline void \ | ||
1772 | nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ | ||
1773 | { \ | ||
1774 | - cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ | ||
1775 | + cp->cp_flags = __cpu_to_le32(__le32_to_cpu(cp->cp_flags) & \ | ||
1776 | ~(1UL << NILFS_CHECKPOINT_##flag)); \ | ||
1777 | } \ | ||
1778 | static inline int \ | ||
1779 | nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ | ||
1780 | { \ | ||
1781 | - return !!(le32_to_cpu(cp->cp_flags) & \ | ||
1782 | + return !!(__le32_to_cpu(cp->cp_flags) & \ | ||
1783 | (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
1784 | } | ||
1785 | |||
1786 | @@ -595,20 +595,20 @@ enum { | ||
1787 | static inline void \ | ||
1788 | nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ | ||
1789 | { \ | ||
1790 | - su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ | ||
1791 | + su->su_flags = __cpu_to_le32(__le32_to_cpu(su->su_flags) | \ | ||
1792 | (1UL << NILFS_SEGMENT_USAGE_##flag));\ | ||
1793 | } \ | ||
1794 | static inline void \ | ||
1795 | nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ | ||
1796 | { \ | ||
1797 | su->su_flags = \ | ||
1798 | - cpu_to_le32(le32_to_cpu(su->su_flags) & \ | ||
1799 | + __cpu_to_le32(__le32_to_cpu(su->su_flags) & \ | ||
1800 | ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ | ||
1801 | } \ | ||
1802 | static inline int \ | ||
1803 | nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ | ||
1804 | { \ | ||
1805 | - return !!(le32_to_cpu(su->su_flags) & \ | ||
1806 | + return !!(__le32_to_cpu(su->su_flags) & \ | ||
1807 | (1UL << NILFS_SEGMENT_USAGE_##flag)); \ | ||
1808 | } | ||
1809 | |||
1810 | @@ -619,15 +619,15 @@ NILFS_SEGMENT_USAGE_FNS(ERROR, error) | ||
1811 | static inline void | ||
1812 | nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) | ||
1813 | { | ||
1814 | - su->su_lastmod = cpu_to_le64(0); | ||
1815 | - su->su_nblocks = cpu_to_le32(0); | ||
1816 | - su->su_flags = cpu_to_le32(0); | ||
1817 | + su->su_lastmod = __cpu_to_le64(0); | ||
1818 | + su->su_nblocks = __cpu_to_le32(0); | ||
1819 | + su->su_flags = __cpu_to_le32(0); | ||
1820 | } | ||
1821 | |||
1822 | static inline int | ||
1823 | nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) | ||
1824 | { | ||
1825 | - return !le32_to_cpu(su->su_flags); | ||
1826 | + return !__le32_to_cpu(su->su_flags); | ||
1827 | } | ||
1828 | |||
1829 | /** | ||
1830 | diff --git a/kernel/cpu.c b/kernel/cpu.c | ||
1831 | index 46aefe5c0e35..d9f855cb9f6f 100644 | ||
1832 | --- a/kernel/cpu.c | ||
1833 | +++ b/kernel/cpu.c | ||
1834 | @@ -1925,6 +1925,9 @@ static ssize_t write_cpuhp_fail(struct device *dev, | ||
1835 | if (ret) | ||
1836 | return ret; | ||
1837 | |||
1838 | + if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE) | ||
1839 | + return -EINVAL; | ||
1840 | + | ||
1841 | /* | ||
1842 | * Cannot fail STARTING/DYING callbacks. | ||
1843 | */ | ||
1844 | diff --git a/kernel/events/core.c b/kernel/events/core.c | ||
1845 | index 171b83ebed4a..3b61ff40bfe2 100644 | ||
1846 | --- a/kernel/events/core.c | ||
1847 | +++ b/kernel/events/core.c | ||
1848 | @@ -5906,7 +5906,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, | ||
1849 | if (user_mode(regs)) { | ||
1850 | regs_user->abi = perf_reg_abi(current); | ||
1851 | regs_user->regs = regs; | ||
1852 | - } else if (current->mm) { | ||
1853 | + } else if (!(current->flags & PF_KTHREAD)) { | ||
1854 | perf_get_regs_user(regs_user, regs, regs_user_copy); | ||
1855 | } else { | ||
1856 | regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; | ||
1857 | diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c | ||
1858 | index 16cbf6beb276..ae60cae24e9a 100644 | ||
1859 | --- a/kernel/irq/autoprobe.c | ||
1860 | +++ b/kernel/irq/autoprobe.c | ||
1861 | @@ -90,7 +90,7 @@ unsigned long probe_irq_on(void) | ||
1862 | /* It triggered already - consider it spurious. */ | ||
1863 | if (!(desc->istate & IRQS_WAITING)) { | ||
1864 | desc->istate &= ~IRQS_AUTODETECT; | ||
1865 | - irq_shutdown(desc); | ||
1866 | + irq_shutdown_and_deactivate(desc); | ||
1867 | } else | ||
1868 | if (i < 32) | ||
1869 | mask |= 1 << i; | ||
1870 | @@ -127,7 +127,7 @@ unsigned int probe_irq_mask(unsigned long val) | ||
1871 | mask |= 1 << i; | ||
1872 | |||
1873 | desc->istate &= ~IRQS_AUTODETECT; | ||
1874 | - irq_shutdown(desc); | ||
1875 | + irq_shutdown_and_deactivate(desc); | ||
1876 | } | ||
1877 | raw_spin_unlock_irq(&desc->lock); | ||
1878 | } | ||
1879 | @@ -169,7 +169,7 @@ int probe_irq_off(unsigned long val) | ||
1880 | nr_of_irqs++; | ||
1881 | } | ||
1882 | desc->istate &= ~IRQS_AUTODETECT; | ||
1883 | - irq_shutdown(desc); | ||
1884 | + irq_shutdown_and_deactivate(desc); | ||
1885 | } | ||
1886 | raw_spin_unlock_irq(&desc->lock); | ||
1887 | } | ||
1888 | diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c | ||
1889 | index 379e89c706c9..09d914e486a2 100644 | ||
1890 | --- a/kernel/irq/chip.c | ||
1891 | +++ b/kernel/irq/chip.c | ||
1892 | @@ -314,6 +314,12 @@ void irq_shutdown(struct irq_desc *desc) | ||
1893 | } | ||
1894 | irq_state_clr_started(desc); | ||
1895 | } | ||
1896 | +} | ||
1897 | + | ||
1898 | + | ||
1899 | +void irq_shutdown_and_deactivate(struct irq_desc *desc) | ||
1900 | +{ | ||
1901 | + irq_shutdown(desc); | ||
1902 | /* | ||
1903 | * This must be called even if the interrupt was never started up, | ||
1904 | * because the activation can happen before the interrupt is | ||
1905 | diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c | ||
1906 | index 5b1072e394b2..6c7ca2e983a5 100644 | ||
1907 | --- a/kernel/irq/cpuhotplug.c | ||
1908 | +++ b/kernel/irq/cpuhotplug.c | ||
1909 | @@ -116,7 +116,7 @@ static bool migrate_one_irq(struct irq_desc *desc) | ||
1910 | */ | ||
1911 | if (irqd_affinity_is_managed(d)) { | ||
1912 | irqd_set_managed_shutdown(d); | ||
1913 | - irq_shutdown(desc); | ||
1914 | + irq_shutdown_and_deactivate(desc); | ||
1915 | return false; | ||
1916 | } | ||
1917 | affinity = cpu_online_mask; | ||
1918 | diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h | ||
1919 | index e74e7eea76cf..ea57f3d397fe 100644 | ||
1920 | --- a/kernel/irq/internals.h | ||
1921 | +++ b/kernel/irq/internals.h | ||
1922 | @@ -80,6 +80,7 @@ extern int irq_activate_and_startup(struct irq_desc *desc, bool resend); | ||
1923 | extern int irq_startup(struct irq_desc *desc, bool resend, bool force); | ||
1924 | |||
1925 | extern void irq_shutdown(struct irq_desc *desc); | ||
1926 | +extern void irq_shutdown_and_deactivate(struct irq_desc *desc); | ||
1927 | extern void irq_enable(struct irq_desc *desc); | ||
1928 | extern void irq_disable(struct irq_desc *desc); | ||
1929 | extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); | ||
1930 | @@ -94,6 +95,10 @@ static inline void irq_mark_irq(unsigned int irq) { } | ||
1931 | extern void irq_mark_irq(unsigned int irq); | ||
1932 | #endif | ||
1933 | |||
1934 | +extern int __irq_get_irqchip_state(struct irq_data *data, | ||
1935 | + enum irqchip_irq_state which, | ||
1936 | + bool *state); | ||
1937 | + | ||
1938 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | ||
1939 | |||
1940 | irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags); | ||
1941 | diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c | ||
1942 | index cd4f9f3e8345..23bcfa71077f 100644 | ||
1943 | --- a/kernel/irq/manage.c | ||
1944 | +++ b/kernel/irq/manage.c | ||
1945 | @@ -13,6 +13,7 @@ | ||
1946 | #include <linux/module.h> | ||
1947 | #include <linux/random.h> | ||
1948 | #include <linux/interrupt.h> | ||
1949 | +#include <linux/irqdomain.h> | ||
1950 | #include <linux/slab.h> | ||
1951 | #include <linux/sched.h> | ||
1952 | #include <linux/sched/rt.h> | ||
1953 | @@ -34,8 +35,9 @@ static int __init setup_forced_irqthreads(char *arg) | ||
1954 | early_param("threadirqs", setup_forced_irqthreads); | ||
1955 | #endif | ||
1956 | |||
1957 | -static void __synchronize_hardirq(struct irq_desc *desc) | ||
1958 | +static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) | ||
1959 | { | ||
1960 | + struct irq_data *irqd = irq_desc_get_irq_data(desc); | ||
1961 | bool inprogress; | ||
1962 | |||
1963 | do { | ||
1964 | @@ -51,6 +53,20 @@ static void __synchronize_hardirq(struct irq_desc *desc) | ||
1965 | /* Ok, that indicated we're done: double-check carefully. */ | ||
1966 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
1967 | inprogress = irqd_irq_inprogress(&desc->irq_data); | ||
1968 | + | ||
1969 | + /* | ||
1970 | + * If requested and supported, check at the chip whether it | ||
1971 | + * is in flight at the hardware level, i.e. already pending | ||
1972 | + * in a CPU and waiting for service and acknowledge. | ||
1973 | + */ | ||
1974 | + if (!inprogress && sync_chip) { | ||
1975 | + /* | ||
1976 | + * Ignore the return code. inprogress is only updated | ||
1977 | + * when the chip supports it. | ||
1978 | + */ | ||
1979 | + __irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE, | ||
1980 | + &inprogress); | ||
1981 | + } | ||
1982 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
1983 | |||
1984 | /* Oops, that failed? */ | ||
1985 | @@ -73,13 +89,18 @@ static void __synchronize_hardirq(struct irq_desc *desc) | ||
1986 | * Returns: false if a threaded handler is active. | ||
1987 | * | ||
1988 | * This function may be called - with care - from IRQ context. | ||
1989 | + * | ||
1990 | + * It does not check whether there is an interrupt in flight at the | ||
1991 | + * hardware level, but not serviced yet, as this might deadlock when | ||
1992 | + * called with interrupts disabled and the target CPU of the interrupt | ||
1993 | + * is the current CPU. | ||
1994 | */ | ||
1995 | bool synchronize_hardirq(unsigned int irq) | ||
1996 | { | ||
1997 | struct irq_desc *desc = irq_to_desc(irq); | ||
1998 | |||
1999 | if (desc) { | ||
2000 | - __synchronize_hardirq(desc); | ||
2001 | + __synchronize_hardirq(desc, false); | ||
2002 | return !atomic_read(&desc->threads_active); | ||
2003 | } | ||
2004 | |||
2005 | @@ -95,14 +116,19 @@ EXPORT_SYMBOL(synchronize_hardirq); | ||
2006 | * to complete before returning. If you use this function while | ||
2007 | * holding a resource the IRQ handler may need you will deadlock. | ||
2008 | * | ||
2009 | - * This function may be called - with care - from IRQ context. | ||
2010 | + * Can only be called from preemptible code as it might sleep when | ||
2011 | + * an interrupt thread is associated to @irq. | ||
2012 | + * | ||
2013 | + * It optionally makes sure (when the irq chip supports that method) | ||
2014 | + * that the interrupt is not pending in any CPU and waiting for | ||
2015 | + * service. | ||
2016 | */ | ||
2017 | void synchronize_irq(unsigned int irq) | ||
2018 | { | ||
2019 | struct irq_desc *desc = irq_to_desc(irq); | ||
2020 | |||
2021 | if (desc) { | ||
2022 | - __synchronize_hardirq(desc); | ||
2023 | + __synchronize_hardirq(desc, true); | ||
2024 | /* | ||
2025 | * We made sure that no hardirq handler is | ||
2026 | * running. Now verify that no threaded handlers are | ||
2027 | @@ -1619,6 +1645,7 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) | ||
2028 | /* If this was the last handler, shut down the IRQ line: */ | ||
2029 | if (!desc->action) { | ||
2030 | irq_settings_clr_disable_unlazy(desc); | ||
2031 | + /* Only shutdown. Deactivate after synchronize_hardirq() */ | ||
2032 | irq_shutdown(desc); | ||
2033 | } | ||
2034 | |||
2035 | @@ -1647,8 +1674,12 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) | ||
2036 | |||
2037 | unregister_handler_proc(irq, action); | ||
2038 | |||
2039 | - /* Make sure it's not being used on another CPU: */ | ||
2040 | - synchronize_hardirq(irq); | ||
2041 | + /* | ||
2042 | + * Make sure it's not being used on another CPU and if the chip | ||
2043 | + * supports it also make sure that there is no (not yet serviced) | ||
2044 | + * interrupt in flight at the hardware level. | ||
2045 | + */ | ||
2046 | + __synchronize_hardirq(desc, true); | ||
2047 | |||
2048 | #ifdef CONFIG_DEBUG_SHIRQ | ||
2049 | /* | ||
2050 | @@ -1688,6 +1719,14 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id) | ||
2051 | * require it to deallocate resources over the slow bus. | ||
2052 | */ | ||
2053 | chip_bus_lock(desc); | ||
2054 | + /* | ||
2055 | + * There is no interrupt on the fly anymore. Deactivate it | ||
2056 | + * completely. | ||
2057 | + */ | ||
2058 | + raw_spin_lock_irqsave(&desc->lock, flags); | ||
2059 | + irq_domain_deactivate_irq(&desc->irq_data); | ||
2060 | + raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
2061 | + | ||
2062 | irq_release_resources(desc); | ||
2063 | chip_bus_sync_unlock(desc); | ||
2064 | irq_remove_timings(desc); | ||
2065 | @@ -2173,6 +2212,28 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, | ||
2066 | } | ||
2067 | EXPORT_SYMBOL_GPL(__request_percpu_irq); | ||
2068 | |||
2069 | +int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which, | ||
2070 | + bool *state) | ||
2071 | +{ | ||
2072 | + struct irq_chip *chip; | ||
2073 | + int err = -EINVAL; | ||
2074 | + | ||
2075 | + do { | ||
2076 | + chip = irq_data_get_irq_chip(data); | ||
2077 | + if (chip->irq_get_irqchip_state) | ||
2078 | + break; | ||
2079 | +#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
2080 | + data = data->parent_data; | ||
2081 | +#else | ||
2082 | + data = NULL; | ||
2083 | +#endif | ||
2084 | + } while (data); | ||
2085 | + | ||
2086 | + if (data) | ||
2087 | + err = chip->irq_get_irqchip_state(data, which, state); | ||
2088 | + return err; | ||
2089 | +} | ||
2090 | + | ||
2091 | /** | ||
2092 | * irq_get_irqchip_state - returns the irqchip state of a interrupt. | ||
2093 | * @irq: Interrupt line that is forwarded to a VM | ||
2094 | @@ -2191,7 +2252,6 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | ||
2095 | { | ||
2096 | struct irq_desc *desc; | ||
2097 | struct irq_data *data; | ||
2098 | - struct irq_chip *chip; | ||
2099 | unsigned long flags; | ||
2100 | int err = -EINVAL; | ||
2101 | |||
2102 | @@ -2201,19 +2261,7 @@ int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, | ||
2103 | |||
2104 | data = irq_desc_get_irq_data(desc); | ||
2105 | |||
2106 | - do { | ||
2107 | - chip = irq_data_get_irq_chip(data); | ||
2108 | - if (chip->irq_get_irqchip_state) | ||
2109 | - break; | ||
2110 | -#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
2111 | - data = data->parent_data; | ||
2112 | -#else | ||
2113 | - data = NULL; | ||
2114 | -#endif | ||
2115 | - } while (data); | ||
2116 | - | ||
2117 | - if (data) | ||
2118 | - err = chip->irq_get_irqchip_state(data, which, state); | ||
2119 | + err = __irq_get_irqchip_state(data, which, state); | ||
2120 | |||
2121 | irq_put_desc_busunlock(desc, flags); | ||
2122 | return err; |