Contents of /trunk/kernel-alx/patches-5.4/0120-5.4.21-all-fixes.patch
Parent Directory | Revision Log
Revision 3501 -
(show annotations)
(download)
Mon May 11 14:36:22 2020 UTC (4 years, 4 months ago) by niro
File size: 85938 byte(s)
-linux-5.4.21
1 | diff --git a/Makefile b/Makefile |
2 | index 21e58bd54715..adfc88f00f07 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 20 |
10 | +SUBLEVEL = 21 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig |
15 | index 880bc2a5cada..7f7002dc2b21 100644 |
16 | --- a/arch/arm/mach-npcm/Kconfig |
17 | +++ b/arch/arm/mach-npcm/Kconfig |
18 | @@ -11,7 +11,7 @@ config ARCH_NPCM7XX |
19 | depends on ARCH_MULTI_V7 |
20 | select PINCTRL_NPCM7XX |
21 | select NPCM7XX_TIMER |
22 | - select ARCH_REQUIRE_GPIOLIB |
23 | + select GPIOLIB |
24 | select CACHE_L2X0 |
25 | select ARM_GIC |
26 | select HAVE_ARM_TWD if SMP |
27 | diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts |
28 | index 62ab0d54ff71..335fff762451 100644 |
29 | --- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts |
30 | +++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts |
31 | @@ -161,10 +161,10 @@ |
32 | bus-range = <0x0 0x1>; |
33 | reg = <0x0 0x40000000 0x0 0x10000000>; |
34 | ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>; |
35 | - interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, |
36 | - <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, |
37 | - <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, |
38 | - <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; |
39 | + interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>, |
40 | + <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>, |
41 | + <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>, |
42 | + <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>; |
43 | interrupt-map-mask = <0x0 0x0 0x0 0x7>; |
44 | msi-map = <0x0 &its 0x0 0x10000>; |
45 | iommu-map = <0x0 &smmu 0x0 0x10000>; |
46 | diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c |
47 | index d54586d5b031..fab013c5ee8c 100644 |
48 | --- a/arch/arm64/kernel/process.c |
49 | +++ b/arch/arm64/kernel/process.c |
50 | @@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next) |
51 | if (unlikely(next->flags & PF_KTHREAD)) |
52 | return; |
53 | |
54 | + /* |
55 | + * If all CPUs implement the SSBS extension, then we just need to |
56 | + * context-switch the PSTATE field. |
57 | + */ |
58 | + if (cpu_have_feature(cpu_feature(SSBS))) |
59 | + return; |
60 | + |
61 | /* If the mitigation is enabled, then we leave SSBS clear. */ |
62 | if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) || |
63 | test_tsk_thread_flag(next, TIF_SSBD)) |
64 | diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c |
65 | index ed007f4a6444..3f501159ee9f 100644 |
66 | --- a/arch/s390/boot/uv.c |
67 | +++ b/arch/s390/boot/uv.c |
68 | @@ -15,7 +15,8 @@ void uv_query_info(void) |
69 | if (!test_facility(158)) |
70 | return; |
71 | |
72 | - if (uv_call(0, (uint64_t)&uvcb)) |
73 | + /* rc==0x100 means that there is additional data we do not process */ |
74 | + if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100) |
75 | return; |
76 | |
77 | if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) && |
78 | diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h |
79 | index 2dc9eb4e1acc..b6a4ce9dafaf 100644 |
80 | --- a/arch/s390/include/asm/timex.h |
81 | +++ b/arch/s390/include/asm/timex.h |
82 | @@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk) |
83 | |
84 | static inline unsigned long long get_tod_clock(void) |
85 | { |
86 | - unsigned char clk[STORE_CLOCK_EXT_SIZE]; |
87 | + char clk[STORE_CLOCK_EXT_SIZE]; |
88 | |
89 | get_tod_clock_ext(clk); |
90 | return *((unsigned long long *)&clk[1]); |
91 | diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c |
92 | index 64c3e70b0556..beffafd7dcc3 100644 |
93 | --- a/arch/x86/events/amd/core.c |
94 | +++ b/arch/x86/events/amd/core.c |
95 | @@ -246,6 +246,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] = |
96 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, |
97 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, |
98 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60, |
99 | + [PERF_COUNT_HW_CACHE_MISSES] = 0x0964, |
100 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2, |
101 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3, |
102 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287, |
103 | diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c |
104 | index ce83950036c5..e5ad97a82342 100644 |
105 | --- a/arch/x86/events/intel/ds.c |
106 | +++ b/arch/x86/events/intel/ds.c |
107 | @@ -1713,6 +1713,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count) |
108 | old = ((s64)(prev_raw_count << shift) >> shift); |
109 | local64_add(new - old + count * period, &event->count); |
110 | |
111 | + local64_set(&hwc->period_left, -new); |
112 | + |
113 | perf_event_update_userpage(event); |
114 | |
115 | return 0; |
116 | diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h |
117 | index c1d7b866a03f..4e3f137ffa8c 100644 |
118 | --- a/arch/x86/kvm/paging_tmpl.h |
119 | +++ b/arch/x86/kvm/paging_tmpl.h |
120 | @@ -33,7 +33,7 @@ |
121 | #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT |
122 | #define PT_HAVE_ACCESSED_DIRTY(mmu) true |
123 | #ifdef CONFIG_X86_64 |
124 | - #define PT_MAX_FULL_LEVELS 4 |
125 | + #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL |
126 | #define CMPXCHG cmpxchg |
127 | #else |
128 | #define CMPXCHG cmpxchg64 |
129 | diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c |
130 | index dc7c166c4335..84b57b461ad6 100644 |
131 | --- a/arch/x86/kvm/vmx/vmx.c |
132 | +++ b/arch/x86/kvm/vmx/vmx.c |
133 | @@ -2975,6 +2975,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) |
134 | |
135 | static int get_ept_level(struct kvm_vcpu *vcpu) |
136 | { |
137 | + /* Nested EPT currently only supports 4-level walks. */ |
138 | + if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu))) |
139 | + return 4; |
140 | if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48)) |
141 | return 5; |
142 | return 4; |
143 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
144 | index edde5ee8c6f5..95180d67d570 100644 |
145 | --- a/arch/x86/kvm/x86.c |
146 | +++ b/arch/x86/kvm/x86.c |
147 | @@ -445,6 +445,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) |
148 | * for #DB exceptions under VMX. |
149 | */ |
150 | vcpu->arch.dr6 ^= payload & DR6_RTM; |
151 | + |
152 | + /* |
153 | + * The #DB payload is defined as compatible with the 'pending |
154 | + * debug exceptions' field under VMX, not DR6. While bit 12 is |
155 | + * defined in the 'pending debug exceptions' field (enabled |
156 | + * breakpoint), it is reserved and must be zero in DR6. |
157 | + */ |
158 | + vcpu->arch.dr6 &= ~BIT(12); |
159 | break; |
160 | case PF_VECTOR: |
161 | vcpu->arch.cr2 = payload; |
162 | diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h |
163 | index bcf8f7501db7..a74c1a0e892d 100644 |
164 | --- a/drivers/acpi/acpica/achware.h |
165 | +++ b/drivers/acpi/acpica/achware.h |
166 | @@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void); |
167 | |
168 | acpi_status acpi_hw_enable_all_wakeup_gpes(void); |
169 | |
170 | +u8 acpi_hw_check_all_gpes(void); |
171 | + |
172 | acpi_status |
173 | acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
174 | struct acpi_gpe_block_info *gpe_block, |
175 | diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c |
176 | index 04a40d563dd6..84b0b410310e 100644 |
177 | --- a/drivers/acpi/acpica/evxfgpe.c |
178 | +++ b/drivers/acpi/acpica/evxfgpe.c |
179 | @@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void) |
180 | |
181 | ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) |
182 | |
183 | +/****************************************************************************** |
184 | + * |
185 | + * FUNCTION: acpi_any_gpe_status_set |
186 | + * |
187 | + * PARAMETERS: None |
188 | + * |
189 | + * RETURN: Whether or not the status bit is set for any GPE |
190 | + * |
191 | + * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any |
192 | + * of them is set or FALSE otherwise. |
193 | + * |
194 | + ******************************************************************************/ |
195 | +u32 acpi_any_gpe_status_set(void) |
196 | +{ |
197 | + acpi_status status; |
198 | + u8 ret; |
199 | + |
200 | + ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); |
201 | + |
202 | + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); |
203 | + if (ACPI_FAILURE(status)) { |
204 | + return (FALSE); |
205 | + } |
206 | + |
207 | + ret = acpi_hw_check_all_gpes(); |
208 | + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); |
209 | + |
210 | + return (ret); |
211 | +} |
212 | + |
213 | +ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set) |
214 | + |
215 | /******************************************************************************* |
216 | * |
217 | * FUNCTION: acpi_install_gpe_block |
218 | diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c |
219 | index 565bd3f29f31..b1d7d5f92495 100644 |
220 | --- a/drivers/acpi/acpica/hwgpe.c |
221 | +++ b/drivers/acpi/acpica/hwgpe.c |
222 | @@ -444,6 +444,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
223 | return (AE_OK); |
224 | } |
225 | |
226 | +/****************************************************************************** |
227 | + * |
228 | + * FUNCTION: acpi_hw_get_gpe_block_status |
229 | + * |
230 | + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info |
231 | + * gpe_block - Gpe Block info |
232 | + * |
233 | + * RETURN: Success |
234 | + * |
235 | + * DESCRIPTION: Produce a combined GPE status bits mask for the given block. |
236 | + * |
237 | + ******************************************************************************/ |
238 | + |
239 | +static acpi_status |
240 | +acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, |
241 | + struct acpi_gpe_block_info *gpe_block, |
242 | + void *ret_ptr) |
243 | +{ |
244 | + struct acpi_gpe_register_info *gpe_register_info; |
245 | + u64 in_enable, in_status; |
246 | + acpi_status status; |
247 | + u8 *ret = ret_ptr; |
248 | + u32 i; |
249 | + |
250 | + /* Examine each GPE Register within the block */ |
251 | + |
252 | + for (i = 0; i < gpe_block->register_count; i++) { |
253 | + gpe_register_info = &gpe_block->register_info[i]; |
254 | + |
255 | + status = acpi_hw_read(&in_enable, |
256 | + &gpe_register_info->enable_address); |
257 | + if (ACPI_FAILURE(status)) { |
258 | + continue; |
259 | + } |
260 | + |
261 | + status = acpi_hw_read(&in_status, |
262 | + &gpe_register_info->status_address); |
263 | + if (ACPI_FAILURE(status)) { |
264 | + continue; |
265 | + } |
266 | + |
267 | + *ret |= in_enable & in_status; |
268 | + } |
269 | + |
270 | + return (AE_OK); |
271 | +} |
272 | + |
273 | /****************************************************************************** |
274 | * |
275 | * FUNCTION: acpi_hw_disable_all_gpes |
276 | @@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void) |
277 | return_ACPI_STATUS(status); |
278 | } |
279 | |
280 | +/****************************************************************************** |
281 | + * |
282 | + * FUNCTION: acpi_hw_check_all_gpes |
283 | + * |
284 | + * PARAMETERS: None |
285 | + * |
286 | + * RETURN: Combined status of all GPEs |
287 | + * |
288 | + * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the |
289 | + * status bit is set for at least one of them or FALSE otherwise. |
290 | + * |
291 | + ******************************************************************************/ |
292 | + |
293 | +u8 acpi_hw_check_all_gpes(void) |
294 | +{ |
295 | + u8 ret = 0; |
296 | + |
297 | + ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); |
298 | + |
299 | + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); |
300 | + |
301 | + return (ret != 0); |
302 | +} |
303 | + |
304 | #endif /* !ACPI_REDUCED_HARDWARE */ |
305 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
306 | index bd75caff8322..ca5cdb621c2a 100644 |
307 | --- a/drivers/acpi/ec.c |
308 | +++ b/drivers/acpi/ec.c |
309 | @@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec); |
310 | |
311 | static struct acpi_ec *boot_ec; |
312 | static bool boot_ec_is_ecdt = false; |
313 | +static struct workqueue_struct *ec_wq; |
314 | static struct workqueue_struct *ec_query_wq; |
315 | |
316 | static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ |
317 | @@ -461,7 +462,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec) |
318 | ec_dbg_evt("Command(%s) submitted/blocked", |
319 | acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); |
320 | ec->nr_pending_queries++; |
321 | - schedule_work(&ec->work); |
322 | + queue_work(ec_wq, &ec->work); |
323 | } |
324 | } |
325 | |
326 | @@ -527,7 +528,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec) |
327 | #ifdef CONFIG_PM_SLEEP |
328 | static void __acpi_ec_flush_work(void) |
329 | { |
330 | - flush_scheduled_work(); /* flush ec->work */ |
331 | + drain_workqueue(ec_wq); /* flush ec->work */ |
332 | flush_workqueue(ec_query_wq); /* flush queries */ |
333 | } |
334 | |
335 | @@ -548,8 +549,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec) |
336 | |
337 | void acpi_ec_flush_work(void) |
338 | { |
339 | - /* Without ec_query_wq there is nothing to flush. */ |
340 | - if (!ec_query_wq) |
341 | + /* Without ec_wq there is nothing to flush. */ |
342 | + if (!ec_wq) |
343 | return; |
344 | |
345 | __acpi_ec_flush_work(); |
346 | @@ -2032,25 +2033,33 @@ static struct acpi_driver acpi_ec_driver = { |
347 | .drv.pm = &acpi_ec_pm, |
348 | }; |
349 | |
350 | -static inline int acpi_ec_query_init(void) |
351 | +static void acpi_ec_destroy_workqueues(void) |
352 | { |
353 | - if (!ec_query_wq) { |
354 | - ec_query_wq = alloc_workqueue("kec_query", 0, |
355 | - ec_max_queries); |
356 | - if (!ec_query_wq) |
357 | - return -ENODEV; |
358 | + if (ec_wq) { |
359 | + destroy_workqueue(ec_wq); |
360 | + ec_wq = NULL; |
361 | } |
362 | - return 0; |
363 | -} |
364 | - |
365 | -static inline void acpi_ec_query_exit(void) |
366 | -{ |
367 | if (ec_query_wq) { |
368 | destroy_workqueue(ec_query_wq); |
369 | ec_query_wq = NULL; |
370 | } |
371 | } |
372 | |
373 | +static int acpi_ec_init_workqueues(void) |
374 | +{ |
375 | + if (!ec_wq) |
376 | + ec_wq = alloc_ordered_workqueue("kec", 0); |
377 | + |
378 | + if (!ec_query_wq) |
379 | + ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries); |
380 | + |
381 | + if (!ec_wq || !ec_query_wq) { |
382 | + acpi_ec_destroy_workqueues(); |
383 | + return -ENODEV; |
384 | + } |
385 | + return 0; |
386 | +} |
387 | + |
388 | static const struct dmi_system_id acpi_ec_no_wakeup[] = { |
389 | { |
390 | .ident = "Thinkpad X1 Carbon 6th", |
391 | @@ -2081,8 +2090,7 @@ int __init acpi_ec_init(void) |
392 | int result; |
393 | int ecdt_fail, dsdt_fail; |
394 | |
395 | - /* register workqueue for _Qxx evaluations */ |
396 | - result = acpi_ec_query_init(); |
397 | + result = acpi_ec_init_workqueues(); |
398 | if (result) |
399 | return result; |
400 | |
401 | @@ -2113,6 +2121,6 @@ static void __exit acpi_ec_exit(void) |
402 | { |
403 | |
404 | acpi_bus_unregister_driver(&acpi_ec_driver); |
405 | - acpi_ec_query_exit(); |
406 | + acpi_ec_destroy_workqueues(); |
407 | } |
408 | #endif /* 0 */ |
409 | diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c |
410 | index 2af937a8b1c5..62348ec2a807 100644 |
411 | --- a/drivers/acpi/sleep.c |
412 | +++ b/drivers/acpi/sleep.c |
413 | @@ -977,21 +977,34 @@ static int acpi_s2idle_prepare_late(void) |
414 | return 0; |
415 | } |
416 | |
417 | -static void acpi_s2idle_wake(void) |
418 | +static bool acpi_s2idle_wake(void) |
419 | { |
420 | - /* |
421 | - * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has |
422 | - * not triggered while suspended, so bail out. |
423 | - */ |
424 | - if (!acpi_sci_irq_valid() || |
425 | - irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) |
426 | - return; |
427 | + if (!acpi_sci_irq_valid()) |
428 | + return pm_wakeup_pending(); |
429 | + |
430 | + while (pm_wakeup_pending()) { |
431 | + /* |
432 | + * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the |
433 | + * SCI has not triggered while suspended, so bail out (the |
434 | + * wakeup is pending anyway and the SCI is not the source of |
435 | + * it). |
436 | + */ |
437 | + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) |
438 | + return true; |
439 | + |
440 | + /* |
441 | + * If there are no EC events to process and at least one of the |
442 | + * other enabled GPEs is active, the wakeup is regarded as a |
443 | + * genuine one. |
444 | + * |
445 | + * Note that the checks below must be carried out in this order |
446 | + * to avoid returning prematurely due to a change of the EC GPE |
447 | + * status bit from unset to set between the checks with the |
448 | + * status bits of all the other GPEs unset. |
449 | + */ |
450 | + if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) |
451 | + return true; |
452 | |
453 | - /* |
454 | - * If there are EC events to process, the wakeup may be a spurious one |
455 | - * coming from the EC. |
456 | - */ |
457 | - if (acpi_ec_dispatch_gpe()) { |
458 | /* |
459 | * Cancel the wakeup and process all pending events in case |
460 | * there are any wakeup ones in there. |
461 | @@ -1009,8 +1022,19 @@ static void acpi_s2idle_wake(void) |
462 | acpi_ec_flush_work(); |
463 | acpi_os_wait_events_complete(); /* synchronize Notify handling */ |
464 | |
465 | + /* |
466 | + * The SCI is in the "suspended" state now and it cannot produce |
467 | + * new wakeup events till the rearming below, so if any of them |
468 | + * are pending here, they must be resulting from the processing |
469 | + * of EC events above or coming from somewhere else. |
470 | + */ |
471 | + if (pm_wakeup_pending()) |
472 | + return true; |
473 | + |
474 | rearm_wake_irq(acpi_sci_irq); |
475 | } |
476 | + |
477 | + return false; |
478 | } |
479 | |
480 | static void acpi_s2idle_restore_early(void) |
481 | diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c |
482 | index 36cf13eee6b8..68413bf9cf87 100644 |
483 | --- a/drivers/bus/moxtet.c |
484 | +++ b/drivers/bus/moxtet.c |
485 | @@ -466,7 +466,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len, |
486 | { |
487 | struct moxtet *moxtet = file->private_data; |
488 | u8 bin[TURRIS_MOX_MAX_MODULES]; |
489 | - u8 hex[sizeof(buf) * 2 + 1]; |
490 | + u8 hex[sizeof(bin) * 2 + 1]; |
491 | int ret, n; |
492 | |
493 | ret = moxtet_spi_read(moxtet, bin); |
494 | diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c |
495 | index 285e0b8f9a97..09e3e25562a8 100644 |
496 | --- a/drivers/char/ipmi/ipmb_dev_int.c |
497 | +++ b/drivers/char/ipmi/ipmb_dev_int.c |
498 | @@ -265,7 +265,7 @@ static int ipmb_slave_cb(struct i2c_client *client, |
499 | break; |
500 | |
501 | case I2C_SLAVE_WRITE_RECEIVED: |
502 | - if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg)) |
503 | + if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1) |
504 | break; |
505 | |
506 | buf[++ipmb_dev->msg_idx] = *val; |
507 | diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c |
508 | index e6fd079783bd..e73ca303f1a7 100644 |
509 | --- a/drivers/edac/edac_mc.c |
510 | +++ b/drivers/edac/edac_mc.c |
511 | @@ -503,16 +503,10 @@ void edac_mc_free(struct mem_ctl_info *mci) |
512 | { |
513 | edac_dbg(1, "\n"); |
514 | |
515 | - /* If we're not yet registered with sysfs free only what was allocated |
516 | - * in edac_mc_alloc(). |
517 | - */ |
518 | - if (!device_is_registered(&mci->dev)) { |
519 | - _edac_mc_free(mci); |
520 | - return; |
521 | - } |
522 | + if (device_is_registered(&mci->dev)) |
523 | + edac_unregister_sysfs(mci); |
524 | |
525 | - /* the mci instance is freed here, when the sysfs object is dropped */ |
526 | - edac_unregister_sysfs(mci); |
527 | + _edac_mc_free(mci); |
528 | } |
529 | EXPORT_SYMBOL_GPL(edac_mc_free); |
530 | |
531 | diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c |
532 | index 32d016f1ecd1..0287884ae28c 100644 |
533 | --- a/drivers/edac/edac_mc_sysfs.c |
534 | +++ b/drivers/edac/edac_mc_sysfs.c |
535 | @@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = { |
536 | |
537 | static void csrow_attr_release(struct device *dev) |
538 | { |
539 | - struct csrow_info *csrow = container_of(dev, struct csrow_info, dev); |
540 | - |
541 | - edac_dbg(1, "device %s released\n", dev_name(dev)); |
542 | - kfree(csrow); |
543 | + /* release device with _edac_mc_free() */ |
544 | } |
545 | |
546 | static const struct device_type csrow_attr_type = { |
547 | @@ -447,8 +444,7 @@ error: |
548 | csrow = mci->csrows[i]; |
549 | if (!nr_pages_per_csrow(csrow)) |
550 | continue; |
551 | - |
552 | - device_del(&mci->csrows[i]->dev); |
553 | + device_unregister(&mci->csrows[i]->dev); |
554 | } |
555 | |
556 | return err; |
557 | @@ -620,10 +616,7 @@ static const struct attribute_group *dimm_attr_groups[] = { |
558 | |
559 | static void dimm_attr_release(struct device *dev) |
560 | { |
561 | - struct dimm_info *dimm = container_of(dev, struct dimm_info, dev); |
562 | - |
563 | - edac_dbg(1, "device %s released\n", dev_name(dev)); |
564 | - kfree(dimm); |
565 | + /* release device with _edac_mc_free() */ |
566 | } |
567 | |
568 | static const struct device_type dimm_attr_type = { |
569 | @@ -906,10 +899,7 @@ static const struct attribute_group *mci_attr_groups[] = { |
570 | |
571 | static void mci_attr_release(struct device *dev) |
572 | { |
573 | - struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev); |
574 | - |
575 | - edac_dbg(1, "device %s released\n", dev_name(dev)); |
576 | - kfree(mci); |
577 | + /* release device with _edac_mc_free() */ |
578 | } |
579 | |
580 | static const struct device_type mci_attr_type = { |
581 | diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c |
582 | index a9748b5198e6..67f9f82e0db0 100644 |
583 | --- a/drivers/gpio/gpio-xilinx.c |
584 | +++ b/drivers/gpio/gpio-xilinx.c |
585 | @@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, |
586 | for (i = 0; i < gc->ngpio; i++) { |
587 | if (*mask == 0) |
588 | break; |
589 | + /* Once finished with an index write it out to the register */ |
590 | if (index != xgpio_index(chip, i)) { |
591 | xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + |
592 | - xgpio_regoffset(chip, i), |
593 | + index * XGPIO_CHANNEL_OFFSET, |
594 | chip->gpio_state[index]); |
595 | spin_unlock_irqrestore(&chip->gpio_lock[index], flags); |
596 | index = xgpio_index(chip, i); |
597 | @@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, |
598 | } |
599 | |
600 | xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET + |
601 | - xgpio_regoffset(chip, i), chip->gpio_state[index]); |
602 | + index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]); |
603 | |
604 | spin_unlock_irqrestore(&chip->gpio_lock[index], flags); |
605 | } |
606 | diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c |
607 | index 7ee5b7f53aeb..3ece59185d37 100644 |
608 | --- a/drivers/gpio/gpiolib-of.c |
609 | +++ b/drivers/gpio/gpiolib-of.c |
610 | @@ -146,10 +146,6 @@ static void of_gpio_flags_quirks(struct device_node *np, |
611 | if (of_property_read_bool(np, "cd-inverted")) |
612 | *flags ^= OF_GPIO_ACTIVE_LOW; |
613 | } |
614 | - if (!strcmp(propname, "wp-gpios")) { |
615 | - if (of_property_read_bool(np, "wp-inverted")) |
616 | - *flags ^= OF_GPIO_ACTIVE_LOW; |
617 | - } |
618 | } |
619 | /* |
620 | * Some GPIO fixed regulator quirks. |
621 | diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
622 | index 2476306e7030..22506e4614b3 100644 |
623 | --- a/drivers/gpio/gpiolib.c |
624 | +++ b/drivers/gpio/gpiolib.c |
625 | @@ -3220,6 +3220,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) |
626 | } |
627 | EXPORT_SYMBOL_GPL(gpiod_is_active_low); |
628 | |
629 | +/** |
630 | + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not |
631 | + * @desc: the gpio descriptor to change |
632 | + */ |
633 | +void gpiod_toggle_active_low(struct gpio_desc *desc) |
634 | +{ |
635 | + VALIDATE_DESC_VOID(desc); |
636 | + change_bit(FLAG_ACTIVE_LOW, &desc->flags); |
637 | +} |
638 | +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); |
639 | + |
640 | /* I/O calls are only valid after configuration completed; the relevant |
641 | * "is this a valid GPIO" error checks should already have been done. |
642 | * |
643 | diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c |
644 | index 5906c80c4b2c..f57dd195dfb8 100644 |
645 | --- a/drivers/gpu/drm/panfrost/panfrost_drv.c |
646 | +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c |
647 | @@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev, |
648 | break; |
649 | } |
650 | |
651 | + atomic_inc(&bo->gpu_usecount); |
652 | job->mappings[i] = mapping; |
653 | } |
654 | |
655 | diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h |
656 | index ca1bc9019600..b3517ff9630c 100644 |
657 | --- a/drivers/gpu/drm/panfrost/panfrost_gem.h |
658 | +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h |
659 | @@ -30,6 +30,12 @@ struct panfrost_gem_object { |
660 | struct mutex lock; |
661 | } mappings; |
662 | |
663 | + /* |
664 | + * Count the number of jobs referencing this BO so we don't let the |
665 | + * shrinker reclaim this object prematurely. |
666 | + */ |
667 | + atomic_t gpu_usecount; |
668 | + |
669 | bool noexec :1; |
670 | bool is_heap :1; |
671 | }; |
672 | diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c |
673 | index f5dd7b29bc95..288e46c40673 100644 |
674 | --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c |
675 | +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c |
676 | @@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) |
677 | struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); |
678 | struct panfrost_gem_object *bo = to_panfrost_bo(obj); |
679 | |
680 | + if (atomic_read(&bo->gpu_usecount)) |
681 | + return false; |
682 | + |
683 | if (!mutex_trylock(&shmem->pages_lock)) |
684 | return false; |
685 | |
686 | diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c |
687 | index bbb0c5e3ca6f..9f770d454684 100644 |
688 | --- a/drivers/gpu/drm/panfrost/panfrost_job.c |
689 | +++ b/drivers/gpu/drm/panfrost/panfrost_job.c |
690 | @@ -270,8 +270,13 @@ static void panfrost_job_cleanup(struct kref *ref) |
691 | dma_fence_put(job->render_done_fence); |
692 | |
693 | if (job->mappings) { |
694 | - for (i = 0; i < job->bo_count; i++) |
695 | + for (i = 0; i < job->bo_count; i++) { |
696 | + if (!job->mappings[i]) |
697 | + break; |
698 | + |
699 | + atomic_dec(&job->mappings[i]->obj->gpu_usecount); |
700 | panfrost_gem_mapping_put(job->mappings[i]); |
701 | + } |
702 | kvfree(job->mappings); |
703 | } |
704 | |
705 | diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c |
706 | index a5757b11b730..5b54eff12cc0 100644 |
707 | --- a/drivers/gpu/drm/sun4i/sun4i_drv.c |
708 | +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c |
709 | @@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev) |
710 | } |
711 | |
712 | drm_mode_config_init(drm); |
713 | - drm->mode_config.allow_fb_modifiers = true; |
714 | |
715 | ret = component_bind_all(drm->dev, drm); |
716 | if (ret) { |
717 | diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c |
718 | index 5bd60ded3d81..909eba43664a 100644 |
719 | --- a/drivers/gpu/drm/vgem/vgem_drv.c |
720 | +++ b/drivers/gpu/drm/vgem/vgem_drv.c |
721 | @@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, |
722 | return ERR_CAST(obj); |
723 | |
724 | ret = drm_gem_handle_create(file, &obj->base, handle); |
725 | - drm_gem_object_put_unlocked(&obj->base); |
726 | - if (ret) |
727 | + if (ret) { |
728 | + drm_gem_object_put_unlocked(&obj->base); |
729 | return ERR_PTR(ret); |
730 | + } |
731 | |
732 | return &obj->base; |
733 | } |
734 | @@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, |
735 | args->size = gem_object->size; |
736 | args->pitch = pitch; |
737 | |
738 | - DRM_DEBUG("Created object of size %lld\n", size); |
739 | + drm_gem_object_put_unlocked(gem_object); |
740 | + |
741 | + DRM_DEBUG("Created object of size %llu\n", args->size); |
742 | |
743 | return 0; |
744 | } |
745 | diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c |
746 | index f01f4887fb2e..a91ed01abb68 100644 |
747 | --- a/drivers/hwmon/pmbus/ltc2978.c |
748 | +++ b/drivers/hwmon/pmbus/ltc2978.c |
749 | @@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882, |
750 | |
751 | #define LTC_POLL_TIMEOUT 100 /* in milli-seconds */ |
752 | |
753 | -#define LTC_NOT_BUSY BIT(5) |
754 | -#define LTC_NOT_PENDING BIT(4) |
755 | +#define LTC_NOT_BUSY BIT(6) |
756 | +#define LTC_NOT_PENDING BIT(5) |
757 | |
758 | /* |
759 | * LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which |
760 | diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c |
761 | index 6eb6d2717ca5..2b4d80393bd0 100644 |
762 | --- a/drivers/infiniband/core/security.c |
763 | +++ b/drivers/infiniband/core/security.c |
764 | @@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, |
765 | if (!new_pps) |
766 | return NULL; |
767 | |
768 | - if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { |
769 | - if (!qp_pps) { |
770 | - new_pps->main.port_num = qp_attr->port_num; |
771 | - new_pps->main.pkey_index = qp_attr->pkey_index; |
772 | - } else { |
773 | - new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? |
774 | - qp_attr->port_num : |
775 | - qp_pps->main.port_num; |
776 | - |
777 | - new_pps->main.pkey_index = |
778 | - (qp_attr_mask & IB_QP_PKEY_INDEX) ? |
779 | - qp_attr->pkey_index : |
780 | - qp_pps->main.pkey_index; |
781 | - } |
782 | + if (qp_attr_mask & IB_QP_PORT) |
783 | + new_pps->main.port_num = |
784 | + (qp_pps) ? qp_pps->main.port_num : qp_attr->port_num; |
785 | + if (qp_attr_mask & IB_QP_PKEY_INDEX) |
786 | + new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index : |
787 | + qp_attr->pkey_index; |
788 | + if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT)) |
789 | new_pps->main.state = IB_PORT_PKEY_VALID; |
790 | - } else if (qp_pps) { |
791 | + |
792 | + if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) { |
793 | new_pps->main.port_num = qp_pps->main.port_num; |
794 | new_pps->main.pkey_index = qp_pps->main.pkey_index; |
795 | if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) |
796 | diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c |
797 | index d1407fa378e8..1235ffb2389b 100644 |
798 | --- a/drivers/infiniband/core/user_mad.c |
799 | +++ b/drivers/infiniband/core/user_mad.c |
800 | @@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port) |
801 | struct ib_umad_file *file; |
802 | int id; |
803 | |
804 | + cdev_device_del(&port->sm_cdev, &port->sm_dev); |
805 | + cdev_device_del(&port->cdev, &port->dev); |
806 | + |
807 | mutex_lock(&port->file_mutex); |
808 | |
809 | /* Mark ib_dev NULL and block ioctl or other file ops to progress |
810 | @@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port) |
811 | |
812 | mutex_unlock(&port->file_mutex); |
813 | |
814 | - cdev_device_del(&port->sm_cdev, &port->sm_dev); |
815 | - cdev_device_del(&port->cdev, &port->dev); |
816 | ida_free(&umad_ida, port->dev_num); |
817 | |
818 | /* balances device_initialize() */ |
819 | diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c |
820 | index 14a80fd9f464..300353c1e5f1 100644 |
821 | --- a/drivers/infiniband/core/uverbs_cmd.c |
822 | +++ b/drivers/infiniband/core/uverbs_cmd.c |
823 | @@ -2718,12 +2718,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, |
824 | return 0; |
825 | } |
826 | |
827 | -static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec) |
828 | -{ |
829 | - /* Returns user space filter size, includes padding */ |
830 | - return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2; |
831 | -} |
832 | - |
833 | static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size, |
834 | u16 ib_real_filter_sz) |
835 | { |
836 | @@ -2867,11 +2861,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type, |
837 | static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec, |
838 | union ib_flow_spec *ib_spec) |
839 | { |
840 | - ssize_t kern_filter_sz; |
841 | + size_t kern_filter_sz; |
842 | void *kern_spec_mask; |
843 | void *kern_spec_val; |
844 | |
845 | - kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr); |
846 | + if (check_sub_overflow((size_t)kern_spec->hdr.size, |
847 | + sizeof(struct ib_uverbs_flow_spec_hdr), |
848 | + &kern_filter_sz)) |
849 | + return -EINVAL; |
850 | + |
851 | + kern_filter_sz /= 2; |
852 | |
853 | kern_spec_val = (void *)kern_spec + |
854 | sizeof(struct ib_uverbs_flow_spec_hdr); |
855 | diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c |
856 | index 347dc242fb88..d82e0589cfd2 100644 |
857 | --- a/drivers/infiniband/hw/cxgb4/cm.c |
858 | +++ b/drivers/infiniband/hw/cxgb4/cm.c |
859 | @@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) |
860 | C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); |
861 | } |
862 | |
863 | + /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3, |
864 | + * when entering the TERM state the RNIC MUST initiate a CLOSE. |
865 | + */ |
866 | + c4iw_ep_disconnect(ep, 1, GFP_KERNEL); |
867 | c4iw_put_ep(&ep->com); |
868 | } else |
869 | pr_warn("TERM received tid %u no ep/qp\n", tid); |
870 | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c |
871 | index bbcac539777a..89ac2f9ae6dd 100644 |
872 | --- a/drivers/infiniband/hw/cxgb4/qp.c |
873 | +++ b/drivers/infiniband/hw/cxgb4/qp.c |
874 | @@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, |
875 | qhp->attr.layer_etype = attrs->layer_etype; |
876 | qhp->attr.ecode = attrs->ecode; |
877 | ep = qhp->ep; |
878 | - c4iw_get_ep(&ep->com); |
879 | - disconnect = 1; |
880 | if (!internal) { |
881 | + c4iw_get_ep(&ep->com); |
882 | terminate = 1; |
883 | + disconnect = 1; |
884 | } else { |
885 | terminate = qhp->attr.send_term; |
886 | ret = rdma_fini(rhp, qhp, ep); |
887 | diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c |
888 | index c142b23bb401..1aeea5d65c01 100644 |
889 | --- a/drivers/infiniband/hw/hfi1/affinity.c |
890 | +++ b/drivers/infiniband/hw/hfi1/affinity.c |
891 | @@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd, |
892 | rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu); |
893 | } |
894 | |
895 | + free_cpumask_var(available_cpus); |
896 | + free_cpumask_var(non_intr_cpus); |
897 | return 0; |
898 | |
899 | fail: |
900 | diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c |
901 | index f9a7e9d29c8b..89e1dfd07a1b 100644 |
902 | --- a/drivers/infiniband/hw/hfi1/file_ops.c |
903 | +++ b/drivers/infiniband/hw/hfi1/file_ops.c |
904 | @@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) |
905 | |
906 | fd = kzalloc(sizeof(*fd), GFP_KERNEL); |
907 | |
908 | - if (fd) { |
909 | - fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
910 | - fd->mm = current->mm; |
911 | - mmgrab(fd->mm); |
912 | - fd->dd = dd; |
913 | - kobject_get(&fd->dd->kobj); |
914 | - fp->private_data = fd; |
915 | - } else { |
916 | - fp->private_data = NULL; |
917 | - |
918 | - if (atomic_dec_and_test(&dd->user_refcount)) |
919 | - complete(&dd->user_comp); |
920 | - |
921 | - return -ENOMEM; |
922 | - } |
923 | - |
924 | + if (!fd || init_srcu_struct(&fd->pq_srcu)) |
925 | + goto nomem; |
926 | + spin_lock_init(&fd->pq_rcu_lock); |
927 | + spin_lock_init(&fd->tid_lock); |
928 | + spin_lock_init(&fd->invalid_lock); |
929 | + fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
930 | + fd->mm = current->mm; |
931 | + mmgrab(fd->mm); |
932 | + fd->dd = dd; |
933 | + kobject_get(&fd->dd->kobj); |
934 | + fp->private_data = fd; |
935 | return 0; |
936 | +nomem: |
937 | + kfree(fd); |
938 | + fp->private_data = NULL; |
939 | + if (atomic_dec_and_test(&dd->user_refcount)) |
940 | + complete(&dd->user_comp); |
941 | + return -ENOMEM; |
942 | } |
943 | |
944 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
945 | @@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
946 | static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
947 | { |
948 | struct hfi1_filedata *fd = kiocb->ki_filp->private_data; |
949 | - struct hfi1_user_sdma_pkt_q *pq = fd->pq; |
950 | + struct hfi1_user_sdma_pkt_q *pq; |
951 | struct hfi1_user_sdma_comp_q *cq = fd->cq; |
952 | int done = 0, reqs = 0; |
953 | unsigned long dim = from->nr_segs; |
954 | + int idx; |
955 | |
956 | - if (!cq || !pq) |
957 | + idx = srcu_read_lock(&fd->pq_srcu); |
958 | + pq = srcu_dereference(fd->pq, &fd->pq_srcu); |
959 | + if (!cq || !pq) { |
960 | + srcu_read_unlock(&fd->pq_srcu, idx); |
961 | return -EIO; |
962 | + } |
963 | |
964 | - if (!iter_is_iovec(from) || !dim) |
965 | + if (!iter_is_iovec(from) || !dim) { |
966 | + srcu_read_unlock(&fd->pq_srcu, idx); |
967 | return -EINVAL; |
968 | + } |
969 | |
970 | trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim); |
971 | |
972 | - if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) |
973 | + if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) { |
974 | + srcu_read_unlock(&fd->pq_srcu, idx); |
975 | return -ENOSPC; |
976 | + } |
977 | |
978 | while (dim) { |
979 | int ret; |
980 | @@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from) |
981 | reqs++; |
982 | } |
983 | |
984 | + srcu_read_unlock(&fd->pq_srcu, idx); |
985 | return reqs; |
986 | } |
987 | |
988 | @@ -707,6 +718,7 @@ done: |
989 | if (atomic_dec_and_test(&dd->user_refcount)) |
990 | complete(&dd->user_comp); |
991 | |
992 | + cleanup_srcu_struct(&fdata->pq_srcu); |
993 | kfree(fdata); |
994 | return 0; |
995 | } |
996 | diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h |
997 | index fa45350a9a1d..1af94650bd84 100644 |
998 | --- a/drivers/infiniband/hw/hfi1/hfi.h |
999 | +++ b/drivers/infiniband/hw/hfi1/hfi.h |
1000 | @@ -1436,10 +1436,13 @@ struct mmu_rb_handler; |
1001 | |
1002 | /* Private data for file operations */ |
1003 | struct hfi1_filedata { |
1004 | + struct srcu_struct pq_srcu; |
1005 | struct hfi1_devdata *dd; |
1006 | struct hfi1_ctxtdata *uctxt; |
1007 | struct hfi1_user_sdma_comp_q *cq; |
1008 | - struct hfi1_user_sdma_pkt_q *pq; |
1009 | + /* update side lock for SRCU */ |
1010 | + spinlock_t pq_rcu_lock; |
1011 | + struct hfi1_user_sdma_pkt_q __rcu *pq; |
1012 | u16 subctxt; |
1013 | /* for cpu affinity; -1 if none */ |
1014 | int rec_cpu_num; |
1015 | diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
1016 | index 3592a9ec155e..4d732353379d 100644 |
1017 | --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
1018 | +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c |
1019 | @@ -90,9 +90,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd, |
1020 | struct hfi1_devdata *dd = uctxt->dd; |
1021 | int ret = 0; |
1022 | |
1023 | - spin_lock_init(&fd->tid_lock); |
1024 | - spin_lock_init(&fd->invalid_lock); |
1025 | - |
1026 | fd->entry_to_rb = kcalloc(uctxt->expected_count, |
1027 | sizeof(struct rb_node *), |
1028 | GFP_KERNEL); |
1029 | @@ -165,10 +162,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd) |
1030 | if (fd->handler) { |
1031 | hfi1_mmu_rb_unregister(fd->handler); |
1032 | } else { |
1033 | + mutex_lock(&uctxt->exp_mutex); |
1034 | if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list)) |
1035 | unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd); |
1036 | if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list)) |
1037 | unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd); |
1038 | + mutex_unlock(&uctxt->exp_mutex); |
1039 | } |
1040 | |
1041 | kfree(fd->invalid_tids); |
1042 | diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c |
1043 | index fd754a16475a..c2f0d9ba93de 100644 |
1044 | --- a/drivers/infiniband/hw/hfi1/user_sdma.c |
1045 | +++ b/drivers/infiniband/hw/hfi1/user_sdma.c |
1046 | @@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, |
1047 | pq = kzalloc(sizeof(*pq), GFP_KERNEL); |
1048 | if (!pq) |
1049 | return -ENOMEM; |
1050 | - |
1051 | pq->dd = dd; |
1052 | pq->ctxt = uctxt->ctxt; |
1053 | pq->subctxt = fd->subctxt; |
1054 | @@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, |
1055 | goto pq_mmu_fail; |
1056 | } |
1057 | |
1058 | - fd->pq = pq; |
1059 | + rcu_assign_pointer(fd->pq, pq); |
1060 | fd->cq = cq; |
1061 | |
1062 | return 0; |
1063 | @@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, |
1064 | |
1065 | trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt); |
1066 | |
1067 | - pq = fd->pq; |
1068 | + spin_lock(&fd->pq_rcu_lock); |
1069 | + pq = srcu_dereference_check(fd->pq, &fd->pq_srcu, |
1070 | + lockdep_is_held(&fd->pq_rcu_lock)); |
1071 | if (pq) { |
1072 | + rcu_assign_pointer(fd->pq, NULL); |
1073 | + spin_unlock(&fd->pq_rcu_lock); |
1074 | + synchronize_srcu(&fd->pq_srcu); |
1075 | + /* at this point there can be no more new requests */ |
1076 | if (pq->handler) |
1077 | hfi1_mmu_rb_unregister(pq->handler); |
1078 | iowait_sdma_drain(&pq->busy); |
1079 | @@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, |
1080 | kfree(pq->req_in_use); |
1081 | kmem_cache_destroy(pq->txreq_cache); |
1082 | kfree(pq); |
1083 | - fd->pq = NULL; |
1084 | + } else { |
1085 | + spin_unlock(&fd->pq_rcu_lock); |
1086 | } |
1087 | if (fd->cq) { |
1088 | vfree(fd->cq->comps); |
1089 | @@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, |
1090 | { |
1091 | int ret = 0, i; |
1092 | struct hfi1_ctxtdata *uctxt = fd->uctxt; |
1093 | - struct hfi1_user_sdma_pkt_q *pq = fd->pq; |
1094 | + struct hfi1_user_sdma_pkt_q *pq = |
1095 | + srcu_dereference(fd->pq, &fd->pq_srcu); |
1096 | struct hfi1_user_sdma_comp_q *cq = fd->cq; |
1097 | struct hfi1_devdata *dd = pq->dd; |
1098 | unsigned long idx = 0; |
1099 | diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c |
1100 | index 5fd071c05944..0865373bd12d 100644 |
1101 | --- a/drivers/infiniband/hw/mlx5/qp.c |
1102 | +++ b/drivers/infiniband/hw/mlx5/qp.c |
1103 | @@ -3391,9 +3391,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp, |
1104 | struct mlx5_ib_qp_base *base; |
1105 | u32 set_id; |
1106 | |
1107 | - if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) |
1108 | - return 0; |
1109 | - |
1110 | if (counter) |
1111 | set_id = counter->id; |
1112 | else |
1113 | @@ -6503,6 +6500,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp) |
1114 | */ |
1115 | int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) |
1116 | { |
1117 | + struct mlx5_ib_dev *dev = to_mdev(qp->device); |
1118 | struct mlx5_ib_qp *mqp = to_mqp(qp); |
1119 | int err = 0; |
1120 | |
1121 | @@ -6512,6 +6510,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter) |
1122 | goto out; |
1123 | } |
1124 | |
1125 | + if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) { |
1126 | + err = -EOPNOTSUPP; |
1127 | + goto out; |
1128 | + } |
1129 | + |
1130 | if (mqp->state == IB_QPS_RTS) { |
1131 | err = __mlx5_ib_qp_set_counter(qp, counter); |
1132 | if (!err) |
1133 | diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c |
1134 | index 0b0a241c57ff..799254a049ba 100644 |
1135 | --- a/drivers/infiniband/sw/rdmavt/qp.c |
1136 | +++ b/drivers/infiniband/sw/rdmavt/qp.c |
1137 | @@ -61,6 +61,8 @@ |
1138 | #define RVT_RWQ_COUNT_THRESHOLD 16 |
1139 | |
1140 | static void rvt_rc_timeout(struct timer_list *t); |
1141 | +static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1142 | + enum ib_qp_type type); |
1143 | |
1144 | /* |
1145 | * Convert the AETH RNR timeout code into the number of microseconds. |
1146 | @@ -452,40 +454,41 @@ no_qp_table: |
1147 | } |
1148 | |
1149 | /** |
1150 | - * free_all_qps - check for QPs still in use |
1151 | + * rvt_free_qp_cb - callback function to reset a qp |
1152 | + * @qp: the qp to reset |
1153 | + * @v: a 64-bit value |
1154 | + * |
1155 | + * This function resets the qp and removes it from the |
1156 | + * qp hash table. |
1157 | + */ |
1158 | +static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v) |
1159 | +{ |
1160 | + unsigned int *qp_inuse = (unsigned int *)v; |
1161 | + struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device); |
1162 | + |
1163 | + /* Reset the qp and remove it from the qp hash list */ |
1164 | + rvt_reset_qp(rdi, qp, qp->ibqp.qp_type); |
1165 | + |
1166 | + /* Increment the qp_inuse count */ |
1167 | + (*qp_inuse)++; |
1168 | +} |
1169 | + |
1170 | +/** |
1171 | + * rvt_free_all_qps - check for QPs still in use |
1172 | * @rdi: rvt device info structure |
1173 | * |
1174 | * There should not be any QPs still in use. |
1175 | * Free memory for table. |
1176 | + * Return the number of QPs still in use. |
1177 | */ |
1178 | static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi) |
1179 | { |
1180 | - unsigned long flags; |
1181 | - struct rvt_qp *qp; |
1182 | - unsigned n, qp_inuse = 0; |
1183 | - spinlock_t *ql; /* work around too long line below */ |
1184 | - |
1185 | - if (rdi->driver_f.free_all_qps) |
1186 | - qp_inuse = rdi->driver_f.free_all_qps(rdi); |
1187 | + unsigned int qp_inuse = 0; |
1188 | |
1189 | qp_inuse += rvt_mcast_tree_empty(rdi); |
1190 | |
1191 | - if (!rdi->qp_dev) |
1192 | - return qp_inuse; |
1193 | + rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb); |
1194 | |
1195 | - ql = &rdi->qp_dev->qpt_lock; |
1196 | - spin_lock_irqsave(ql, flags); |
1197 | - for (n = 0; n < rdi->qp_dev->qp_table_size; n++) { |
1198 | - qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n], |
1199 | - lockdep_is_held(ql)); |
1200 | - RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL); |
1201 | - |
1202 | - for (; qp; qp = rcu_dereference_protected(qp->next, |
1203 | - lockdep_is_held(ql))) |
1204 | - qp_inuse++; |
1205 | - } |
1206 | - spin_unlock_irqrestore(ql, flags); |
1207 | - synchronize_rcu(); |
1208 | return qp_inuse; |
1209 | } |
1210 | |
1211 | @@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1212 | } |
1213 | |
1214 | /** |
1215 | - * rvt_reset_qp - initialize the QP state to the reset state |
1216 | + * _rvt_reset_qp - initialize the QP state to the reset state |
1217 | * @qp: the QP to reset |
1218 | * @type: the QP type |
1219 | * |
1220 | * r_lock, s_hlock, and s_lock are required to be held by the caller |
1221 | */ |
1222 | -static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1223 | - enum ib_qp_type type) |
1224 | +static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1225 | + enum ib_qp_type type) |
1226 | __must_hold(&qp->s_lock) |
1227 | __must_hold(&qp->s_hlock) |
1228 | __must_hold(&qp->r_lock) |
1229 | @@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1230 | lockdep_assert_held(&qp->s_lock); |
1231 | } |
1232 | |
1233 | +/** |
1234 | + * rvt_reset_qp - initialize the QP state to the reset state |
1235 | + * @rdi: the device info |
1236 | + * @qp: the QP to reset |
1237 | + * @type: the QP type |
1238 | + * |
1239 | + * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock |
1240 | + * before calling _rvt_reset_qp(). |
1241 | + */ |
1242 | +static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
1243 | + enum ib_qp_type type) |
1244 | +{ |
1245 | + spin_lock_irq(&qp->r_lock); |
1246 | + spin_lock(&qp->s_hlock); |
1247 | + spin_lock(&qp->s_lock); |
1248 | + _rvt_reset_qp(rdi, qp, type); |
1249 | + spin_unlock(&qp->s_lock); |
1250 | + spin_unlock(&qp->s_hlock); |
1251 | + spin_unlock_irq(&qp->r_lock); |
1252 | +} |
1253 | + |
1254 | /** rvt_free_qpn - Free a qpn from the bit map |
1255 | * @qpt: QP table |
1256 | * @qpn: queue pair number to free |
1257 | @@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, |
1258 | switch (new_state) { |
1259 | case IB_QPS_RESET: |
1260 | if (qp->state != IB_QPS_RESET) |
1261 | - rvt_reset_qp(rdi, qp, ibqp->qp_type); |
1262 | + _rvt_reset_qp(rdi, qp, ibqp->qp_type); |
1263 | break; |
1264 | |
1265 | case IB_QPS_RTR: |
1266 | @@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
1267 | struct rvt_qp *qp = ibqp_to_rvtqp(ibqp); |
1268 | struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device); |
1269 | |
1270 | - spin_lock_irq(&qp->r_lock); |
1271 | - spin_lock(&qp->s_hlock); |
1272 | - spin_lock(&qp->s_lock); |
1273 | rvt_reset_qp(rdi, qp, ibqp->qp_type); |
1274 | - spin_unlock(&qp->s_lock); |
1275 | - spin_unlock(&qp->s_hlock); |
1276 | - spin_unlock_irq(&qp->r_lock); |
1277 | |
1278 | wait_event(qp->wait, !atomic_read(&qp->refcount)); |
1279 | /* qpn is now available for use again */ |
1280 | diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c |
1281 | index 116cafc9afcf..4bc88708b355 100644 |
1282 | --- a/drivers/infiniband/sw/rxe/rxe_comp.c |
1283 | +++ b/drivers/infiniband/sw/rxe/rxe_comp.c |
1284 | @@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp, |
1285 | qp->comp.psn = pkt->psn; |
1286 | if (qp->req.wait_psn) { |
1287 | qp->req.wait_psn = 0; |
1288 | - rxe_run_task(&qp->req.task, 1); |
1289 | + rxe_run_task(&qp->req.task, 0); |
1290 | } |
1291 | } |
1292 | return COMPST_ERROR_RETRY; |
1293 | @@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) |
1294 | */ |
1295 | if (qp->req.wait_fence) { |
1296 | qp->req.wait_fence = 0; |
1297 | - rxe_run_task(&qp->req.task, 1); |
1298 | + rxe_run_task(&qp->req.task, 0); |
1299 | } |
1300 | } |
1301 | |
1302 | @@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, |
1303 | if (qp->req.need_rd_atomic) { |
1304 | qp->comp.timeout_retry = 0; |
1305 | qp->req.need_rd_atomic = 0; |
1306 | - rxe_run_task(&qp->req.task, 1); |
1307 | + rxe_run_task(&qp->req.task, 0); |
1308 | } |
1309 | } |
1310 | |
1311 | @@ -725,7 +725,7 @@ int rxe_completer(void *arg) |
1312 | RXE_CNT_COMP_RETRY); |
1313 | qp->req.need_retry = 1; |
1314 | qp->comp.started_retry = 1; |
1315 | - rxe_run_task(&qp->req.task, 1); |
1316 | + rxe_run_task(&qp->req.task, 0); |
1317 | } |
1318 | |
1319 | if (pkt) { |
1320 | diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c |
1321 | index 1ae6f8bba9ae..2c666fb34625 100644 |
1322 | --- a/drivers/input/mouse/synaptics.c |
1323 | +++ b/drivers/input/mouse/synaptics.c |
1324 | @@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = { |
1325 | "LEN0042", /* Yoga */ |
1326 | "LEN0045", |
1327 | "LEN0047", |
1328 | - "LEN0049", |
1329 | "LEN2000", /* S540 */ |
1330 | "LEN2001", /* Edge E431 */ |
1331 | "LEN2002", /* Edge E531 */ |
1332 | @@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = { |
1333 | /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ |
1334 | "LEN0048", /* X1 Carbon 3 */ |
1335 | "LEN0046", /* X250 */ |
1336 | + "LEN0049", /* Yoga 11e */ |
1337 | "LEN004a", /* W541 */ |
1338 | "LEN005b", /* P50 */ |
1339 | "LEN005e", /* T560 */ |
1340 | + "LEN006c", /* T470s */ |
1341 | "LEN0071", /* T480 */ |
1342 | "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */ |
1343 | "LEN0073", /* X1 Carbon G5 (Elantech) */ |
1344 | @@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = { |
1345 | "LEN0097", /* X280 -> ALPS trackpoint */ |
1346 | "LEN009b", /* T580 */ |
1347 | "LEN200f", /* T450s */ |
1348 | + "LEN2044", /* L470 */ |
1349 | "LEN2054", /* E480 */ |
1350 | "LEN2055", /* E580 */ |
1351 | "SYN3052", /* HP EliteBook 840 G4 */ |
1352 | diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c |
1353 | index 105b7a7c0251..b3484def0a8b 100644 |
1354 | --- a/drivers/mmc/core/host.c |
1355 | +++ b/drivers/mmc/core/host.c |
1356 | @@ -176,7 +176,6 @@ int mmc_of_parse(struct mmc_host *host) |
1357 | u32 bus_width, drv_type, cd_debounce_delay_ms; |
1358 | int ret; |
1359 | bool cd_cap_invert, cd_gpio_invert = false; |
1360 | - bool ro_cap_invert, ro_gpio_invert = false; |
1361 | |
1362 | if (!dev || !dev_fwnode(dev)) |
1363 | return 0; |
1364 | @@ -255,9 +254,11 @@ int mmc_of_parse(struct mmc_host *host) |
1365 | } |
1366 | |
1367 | /* Parse Write Protection */ |
1368 | - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); |
1369 | |
1370 | - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); |
1371 | + if (device_property_read_bool(dev, "wp-inverted")) |
1372 | + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
1373 | + |
1374 | + ret = mmc_gpiod_request_ro(host, "wp", 0, 0, NULL); |
1375 | if (!ret) |
1376 | dev_info(host->parent, "Got WP GPIO\n"); |
1377 | else if (ret != -ENOENT && ret != -ENOSYS) |
1378 | @@ -266,10 +267,6 @@ int mmc_of_parse(struct mmc_host *host) |
1379 | if (device_property_read_bool(dev, "disable-wp")) |
1380 | host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; |
1381 | |
1382 | - /* See the comment on CD inversion above */ |
1383 | - if (ro_cap_invert ^ ro_gpio_invert) |
1384 | - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
1385 | - |
1386 | if (device_property_read_bool(dev, "cap-sd-highspeed")) |
1387 | host->caps |= MMC_CAP_SD_HIGHSPEED; |
1388 | if (device_property_read_bool(dev, "cap-mmc-highspeed")) |
1389 | diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c |
1390 | index da2596c5fa28..582ec3d720f6 100644 |
1391 | --- a/drivers/mmc/core/slot-gpio.c |
1392 | +++ b/drivers/mmc/core/slot-gpio.c |
1393 | @@ -241,6 +241,9 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, |
1394 | return ret; |
1395 | } |
1396 | |
1397 | + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) |
1398 | + gpiod_toggle_active_low(desc); |
1399 | + |
1400 | if (gpio_invert) |
1401 | *gpio_invert = !gpiod_is_active_low(desc); |
1402 | |
1403 | diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c |
1404 | index 024acc1b0a2e..b2bbcb09a49e 100644 |
1405 | --- a/drivers/mmc/host/pxamci.c |
1406 | +++ b/drivers/mmc/host/pxamci.c |
1407 | @@ -740,16 +740,16 @@ static int pxamci_probe(struct platform_device *pdev) |
1408 | goto out; |
1409 | } |
1410 | |
1411 | + if (!host->pdata->gpio_card_ro_invert) |
1412 | + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
1413 | + |
1414 | ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); |
1415 | if (ret && ret != -ENOENT) { |
1416 | dev_err(dev, "Failed requesting gpio_ro\n"); |
1417 | goto out; |
1418 | } |
1419 | - if (!ret) { |
1420 | + if (!ret) |
1421 | host->use_ro_gpio = true; |
1422 | - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? |
1423 | - 0 : MMC_CAP2_RO_ACTIVE_HIGH; |
1424 | - } |
1425 | |
1426 | if (host->pdata->init) |
1427 | host->pdata->init(dev, pxamci_detect_irq, mmc); |
1428 | diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c |
1429 | index 1c988d6a2433..dccb4df46512 100644 |
1430 | --- a/drivers/mmc/host/sdhci-esdhc-imx.c |
1431 | +++ b/drivers/mmc/host/sdhci-esdhc-imx.c |
1432 | @@ -1381,13 +1381,14 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, |
1433 | host->mmc->parent->platform_data); |
1434 | /* write_protect */ |
1435 | if (boarddata->wp_type == ESDHC_WP_GPIO) { |
1436 | + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
1437 | + |
1438 | err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); |
1439 | if (err) { |
1440 | dev_err(mmc_dev(host->mmc), |
1441 | "failed to request write-protect gpio!\n"); |
1442 | return err; |
1443 | } |
1444 | - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; |
1445 | } |
1446 | |
1447 | /* card_detect */ |
1448 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
1449 | index a6b7b242d516..e703827d27e9 100644 |
1450 | --- a/drivers/nvme/host/core.c |
1451 | +++ b/drivers/nvme/host/core.c |
1452 | @@ -3853,7 +3853,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) |
1453 | if (!log) |
1454 | return; |
1455 | |
1456 | - if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, |
1457 | + if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log, |
1458 | sizeof(*log), 0)) |
1459 | dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); |
1460 | kfree(log); |
1461 | diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c |
1462 | index e17fac20127e..5c9898e934d9 100644 |
1463 | --- a/drivers/s390/crypto/pkey_api.c |
1464 | +++ b/drivers/s390/crypto/pkey_api.c |
1465 | @@ -794,7 +794,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, |
1466 | return -EFAULT; |
1467 | rc = cca_sec2protkey(ksp.cardnr, ksp.domain, |
1468 | ksp.seckey.seckey, ksp.protkey.protkey, |
1469 | - NULL, &ksp.protkey.type); |
1470 | + &ksp.protkey.len, &ksp.protkey.type); |
1471 | DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc); |
1472 | if (rc) |
1473 | break; |
1474 | diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c |
1475 | index 97acc2ba2912..de844b412110 100644 |
1476 | --- a/drivers/spmi/spmi-pmic-arb.c |
1477 | +++ b/drivers/spmi/spmi-pmic-arb.c |
1478 | @@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d, |
1479 | return 0; |
1480 | } |
1481 | |
1482 | +static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class; |
1483 | |
1484 | static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb, |
1485 | struct irq_domain *domain, unsigned int virq, |
1486 | @@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb, |
1487 | else |
1488 | handler = handle_level_irq; |
1489 | |
1490 | + |
1491 | + irq_set_lockdep_class(virq, &qpnpint_irq_lock_class, |
1492 | + &qpnpint_irq_request_class); |
1493 | irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb, |
1494 | handler, NULL, NULL); |
1495 | } |
1496 | diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
1497 | index 7becc5e96f92..b0ccca5d08b5 100644 |
1498 | --- a/fs/btrfs/disk-io.c |
1499 | +++ b/fs/btrfs/disk-io.c |
1500 | @@ -3167,6 +3167,7 @@ retry_root_backup: |
1501 | /* do not make disk changes in broken FS or nologreplay is given */ |
1502 | if (btrfs_super_log_root(disk_super) != 0 && |
1503 | !btrfs_test_opt(fs_info, NOLOGREPLAY)) { |
1504 | + btrfs_info(fs_info, "start tree-log replay"); |
1505 | ret = btrfs_replay_log(fs_info, fs_devices); |
1506 | if (ret) { |
1507 | err = ret; |
1508 | diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c |
1509 | index 9d30acca55e1..043eec682ccd 100644 |
1510 | --- a/fs/btrfs/extent_map.c |
1511 | +++ b/fs/btrfs/extent_map.c |
1512 | @@ -233,6 +233,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em) |
1513 | struct extent_map *merge = NULL; |
1514 | struct rb_node *rb; |
1515 | |
1516 | + /* |
1517 | + * We can't modify an extent map that is in the tree and that is being |
1518 | + * used by another task, as it can cause that other task to see it in |
1519 | + * inconsistent state during the merging. We always have 1 reference for |
1520 | + * the tree and 1 for this task (which is unpinning the extent map or |
1521 | + * clearing the logging flag), so anything > 2 means it's being used by |
1522 | + * other tasks too. |
1523 | + */ |
1524 | + if (refcount_read(&em->refs) > 2) |
1525 | + return; |
1526 | + |
1527 | if (em->start != 0) { |
1528 | rb = rb_prev(&em->rb_node); |
1529 | if (rb) |
1530 | diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c |
1531 | index b57f3618e58e..454a1015d026 100644 |
1532 | --- a/fs/btrfs/ref-verify.c |
1533 | +++ b/fs/btrfs/ref-verify.c |
1534 | @@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, |
1535 | */ |
1536 | be = add_block_entry(fs_info, bytenr, num_bytes, ref_root); |
1537 | if (IS_ERR(be)) { |
1538 | + kfree(ref); |
1539 | kfree(ra); |
1540 | ret = PTR_ERR(be); |
1541 | goto out; |
1542 | @@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, |
1543 | "re-allocated a block that still has references to it!"); |
1544 | dump_block_entry(fs_info, be); |
1545 | dump_ref_action(fs_info, ra); |
1546 | + kfree(ref); |
1547 | + kfree(ra); |
1548 | goto out_unlock; |
1549 | } |
1550 | |
1551 | @@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, |
1552 | "dropping a ref for a existing root that doesn't have a ref on the block"); |
1553 | dump_block_entry(fs_info, be); |
1554 | dump_ref_action(fs_info, ra); |
1555 | + kfree(ref); |
1556 | kfree(ra); |
1557 | goto out_unlock; |
1558 | } |
1559 | @@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, |
1560 | "attempting to add another ref for an existing ref on a tree block"); |
1561 | dump_block_entry(fs_info, be); |
1562 | dump_ref_action(fs_info, ra); |
1563 | + kfree(ref); |
1564 | kfree(ra); |
1565 | goto out_unlock; |
1566 | } |
1567 | diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c |
1568 | index abcd93a3ca1d..aea24202cd35 100644 |
1569 | --- a/fs/btrfs/super.c |
1570 | +++ b/fs/btrfs/super.c |
1571 | @@ -1804,6 +1804,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) |
1572 | } |
1573 | |
1574 | if (btrfs_super_log_root(fs_info->super_copy) != 0) { |
1575 | + btrfs_warn(fs_info, |
1576 | + "mount required to replay tree-log, cannot remount read-write"); |
1577 | ret = -EINVAL; |
1578 | goto restore; |
1579 | } |
1580 | diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c |
1581 | index 07d8ace61f77..637624ab6e46 100644 |
1582 | --- a/fs/cifs/cifsfs.c |
1583 | +++ b/fs/cifs/cifsfs.c |
1584 | @@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses) |
1585 | seq_puts(s, "ntlm"); |
1586 | break; |
1587 | case Kerberos: |
1588 | - seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid)); |
1589 | + seq_puts(s, "krb5"); |
1590 | break; |
1591 | case RawNTLMSSP: |
1592 | seq_puts(s, "ntlmssp"); |
1593 | @@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses) |
1594 | |
1595 | if (ses->sign) |
1596 | seq_puts(s, "i"); |
1597 | + |
1598 | + if (ses->sectype == Kerberos) |
1599 | + seq_printf(s, ",cruid=%u", |
1600 | + from_kuid_munged(&init_user_ns, ses->cred_uid)); |
1601 | } |
1602 | |
1603 | static void |
1604 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
1605 | index 662256fa2a18..b75d208d4b2b 100644 |
1606 | --- a/fs/cifs/smb2ops.c |
1607 | +++ b/fs/cifs/smb2ops.c |
1608 | @@ -1087,7 +1087,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, |
1609 | void *data[1]; |
1610 | struct smb2_file_full_ea_info *ea = NULL; |
1611 | struct kvec close_iov[1]; |
1612 | - int rc; |
1613 | + struct smb2_query_info_rsp *rsp; |
1614 | + int rc, used_len = 0; |
1615 | |
1616 | if (smb3_encryption_required(tcon)) |
1617 | flags |= CIFS_TRANSFORM_REQ; |
1618 | @@ -1110,6 +1111,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon, |
1619 | cifs_sb); |
1620 | if (rc == -ENODATA) |
1621 | goto sea_exit; |
1622 | + } else { |
1623 | + /* If we are adding a attribute we should first check |
1624 | + * if there will be enough space available to store |
1625 | + * the new EA. If not we should not add it since we |
1626 | + * would not be able to even read the EAs back. |
1627 | + */ |
1628 | + rc = smb2_query_info_compound(xid, tcon, utf16_path, |
1629 | + FILE_READ_EA, |
1630 | + FILE_FULL_EA_INFORMATION, |
1631 | + SMB2_O_INFO_FILE, |
1632 | + CIFSMaxBufSize - |
1633 | + MAX_SMB2_CREATE_RESPONSE_SIZE - |
1634 | + MAX_SMB2_CLOSE_RESPONSE_SIZE, |
1635 | + &rsp_iov[1], &resp_buftype[1], cifs_sb); |
1636 | + if (rc == 0) { |
1637 | + rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; |
1638 | + used_len = le32_to_cpu(rsp->OutputBufferLength); |
1639 | + } |
1640 | + free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); |
1641 | + resp_buftype[1] = CIFS_NO_BUFFER; |
1642 | + memset(&rsp_iov[1], 0, sizeof(rsp_iov[1])); |
1643 | + rc = 0; |
1644 | + |
1645 | + /* Use a fudge factor of 256 bytes in case we collide |
1646 | + * with a different set_EAs command. |
1647 | + */ |
1648 | + if(CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - |
1649 | + MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 < |
1650 | + used_len + ea_name_len + ea_value_len + 1) { |
1651 | + rc = -ENOSPC; |
1652 | + goto sea_exit; |
1653 | + } |
1654 | } |
1655 | } |
1656 | |
1657 | diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c |
1658 | index d4d4fdfac1a6..ff8e1205127e 100644 |
1659 | --- a/fs/ext4/block_validity.c |
1660 | +++ b/fs/ext4/block_validity.c |
1661 | @@ -203,6 +203,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb, |
1662 | return PTR_ERR(inode); |
1663 | num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits; |
1664 | while (i < num) { |
1665 | + cond_resched(); |
1666 | map.m_lblk = i; |
1667 | map.m_len = num - i; |
1668 | n = ext4_map_blocks(NULL, inode, &map, 0); |
1669 | diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c |
1670 | index 5ef8d7ae231b..2743c6f8a457 100644 |
1671 | --- a/fs/ext4/dir.c |
1672 | +++ b/fs/ext4/dir.c |
1673 | @@ -130,12 +130,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) |
1674 | if (err != ERR_BAD_DX_DIR) { |
1675 | return err; |
1676 | } |
1677 | - /* |
1678 | - * We don't set the inode dirty flag since it's not |
1679 | - * critical that it get flushed back to the disk. |
1680 | - */ |
1681 | - ext4_clear_inode_flag(file_inode(file), |
1682 | - EXT4_INODE_INDEX); |
1683 | + /* Can we just clear INDEX flag to ignore htree information? */ |
1684 | + if (!ext4_has_metadata_csum(sb)) { |
1685 | + /* |
1686 | + * We don't set the inode dirty flag since it's not |
1687 | + * critical that it gets flushed back to the disk. |
1688 | + */ |
1689 | + ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); |
1690 | + } |
1691 | } |
1692 | |
1693 | if (ext4_has_inline_data(inode)) { |
1694 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h |
1695 | index 03db3e71676c..e2f65b565c1f 100644 |
1696 | --- a/fs/ext4/ext4.h |
1697 | +++ b/fs/ext4/ext4.h |
1698 | @@ -2476,8 +2476,11 @@ void ext4_insert_dentry(struct inode *inode, |
1699 | struct ext4_filename *fname); |
1700 | static inline void ext4_update_dx_flag(struct inode *inode) |
1701 | { |
1702 | - if (!ext4_has_feature_dir_index(inode->i_sb)) |
1703 | + if (!ext4_has_feature_dir_index(inode->i_sb)) { |
1704 | + /* ext4_iget() should have caught this... */ |
1705 | + WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb)); |
1706 | ext4_clear_inode_flag(inode, EXT4_INODE_INDEX); |
1707 | + } |
1708 | } |
1709 | static const unsigned char ext4_filetype_table[] = { |
1710 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK |
1711 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
1712 | index 8bba6cd5e870..76a38ef5f226 100644 |
1713 | --- a/fs/ext4/inode.c |
1714 | +++ b/fs/ext4/inode.c |
1715 | @@ -4972,6 +4972,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, |
1716 | ret = -EFSCORRUPTED; |
1717 | goto bad_inode; |
1718 | } |
1719 | + /* |
1720 | + * If dir_index is not enabled but there's dir with INDEX flag set, |
1721 | + * we'd normally treat htree data as empty space. But with metadata |
1722 | + * checksumming that corrupts checksums so forbid that. |
1723 | + */ |
1724 | + if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) && |
1725 | + ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) { |
1726 | + ext4_error_inode(inode, function, line, 0, |
1727 | + "iget: Dir with htree data on filesystem without dir_index feature."); |
1728 | + ret = -EFSCORRUPTED; |
1729 | + goto bad_inode; |
1730 | + } |
1731 | ei->i_disksize = inode->i_size; |
1732 | #ifdef CONFIG_QUOTA |
1733 | ei->i_reserved_quota = 0; |
1734 | diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c |
1735 | index 2305b4374fd3..9d00e0dd2ba9 100644 |
1736 | --- a/fs/ext4/mmp.c |
1737 | +++ b/fs/ext4/mmp.c |
1738 | @@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, |
1739 | { |
1740 | __ext4_warning(sb, function, line, "%s", msg); |
1741 | __ext4_warning(sb, function, line, |
1742 | - "MMP failure info: last update time: %llu, last update " |
1743 | - "node: %s, last update device: %s", |
1744 | - (long long unsigned int) le64_to_cpu(mmp->mmp_time), |
1745 | - mmp->mmp_nodename, mmp->mmp_bdevname); |
1746 | + "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s", |
1747 | + (unsigned long long)le64_to_cpu(mmp->mmp_time), |
1748 | + (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename, |
1749 | + (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname); |
1750 | } |
1751 | |
1752 | /* |
1753 | @@ -154,6 +154,7 @@ static int kmmpd(void *data) |
1754 | mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval, |
1755 | EXT4_MMP_MIN_CHECK_INTERVAL); |
1756 | mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); |
1757 | + BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE); |
1758 | bdevname(bh->b_bdev, mmp->mmp_bdevname); |
1759 | |
1760 | memcpy(mmp->mmp_nodename, init_utsname()->nodename, |
1761 | @@ -375,7 +376,8 @@ skip: |
1762 | /* |
1763 | * Start a kernel thread to update the MMP block periodically. |
1764 | */ |
1765 | - EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s", |
1766 | + EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s", |
1767 | + (int)sizeof(mmp->mmp_bdevname), |
1768 | bdevname(bh->b_bdev, |
1769 | mmp->mmp_bdevname)); |
1770 | if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) { |
1771 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
1772 | index f56402e9c11c..94d84910dc1e 100644 |
1773 | --- a/fs/ext4/namei.c |
1774 | +++ b/fs/ext4/namei.c |
1775 | @@ -2205,6 +2205,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, |
1776 | retval = ext4_dx_add_entry(handle, &fname, dir, inode); |
1777 | if (!retval || (retval != ERR_BAD_DX_DIR)) |
1778 | goto out; |
1779 | + /* Can we just ignore htree data? */ |
1780 | + if (ext4_has_metadata_csum(sb)) { |
1781 | + EXT4_ERROR_INODE(dir, |
1782 | + "Directory has corrupted htree index."); |
1783 | + retval = -EFSCORRUPTED; |
1784 | + goto out; |
1785 | + } |
1786 | ext4_clear_inode_flag(dir, EXT4_INODE_INDEX); |
1787 | dx_fallback++; |
1788 | ext4_mark_inode_dirty(handle, dir); |
1789 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
1790 | index 66162b430edc..914230e63054 100644 |
1791 | --- a/fs/ext4/super.c |
1792 | +++ b/fs/ext4/super.c |
1793 | @@ -2961,17 +2961,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly) |
1794 | return 0; |
1795 | } |
1796 | |
1797 | -#ifndef CONFIG_QUOTA |
1798 | - if (ext4_has_feature_quota(sb) && !readonly) { |
1799 | +#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2) |
1800 | + if (!readonly && (ext4_has_feature_quota(sb) || |
1801 | + ext4_has_feature_project(sb))) { |
1802 | ext4_msg(sb, KERN_ERR, |
1803 | - "Filesystem with quota feature cannot be mounted RDWR " |
1804 | - "without CONFIG_QUOTA"); |
1805 | - return 0; |
1806 | - } |
1807 | - if (ext4_has_feature_project(sb) && !readonly) { |
1808 | - ext4_msg(sb, KERN_ERR, |
1809 | - "Filesystem with project quota feature cannot be mounted RDWR " |
1810 | - "without CONFIG_QUOTA"); |
1811 | + "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2"); |
1812 | return 0; |
1813 | } |
1814 | #endif /* CONFIG_QUOTA */ |
1815 | @@ -3765,6 +3759,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1816 | */ |
1817 | sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; |
1818 | |
1819 | + blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); |
1820 | + if (blocksize < EXT4_MIN_BLOCK_SIZE || |
1821 | + blocksize > EXT4_MAX_BLOCK_SIZE) { |
1822 | + ext4_msg(sb, KERN_ERR, |
1823 | + "Unsupported filesystem blocksize %d (%d log_block_size)", |
1824 | + blocksize, le32_to_cpu(es->s_log_block_size)); |
1825 | + goto failed_mount; |
1826 | + } |
1827 | + |
1828 | if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) { |
1829 | sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE; |
1830 | sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO; |
1831 | @@ -3782,6 +3785,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1832 | ext4_msg(sb, KERN_ERR, |
1833 | "unsupported inode size: %d", |
1834 | sbi->s_inode_size); |
1835 | + ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize); |
1836 | goto failed_mount; |
1837 | } |
1838 | /* |
1839 | @@ -3985,14 +3989,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
1840 | if (!ext4_feature_set_ok(sb, (sb_rdonly(sb)))) |
1841 | goto failed_mount; |
1842 | |
1843 | - blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size); |
1844 | - if (blocksize < EXT4_MIN_BLOCK_SIZE || |
1845 | - blocksize > EXT4_MAX_BLOCK_SIZE) { |
1846 | - ext4_msg(sb, KERN_ERR, |
1847 | - "Unsupported filesystem blocksize %d (%d log_block_size)", |
1848 | - blocksize, le32_to_cpu(es->s_log_block_size)); |
1849 | - goto failed_mount; |
1850 | - } |
1851 | if (le32_to_cpu(es->s_log_block_size) > |
1852 | (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { |
1853 | ext4_msg(sb, KERN_ERR, |
1854 | @@ -5544,9 +5540,15 @@ static int ext4_statfs_project(struct super_block *sb, |
1855 | return PTR_ERR(dquot); |
1856 | spin_lock(&dquot->dq_dqb_lock); |
1857 | |
1858 | - limit = (dquot->dq_dqb.dqb_bsoftlimit ? |
1859 | - dquot->dq_dqb.dqb_bsoftlimit : |
1860 | - dquot->dq_dqb.dqb_bhardlimit) >> sb->s_blocksize_bits; |
1861 | + limit = 0; |
1862 | + if (dquot->dq_dqb.dqb_bsoftlimit && |
1863 | + (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit)) |
1864 | + limit = dquot->dq_dqb.dqb_bsoftlimit; |
1865 | + if (dquot->dq_dqb.dqb_bhardlimit && |
1866 | + (!limit || dquot->dq_dqb.dqb_bhardlimit < limit)) |
1867 | + limit = dquot->dq_dqb.dqb_bhardlimit; |
1868 | + limit >>= sb->s_blocksize_bits; |
1869 | + |
1870 | if (limit && buf->f_blocks > limit) { |
1871 | curblock = (dquot->dq_dqb.dqb_curspace + |
1872 | dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits; |
1873 | @@ -5556,9 +5558,14 @@ static int ext4_statfs_project(struct super_block *sb, |
1874 | (buf->f_blocks - curblock) : 0; |
1875 | } |
1876 | |
1877 | - limit = dquot->dq_dqb.dqb_isoftlimit ? |
1878 | - dquot->dq_dqb.dqb_isoftlimit : |
1879 | - dquot->dq_dqb.dqb_ihardlimit; |
1880 | + limit = 0; |
1881 | + if (dquot->dq_dqb.dqb_isoftlimit && |
1882 | + (!limit || dquot->dq_dqb.dqb_isoftlimit < limit)) |
1883 | + limit = dquot->dq_dqb.dqb_isoftlimit; |
1884 | + if (dquot->dq_dqb.dqb_ihardlimit && |
1885 | + (!limit || dquot->dq_dqb.dqb_ihardlimit < limit)) |
1886 | + limit = dquot->dq_dqb.dqb_ihardlimit; |
1887 | + |
1888 | if (limit && buf->f_files > limit) { |
1889 | buf->f_files = limit; |
1890 | buf->f_ffree = |
1891 | diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c |
1892 | index c43591cd70f1..2a42904bcd62 100644 |
1893 | --- a/fs/jbd2/commit.c |
1894 | +++ b/fs/jbd2/commit.c |
1895 | @@ -974,29 +974,33 @@ restart_loop: |
1896 | * it. */ |
1897 | |
1898 | /* |
1899 | - * A buffer which has been freed while still being journaled by |
1900 | - * a previous transaction. |
1901 | - */ |
1902 | - if (buffer_freed(bh)) { |
1903 | + * A buffer which has been freed while still being journaled |
1904 | + * by a previous transaction, refile the buffer to BJ_Forget of |
1905 | + * the running transaction. If the just committed transaction |
1906 | + * contains "add to orphan" operation, we can completely |
1907 | + * invalidate the buffer now. We are rather through in that |
1908 | + * since the buffer may be still accessible when blocksize < |
1909 | + * pagesize and it is attached to the last partial page. |
1910 | + */ |
1911 | + if (buffer_freed(bh) && !jh->b_next_transaction) { |
1912 | + struct address_space *mapping; |
1913 | + |
1914 | + clear_buffer_freed(bh); |
1915 | + clear_buffer_jbddirty(bh); |
1916 | + |
1917 | /* |
1918 | - * If the running transaction is the one containing |
1919 | - * "add to orphan" operation (b_next_transaction != |
1920 | - * NULL), we have to wait for that transaction to |
1921 | - * commit before we can really get rid of the buffer. |
1922 | - * So just clear b_modified to not confuse transaction |
1923 | - * credit accounting and refile the buffer to |
1924 | - * BJ_Forget of the running transaction. If the just |
1925 | - * committed transaction contains "add to orphan" |
1926 | - * operation, we can completely invalidate the buffer |
1927 | - * now. We are rather through in that since the |
1928 | - * buffer may be still accessible when blocksize < |
1929 | - * pagesize and it is attached to the last partial |
1930 | - * page. |
1931 | + * Block device buffers need to stay mapped all the |
1932 | + * time, so it is enough to clear buffer_jbddirty and |
1933 | + * buffer_freed bits. For the file mapping buffers (i.e. |
1934 | + * journalled data) we need to unmap buffer and clear |
1935 | + * more bits. We also need to be careful about the check |
1936 | + * because the data page mapping can get cleared under |
1937 | + * out hands, which alse need not to clear more bits |
1938 | + * because the page and buffers will be freed and can |
1939 | + * never be reused once we are done with them. |
1940 | */ |
1941 | - jh->b_modified = 0; |
1942 | - if (!jh->b_next_transaction) { |
1943 | - clear_buffer_freed(bh); |
1944 | - clear_buffer_jbddirty(bh); |
1945 | + mapping = READ_ONCE(bh->b_page->mapping); |
1946 | + if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { |
1947 | clear_buffer_mapped(bh); |
1948 | clear_buffer_new(bh); |
1949 | clear_buffer_req(bh); |
1950 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
1951 | index bee8498d7792..3930c68a9c20 100644 |
1952 | --- a/fs/jbd2/transaction.c |
1953 | +++ b/fs/jbd2/transaction.c |
1954 | @@ -2296,14 +2296,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, |
1955 | return -EBUSY; |
1956 | } |
1957 | /* |
1958 | - * OK, buffer won't be reachable after truncate. We just set |
1959 | - * j_next_transaction to the running transaction (if there is |
1960 | - * one) and mark buffer as freed so that commit code knows it |
1961 | - * should clear dirty bits when it is done with the buffer. |
1962 | + * OK, buffer won't be reachable after truncate. We just clear |
1963 | + * b_modified to not confuse transaction credit accounting, and |
1964 | + * set j_next_transaction to the running transaction (if there |
1965 | + * is one) and mark buffer as freed so that commit code knows |
1966 | + * it should clear dirty bits when it is done with the buffer. |
1967 | */ |
1968 | set_buffer_freed(bh); |
1969 | if (journal->j_running_transaction && buffer_jbddirty(bh)) |
1970 | jh->b_next_transaction = journal->j_running_transaction; |
1971 | + jh->b_modified = 0; |
1972 | jbd2_journal_put_journal_head(jh); |
1973 | spin_unlock(&journal->j_list_lock); |
1974 | jbd_unlock_bh_state(bh); |
1975 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
1976 | index 423960d480f1..f808fb34b110 100644 |
1977 | --- a/fs/nfs/nfs4proc.c |
1978 | +++ b/fs/nfs/nfs4proc.c |
1979 | @@ -5293,7 +5293,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr, |
1980 | hdr->timestamp = jiffies; |
1981 | |
1982 | msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE]; |
1983 | - nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0); |
1984 | + nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0); |
1985 | nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr); |
1986 | } |
1987 | |
1988 | diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h |
1989 | index e5e041413581..d1fdf26ccb33 100644 |
1990 | --- a/include/acpi/acpixf.h |
1991 | +++ b/include/acpi/acpixf.h |
1992 | @@ -748,6 +748,7 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3 |
1993 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) |
1994 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) |
1995 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) |
1996 | +ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) |
1997 | |
1998 | ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status |
1999 | acpi_get_gpe_device(u32 gpe_index, |
2000 | diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h |
2001 | index b70af921c614..803bb63dd5ff 100644 |
2002 | --- a/include/linux/gpio/consumer.h |
2003 | +++ b/include/linux/gpio/consumer.h |
2004 | @@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size, |
2005 | |
2006 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); |
2007 | int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); |
2008 | +void gpiod_toggle_active_low(struct gpio_desc *desc); |
2009 | |
2010 | int gpiod_is_active_low(const struct gpio_desc *desc); |
2011 | int gpiod_cansleep(const struct gpio_desc *desc); |
2012 | @@ -479,6 +480,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) |
2013 | return -ENOSYS; |
2014 | } |
2015 | |
2016 | +static inline void gpiod_toggle_active_low(struct gpio_desc *desc) |
2017 | +{ |
2018 | + /* GPIO can never have been requested */ |
2019 | + WARN_ON(desc); |
2020 | +} |
2021 | + |
2022 | static inline int gpiod_is_active_low(const struct gpio_desc *desc) |
2023 | { |
2024 | /* GPIO can never have been requested */ |
2025 | diff --git a/include/linux/suspend.h b/include/linux/suspend.h |
2026 | index 6fc8843f1c9e..cd97d2c8840c 100644 |
2027 | --- a/include/linux/suspend.h |
2028 | +++ b/include/linux/suspend.h |
2029 | @@ -191,7 +191,7 @@ struct platform_s2idle_ops { |
2030 | int (*begin)(void); |
2031 | int (*prepare)(void); |
2032 | int (*prepare_late)(void); |
2033 | - void (*wake)(void); |
2034 | + bool (*wake)(void); |
2035 | void (*restore_early)(void); |
2036 | void (*restore)(void); |
2037 | void (*end)(void); |
2038 | diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c |
2039 | index f3b7239f1892..27f149f5d4a9 100644 |
2040 | --- a/kernel/power/suspend.c |
2041 | +++ b/kernel/power/suspend.c |
2042 | @@ -131,11 +131,12 @@ static void s2idle_loop(void) |
2043 | * to avoid them upfront. |
2044 | */ |
2045 | for (;;) { |
2046 | - if (s2idle_ops && s2idle_ops->wake) |
2047 | - s2idle_ops->wake(); |
2048 | - |
2049 | - if (pm_wakeup_pending()) |
2050 | + if (s2idle_ops && s2idle_ops->wake) { |
2051 | + if (s2idle_ops->wake()) |
2052 | + break; |
2053 | + } else if (pm_wakeup_pending()) { |
2054 | break; |
2055 | + } |
2056 | |
2057 | pm_wakeup_clear(false); |
2058 | |
2059 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
2060 | index 00743684a549..dfaefb175ba0 100644 |
2061 | --- a/kernel/sched/core.c |
2062 | +++ b/kernel/sched/core.c |
2063 | @@ -7250,7 +7250,7 @@ capacity_from_percent(char *buf) |
2064 | &req.percent); |
2065 | if (req.ret) |
2066 | return req; |
2067 | - if (req.percent > UCLAMP_PERCENT_SCALE) { |
2068 | + if ((u64)req.percent > UCLAMP_PERCENT_SCALE) { |
2069 | req.ret = -ERANGE; |
2070 | return req; |
2071 | } |
2072 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
2073 | index 54dd8849d1cc..1e3b9d34aaa4 100644 |
2074 | --- a/net/mac80211/mlme.c |
2075 | +++ b/net/mac80211/mlme.c |
2076 | @@ -8,7 +8,7 @@ |
2077 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
2078 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
2079 | * Copyright (C) 2015 - 2017 Intel Deutschland GmbH |
2080 | - * Copyright (C) 2018 - 2019 Intel Corporation |
2081 | + * Copyright (C) 2018 - 2020 Intel Corporation |
2082 | */ |
2083 | |
2084 | #include <linux/delay.h> |
2085 | @@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
2086 | if (!res) { |
2087 | ch_switch.timestamp = timestamp; |
2088 | ch_switch.device_timestamp = device_timestamp; |
2089 | - ch_switch.block_tx = beacon ? csa_ie.mode : 0; |
2090 | + ch_switch.block_tx = csa_ie.mode; |
2091 | ch_switch.chandef = csa_ie.chandef; |
2092 | ch_switch.count = csa_ie.count; |
2093 | ch_switch.delay = csa_ie.max_switch_time; |
2094 | @@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
2095 | |
2096 | sdata->vif.csa_active = true; |
2097 | sdata->csa_chandef = csa_ie.chandef; |
2098 | - sdata->csa_block_tx = ch_switch.block_tx; |
2099 | + sdata->csa_block_tx = csa_ie.mode; |
2100 | ifmgd->csa_ignored_same_chan = false; |
2101 | |
2102 | if (sdata->csa_block_tx) |
2103 | @@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
2104 | * reset when the disconnection worker runs. |
2105 | */ |
2106 | sdata->vif.csa_active = true; |
2107 | - sdata->csa_block_tx = ch_switch.block_tx; |
2108 | + sdata->csa_block_tx = csa_ie.mode; |
2109 | |
2110 | ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work); |
2111 | mutex_unlock(&local->chanctx_mtx); |
2112 | diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c |
2113 | index 9901a811f598..0ad45a8fe3fb 100644 |
2114 | --- a/net/sunrpc/xprtrdma/frwr_ops.c |
2115 | +++ b/net/sunrpc/xprtrdma/frwr_ops.c |
2116 | @@ -326,8 +326,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, |
2117 | { |
2118 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
2119 | struct ib_reg_wr *reg_wr; |
2120 | + int i, n, dma_nents; |
2121 | struct ib_mr *ibmr; |
2122 | - int i, n; |
2123 | u8 key; |
2124 | |
2125 | if (nsegs > ia->ri_max_frwr_depth) |
2126 | @@ -351,15 +351,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt, |
2127 | break; |
2128 | } |
2129 | mr->mr_dir = rpcrdma_data_dir(writing); |
2130 | + mr->mr_nents = i; |
2131 | |
2132 | - mr->mr_nents = |
2133 | - ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir); |
2134 | - if (!mr->mr_nents) |
2135 | + dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents, |
2136 | + mr->mr_dir); |
2137 | + if (!dma_nents) |
2138 | goto out_dmamap_err; |
2139 | |
2140 | ibmr = mr->frwr.fr_mr; |
2141 | - n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE); |
2142 | - if (unlikely(n != mr->mr_nents)) |
2143 | + n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE); |
2144 | + if (n != dma_nents) |
2145 | goto out_mapmr_err; |
2146 | |
2147 | ibmr->iova &= 0x00000000ffffffff; |
2148 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
2149 | index 68832f52c1ad..a66d4be3516e 100644 |
2150 | --- a/sound/pci/hda/patch_realtek.c |
2151 | +++ b/sound/pci/hda/patch_realtek.c |
2152 | @@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
2153 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), |
2154 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), |
2155 | SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), |
2156 | + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), |
2157 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), |
2158 | SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), |
2159 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), |
2160 | @@ -5701,8 +5702,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec, |
2161 | break; |
2162 | case HDA_FIXUP_ACT_INIT: |
2163 | switch (codec->core.vendor_id) { |
2164 | + case 0x10ec0215: |
2165 | case 0x10ec0225: |
2166 | + case 0x10ec0285: |
2167 | case 0x10ec0295: |
2168 | + case 0x10ec0289: |
2169 | case 0x10ec0299: |
2170 | alc_write_coef_idx(codec, 0x48, 0xd011); |
2171 | alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045); |
2172 | diff --git a/sound/usb/clock.c b/sound/usb/clock.c |
2173 | index 6b8c14f9b5d4..a48313dfa967 100644 |
2174 | --- a/sound/usb/clock.c |
2175 | +++ b/sound/usb/clock.c |
2176 | @@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i |
2177 | return ret; |
2178 | } |
2179 | |
2180 | +/* |
2181 | + * Assume the clock is valid if clock source supports only one single sample |
2182 | + * rate, the terminal is connected directly to it (there is no clock selector) |
2183 | + * and clock type is internal. This is to deal with some Denon DJ controllers |
2184 | + * that always reports that clock is invalid. |
2185 | + */ |
2186 | +static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip, |
2187 | + struct audioformat *fmt, |
2188 | + int source_id) |
2189 | +{ |
2190 | + if (fmt->protocol == UAC_VERSION_2) { |
2191 | + struct uac_clock_source_descriptor *cs_desc = |
2192 | + snd_usb_find_clock_source(chip->ctrl_intf, source_id); |
2193 | + |
2194 | + if (!cs_desc) |
2195 | + return false; |
2196 | + |
2197 | + return (fmt->nr_rates == 1 && |
2198 | + (fmt->clock & 0xff) == cs_desc->bClockID && |
2199 | + (cs_desc->bmAttributes & 0x3) != |
2200 | + UAC_CLOCK_SOURCE_TYPE_EXT); |
2201 | + } |
2202 | + |
2203 | + return false; |
2204 | +} |
2205 | + |
2206 | static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, |
2207 | - int protocol, |
2208 | + struct audioformat *fmt, |
2209 | int source_id) |
2210 | { |
2211 | int err; |
2212 | @@ -160,26 +186,26 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, |
2213 | struct usb_device *dev = chip->dev; |
2214 | u32 bmControls; |
2215 | |
2216 | - if (protocol == UAC_VERSION_3) { |
2217 | + if (fmt->protocol == UAC_VERSION_3) { |
2218 | struct uac3_clock_source_descriptor *cs_desc = |
2219 | snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id); |
2220 | |
2221 | if (!cs_desc) |
2222 | - return 0; |
2223 | + return false; |
2224 | bmControls = le32_to_cpu(cs_desc->bmControls); |
2225 | } else { /* UAC_VERSION_1/2 */ |
2226 | struct uac_clock_source_descriptor *cs_desc = |
2227 | snd_usb_find_clock_source(chip->ctrl_intf, source_id); |
2228 | |
2229 | if (!cs_desc) |
2230 | - return 0; |
2231 | + return false; |
2232 | bmControls = cs_desc->bmControls; |
2233 | } |
2234 | |
2235 | /* If a clock source can't tell us whether it's valid, we assume it is */ |
2236 | if (!uac_v2v3_control_is_readable(bmControls, |
2237 | UAC2_CS_CONTROL_CLOCK_VALID)) |
2238 | - return 1; |
2239 | + return true; |
2240 | |
2241 | err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR, |
2242 | USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN, |
2243 | @@ -191,13 +217,17 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, |
2244 | dev_warn(&dev->dev, |
2245 | "%s(): cannot get clock validity for id %d\n", |
2246 | __func__, source_id); |
2247 | - return 0; |
2248 | + return false; |
2249 | } |
2250 | |
2251 | - return !!data; |
2252 | + if (data) |
2253 | + return true; |
2254 | + else |
2255 | + return uac_clock_source_is_valid_quirk(chip, fmt, source_id); |
2256 | } |
2257 | |
2258 | -static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2259 | +static int __uac_clock_find_source(struct snd_usb_audio *chip, |
2260 | + struct audioformat *fmt, int entity_id, |
2261 | unsigned long *visited, bool validate) |
2262 | { |
2263 | struct uac_clock_source_descriptor *source; |
2264 | @@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2265 | source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id); |
2266 | if (source) { |
2267 | entity_id = source->bClockID; |
2268 | - if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2, |
2269 | + if (validate && !uac_clock_source_is_valid(chip, fmt, |
2270 | entity_id)) { |
2271 | usb_audio_err(chip, |
2272 | "clock source %d is not valid, cannot use\n", |
2273 | @@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2274 | } |
2275 | |
2276 | cur = ret; |
2277 | - ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1], |
2278 | - visited, validate); |
2279 | + ret = __uac_clock_find_source(chip, fmt, |
2280 | + selector->baCSourceID[ret - 1], |
2281 | + visited, validate); |
2282 | if (!validate || ret > 0 || !chip->autoclock) |
2283 | return ret; |
2284 | |
2285 | @@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2286 | if (i == cur) |
2287 | continue; |
2288 | |
2289 | - ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1], |
2290 | - visited, true); |
2291 | + ret = __uac_clock_find_source(chip, fmt, |
2292 | + selector->baCSourceID[i - 1], |
2293 | + visited, true); |
2294 | if (ret < 0) |
2295 | continue; |
2296 | |
2297 | @@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2298 | /* FIXME: multipliers only act as pass-thru element for now */ |
2299 | multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id); |
2300 | if (multiplier) |
2301 | - return __uac_clock_find_source(chip, multiplier->bCSourceID, |
2302 | - visited, validate); |
2303 | + return __uac_clock_find_source(chip, fmt, |
2304 | + multiplier->bCSourceID, |
2305 | + visited, validate); |
2306 | |
2307 | return -EINVAL; |
2308 | } |
2309 | |
2310 | -static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2311 | - unsigned long *visited, bool validate) |
2312 | +static int __uac3_clock_find_source(struct snd_usb_audio *chip, |
2313 | + struct audioformat *fmt, int entity_id, |
2314 | + unsigned long *visited, bool validate) |
2315 | { |
2316 | struct uac3_clock_source_descriptor *source; |
2317 | struct uac3_clock_selector_descriptor *selector; |
2318 | @@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2319 | source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id); |
2320 | if (source) { |
2321 | entity_id = source->bClockID; |
2322 | - if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3, |
2323 | + if (validate && !uac_clock_source_is_valid(chip, fmt, |
2324 | entity_id)) { |
2325 | usb_audio_err(chip, |
2326 | "clock source %d is not valid, cannot use\n", |
2327 | @@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2328 | } |
2329 | |
2330 | cur = ret; |
2331 | - ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1], |
2332 | + ret = __uac3_clock_find_source(chip, fmt, |
2333 | + selector->baCSourceID[ret - 1], |
2334 | visited, validate); |
2335 | if (!validate || ret > 0 || !chip->autoclock) |
2336 | return ret; |
2337 | @@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2338 | if (i == cur) |
2339 | continue; |
2340 | |
2341 | - ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1], |
2342 | - visited, true); |
2343 | + ret = __uac3_clock_find_source(chip, fmt, |
2344 | + selector->baCSourceID[i - 1], |
2345 | + visited, true); |
2346 | if (ret < 0) |
2347 | continue; |
2348 | |
2349 | @@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2350 | multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf, |
2351 | entity_id); |
2352 | if (multiplier) |
2353 | - return __uac3_clock_find_source(chip, multiplier->bCSourceID, |
2354 | + return __uac3_clock_find_source(chip, fmt, |
2355 | + multiplier->bCSourceID, |
2356 | visited, validate); |
2357 | |
2358 | return -EINVAL; |
2359 | @@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id, |
2360 | * |
2361 | * Returns the clock source UnitID (>=0) on success, or an error. |
2362 | */ |
2363 | -int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, |
2364 | - int entity_id, bool validate) |
2365 | +int snd_usb_clock_find_source(struct snd_usb_audio *chip, |
2366 | + struct audioformat *fmt, bool validate) |
2367 | { |
2368 | DECLARE_BITMAP(visited, 256); |
2369 | memset(visited, 0, sizeof(visited)); |
2370 | |
2371 | - switch (protocol) { |
2372 | + switch (fmt->protocol) { |
2373 | case UAC_VERSION_2: |
2374 | - return __uac_clock_find_source(chip, entity_id, visited, |
2375 | + return __uac_clock_find_source(chip, fmt, fmt->clock, visited, |
2376 | validate); |
2377 | case UAC_VERSION_3: |
2378 | - return __uac3_clock_find_source(chip, entity_id, visited, |
2379 | + return __uac3_clock_find_source(chip, fmt, fmt->clock, visited, |
2380 | validate); |
2381 | default: |
2382 | return -EINVAL; |
2383 | @@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, |
2384 | * automatic clock selection if the current clock is not |
2385 | * valid. |
2386 | */ |
2387 | - clock = snd_usb_clock_find_source(chip, fmt->protocol, |
2388 | - fmt->clock, true); |
2389 | + clock = snd_usb_clock_find_source(chip, fmt, true); |
2390 | if (clock < 0) { |
2391 | /* We did not find a valid clock, but that might be |
2392 | * because the current sample rate does not match an |
2393 | @@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, |
2394 | * and we will do another validation after setting the |
2395 | * rate. |
2396 | */ |
2397 | - clock = snd_usb_clock_find_source(chip, fmt->protocol, |
2398 | - fmt->clock, false); |
2399 | + clock = snd_usb_clock_find_source(chip, fmt, false); |
2400 | if (clock < 0) |
2401 | return clock; |
2402 | } |
2403 | @@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface, |
2404 | |
2405 | validation: |
2406 | /* validate clock after rate change */ |
2407 | - if (!uac_clock_source_is_valid(chip, fmt->protocol, clock)) |
2408 | + if (!uac_clock_source_is_valid(chip, fmt, clock)) |
2409 | return -ENXIO; |
2410 | return 0; |
2411 | } |
2412 | diff --git a/sound/usb/clock.h b/sound/usb/clock.h |
2413 | index 076e31b79ee0..68df0fbe09d0 100644 |
2414 | --- a/sound/usb/clock.h |
2415 | +++ b/sound/usb/clock.h |
2416 | @@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface, |
2417 | struct usb_host_interface *alts, |
2418 | struct audioformat *fmt, int rate); |
2419 | |
2420 | -int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol, |
2421 | - int entity_id, bool validate); |
2422 | +int snd_usb_clock_find_source(struct snd_usb_audio *chip, |
2423 | + struct audioformat *fmt, bool validate); |
2424 | |
2425 | #endif /* __USBAUDIO_CLOCK_H */ |
2426 | diff --git a/sound/usb/format.c b/sound/usb/format.c |
2427 | index d79db71305f6..25668ba5e68e 100644 |
2428 | --- a/sound/usb/format.c |
2429 | +++ b/sound/usb/format.c |
2430 | @@ -322,8 +322,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip, |
2431 | struct usb_device *dev = chip->dev; |
2432 | unsigned char tmp[2], *data; |
2433 | int nr_triplets, data_size, ret = 0, ret_l6; |
2434 | - int clock = snd_usb_clock_find_source(chip, fp->protocol, |
2435 | - fp->clock, false); |
2436 | + int clock = snd_usb_clock_find_source(chip, fp, false); |
2437 | |
2438 | if (clock < 0) { |
2439 | dev_err(&dev->dev, |
2440 | diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c |
2441 | index 6cd4ff09c5ee..d2a050bb8341 100644 |
2442 | --- a/sound/usb/mixer.c |
2443 | +++ b/sound/usb/mixer.c |
2444 | @@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state, |
2445 | return 0; |
2446 | } |
2447 | |
2448 | +static int parse_term_effect_unit(struct mixer_build *state, |
2449 | + struct usb_audio_term *term, |
2450 | + void *p1, int id) |
2451 | +{ |
2452 | + term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */ |
2453 | + term->id = id; |
2454 | + return 0; |
2455 | +} |
2456 | + |
2457 | static int parse_term_uac2_clock_source(struct mixer_build *state, |
2458 | struct usb_audio_term *term, |
2459 | void *p1, int id) |
2460 | @@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id, |
2461 | UAC3_PROCESSING_UNIT); |
2462 | case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT): |
2463 | case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT): |
2464 | - return parse_term_proc_unit(state, term, p1, id, |
2465 | - UAC3_EFFECT_UNIT); |
2466 | + return parse_term_effect_unit(state, term, p1, id); |
2467 | case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT): |
2468 | case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2): |
2469 | case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT): |
2470 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
2471 | index 82184036437b..1ed25b1d2a6a 100644 |
2472 | --- a/sound/usb/quirks.c |
2473 | +++ b/sound/usb/quirks.c |
2474 | @@ -1402,6 +1402,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) |
2475 | case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */ |
2476 | case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */ |
2477 | case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */ |
2478 | + case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */ |
2479 | return true; |
2480 | } |
2481 | |
2482 | diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c |
2483 | index 2c41d47f6f83..90d23cc3c8d4 100644 |
2484 | --- a/tools/perf/util/stat-shadow.c |
2485 | +++ b/tools/perf/util/stat-shadow.c |
2486 | @@ -18,7 +18,6 @@ |
2487 | * AGGR_NONE: Use matching CPU |
2488 | * AGGR_THREAD: Not supported? |
2489 | */ |
2490 | -static bool have_frontend_stalled; |
2491 | |
2492 | struct runtime_stat rt_stat; |
2493 | struct stats walltime_nsecs_stats; |
2494 | @@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st) |
2495 | |
2496 | void perf_stat__init_shadow_stats(void) |
2497 | { |
2498 | - have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend"); |
2499 | runtime_stat__init(&rt_stat); |
2500 | } |
2501 | |
2502 | @@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config, |
2503 | print_metric(config, ctxp, NULL, "%7.2f ", |
2504 | "stalled cycles per insn", |
2505 | ratio); |
2506 | - } else if (have_frontend_stalled) { |
2507 | - out->new_line(config, ctxp); |
2508 | - print_metric(config, ctxp, NULL, "%7.2f ", |
2509 | - "stalled cycles per insn", 0); |
2510 | } |
2511 | } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) { |
2512 | if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0) |