Contents of /trunk/kernel-alx/patches-5.4/0241-5.4.142-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (18 months, 1 week ago) by niro
File size: 64369 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index 2bfa11d0aab36..ef3adc6ccb871 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 141 |
10 | +SUBLEVEL = 142 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c |
15 | index 9b340af02c387..dd01a4abc2582 100644 |
16 | --- a/arch/powerpc/kernel/kprobes.c |
17 | +++ b/arch/powerpc/kernel/kprobes.c |
18 | @@ -264,7 +264,8 @@ int kprobe_handler(struct pt_regs *regs) |
19 | if (user_mode(regs)) |
20 | return 0; |
21 | |
22 | - if (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)) |
23 | + if (!IS_ENABLED(CONFIG_BOOKE) && |
24 | + (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR))) |
25 | return 0; |
26 | |
27 | /* |
28 | diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h |
29 | index 6ece8561ba661..c29d8fb0ffbe2 100644 |
30 | --- a/arch/x86/include/asm/svm.h |
31 | +++ b/arch/x86/include/asm/svm.h |
32 | @@ -119,6 +119,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { |
33 | #define V_IGN_TPR_SHIFT 20 |
34 | #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) |
35 | |
36 | +#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) |
37 | + |
38 | #define V_INTR_MASKING_SHIFT 24 |
39 | #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) |
40 | |
41 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
42 | index 0edcf69659eee..1622cff009c9a 100644 |
43 | --- a/arch/x86/kernel/apic/io_apic.c |
44 | +++ b/arch/x86/kernel/apic/io_apic.c |
45 | @@ -1961,7 +1961,8 @@ static struct irq_chip ioapic_chip __read_mostly = { |
46 | .irq_set_affinity = ioapic_set_affinity, |
47 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
48 | .irq_get_irqchip_state = ioapic_irq_get_chip_state, |
49 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
50 | + .flags = IRQCHIP_SKIP_SET_WAKE | |
51 | + IRQCHIP_AFFINITY_PRE_STARTUP, |
52 | }; |
53 | |
54 | static struct irq_chip ioapic_ir_chip __read_mostly = { |
55 | @@ -1974,7 +1975,8 @@ static struct irq_chip ioapic_ir_chip __read_mostly = { |
56 | .irq_set_affinity = ioapic_set_affinity, |
57 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
58 | .irq_get_irqchip_state = ioapic_irq_get_chip_state, |
59 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
60 | + .flags = IRQCHIP_SKIP_SET_WAKE | |
61 | + IRQCHIP_AFFINITY_PRE_STARTUP, |
62 | }; |
63 | |
64 | static inline void init_IO_APIC_traps(void) |
65 | diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c |
66 | index a20873bbbed67..f86e10b1d99ce 100644 |
67 | --- a/arch/x86/kernel/apic/msi.c |
68 | +++ b/arch/x86/kernel/apic/msi.c |
69 | @@ -86,11 +86,13 @@ msi_set_affinity(struct irq_data *irqd, const struct cpumask *mask, bool force) |
70 | * The quirk bit is not set in this case. |
71 | * - The new vector is the same as the old vector |
72 | * - The old vector is MANAGED_IRQ_SHUTDOWN_VECTOR (interrupt starts up) |
73 | + * - The interrupt is not yet started up |
74 | * - The new destination CPU is the same as the old destination CPU |
75 | */ |
76 | if (!irqd_msi_nomask_quirk(irqd) || |
77 | cfg->vector == old_cfg.vector || |
78 | old_cfg.vector == MANAGED_IRQ_SHUTDOWN_VECTOR || |
79 | + !irqd_is_started(irqd) || |
80 | cfg->dest_apicid == old_cfg.dest_apicid) { |
81 | irq_msi_update_msg(irqd, cfg); |
82 | return ret; |
83 | @@ -178,7 +180,8 @@ static struct irq_chip pci_msi_controller = { |
84 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
85 | .irq_compose_msi_msg = irq_msi_compose_msg, |
86 | .irq_set_affinity = msi_set_affinity, |
87 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
88 | + .flags = IRQCHIP_SKIP_SET_WAKE | |
89 | + IRQCHIP_AFFINITY_PRE_STARTUP, |
90 | }; |
91 | |
92 | int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
93 | @@ -279,7 +282,8 @@ static struct irq_chip pci_msi_ir_controller = { |
94 | .irq_ack = irq_chip_ack_parent, |
95 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
96 | .irq_set_vcpu_affinity = irq_chip_set_vcpu_affinity_parent, |
97 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
98 | + .flags = IRQCHIP_SKIP_SET_WAKE | |
99 | + IRQCHIP_AFFINITY_PRE_STARTUP, |
100 | }; |
101 | |
102 | static struct msi_domain_info pci_msi_ir_domain_info = { |
103 | @@ -322,7 +326,8 @@ static struct irq_chip dmar_msi_controller = { |
104 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
105 | .irq_compose_msi_msg = irq_msi_compose_msg, |
106 | .irq_write_msi_msg = dmar_msi_write_msg, |
107 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
108 | + .flags = IRQCHIP_SKIP_SET_WAKE | |
109 | + IRQCHIP_AFFINITY_PRE_STARTUP, |
110 | }; |
111 | |
112 | static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info, |
113 | @@ -420,7 +425,7 @@ static struct irq_chip hpet_msi_controller __ro_after_init = { |
114 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
115 | .irq_compose_msi_msg = irq_msi_compose_msg, |
116 | .irq_write_msi_msg = hpet_msi_write_msg, |
117 | - .flags = IRQCHIP_SKIP_SET_WAKE, |
118 | + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP, |
119 | }; |
120 | |
121 | static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info, |
122 | diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c |
123 | index 50f683ecd2c6c..21eb593e0313e 100644 |
124 | --- a/arch/x86/kernel/cpu/resctrl/monitor.c |
125 | +++ b/arch/x86/kernel/cpu/resctrl/monitor.c |
126 | @@ -223,15 +223,14 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr) |
127 | return chunks >>= shift; |
128 | } |
129 | |
130 | -static int __mon_event_count(u32 rmid, struct rmid_read *rr) |
131 | +static u64 __mon_event_count(u32 rmid, struct rmid_read *rr) |
132 | { |
133 | struct mbm_state *m; |
134 | u64 chunks, tval; |
135 | |
136 | tval = __rmid_read(rmid, rr->evtid); |
137 | if (tval & (RMID_VAL_ERROR | RMID_VAL_UNAVAIL)) { |
138 | - rr->val = tval; |
139 | - return -EINVAL; |
140 | + return tval; |
141 | } |
142 | switch (rr->evtid) { |
143 | case QOS_L3_OCCUP_EVENT_ID: |
144 | @@ -243,12 +242,6 @@ static int __mon_event_count(u32 rmid, struct rmid_read *rr) |
145 | case QOS_L3_MBM_LOCAL_EVENT_ID: |
146 | m = &rr->d->mbm_local[rmid]; |
147 | break; |
148 | - default: |
149 | - /* |
150 | - * Code would never reach here because |
151 | - * an invalid event id would fail the __rmid_read. |
152 | - */ |
153 | - return -EINVAL; |
154 | } |
155 | |
156 | if (rr->first) { |
157 | @@ -298,23 +291,29 @@ void mon_event_count(void *info) |
158 | struct rdtgroup *rdtgrp, *entry; |
159 | struct rmid_read *rr = info; |
160 | struct list_head *head; |
161 | + u64 ret_val; |
162 | |
163 | rdtgrp = rr->rgrp; |
164 | |
165 | - if (__mon_event_count(rdtgrp->mon.rmid, rr)) |
166 | - return; |
167 | + ret_val = __mon_event_count(rdtgrp->mon.rmid, rr); |
168 | |
169 | /* |
170 | - * For Ctrl groups read data from child monitor groups. |
171 | + * For Ctrl groups read data from child monitor groups and |
172 | + * add them together. Count events which are read successfully. |
173 | + * Discard the rmid_read's reporting errors. |
174 | */ |
175 | head = &rdtgrp->mon.crdtgrp_list; |
176 | |
177 | if (rdtgrp->type == RDTCTRL_GROUP) { |
178 | list_for_each_entry(entry, head, mon.crdtgrp_list) { |
179 | - if (__mon_event_count(entry->mon.rmid, rr)) |
180 | - return; |
181 | + if (__mon_event_count(entry->mon.rmid, rr) == 0) |
182 | + ret_val = 0; |
183 | } |
184 | } |
185 | + |
186 | + /* Report error if none of rmid_reads are successful */ |
187 | + if (ret_val) |
188 | + rr->val = ret_val; |
189 | } |
190 | |
191 | /* |
192 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
193 | index 2a958dcc80f21..425444d080712 100644 |
194 | --- a/arch/x86/kvm/svm.c |
195 | +++ b/arch/x86/kvm/svm.c |
196 | @@ -516,6 +516,9 @@ static void recalc_intercepts(struct vcpu_svm *svm) |
197 | c->intercept_dr = h->intercept_dr | g->intercept_dr; |
198 | c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions; |
199 | c->intercept = h->intercept | g->intercept; |
200 | + |
201 | + c->intercept |= (1ULL << INTERCEPT_VMLOAD); |
202 | + c->intercept |= (1ULL << INTERCEPT_VMSAVE); |
203 | } |
204 | |
205 | static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) |
206 | @@ -1443,12 +1446,7 @@ static __init int svm_hardware_setup(void) |
207 | } |
208 | } |
209 | |
210 | - if (vgif) { |
211 | - if (!boot_cpu_has(X86_FEATURE_VGIF)) |
212 | - vgif = false; |
213 | - else |
214 | - pr_info("Virtual GIF supported\n"); |
215 | - } |
216 | + vgif = false; /* Disabled for CVE-2021-3653 */ |
217 | |
218 | return 0; |
219 | |
220 | @@ -3607,7 +3605,13 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, |
221 | svm->nested.intercept = nested_vmcb->control.intercept; |
222 | |
223 | svm_flush_tlb(&svm->vcpu, true); |
224 | - svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; |
225 | + |
226 | + svm->vmcb->control.int_ctl &= |
227 | + V_INTR_MASKING_MASK | V_GIF_ENABLE_MASK | V_GIF_MASK; |
228 | + |
229 | + svm->vmcb->control.int_ctl |= nested_vmcb->control.int_ctl & |
230 | + (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK); |
231 | + |
232 | if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) |
233 | svm->vcpu.arch.hflags |= HF_VINTR_MASK; |
234 | else |
235 | diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h |
236 | index a1919ec7fd108..55731dd0096f2 100644 |
237 | --- a/arch/x86/kvm/vmx/vmx.h |
238 | +++ b/arch/x86/kvm/vmx/vmx.h |
239 | @@ -512,7 +512,7 @@ static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx) |
240 | |
241 | static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) |
242 | { |
243 | - return vmx->secondary_exec_control & |
244 | + return secondary_exec_controls_get(vmx) & |
245 | SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; |
246 | } |
247 | |
248 | diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk |
249 | index fd1ab80be0dec..a4cf678cf5c80 100644 |
250 | --- a/arch/x86/tools/chkobjdump.awk |
251 | +++ b/arch/x86/tools/chkobjdump.awk |
252 | @@ -10,6 +10,7 @@ BEGIN { |
253 | |
254 | /^GNU objdump/ { |
255 | verstr = "" |
256 | + gsub(/\(.*\)/, ""); |
257 | for (i = 3; i <= NF; i++) |
258 | if (match($(i), "^[0-9]")) { |
259 | verstr = $(i); |
260 | diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c |
261 | index 9d78f29cf9967..0fe4f3ed72ca4 100644 |
262 | --- a/drivers/acpi/nfit/core.c |
263 | +++ b/drivers/acpi/nfit/core.c |
264 | @@ -2973,6 +2973,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, |
265 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
266 | struct nd_mapping_desc *mapping; |
267 | |
268 | + /* range index 0 == unmapped in SPA or invalid-SPA */ |
269 | + if (memdev->range_index == 0 || spa->range_index == 0) |
270 | + continue; |
271 | if (memdev->range_index != spa->range_index) |
272 | continue; |
273 | if (count >= ND_MAX_MAPPINGS) { |
274 | diff --git a/drivers/base/core.c b/drivers/base/core.c |
275 | index a119479fe3f42..8b651bfc1d88e 100644 |
276 | --- a/drivers/base/core.c |
277 | +++ b/drivers/base/core.c |
278 | @@ -1722,6 +1722,7 @@ void device_initialize(struct device *dev) |
279 | device_pm_init(dev); |
280 | set_dev_node(dev, -1); |
281 | #ifdef CONFIG_GENERIC_MSI_IRQ |
282 | + raw_spin_lock_init(&dev->msi_lock); |
283 | INIT_LIST_HEAD(&dev->msi_list); |
284 | #endif |
285 | INIT_LIST_HEAD(&dev->links.consumers); |
286 | diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c |
287 | index 839364371f9af..25e81b1a59a54 100644 |
288 | --- a/drivers/block/nbd.c |
289 | +++ b/drivers/block/nbd.c |
290 | @@ -797,6 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved) |
291 | { |
292 | struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); |
293 | |
294 | + /* don't abort one completed request */ |
295 | + if (blk_mq_request_completed(req)) |
296 | + return true; |
297 | + |
298 | mutex_lock(&cmd->lock); |
299 | cmd->status = BLK_STS_IOERR; |
300 | mutex_unlock(&cmd->lock); |
301 | @@ -2009,15 +2013,19 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) |
302 | { |
303 | mutex_lock(&nbd->config_lock); |
304 | nbd_disconnect(nbd); |
305 | - nbd_clear_sock(nbd); |
306 | - mutex_unlock(&nbd->config_lock); |
307 | + sock_shutdown(nbd); |
308 | /* |
309 | * Make sure recv thread has finished, so it does not drop the last |
310 | * config ref and try to destroy the workqueue from inside the work |
311 | - * queue. |
312 | + * queue. And this also ensure that we can safely call nbd_clear_que() |
313 | + * to cancel the inflight I/Os. |
314 | */ |
315 | if (nbd->recv_workq) |
316 | flush_workqueue(nbd->recv_workq); |
317 | + nbd_clear_que(nbd); |
318 | + nbd->task_setup = NULL; |
319 | + mutex_unlock(&nbd->config_lock); |
320 | + |
321 | if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, |
322 | &nbd->config->runtime_flags)) |
323 | nbd_config_put(nbd); |
324 | diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h |
325 | index 05fce48ceee0d..f7da816a5562a 100644 |
326 | --- a/drivers/gpu/drm/meson/meson_registers.h |
327 | +++ b/drivers/gpu/drm/meson/meson_registers.h |
328 | @@ -590,6 +590,11 @@ |
329 | #define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc |
330 | #define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd |
331 | |
332 | +/* osd1 HDR */ |
333 | +#define OSD1_HDR2_CTRL 0x38a0 |
334 | +#define OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN BIT(13) |
335 | +#define OSD1_HDR2_CTRL_REG_ONLY_MAT BIT(16) |
336 | + |
337 | /* osd2 scaler */ |
338 | #define OSD2_VSC_PHASE_STEP 0x3d00 |
339 | #define OSD2_VSC_INI_PHASE 0x3d01 |
340 | diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c |
341 | index 68cf2c2eca5fe..33698814c022f 100644 |
342 | --- a/drivers/gpu/drm/meson/meson_viu.c |
343 | +++ b/drivers/gpu/drm/meson/meson_viu.c |
344 | @@ -356,9 +356,14 @@ void meson_viu_init(struct meson_drm *priv) |
345 | if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || |
346 | meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) |
347 | meson_viu_load_matrix(priv); |
348 | - else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) |
349 | + else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { |
350 | meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff, |
351 | true); |
352 | + /* fix green/pink color distortion from vendor u-boot */ |
353 | + writel_bits_relaxed(OSD1_HDR2_CTRL_REG_ONLY_MAT | |
354 | + OSD1_HDR2_CTRL_VDIN0_HDR2_TOP_EN, 0, |
355 | + priv->io_base + _REG(OSD1_HDR2_CTRL)); |
356 | + } |
357 | |
358 | /* Initialize OSD1 fifo control register */ |
359 | reg = VIU_OSD_DDR_PRIORITY_URGENT | |
360 | diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c |
361 | index a3fec3df11b68..c9ae1895cd48a 100644 |
362 | --- a/drivers/i2c/i2c-dev.c |
363 | +++ b/drivers/i2c/i2c-dev.c |
364 | @@ -141,7 +141,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, |
365 | if (count > 8192) |
366 | count = 8192; |
367 | |
368 | - tmp = kmalloc(count, GFP_KERNEL); |
369 | + tmp = kzalloc(count, GFP_KERNEL); |
370 | if (tmp == NULL) |
371 | return -ENOMEM; |
372 | |
373 | @@ -150,7 +150,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, |
374 | |
375 | ret = i2c_master_recv(client, tmp, count); |
376 | if (ret >= 0) |
377 | - ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; |
378 | + if (copy_to_user(buf, tmp, ret)) |
379 | + ret = -EFAULT; |
380 | kfree(tmp); |
381 | return ret; |
382 | } |
383 | diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c |
384 | index 46e595eb889fa..2bd785e9e42ac 100644 |
385 | --- a/drivers/iio/adc/palmas_gpadc.c |
386 | +++ b/drivers/iio/adc/palmas_gpadc.c |
387 | @@ -656,8 +656,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc) |
388 | |
389 | adc_period = adc->auto_conversion_period; |
390 | for (i = 0; i < 16; ++i) { |
391 | - if (((1000 * (1 << i)) / 32) < adc_period) |
392 | - continue; |
393 | + if (((1000 * (1 << i)) / 32) >= adc_period) |
394 | + break; |
395 | } |
396 | if (i > 0) |
397 | i--; |
398 | diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c |
399 | index 2e66e4d586ff0..7a1a9fe470728 100644 |
400 | --- a/drivers/iio/adc/ti-ads7950.c |
401 | +++ b/drivers/iio/adc/ti-ads7950.c |
402 | @@ -569,7 +569,6 @@ static int ti_ads7950_probe(struct spi_device *spi) |
403 | st->ring_xfer.tx_buf = &st->tx_buf[0]; |
404 | st->ring_xfer.rx_buf = &st->rx_buf[0]; |
405 | /* len will be set later */ |
406 | - st->ring_xfer.cs_change = true; |
407 | |
408 | spi_message_add_tail(&st->ring_xfer, &st->ring_msg); |
409 | |
410 | diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c |
411 | index 7618cdf59efdb..05af6410ca131 100644 |
412 | --- a/drivers/iio/humidity/hdc100x.c |
413 | +++ b/drivers/iio/humidity/hdc100x.c |
414 | @@ -24,6 +24,8 @@ |
415 | #include <linux/iio/trigger_consumer.h> |
416 | #include <linux/iio/triggered_buffer.h> |
417 | |
418 | +#include <linux/time.h> |
419 | + |
420 | #define HDC100X_REG_TEMP 0x00 |
421 | #define HDC100X_REG_HUMIDITY 0x01 |
422 | |
423 | @@ -165,7 +167,7 @@ static int hdc100x_get_measurement(struct hdc100x_data *data, |
424 | struct iio_chan_spec const *chan) |
425 | { |
426 | struct i2c_client *client = data->client; |
427 | - int delay = data->adc_int_us[chan->address]; |
428 | + int delay = data->adc_int_us[chan->address] + 1*USEC_PER_MSEC; |
429 | int ret; |
430 | __be16 val; |
431 | |
432 | @@ -322,7 +324,7 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p) |
433 | struct iio_dev *indio_dev = pf->indio_dev; |
434 | struct hdc100x_data *data = iio_priv(indio_dev); |
435 | struct i2c_client *client = data->client; |
436 | - int delay = data->adc_int_us[0] + data->adc_int_us[1]; |
437 | + int delay = data->adc_int_us[0] + data->adc_int_us[1] + 2*USEC_PER_MSEC; |
438 | int ret; |
439 | |
440 | /* dual read starts at temp register */ |
441 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
442 | index 953d86ca6d2b2..a2a03df977046 100644 |
443 | --- a/drivers/iommu/intel-iommu.c |
444 | +++ b/drivers/iommu/intel-iommu.c |
445 | @@ -1853,7 +1853,7 @@ static inline int guestwidth_to_adjustwidth(int gaw) |
446 | static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, |
447 | int guest_width) |
448 | { |
449 | - int adjust_width, agaw; |
450 | + int adjust_width, agaw, cap_width; |
451 | unsigned long sagaw; |
452 | int err; |
453 | |
454 | @@ -1867,8 +1867,9 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, |
455 | domain_reserve_special_ranges(domain); |
456 | |
457 | /* calculate AGAW */ |
458 | - if (guest_width > cap_mgaw(iommu->cap)) |
459 | - guest_width = cap_mgaw(iommu->cap); |
460 | + cap_width = min_t(int, cap_mgaw(iommu->cap), agaw_to_width(iommu->agaw)); |
461 | + if (guest_width > cap_width) |
462 | + guest_width = cap_width; |
463 | domain->gaw = guest_width; |
464 | adjust_width = guestwidth_to_adjustwidth(guest_width); |
465 | agaw = width_to_agaw(adjust_width); |
466 | diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c |
467 | index bbec86b9418e6..19d1f1c51f97e 100644 |
468 | --- a/drivers/net/dsa/lan9303-core.c |
469 | +++ b/drivers/net/dsa/lan9303-core.c |
470 | @@ -557,12 +557,12 @@ static int lan9303_alr_make_entry_raw(struct lan9303 *chip, u32 dat0, u32 dat1) |
471 | return 0; |
472 | } |
473 | |
474 | -typedef void alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1, |
475 | - int portmap, void *ctx); |
476 | +typedef int alr_loop_cb_t(struct lan9303 *chip, u32 dat0, u32 dat1, |
477 | + int portmap, void *ctx); |
478 | |
479 | -static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) |
480 | +static int lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) |
481 | { |
482 | - int i; |
483 | + int ret = 0, i; |
484 | |
485 | mutex_lock(&chip->alr_mutex); |
486 | lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, |
487 | @@ -582,13 +582,17 @@ static void lan9303_alr_loop(struct lan9303 *chip, alr_loop_cb_t *cb, void *ctx) |
488 | LAN9303_ALR_DAT1_PORT_BITOFFS; |
489 | portmap = alrport_2_portmap[alrport]; |
490 | |
491 | - cb(chip, dat0, dat1, portmap, ctx); |
492 | + ret = cb(chip, dat0, dat1, portmap, ctx); |
493 | + if (ret) |
494 | + break; |
495 | |
496 | lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, |
497 | LAN9303_ALR_CMD_GET_NEXT); |
498 | lan9303_write_switch_reg(chip, LAN9303_SWE_ALR_CMD, 0); |
499 | } |
500 | mutex_unlock(&chip->alr_mutex); |
501 | + |
502 | + return ret; |
503 | } |
504 | |
505 | static void alr_reg_to_mac(u32 dat0, u32 dat1, u8 mac[6]) |
506 | @@ -606,18 +610,20 @@ struct del_port_learned_ctx { |
507 | }; |
508 | |
509 | /* Clear learned (non-static) entry on given port */ |
510 | -static void alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0, |
511 | - u32 dat1, int portmap, void *ctx) |
512 | +static int alr_loop_cb_del_port_learned(struct lan9303 *chip, u32 dat0, |
513 | + u32 dat1, int portmap, void *ctx) |
514 | { |
515 | struct del_port_learned_ctx *del_ctx = ctx; |
516 | int port = del_ctx->port; |
517 | |
518 | if (((BIT(port) & portmap) == 0) || (dat1 & LAN9303_ALR_DAT1_STATIC)) |
519 | - return; |
520 | + return 0; |
521 | |
522 | /* learned entries has only one port, we can just delete */ |
523 | dat1 &= ~LAN9303_ALR_DAT1_VALID; /* delete entry */ |
524 | lan9303_alr_make_entry_raw(chip, dat0, dat1); |
525 | + |
526 | + return 0; |
527 | } |
528 | |
529 | struct port_fdb_dump_ctx { |
530 | @@ -626,19 +632,19 @@ struct port_fdb_dump_ctx { |
531 | dsa_fdb_dump_cb_t *cb; |
532 | }; |
533 | |
534 | -static void alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0, |
535 | - u32 dat1, int portmap, void *ctx) |
536 | +static int alr_loop_cb_fdb_port_dump(struct lan9303 *chip, u32 dat0, |
537 | + u32 dat1, int portmap, void *ctx) |
538 | { |
539 | struct port_fdb_dump_ctx *dump_ctx = ctx; |
540 | u8 mac[ETH_ALEN]; |
541 | bool is_static; |
542 | |
543 | if ((BIT(dump_ctx->port) & portmap) == 0) |
544 | - return; |
545 | + return 0; |
546 | |
547 | alr_reg_to_mac(dat0, dat1, mac); |
548 | is_static = !!(dat1 & LAN9303_ALR_DAT1_STATIC); |
549 | - dump_ctx->cb(mac, 0, is_static, dump_ctx->data); |
550 | + return dump_ctx->cb(mac, 0, is_static, dump_ctx->data); |
551 | } |
552 | |
553 | /* Set a static ALR entry. Delete entry if port_map is zero */ |
554 | @@ -1210,9 +1216,7 @@ static int lan9303_port_fdb_dump(struct dsa_switch *ds, int port, |
555 | }; |
556 | |
557 | dev_dbg(chip->dev, "%s(%d)\n", __func__, port); |
558 | - lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx); |
559 | - |
560 | - return 0; |
561 | + return lan9303_alr_loop(chip, alr_loop_cb_fdb_port_dump, &dump_ctx); |
562 | } |
563 | |
564 | static int lan9303_port_mdb_prepare(struct dsa_switch *ds, int port, |
565 | diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c |
566 | index dc75e798dbff8..af3d56636a076 100644 |
567 | --- a/drivers/net/dsa/lantiq_gswip.c |
568 | +++ b/drivers/net/dsa/lantiq_gswip.c |
569 | @@ -1399,11 +1399,17 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, |
570 | addr[1] = mac_bridge.key[2] & 0xff; |
571 | addr[0] = (mac_bridge.key[2] >> 8) & 0xff; |
572 | if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) { |
573 | - if (mac_bridge.val[0] & BIT(port)) |
574 | - cb(addr, 0, true, data); |
575 | + if (mac_bridge.val[0] & BIT(port)) { |
576 | + err = cb(addr, 0, true, data); |
577 | + if (err) |
578 | + return err; |
579 | + } |
580 | } else { |
581 | - if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) |
582 | - cb(addr, 0, false, data); |
583 | + if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) { |
584 | + err = cb(addr, 0, false, data); |
585 | + if (err) |
586 | + return err; |
587 | + } |
588 | } |
589 | } |
590 | return 0; |
591 | diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h |
592 | index 061142b183cb9..d6013410dc88b 100644 |
593 | --- a/drivers/net/dsa/microchip/ksz_common.h |
594 | +++ b/drivers/net/dsa/microchip/ksz_common.h |
595 | @@ -215,12 +215,8 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val) |
596 | int ret; |
597 | |
598 | ret = regmap_bulk_read(dev->regmap[2], reg, value, 2); |
599 | - if (!ret) { |
600 | - /* Ick! ToDo: Add 64bit R/W to regmap on 32bit systems */ |
601 | - value[0] = swab32(value[0]); |
602 | - value[1] = swab32(value[1]); |
603 | - *val = swab64((u64)*value); |
604 | - } |
605 | + if (!ret) |
606 | + *val = (u64)value[0] << 32 | value[1]; |
607 | |
608 | return ret; |
609 | } |
610 | diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c |
611 | index 071e5015bf91d..e1a3c33fdad90 100644 |
612 | --- a/drivers/net/dsa/mt7530.c |
613 | +++ b/drivers/net/dsa/mt7530.c |
614 | @@ -45,6 +45,7 @@ static const struct mt7530_mib_desc mt7530_mib[] = { |
615 | MIB_DESC(2, 0x48, "TxBytes"), |
616 | MIB_DESC(1, 0x60, "RxDrop"), |
617 | MIB_DESC(1, 0x64, "RxFiltering"), |
618 | + MIB_DESC(1, 0x68, "RxUnicast"), |
619 | MIB_DESC(1, 0x6c, "RxMulticast"), |
620 | MIB_DESC(1, 0x70, "RxBroadcast"), |
621 | MIB_DESC(1, 0x74, "RxAlignErr"), |
622 | diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c |
623 | index a07d8051ec3e8..eab861352bf23 100644 |
624 | --- a/drivers/net/dsa/sja1105/sja1105_main.c |
625 | +++ b/drivers/net/dsa/sja1105/sja1105_main.c |
626 | @@ -1312,7 +1312,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port, |
627 | /* We need to hide the dsa_8021q VLANs from the user. */ |
628 | if (!dsa_port_is_vlan_filtering(&ds->ports[port])) |
629 | l2_lookup.vlanid = 0; |
630 | - cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); |
631 | + rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data); |
632 | + if (rc) |
633 | + return rc; |
634 | } |
635 | return 0; |
636 | } |
637 | diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c |
638 | index cda9b9a8392a2..dc902e371c2cf 100644 |
639 | --- a/drivers/net/ethernet/intel/iavf/iavf_main.c |
640 | +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c |
641 | @@ -1499,11 +1499,6 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter) |
642 | set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); |
643 | |
644 | iavf_map_rings_to_vectors(adapter); |
645 | - |
646 | - if (RSS_AQ(adapter)) |
647 | - adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; |
648 | - else |
649 | - err = iavf_init_rss(adapter); |
650 | err: |
651 | return err; |
652 | } |
653 | @@ -2179,6 +2174,14 @@ continue_reset: |
654 | goto reset_err; |
655 | } |
656 | |
657 | + if (RSS_AQ(adapter)) { |
658 | + adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS; |
659 | + } else { |
660 | + err = iavf_init_rss(adapter); |
661 | + if (err) |
662 | + goto reset_err; |
663 | + } |
664 | + |
665 | adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG; |
666 | adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS; |
667 | |
668 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c |
669 | index eb2e57ff08a60..dc36b0db37222 100644 |
670 | --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c |
671 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c |
672 | @@ -1017,12 +1017,19 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) |
673 | MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER); |
674 | mlx5_eq_notifier_register(dev, &tracer->nb); |
675 | |
676 | - mlx5_fw_tracer_start(tracer); |
677 | - |
678 | + err = mlx5_fw_tracer_start(tracer); |
679 | + if (err) { |
680 | + mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err); |
681 | + goto err_notifier_unregister; |
682 | + } |
683 | return 0; |
684 | |
685 | +err_notifier_unregister: |
686 | + mlx5_eq_notifier_unregister(dev, &tracer->nb); |
687 | + mlx5_core_destroy_mkey(dev, &tracer->buff.mkey); |
688 | err_dealloc_pd: |
689 | mlx5_core_dealloc_pd(dev, tracer->buff.pdn); |
690 | + cancel_work_sync(&tracer->read_fw_strings_work); |
691 | return err; |
692 | } |
693 | |
694 | diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c |
695 | index 79d74763cf24a..2a78084aeaedb 100644 |
696 | --- a/drivers/net/ieee802154/mac802154_hwsim.c |
697 | +++ b/drivers/net/ieee802154/mac802154_hwsim.c |
698 | @@ -418,7 +418,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info) |
699 | struct hwsim_edge *e; |
700 | u32 v0, v1; |
701 | |
702 | - if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && |
703 | + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || |
704 | !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) |
705 | return -EINVAL; |
706 | |
707 | @@ -528,14 +528,14 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info) |
708 | u32 v0, v1; |
709 | u8 lqi; |
710 | |
711 | - if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] && |
712 | + if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] || |
713 | !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE]) |
714 | return -EINVAL; |
715 | |
716 | if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL)) |
717 | return -EINVAL; |
718 | |
719 | - if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] && |
720 | + if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] || |
721 | !edge_attrs[MAC802154_HWSIM_EDGE_ATTR_LQI]) |
722 | return -EINVAL; |
723 | |
724 | diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c |
725 | index 910ab2182158d..f95bd1b0fb965 100644 |
726 | --- a/drivers/net/phy/micrel.c |
727 | +++ b/drivers/net/phy/micrel.c |
728 | @@ -1184,8 +1184,6 @@ static struct phy_driver ksphy_driver[] = { |
729 | .name = "Micrel KSZ87XX Switch", |
730 | /* PHY_BASIC_FEATURES */ |
731 | .config_init = kszphy_config_init, |
732 | - .config_aneg = ksz8873mll_config_aneg, |
733 | - .read_status = ksz8873mll_read_status, |
734 | .match_phy_device = ksz8795_match_phy_device, |
735 | .suspend = genphy_suspend, |
736 | .resume = genphy_resume, |
737 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
738 | index b7e2b4a0f3c66..c6c41a7836c93 100644 |
739 | --- a/drivers/net/ppp/ppp_generic.c |
740 | +++ b/drivers/net/ppp/ppp_generic.c |
741 | @@ -1121,7 +1121,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, |
742 | * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows |
743 | * userspace to infer the device name using to the PPPIOCGUNIT ioctl. |
744 | */ |
745 | - if (!tb[IFLA_IFNAME]) |
746 | + if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) |
747 | conf.ifname_is_set = false; |
748 | |
749 | err = ppp_dev_configure(src_net, dev, &conf); |
750 | diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c |
751 | index cca0a3ba1d2c2..e6da7b2b34729 100644 |
752 | --- a/drivers/nvdimm/namespace_devs.c |
753 | +++ b/drivers/nvdimm/namespace_devs.c |
754 | @@ -2486,7 +2486,7 @@ static void deactivate_labels(void *region) |
755 | |
756 | static int init_active_labels(struct nd_region *nd_region) |
757 | { |
758 | - int i; |
759 | + int i, rc = 0; |
760 | |
761 | for (i = 0; i < nd_region->ndr_mappings; i++) { |
762 | struct nd_mapping *nd_mapping = &nd_region->mapping[i]; |
763 | @@ -2505,13 +2505,14 @@ static int init_active_labels(struct nd_region *nd_region) |
764 | else if (test_bit(NDD_ALIASING, &nvdimm->flags)) |
765 | /* fail, labels needed to disambiguate dpa */; |
766 | else |
767 | - return 0; |
768 | + continue; |
769 | |
770 | dev_err(&nd_region->dev, "%s: is %s, failing probe\n", |
771 | dev_name(&nd_mapping->nvdimm->dev), |
772 | test_bit(NDD_LOCKED, &nvdimm->flags) |
773 | ? "locked" : "disabled"); |
774 | - return -ENXIO; |
775 | + rc = -ENXIO; |
776 | + goto out; |
777 | } |
778 | nd_mapping->ndd = ndd; |
779 | atomic_inc(&nvdimm->busy); |
780 | @@ -2545,13 +2546,17 @@ static int init_active_labels(struct nd_region *nd_region) |
781 | break; |
782 | } |
783 | |
784 | - if (i < nd_region->ndr_mappings) { |
785 | + if (i < nd_region->ndr_mappings) |
786 | + rc = -ENOMEM; |
787 | + |
788 | +out: |
789 | + if (rc) { |
790 | deactivate_labels(nd_region); |
791 | - return -ENOMEM; |
792 | + return rc; |
793 | } |
794 | |
795 | return devm_add_action_or_reset(&nd_region->dev, deactivate_labels, |
796 | - nd_region); |
797 | + nd_region); |
798 | } |
799 | |
800 | int nd_region_register_namespaces(struct nd_region *nd_region, int *err) |
801 | diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c |
802 | index 771041784e645..5bb37671a86ad 100644 |
803 | --- a/drivers/pci/msi.c |
804 | +++ b/drivers/pci/msi.c |
805 | @@ -170,24 +170,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) |
806 | * reliably as devices without an INTx disable bit will then generate a |
807 | * level IRQ which will never be cleared. |
808 | */ |
809 | -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
810 | +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
811 | { |
812 | - u32 mask_bits = desc->masked; |
813 | + raw_spinlock_t *lock = &desc->dev->msi_lock; |
814 | + unsigned long flags; |
815 | |
816 | if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) |
817 | - return 0; |
818 | + return; |
819 | |
820 | - mask_bits &= ~mask; |
821 | - mask_bits |= flag; |
822 | + raw_spin_lock_irqsave(lock, flags); |
823 | + desc->masked &= ~mask; |
824 | + desc->masked |= flag; |
825 | pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, |
826 | - mask_bits); |
827 | - |
828 | - return mask_bits; |
829 | + desc->masked); |
830 | + raw_spin_unlock_irqrestore(lock, flags); |
831 | } |
832 | |
833 | static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
834 | { |
835 | - desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); |
836 | + __pci_msi_desc_mask_irq(desc, mask, flag); |
837 | } |
838 | |
839 | static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) |
840 | @@ -316,13 +317,31 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
841 | /* Don't touch the hardware now */ |
842 | } else if (entry->msi_attrib.is_msix) { |
843 | void __iomem *base = pci_msix_desc_addr(entry); |
844 | + bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); |
845 | |
846 | if (!base) |
847 | goto skip; |
848 | |
849 | + /* |
850 | + * The specification mandates that the entry is masked |
851 | + * when the message is modified: |
852 | + * |
853 | + * "If software changes the Address or Data value of an |
854 | + * entry while the entry is unmasked, the result is |
855 | + * undefined." |
856 | + */ |
857 | + if (unmasked) |
858 | + __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); |
859 | + |
860 | writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); |
861 | writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); |
862 | writel(msg->data, base + PCI_MSIX_ENTRY_DATA); |
863 | + |
864 | + if (unmasked) |
865 | + __pci_msix_desc_mask_irq(entry, 0); |
866 | + |
867 | + /* Ensure that the writes are visible in the device */ |
868 | + readl(base + PCI_MSIX_ENTRY_DATA); |
869 | } else { |
870 | int pos = dev->msi_cap; |
871 | u16 msgctl; |
872 | @@ -343,6 +362,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
873 | pci_write_config_word(dev, pos + PCI_MSI_DATA_32, |
874 | msg->data); |
875 | } |
876 | + /* Ensure that the writes are visible in the device */ |
877 | + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); |
878 | } |
879 | |
880 | skip: |
881 | @@ -642,21 +663,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, |
882 | /* Configure MSI capability structure */ |
883 | ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); |
884 | if (ret) { |
885 | - msi_mask_irq(entry, mask, ~mask); |
886 | + msi_mask_irq(entry, mask, 0); |
887 | free_msi_irqs(dev); |
888 | return ret; |
889 | } |
890 | |
891 | ret = msi_verify_entries(dev); |
892 | if (ret) { |
893 | - msi_mask_irq(entry, mask, ~mask); |
894 | + msi_mask_irq(entry, mask, 0); |
895 | free_msi_irqs(dev); |
896 | return ret; |
897 | } |
898 | |
899 | ret = populate_msi_sysfs(dev); |
900 | if (ret) { |
901 | - msi_mask_irq(entry, mask, ~mask); |
902 | + msi_mask_irq(entry, mask, 0); |
903 | free_msi_irqs(dev); |
904 | return ret; |
905 | } |
906 | @@ -697,6 +718,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
907 | { |
908 | struct irq_affinity_desc *curmsk, *masks = NULL; |
909 | struct msi_desc *entry; |
910 | + void __iomem *addr; |
911 | int ret, i; |
912 | int vec_count = pci_msix_vec_count(dev); |
913 | |
914 | @@ -717,6 +739,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
915 | |
916 | entry->msi_attrib.is_msix = 1; |
917 | entry->msi_attrib.is_64 = 1; |
918 | + |
919 | if (entries) |
920 | entry->msi_attrib.entry_nr = entries[i].entry; |
921 | else |
922 | @@ -728,6 +751,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
923 | entry->msi_attrib.default_irq = dev->irq; |
924 | entry->mask_base = base; |
925 | |
926 | + addr = pci_msix_desc_addr(entry); |
927 | + if (addr) |
928 | + entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); |
929 | + |
930 | list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); |
931 | if (masks) |
932 | curmsk++; |
933 | @@ -738,26 +765,25 @@ out: |
934 | return ret; |
935 | } |
936 | |
937 | -static void msix_program_entries(struct pci_dev *dev, |
938 | - struct msix_entry *entries) |
939 | +static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) |
940 | { |
941 | struct msi_desc *entry; |
942 | - int i = 0; |
943 | - void __iomem *desc_addr; |
944 | |
945 | for_each_pci_msi_entry(entry, dev) { |
946 | - if (entries) |
947 | - entries[i++].vector = entry->irq; |
948 | + if (entries) { |
949 | + entries->vector = entry->irq; |
950 | + entries++; |
951 | + } |
952 | + } |
953 | +} |
954 | |
955 | - desc_addr = pci_msix_desc_addr(entry); |
956 | - if (desc_addr) |
957 | - entry->masked = readl(desc_addr + |
958 | - PCI_MSIX_ENTRY_VECTOR_CTRL); |
959 | - else |
960 | - entry->masked = 0; |
961 | +static void msix_mask_all(void __iomem *base, int tsize) |
962 | +{ |
963 | + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; |
964 | + int i; |
965 | |
966 | - msix_mask_irq(entry, 1); |
967 | - } |
968 | + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) |
969 | + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); |
970 | } |
971 | |
972 | /** |
973 | @@ -774,22 +800,33 @@ static void msix_program_entries(struct pci_dev *dev, |
974 | static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, |
975 | int nvec, struct irq_affinity *affd) |
976 | { |
977 | - int ret; |
978 | - u16 control; |
979 | void __iomem *base; |
980 | + int ret, tsize; |
981 | + u16 control; |
982 | |
983 | - /* Ensure MSI-X is disabled while it is set up */ |
984 | - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
985 | + /* |
986 | + * Some devices require MSI-X to be enabled before the MSI-X |
987 | + * registers can be accessed. Mask all the vectors to prevent |
988 | + * interrupts coming in before they're fully set up. |
989 | + */ |
990 | + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | |
991 | + PCI_MSIX_FLAGS_ENABLE); |
992 | |
993 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); |
994 | /* Request & Map MSI-X table region */ |
995 | - base = msix_map_region(dev, msix_table_size(control)); |
996 | - if (!base) |
997 | - return -ENOMEM; |
998 | + tsize = msix_table_size(control); |
999 | + base = msix_map_region(dev, tsize); |
1000 | + if (!base) { |
1001 | + ret = -ENOMEM; |
1002 | + goto out_disable; |
1003 | + } |
1004 | + |
1005 | + /* Ensure that all table entries are masked. */ |
1006 | + msix_mask_all(base, tsize); |
1007 | |
1008 | ret = msix_setup_entries(dev, base, entries, nvec, affd); |
1009 | if (ret) |
1010 | - return ret; |
1011 | + goto out_disable; |
1012 | |
1013 | ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); |
1014 | if (ret) |
1015 | @@ -800,15 +837,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, |
1016 | if (ret) |
1017 | goto out_free; |
1018 | |
1019 | - /* |
1020 | - * Some devices require MSI-X to be enabled before we can touch the |
1021 | - * MSI-X registers. We need to mask all the vectors to prevent |
1022 | - * interrupts coming in before they're fully set up. |
1023 | - */ |
1024 | - pci_msix_clear_and_set_ctrl(dev, 0, |
1025 | - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); |
1026 | - |
1027 | - msix_program_entries(dev, entries); |
1028 | + msix_update_entries(dev, entries); |
1029 | |
1030 | ret = populate_msi_sysfs(dev); |
1031 | if (ret) |
1032 | @@ -842,6 +871,9 @@ out_avail: |
1033 | out_free: |
1034 | free_msi_irqs(dev); |
1035 | |
1036 | +out_disable: |
1037 | + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
1038 | + |
1039 | return ret; |
1040 | } |
1041 | |
1042 | @@ -929,8 +961,7 @@ static void pci_msi_shutdown(struct pci_dev *dev) |
1043 | |
1044 | /* Return the device with MSI unmasked as initial states */ |
1045 | mask = msi_mask(desc->msi_attrib.multi_cap); |
1046 | - /* Keep cached state to be restored */ |
1047 | - __pci_msi_desc_mask_irq(desc, mask, ~mask); |
1048 | + msi_mask_irq(desc, mask, 0); |
1049 | |
1050 | /* Restore dev->irq to its default pin-assertion IRQ */ |
1051 | dev->irq = desc->msi_attrib.default_irq; |
1052 | @@ -1015,10 +1046,8 @@ static void pci_msix_shutdown(struct pci_dev *dev) |
1053 | } |
1054 | |
1055 | /* Return the device with MSI-X masked as initial states */ |
1056 | - for_each_pci_msi_entry(entry, dev) { |
1057 | - /* Keep cached states to be restored */ |
1058 | + for_each_pci_msi_entry(entry, dev) |
1059 | __pci_msix_desc_mask_irq(entry, 1); |
1060 | - } |
1061 | |
1062 | pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
1063 | pci_intx_for_msi(dev, 1); |
1064 | diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c |
1065 | index c32daf087640a..5db6f7394ef2b 100644 |
1066 | --- a/drivers/platform/x86/pcengines-apuv2.c |
1067 | +++ b/drivers/platform/x86/pcengines-apuv2.c |
1068 | @@ -78,7 +78,6 @@ static const struct gpio_led apu2_leds[] = { |
1069 | { .name = "apu:green:1" }, |
1070 | { .name = "apu:green:2" }, |
1071 | { .name = "apu:green:3" }, |
1072 | - { .name = "apu:simswap" }, |
1073 | }; |
1074 | |
1075 | static const struct gpio_led_platform_data apu2_leds_pdata = { |
1076 | @@ -95,8 +94,7 @@ static struct gpiod_lookup_table gpios_led_table = { |
1077 | NULL, 1, GPIO_ACTIVE_LOW), |
1078 | GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_LED3, |
1079 | NULL, 2, GPIO_ACTIVE_LOW), |
1080 | - GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_SIMSWAP, |
1081 | - NULL, 3, GPIO_ACTIVE_LOW), |
1082 | + {} /* Terminating entry */ |
1083 | } |
1084 | }; |
1085 | |
1086 | @@ -126,6 +124,7 @@ static struct gpiod_lookup_table gpios_key_table = { |
1087 | .table = { |
1088 | GPIO_LOOKUP_IDX(AMD_FCH_GPIO_DRIVER_NAME, APU2_GPIO_LINE_MODESW, |
1089 | NULL, 0, GPIO_ACTIVE_LOW), |
1090 | + {} /* Terminating entry */ |
1091 | } |
1092 | }; |
1093 | |
1094 | diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
1095 | index de825df4abf6a..87cfadd70d0db 100644 |
1096 | --- a/drivers/xen/events/events_base.c |
1097 | +++ b/drivers/xen/events/events_base.c |
1098 | @@ -134,12 +134,12 @@ static void disable_dynirq(struct irq_data *data); |
1099 | |
1100 | static DEFINE_PER_CPU(unsigned int, irq_epoch); |
1101 | |
1102 | -static void clear_evtchn_to_irq_row(unsigned row) |
1103 | +static void clear_evtchn_to_irq_row(int *evtchn_row) |
1104 | { |
1105 | unsigned col; |
1106 | |
1107 | for (col = 0; col < EVTCHN_PER_ROW; col++) |
1108 | - WRITE_ONCE(evtchn_to_irq[row][col], -1); |
1109 | + WRITE_ONCE(evtchn_row[col], -1); |
1110 | } |
1111 | |
1112 | static void clear_evtchn_to_irq_all(void) |
1113 | @@ -149,7 +149,7 @@ static void clear_evtchn_to_irq_all(void) |
1114 | for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { |
1115 | if (evtchn_to_irq[row] == NULL) |
1116 | continue; |
1117 | - clear_evtchn_to_irq_row(row); |
1118 | + clear_evtchn_to_irq_row(evtchn_to_irq[row]); |
1119 | } |
1120 | } |
1121 | |
1122 | @@ -157,6 +157,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) |
1123 | { |
1124 | unsigned row; |
1125 | unsigned col; |
1126 | + int *evtchn_row; |
1127 | |
1128 | if (evtchn >= xen_evtchn_max_channels()) |
1129 | return -EINVAL; |
1130 | @@ -169,11 +170,18 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) |
1131 | if (irq == -1) |
1132 | return 0; |
1133 | |
1134 | - evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); |
1135 | - if (evtchn_to_irq[row] == NULL) |
1136 | + evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); |
1137 | + if (evtchn_row == NULL) |
1138 | return -ENOMEM; |
1139 | |
1140 | - clear_evtchn_to_irq_row(row); |
1141 | + clear_evtchn_to_irq_row(evtchn_row); |
1142 | + |
1143 | + /* |
1144 | + * We've prepared an empty row for the mapping. If a different |
1145 | + * thread was faster inserting it, we can drop ours. |
1146 | + */ |
1147 | + if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) |
1148 | + free_page((unsigned long) evtchn_row); |
1149 | } |
1150 | |
1151 | WRITE_ONCE(evtchn_to_irq[row][col], irq); |
1152 | diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c |
1153 | index a6047caf77ecb..a49bf1fbaea82 100644 |
1154 | --- a/fs/ceph/caps.c |
1155 | +++ b/fs/ceph/caps.c |
1156 | @@ -4053,12 +4053,20 @@ bad: |
1157 | |
1158 | /* |
1159 | * Delayed work handler to process end of delayed cap release LRU list. |
1160 | + * |
1161 | + * If new caps are added to the list while processing it, these won't get |
1162 | + * processed in this run. In this case, the ci->i_hold_caps_max will be |
1163 | + * returned so that the work can be scheduled accordingly. |
1164 | */ |
1165 | -void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) |
1166 | +unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) |
1167 | { |
1168 | struct inode *inode; |
1169 | struct ceph_inode_info *ci; |
1170 | int flags = CHECK_CAPS_NODELAY; |
1171 | + struct ceph_mount_options *opt = mdsc->fsc->mount_options; |
1172 | + unsigned long delay_max = opt->caps_wanted_delay_max * HZ; |
1173 | + unsigned long loop_start = jiffies; |
1174 | + unsigned long delay = 0; |
1175 | |
1176 | dout("check_delayed_caps\n"); |
1177 | while (1) { |
1178 | @@ -4068,6 +4076,11 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) |
1179 | ci = list_first_entry(&mdsc->cap_delay_list, |
1180 | struct ceph_inode_info, |
1181 | i_cap_delay_list); |
1182 | + if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { |
1183 | + dout("%s caps added recently. Exiting loop", __func__); |
1184 | + delay = ci->i_hold_caps_max; |
1185 | + break; |
1186 | + } |
1187 | if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && |
1188 | time_before(jiffies, ci->i_hold_caps_max)) |
1189 | break; |
1190 | @@ -4084,6 +4097,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) |
1191 | } |
1192 | } |
1193 | spin_unlock(&mdsc->cap_delay_lock); |
1194 | + |
1195 | + return delay; |
1196 | } |
1197 | |
1198 | /* |
1199 | diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c |
1200 | index 0f21073a51a1b..1ef370913c007 100644 |
1201 | --- a/fs/ceph/mds_client.c |
1202 | +++ b/fs/ceph/mds_client.c |
1203 | @@ -4049,22 +4049,29 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc) |
1204 | } |
1205 | |
1206 | /* |
1207 | - * delayed work -- periodically trim expired leases, renew caps with mds |
1208 | + * delayed work -- periodically trim expired leases, renew caps with mds. If |
1209 | + * the @delay parameter is set to 0 or if it's more than 5 secs, the default |
1210 | + * workqueue delay value of 5 secs will be used. |
1211 | */ |
1212 | -static void schedule_delayed(struct ceph_mds_client *mdsc) |
1213 | +static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay) |
1214 | { |
1215 | - int delay = 5; |
1216 | - unsigned hz = round_jiffies_relative(HZ * delay); |
1217 | - schedule_delayed_work(&mdsc->delayed_work, hz); |
1218 | + unsigned long max_delay = HZ * 5; |
1219 | + |
1220 | + /* 5 secs default delay */ |
1221 | + if (!delay || (delay > max_delay)) |
1222 | + delay = max_delay; |
1223 | + schedule_delayed_work(&mdsc->delayed_work, |
1224 | + round_jiffies_relative(delay)); |
1225 | } |
1226 | |
1227 | static void delayed_work(struct work_struct *work) |
1228 | { |
1229 | - int i; |
1230 | struct ceph_mds_client *mdsc = |
1231 | container_of(work, struct ceph_mds_client, delayed_work.work); |
1232 | + unsigned long delay; |
1233 | int renew_interval; |
1234 | int renew_caps; |
1235 | + int i; |
1236 | |
1237 | dout("mdsc delayed_work\n"); |
1238 | |
1239 | @@ -4119,7 +4126,7 @@ static void delayed_work(struct work_struct *work) |
1240 | } |
1241 | mutex_unlock(&mdsc->mutex); |
1242 | |
1243 | - ceph_check_delayed_caps(mdsc); |
1244 | + delay = ceph_check_delayed_caps(mdsc); |
1245 | |
1246 | ceph_queue_cap_reclaim_work(mdsc); |
1247 | |
1248 | @@ -4127,7 +4134,7 @@ static void delayed_work(struct work_struct *work) |
1249 | |
1250 | maybe_recover_session(mdsc); |
1251 | |
1252 | - schedule_delayed(mdsc); |
1253 | + schedule_delayed(mdsc, delay); |
1254 | } |
1255 | |
1256 | int ceph_mdsc_init(struct ceph_fs_client *fsc) |
1257 | @@ -4600,7 +4607,7 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) |
1258 | mdsc->mdsmap->m_epoch); |
1259 | |
1260 | mutex_unlock(&mdsc->mutex); |
1261 | - schedule_delayed(mdsc); |
1262 | + schedule_delayed(mdsc, 0); |
1263 | return; |
1264 | |
1265 | bad_unlock: |
1266 | diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c |
1267 | index 923be9399b21c..e1b9b224fcb28 100644 |
1268 | --- a/fs/ceph/snap.c |
1269 | +++ b/fs/ceph/snap.c |
1270 | @@ -60,24 +60,26 @@ |
1271 | /* |
1272 | * increase ref count for the realm |
1273 | * |
1274 | - * caller must hold snap_rwsem for write. |
1275 | + * caller must hold snap_rwsem. |
1276 | */ |
1277 | void ceph_get_snap_realm(struct ceph_mds_client *mdsc, |
1278 | struct ceph_snap_realm *realm) |
1279 | { |
1280 | - dout("get_realm %p %d -> %d\n", realm, |
1281 | - atomic_read(&realm->nref), atomic_read(&realm->nref)+1); |
1282 | + lockdep_assert_held(&mdsc->snap_rwsem); |
1283 | + |
1284 | /* |
1285 | - * since we _only_ increment realm refs or empty the empty |
1286 | - * list with snap_rwsem held, adjusting the empty list here is |
1287 | - * safe. we do need to protect against concurrent empty list |
1288 | - * additions, however. |
1289 | + * The 0->1 and 1->0 transitions must take the snap_empty_lock |
1290 | + * atomically with the refcount change. Go ahead and bump the |
1291 | + * nref here, unless it's 0, in which case we take the spinlock |
1292 | + * and then do the increment and remove it from the list. |
1293 | */ |
1294 | - if (atomic_inc_return(&realm->nref) == 1) { |
1295 | - spin_lock(&mdsc->snap_empty_lock); |
1296 | + if (atomic_inc_not_zero(&realm->nref)) |
1297 | + return; |
1298 | + |
1299 | + spin_lock(&mdsc->snap_empty_lock); |
1300 | + if (atomic_inc_return(&realm->nref) == 1) |
1301 | list_del_init(&realm->empty_item); |
1302 | - spin_unlock(&mdsc->snap_empty_lock); |
1303 | - } |
1304 | + spin_unlock(&mdsc->snap_empty_lock); |
1305 | } |
1306 | |
1307 | static void __insert_snap_realm(struct rb_root *root, |
1308 | @@ -113,6 +115,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm( |
1309 | { |
1310 | struct ceph_snap_realm *realm; |
1311 | |
1312 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1313 | + |
1314 | realm = kzalloc(sizeof(*realm), GFP_NOFS); |
1315 | if (!realm) |
1316 | return ERR_PTR(-ENOMEM); |
1317 | @@ -135,7 +139,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm( |
1318 | /* |
1319 | * lookup the realm rooted at @ino. |
1320 | * |
1321 | - * caller must hold snap_rwsem for write. |
1322 | + * caller must hold snap_rwsem. |
1323 | */ |
1324 | static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc, |
1325 | u64 ino) |
1326 | @@ -143,6 +147,8 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc, |
1327 | struct rb_node *n = mdsc->snap_realms.rb_node; |
1328 | struct ceph_snap_realm *r; |
1329 | |
1330 | + lockdep_assert_held(&mdsc->snap_rwsem); |
1331 | + |
1332 | while (n) { |
1333 | r = rb_entry(n, struct ceph_snap_realm, node); |
1334 | if (ino < r->ino) |
1335 | @@ -176,6 +182,8 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc, |
1336 | static void __destroy_snap_realm(struct ceph_mds_client *mdsc, |
1337 | struct ceph_snap_realm *realm) |
1338 | { |
1339 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1340 | + |
1341 | dout("__destroy_snap_realm %p %llx\n", realm, realm->ino); |
1342 | |
1343 | rb_erase(&realm->node, &mdsc->snap_realms); |
1344 | @@ -198,28 +206,30 @@ static void __destroy_snap_realm(struct ceph_mds_client *mdsc, |
1345 | static void __put_snap_realm(struct ceph_mds_client *mdsc, |
1346 | struct ceph_snap_realm *realm) |
1347 | { |
1348 | - dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, |
1349 | - atomic_read(&realm->nref), atomic_read(&realm->nref)-1); |
1350 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1351 | + |
1352 | + /* |
1353 | + * We do not require the snap_empty_lock here, as any caller that |
1354 | + * increments the value must hold the snap_rwsem. |
1355 | + */ |
1356 | if (atomic_dec_and_test(&realm->nref)) |
1357 | __destroy_snap_realm(mdsc, realm); |
1358 | } |
1359 | |
1360 | /* |
1361 | - * caller needn't hold any locks |
1362 | + * See comments in ceph_get_snap_realm. Caller needn't hold any locks. |
1363 | */ |
1364 | void ceph_put_snap_realm(struct ceph_mds_client *mdsc, |
1365 | struct ceph_snap_realm *realm) |
1366 | { |
1367 | - dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm, |
1368 | - atomic_read(&realm->nref), atomic_read(&realm->nref)-1); |
1369 | - if (!atomic_dec_and_test(&realm->nref)) |
1370 | + if (!atomic_dec_and_lock(&realm->nref, &mdsc->snap_empty_lock)) |
1371 | return; |
1372 | |
1373 | if (down_write_trylock(&mdsc->snap_rwsem)) { |
1374 | + spin_unlock(&mdsc->snap_empty_lock); |
1375 | __destroy_snap_realm(mdsc, realm); |
1376 | up_write(&mdsc->snap_rwsem); |
1377 | } else { |
1378 | - spin_lock(&mdsc->snap_empty_lock); |
1379 | list_add(&realm->empty_item, &mdsc->snap_empty); |
1380 | spin_unlock(&mdsc->snap_empty_lock); |
1381 | } |
1382 | @@ -236,6 +246,8 @@ static void __cleanup_empty_realms(struct ceph_mds_client *mdsc) |
1383 | { |
1384 | struct ceph_snap_realm *realm; |
1385 | |
1386 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1387 | + |
1388 | spin_lock(&mdsc->snap_empty_lock); |
1389 | while (!list_empty(&mdsc->snap_empty)) { |
1390 | realm = list_first_entry(&mdsc->snap_empty, |
1391 | @@ -269,6 +281,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, |
1392 | { |
1393 | struct ceph_snap_realm *parent; |
1394 | |
1395 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1396 | + |
1397 | if (realm->parent_ino == parentino) |
1398 | return 0; |
1399 | |
1400 | @@ -686,6 +700,8 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, |
1401 | int err = -ENOMEM; |
1402 | LIST_HEAD(dirty_realms); |
1403 | |
1404 | + lockdep_assert_held_write(&mdsc->snap_rwsem); |
1405 | + |
1406 | dout("update_snap_trace deletion=%d\n", deletion); |
1407 | more: |
1408 | ceph_decode_need(&p, e, sizeof(*ri), bad); |
1409 | diff --git a/fs/ceph/super.h b/fs/ceph/super.h |
1410 | index bb12c9f3a218d..8ffc8e88dd3d2 100644 |
1411 | --- a/fs/ceph/super.h |
1412 | +++ b/fs/ceph/super.h |
1413 | @@ -1064,7 +1064,7 @@ extern void ceph_flush_snaps(struct ceph_inode_info *ci, |
1414 | extern bool __ceph_should_report_size(struct ceph_inode_info *ci); |
1415 | extern void ceph_check_caps(struct ceph_inode_info *ci, int flags, |
1416 | struct ceph_mds_session *session); |
1417 | -extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc); |
1418 | +extern unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc); |
1419 | extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc); |
1420 | extern int ceph_drop_caps_for_unlink(struct inode *inode); |
1421 | extern int ceph_encode_inode_release(void **p, struct inode *inode, |
1422 | diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h |
1423 | index 1e037f894804b..8a5ced9cf5273 100644 |
1424 | --- a/include/asm-generic/vmlinux.lds.h |
1425 | +++ b/include/asm-generic/vmlinux.lds.h |
1426 | @@ -536,6 +536,7 @@ |
1427 | NOINSTR_TEXT \ |
1428 | *(.text..refcount) \ |
1429 | *(.ref.text) \ |
1430 | + *(.text.asan.* .text.tsan.*) \ |
1431 | MEM_KEEP(init.text*) \ |
1432 | MEM_KEEP(exit.text*) \ |
1433 | |
1434 | diff --git a/include/linux/device.h b/include/linux/device.h |
1435 | index 297239a08bb77..3414b5a67b466 100644 |
1436 | --- a/include/linux/device.h |
1437 | +++ b/include/linux/device.h |
1438 | @@ -1260,6 +1260,7 @@ struct device { |
1439 | struct dev_pin_info *pins; |
1440 | #endif |
1441 | #ifdef CONFIG_GENERIC_MSI_IRQ |
1442 | + raw_spinlock_t msi_lock; |
1443 | struct list_head msi_list; |
1444 | #endif |
1445 | |
1446 | diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h |
1447 | index 3515ca64e638a..b68fca08be27c 100644 |
1448 | --- a/include/linux/inetdevice.h |
1449 | +++ b/include/linux/inetdevice.h |
1450 | @@ -41,7 +41,7 @@ struct in_device { |
1451 | unsigned long mr_qri; /* Query Response Interval */ |
1452 | unsigned char mr_qrv; /* Query Robustness Variable */ |
1453 | unsigned char mr_gq_running; |
1454 | - unsigned char mr_ifc_count; |
1455 | + u32 mr_ifc_count; |
1456 | struct timer_list mr_gq_timer; /* general query timer */ |
1457 | struct timer_list mr_ifc_timer; /* interface change timer */ |
1458 | |
1459 | diff --git a/include/linux/irq.h b/include/linux/irq.h |
1460 | index e9e69c511ea92..5655da9eb1fb9 100644 |
1461 | --- a/include/linux/irq.h |
1462 | +++ b/include/linux/irq.h |
1463 | @@ -542,6 +542,7 @@ struct irq_chip { |
1464 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
1465 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs |
1466 | * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips |
1467 | + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup |
1468 | */ |
1469 | enum { |
1470 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
1471 | @@ -553,6 +554,7 @@ enum { |
1472 | IRQCHIP_EOI_THREADED = (1 << 6), |
1473 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), |
1474 | IRQCHIP_SUPPORTS_NMI = (1 << 8), |
1475 | + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), |
1476 | }; |
1477 | |
1478 | #include <linux/irqdesc.h> |
1479 | diff --git a/include/linux/msi.h b/include/linux/msi.h |
1480 | index d695e2eb2092d..758e32f0d4434 100644 |
1481 | --- a/include/linux/msi.h |
1482 | +++ b/include/linux/msi.h |
1483 | @@ -194,7 +194,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
1484 | void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
1485 | |
1486 | u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); |
1487 | -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
1488 | +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
1489 | void pci_msi_mask_irq(struct irq_data *data); |
1490 | void pci_msi_unmask_irq(struct irq_data *data); |
1491 | |
1492 | diff --git a/include/net/psample.h b/include/net/psample.h |
1493 | index 68ae16bb0a4a8..20a17551f790f 100644 |
1494 | --- a/include/net/psample.h |
1495 | +++ b/include/net/psample.h |
1496 | @@ -18,6 +18,8 @@ struct psample_group *psample_group_get(struct net *net, u32 group_num); |
1497 | void psample_group_take(struct psample_group *group); |
1498 | void psample_group_put(struct psample_group *group); |
1499 | |
1500 | +struct sk_buff; |
1501 | + |
1502 | #if IS_ENABLED(CONFIG_PSAMPLE) |
1503 | |
1504 | void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, |
1505 | diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c |
1506 | index b76703b2c0af2..856f0297dc738 100644 |
1507 | --- a/kernel/irq/chip.c |
1508 | +++ b/kernel/irq/chip.c |
1509 | @@ -265,8 +265,11 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) |
1510 | } else { |
1511 | switch (__irq_startup_managed(desc, aff, force)) { |
1512 | case IRQ_STARTUP_NORMAL: |
1513 | + if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP) |
1514 | + irq_setup_affinity(desc); |
1515 | ret = __irq_startup(desc); |
1516 | - irq_setup_affinity(desc); |
1517 | + if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)) |
1518 | + irq_setup_affinity(desc); |
1519 | break; |
1520 | case IRQ_STARTUP_MANAGED: |
1521 | irq_do_set_affinity(d, aff, false); |
1522 | diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c |
1523 | index 5d3da0db092ff..b7e4c5999cc80 100644 |
1524 | --- a/kernel/irq/msi.c |
1525 | +++ b/kernel/irq/msi.c |
1526 | @@ -477,11 +477,6 @@ skip_activate: |
1527 | return 0; |
1528 | |
1529 | cleanup: |
1530 | - for_each_msi_vector(desc, i, dev) { |
1531 | - irq_data = irq_domain_get_irq_data(domain, i); |
1532 | - if (irqd_is_activated(irq_data)) |
1533 | - irq_domain_deactivate_irq(irq_data); |
1534 | - } |
1535 | msi_domain_free_irqs(domain, dev); |
1536 | return ret; |
1537 | } |
1538 | @@ -494,7 +489,15 @@ cleanup: |
1539 | */ |
1540 | void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) |
1541 | { |
1542 | + struct irq_data *irq_data; |
1543 | struct msi_desc *desc; |
1544 | + int i; |
1545 | + |
1546 | + for_each_msi_vector(desc, i, dev) { |
1547 | + irq_data = irq_domain_get_irq_data(domain, i); |
1548 | + if (irqd_is_activated(irq_data)) |
1549 | + irq_domain_deactivate_irq(irq_data); |
1550 | + } |
1551 | |
1552 | for_each_msi_entry(desc, dev) { |
1553 | /* |
1554 | diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c |
1555 | index e960d7ce7bcce..b5985da80acf0 100644 |
1556 | --- a/kernel/irq/timings.c |
1557 | +++ b/kernel/irq/timings.c |
1558 | @@ -453,6 +453,11 @@ static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs, |
1559 | */ |
1560 | index = irq_timings_interval_index(interval); |
1561 | |
1562 | + if (index > PREDICTION_BUFFER_SIZE - 1) { |
1563 | + irqs->count = 0; |
1564 | + return; |
1565 | + } |
1566 | + |
1567 | /* |
1568 | * Store the index as an element of the pattern in another |
1569 | * circular array. |
1570 | diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c |
1571 | index bec20dbf6f603..e2a999890d05e 100644 |
1572 | --- a/net/bridge/br_if.c |
1573 | +++ b/net/bridge/br_if.c |
1574 | @@ -599,6 +599,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev, |
1575 | |
1576 | err = dev_set_allmulti(dev, 1); |
1577 | if (err) { |
1578 | + br_multicast_del_port(p); |
1579 | kfree(p); /* kobject not yet init'd, manually free */ |
1580 | goto err1; |
1581 | } |
1582 | @@ -712,6 +713,7 @@ err4: |
1583 | err3: |
1584 | sysfs_remove_link(br->ifobj, p->dev->name); |
1585 | err2: |
1586 | + br_multicast_del_port(p); |
1587 | kobject_put(&p->kobj); |
1588 | dev_set_allmulti(dev, -1); |
1589 | err1: |
1590 | diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c |
1591 | index 8d033a75a766e..fdbed31585553 100644 |
1592 | --- a/net/bridge/netfilter/nf_conntrack_bridge.c |
1593 | +++ b/net/bridge/netfilter/nf_conntrack_bridge.c |
1594 | @@ -88,6 +88,12 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, |
1595 | |
1596 | skb = ip_fraglist_next(&iter); |
1597 | } |
1598 | + |
1599 | + if (!err) |
1600 | + return 0; |
1601 | + |
1602 | + kfree_skb_list(iter.frag); |
1603 | + |
1604 | return err; |
1605 | } |
1606 | slow_path: |
1607 | diff --git a/net/core/link_watch.c b/net/core/link_watch.c |
1608 | index f153e06018383..35b0e39030daf 100644 |
1609 | --- a/net/core/link_watch.c |
1610 | +++ b/net/core/link_watch.c |
1611 | @@ -150,7 +150,7 @@ static void linkwatch_do_dev(struct net_device *dev) |
1612 | clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state); |
1613 | |
1614 | rfc2863_policy(dev); |
1615 | - if (dev->flags & IFF_UP && netif_device_present(dev)) { |
1616 | + if (dev->flags & IFF_UP) { |
1617 | if (netif_carrier_ok(dev)) |
1618 | dev_activate(dev); |
1619 | else |
1620 | @@ -196,7 +196,8 @@ static void __linkwatch_run_queue(int urgent_only) |
1621 | dev = list_first_entry(&wrk, struct net_device, link_watch_list); |
1622 | list_del_init(&dev->link_watch_list); |
1623 | |
1624 | - if (urgent_only && !linkwatch_urgent_event(dev)) { |
1625 | + if (!netif_device_present(dev) || |
1626 | + (urgent_only && !linkwatch_urgent_event(dev))) { |
1627 | list_add_tail(&dev->link_watch_list, &lweventlist); |
1628 | continue; |
1629 | } |
1630 | diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c |
1631 | index d93d4531aa9bc..9a675ba0bf0a8 100644 |
1632 | --- a/net/ieee802154/socket.c |
1633 | +++ b/net/ieee802154/socket.c |
1634 | @@ -992,6 +992,11 @@ static const struct proto_ops ieee802154_dgram_ops = { |
1635 | #endif |
1636 | }; |
1637 | |
1638 | +static void ieee802154_sock_destruct(struct sock *sk) |
1639 | +{ |
1640 | + skb_queue_purge(&sk->sk_receive_queue); |
1641 | +} |
1642 | + |
1643 | /* Create a socket. Initialise the socket, blank the addresses |
1644 | * set the state. |
1645 | */ |
1646 | @@ -1032,7 +1037,7 @@ static int ieee802154_create(struct net *net, struct socket *sock, |
1647 | sock->ops = ops; |
1648 | |
1649 | sock_init_data(sock, sk); |
1650 | - /* FIXME: sk->sk_destruct */ |
1651 | + sk->sk_destruct = ieee802154_sock_destruct; |
1652 | sk->sk_family = PF_IEEE802154; |
1653 | |
1654 | /* Checksums on by default */ |
1655 | diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c |
1656 | index c8cbdc4d5cbc7..d2b1ae83f258d 100644 |
1657 | --- a/net/ipv4/igmp.c |
1658 | +++ b/net/ipv4/igmp.c |
1659 | @@ -805,10 +805,17 @@ static void igmp_gq_timer_expire(struct timer_list *t) |
1660 | static void igmp_ifc_timer_expire(struct timer_list *t) |
1661 | { |
1662 | struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer); |
1663 | + u32 mr_ifc_count; |
1664 | |
1665 | igmpv3_send_cr(in_dev); |
1666 | - if (in_dev->mr_ifc_count) { |
1667 | - in_dev->mr_ifc_count--; |
1668 | +restart: |
1669 | + mr_ifc_count = READ_ONCE(in_dev->mr_ifc_count); |
1670 | + |
1671 | + if (mr_ifc_count) { |
1672 | + if (cmpxchg(&in_dev->mr_ifc_count, |
1673 | + mr_ifc_count, |
1674 | + mr_ifc_count - 1) != mr_ifc_count) |
1675 | + goto restart; |
1676 | igmp_ifc_start_timer(in_dev, |
1677 | unsolicited_report_interval(in_dev)); |
1678 | } |
1679 | @@ -820,7 +827,7 @@ static void igmp_ifc_event(struct in_device *in_dev) |
1680 | struct net *net = dev_net(in_dev->dev); |
1681 | if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) |
1682 | return; |
1683 | - in_dev->mr_ifc_count = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
1684 | + WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv); |
1685 | igmp_ifc_start_timer(in_dev, 1); |
1686 | } |
1687 | |
1688 | @@ -959,7 +966,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, |
1689 | in_dev->mr_qri; |
1690 | } |
1691 | /* cancel the interface change timer */ |
1692 | - in_dev->mr_ifc_count = 0; |
1693 | + WRITE_ONCE(in_dev->mr_ifc_count, 0); |
1694 | if (del_timer(&in_dev->mr_ifc_timer)) |
1695 | __in_dev_put(in_dev); |
1696 | /* clear deleted report items */ |
1697 | @@ -1726,7 +1733,7 @@ void ip_mc_down(struct in_device *in_dev) |
1698 | igmp_group_dropped(pmc); |
1699 | |
1700 | #ifdef CONFIG_IP_MULTICAST |
1701 | - in_dev->mr_ifc_count = 0; |
1702 | + WRITE_ONCE(in_dev->mr_ifc_count, 0); |
1703 | if (del_timer(&in_dev->mr_ifc_timer)) |
1704 | __in_dev_put(in_dev); |
1705 | in_dev->mr_gq_running = 0; |
1706 | @@ -1943,7 +1950,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode, |
1707 | pmc->sfmode = MCAST_INCLUDE; |
1708 | #ifdef CONFIG_IP_MULTICAST |
1709 | pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
1710 | - in_dev->mr_ifc_count = pmc->crcount; |
1711 | + WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); |
1712 | for (psf = pmc->sources; psf; psf = psf->sf_next) |
1713 | psf->sf_crcount = 0; |
1714 | igmp_ifc_event(pmc->interface); |
1715 | @@ -2122,7 +2129,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode, |
1716 | /* else no filters; keep old mode for reports */ |
1717 | |
1718 | pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
1719 | - in_dev->mr_ifc_count = pmc->crcount; |
1720 | + WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount); |
1721 | for (psf = pmc->sources; psf; psf = psf->sf_next) |
1722 | psf->sf_crcount = 0; |
1723 | igmp_ifc_event(in_dev); |
1724 | diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c |
1725 | index 6ea3dc2e42194..6274462b86b4b 100644 |
1726 | --- a/net/ipv4/tcp_bbr.c |
1727 | +++ b/net/ipv4/tcp_bbr.c |
1728 | @@ -1041,7 +1041,7 @@ static void bbr_init(struct sock *sk) |
1729 | bbr->prior_cwnd = 0; |
1730 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
1731 | bbr->rtt_cnt = 0; |
1732 | - bbr->next_rtt_delivered = 0; |
1733 | + bbr->next_rtt_delivered = tp->delivered; |
1734 | bbr->prev_ca_state = TCP_CA_Open; |
1735 | bbr->packet_conservation = 0; |
1736 | |
1737 | diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c |
1738 | index 8327ef9793ef8..e3ff884a48c56 100644 |
1739 | --- a/net/sched/act_mirred.c |
1740 | +++ b/net/sched/act_mirred.c |
1741 | @@ -261,6 +261,9 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, |
1742 | goto out; |
1743 | } |
1744 | |
1745 | + /* All mirred/redirected skbs should clear previous ct info */ |
1746 | + nf_reset_ct(skb2); |
1747 | + |
1748 | want_ingress = tcf_mirred_act_wants_ingress(m_eaction); |
1749 | |
1750 | expects_nh = want_ingress || !m_mac_header_xmit; |
1751 | diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c |
1752 | index 5905f0cddc895..7973f98ebd918 100644 |
1753 | --- a/net/vmw_vsock/virtio_transport.c |
1754 | +++ b/net/vmw_vsock/virtio_transport.c |
1755 | @@ -373,11 +373,14 @@ static void virtio_vsock_event_fill(struct virtio_vsock *vsock) |
1756 | |
1757 | static void virtio_vsock_reset_sock(struct sock *sk) |
1758 | { |
1759 | - lock_sock(sk); |
1760 | + /* vmci_transport.c doesn't take sk_lock here either. At least we're |
1761 | + * under vsock_table_lock so the sock cannot disappear while we're |
1762 | + * executing. |
1763 | + */ |
1764 | + |
1765 | sk->sk_state = TCP_CLOSE; |
1766 | sk->sk_err = ECONNRESET; |
1767 | sk->sk_error_report(sk); |
1768 | - release_sock(sk); |
1769 | } |
1770 | |
1771 | static void virtio_vsock_update_guest_cid(struct virtio_vsock *vsock) |
1772 | diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c |
1773 | index 5faf8877137ae..6825e874785f2 100644 |
1774 | --- a/sound/soc/codecs/cs42l42.c |
1775 | +++ b/sound/soc/codecs/cs42l42.c |
1776 | @@ -403,7 +403,7 @@ static const struct regmap_config cs42l42_regmap = { |
1777 | .use_single_write = true, |
1778 | }; |
1779 | |
1780 | -static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false); |
1781 | +static DECLARE_TLV_DB_SCALE(adc_tlv, -9700, 100, true); |
1782 | static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true); |
1783 | |
1784 | static const char * const cs42l42_hpf_freq_text[] = { |
1785 | @@ -423,34 +423,23 @@ static SOC_ENUM_SINGLE_DECL(cs42l42_wnf3_freq_enum, CS42L42_ADC_WNF_HPF_CTL, |
1786 | CS42L42_ADC_WNF_CF_SHIFT, |
1787 | cs42l42_wnf3_freq_text); |
1788 | |
1789 | -static const char * const cs42l42_wnf05_freq_text[] = { |
1790 | - "280Hz", "315Hz", "350Hz", "385Hz", |
1791 | - "420Hz", "455Hz", "490Hz", "525Hz" |
1792 | -}; |
1793 | - |
1794 | -static SOC_ENUM_SINGLE_DECL(cs42l42_wnf05_freq_enum, CS42L42_ADC_WNF_HPF_CTL, |
1795 | - CS42L42_ADC_WNF_CF_SHIFT, |
1796 | - cs42l42_wnf05_freq_text); |
1797 | - |
1798 | static const struct snd_kcontrol_new cs42l42_snd_controls[] = { |
1799 | /* ADC Volume and Filter Controls */ |
1800 | SOC_SINGLE("ADC Notch Switch", CS42L42_ADC_CTL, |
1801 | - CS42L42_ADC_NOTCH_DIS_SHIFT, true, false), |
1802 | + CS42L42_ADC_NOTCH_DIS_SHIFT, true, true), |
1803 | SOC_SINGLE("ADC Weak Force Switch", CS42L42_ADC_CTL, |
1804 | CS42L42_ADC_FORCE_WEAK_VCM_SHIFT, true, false), |
1805 | SOC_SINGLE("ADC Invert Switch", CS42L42_ADC_CTL, |
1806 | CS42L42_ADC_INV_SHIFT, true, false), |
1807 | SOC_SINGLE("ADC Boost Switch", CS42L42_ADC_CTL, |
1808 | CS42L42_ADC_DIG_BOOST_SHIFT, true, false), |
1809 | - SOC_SINGLE_SX_TLV("ADC Volume", CS42L42_ADC_VOLUME, |
1810 | - CS42L42_ADC_VOL_SHIFT, 0xA0, 0x6C, adc_tlv), |
1811 | + SOC_SINGLE_S8_TLV("ADC Volume", CS42L42_ADC_VOLUME, -97, 12, adc_tlv), |
1812 | SOC_SINGLE("ADC WNF Switch", CS42L42_ADC_WNF_HPF_CTL, |
1813 | CS42L42_ADC_WNF_EN_SHIFT, true, false), |
1814 | SOC_SINGLE("ADC HPF Switch", CS42L42_ADC_WNF_HPF_CTL, |
1815 | CS42L42_ADC_HPF_EN_SHIFT, true, false), |
1816 | SOC_ENUM("HPF Corner Freq", cs42l42_hpf_freq_enum), |
1817 | SOC_ENUM("WNF 3dB Freq", cs42l42_wnf3_freq_enum), |
1818 | - SOC_ENUM("WNF 05dB Freq", cs42l42_wnf05_freq_enum), |
1819 | |
1820 | /* DAC Volume and Filter Controls */ |
1821 | SOC_SINGLE("DACA Invert Switch", CS42L42_DAC_CTL1, |
1822 | @@ -669,15 +658,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component) |
1823 | CS42L42_FSYNC_PULSE_WIDTH_MASK, |
1824 | CS42L42_FRAC1_VAL(fsync - 1) << |
1825 | CS42L42_FSYNC_PULSE_WIDTH_SHIFT); |
1826 | - snd_soc_component_update_bits(component, |
1827 | - CS42L42_ASP_FRM_CFG, |
1828 | - CS42L42_ASP_5050_MASK, |
1829 | - CS42L42_ASP_5050_MASK); |
1830 | - /* Set the frame delay to 1.0 SCLK clocks */ |
1831 | - snd_soc_component_update_bits(component, CS42L42_ASP_FRM_CFG, |
1832 | - CS42L42_ASP_FSD_MASK, |
1833 | - CS42L42_ASP_FSD_1_0 << |
1834 | - CS42L42_ASP_FSD_SHIFT); |
1835 | /* Set the sample rates (96k or lower) */ |
1836 | snd_soc_component_update_bits(component, CS42L42_FS_RATE_EN, |
1837 | CS42L42_FS_EN_MASK, |
1838 | @@ -773,7 +753,18 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) |
1839 | /* interface format */ |
1840 | switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { |
1841 | case SND_SOC_DAIFMT_I2S: |
1842 | - case SND_SOC_DAIFMT_LEFT_J: |
1843 | + /* |
1844 | + * 5050 mode, frame starts on falling edge of LRCLK, |
1845 | + * frame delayed by 1.0 SCLKs |
1846 | + */ |
1847 | + snd_soc_component_update_bits(component, |
1848 | + CS42L42_ASP_FRM_CFG, |
1849 | + CS42L42_ASP_STP_MASK | |
1850 | + CS42L42_ASP_5050_MASK | |
1851 | + CS42L42_ASP_FSD_MASK, |
1852 | + CS42L42_ASP_5050_MASK | |
1853 | + (CS42L42_ASP_FSD_1_0 << |
1854 | + CS42L42_ASP_FSD_SHIFT)); |
1855 | break; |
1856 | default: |
1857 | return -EINVAL; |
1858 | diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1859 | index c3ff203c3f447..216e88624c5f3 100644 |
1860 | --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1861 | +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1862 | @@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, |
1863 | snd_pcm_uframes_t period_size; |
1864 | ssize_t periodbytes; |
1865 | ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); |
1866 | - u32 buffer_addr = virt_to_phys(substream->dma_buffer.area); |
1867 | + u32 buffer_addr = substream->runtime->dma_addr; |
1868 | |
1869 | channels = substream->runtime->channels; |
1870 | period_size = substream->runtime->period_size; |
1871 | @@ -233,7 +233,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream, |
1872 | /* set codec params and inform SST driver the same */ |
1873 | sst_fill_pcm_params(substream, ¶m); |
1874 | sst_fill_alloc_params(substream, &alloc_params); |
1875 | - substream->runtime->dma_area = substream->dma_buffer.area; |
1876 | str_params.sparams = param; |
1877 | str_params.aparams = alloc_params; |
1878 | str_params.codec = SST_CODEC_TYPE_PCM; |
1879 | diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c |
1880 | index 48970efe7838e..1f15c11782ec4 100644 |
1881 | --- a/sound/soc/xilinx/xlnx_formatter_pcm.c |
1882 | +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c |
1883 | @@ -461,8 +461,8 @@ static int xlnx_formatter_pcm_hw_params(struct snd_pcm_substream *substream, |
1884 | |
1885 | stream_data->buffer_size = size; |
1886 | |
1887 | - low = lower_32_bits(substream->dma_buffer.addr); |
1888 | - high = upper_32_bits(substream->dma_buffer.addr); |
1889 | + low = lower_32_bits(runtime->dma_addr); |
1890 | + high = upper_32_bits(runtime->dma_addr); |
1891 | writel(low, stream_data->mmio + XLNX_AUD_BUFF_ADDR_LSB); |
1892 | writel(high, stream_data->mmio + XLNX_AUD_BUFF_ADDR_MSB); |
1893 |