Contents of /trunk/kernel-alx-legacy/patches-4.9/0380-4.9.281-all-fixes.patch
Parent Directory | Revision Log
Revision 3682 -
(show annotations)
(download)
Mon Oct 24 14:07:58 2022 UTC (23 months ago) by niro
File size: 42607 byte(s)
-linux-4.9.281
1 | diff --git a/Documentation/filesystems/mandatory-locking.txt b/Documentation/filesystems/mandatory-locking.txt |
2 | index 0979d1d2ca8bb..a251ca33164ae 100644 |
3 | --- a/Documentation/filesystems/mandatory-locking.txt |
4 | +++ b/Documentation/filesystems/mandatory-locking.txt |
5 | @@ -169,3 +169,13 @@ havoc if they lock crucial files. The way around it is to change the file |
6 | permissions (remove the setgid bit) before trying to read or write to it. |
7 | Of course, that might be a bit tricky if the system is hung :-( |
8 | |
9 | +7. The "mand" mount option |
10 | +-------------------------- |
11 | +Mandatory locking is disabled on all filesystems by default, and must be |
12 | +administratively enabled by mounting with "-o mand". That mount option |
13 | +is only allowed if the mounting task has the CAP_SYS_ADMIN capability. |
14 | + |
15 | +Since kernel v4.5, it is possible to disable mandatory locking |
16 | +altogether by setting CONFIG_MANDATORY_FILE_LOCKING to "n". A kernel |
17 | +with this disabled will reject attempts to mount filesystems with the |
18 | +"mand" mount option with the error status EPERM. |
19 | diff --git a/Makefile b/Makefile |
20 | index 7cd5634469b10..08bbebb4acbf1 100644 |
21 | --- a/Makefile |
22 | +++ b/Makefile |
23 | @@ -1,6 +1,6 @@ |
24 | VERSION = 4 |
25 | PATCHLEVEL = 9 |
26 | -SUBLEVEL = 280 |
27 | +SUBLEVEL = 281 |
28 | EXTRAVERSION = |
29 | NAME = Roaring Lionus |
30 | |
31 | diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts |
32 | index 21918807c9f6d..f42a923912894 100644 |
33 | --- a/arch/arm/boot/dts/am43x-epos-evm.dts |
34 | +++ b/arch/arm/boot/dts/am43x-epos-evm.dts |
35 | @@ -411,7 +411,7 @@ |
36 | status = "okay"; |
37 | pinctrl-names = "default"; |
38 | pinctrl-0 = <&i2c0_pins>; |
39 | - clock-frequency = <400000>; |
40 | + clock-frequency = <100000>; |
41 | |
42 | tps65218: tps65218@24 { |
43 | reg = <0x24>; |
44 | diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
45 | index 1077ceebb2d68..87494773f4097 100644 |
46 | --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
47 | +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi |
48 | @@ -755,14 +755,14 @@ |
49 | status = "disabled"; |
50 | }; |
51 | |
52 | - vica: intc@10140000 { |
53 | + vica: interrupt-controller@10140000 { |
54 | compatible = "arm,versatile-vic"; |
55 | interrupt-controller; |
56 | #interrupt-cells = <1>; |
57 | reg = <0x10140000 0x20>; |
58 | }; |
59 | |
60 | - vicb: intc@10140020 { |
61 | + vicb: interrupt-controller@10140020 { |
62 | compatible = "arm,versatile-vic"; |
63 | interrupt-controller; |
64 | #interrupt-cells = <1>; |
65 | diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h |
66 | index ebda4718eb8f7..793c04cba0def 100644 |
67 | --- a/arch/x86/include/asm/fpu/internal.h |
68 | +++ b/arch/x86/include/asm/fpu/internal.h |
69 | @@ -221,6 +221,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) |
70 | } |
71 | } |
72 | |
73 | +static inline void fxsave(struct fxregs_state *fx) |
74 | +{ |
75 | + if (IS_ENABLED(CONFIG_X86_32)) |
76 | + asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx)); |
77 | + else |
78 | + asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx)); |
79 | +} |
80 | + |
81 | /* These macros all use (%edi)/(%rdi) as the single memory argument. */ |
82 | #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27" |
83 | #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37" |
84 | @@ -294,28 +302,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) |
85 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ |
86 | : "memory") |
87 | |
88 | -/* |
89 | - * This function is called only during boot time when x86 caps are not set |
90 | - * up and alternative can not be used yet. |
91 | - */ |
92 | -static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate) |
93 | -{ |
94 | - u64 mask = -1; |
95 | - u32 lmask = mask; |
96 | - u32 hmask = mask >> 32; |
97 | - int err; |
98 | - |
99 | - WARN_ON(system_state != SYSTEM_BOOTING); |
100 | - |
101 | - if (static_cpu_has(X86_FEATURE_XSAVES)) |
102 | - XSTATE_OP(XSAVES, xstate, lmask, hmask, err); |
103 | - else |
104 | - XSTATE_OP(XSAVE, xstate, lmask, hmask, err); |
105 | - |
106 | - /* We should never fault when copying to a kernel buffer: */ |
107 | - WARN_ON_FPU(err); |
108 | -} |
109 | - |
110 | /* |
111 | * This function is called only during boot time when x86 caps are not set |
112 | * up and alternative can not be used yet. |
113 | diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h |
114 | index 14824fc78f7e7..509b9f3307e43 100644 |
115 | --- a/arch/x86/include/asm/svm.h |
116 | +++ b/arch/x86/include/asm/svm.h |
117 | @@ -113,6 +113,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area { |
118 | #define V_IGN_TPR_SHIFT 20 |
119 | #define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT) |
120 | |
121 | +#define V_IRQ_INJECTION_BITS_MASK (V_IRQ_MASK | V_INTR_PRIO_MASK | V_IGN_TPR_MASK) |
122 | + |
123 | #define V_INTR_MASKING_SHIFT 24 |
124 | #define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT) |
125 | |
126 | diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c |
127 | index dbd396c913488..02ad98ec51491 100644 |
128 | --- a/arch/x86/kernel/fpu/xstate.c |
129 | +++ b/arch/x86/kernel/fpu/xstate.c |
130 | @@ -407,6 +407,24 @@ static void __init print_xstate_offset_size(void) |
131 | } |
132 | } |
133 | |
134 | +/* |
135 | + * All supported features have either init state all zeros or are |
136 | + * handled in setup_init_fpu() individually. This is an explicit |
137 | + * feature list and does not use XFEATURE_MASK*SUPPORTED to catch |
138 | + * newly added supported features at build time and make people |
139 | + * actually look at the init state for the new feature. |
140 | + */ |
141 | +#define XFEATURES_INIT_FPSTATE_HANDLED \ |
142 | + (XFEATURE_MASK_FP | \ |
143 | + XFEATURE_MASK_SSE | \ |
144 | + XFEATURE_MASK_YMM | \ |
145 | + XFEATURE_MASK_OPMASK | \ |
146 | + XFEATURE_MASK_ZMM_Hi256 | \ |
147 | + XFEATURE_MASK_Hi16_ZMM | \ |
148 | + XFEATURE_MASK_PKRU | \ |
149 | + XFEATURE_MASK_BNDREGS | \ |
150 | + XFEATURE_MASK_BNDCSR) |
151 | + |
152 | /* |
153 | * setup the xstate image representing the init state |
154 | */ |
155 | @@ -414,6 +432,8 @@ static void __init setup_init_fpu_buf(void) |
156 | { |
157 | static int on_boot_cpu __initdata = 1; |
158 | |
159 | + BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED); |
160 | + |
161 | WARN_ON_FPU(!on_boot_cpu); |
162 | on_boot_cpu = 0; |
163 | |
164 | @@ -432,10 +452,22 @@ static void __init setup_init_fpu_buf(void) |
165 | copy_kernel_to_xregs_booting(&init_fpstate.xsave); |
166 | |
167 | /* |
168 | - * Dump the init state again. This is to identify the init state |
169 | - * of any feature which is not represented by all zero's. |
170 | + * All components are now in init state. Read the state back so |
171 | + * that init_fpstate contains all non-zero init state. This only |
172 | + * works with XSAVE, but not with XSAVEOPT and XSAVES because |
173 | + * those use the init optimization which skips writing data for |
174 | + * components in init state. |
175 | + * |
176 | + * XSAVE could be used, but that would require to reshuffle the |
177 | + * data when XSAVES is available because XSAVES uses xstate |
178 | + * compaction. But doing so is a pointless exercise because most |
179 | + * components have an all zeros init state except for the legacy |
180 | + * ones (FP and SSE). Those can be saved with FXSAVE into the |
181 | + * legacy area. Adding new features requires to ensure that init |
182 | + * state is all zeroes or if not to add the necessary handling |
183 | + * here. |
184 | */ |
185 | - copy_xregs_to_kernel_booting(&init_fpstate.xsave); |
186 | + fxsave(&init_fpstate.fxsave); |
187 | } |
188 | |
189 | static int xfeature_uncompacted_offset(int xfeature_nr) |
190 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
191 | index cbc7f177bbd8e..03fdeab057d29 100644 |
192 | --- a/arch/x86/kvm/svm.c |
193 | +++ b/arch/x86/kvm/svm.c |
194 | @@ -3048,7 +3048,11 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) |
195 | svm->nested.intercept = nested_vmcb->control.intercept; |
196 | |
197 | svm_flush_tlb(&svm->vcpu); |
198 | - svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; |
199 | + svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl & |
200 | + (V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK); |
201 | + |
202 | + svm->vmcb->control.int_ctl |= V_INTR_MASKING_MASK; |
203 | + |
204 | if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK) |
205 | svm->vcpu.arch.hflags |= HF_VINTR_MASK; |
206 | else |
207 | diff --git a/arch/x86/tools/chkobjdump.awk b/arch/x86/tools/chkobjdump.awk |
208 | index fd1ab80be0dec..a4cf678cf5c80 100644 |
209 | --- a/arch/x86/tools/chkobjdump.awk |
210 | +++ b/arch/x86/tools/chkobjdump.awk |
211 | @@ -10,6 +10,7 @@ BEGIN { |
212 | |
213 | /^GNU objdump/ { |
214 | verstr = "" |
215 | + gsub(/\(.*\)/, ""); |
216 | for (i = 3; i <= NF; i++) |
217 | if (match($(i), "^[0-9]")) { |
218 | verstr = $(i); |
219 | diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c |
220 | index b7fd8e00b346b..4dddf579560f3 100644 |
221 | --- a/drivers/acpi/nfit/core.c |
222 | +++ b/drivers/acpi/nfit/core.c |
223 | @@ -2258,6 +2258,9 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, |
224 | struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; |
225 | struct nd_mapping_desc *mapping; |
226 | |
227 | + /* range index 0 == unmapped in SPA or invalid-SPA */ |
228 | + if (memdev->range_index == 0 || spa->range_index == 0) |
229 | + continue; |
230 | if (memdev->range_index != spa->range_index) |
231 | continue; |
232 | if (count >= ND_MAX_MAPPINGS) { |
233 | diff --git a/drivers/base/core.c b/drivers/base/core.c |
234 | index 3b8487e28c84f..e82a89325f3d6 100644 |
235 | --- a/drivers/base/core.c |
236 | +++ b/drivers/base/core.c |
237 | @@ -710,6 +710,7 @@ void device_initialize(struct device *dev) |
238 | device_pm_init(dev); |
239 | set_dev_node(dev, -1); |
240 | #ifdef CONFIG_GENERIC_MSI_IRQ |
241 | + raw_spin_lock_init(&dev->msi_lock); |
242 | INIT_LIST_HEAD(&dev->msi_list); |
243 | #endif |
244 | } |
245 | diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c |
246 | index 757cf48c1c5ed..441f37b41abd5 100644 |
247 | --- a/drivers/dma/of-dma.c |
248 | +++ b/drivers/dma/of-dma.c |
249 | @@ -68,8 +68,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, |
250 | return NULL; |
251 | |
252 | ofdma_target = of_dma_find_controller(&dma_spec_target); |
253 | - if (!ofdma_target) |
254 | - return NULL; |
255 | + if (!ofdma_target) { |
256 | + ofdma->dma_router->route_free(ofdma->dma_router->dev, |
257 | + route_data); |
258 | + chan = ERR_PTR(-EPROBE_DEFER); |
259 | + goto err; |
260 | + } |
261 | |
262 | chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target); |
263 | if (IS_ERR_OR_NULL(chan)) { |
264 | @@ -80,6 +84,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec, |
265 | chan->route_data = route_data; |
266 | } |
267 | |
268 | +err: |
269 | /* |
270 | * Need to put the node back since the ofdma->of_dma_route_allocate |
271 | * has taken it for generating the new, translated dma_spec |
272 | diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c |
273 | index 6682b3eec2b66..ec15ded640f61 100644 |
274 | --- a/drivers/dma/sh/usb-dmac.c |
275 | +++ b/drivers/dma/sh/usb-dmac.c |
276 | @@ -861,8 +861,8 @@ static int usb_dmac_probe(struct platform_device *pdev) |
277 | |
278 | error: |
279 | of_dma_controller_free(pdev->dev.of_node); |
280 | - pm_runtime_put(&pdev->dev); |
281 | error_pm: |
282 | + pm_runtime_put(&pdev->dev); |
283 | pm_runtime_disable(&pdev->dev); |
284 | return ret; |
285 | } |
286 | diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c |
287 | index c4066276eb7b9..b7f9fb00f695f 100644 |
288 | --- a/drivers/i2c/i2c-dev.c |
289 | +++ b/drivers/i2c/i2c-dev.c |
290 | @@ -148,7 +148,7 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, |
291 | if (count > 8192) |
292 | count = 8192; |
293 | |
294 | - tmp = kmalloc(count, GFP_KERNEL); |
295 | + tmp = kzalloc(count, GFP_KERNEL); |
296 | if (tmp == NULL) |
297 | return -ENOMEM; |
298 | |
299 | @@ -157,7 +157,8 @@ static ssize_t i2cdev_read(struct file *file, char __user *buf, size_t count, |
300 | |
301 | ret = i2c_master_recv(client, tmp, count); |
302 | if (ret >= 0) |
303 | - ret = copy_to_user(buf, tmp, count) ? -EFAULT : ret; |
304 | + if (copy_to_user(buf, tmp, ret)) |
305 | + ret = -EFAULT; |
306 | kfree(tmp); |
307 | return ret; |
308 | } |
309 | diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c |
310 | index 7d61b566e148d..f5218461ae255 100644 |
311 | --- a/drivers/iio/adc/palmas_gpadc.c |
312 | +++ b/drivers/iio/adc/palmas_gpadc.c |
313 | @@ -660,8 +660,8 @@ static int palmas_adc_wakeup_configure(struct palmas_gpadc *adc) |
314 | |
315 | adc_period = adc->auto_conversion_period; |
316 | for (i = 0; i < 16; ++i) { |
317 | - if (((1000 * (1 << i)) / 32) < adc_period) |
318 | - continue; |
319 | + if (((1000 * (1 << i)) / 32) >= adc_period) |
320 | + break; |
321 | } |
322 | if (i > 0) |
323 | i--; |
324 | diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c |
325 | index 7ba1a94497f5d..4294523bede5c 100644 |
326 | --- a/drivers/ipack/carriers/tpci200.c |
327 | +++ b/drivers/ipack/carriers/tpci200.c |
328 | @@ -94,16 +94,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200) |
329 | free_irq(tpci200->info->pdev->irq, (void *) tpci200); |
330 | |
331 | pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs); |
332 | - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); |
333 | |
334 | pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR); |
335 | pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR); |
336 | pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR); |
337 | pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR); |
338 | - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); |
339 | |
340 | pci_disable_device(tpci200->info->pdev); |
341 | - pci_dev_put(tpci200->info->pdev); |
342 | } |
343 | |
344 | static void tpci200_enable_irq(struct tpci200_board *tpci200, |
345 | @@ -524,7 +521,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
346 | tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL); |
347 | if (!tpci200->info) { |
348 | ret = -ENOMEM; |
349 | - goto out_err_info; |
350 | + goto err_tpci200; |
351 | } |
352 | |
353 | pci_dev_get(pdev); |
354 | @@ -535,7 +532,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
355 | if (ret) { |
356 | dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory"); |
357 | ret = -EBUSY; |
358 | - goto out_err_pci_request; |
359 | + goto err_tpci200_info; |
360 | } |
361 | tpci200->info->cfg_regs = ioremap_nocache( |
362 | pci_resource_start(pdev, TPCI200_CFG_MEM_BAR), |
363 | @@ -543,7 +540,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
364 | if (!tpci200->info->cfg_regs) { |
365 | dev_err(&pdev->dev, "Failed to map PCI Configuration Memory"); |
366 | ret = -EFAULT; |
367 | - goto out_err_ioremap; |
368 | + goto err_request_region; |
369 | } |
370 | |
371 | /* Disable byte swapping for 16 bit IP module access. This will ensure |
372 | @@ -566,7 +563,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
373 | if (ret) { |
374 | dev_err(&pdev->dev, "error during tpci200 install\n"); |
375 | ret = -ENODEV; |
376 | - goto out_err_install; |
377 | + goto err_cfg_regs; |
378 | } |
379 | |
380 | /* Register the carrier in the industry pack bus driver */ |
381 | @@ -578,7 +575,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
382 | dev_err(&pdev->dev, |
383 | "error registering the carrier on ipack driver\n"); |
384 | ret = -EFAULT; |
385 | - goto out_err_bus_register; |
386 | + goto err_tpci200_install; |
387 | } |
388 | |
389 | /* save the bus number given by ipack to logging purpose */ |
390 | @@ -589,19 +586,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev, |
391 | tpci200_create_device(tpci200, i); |
392 | return 0; |
393 | |
394 | -out_err_bus_register: |
395 | +err_tpci200_install: |
396 | tpci200_uninstall(tpci200); |
397 | - /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */ |
398 | - tpci200->info->cfg_regs = NULL; |
399 | -out_err_install: |
400 | - if (tpci200->info->cfg_regs) |
401 | - iounmap(tpci200->info->cfg_regs); |
402 | -out_err_ioremap: |
403 | +err_cfg_regs: |
404 | + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); |
405 | +err_request_region: |
406 | pci_release_region(pdev, TPCI200_CFG_MEM_BAR); |
407 | -out_err_pci_request: |
408 | - pci_dev_put(pdev); |
409 | +err_tpci200_info: |
410 | kfree(tpci200->info); |
411 | -out_err_info: |
412 | + pci_dev_put(pdev); |
413 | +err_tpci200: |
414 | kfree(tpci200); |
415 | return ret; |
416 | } |
417 | @@ -611,6 +605,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200) |
418 | ipack_bus_unregister(tpci200->info->ipack_bus); |
419 | tpci200_uninstall(tpci200); |
420 | |
421 | + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs); |
422 | + |
423 | + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR); |
424 | + |
425 | + pci_dev_put(tpci200->info->pdev); |
426 | + |
427 | kfree(tpci200->info); |
428 | kfree(tpci200); |
429 | } |
430 | diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c |
431 | index d9c7fd0cabafb..c6b91efaa9568 100644 |
432 | --- a/drivers/mmc/host/dw_mmc.c |
433 | +++ b/drivers/mmc/host/dw_mmc.c |
434 | @@ -380,7 +380,7 @@ static void dw_mci_start_command(struct dw_mci *host, |
435 | |
436 | static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) |
437 | { |
438 | - struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort; |
439 | + struct mmc_command *stop = &host->stop_abort; |
440 | |
441 | dw_mci_start_command(host, stop, host->stop_cmdr); |
442 | } |
443 | @@ -1280,10 +1280,7 @@ static void __dw_mci_start_request(struct dw_mci *host, |
444 | spin_unlock_irqrestore(&host->irq_lock, irqflags); |
445 | } |
446 | |
447 | - if (mrq->stop) |
448 | - host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); |
449 | - else |
450 | - host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); |
451 | + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); |
452 | } |
453 | |
454 | static void dw_mci_start_request(struct dw_mci *host, |
455 | @@ -1869,8 +1866,8 @@ static void dw_mci_tasklet_func(unsigned long priv) |
456 | continue; |
457 | } |
458 | |
459 | - dw_mci_stop_dma(host); |
460 | send_stop_abort(host, data); |
461 | + dw_mci_stop_dma(host); |
462 | state = STATE_SENDING_STOP; |
463 | break; |
464 | } |
465 | @@ -1894,11 +1891,10 @@ static void dw_mci_tasklet_func(unsigned long priv) |
466 | */ |
467 | if (test_and_clear_bit(EVENT_DATA_ERROR, |
468 | &host->pending_events)) { |
469 | - dw_mci_stop_dma(host); |
470 | - if (data->stop || |
471 | - !(host->data_status & (SDMMC_INT_DRTO | |
472 | + if (!(host->data_status & (SDMMC_INT_DRTO | |
473 | SDMMC_INT_EBE))) |
474 | send_stop_abort(host, data); |
475 | + dw_mci_stop_dma(host); |
476 | state = STATE_DATA_ERROR; |
477 | break; |
478 | } |
479 | @@ -1931,11 +1927,10 @@ static void dw_mci_tasklet_func(unsigned long priv) |
480 | */ |
481 | if (test_and_clear_bit(EVENT_DATA_ERROR, |
482 | &host->pending_events)) { |
483 | - dw_mci_stop_dma(host); |
484 | - if (data->stop || |
485 | - !(host->data_status & (SDMMC_INT_DRTO | |
486 | + if (!(host->data_status & (SDMMC_INT_DRTO | |
487 | SDMMC_INT_EBE))) |
488 | send_stop_abort(host, data); |
489 | + dw_mci_stop_dma(host); |
490 | state = STATE_DATA_ERROR; |
491 | break; |
492 | } |
493 | @@ -2009,7 +2004,7 @@ static void dw_mci_tasklet_func(unsigned long priv) |
494 | host->cmd = NULL; |
495 | host->data = NULL; |
496 | |
497 | - if (mrq->stop) |
498 | + if (!mrq->sbc && mrq->stop) |
499 | dw_mci_command_complete(host, mrq->stop); |
500 | else |
501 | host->cmd_status = 0; |
502 | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c |
503 | index 5d2de48b77a00..dce36e9e1879c 100644 |
504 | --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c |
505 | +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c |
506 | @@ -3157,8 +3157,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr, |
507 | |
508 | indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr); |
509 | ret = QLCRD32(adapter, indirect_addr, &err); |
510 | - if (err == -EIO) |
511 | + if (err == -EIO) { |
512 | + qlcnic_83xx_unlock_flash(adapter); |
513 | return err; |
514 | + } |
515 | |
516 | word = ret; |
517 | *(u32 *)p_data = word; |
518 | diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c |
519 | index 03c96a6cbafd8..e510dbda77e58 100644 |
520 | --- a/drivers/net/hamradio/6pack.c |
521 | +++ b/drivers/net/hamradio/6pack.c |
522 | @@ -870,6 +870,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte) |
523 | return; |
524 | } |
525 | |
526 | + if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) { |
527 | + pr_err("6pack: cooked buffer overrun, data loss\n"); |
528 | + sp->rx_count = 0; |
529 | + return; |
530 | + } |
531 | + |
532 | buf = sp->raw_buf; |
533 | sp->cooked_buf[sp->rx_count_cooked++] = |
534 | buf[0] | ((buf[1] << 2) & 0xc0); |
535 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
536 | index 5ba472691546b..0a29844676f92 100644 |
537 | --- a/drivers/net/ppp/ppp_generic.c |
538 | +++ b/drivers/net/ppp/ppp_generic.c |
539 | @@ -1136,7 +1136,7 @@ static int ppp_nl_newlink(struct net *src_net, struct net_device *dev, |
540 | * the PPP unit identifer as suffix (i.e. ppp<unit_id>). This allows |
541 | * userspace to infer the device name using to the PPPIOCGUNIT ioctl. |
542 | */ |
543 | - if (!tb[IFLA_IFNAME]) |
544 | + if (!tb[IFLA_IFNAME] || !nla_len(tb[IFLA_IFNAME]) || !*(char *)nla_data(tb[IFLA_IFNAME])) |
545 | conf.ifname_is_set = false; |
546 | |
547 | err = ppp_dev_configure(src_net, dev, &conf); |
548 | diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c |
549 | index 55ca14fbdd2a2..77810f4240492 100644 |
550 | --- a/drivers/pci/msi.c |
551 | +++ b/drivers/pci/msi.c |
552 | @@ -189,24 +189,25 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) |
553 | * reliably as devices without an INTx disable bit will then generate a |
554 | * level IRQ which will never be cleared. |
555 | */ |
556 | -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
557 | +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
558 | { |
559 | - u32 mask_bits = desc->masked; |
560 | + raw_spinlock_t *lock = &desc->dev->msi_lock; |
561 | + unsigned long flags; |
562 | |
563 | if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) |
564 | - return 0; |
565 | + return; |
566 | |
567 | - mask_bits &= ~mask; |
568 | - mask_bits |= flag; |
569 | + raw_spin_lock_irqsave(lock, flags); |
570 | + desc->masked &= ~mask; |
571 | + desc->masked |= flag; |
572 | pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, |
573 | - mask_bits); |
574 | - |
575 | - return mask_bits; |
576 | + desc->masked); |
577 | + raw_spin_unlock_irqrestore(lock, flags); |
578 | } |
579 | |
580 | static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) |
581 | { |
582 | - desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); |
583 | + __pci_msi_desc_mask_irq(desc, mask, flag); |
584 | } |
585 | |
586 | static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) |
587 | @@ -321,10 +322,28 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
588 | /* Don't touch the hardware now */ |
589 | } else if (entry->msi_attrib.is_msix) { |
590 | void __iomem *base = pci_msix_desc_addr(entry); |
591 | + bool unmasked = !(entry->masked & PCI_MSIX_ENTRY_CTRL_MASKBIT); |
592 | + |
593 | + /* |
594 | + * The specification mandates that the entry is masked |
595 | + * when the message is modified: |
596 | + * |
597 | + * "If software changes the Address or Data value of an |
598 | + * entry while the entry is unmasked, the result is |
599 | + * undefined." |
600 | + */ |
601 | + if (unmasked) |
602 | + __pci_msix_desc_mask_irq(entry, PCI_MSIX_ENTRY_CTRL_MASKBIT); |
603 | |
604 | writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); |
605 | writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); |
606 | writel(msg->data, base + PCI_MSIX_ENTRY_DATA); |
607 | + |
608 | + if (unmasked) |
609 | + __pci_msix_desc_mask_irq(entry, 0); |
610 | + |
611 | + /* Ensure that the writes are visible in the device */ |
612 | + readl(base + PCI_MSIX_ENTRY_DATA); |
613 | } else { |
614 | int pos = dev->msi_cap; |
615 | u16 msgctl; |
616 | @@ -345,6 +364,8 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) |
617 | pci_write_config_word(dev, pos + PCI_MSI_DATA_32, |
618 | msg->data); |
619 | } |
620 | + /* Ensure that the writes are visible in the device */ |
621 | + pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); |
622 | } |
623 | entry->msg = *msg; |
624 | } |
625 | @@ -639,21 +660,21 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity) |
626 | /* Configure MSI capability structure */ |
627 | ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); |
628 | if (ret) { |
629 | - msi_mask_irq(entry, mask, ~mask); |
630 | + msi_mask_irq(entry, mask, 0); |
631 | free_msi_irqs(dev); |
632 | return ret; |
633 | } |
634 | |
635 | ret = msi_verify_entries(dev); |
636 | if (ret) { |
637 | - msi_mask_irq(entry, mask, ~mask); |
638 | + msi_mask_irq(entry, mask, 0); |
639 | free_msi_irqs(dev); |
640 | return ret; |
641 | } |
642 | |
643 | ret = populate_msi_sysfs(dev); |
644 | if (ret) { |
645 | - msi_mask_irq(entry, mask, ~mask); |
646 | + msi_mask_irq(entry, mask, 0); |
647 | free_msi_irqs(dev); |
648 | return ret; |
649 | } |
650 | @@ -694,6 +715,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
651 | { |
652 | struct cpumask *curmsk, *masks = NULL; |
653 | struct msi_desc *entry; |
654 | + void __iomem *addr; |
655 | int ret, i; |
656 | |
657 | if (affinity) { |
658 | @@ -716,6 +738,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
659 | |
660 | entry->msi_attrib.is_msix = 1; |
661 | entry->msi_attrib.is_64 = 1; |
662 | + |
663 | if (entries) |
664 | entry->msi_attrib.entry_nr = entries[i].entry; |
665 | else |
666 | @@ -723,6 +746,10 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
667 | entry->msi_attrib.default_irq = dev->irq; |
668 | entry->mask_base = base; |
669 | |
670 | + addr = pci_msix_desc_addr(entry); |
671 | + if (addr) |
672 | + entry->masked = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); |
673 | + |
674 | list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); |
675 | if (masks) |
676 | curmsk++; |
677 | @@ -733,21 +760,27 @@ out: |
678 | return ret; |
679 | } |
680 | |
681 | -static void msix_program_entries(struct pci_dev *dev, |
682 | - struct msix_entry *entries) |
683 | +static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) |
684 | { |
685 | struct msi_desc *entry; |
686 | - int i = 0; |
687 | |
688 | for_each_pci_msi_entry(entry, dev) { |
689 | - if (entries) |
690 | - entries[i++].vector = entry->irq; |
691 | - entry->masked = readl(pci_msix_desc_addr(entry) + |
692 | - PCI_MSIX_ENTRY_VECTOR_CTRL); |
693 | - msix_mask_irq(entry, 1); |
694 | + if (entries) { |
695 | + entries->vector = entry->irq; |
696 | + entries++; |
697 | + } |
698 | } |
699 | } |
700 | |
701 | +static void msix_mask_all(void __iomem *base, int tsize) |
702 | +{ |
703 | + u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; |
704 | + int i; |
705 | + |
706 | + for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) |
707 | + writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); |
708 | +} |
709 | + |
710 | /** |
711 | * msix_capability_init - configure device's MSI-X capability |
712 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
713 | @@ -762,22 +795,34 @@ static void msix_program_entries(struct pci_dev *dev, |
714 | static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, |
715 | int nvec, bool affinity) |
716 | { |
717 | - int ret; |
718 | - u16 control; |
719 | void __iomem *base; |
720 | + int ret, tsize; |
721 | + u16 control; |
722 | |
723 | - /* Ensure MSI-X is disabled while it is set up */ |
724 | - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
725 | + /* |
726 | + * Some devices require MSI-X to be enabled before the MSI-X |
727 | + * registers can be accessed. Mask all the vectors to prevent |
728 | + * interrupts coming in before they're fully set up. |
729 | + */ |
730 | + pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | |
731 | + PCI_MSIX_FLAGS_ENABLE); |
732 | |
733 | pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); |
734 | /* Request & Map MSI-X table region */ |
735 | + tsize = msix_table_size(control); |
736 | + base = msix_map_region(dev, tsize); |
737 | base = msix_map_region(dev, msix_table_size(control)); |
738 | - if (!base) |
739 | - return -ENOMEM; |
740 | + if (!base) { |
741 | + ret = -ENOMEM; |
742 | + goto out_disable; |
743 | + } |
744 | + |
745 | + /* Ensure that all table entries are masked. */ |
746 | + msix_mask_all(base, tsize); |
747 | |
748 | ret = msix_setup_entries(dev, base, entries, nvec, affinity); |
749 | if (ret) |
750 | - return ret; |
751 | + goto out_disable; |
752 | |
753 | ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); |
754 | if (ret) |
755 | @@ -788,15 +833,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, |
756 | if (ret) |
757 | goto out_free; |
758 | |
759 | - /* |
760 | - * Some devices require MSI-X to be enabled before we can touch the |
761 | - * MSI-X registers. We need to mask all the vectors to prevent |
762 | - * interrupts coming in before they're fully set up. |
763 | - */ |
764 | - pci_msix_clear_and_set_ctrl(dev, 0, |
765 | - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); |
766 | - |
767 | - msix_program_entries(dev, entries); |
768 | + msix_update_entries(dev, entries); |
769 | |
770 | ret = populate_msi_sysfs(dev); |
771 | if (ret) |
772 | @@ -830,6 +867,9 @@ out_avail: |
773 | out_free: |
774 | free_msi_irqs(dev); |
775 | |
776 | +out_disable: |
777 | + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
778 | + |
779 | return ret; |
780 | } |
781 | |
782 | @@ -917,8 +957,7 @@ void pci_msi_shutdown(struct pci_dev *dev) |
783 | |
784 | /* Return the device with MSI unmasked as initial states */ |
785 | mask = msi_mask(desc->msi_attrib.multi_cap); |
786 | - /* Keep cached state to be restored */ |
787 | - __pci_msi_desc_mask_irq(desc, mask, ~mask); |
788 | + msi_mask_irq(desc, mask, 0); |
789 | |
790 | /* Restore dev->irq to its default pin-assertion irq */ |
791 | dev->irq = desc->msi_attrib.default_irq; |
792 | @@ -1019,10 +1058,8 @@ void pci_msix_shutdown(struct pci_dev *dev) |
793 | return; |
794 | |
795 | /* Return the device with MSI-X masked as initial states */ |
796 | - for_each_pci_msi_entry(entry, dev) { |
797 | - /* Keep cached states to be restored */ |
798 | + for_each_pci_msi_entry(entry, dev) |
799 | __pci_msix_desc_mask_irq(entry, 1); |
800 | - } |
801 | |
802 | pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); |
803 | pci_intx_for_msi(dev, 1); |
804 | diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c |
805 | index 06fbd0b0c68a3..6ddb3e9f21ba9 100644 |
806 | --- a/drivers/scsi/device_handler/scsi_dh_rdac.c |
807 | +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c |
808 | @@ -526,8 +526,8 @@ static int initialize_controller(struct scsi_device *sdev, |
809 | if (!h->ctlr) |
810 | err = SCSI_DH_RES_TEMP_UNAVAIL; |
811 | else { |
812 | - list_add_rcu(&h->node, &h->ctlr->dh_list); |
813 | h->sdev = sdev; |
814 | + list_add_rcu(&h->node, &h->ctlr->dh_list); |
815 | } |
816 | spin_unlock(&list_lock); |
817 | } |
818 | @@ -852,11 +852,11 @@ static void rdac_bus_detach( struct scsi_device *sdev ) |
819 | spin_lock(&list_lock); |
820 | if (h->ctlr) { |
821 | list_del_rcu(&h->node); |
822 | - h->sdev = NULL; |
823 | kref_put(&h->ctlr->kref, release_controller); |
824 | } |
825 | spin_unlock(&list_lock); |
826 | sdev->handler_data = NULL; |
827 | + synchronize_rcu(); |
828 | kfree(h); |
829 | } |
830 | |
831 | diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c |
832 | index 4cf9ed96414f0..d61df49e4e1bb 100644 |
833 | --- a/drivers/scsi/megaraid/megaraid_mm.c |
834 | +++ b/drivers/scsi/megaraid/megaraid_mm.c |
835 | @@ -250,7 +250,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) |
836 | mimd_t mimd; |
837 | uint32_t adapno; |
838 | int iterator; |
839 | - |
840 | + bool is_found; |
841 | |
842 | if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) { |
843 | *rval = -EFAULT; |
844 | @@ -266,12 +266,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval) |
845 | |
846 | adapter = NULL; |
847 | iterator = 0; |
848 | + is_found = false; |
849 | |
850 | list_for_each_entry(adapter, &adapters_list_g, list) { |
851 | - if (iterator++ == adapno) break; |
852 | + if (iterator++ == adapno) { |
853 | + is_found = true; |
854 | + break; |
855 | + } |
856 | } |
857 | |
858 | - if (!adapter) { |
859 | + if (!is_found) { |
860 | *rval = -ENODEV; |
861 | return NULL; |
862 | } |
863 | @@ -739,6 +743,7 @@ ioctl_done(uioc_t *kioc) |
864 | uint32_t adapno; |
865 | int iterator; |
866 | mraid_mmadp_t* adapter; |
867 | + bool is_found; |
868 | |
869 | /* |
870 | * When the kioc returns from driver, make sure it still doesn't |
871 | @@ -761,19 +766,23 @@ ioctl_done(uioc_t *kioc) |
872 | iterator = 0; |
873 | adapter = NULL; |
874 | adapno = kioc->adapno; |
875 | + is_found = false; |
876 | |
877 | con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed " |
878 | "ioctl that was timedout before\n")); |
879 | |
880 | list_for_each_entry(adapter, &adapters_list_g, list) { |
881 | - if (iterator++ == adapno) break; |
882 | + if (iterator++ == adapno) { |
883 | + is_found = true; |
884 | + break; |
885 | + } |
886 | } |
887 | |
888 | kioc->timedout = 0; |
889 | |
890 | - if (adapter) { |
891 | + if (is_found) |
892 | mraid_mm_dealloc_kioc( adapter, kioc ); |
893 | - } |
894 | + |
895 | } |
896 | else { |
897 | wake_up(&wait_q); |
898 | diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c |
899 | index 397deb69c6595..e51819e3a508e 100644 |
900 | --- a/drivers/scsi/scsi_scan.c |
901 | +++ b/drivers/scsi/scsi_scan.c |
902 | @@ -460,7 +460,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent, |
903 | error = shost->hostt->target_alloc(starget); |
904 | |
905 | if(error) { |
906 | - dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error); |
907 | + if (error != -ENXIO) |
908 | + dev_err(dev, "target allocation failed, error %d\n", error); |
909 | /* don't want scsi_target_reap to do the final |
910 | * put because it will be under the host lock */ |
911 | scsi_target_destroy(starget); |
912 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c |
913 | index d2431afeda847..62c61a283b35d 100644 |
914 | --- a/drivers/vhost/vhost.c |
915 | +++ b/drivers/vhost/vhost.c |
916 | @@ -675,10 +675,16 @@ static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz) |
917 | (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8); |
918 | } |
919 | |
920 | +/* Make sure 64 bit math will not overflow. */ |
921 | static bool vhost_overflow(u64 uaddr, u64 size) |
922 | { |
923 | - /* Make sure 64 bit math will not overflow. */ |
924 | - return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size; |
925 | + if (uaddr > ULONG_MAX || size > ULONG_MAX) |
926 | + return true; |
927 | + |
928 | + if (!size) |
929 | + return false; |
930 | + |
931 | + return uaddr > ULONG_MAX - size + 1; |
932 | } |
933 | |
934 | /* Caller should have vq mutex and device mutex. */ |
935 | diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c |
936 | index c6e6b7470cbf6..fbb6a4701ea3f 100644 |
937 | --- a/drivers/xen/events/events_base.c |
938 | +++ b/drivers/xen/events/events_base.c |
939 | @@ -134,12 +134,12 @@ static void disable_dynirq(struct irq_data *data); |
940 | |
941 | static DEFINE_PER_CPU(unsigned int, irq_epoch); |
942 | |
943 | -static void clear_evtchn_to_irq_row(unsigned row) |
944 | +static void clear_evtchn_to_irq_row(int *evtchn_row) |
945 | { |
946 | unsigned col; |
947 | |
948 | for (col = 0; col < EVTCHN_PER_ROW; col++) |
949 | - WRITE_ONCE(evtchn_to_irq[row][col], -1); |
950 | + WRITE_ONCE(evtchn_row[col], -1); |
951 | } |
952 | |
953 | static void clear_evtchn_to_irq_all(void) |
954 | @@ -149,7 +149,7 @@ static void clear_evtchn_to_irq_all(void) |
955 | for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) { |
956 | if (evtchn_to_irq[row] == NULL) |
957 | continue; |
958 | - clear_evtchn_to_irq_row(row); |
959 | + clear_evtchn_to_irq_row(evtchn_to_irq[row]); |
960 | } |
961 | } |
962 | |
963 | @@ -157,6 +157,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) |
964 | { |
965 | unsigned row; |
966 | unsigned col; |
967 | + int *evtchn_row; |
968 | |
969 | if (evtchn >= xen_evtchn_max_channels()) |
970 | return -EINVAL; |
971 | @@ -169,11 +170,18 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq) |
972 | if (irq == -1) |
973 | return 0; |
974 | |
975 | - evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL); |
976 | - if (evtchn_to_irq[row] == NULL) |
977 | + evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0); |
978 | + if (evtchn_row == NULL) |
979 | return -ENOMEM; |
980 | |
981 | - clear_evtchn_to_irq_row(row); |
982 | + clear_evtchn_to_irq_row(evtchn_row); |
983 | + |
984 | + /* |
985 | + * We've prepared an empty row for the mapping. If a different |
986 | + * thread was faster inserting it, we can drop ours. |
987 | + */ |
988 | + if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL) |
989 | + free_page((unsigned long) evtchn_row); |
990 | } |
991 | |
992 | WRITE_ONCE(evtchn_to_irq[row][col], irq); |
993 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
994 | index a55d23a73cdbc..b744e7d33d87f 100644 |
995 | --- a/fs/btrfs/inode.c |
996 | +++ b/fs/btrfs/inode.c |
997 | @@ -9632,8 +9632,14 @@ static int btrfs_rename_exchange(struct inode *old_dir, |
998 | bool root_log_pinned = false; |
999 | bool dest_log_pinned = false; |
1000 | |
1001 | - /* we only allow rename subvolume link between subvolumes */ |
1002 | - if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
1003 | + /* |
1004 | + * For non-subvolumes allow exchange only within one subvolume, in the |
1005 | + * same inode namespace. Two subvolumes (represented as directory) can |
1006 | + * be exchanged as they're a logical link and have a fixed inode number. |
1007 | + */ |
1008 | + if (root != dest && |
1009 | + (old_ino != BTRFS_FIRST_FREE_OBJECTID || |
1010 | + new_ino != BTRFS_FIRST_FREE_OBJECTID)) |
1011 | return -EXDEV; |
1012 | |
1013 | /* close the race window with snapshot create/destroy ioctl */ |
1014 | diff --git a/fs/namespace.c b/fs/namespace.c |
1015 | index 9f2390c89b63b..b9e30a385c013 100644 |
1016 | --- a/fs/namespace.c |
1017 | +++ b/fs/namespace.c |
1018 | @@ -1669,13 +1669,22 @@ static inline bool may_mount(void) |
1019 | return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); |
1020 | } |
1021 | |
1022 | +#ifdef CONFIG_MANDATORY_FILE_LOCKING |
1023 | +static bool may_mandlock(void) |
1024 | +{ |
1025 | + pr_warn_once("======================================================\n" |
1026 | + "WARNING: the mand mount option is being deprecated and\n" |
1027 | + " will be removed in v5.15!\n" |
1028 | + "======================================================\n"); |
1029 | + return capable(CAP_SYS_ADMIN); |
1030 | +} |
1031 | +#else |
1032 | static inline bool may_mandlock(void) |
1033 | { |
1034 | -#ifndef CONFIG_MANDATORY_FILE_LOCKING |
1035 | + pr_warn("VFS: \"mand\" mount option not supported"); |
1036 | return false; |
1037 | -#endif |
1038 | - return capable(CAP_SYS_ADMIN); |
1039 | } |
1040 | +#endif |
1041 | |
1042 | /* |
1043 | * Now umount can handle mount points as well as block devices. |
1044 | diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h |
1045 | index 36198563fb8bc..8cff6d157e562 100644 |
1046 | --- a/include/asm-generic/vmlinux.lds.h |
1047 | +++ b/include/asm-generic/vmlinux.lds.h |
1048 | @@ -465,6 +465,7 @@ |
1049 | *(.text.unlikely .text.unlikely.*) \ |
1050 | *(.text.unknown .text.unknown.*) \ |
1051 | *(.ref.text) \ |
1052 | + *(.text.asan.* .text.tsan.*) \ |
1053 | MEM_KEEP(init.text) \ |
1054 | MEM_KEEP(exit.text) \ |
1055 | |
1056 | diff --git a/include/linux/device.h b/include/linux/device.h |
1057 | index eb865b461acc4..ca765188a9814 100644 |
1058 | --- a/include/linux/device.h |
1059 | +++ b/include/linux/device.h |
1060 | @@ -812,6 +812,7 @@ struct device { |
1061 | struct dev_pin_info *pins; |
1062 | #endif |
1063 | #ifdef CONFIG_GENERIC_MSI_IRQ |
1064 | + raw_spinlock_t msi_lock; |
1065 | struct list_head msi_list; |
1066 | #endif |
1067 | |
1068 | diff --git a/include/linux/msi.h b/include/linux/msi.h |
1069 | index debc8aa4ec197..601bff9fbbec2 100644 |
1070 | --- a/include/linux/msi.h |
1071 | +++ b/include/linux/msi.h |
1072 | @@ -133,7 +133,7 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
1073 | void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); |
1074 | |
1075 | u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); |
1076 | -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
1077 | +void __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); |
1078 | void pci_msi_mask_irq(struct irq_data *data); |
1079 | void pci_msi_unmask_irq(struct irq_data *data); |
1080 | |
1081 | diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c |
1082 | index 552e00b07196e..9ec37c6c8c4aa 100644 |
1083 | --- a/net/bluetooth/hidp/core.c |
1084 | +++ b/net/bluetooth/hidp/core.c |
1085 | @@ -1282,7 +1282,7 @@ static int hidp_session_thread(void *arg) |
1086 | |
1087 | /* cleanup runtime environment */ |
1088 | remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); |
1089 | - remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait); |
1090 | + remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); |
1091 | wake_up_interruptible(&session->report_queue); |
1092 | hidp_del_timer(session); |
1093 | |
1094 | diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c |
1095 | index 4718c528e1003..794fba20afbcd 100644 |
1096 | --- a/net/bridge/br_if.c |
1097 | +++ b/net/bridge/br_if.c |
1098 | @@ -520,6 +520,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) |
1099 | |
1100 | err = dev_set_allmulti(dev, 1); |
1101 | if (err) { |
1102 | + br_multicast_del_port(p); |
1103 | kfree(p); /* kobject not yet init'd, manually free */ |
1104 | goto err1; |
1105 | } |
1106 | @@ -624,6 +625,7 @@ err4: |
1107 | err3: |
1108 | sysfs_remove_link(br->ifobj, p->dev->name); |
1109 | err2: |
1110 | + br_multicast_del_port(p); |
1111 | kobject_put(&p->kobj); |
1112 | dev_set_allmulti(dev, -1); |
1113 | err1: |
1114 | diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h |
1115 | index 0c55ffb859bf5..121aa71fcb5cc 100644 |
1116 | --- a/net/dccp/dccp.h |
1117 | +++ b/net/dccp/dccp.h |
1118 | @@ -44,9 +44,9 @@ extern bool dccp_debug; |
1119 | #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) |
1120 | #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) |
1121 | #else |
1122 | -#define dccp_pr_debug(format, a...) |
1123 | -#define dccp_pr_debug_cat(format, a...) |
1124 | -#define dccp_debug(format, a...) |
1125 | +#define dccp_pr_debug(format, a...) do {} while (0) |
1126 | +#define dccp_pr_debug_cat(format, a...) do {} while (0) |
1127 | +#define dccp_debug(format, a...) do {} while (0) |
1128 | #endif |
1129 | |
1130 | extern struct inet_hashinfo dccp_hashinfo; |
1131 | diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c |
1132 | index f66e4afb978a7..6383627b783e0 100644 |
1133 | --- a/net/ieee802154/socket.c |
1134 | +++ b/net/ieee802154/socket.c |
1135 | @@ -987,6 +987,11 @@ static const struct proto_ops ieee802154_dgram_ops = { |
1136 | #endif |
1137 | }; |
1138 | |
1139 | +static void ieee802154_sock_destruct(struct sock *sk) |
1140 | +{ |
1141 | + skb_queue_purge(&sk->sk_receive_queue); |
1142 | +} |
1143 | + |
1144 | /* Create a socket. Initialise the socket, blank the addresses |
1145 | * set the state. |
1146 | */ |
1147 | @@ -1027,7 +1032,7 @@ static int ieee802154_create(struct net *net, struct socket *sock, |
1148 | sock->ops = ops; |
1149 | |
1150 | sock_init_data(sock, sk); |
1151 | - /* FIXME: sk->sk_destruct */ |
1152 | + sk->sk_destruct = ieee802154_sock_destruct; |
1153 | sk->sk_family = PF_IEEE802154; |
1154 | |
1155 | /* Checksums on by default */ |
1156 | diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c |
1157 | index c22da42376fe9..47f40e1050445 100644 |
1158 | --- a/net/ipv4/tcp_bbr.c |
1159 | +++ b/net/ipv4/tcp_bbr.c |
1160 | @@ -811,7 +811,7 @@ static void bbr_init(struct sock *sk) |
1161 | bbr->prior_cwnd = 0; |
1162 | bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */ |
1163 | bbr->rtt_cnt = 0; |
1164 | - bbr->next_rtt_delivered = 0; |
1165 | + bbr->next_rtt_delivered = tp->delivered; |
1166 | bbr->prev_ca_state = TCP_CA_Open; |
1167 | bbr->packet_conservation = 0; |
1168 | |
1169 | diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c |
1170 | index 14ec63a026693..91b94ac9a88a4 100644 |
1171 | --- a/net/mac80211/debugfs_sta.c |
1172 | +++ b/net/mac80211/debugfs_sta.c |
1173 | @@ -80,6 +80,7 @@ static const char * const sta_flag_names[] = { |
1174 | FLAG(MPSP_OWNER), |
1175 | FLAG(MPSP_RECIPIENT), |
1176 | FLAG(PS_DELIVER), |
1177 | + FLAG(USES_ENCRYPTION), |
1178 | #undef FLAG |
1179 | }; |
1180 | |
1181 | diff --git a/net/mac80211/key.c b/net/mac80211/key.c |
1182 | index 4e23f240f599e..a0d9507cb6a71 100644 |
1183 | --- a/net/mac80211/key.c |
1184 | +++ b/net/mac80211/key.c |
1185 | @@ -334,6 +334,7 @@ static void ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, |
1186 | if (sta) { |
1187 | if (pairwise) { |
1188 | rcu_assign_pointer(sta->ptk[idx], new); |
1189 | + set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION); |
1190 | sta->ptk_idx = idx; |
1191 | ieee80211_check_fast_xmit(sta); |
1192 | } else { |
1193 | diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h |
1194 | index fd31c4db12821..0909332965bc8 100644 |
1195 | --- a/net/mac80211/sta_info.h |
1196 | +++ b/net/mac80211/sta_info.h |
1197 | @@ -100,6 +100,7 @@ enum ieee80211_sta_info_flags { |
1198 | WLAN_STA_MPSP_OWNER, |
1199 | WLAN_STA_MPSP_RECIPIENT, |
1200 | WLAN_STA_PS_DELIVER, |
1201 | + WLAN_STA_USES_ENCRYPTION, |
1202 | |
1203 | NUM_WLAN_STA_FLAGS, |
1204 | }; |
1205 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
1206 | index eebbddccb47b7..48d0dd0beaa5f 100644 |
1207 | --- a/net/mac80211/tx.c |
1208 | +++ b/net/mac80211/tx.c |
1209 | @@ -588,10 +588,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) |
1210 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); |
1211 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; |
1212 | |
1213 | - if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) |
1214 | + if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) { |
1215 | tx->key = NULL; |
1216 | - else if (tx->sta && |
1217 | - (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) |
1218 | + return TX_CONTINUE; |
1219 | + } |
1220 | + |
1221 | + if (tx->sta && |
1222 | + (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) |
1223 | tx->key = key; |
1224 | else if (ieee80211_is_group_privacy_action(tx->skb) && |
1225 | (key = rcu_dereference(tx->sdata->default_multicast_key))) |
1226 | @@ -652,6 +655,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) |
1227 | if (!skip_hw && tx->key && |
1228 | tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) |
1229 | info->control.hw_key = &tx->key->conf; |
1230 | + } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta && |
1231 | + test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) { |
1232 | + return TX_DROP; |
1233 | } |
1234 | |
1235 | return TX_CONTINUE; |
1236 | diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c |
1237 | index 8d99ac931ff6b..c29f7ff5ccd2d 100644 |
1238 | --- a/sound/pci/hda/hda_generic.c |
1239 | +++ b/sound/pci/hda/hda_generic.c |
1240 | @@ -3421,7 +3421,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, |
1241 | struct hda_gen_spec *spec = codec->spec; |
1242 | const struct hda_input_mux *imux; |
1243 | struct nid_path *path; |
1244 | - int i, adc_idx, err = 0; |
1245 | + int i, adc_idx, ret, err = 0; |
1246 | |
1247 | imux = &spec->input_mux; |
1248 | adc_idx = kcontrol->id.index; |
1249 | @@ -3431,9 +3431,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol, |
1250 | if (!path || !path->ctls[type]) |
1251 | continue; |
1252 | kcontrol->private_value = path->ctls[type]; |
1253 | - err = func(kcontrol, ucontrol); |
1254 | - if (err < 0) |
1255 | + ret = func(kcontrol, ucontrol); |
1256 | + if (ret < 0) { |
1257 | + err = ret; |
1258 | break; |
1259 | + } |
1260 | + if (ret > 0) |
1261 | + err = 1; |
1262 | } |
1263 | mutex_unlock(&codec->control_mutex); |
1264 | if (err >= 0 && spec->cap_sync_hook) |
1265 | diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1266 | index d812cbf41b944..1b6dedfc33e3d 100644 |
1267 | --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1268 | +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c |
1269 | @@ -135,7 +135,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream, |
1270 | snd_pcm_uframes_t period_size; |
1271 | ssize_t periodbytes; |
1272 | ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream); |
1273 | - u32 buffer_addr = virt_to_phys(substream->dma_buffer.area); |
1274 | + u32 buffer_addr = virt_to_phys(substream->runtime->dma_area); |
1275 | |
1276 | channels = substream->runtime->channels; |
1277 | period_size = substream->runtime->period_size; |
1278 | @@ -241,7 +241,6 @@ static int sst_platform_alloc_stream(struct snd_pcm_substream *substream, |
1279 | /* set codec params and inform SST driver the same */ |
1280 | sst_fill_pcm_params(substream, ¶m); |
1281 | sst_fill_alloc_params(substream, &alloc_params); |
1282 | - substream->runtime->dma_area = substream->dma_buffer.area; |
1283 | str_params.sparams = param; |
1284 | str_params.aparams = alloc_params; |
1285 | str_params.codec = SST_CODEC_TYPE_PCM; |