Contents of /trunk/kernel-alx/patches-4.9/0193-4.9.94-all-fixes.patch
Parent Directory | Revision Log
Revision 3170 -
(show annotations)
(download)
Wed Aug 8 14:17:25 2018 UTC (6 years, 1 month ago) by niro
File size: 313734 byte(s)
-linux-4.9.94
1 | diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt |
2 | index 2b7b3fa588d7..606da38c0959 100644 |
3 | --- a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt |
4 | +++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt |
5 | @@ -1,11 +1,14 @@ |
6 | -* Amlogic Meson8b Clock and Reset Unit |
7 | +* Amlogic Meson8, Meson8b and Meson8m2 Clock and Reset Unit |
8 | |
9 | -The Amlogic Meson8b clock controller generates and supplies clock to various |
10 | -controllers within the SoC. |
11 | +The Amlogic Meson8 / Meson8b / Meson8m2 clock controller generates and |
12 | +supplies clock to various controllers within the SoC. |
13 | |
14 | Required Properties: |
15 | |
16 | -- compatible: should be "amlogic,meson8b-clkc" |
17 | +- compatible: must be one of: |
18 | + - "amlogic,meson8-clkc" for Meson8 (S802) SoCs |
19 | + - "amlogic,meson8b-clkc" for Meson8 (S805) SoCs |
20 | + - "amlogic,meson8m2-clkc" for Meson8m2 (S812) SoCs |
21 | - reg: it must be composed by two tuples: |
22 | 0) physical base address of the xtal register and length of memory |
23 | mapped region. |
24 | diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt |
25 | index 4f7ae7555758..bda9d6fab6b4 100644 |
26 | --- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt |
27 | +++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt |
28 | @@ -47,10 +47,13 @@ Required properties: |
29 | Documentation/devicetree/bindings/media/video-interfaces.txt. The |
30 | first port should be the input endpoint, the second one the output |
31 | |
32 | - The output should have two endpoints. The first is the block |
33 | - connected to the TCON channel 0 (usually a panel or a bridge), the |
34 | - second the block connected to the TCON channel 1 (usually the TV |
35 | - encoder) |
36 | + The output may have multiple endpoints. The TCON has two channels, |
37 | + usually with the first channel being used for the panels interfaces |
38 | + (RGB, LVDS, etc.), and the second being used for the outputs that |
39 | + require another controller (TV Encoder, HDMI, etc.). The endpoints |
40 | + will take an extra property, allwinner,tcon-channel, to specify the |
41 | + channel the endpoint is associated to. If that property is not |
42 | + present, the endpoint number will be used as the channel number. |
43 | |
44 | On SoCs other than the A33, there is one more clock required: |
45 | - 'tcon-ch1': The clock driving the TCON channel 1 |
46 | diff --git a/Makefile b/Makefile |
47 | index f5cf4159fc20..02188cf8e9af 100644 |
48 | --- a/Makefile |
49 | +++ b/Makefile |
50 | @@ -1,6 +1,6 @@ |
51 | VERSION = 4 |
52 | PATCHLEVEL = 9 |
53 | -SUBLEVEL = 93 |
54 | +SUBLEVEL = 94 |
55 | EXTRAVERSION = |
56 | NAME = Roaring Lionus |
57 | |
58 | diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts |
59 | index 96d7eede412e..036c9bd9bf75 100644 |
60 | --- a/arch/arm/boot/dts/imx53-qsrb.dts |
61 | +++ b/arch/arm/boot/dts/imx53-qsrb.dts |
62 | @@ -23,7 +23,7 @@ |
63 | imx53-qsrb { |
64 | pinctrl_pmic: pmicgrp { |
65 | fsl,pins = < |
66 | - MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ |
67 | + MX53_PAD_CSI0_DAT5__GPIO5_23 0x1c4 /* IRQ */ |
68 | >; |
69 | }; |
70 | }; |
71 | diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi |
72 | index 2b9c2be436f9..47c955458a77 100644 |
73 | --- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi |
74 | +++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi |
75 | @@ -88,6 +88,7 @@ |
76 | clocks = <&clks IMX6QDL_CLK_CKO>; |
77 | VDDA-supply = <®_2p5v>; |
78 | VDDIO-supply = <®_3p3v>; |
79 | + lrclk-strength = <3>; |
80 | }; |
81 | }; |
82 | |
83 | diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi |
84 | index 368e21934285..825f6eae3d1c 100644 |
85 | --- a/arch/arm/boot/dts/ls1021a.dtsi |
86 | +++ b/arch/arm/boot/dts/ls1021a.dtsi |
87 | @@ -146,7 +146,7 @@ |
88 | }; |
89 | |
90 | esdhc: esdhc@1560000 { |
91 | - compatible = "fsl,esdhc"; |
92 | + compatible = "fsl,ls1021a-esdhc", "fsl,esdhc"; |
93 | reg = <0x0 0x1560000 0x0 0x10000>; |
94 | interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; |
95 | clock-frequency = <0>; |
96 | diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi |
97 | index b7a24af8f47b..4b7d97275c62 100644 |
98 | --- a/arch/arm/boot/dts/qcom-ipq4019.dtsi |
99 | +++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi |
100 | @@ -154,10 +154,10 @@ |
101 | |
102 | i2c_0: i2c@78b7000 { |
103 | compatible = "qcom,i2c-qup-v2.2.1"; |
104 | - reg = <0x78b7000 0x6000>; |
105 | + reg = <0x78b7000 0x600>; |
106 | interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>; |
107 | clocks = <&gcc GCC_BLSP1_AHB_CLK>, |
108 | - <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>; |
109 | + <&gcc GCC_BLSP1_QUP1_I2C_APPS_CLK>; |
110 | clock-names = "iface", "core"; |
111 | #address-cells = <1>; |
112 | #size-cells = <0>; |
113 | diff --git a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts |
114 | index 7885075428bb..1788e186a512 100644 |
115 | --- a/arch/arm/boot/dts/r8a7740-armadillo800eva.dts |
116 | +++ b/arch/arm/boot/dts/r8a7740-armadillo800eva.dts |
117 | @@ -266,7 +266,9 @@ |
118 | lcd0_pins: lcd0 { |
119 | groups = "lcd0_data24_0", "lcd0_lclk_1", "lcd0_sync"; |
120 | function = "lcd0"; |
121 | + }; |
122 | |
123 | + lcd0_mux { |
124 | /* DBGMD/LCDC0/FSIA MUX */ |
125 | gpio-hog; |
126 | gpios = <176 0>; |
127 | diff --git a/arch/arm/boot/dts/rk322x.dtsi b/arch/arm/boot/dts/rk322x.dtsi |
128 | index 9e6bf0e311bb..2679dc80f831 100644 |
129 | --- a/arch/arm/boot/dts/rk322x.dtsi |
130 | +++ b/arch/arm/boot/dts/rk322x.dtsi |
131 | @@ -617,9 +617,9 @@ |
132 | <0 12 RK_FUNC_1 &pcfg_pull_none>, |
133 | <0 13 RK_FUNC_1 &pcfg_pull_none>, |
134 | <0 14 RK_FUNC_1 &pcfg_pull_none>, |
135 | - <1 2 RK_FUNC_1 &pcfg_pull_none>, |
136 | - <1 4 RK_FUNC_1 &pcfg_pull_none>, |
137 | - <1 5 RK_FUNC_1 &pcfg_pull_none>; |
138 | + <1 2 RK_FUNC_2 &pcfg_pull_none>, |
139 | + <1 4 RK_FUNC_2 &pcfg_pull_none>, |
140 | + <1 5 RK_FUNC_2 &pcfg_pull_none>; |
141 | }; |
142 | }; |
143 | |
144 | diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h |
145 | index 71e473d05fcc..620dc75362e5 100644 |
146 | --- a/arch/arm/include/asm/xen/events.h |
147 | +++ b/arch/arm/include/asm/xen/events.h |
148 | @@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs) |
149 | return raw_irqs_disabled_flags(regs->ARM_cpsr); |
150 | } |
151 | |
152 | -#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \ |
153 | +#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\ |
154 | atomic64_t, \ |
155 | counter), (val)) |
156 | |
157 | diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c |
158 | index 624a510d31df..ebd2dd46adf7 100644 |
159 | --- a/arch/arm/kvm/hyp/switch.c |
160 | +++ b/arch/arm/kvm/hyp/switch.c |
161 | @@ -237,8 +237,10 @@ void __hyp_text __noreturn __hyp_panic(int cause) |
162 | |
163 | vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); |
164 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); |
165 | + __timer_save_state(vcpu); |
166 | __deactivate_traps(vcpu); |
167 | __deactivate_vm(vcpu); |
168 | + __banked_restore_state(host_ctxt); |
169 | __sysreg_restore_state(host_ctxt); |
170 | } |
171 | |
172 | diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c |
173 | index add3771d38f6..9a22d40602aa 100644 |
174 | --- a/arch/arm/mach-davinci/devices-da8xx.c |
175 | +++ b/arch/arm/mach-davinci/devices-da8xx.c |
176 | @@ -821,6 +821,8 @@ static struct platform_device da8xx_dsp = { |
177 | .resource = da8xx_rproc_resources, |
178 | }; |
179 | |
180 | +static bool rproc_mem_inited __initdata; |
181 | + |
182 | #if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC) |
183 | |
184 | static phys_addr_t rproc_base __initdata; |
185 | @@ -859,6 +861,8 @@ void __init da8xx_rproc_reserve_cma(void) |
186 | ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0); |
187 | if (ret) |
188 | pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret); |
189 | + else |
190 | + rproc_mem_inited = true; |
191 | } |
192 | |
193 | #else |
194 | @@ -873,6 +877,12 @@ int __init da8xx_register_rproc(void) |
195 | { |
196 | int ret; |
197 | |
198 | + if (!rproc_mem_inited) { |
199 | + pr_warn("%s: memory not reserved for DSP, not registering DSP device\n", |
200 | + __func__); |
201 | + return -ENOMEM; |
202 | + } |
203 | + |
204 | ret = platform_device_register(&da8xx_dsp); |
205 | if (ret) |
206 | pr_err("%s: can't register DSP device: %d\n", __func__, ret); |
207 | diff --git a/arch/arm/mach-imx/cpu.c b/arch/arm/mach-imx/cpu.c |
208 | index b3347d32349f..94906ed49392 100644 |
209 | --- a/arch/arm/mach-imx/cpu.c |
210 | +++ b/arch/arm/mach-imx/cpu.c |
211 | @@ -131,6 +131,9 @@ struct device * __init imx_soc_device_init(void) |
212 | case MXC_CPU_IMX6UL: |
213 | soc_id = "i.MX6UL"; |
214 | break; |
215 | + case MXC_CPU_IMX6ULL: |
216 | + soc_id = "i.MX6ULL"; |
217 | + break; |
218 | case MXC_CPU_IMX7D: |
219 | soc_id = "i.MX7D"; |
220 | break; |
221 | diff --git a/arch/arm/mach-imx/mxc.h b/arch/arm/mach-imx/mxc.h |
222 | index 34f2ff62583c..e00d6260c3df 100644 |
223 | --- a/arch/arm/mach-imx/mxc.h |
224 | +++ b/arch/arm/mach-imx/mxc.h |
225 | @@ -39,6 +39,7 @@ |
226 | #define MXC_CPU_IMX6SX 0x62 |
227 | #define MXC_CPU_IMX6Q 0x63 |
228 | #define MXC_CPU_IMX6UL 0x64 |
229 | +#define MXC_CPU_IMX6ULL 0x65 |
230 | #define MXC_CPU_IMX7D 0x72 |
231 | |
232 | #define IMX_DDR_TYPE_LPDDR2 1 |
233 | @@ -73,6 +74,11 @@ static inline bool cpu_is_imx6ul(void) |
234 | return __mxc_cpu_type == MXC_CPU_IMX6UL; |
235 | } |
236 | |
237 | +static inline bool cpu_is_imx6ull(void) |
238 | +{ |
239 | + return __mxc_cpu_type == MXC_CPU_IMX6ULL; |
240 | +} |
241 | + |
242 | static inline bool cpu_is_imx6q(void) |
243 | { |
244 | return __mxc_cpu_type == MXC_CPU_IMX6Q; |
245 | diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h |
246 | index f2585cdd32c2..20dcb196b240 100644 |
247 | --- a/arch/arm64/include/asm/futex.h |
248 | +++ b/arch/arm64/include/asm/futex.h |
249 | @@ -51,16 +51,16 @@ |
250 | : "memory") |
251 | |
252 | static inline int |
253 | -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
254 | +futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) |
255 | { |
256 | int op = (encoded_op >> 28) & 7; |
257 | int cmp = (encoded_op >> 24) & 15; |
258 | - int oparg = (encoded_op << 8) >> 20; |
259 | - int cmparg = (encoded_op << 20) >> 20; |
260 | + int oparg = (int)(encoded_op << 8) >> 20; |
261 | + int cmparg = (int)(encoded_op << 20) >> 20; |
262 | int oldval = 0, ret, tmp; |
263 | |
264 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
265 | - oparg = 1 << oparg; |
266 | + oparg = 1U << (oparg & 0x1f); |
267 | |
268 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
269 | return -EFAULT; |
270 | diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c |
271 | index 409abc45bdb6..1b3eb67edefb 100644 |
272 | --- a/arch/arm64/kernel/pci.c |
273 | +++ b/arch/arm64/kernel/pci.c |
274 | @@ -175,8 +175,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) |
275 | return NULL; |
276 | |
277 | root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node); |
278 | - if (!root_ops) |
279 | + if (!root_ops) { |
280 | + kfree(ri); |
281 | return NULL; |
282 | + } |
283 | |
284 | ri->cfg = pci_acpi_setup_ecam_mapping(root); |
285 | if (!ri->cfg) { |
286 | diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c |
287 | index 57ae9d9ed9bb..199a23f058d5 100644 |
288 | --- a/arch/arm64/kernel/perf_event.c |
289 | +++ b/arch/arm64/kernel/perf_event.c |
290 | @@ -871,15 +871,24 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, |
291 | |
292 | if (attr->exclude_idle) |
293 | return -EPERM; |
294 | - if (is_kernel_in_hyp_mode() && |
295 | - attr->exclude_kernel != attr->exclude_hv) |
296 | - return -EINVAL; |
297 | + |
298 | + /* |
299 | + * If we're running in hyp mode, then we *are* the hypervisor. |
300 | + * Therefore we ignore exclude_hv in this configuration, since |
301 | + * there's no hypervisor to sample anyway. This is consistent |
302 | + * with other architectures (x86 and Power). |
303 | + */ |
304 | + if (is_kernel_in_hyp_mode()) { |
305 | + if (!attr->exclude_kernel) |
306 | + config_base |= ARMV8_PMU_INCLUDE_EL2; |
307 | + } else { |
308 | + if (attr->exclude_kernel) |
309 | + config_base |= ARMV8_PMU_EXCLUDE_EL1; |
310 | + if (!attr->exclude_hv) |
311 | + config_base |= ARMV8_PMU_INCLUDE_EL2; |
312 | + } |
313 | if (attr->exclude_user) |
314 | config_base |= ARMV8_PMU_EXCLUDE_EL0; |
315 | - if (!is_kernel_in_hyp_mode() && attr->exclude_kernel) |
316 | - config_base |= ARMV8_PMU_EXCLUDE_EL1; |
317 | - if (!attr->exclude_hv) |
318 | - config_base |= ARMV8_PMU_INCLUDE_EL2; |
319 | |
320 | /* |
321 | * Install the filter into config_base as this is used to |
322 | diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c |
323 | index 0c848c18ca44..9174ba917d65 100644 |
324 | --- a/arch/arm64/kvm/hyp/switch.c |
325 | +++ b/arch/arm64/kvm/hyp/switch.c |
326 | @@ -404,6 +404,7 @@ void __hyp_text __noreturn __hyp_panic(void) |
327 | |
328 | vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); |
329 | host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); |
330 | + __timer_save_state(vcpu); |
331 | __deactivate_traps(vcpu); |
332 | __deactivate_vm(vcpu); |
333 | __sysreg_restore_host_state(host_ctxt); |
334 | diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c |
335 | index 01c171723bb3..caf75abf6ae7 100644 |
336 | --- a/arch/arm64/mm/mmap.c |
337 | +++ b/arch/arm64/mm/mmap.c |
338 | @@ -18,6 +18,7 @@ |
339 | |
340 | #include <linux/elf.h> |
341 | #include <linux/fs.h> |
342 | +#include <linux/memblock.h> |
343 | #include <linux/mm.h> |
344 | #include <linux/mman.h> |
345 | #include <linux/export.h> |
346 | @@ -102,12 +103,18 @@ void arch_pick_mmap_layout(struct mm_struct *mm) |
347 | */ |
348 | int valid_phys_addr_range(phys_addr_t addr, size_t size) |
349 | { |
350 | - if (addr < PHYS_OFFSET) |
351 | - return 0; |
352 | - if (addr + size > __pa(high_memory - 1) + 1) |
353 | - return 0; |
354 | - |
355 | - return 1; |
356 | + /* |
357 | + * Check whether addr is covered by a memory region without the |
358 | + * MEMBLOCK_NOMAP attribute, and whether that region covers the |
359 | + * entire range. In theory, this could lead to false negatives |
360 | + * if the range is covered by distinct but adjacent memory regions |
361 | + * that only differ in other attributes. However, few of such |
362 | + * attributes have been defined, and it is debatable whether it |
363 | + * follows that /dev/mem read() calls should be able traverse |
364 | + * such boundaries. |
365 | + */ |
366 | + return memblock_is_region_memory(addr, size) && |
367 | + memblock_is_map_memory(addr); |
368 | } |
369 | |
370 | /* |
371 | diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h |
372 | index daba1f9a4f79..174aedce3167 100644 |
373 | --- a/arch/mips/include/asm/kprobes.h |
374 | +++ b/arch/mips/include/asm/kprobes.h |
375 | @@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t; |
376 | |
377 | #define flush_insn_slot(p) \ |
378 | do { \ |
379 | - flush_icache_range((unsigned long)p->addr, \ |
380 | + if (p->addr) \ |
381 | + flush_icache_range((unsigned long)p->addr, \ |
382 | (unsigned long)p->addr + \ |
383 | (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ |
384 | } while (0) |
385 | diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h |
386 | index d21f3da7bdb6..c0be540e83cb 100644 |
387 | --- a/arch/mips/include/asm/pgtable-32.h |
388 | +++ b/arch/mips/include/asm/pgtable-32.h |
389 | @@ -18,6 +18,10 @@ |
390 | |
391 | #include <asm-generic/pgtable-nopmd.h> |
392 | |
393 | +#ifdef CONFIG_HIGHMEM |
394 | +#include <asm/highmem.h> |
395 | +#endif |
396 | + |
397 | extern int temp_tlb_entry; |
398 | |
399 | /* |
400 | @@ -61,7 +65,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, |
401 | |
402 | #define VMALLOC_START MAP_BASE |
403 | |
404 | -#define PKMAP_BASE (0xfe000000UL) |
405 | +#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1)) |
406 | +#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP) |
407 | |
408 | #ifdef CONFIG_HIGHMEM |
409 | # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE) |
410 | diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c |
411 | index adc6911ba748..b19a3c506b1e 100644 |
412 | --- a/arch/mips/mm/pgtable-32.c |
413 | +++ b/arch/mips/mm/pgtable-32.c |
414 | @@ -51,15 +51,15 @@ void __init pagetable_init(void) |
415 | /* |
416 | * Fixed mappings: |
417 | */ |
418 | - vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; |
419 | - fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base); |
420 | + vaddr = __fix_to_virt(__end_of_fixed_addresses - 1); |
421 | + fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base); |
422 | |
423 | #ifdef CONFIG_HIGHMEM |
424 | /* |
425 | * Permanent kmaps: |
426 | */ |
427 | vaddr = PKMAP_BASE; |
428 | - fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); |
429 | + fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); |
430 | |
431 | pgd = swapper_pg_dir + __pgd_offset(vaddr); |
432 | pud = pud_offset(pgd, vaddr); |
433 | diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h |
434 | index cd4ffd86765f..bb9073a2b2ae 100644 |
435 | --- a/arch/powerpc/include/asm/module.h |
436 | +++ b/arch/powerpc/include/asm/module.h |
437 | @@ -14,6 +14,10 @@ |
438 | #include <asm-generic/module.h> |
439 | |
440 | |
441 | +#ifdef CC_USING_MPROFILE_KERNEL |
442 | +#define MODULE_ARCH_VERMAGIC "mprofile-kernel" |
443 | +#endif |
444 | + |
445 | #ifndef __powerpc64__ |
446 | /* |
447 | * Thanks to Paul M for explaining this. |
448 | diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h |
449 | index 56398e7e6100..71c69883125a 100644 |
450 | --- a/arch/powerpc/include/asm/page.h |
451 | +++ b/arch/powerpc/include/asm/page.h |
452 | @@ -132,7 +132,19 @@ extern long long virt_phys_offset; |
453 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) |
454 | #define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) |
455 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
456 | + |
457 | +#ifdef CONFIG_PPC_BOOK3S_64 |
458 | +/* |
459 | + * On hash the vmalloc and other regions alias to the kernel region when passed |
460 | + * through __pa(), which virt_to_pfn() uses. That means virt_addr_valid() can |
461 | + * return true for some vmalloc addresses, which is incorrect. So explicitly |
462 | + * check that the address is in the kernel region. |
463 | + */ |
464 | +#define virt_addr_valid(kaddr) (REGION_ID(kaddr) == KERNEL_REGION_ID && \ |
465 | + pfn_valid(virt_to_pfn(kaddr))) |
466 | +#else |
467 | #define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr)) |
468 | +#endif |
469 | |
470 | /* |
471 | * On Book-E parts we need __va to parse the device tree and we can't |
472 | diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c |
473 | index f1d7e996e673..ab7b661b6da3 100644 |
474 | --- a/arch/powerpc/kernel/time.c |
475 | +++ b/arch/powerpc/kernel/time.c |
476 | @@ -719,12 +719,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val) |
477 | static void start_cpu_decrementer(void) |
478 | { |
479 | #if defined(CONFIG_BOOKE) || defined(CONFIG_40x) |
480 | + unsigned int tcr; |
481 | + |
482 | /* Clear any pending timer interrupts */ |
483 | mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS); |
484 | |
485 | - /* Enable decrementer interrupt */ |
486 | - mtspr(SPRN_TCR, TCR_DIE); |
487 | -#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */ |
488 | + tcr = mfspr(SPRN_TCR); |
489 | + /* |
490 | + * The watchdog may have already been enabled by u-boot. So leave |
491 | + * TRC[WP] (Watchdog Period) alone. |
492 | + */ |
493 | + tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */ |
494 | + tcr |= TCR_DIE; /* Enable decrementer */ |
495 | + mtspr(SPRN_TCR, tcr); |
496 | +#endif |
497 | } |
498 | |
499 | void __init generic_calibrate_decr(void) |
500 | diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c |
501 | index 02176fd52f84..286a3b051ff6 100644 |
502 | --- a/arch/powerpc/kvm/book3s_pr_papr.c |
503 | +++ b/arch/powerpc/kvm/book3s_pr_papr.c |
504 | @@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) |
505 | pteg_addr = get_pteg_addr(vcpu, pte_index); |
506 | |
507 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
508 | - copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); |
509 | + ret = H_FUNCTION; |
510 | + if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg))) |
511 | + goto done; |
512 | hpte = pteg; |
513 | |
514 | ret = H_PTEG_FULL; |
515 | @@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) |
516 | hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); |
517 | hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); |
518 | pteg_addr += i * HPTE_SIZE; |
519 | - copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); |
520 | + ret = H_FUNCTION; |
521 | + if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE)) |
522 | + goto done; |
523 | kvmppc_set_gpr(vcpu, 4, pte_index | i); |
524 | ret = H_SUCCESS; |
525 | |
526 | @@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) |
527 | |
528 | pteg = get_pteg_addr(vcpu, pte_index); |
529 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
530 | - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
531 | + ret = H_FUNCTION; |
532 | + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) |
533 | + goto done; |
534 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
535 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
536 | |
537 | @@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) |
538 | ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) |
539 | goto done; |
540 | |
541 | - copy_to_user((void __user *)pteg, &v, sizeof(v)); |
542 | + ret = H_FUNCTION; |
543 | + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) |
544 | + goto done; |
545 | |
546 | rb = compute_tlbie_rb(pte[0], pte[1], pte_index); |
547 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
548 | @@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) |
549 | } |
550 | |
551 | pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); |
552 | - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
553 | + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) { |
554 | + ret = H_FUNCTION; |
555 | + break; |
556 | + } |
557 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
558 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
559 | |
560 | @@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) |
561 | tsh |= H_BULK_REMOVE_NOT_FOUND; |
562 | } else { |
563 | /* Splat the pteg in (userland) hpt */ |
564 | - copy_to_user((void __user *)pteg, &v, sizeof(v)); |
565 | + if (copy_to_user((void __user *)pteg, &v, sizeof(v))) { |
566 | + ret = H_FUNCTION; |
567 | + break; |
568 | + } |
569 | |
570 | rb = compute_tlbie_rb(pte[0], pte[1], |
571 | tsh & H_BULK_REMOVE_PTEX); |
572 | @@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) |
573 | |
574 | pteg = get_pteg_addr(vcpu, pte_index); |
575 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
576 | - copy_from_user(pte, (void __user *)pteg, sizeof(pte)); |
577 | + ret = H_FUNCTION; |
578 | + if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) |
579 | + goto done; |
580 | pte[0] = be64_to_cpu((__force __be64)pte[0]); |
581 | pte[1] = be64_to_cpu((__force __be64)pte[1]); |
582 | |
583 | @@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) |
584 | vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); |
585 | pte[0] = (__force u64)cpu_to_be64(pte[0]); |
586 | pte[1] = (__force u64)cpu_to_be64(pte[1]); |
587 | - copy_to_user((void __user *)pteg, pte, sizeof(pte)); |
588 | + ret = H_FUNCTION; |
589 | + if (copy_to_user((void __user *)pteg, pte, sizeof(pte))) |
590 | + goto done; |
591 | ret = H_SUCCESS; |
592 | |
593 | done: |
594 | diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c |
595 | index 85c85eb3e245..b4abf9d5d9e1 100644 |
596 | --- a/arch/powerpc/platforms/cell/spufs/coredump.c |
597 | +++ b/arch/powerpc/platforms/cell/spufs/coredump.c |
598 | @@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i, |
599 | skip = roundup(cprm->pos - total + sz, 4) - cprm->pos; |
600 | if (!dump_skip(cprm, skip)) |
601 | goto Eio; |
602 | + |
603 | + rc = 0; |
604 | out: |
605 | free_page((unsigned long)buf); |
606 | return rc; |
607 | diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c |
608 | index 3e828b20c21e..2842f9d63d21 100644 |
609 | --- a/arch/powerpc/sysdev/mpc8xx_pic.c |
610 | +++ b/arch/powerpc/sysdev/mpc8xx_pic.c |
611 | @@ -79,7 +79,7 @@ unsigned int mpc8xx_get_irq(void) |
612 | irq = in_be32(&siu_reg->sc_sivec) >> 26; |
613 | |
614 | if (irq == PIC_VEC_SPURRIOUS) |
615 | - irq = 0; |
616 | + return 0; |
617 | |
618 | return irq_linear_revmap(mpc8xx_pic_host, irq); |
619 | |
620 | diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S |
621 | index 3667d20e997f..115bda280d50 100644 |
622 | --- a/arch/s390/kernel/vmlinux.lds.S |
623 | +++ b/arch/s390/kernel/vmlinux.lds.S |
624 | @@ -31,8 +31,14 @@ SECTIONS |
625 | { |
626 | . = 0x00000000; |
627 | .text : { |
628 | - _text = .; /* Text and read-only data */ |
629 | + /* Text and read-only data */ |
630 | HEAD_TEXT |
631 | + /* |
632 | + * E.g. perf doesn't like symbols starting at address zero, |
633 | + * therefore skip the initial PSW and channel program located |
634 | + * at address zero and let _text start at 0x200. |
635 | + */ |
636 | + _text = 0x200; |
637 | TEXT_TEXT |
638 | SCHED_TEXT |
639 | CPUIDLE_TEXT |
640 | diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c |
641 | index 59d503866431..9cc600b2d68c 100644 |
642 | --- a/arch/sparc/kernel/ldc.c |
643 | +++ b/arch/sparc/kernel/ldc.c |
644 | @@ -1733,9 +1733,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size) |
645 | |
646 | lp->rcv_nxt = p->seqid; |
647 | |
648 | + /* |
649 | + * If this is a control-only packet, there is nothing |
650 | + * else to do but advance the rx queue since the packet |
651 | + * was already processed above. |
652 | + */ |
653 | if (!(p->type & LDC_DATA)) { |
654 | new = rx_advance(lp, new); |
655 | - goto no_data; |
656 | + break; |
657 | } |
658 | if (p->stype & (LDC_ACK | LDC_NACK)) { |
659 | err = data_ack_nack(lp, p); |
660 | diff --git a/arch/x86/boot/compressed/error.h b/arch/x86/boot/compressed/error.h |
661 | index 2e59dac07f9e..d732e608e3af 100644 |
662 | --- a/arch/x86/boot/compressed/error.h |
663 | +++ b/arch/x86/boot/compressed/error.h |
664 | @@ -1,7 +1,9 @@ |
665 | #ifndef BOOT_COMPRESSED_ERROR_H |
666 | #define BOOT_COMPRESSED_ERROR_H |
667 | |
668 | +#include <linux/compiler.h> |
669 | + |
670 | void warn(char *m); |
671 | -void error(char *m); |
672 | +void error(char *m) __noreturn; |
673 | |
674 | #endif /* BOOT_COMPRESSED_ERROR_H */ |
675 | diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h |
676 | index f35f246a26bf..8d8c24f3a963 100644 |
677 | --- a/arch/x86/include/asm/asm.h |
678 | +++ b/arch/x86/include/asm/asm.h |
679 | @@ -34,6 +34,7 @@ |
680 | #define _ASM_ADD __ASM_SIZE(add) |
681 | #define _ASM_SUB __ASM_SIZE(sub) |
682 | #define _ASM_XADD __ASM_SIZE(xadd) |
683 | +#define _ASM_MUL __ASM_SIZE(mul) |
684 | |
685 | #define _ASM_AX __ASM_REG(ax) |
686 | #define _ASM_BX __ASM_REG(bx) |
687 | diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c |
688 | index d07a9390023e..bbfb03eccb7f 100644 |
689 | --- a/arch/x86/kernel/tsc.c |
690 | +++ b/arch/x86/kernel/tsc.c |
691 | @@ -366,6 +366,8 @@ static int __init tsc_setup(char *str) |
692 | tsc_clocksource_reliable = 1; |
693 | if (!strncmp(str, "noirqtime", 9)) |
694 | no_sched_irq_time = 1; |
695 | + if (!strcmp(str, "unstable")) |
696 | + mark_tsc_unstable("boot parameter"); |
697 | return 1; |
698 | } |
699 | |
700 | diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c |
701 | index b24b3c6d686e..5c3d416fff17 100644 |
702 | --- a/arch/x86/kvm/lapic.c |
703 | +++ b/arch/x86/kvm/lapic.c |
704 | @@ -1363,8 +1363,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); |
705 | |
706 | static void cancel_hv_tscdeadline(struct kvm_lapic *apic) |
707 | { |
708 | + preempt_disable(); |
709 | kvm_x86_ops->cancel_hv_timer(apic->vcpu); |
710 | apic->lapic_timer.hv_timer_in_use = false; |
711 | + preempt_enable(); |
712 | } |
713 | |
714 | void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu) |
715 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
716 | index 8c99f2fbae80..aaa93b4b0380 100644 |
717 | --- a/arch/x86/kvm/svm.c |
718 | +++ b/arch/x86/kvm/svm.c |
719 | @@ -1879,6 +1879,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, |
720 | */ |
721 | if (var->unusable) |
722 | var->db = 0; |
723 | + /* This is symmetric with svm_set_segment() */ |
724 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
725 | break; |
726 | } |
727 | @@ -2024,18 +2025,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, |
728 | s->base = var->base; |
729 | s->limit = var->limit; |
730 | s->selector = var->selector; |
731 | - if (var->unusable) |
732 | - s->attrib = 0; |
733 | - else { |
734 | - s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
735 | - s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
736 | - s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
737 | - s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; |
738 | - s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
739 | - s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
740 | - s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
741 | - s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
742 | - } |
743 | + s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
744 | + s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
745 | + s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
746 | + s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
747 | + s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
748 | + s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
749 | + s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
750 | + s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
751 | |
752 | /* |
753 | * This is always accurate, except if SYSRET returned to a segment |
754 | @@ -2044,7 +2041,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, |
755 | * would entail passing the CPL to userspace and back. |
756 | */ |
757 | if (seg == VCPU_SREG_SS) |
758 | - svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; |
759 | + /* This is symmetric with svm_get_segment() */ |
760 | + svm->vmcb->save.cpl = (var->dpl & 3); |
761 | |
762 | mark_dirty(svm->vmcb, VMCB_SEG); |
763 | } |
764 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
765 | index 7ed422e2641b..b978aeccda78 100644 |
766 | --- a/arch/x86/kvm/vmx.c |
767 | +++ b/arch/x86/kvm/vmx.c |
768 | @@ -7924,11 +7924,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, |
769 | { |
770 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
771 | int cr = exit_qualification & 15; |
772 | - int reg = (exit_qualification >> 8) & 15; |
773 | - unsigned long val = kvm_register_readl(vcpu, reg); |
774 | + int reg; |
775 | + unsigned long val; |
776 | |
777 | switch ((exit_qualification >> 4) & 3) { |
778 | case 0: /* mov to cr */ |
779 | + reg = (exit_qualification >> 8) & 15; |
780 | + val = kvm_register_readl(vcpu, reg); |
781 | switch (cr) { |
782 | case 0: |
783 | if (vmcs12->cr0_guest_host_mask & |
784 | @@ -7983,6 +7985,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, |
785 | * lmsw can change bits 1..3 of cr0, and only set bit 0 of |
786 | * cr0. Other attempted changes are ignored, with no exit. |
787 | */ |
788 | + val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; |
789 | if (vmcs12->cr0_guest_host_mask & 0xe & |
790 | (val ^ vmcs12->cr0_read_shadow)) |
791 | return true; |
792 | @@ -10661,8 +10664,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, |
793 | vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3); |
794 | } |
795 | |
796 | - if (nested_cpu_has_ept(vmcs12)) |
797 | - vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); |
798 | + vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS); |
799 | |
800 | if (nested_cpu_has_vid(vmcs12)) |
801 | vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS); |
802 | diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S |
803 | index 7e48807b2fa1..45a53dfe1859 100644 |
804 | --- a/arch/x86/lib/csum-copy_64.S |
805 | +++ b/arch/x86/lib/csum-copy_64.S |
806 | @@ -55,7 +55,7 @@ ENTRY(csum_partial_copy_generic) |
807 | movq %r12, 3*8(%rsp) |
808 | movq %r14, 4*8(%rsp) |
809 | movq %r13, 5*8(%rsp) |
810 | - movq %rbp, 6*8(%rsp) |
811 | + movq %r15, 6*8(%rsp) |
812 | |
813 | movq %r8, (%rsp) |
814 | movq %r9, 1*8(%rsp) |
815 | @@ -74,7 +74,7 @@ ENTRY(csum_partial_copy_generic) |
816 | /* main loop. clear in 64 byte blocks */ |
817 | /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */ |
818 | /* r11: temp3, rdx: temp4, r12 loopcnt */ |
819 | - /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */ |
820 | + /* r10: temp5, r15: temp6, r14 temp7, r13 temp8 */ |
821 | .p2align 4 |
822 | .Lloop: |
823 | source |
824 | @@ -89,7 +89,7 @@ ENTRY(csum_partial_copy_generic) |
825 | source |
826 | movq 32(%rdi), %r10 |
827 | source |
828 | - movq 40(%rdi), %rbp |
829 | + movq 40(%rdi), %r15 |
830 | source |
831 | movq 48(%rdi), %r14 |
832 | source |
833 | @@ -103,7 +103,7 @@ ENTRY(csum_partial_copy_generic) |
834 | adcq %r11, %rax |
835 | adcq %rdx, %rax |
836 | adcq %r10, %rax |
837 | - adcq %rbp, %rax |
838 | + adcq %r15, %rax |
839 | adcq %r14, %rax |
840 | adcq %r13, %rax |
841 | |
842 | @@ -121,7 +121,7 @@ ENTRY(csum_partial_copy_generic) |
843 | dest |
844 | movq %r10, 32(%rsi) |
845 | dest |
846 | - movq %rbp, 40(%rsi) |
847 | + movq %r15, 40(%rsi) |
848 | dest |
849 | movq %r14, 48(%rsi) |
850 | dest |
851 | @@ -203,7 +203,7 @@ ENTRY(csum_partial_copy_generic) |
852 | movq 3*8(%rsp), %r12 |
853 | movq 4*8(%rsp), %r14 |
854 | movq 5*8(%rsp), %r13 |
855 | - movq 6*8(%rsp), %rbp |
856 | + movq 6*8(%rsp), %r15 |
857 | addq $7*8, %rsp |
858 | ret |
859 | |
860 | diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c |
861 | index 121f59c6ee54..0c7fe444dcdd 100644 |
862 | --- a/arch/x86/lib/kaslr.c |
863 | +++ b/arch/x86/lib/kaslr.c |
864 | @@ -5,6 +5,7 @@ |
865 | * kernel starts. This file is included in the compressed kernel and |
866 | * normally linked in the regular. |
867 | */ |
868 | +#include <asm/asm.h> |
869 | #include <asm/kaslr.h> |
870 | #include <asm/msr.h> |
871 | #include <asm/archrandom.h> |
872 | @@ -79,7 +80,7 @@ unsigned long kaslr_get_random_long(const char *purpose) |
873 | } |
874 | |
875 | /* Circular multiply for better bit diffusion */ |
876 | - asm("mul %3" |
877 | + asm(_ASM_MUL "%3" |
878 | : "=a" (random), "=d" (raw) |
879 | : "a" (random), "rm" (mix_const)); |
880 | random += raw; |
881 | diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c |
882 | index 274dfc481849..a0e85f2aff7d 100644 |
883 | --- a/arch/x86/platform/efi/efi.c |
884 | +++ b/arch/x86/platform/efi/efi.c |
885 | @@ -832,9 +832,11 @@ static void __init kexec_enter_virtual_mode(void) |
886 | |
887 | /* |
888 | * We don't do virtual mode, since we don't do runtime services, on |
889 | - * non-native EFI |
890 | + * non-native EFI. With efi=old_map, we don't do runtime services in |
891 | + * kexec kernel because in the initial boot something else might |
892 | + * have been mapped at these virtual addresses. |
893 | */ |
894 | - if (!efi_is_native()) { |
895 | + if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) { |
896 | efi_memmap_unmap(); |
897 | clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
898 | return; |
899 | diff --git a/block/bio-integrity.c b/block/bio-integrity.c |
900 | index 63f72f00c72e..80dedde0de73 100644 |
901 | --- a/block/bio-integrity.c |
902 | +++ b/block/bio-integrity.c |
903 | @@ -175,6 +175,9 @@ bool bio_integrity_enabled(struct bio *bio) |
904 | if (!bio_is_rw(bio)) |
905 | return false; |
906 | |
907 | + if (!bio_sectors(bio)) |
908 | + return false; |
909 | + |
910 | /* Already protected? */ |
911 | if (bio_integrity(bio)) |
912 | return false; |
913 | diff --git a/block/blk-mq.c b/block/blk-mq.c |
914 | index c6572ffc1e87..5ca4e4cd8cba 100644 |
915 | --- a/block/blk-mq.c |
916 | +++ b/block/blk-mq.c |
917 | @@ -1265,13 +1265,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) |
918 | |
919 | blk_queue_bounce(q, &bio); |
920 | |
921 | + blk_queue_split(q, &bio, q->bio_split); |
922 | + |
923 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { |
924 | bio_io_error(bio); |
925 | return BLK_QC_T_NONE; |
926 | } |
927 | |
928 | - blk_queue_split(q, &bio, q->bio_split); |
929 | - |
930 | if (!is_flush_fua && !blk_queue_nomerges(q) && |
931 | blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) |
932 | return BLK_QC_T_NONE; |
933 | @@ -1592,7 +1592,8 @@ static void blk_mq_exit_hctx(struct request_queue *q, |
934 | { |
935 | unsigned flush_start_tag = set->queue_depth; |
936 | |
937 | - blk_mq_tag_idle(hctx); |
938 | + if (blk_mq_hw_queue_mapped(hctx)) |
939 | + blk_mq_tag_idle(hctx); |
940 | |
941 | if (set->ops->exit_request) |
942 | set->ops->exit_request(set->driver_data, |
943 | @@ -1907,6 +1908,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, |
944 | struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; |
945 | |
946 | blk_mq_sysfs_unregister(q); |
947 | + |
948 | + /* protect against switching io scheduler */ |
949 | + mutex_lock(&q->sysfs_lock); |
950 | for (i = 0; i < set->nr_hw_queues; i++) { |
951 | int node; |
952 | |
953 | @@ -1956,6 +1960,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set, |
954 | } |
955 | } |
956 | q->nr_hw_queues = i; |
957 | + mutex_unlock(&q->sysfs_lock); |
958 | blk_mq_sysfs_register(q); |
959 | } |
960 | |
961 | diff --git a/block/partition-generic.c b/block/partition-generic.c |
962 | index a2437c006640..298c05f8b5e3 100644 |
963 | --- a/block/partition-generic.c |
964 | +++ b/block/partition-generic.c |
965 | @@ -321,8 +321,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, |
966 | |
967 | if (info) { |
968 | struct partition_meta_info *pinfo = alloc_part_info(disk); |
969 | - if (!pinfo) |
970 | + if (!pinfo) { |
971 | + err = -ENOMEM; |
972 | goto out_free_stats; |
973 | + } |
974 | memcpy(pinfo, info, sizeof(*info)); |
975 | p->info = pinfo; |
976 | } |
977 | diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c |
978 | index 029f7051f2be..ce2df8c9c583 100644 |
979 | --- a/crypto/asymmetric_keys/x509_cert_parser.c |
980 | +++ b/crypto/asymmetric_keys/x509_cert_parser.c |
981 | @@ -102,6 +102,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) |
982 | } |
983 | } |
984 | |
985 | + ret = -ENOMEM; |
986 | cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); |
987 | if (!cert->pub->key) |
988 | goto error_decode; |
989 | diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c |
990 | index f83de99d7d71..56bd612927ab 100644 |
991 | --- a/crypto/async_tx/async_pq.c |
992 | +++ b/crypto/async_tx/async_pq.c |
993 | @@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan, |
994 | dma_addr_t dma_dest[2]; |
995 | int src_off = 0; |
996 | |
997 | - if (submit->flags & ASYNC_TX_FENCE) |
998 | - dma_flags |= DMA_PREP_FENCE; |
999 | - |
1000 | while (src_cnt > 0) { |
1001 | submit->flags = flags_orig; |
1002 | pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); |
1003 | @@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan, |
1004 | if (cb_fn_orig) |
1005 | dma_flags |= DMA_PREP_INTERRUPT; |
1006 | } |
1007 | + if (submit->flags & ASYNC_TX_FENCE) |
1008 | + dma_flags |= DMA_PREP_FENCE; |
1009 | |
1010 | /* Drivers force forward progress in case they can not provide |
1011 | * a descriptor |
1012 | diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c |
1013 | index c5557d070954..94e04c9de12b 100644 |
1014 | --- a/drivers/acpi/acpi_video.c |
1015 | +++ b/drivers/acpi/acpi_video.c |
1016 | @@ -87,8 +87,8 @@ MODULE_PARM_DESC(report_key_events, |
1017 | static bool device_id_scheme = false; |
1018 | module_param(device_id_scheme, bool, 0444); |
1019 | |
1020 | -static bool only_lcd = false; |
1021 | -module_param(only_lcd, bool, 0444); |
1022 | +static int only_lcd = -1; |
1023 | +module_param(only_lcd, int, 0444); |
1024 | |
1025 | static int register_count; |
1026 | static DEFINE_MUTEX(register_count_mutex); |
1027 | @@ -2082,6 +2082,16 @@ int acpi_video_register(void) |
1028 | goto leave; |
1029 | } |
1030 | |
1031 | + /* |
1032 | + * We're seeing a lot of bogus backlight interfaces on newer machines |
1033 | + * without a LCD such as desktops, servers and HDMI sticks. Checking |
1034 | + * the lcd flag fixes this, so enable this on any machines which are |
1035 | + * win8 ready (where we also prefer the native backlight driver, so |
1036 | + * normally the acpi_video code should not register there anyways). |
1037 | + */ |
1038 | + if (only_lcd == -1) |
1039 | + only_lcd = acpi_osi_is_win8(); |
1040 | + |
1041 | dmi_check_system(video_dmi_table); |
1042 | |
1043 | ret = acpi_bus_register_driver(&acpi_video_bus); |
1044 | diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c |
1045 | index 9179e9abe3db..35a2d9ea0654 100644 |
1046 | --- a/drivers/acpi/acpica/evxfevnt.c |
1047 | +++ b/drivers/acpi/acpica/evxfevnt.c |
1048 | @@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags) |
1049 | |
1050 | ACPI_FUNCTION_TRACE(acpi_enable_event); |
1051 | |
1052 | + /* If Hardware Reduced flag is set, there are no fixed events */ |
1053 | + |
1054 | + if (acpi_gbl_reduced_hardware) { |
1055 | + return_ACPI_STATUS(AE_OK); |
1056 | + } |
1057 | + |
1058 | /* Decode the Fixed Event */ |
1059 | |
1060 | if (event > ACPI_EVENT_MAX) { |
1061 | @@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags) |
1062 | |
1063 | ACPI_FUNCTION_TRACE(acpi_disable_event); |
1064 | |
1065 | + /* If Hardware Reduced flag is set, there are no fixed events */ |
1066 | + |
1067 | + if (acpi_gbl_reduced_hardware) { |
1068 | + return_ACPI_STATUS(AE_OK); |
1069 | + } |
1070 | + |
1071 | /* Decode the Fixed Event */ |
1072 | |
1073 | if (event > ACPI_EVENT_MAX) { |
1074 | @@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event) |
1075 | |
1076 | ACPI_FUNCTION_TRACE(acpi_clear_event); |
1077 | |
1078 | + /* If Hardware Reduced flag is set, there are no fixed events */ |
1079 | + |
1080 | + if (acpi_gbl_reduced_hardware) { |
1081 | + return_ACPI_STATUS(AE_OK); |
1082 | + } |
1083 | + |
1084 | /* Decode the Fixed Event */ |
1085 | |
1086 | if (event > ACPI_EVENT_MAX) { |
1087 | diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c |
1088 | index db0e90342e82..ac2e8dfdf74e 100644 |
1089 | --- a/drivers/acpi/acpica/psobject.c |
1090 | +++ b/drivers/acpi/acpica/psobject.c |
1091 | @@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) |
1092 | (u32)(aml_offset + |
1093 | sizeof(struct acpi_table_header))); |
1094 | |
1095 | + ACPI_ERROR((AE_INFO, |
1096 | + "Aborting disassembly, AML byte code is corrupt")); |
1097 | + |
1098 | /* Dump the context surrounding the invalid opcode */ |
1099 | |
1100 | acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. |
1101 | @@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) |
1102 | sizeof(struct acpi_table_header) - |
1103 | 16)); |
1104 | acpi_os_printf(" */\n"); |
1105 | + |
1106 | + /* |
1107 | + * Just abort the disassembly, cannot continue because the |
1108 | + * parser is essentially lost. The disassembler can then |
1109 | + * randomly fail because an ill-constructed parse tree |
1110 | + * can result. |
1111 | + */ |
1112 | + return_ACPI_STATUS(AE_AML_BAD_OPCODE); |
1113 | #endif |
1114 | } |
1115 | |
1116 | @@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, |
1117 | if (status == AE_CTRL_PARSE_CONTINUE) { |
1118 | return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); |
1119 | } |
1120 | + if (ACPI_FAILURE(status)) { |
1121 | + return_ACPI_STATUS(status); |
1122 | + } |
1123 | |
1124 | /* Create Op structure and append to parent's argument list */ |
1125 | |
1126 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
1127 | index c3bcb7f5986e..307b3e28f34c 100644 |
1128 | --- a/drivers/acpi/ec.c |
1129 | +++ b/drivers/acpi/ec.c |
1130 | @@ -1518,7 +1518,7 @@ static int acpi_ec_setup(struct acpi_ec *ec, bool handle_events) |
1131 | } |
1132 | |
1133 | acpi_handle_info(ec->handle, |
1134 | - "GPE=0x%lx, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", |
1135 | + "GPE=0x%x, EC_CMD/EC_SC=0x%lx, EC_DATA=0x%lx\n", |
1136 | ec->gpe, ec->command_addr, ec->data_addr); |
1137 | return ret; |
1138 | } |
1139 | diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c |
1140 | index 6c7dd7af789e..dd70d6c2bca0 100644 |
1141 | --- a/drivers/acpi/ec_sys.c |
1142 | +++ b/drivers/acpi/ec_sys.c |
1143 | @@ -128,7 +128,7 @@ static int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) |
1144 | return -ENOMEM; |
1145 | } |
1146 | |
1147 | - if (!debugfs_create_x32("gpe", 0444, dev_dir, (u32 *)&first_ec->gpe)) |
1148 | + if (!debugfs_create_x32("gpe", 0444, dev_dir, &first_ec->gpe)) |
1149 | goto error; |
1150 | if (!debugfs_create_bool("use_global_lock", 0444, dev_dir, |
1151 | &first_ec->global_lock)) |
1152 | diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h |
1153 | index 08b3ca0ead69..b012e94b7d9f 100644 |
1154 | --- a/drivers/acpi/internal.h |
1155 | +++ b/drivers/acpi/internal.h |
1156 | @@ -158,7 +158,7 @@ static inline void acpi_early_processor_osc(void) {} |
1157 | -------------------------------------------------------------------------- */ |
1158 | struct acpi_ec { |
1159 | acpi_handle handle; |
1160 | - unsigned long gpe; |
1161 | + u32 gpe; |
1162 | unsigned long command_addr; |
1163 | unsigned long data_addr; |
1164 | bool global_lock; |
1165 | diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c |
1166 | index aaa761b9081c..cd2eab6aa92e 100644 |
1167 | --- a/drivers/ata/libahci_platform.c |
1168 | +++ b/drivers/ata/libahci_platform.c |
1169 | @@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev, |
1170 | |
1171 | irq = platform_get_irq(pdev, 0); |
1172 | if (irq <= 0) { |
1173 | - dev_err(dev, "no irq\n"); |
1174 | - return -EINVAL; |
1175 | + if (irq != -EPROBE_DEFER) |
1176 | + dev_err(dev, "no irq\n"); |
1177 | + return irq; |
1178 | } |
1179 | |
1180 | hpriv->irq = irq; |
1181 | diff --git a/drivers/block/loop.c b/drivers/block/loop.c |
1182 | index 68bfcef24701..dc318b9100c2 100644 |
1183 | --- a/drivers/block/loop.c |
1184 | +++ b/drivers/block/loop.c |
1185 | @@ -612,6 +612,9 @@ static int loop_switch(struct loop_device *lo, struct file *file) |
1186 | */ |
1187 | static int loop_flush(struct loop_device *lo) |
1188 | { |
1189 | + /* loop not yet configured, no running thread, nothing to flush */ |
1190 | + if (lo->lo_state != Lo_bound) |
1191 | + return 0; |
1192 | return loop_switch(lo, NULL); |
1193 | } |
1194 | |
1195 | diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c |
1196 | index 72fe0a5a8bf3..017c37b9c7c1 100644 |
1197 | --- a/drivers/bus/brcmstb_gisb.c |
1198 | +++ b/drivers/bus/brcmstb_gisb.c |
1199 | @@ -1,5 +1,5 @@ |
1200 | /* |
1201 | - * Copyright (C) 2014 Broadcom Corporation |
1202 | + * Copyright (C) 2014-2017 Broadcom |
1203 | * |
1204 | * This program is free software; you can redistribute it and/or modify |
1205 | * it under the terms of the GNU General Public License version 2 as |
1206 | @@ -37,8 +37,6 @@ |
1207 | #define ARB_ERR_CAP_CLEAR (1 << 0) |
1208 | #define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) |
1209 | #define ARB_ERR_CAP_STATUS_TEA (1 << 11) |
1210 | -#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) |
1211 | -#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c |
1212 | #define ARB_ERR_CAP_STATUS_WRITE (1 << 1) |
1213 | #define ARB_ERR_CAP_STATUS_VALID (1 << 0) |
1214 | |
1215 | @@ -47,7 +45,6 @@ enum { |
1216 | ARB_ERR_CAP_CLR, |
1217 | ARB_ERR_CAP_HI_ADDR, |
1218 | ARB_ERR_CAP_ADDR, |
1219 | - ARB_ERR_CAP_DATA, |
1220 | ARB_ERR_CAP_STATUS, |
1221 | ARB_ERR_CAP_MASTER, |
1222 | }; |
1223 | @@ -57,7 +54,6 @@ static const int gisb_offsets_bcm7038[] = { |
1224 | [ARB_ERR_CAP_CLR] = 0x0c4, |
1225 | [ARB_ERR_CAP_HI_ADDR] = -1, |
1226 | [ARB_ERR_CAP_ADDR] = 0x0c8, |
1227 | - [ARB_ERR_CAP_DATA] = 0x0cc, |
1228 | [ARB_ERR_CAP_STATUS] = 0x0d0, |
1229 | [ARB_ERR_CAP_MASTER] = -1, |
1230 | }; |
1231 | @@ -67,7 +63,6 @@ static const int gisb_offsets_bcm7400[] = { |
1232 | [ARB_ERR_CAP_CLR] = 0x0c8, |
1233 | [ARB_ERR_CAP_HI_ADDR] = -1, |
1234 | [ARB_ERR_CAP_ADDR] = 0x0cc, |
1235 | - [ARB_ERR_CAP_DATA] = 0x0d0, |
1236 | [ARB_ERR_CAP_STATUS] = 0x0d4, |
1237 | [ARB_ERR_CAP_MASTER] = 0x0d8, |
1238 | }; |
1239 | @@ -77,7 +72,6 @@ static const int gisb_offsets_bcm7435[] = { |
1240 | [ARB_ERR_CAP_CLR] = 0x168, |
1241 | [ARB_ERR_CAP_HI_ADDR] = -1, |
1242 | [ARB_ERR_CAP_ADDR] = 0x16c, |
1243 | - [ARB_ERR_CAP_DATA] = 0x170, |
1244 | [ARB_ERR_CAP_STATUS] = 0x174, |
1245 | [ARB_ERR_CAP_MASTER] = 0x178, |
1246 | }; |
1247 | @@ -87,7 +81,6 @@ static const int gisb_offsets_bcm7445[] = { |
1248 | [ARB_ERR_CAP_CLR] = 0x7e4, |
1249 | [ARB_ERR_CAP_HI_ADDR] = 0x7e8, |
1250 | [ARB_ERR_CAP_ADDR] = 0x7ec, |
1251 | - [ARB_ERR_CAP_DATA] = 0x7f0, |
1252 | [ARB_ERR_CAP_STATUS] = 0x7f4, |
1253 | [ARB_ERR_CAP_MASTER] = 0x7f8, |
1254 | }; |
1255 | @@ -109,9 +102,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) |
1256 | { |
1257 | int offset = gdev->gisb_offsets[reg]; |
1258 | |
1259 | - /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ |
1260 | - if (offset == -1) |
1261 | - return 1; |
1262 | + if (offset < 0) { |
1263 | + /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ |
1264 | + if (reg == ARB_ERR_CAP_MASTER) |
1265 | + return 1; |
1266 | + else |
1267 | + return 0; |
1268 | + } |
1269 | |
1270 | if (gdev->big_endian) |
1271 | return ioread32be(gdev->base + offset); |
1272 | @@ -119,6 +116,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) |
1273 | return ioread32(gdev->base + offset); |
1274 | } |
1275 | |
1276 | +static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev) |
1277 | +{ |
1278 | + u64 value; |
1279 | + |
1280 | + value = gisb_read(gdev, ARB_ERR_CAP_ADDR); |
1281 | + value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; |
1282 | + |
1283 | + return value; |
1284 | +} |
1285 | + |
1286 | static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) |
1287 | { |
1288 | int offset = gdev->gisb_offsets[reg]; |
1289 | @@ -127,9 +134,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) |
1290 | return; |
1291 | |
1292 | if (gdev->big_endian) |
1293 | - iowrite32be(val, gdev->base + reg); |
1294 | + iowrite32be(val, gdev->base + offset); |
1295 | else |
1296 | - iowrite32(val, gdev->base + reg); |
1297 | + iowrite32(val, gdev->base + offset); |
1298 | } |
1299 | |
1300 | static ssize_t gisb_arb_get_timeout(struct device *dev, |
1301 | @@ -185,7 +192,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, |
1302 | const char *reason) |
1303 | { |
1304 | u32 cap_status; |
1305 | - unsigned long arb_addr; |
1306 | + u64 arb_addr; |
1307 | u32 master; |
1308 | const char *m_name; |
1309 | char m_fmt[11]; |
1310 | @@ -197,10 +204,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, |
1311 | return 1; |
1312 | |
1313 | /* Read the address and master */ |
1314 | - arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff; |
1315 | -#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) |
1316 | - arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; |
1317 | -#endif |
1318 | + arb_addr = gisb_read_address(gdev); |
1319 | master = gisb_read(gdev, ARB_ERR_CAP_MASTER); |
1320 | |
1321 | m_name = brcmstb_gisb_master_to_str(gdev, master); |
1322 | @@ -209,7 +213,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, |
1323 | m_name = m_fmt; |
1324 | } |
1325 | |
1326 | - pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", |
1327 | + pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n", |
1328 | __func__, reason, arb_addr, |
1329 | cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', |
1330 | cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", |
1331 | diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c |
1332 | index 510fc104bcdc..f11c1c7e84c6 100644 |
1333 | --- a/drivers/char/ipmi/ipmi_ssif.c |
1334 | +++ b/drivers/char/ipmi/ipmi_ssif.c |
1335 | @@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) |
1336 | msg = ipmi_alloc_smi_msg(); |
1337 | if (!msg) { |
1338 | ssif_info->ssif_state = SSIF_NORMAL; |
1339 | + ipmi_ssif_unlock_cond(ssif_info, flags); |
1340 | return; |
1341 | } |
1342 | |
1343 | @@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, |
1344 | msg = ipmi_alloc_smi_msg(); |
1345 | if (!msg) { |
1346 | ssif_info->ssif_state = SSIF_NORMAL; |
1347 | + ipmi_ssif_unlock_cond(ssif_info, flags); |
1348 | return; |
1349 | } |
1350 | |
1351 | diff --git a/drivers/char/random.c b/drivers/char/random.c |
1352 | index 08d1dd58c0d2..0c23ced255cb 100644 |
1353 | --- a/drivers/char/random.c |
1354 | +++ b/drivers/char/random.c |
1355 | @@ -1115,12 +1115,16 @@ static void add_interrupt_bench(cycles_t start) |
1356 | static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) |
1357 | { |
1358 | __u32 *ptr = (__u32 *) regs; |
1359 | + unsigned int idx; |
1360 | |
1361 | if (regs == NULL) |
1362 | return 0; |
1363 | - if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) |
1364 | - f->reg_idx = 0; |
1365 | - return *(ptr + f->reg_idx++); |
1366 | + idx = READ_ONCE(f->reg_idx); |
1367 | + if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) |
1368 | + idx = 0; |
1369 | + ptr += idx++; |
1370 | + WRITE_ONCE(f->reg_idx, idx); |
1371 | + return *ptr; |
1372 | } |
1373 | |
1374 | void add_interrupt_randomness(int irq, int irq_flags) |
1375 | diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c |
1376 | index 4e1cd5aa69d8..07c8f701e51c 100644 |
1377 | --- a/drivers/clk/at91/clk-generated.c |
1378 | +++ b/drivers/clk/at91/clk-generated.c |
1379 | @@ -260,13 +260,13 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, |
1380 | gck->lock = lock; |
1381 | gck->range = *range; |
1382 | |
1383 | + clk_generated_startup(gck); |
1384 | hw = &gck->hw; |
1385 | ret = clk_hw_register(NULL, &gck->hw); |
1386 | if (ret) { |
1387 | kfree(gck); |
1388 | hw = ERR_PTR(ret); |
1389 | - } else |
1390 | - clk_generated_startup(gck); |
1391 | + } |
1392 | |
1393 | return hw; |
1394 | } |
1395 | diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c |
1396 | index 674785d968a3..f02900922bbe 100644 |
1397 | --- a/drivers/clk/clk-conf.c |
1398 | +++ b/drivers/clk/clk-conf.c |
1399 | @@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) |
1400 | |
1401 | rc = clk_set_rate(clk, rate); |
1402 | if (rc < 0) |
1403 | - pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n", |
1404 | + pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n", |
1405 | __clk_get_name(clk), rate, rc, |
1406 | clk_get_rate(clk)); |
1407 | clk_put(clk); |
1408 | diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c |
1409 | index 96d37175d0ad..8ad458b5ad6e 100644 |
1410 | --- a/drivers/clk/clk-scpi.c |
1411 | +++ b/drivers/clk/clk-scpi.c |
1412 | @@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = { |
1413 | }; |
1414 | |
1415 | /* find closest match to given frequency in OPP table */ |
1416 | -static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) |
1417 | +static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) |
1418 | { |
1419 | int idx; |
1420 | - u32 fmin = 0, fmax = ~0, ftmp; |
1421 | + unsigned long fmin = 0, fmax = ~0, ftmp; |
1422 | const struct scpi_opp *opp = clk->info->opps; |
1423 | |
1424 | for (idx = 0; idx < clk->info->count; idx++, opp++) { |
1425 | ftmp = opp->freq; |
1426 | - if (ftmp >= (u32)rate) { |
1427 | + if (ftmp >= rate) { |
1428 | if (ftmp <= fmax) |
1429 | fmax = ftmp; |
1430 | break; |
1431 | diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig |
1432 | index 2f29ee1a4d00..5588f75a8414 100644 |
1433 | --- a/drivers/clk/meson/Kconfig |
1434 | +++ b/drivers/clk/meson/Kconfig |
1435 | @@ -7,9 +7,9 @@ config COMMON_CLK_MESON8B |
1436 | bool |
1437 | depends on COMMON_CLK_AMLOGIC |
1438 | help |
1439 | - Support for the clock controller on AmLogic S805 devices, aka |
1440 | - meson8b. Say Y if you want peripherals and CPU frequency scaling to |
1441 | - work. |
1442 | + Support for the clock controller on AmLogic S802 (Meson8), |
1443 | + S805 (Meson8b) and S812 (Meson8m2) devices. Say Y if you |
1444 | + want peripherals and CPU frequency scaling to work. |
1445 | |
1446 | config COMMON_CLK_GXBB |
1447 | bool |
1448 | diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c |
1449 | index 3f1be46cbb33..70567958b86a 100644 |
1450 | --- a/drivers/clk/meson/meson8b.c |
1451 | +++ b/drivers/clk/meson/meson8b.c |
1452 | @@ -1,5 +1,6 @@ |
1453 | /* |
1454 | - * AmLogic S805 / Meson8b Clock Controller Driver |
1455 | + * AmLogic S802 (Meson8) / S805 (Meson8b) / S812 (Meson8m2) Clock Controller |
1456 | + * Driver |
1457 | * |
1458 | * Copyright (c) 2015 Endless Mobile, Inc. |
1459 | * Author: Carlo Caione <carlo@endlessm.com> |
1460 | @@ -661,7 +662,9 @@ static int meson8b_clkc_probe(struct platform_device *pdev) |
1461 | } |
1462 | |
1463 | static const struct of_device_id meson8b_clkc_match_table[] = { |
1464 | + { .compatible = "amlogic,meson8-clkc" }, |
1465 | { .compatible = "amlogic,meson8b-clkc" }, |
1466 | + { .compatible = "amlogic,meson8m2-clkc" }, |
1467 | { } |
1468 | }; |
1469 | |
1470 | diff --git a/drivers/clk/renesas/clk-rcar-gen2.c b/drivers/clk/renesas/clk-rcar-gen2.c |
1471 | index 00e6aba4b9c0..c55d5fe116d6 100644 |
1472 | --- a/drivers/clk/renesas/clk-rcar-gen2.c |
1473 | +++ b/drivers/clk/renesas/clk-rcar-gen2.c |
1474 | @@ -271,11 +271,14 @@ struct cpg_pll_config { |
1475 | unsigned int extal_div; |
1476 | unsigned int pll1_mult; |
1477 | unsigned int pll3_mult; |
1478 | + unsigned int pll0_mult; /* For R-Car V2H and E2 only */ |
1479 | }; |
1480 | |
1481 | static const struct cpg_pll_config cpg_pll_configs[8] __initconst = { |
1482 | - { 1, 208, 106 }, { 1, 208, 88 }, { 1, 156, 80 }, { 1, 156, 66 }, |
1483 | - { 2, 240, 122 }, { 2, 240, 102 }, { 2, 208, 106 }, { 2, 208, 88 }, |
1484 | + { 1, 208, 106, 200 }, { 1, 208, 88, 200 }, |
1485 | + { 1, 156, 80, 150 }, { 1, 156, 66, 150 }, |
1486 | + { 2, 240, 122, 230 }, { 2, 240, 102, 230 }, |
1487 | + { 2, 208, 106, 200 }, { 2, 208, 88, 200 }, |
1488 | }; |
1489 | |
1490 | /* SDHI divisors */ |
1491 | @@ -297,6 +300,12 @@ static const struct clk_div_table cpg_sd01_div_table[] = { |
1492 | |
1493 | static u32 cpg_mode __initdata; |
1494 | |
1495 | +static const char * const pll0_mult_match[] = { |
1496 | + "renesas,r8a7792-cpg-clocks", |
1497 | + "renesas,r8a7794-cpg-clocks", |
1498 | + NULL |
1499 | +}; |
1500 | + |
1501 | static struct clk * __init |
1502 | rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, |
1503 | const struct cpg_pll_config *config, |
1504 | @@ -317,9 +326,15 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, |
1505 | * clock implementation and we currently have no need to change |
1506 | * the multiplier value. |
1507 | */ |
1508 | - u32 value = clk_readl(cpg->reg + CPG_PLL0CR); |
1509 | + if (of_device_compatible_match(np, pll0_mult_match)) { |
1510 | + /* R-Car V2H and E2 do not have PLL0CR */ |
1511 | + mult = config->pll0_mult; |
1512 | + div = 3; |
1513 | + } else { |
1514 | + u32 value = clk_readl(cpg->reg + CPG_PLL0CR); |
1515 | + mult = ((value >> 24) & ((1 << 7) - 1)) + 1; |
1516 | + } |
1517 | parent_name = "main"; |
1518 | - mult = ((value >> 24) & ((1 << 7) - 1)) + 1; |
1519 | } else if (!strcmp(name, "pll1")) { |
1520 | parent_name = "main"; |
1521 | mult = config->pll1_mult / 2; |
1522 | diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c |
1523 | index a5c111b67f37..ea11a33e7fff 100644 |
1524 | --- a/drivers/cpuidle/dt_idle_states.c |
1525 | +++ b/drivers/cpuidle/dt_idle_states.c |
1526 | @@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, |
1527 | if (!state_node) |
1528 | break; |
1529 | |
1530 | - if (!of_device_is_available(state_node)) |
1531 | + if (!of_device_is_available(state_node)) { |
1532 | + of_node_put(state_node); |
1533 | continue; |
1534 | + } |
1535 | |
1536 | if (!idle_state_valid(state_node, i, cpumask)) { |
1537 | pr_warn("%s idle state not valid, bailing out\n", |
1538 | diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c |
1539 | index d0b16e5e4ee5..d8305ddf87d0 100644 |
1540 | --- a/drivers/crypto/omap-sham.c |
1541 | +++ b/drivers/crypto/omap-sham.c |
1542 | @@ -750,7 +750,10 @@ static int omap_sham_align_sgs(struct scatterlist *sg, |
1543 | if (final) |
1544 | new_len = DIV_ROUND_UP(new_len, bs) * bs; |
1545 | else |
1546 | - new_len = new_len / bs * bs; |
1547 | + new_len = (new_len - 1) / bs * bs; |
1548 | + |
1549 | + if (nbytes != new_len) |
1550 | + list_ok = false; |
1551 | |
1552 | while (nbytes > 0 && sg_tmp) { |
1553 | n++; |
1554 | @@ -846,6 +849,8 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) |
1555 | xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs; |
1556 | else |
1557 | xmit_len = xmit_len / bs * bs; |
1558 | + } else if (!final) { |
1559 | + xmit_len -= bs; |
1560 | } |
1561 | |
1562 | hash_later = rctx->total - xmit_len; |
1563 | @@ -873,14 +878,21 @@ static int omap_sham_prepare_request(struct ahash_request *req, bool update) |
1564 | } |
1565 | |
1566 | if (hash_later) { |
1567 | - if (req->nbytes) { |
1568 | - scatterwalk_map_and_copy(rctx->buffer, req->src, |
1569 | - req->nbytes - hash_later, |
1570 | - hash_later, 0); |
1571 | - } else { |
1572 | + int offset = 0; |
1573 | + |
1574 | + if (hash_later > req->nbytes) { |
1575 | memcpy(rctx->buffer, rctx->buffer + xmit_len, |
1576 | - hash_later); |
1577 | + hash_later - req->nbytes); |
1578 | + offset = hash_later - req->nbytes; |
1579 | } |
1580 | + |
1581 | + if (req->nbytes) { |
1582 | + scatterwalk_map_and_copy(rctx->buffer + offset, |
1583 | + req->src, |
1584 | + offset + req->nbytes - |
1585 | + hash_later, hash_later, 0); |
1586 | + } |
1587 | + |
1588 | rctx->bufcnt = hash_later; |
1589 | } else { |
1590 | rctx->bufcnt = 0; |
1591 | @@ -1130,7 +1142,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
1592 | ctx = ahash_request_ctx(req); |
1593 | |
1594 | err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE); |
1595 | - if (err) |
1596 | + if (err || !ctx->total) |
1597 | goto err1; |
1598 | |
1599 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
1600 | @@ -1189,11 +1201,10 @@ static int omap_sham_update(struct ahash_request *req) |
1601 | if (!req->nbytes) |
1602 | return 0; |
1603 | |
1604 | - if (ctx->total + req->nbytes < ctx->buflen) { |
1605 | + if (ctx->bufcnt + req->nbytes <= ctx->buflen) { |
1606 | scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src, |
1607 | 0, req->nbytes, 0); |
1608 | ctx->bufcnt += req->nbytes; |
1609 | - ctx->total += req->nbytes; |
1610 | return 0; |
1611 | } |
1612 | |
1613 | diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c |
1614 | index 9e5674c5a07b..db70cee71caa 100644 |
1615 | --- a/drivers/devfreq/devfreq.c |
1616 | +++ b/drivers/devfreq/devfreq.c |
1617 | @@ -943,7 +943,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, |
1618 | if (df->governor == governor) { |
1619 | ret = 0; |
1620 | goto out; |
1621 | - } else if (df->governor->immutable || governor->immutable) { |
1622 | + } else if ((df->governor && df->governor->immutable) || |
1623 | + governor->immutable) { |
1624 | ret = -EINVAL; |
1625 | goto out; |
1626 | } |
1627 | diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c |
1628 | index 21726a270fc4..b9c29720aeb1 100644 |
1629 | --- a/drivers/dma/imx-sdma.c |
1630 | +++ b/drivers/dma/imx-sdma.c |
1631 | @@ -1755,19 +1755,26 @@ static int sdma_probe(struct platform_device *pdev) |
1632 | if (IS_ERR(sdma->clk_ahb)) |
1633 | return PTR_ERR(sdma->clk_ahb); |
1634 | |
1635 | - clk_prepare(sdma->clk_ipg); |
1636 | - clk_prepare(sdma->clk_ahb); |
1637 | + ret = clk_prepare(sdma->clk_ipg); |
1638 | + if (ret) |
1639 | + return ret; |
1640 | + |
1641 | + ret = clk_prepare(sdma->clk_ahb); |
1642 | + if (ret) |
1643 | + goto err_clk; |
1644 | |
1645 | ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", |
1646 | sdma); |
1647 | if (ret) |
1648 | - return ret; |
1649 | + goto err_irq; |
1650 | |
1651 | sdma->irq = irq; |
1652 | |
1653 | sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); |
1654 | - if (!sdma->script_addrs) |
1655 | - return -ENOMEM; |
1656 | + if (!sdma->script_addrs) { |
1657 | + ret = -ENOMEM; |
1658 | + goto err_irq; |
1659 | + } |
1660 | |
1661 | /* initially no scripts available */ |
1662 | saddr_arr = (s32 *)sdma->script_addrs; |
1663 | @@ -1882,6 +1889,10 @@ static int sdma_probe(struct platform_device *pdev) |
1664 | dma_async_device_unregister(&sdma->dma_device); |
1665 | err_init: |
1666 | kfree(sdma->script_addrs); |
1667 | +err_irq: |
1668 | + clk_unprepare(sdma->clk_ahb); |
1669 | +err_clk: |
1670 | + clk_unprepare(sdma->clk_ipg); |
1671 | return ret; |
1672 | } |
1673 | |
1674 | @@ -1893,6 +1904,8 @@ static int sdma_remove(struct platform_device *pdev) |
1675 | devm_free_irq(&pdev->dev, sdma->irq, sdma); |
1676 | dma_async_device_unregister(&sdma->dma_device); |
1677 | kfree(sdma->script_addrs); |
1678 | + clk_unprepare(sdma->clk_ahb); |
1679 | + clk_unprepare(sdma->clk_ipg); |
1680 | /* Kill the tasklet */ |
1681 | for (i = 0; i < MAX_DMA_CHANNELS; i++) { |
1682 | struct sdma_channel *sdmac = &sdma->channel[i]; |
1683 | diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c |
1684 | index cb9b8577acbc..61c19b81ed81 100644 |
1685 | --- a/drivers/edac/mv64x60_edac.c |
1686 | +++ b/drivers/edac/mv64x60_edac.c |
1687 | @@ -759,7 +759,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) |
1688 | /* Non-ECC RAM? */ |
1689 | printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); |
1690 | res = -ENODEV; |
1691 | - goto err2; |
1692 | + goto err; |
1693 | } |
1694 | |
1695 | edac_dbg(3, "init mci\n"); |
1696 | diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c |
1697 | index 7c446d118cd6..1d87b0718d3a 100644 |
1698 | --- a/drivers/gpio/gpio-crystalcove.c |
1699 | +++ b/drivers/gpio/gpio-crystalcove.c |
1700 | @@ -90,8 +90,18 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) |
1701 | { |
1702 | int reg; |
1703 | |
1704 | - if (gpio == 94) |
1705 | - return GPIOPANELCTL; |
1706 | + if (gpio >= CRYSTALCOVE_GPIO_NUM) { |
1707 | + /* |
1708 | + * Virtual GPIO called from ACPI, for now we only support |
1709 | + * the panel ctl. |
1710 | + */ |
1711 | + switch (gpio) { |
1712 | + case 0x5e: |
1713 | + return GPIOPANELCTL; |
1714 | + default: |
1715 | + return -EOPNOTSUPP; |
1716 | + } |
1717 | + } |
1718 | |
1719 | if (reg_type == CTRL_IN) { |
1720 | if (gpio < 8) |
1721 | @@ -130,36 +140,36 @@ static void crystalcove_update_irq_ctrl(struct crystalcove_gpio *cg, int gpio) |
1722 | static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned gpio) |
1723 | { |
1724 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
1725 | + int reg = to_reg(gpio, CTRL_OUT); |
1726 | |
1727 | - if (gpio > CRYSTALCOVE_VGPIO_NUM) |
1728 | + if (reg < 0) |
1729 | return 0; |
1730 | |
1731 | - return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), |
1732 | - CTLO_INPUT_SET); |
1733 | + return regmap_write(cg->regmap, reg, CTLO_INPUT_SET); |
1734 | } |
1735 | |
1736 | static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned gpio, |
1737 | int value) |
1738 | { |
1739 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
1740 | + int reg = to_reg(gpio, CTRL_OUT); |
1741 | |
1742 | - if (gpio > CRYSTALCOVE_VGPIO_NUM) |
1743 | + if (reg < 0) |
1744 | return 0; |
1745 | |
1746 | - return regmap_write(cg->regmap, to_reg(gpio, CTRL_OUT), |
1747 | - CTLO_OUTPUT_SET | value); |
1748 | + return regmap_write(cg->regmap, reg, CTLO_OUTPUT_SET | value); |
1749 | } |
1750 | |
1751 | static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned gpio) |
1752 | { |
1753 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
1754 | - int ret; |
1755 | unsigned int val; |
1756 | + int ret, reg = to_reg(gpio, CTRL_IN); |
1757 | |
1758 | - if (gpio > CRYSTALCOVE_VGPIO_NUM) |
1759 | + if (reg < 0) |
1760 | return 0; |
1761 | |
1762 | - ret = regmap_read(cg->regmap, to_reg(gpio, CTRL_IN), &val); |
1763 | + ret = regmap_read(cg->regmap, reg, &val); |
1764 | if (ret) |
1765 | return ret; |
1766 | |
1767 | @@ -170,14 +180,15 @@ static void crystalcove_gpio_set(struct gpio_chip *chip, |
1768 | unsigned gpio, int value) |
1769 | { |
1770 | struct crystalcove_gpio *cg = gpiochip_get_data(chip); |
1771 | + int reg = to_reg(gpio, CTRL_OUT); |
1772 | |
1773 | - if (gpio > CRYSTALCOVE_VGPIO_NUM) |
1774 | + if (reg < 0) |
1775 | return; |
1776 | |
1777 | if (value) |
1778 | - regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 1); |
1779 | + regmap_update_bits(cg->regmap, reg, 1, 1); |
1780 | else |
1781 | - regmap_update_bits(cg->regmap, to_reg(gpio, CTRL_OUT), 1, 0); |
1782 | + regmap_update_bits(cg->regmap, reg, 1, 0); |
1783 | } |
1784 | |
1785 | static int crystalcove_irq_type(struct irq_data *data, unsigned type) |
1786 | @@ -185,6 +196,9 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned type) |
1787 | struct crystalcove_gpio *cg = |
1788 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
1789 | |
1790 | + if (data->hwirq >= CRYSTALCOVE_GPIO_NUM) |
1791 | + return 0; |
1792 | + |
1793 | switch (type) { |
1794 | case IRQ_TYPE_NONE: |
1795 | cg->intcnt_value = CTLI_INTCNT_DIS; |
1796 | @@ -235,8 +249,10 @@ static void crystalcove_irq_unmask(struct irq_data *data) |
1797 | struct crystalcove_gpio *cg = |
1798 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
1799 | |
1800 | - cg->set_irq_mask = false; |
1801 | - cg->update |= UPDATE_IRQ_MASK; |
1802 | + if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { |
1803 | + cg->set_irq_mask = false; |
1804 | + cg->update |= UPDATE_IRQ_MASK; |
1805 | + } |
1806 | } |
1807 | |
1808 | static void crystalcove_irq_mask(struct irq_data *data) |
1809 | @@ -244,8 +260,10 @@ static void crystalcove_irq_mask(struct irq_data *data) |
1810 | struct crystalcove_gpio *cg = |
1811 | gpiochip_get_data(irq_data_get_irq_chip_data(data)); |
1812 | |
1813 | - cg->set_irq_mask = true; |
1814 | - cg->update |= UPDATE_IRQ_MASK; |
1815 | + if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { |
1816 | + cg->set_irq_mask = true; |
1817 | + cg->update |= UPDATE_IRQ_MASK; |
1818 | + } |
1819 | } |
1820 | |
1821 | static struct irq_chip crystalcove_irqchip = { |
1822 | diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
1823 | index f3c3680963b9..4f54ff45e09e 100644 |
1824 | --- a/drivers/gpio/gpiolib.c |
1825 | +++ b/drivers/gpio/gpiolib.c |
1826 | @@ -3231,7 +3231,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, |
1827 | return desc; |
1828 | } |
1829 | |
1830 | - status = gpiod_request(desc, con_id); |
1831 | + /* If a connection label was passed use that, else use the device name as label */ |
1832 | + status = gpiod_request(desc, con_id ? con_id : dev_name(dev)); |
1833 | if (status < 0) |
1834 | return ERR_PTR(status); |
1835 | |
1836 | diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c |
1837 | index ef7c8de7060e..171480bb95d0 100644 |
1838 | --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c |
1839 | +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c |
1840 | @@ -317,7 +317,8 @@ static struct kfd_process *create_process(const struct task_struct *thread) |
1841 | |
1842 | /* init process apertures*/ |
1843 | process->is_32bit_user_mode = in_compat_syscall(); |
1844 | - if (kfd_init_apertures(process) != 0) |
1845 | + err = kfd_init_apertures(process); |
1846 | + if (err != 0) |
1847 | goto err_init_apretures; |
1848 | |
1849 | return process; |
1850 | diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c |
1851 | index 797d1f8340b9..7145127513c4 100644 |
1852 | --- a/drivers/gpu/drm/msm/msm_gem.c |
1853 | +++ b/drivers/gpu/drm/msm/msm_gem.c |
1854 | @@ -770,6 +770,8 @@ static int msm_gem_new_impl(struct drm_device *dev, |
1855 | unsigned sz; |
1856 | bool use_vram = false; |
1857 | |
1858 | + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
1859 | + |
1860 | switch (flags & MSM_BO_CACHE_MASK) { |
1861 | case MSM_BO_UNCACHED: |
1862 | case MSM_BO_CACHED: |
1863 | @@ -863,7 +865,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, |
1864 | |
1865 | size = PAGE_ALIGN(dmabuf->size); |
1866 | |
1867 | + /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ |
1868 | + mutex_lock(&dev->struct_mutex); |
1869 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); |
1870 | + mutex_unlock(&dev->struct_mutex); |
1871 | + |
1872 | if (ret) |
1873 | goto fail; |
1874 | |
1875 | diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c |
1876 | index 505dee0db973..ca91651be3d4 100644 |
1877 | --- a/drivers/gpu/drm/omapdrm/omap_gem.c |
1878 | +++ b/drivers/gpu/drm/omapdrm/omap_gem.c |
1879 | @@ -195,7 +195,7 @@ static void evict_entry(struct drm_gem_object *obj, |
1880 | size_t size = PAGE_SIZE * n; |
1881 | loff_t off = mmap_offset(obj) + |
1882 | (entry->obj_pgoff << PAGE_SHIFT); |
1883 | - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); |
1884 | + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); |
1885 | |
1886 | if (m > 1) { |
1887 | int i; |
1888 | @@ -442,7 +442,7 @@ static int fault_2d(struct drm_gem_object *obj, |
1889 | * into account in some of the math, so figure out virtual stride |
1890 | * in pages |
1891 | */ |
1892 | - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); |
1893 | + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); |
1894 | |
1895 | /* We don't use vmf->pgoff since that has the fake offset: */ |
1896 | pgoff = ((unsigned long)vmf->virtual_address - |
1897 | diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c |
1898 | index 9e77fc034e0a..aad2f4a2a0ef 100644 |
1899 | --- a/drivers/gpu/drm/sun4i/sun4i_drv.c |
1900 | +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c |
1901 | @@ -212,6 +212,11 @@ static const struct component_master_ops sun4i_drv_master_ops = { |
1902 | .unbind = sun4i_drv_unbind, |
1903 | }; |
1904 | |
1905 | +static bool sun4i_drv_node_is_connector(struct device_node *node) |
1906 | +{ |
1907 | + return of_device_is_compatible(node, "hdmi-connector"); |
1908 | +} |
1909 | + |
1910 | static bool sun4i_drv_node_is_frontend(struct device_node *node) |
1911 | { |
1912 | return of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") || |
1913 | @@ -252,6 +257,13 @@ static int sun4i_drv_add_endpoints(struct device *dev, |
1914 | !of_device_is_available(node)) |
1915 | return 0; |
1916 | |
1917 | + /* |
1918 | + * The connectors will be the last nodes in our pipeline, we |
1919 | + * can just bail out. |
1920 | + */ |
1921 | + if (sun4i_drv_node_is_connector(node)) |
1922 | + return 0; |
1923 | + |
1924 | if (!sun4i_drv_node_is_frontend(node)) { |
1925 | /* Add current component */ |
1926 | DRM_DEBUG_DRIVER("Adding component %s\n", |
1927 | diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c |
1928 | index ab3016982466..b608cd463d4e 100644 |
1929 | --- a/drivers/gpu/drm/vc4/vc4_gem.c |
1930 | +++ b/drivers/gpu/drm/vc4/vc4_gem.c |
1931 | @@ -110,8 +110,8 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, |
1932 | &handle); |
1933 | |
1934 | if (ret) { |
1935 | - state->bo_count = i - 1; |
1936 | - goto err; |
1937 | + state->bo_count = i; |
1938 | + goto err_delete_handle; |
1939 | } |
1940 | bo_state[i].handle = handle; |
1941 | bo_state[i].paddr = vc4_bo->base.paddr; |
1942 | @@ -123,13 +123,16 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, |
1943 | state->bo_count * sizeof(*bo_state))) |
1944 | ret = -EFAULT; |
1945 | |
1946 | - kfree(bo_state); |
1947 | +err_delete_handle: |
1948 | + if (ret) { |
1949 | + for (i = 0; i < state->bo_count; i++) |
1950 | + drm_gem_handle_delete(file_priv, bo_state[i].handle); |
1951 | + } |
1952 | |
1953 | err_free: |
1954 | - |
1955 | vc4_free_hang_state(dev, kernel_state); |
1956 | + kfree(bo_state); |
1957 | |
1958 | -err: |
1959 | return ret; |
1960 | } |
1961 | |
1962 | diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c |
1963 | index 865e7c262322..7d6da9b43dab 100644 |
1964 | --- a/drivers/hid/i2c-hid/i2c-hid.c |
1965 | +++ b/drivers/hid/i2c-hid/i2c-hid.c |
1966 | @@ -968,6 +968,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client, |
1967 | return ret < 0 && ret != -ENXIO ? ret : 0; |
1968 | } |
1969 | |
1970 | +static void i2c_hid_acpi_fix_up_power(struct device *dev) |
1971 | +{ |
1972 | + acpi_handle handle = ACPI_HANDLE(dev); |
1973 | + struct acpi_device *adev; |
1974 | + |
1975 | + if (handle && acpi_bus_get_device(handle, &adev) == 0) |
1976 | + acpi_device_fix_up_power(adev); |
1977 | +} |
1978 | + |
1979 | static const struct acpi_device_id i2c_hid_acpi_match[] = { |
1980 | {"ACPI0C50", 0 }, |
1981 | {"PNP0C50", 0 }, |
1982 | @@ -980,6 +989,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client, |
1983 | { |
1984 | return -ENODEV; |
1985 | } |
1986 | + |
1987 | +static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {} |
1988 | #endif |
1989 | |
1990 | #ifdef CONFIG_OF |
1991 | @@ -1082,6 +1093,8 @@ static int i2c_hid_probe(struct i2c_client *client, |
1992 | if (ret < 0) |
1993 | goto err; |
1994 | |
1995 | + i2c_hid_acpi_fix_up_power(&client->dev); |
1996 | + |
1997 | pm_runtime_get_noresume(&client->dev); |
1998 | pm_runtime_set_active(&client->dev); |
1999 | pm_runtime_enable(&client->dev); |
2000 | diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c |
2001 | index b24f1d3045f0..a629f7c130f0 100644 |
2002 | --- a/drivers/hwmon/ina2xx.c |
2003 | +++ b/drivers/hwmon/ina2xx.c |
2004 | @@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 }; |
2005 | |
2006 | struct ina2xx_config { |
2007 | u16 config_default; |
2008 | - int calibration_factor; |
2009 | + int calibration_value; |
2010 | int registers; |
2011 | int shunt_div; |
2012 | int bus_voltage_shift; |
2013 | int bus_voltage_lsb; /* uV */ |
2014 | - int power_lsb; /* uW */ |
2015 | + int power_lsb_factor; |
2016 | }; |
2017 | |
2018 | struct ina2xx_data { |
2019 | const struct ina2xx_config *config; |
2020 | |
2021 | long rshunt; |
2022 | + long current_lsb_uA; |
2023 | + long power_lsb_uW; |
2024 | struct mutex config_lock; |
2025 | struct regmap *regmap; |
2026 | |
2027 | @@ -115,21 +117,21 @@ struct ina2xx_data { |
2028 | static const struct ina2xx_config ina2xx_config[] = { |
2029 | [ina219] = { |
2030 | .config_default = INA219_CONFIG_DEFAULT, |
2031 | - .calibration_factor = 40960000, |
2032 | + .calibration_value = 4096, |
2033 | .registers = INA219_REGISTERS, |
2034 | .shunt_div = 100, |
2035 | .bus_voltage_shift = 3, |
2036 | .bus_voltage_lsb = 4000, |
2037 | - .power_lsb = 20000, |
2038 | + .power_lsb_factor = 20, |
2039 | }, |
2040 | [ina226] = { |
2041 | .config_default = INA226_CONFIG_DEFAULT, |
2042 | - .calibration_factor = 5120000, |
2043 | + .calibration_value = 2048, |
2044 | .registers = INA226_REGISTERS, |
2045 | .shunt_div = 400, |
2046 | .bus_voltage_shift = 0, |
2047 | .bus_voltage_lsb = 1250, |
2048 | - .power_lsb = 25000, |
2049 | + .power_lsb_factor = 25, |
2050 | }, |
2051 | }; |
2052 | |
2053 | @@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval) |
2054 | return INA226_SHIFT_AVG(avg_bits); |
2055 | } |
2056 | |
2057 | +/* |
2058 | + * Calibration register is set to the best value, which eliminates |
2059 | + * truncation errors on calculating current register in hardware. |
2060 | + * According to datasheet (eq. 3) the best values are 2048 for |
2061 | + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. |
2062 | + */ |
2063 | static int ina2xx_calibrate(struct ina2xx_data *data) |
2064 | { |
2065 | - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, |
2066 | - data->rshunt); |
2067 | - |
2068 | - return regmap_write(data->regmap, INA2XX_CALIBRATION, val); |
2069 | + return regmap_write(data->regmap, INA2XX_CALIBRATION, |
2070 | + data->config->calibration_value); |
2071 | } |
2072 | |
2073 | /* |
2074 | @@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data) |
2075 | if (ret < 0) |
2076 | return ret; |
2077 | |
2078 | - /* |
2079 | - * Set current LSB to 1mA, shunt is in uOhms |
2080 | - * (equation 13 in datasheet). |
2081 | - */ |
2082 | return ina2xx_calibrate(data); |
2083 | } |
2084 | |
2085 | @@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, |
2086 | val = DIV_ROUND_CLOSEST(val, 1000); |
2087 | break; |
2088 | case INA2XX_POWER: |
2089 | - val = regval * data->config->power_lsb; |
2090 | + val = regval * data->power_lsb_uW; |
2091 | break; |
2092 | case INA2XX_CURRENT: |
2093 | - /* signed register, LSB=1mA (selected), in mA */ |
2094 | - val = (s16)regval; |
2095 | + /* signed register, result in mA */ |
2096 | + val = regval * data->current_lsb_uA; |
2097 | + val = DIV_ROUND_CLOSEST(val, 1000); |
2098 | break; |
2099 | case INA2XX_CALIBRATION: |
2100 | - val = DIV_ROUND_CLOSEST(data->config->calibration_factor, |
2101 | - regval); |
2102 | + val = regval; |
2103 | break; |
2104 | default: |
2105 | /* programmer goofed */ |
2106 | @@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev, |
2107 | ina2xx_get_value(data, attr->index, regval)); |
2108 | } |
2109 | |
2110 | -static ssize_t ina2xx_set_shunt(struct device *dev, |
2111 | - struct device_attribute *da, |
2112 | - const char *buf, size_t count) |
2113 | +/* |
2114 | + * In order to keep calibration register value fixed, the product |
2115 | + * of current_lsb and shunt_resistor should also be fixed and equal |
2116 | + * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order |
2117 | + * to keep the scale. |
2118 | + */ |
2119 | +static int ina2xx_set_shunt(struct ina2xx_data *data, long val) |
2120 | +{ |
2121 | + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, |
2122 | + data->config->shunt_div); |
2123 | + if (val <= 0 || val > dividend) |
2124 | + return -EINVAL; |
2125 | + |
2126 | + mutex_lock(&data->config_lock); |
2127 | + data->rshunt = val; |
2128 | + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); |
2129 | + data->power_lsb_uW = data->config->power_lsb_factor * |
2130 | + data->current_lsb_uA; |
2131 | + mutex_unlock(&data->config_lock); |
2132 | + |
2133 | + return 0; |
2134 | +} |
2135 | + |
2136 | +static ssize_t ina2xx_store_shunt(struct device *dev, |
2137 | + struct device_attribute *da, |
2138 | + const char *buf, size_t count) |
2139 | { |
2140 | unsigned long val; |
2141 | int status; |
2142 | @@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev, |
2143 | if (status < 0) |
2144 | return status; |
2145 | |
2146 | - if (val == 0 || |
2147 | - /* Values greater than the calibration factor make no sense. */ |
2148 | - val > data->config->calibration_factor) |
2149 | - return -EINVAL; |
2150 | - |
2151 | - mutex_lock(&data->config_lock); |
2152 | - data->rshunt = val; |
2153 | - status = ina2xx_calibrate(data); |
2154 | - mutex_unlock(&data->config_lock); |
2155 | + status = ina2xx_set_shunt(data, val); |
2156 | if (status < 0) |
2157 | return status; |
2158 | - |
2159 | return count; |
2160 | } |
2161 | |
2162 | @@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, |
2163 | |
2164 | /* shunt resistance */ |
2165 | static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, |
2166 | - ina2xx_show_value, ina2xx_set_shunt, |
2167 | + ina2xx_show_value, ina2xx_store_shunt, |
2168 | INA2XX_CALIBRATION); |
2169 | |
2170 | /* update interval (ina226 only) */ |
2171 | @@ -441,10 +457,7 @@ static int ina2xx_probe(struct i2c_client *client, |
2172 | val = INA2XX_RSHUNT_DEFAULT; |
2173 | } |
2174 | |
2175 | - if (val <= 0 || val > data->config->calibration_factor) |
2176 | - return -ENODEV; |
2177 | - |
2178 | - data->rshunt = val; |
2179 | + ina2xx_set_shunt(data, val); |
2180 | |
2181 | ina2xx_regmap_config.max_register = data->config->registers; |
2182 | |
2183 | diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c |
2184 | index d8517d2a968c..864488793f09 100644 |
2185 | --- a/drivers/hwtracing/coresight/coresight-tmc.c |
2186 | +++ b/drivers/hwtracing/coresight/coresight-tmc.c |
2187 | @@ -362,6 +362,13 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) |
2188 | desc.type = CORESIGHT_DEV_TYPE_SINK; |
2189 | desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; |
2190 | desc.ops = &tmc_etr_cs_ops; |
2191 | + /* |
2192 | + * ETR configuration uses a 40-bit AXI master in place of |
2193 | + * the embedded SRAM of ETB/ETF. |
2194 | + */ |
2195 | + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
2196 | + if (ret) |
2197 | + goto out; |
2198 | } else { |
2199 | desc.type = CORESIGHT_DEV_TYPE_LINKSINK; |
2200 | desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; |
2201 | diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c |
2202 | index 7bf00a0beb6f..4383324ec01c 100644 |
2203 | --- a/drivers/hwtracing/coresight/coresight.c |
2204 | +++ b/drivers/hwtracing/coresight/coresight.c |
2205 | @@ -498,6 +498,9 @@ int coresight_enable(struct coresight_device *csdev) |
2206 | { |
2207 | int cpu, ret = 0; |
2208 | struct list_head *path; |
2209 | + enum coresight_dev_subtype_source subtype; |
2210 | + |
2211 | + subtype = csdev->subtype.source_subtype; |
2212 | |
2213 | mutex_lock(&coresight_mutex); |
2214 | |
2215 | @@ -505,8 +508,16 @@ int coresight_enable(struct coresight_device *csdev) |
2216 | if (ret) |
2217 | goto out; |
2218 | |
2219 | - if (csdev->enable) |
2220 | + if (csdev->enable) { |
2221 | + /* |
2222 | + * There could be multiple applications driving the software |
2223 | + * source. So keep the refcount for each such user when the |
2224 | + * source is already enabled. |
2225 | + */ |
2226 | + if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) |
2227 | + atomic_inc(csdev->refcnt); |
2228 | goto out; |
2229 | + } |
2230 | |
2231 | path = coresight_build_path(csdev); |
2232 | if (IS_ERR(path)) { |
2233 | @@ -523,7 +534,7 @@ int coresight_enable(struct coresight_device *csdev) |
2234 | if (ret) |
2235 | goto err_source; |
2236 | |
2237 | - switch (csdev->subtype.source_subtype) { |
2238 | + switch (subtype) { |
2239 | case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC: |
2240 | /* |
2241 | * When working from sysFS it is important to keep track |
2242 | diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c |
2243 | index c6a90b4a9c62..91b10afc8c34 100644 |
2244 | --- a/drivers/i2c/muxes/i2c-mux-reg.c |
2245 | +++ b/drivers/i2c/muxes/i2c-mux-reg.c |
2246 | @@ -196,20 +196,25 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) |
2247 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
2248 | mux->data.reg_size = resource_size(res); |
2249 | mux->data.reg = devm_ioremap_resource(&pdev->dev, res); |
2250 | - if (IS_ERR(mux->data.reg)) |
2251 | - return PTR_ERR(mux->data.reg); |
2252 | + if (IS_ERR(mux->data.reg)) { |
2253 | + ret = PTR_ERR(mux->data.reg); |
2254 | + goto err_put_parent; |
2255 | + } |
2256 | } |
2257 | |
2258 | if (mux->data.reg_size != 4 && mux->data.reg_size != 2 && |
2259 | mux->data.reg_size != 1) { |
2260 | dev_err(&pdev->dev, "Invalid register size\n"); |
2261 | - return -EINVAL; |
2262 | + ret = -EINVAL; |
2263 | + goto err_put_parent; |
2264 | } |
2265 | |
2266 | muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values, 0, 0, |
2267 | i2c_mux_reg_select, NULL); |
2268 | - if (!muxc) |
2269 | - return -ENOMEM; |
2270 | + if (!muxc) { |
2271 | + ret = -ENOMEM; |
2272 | + goto err_put_parent; |
2273 | + } |
2274 | muxc->priv = mux; |
2275 | |
2276 | platform_set_drvdata(pdev, muxc); |
2277 | @@ -235,6 +240,8 @@ static int i2c_mux_reg_probe(struct platform_device *pdev) |
2278 | |
2279 | add_adapter_failed: |
2280 | i2c_mux_del_adapters(muxc); |
2281 | +err_put_parent: |
2282 | + i2c_put_adapter(parent); |
2283 | |
2284 | return ret; |
2285 | } |
2286 | diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c |
2287 | index 678e8c7ea763..fec696ec3fd1 100644 |
2288 | --- a/drivers/iio/adc/hi8435.c |
2289 | +++ b/drivers/iio/adc/hi8435.c |
2290 | @@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev, |
2291 | enum iio_event_direction dir, int state) |
2292 | { |
2293 | struct hi8435_priv *priv = iio_priv(idev); |
2294 | + int ret; |
2295 | + u32 tmp; |
2296 | + |
2297 | + if (state) { |
2298 | + ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp); |
2299 | + if (ret < 0) |
2300 | + return ret; |
2301 | + if (tmp & BIT(chan->channel)) |
2302 | + priv->event_prev_val |= BIT(chan->channel); |
2303 | + else |
2304 | + priv->event_prev_val &= ~BIT(chan->channel); |
2305 | |
2306 | - priv->event_scan_mask &= ~BIT(chan->channel); |
2307 | - if (state) |
2308 | priv->event_scan_mask |= BIT(chan->channel); |
2309 | + } else |
2310 | + priv->event_scan_mask &= ~BIT(chan->channel); |
2311 | |
2312 | return 0; |
2313 | } |
2314 | @@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi) |
2315 | priv->spi = spi; |
2316 | |
2317 | reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW); |
2318 | - if (IS_ERR(reset_gpio)) { |
2319 | - /* chip s/w reset if h/w reset failed */ |
2320 | + if (!IS_ERR(reset_gpio)) { |
2321 | + /* need >=100ns low pulse to reset chip */ |
2322 | + gpiod_set_raw_value_cansleep(reset_gpio, 0); |
2323 | + udelay(1); |
2324 | + gpiod_set_raw_value_cansleep(reset_gpio, 1); |
2325 | + } else { |
2326 | + /* s/w reset chip if h/w reset is not available */ |
2327 | hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST); |
2328 | hi8435_writeb(priv, HI8435_CTRL_REG, 0); |
2329 | - } else { |
2330 | - udelay(5); |
2331 | - gpiod_set_value(reset_gpio, 1); |
2332 | } |
2333 | |
2334 | spi_set_drvdata(spi, idev); |
2335 | diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c |
2336 | index 7de0f397194b..d23cf7759ee7 100644 |
2337 | --- a/drivers/iio/light/rpr0521.c |
2338 | +++ b/drivers/iio/light/rpr0521.c |
2339 | @@ -510,13 +510,26 @@ static int rpr0521_probe(struct i2c_client *client, |
2340 | |
2341 | ret = pm_runtime_set_active(&client->dev); |
2342 | if (ret < 0) |
2343 | - return ret; |
2344 | + goto err_poweroff; |
2345 | |
2346 | pm_runtime_enable(&client->dev); |
2347 | pm_runtime_set_autosuspend_delay(&client->dev, RPR0521_SLEEP_DELAY_MS); |
2348 | pm_runtime_use_autosuspend(&client->dev); |
2349 | |
2350 | - return iio_device_register(indio_dev); |
2351 | + ret = iio_device_register(indio_dev); |
2352 | + if (ret) |
2353 | + goto err_pm_disable; |
2354 | + |
2355 | + return 0; |
2356 | + |
2357 | +err_pm_disable: |
2358 | + pm_runtime_disable(&client->dev); |
2359 | + pm_runtime_set_suspended(&client->dev); |
2360 | + pm_runtime_put_noidle(&client->dev); |
2361 | +err_poweroff: |
2362 | + rpr0521_poweroff(data); |
2363 | + |
2364 | + return ret; |
2365 | } |
2366 | |
2367 | static int rpr0521_remove(struct i2c_client *client) |
2368 | diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c |
2369 | index 6325e7dc8e03..f3cb4dc05391 100644 |
2370 | --- a/drivers/iio/magnetometer/st_magn_spi.c |
2371 | +++ b/drivers/iio/magnetometer/st_magn_spi.c |
2372 | @@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi) |
2373 | } |
2374 | |
2375 | static const struct spi_device_id st_magn_id_table[] = { |
2376 | - { LSM303DLHC_MAGN_DEV_NAME }, |
2377 | - { LSM303DLM_MAGN_DEV_NAME }, |
2378 | { LIS3MDL_MAGN_DEV_NAME }, |
2379 | { LSM303AGR_MAGN_DEV_NAME }, |
2380 | {}, |
2381 | diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c |
2382 | index 19d2eb46fda6..2a4a62ebfd8d 100644 |
2383 | --- a/drivers/iio/pressure/zpa2326.c |
2384 | +++ b/drivers/iio/pressure/zpa2326.c |
2385 | @@ -871,12 +871,13 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, |
2386 | { |
2387 | int ret; |
2388 | unsigned int val; |
2389 | + long timeout; |
2390 | |
2391 | zpa2326_dbg(indio_dev, "waiting for one shot completion interrupt"); |
2392 | |
2393 | - ret = wait_for_completion_interruptible_timeout( |
2394 | + timeout = wait_for_completion_interruptible_timeout( |
2395 | &private->data_ready, ZPA2326_CONVERSION_JIFFIES); |
2396 | - if (ret > 0) |
2397 | + if (timeout > 0) |
2398 | /* |
2399 | * Interrupt handler completed before timeout: return operation |
2400 | * status. |
2401 | @@ -886,13 +887,16 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, |
2402 | /* Clear all interrupts just to be sure. */ |
2403 | regmap_read(private->regmap, ZPA2326_INT_SOURCE_REG, &val); |
2404 | |
2405 | - if (!ret) |
2406 | + if (!timeout) { |
2407 | /* Timed out. */ |
2408 | + zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)", |
2409 | + timeout); |
2410 | ret = -ETIME; |
2411 | - |
2412 | - if (ret != -ERESTARTSYS) |
2413 | - zpa2326_warn(indio_dev, "no one shot interrupt occurred (%d)", |
2414 | - ret); |
2415 | + } else if (timeout < 0) { |
2416 | + zpa2326_warn(indio_dev, |
2417 | + "wait for one shot interrupt cancelled"); |
2418 | + ret = -ERESTARTSYS; |
2419 | + } |
2420 | |
2421 | return ret; |
2422 | } |
2423 | diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c |
2424 | index 6512a555f7f8..dd18b74cd01d 100644 |
2425 | --- a/drivers/infiniband/hw/cxgb4/cm.c |
2426 | +++ b/drivers/infiniband/hw/cxgb4/cm.c |
2427 | @@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) |
2428 | |
2429 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
2430 | release_ep_resources(ep); |
2431 | + kfree_skb(skb); |
2432 | return 0; |
2433 | } |
2434 | |
2435 | @@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) |
2436 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
2437 | c4iw_put_ep(&ep->parent_ep->com); |
2438 | release_ep_resources(ep); |
2439 | + kfree_skb(skb); |
2440 | return 0; |
2441 | } |
2442 | |
2443 | @@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb) |
2444 | |
2445 | PDBG("%s rdev %p\n", __func__, rdev); |
2446 | req->cmd = CPL_ABORT_NO_RST; |
2447 | + skb_get(skb); |
2448 | ret = c4iw_ofld_send(rdev, skb); |
2449 | if (ret) { |
2450 | __state_set(&ep->com, DEAD); |
2451 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); |
2452 | - } |
2453 | + } else |
2454 | + kfree_skb(skb); |
2455 | } |
2456 | |
2457 | static int send_flowc(struct c4iw_ep *ep) |
2458 | diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c |
2459 | index 919a5474e651..621b60ab74ee 100644 |
2460 | --- a/drivers/infiniband/hw/hfi1/sysfs.c |
2461 | +++ b/drivers/infiniband/hw/hfi1/sysfs.c |
2462 | @@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = { |
2463 | }; |
2464 | |
2465 | static struct attribute *port_cc_default_attributes[] = { |
2466 | - &cc_prescan_attr.attr |
2467 | + &cc_prescan_attr.attr, |
2468 | + NULL |
2469 | }; |
2470 | |
2471 | static struct kobj_type port_cc_ktype = { |
2472 | diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c |
2473 | index 2c4b4d072d6a..3b973cba8975 100644 |
2474 | --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c |
2475 | +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c |
2476 | @@ -3644,8 +3644,10 @@ enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_ |
2477 | hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1; |
2478 | hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted; |
2479 | |
2480 | - hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted; |
2481 | - hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted; |
2482 | + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = |
2483 | + roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted); |
2484 | + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = |
2485 | + roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted); |
2486 | hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt = |
2487 | hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; |
2488 | hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt = |
2489 | diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h |
2490 | index d1328a697750..8ce599e1639c 100644 |
2491 | --- a/drivers/infiniband/hw/i40iw/i40iw_d.h |
2492 | +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h |
2493 | @@ -86,6 +86,7 @@ |
2494 | #define RDMA_OPCODE_MASK 0x0f |
2495 | #define RDMA_READ_REQ_OPCODE 1 |
2496 | #define Q2_BAD_FRAME_OFFSET 72 |
2497 | +#define Q2_FPSN_OFFSET 64 |
2498 | #define CQE_MAJOR_DRV 0x8000 |
2499 | |
2500 | #define I40IW_TERM_SENT 0x01 |
2501 | diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c |
2502 | index c62d354f7810..3ed49146d73c 100644 |
2503 | --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c |
2504 | +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c |
2505 | @@ -1320,7 +1320,7 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, |
2506 | u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; |
2507 | u32 rcv_wnd = hw_host_ctx[23]; |
2508 | /* first partial seq # in q2 */ |
2509 | - u32 fps = qp->q2_buf[16]; |
2510 | + u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET); |
2511 | struct list_head *rxlist = &pfpdu->rxlist; |
2512 | struct list_head *plist; |
2513 | |
2514 | diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c |
2515 | index 6d9904a4a0ab..29dc5278d7be 100644 |
2516 | --- a/drivers/infiniband/sw/rdmavt/cq.c |
2517 | +++ b/drivers/infiniband/sw/rdmavt/cq.c |
2518 | @@ -197,7 +197,7 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, |
2519 | return ERR_PTR(-EINVAL); |
2520 | |
2521 | /* Allocate the completion queue structure. */ |
2522 | - cq = kzalloc(sizeof(*cq), GFP_KERNEL); |
2523 | + cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, rdi->dparms.node); |
2524 | if (!cq) |
2525 | return ERR_PTR(-ENOMEM); |
2526 | |
2527 | @@ -213,7 +213,9 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev, |
2528 | sz += sizeof(struct ib_uverbs_wc) * (entries + 1); |
2529 | else |
2530 | sz += sizeof(struct ib_wc) * (entries + 1); |
2531 | - wc = vmalloc_user(sz); |
2532 | + wc = udata ? |
2533 | + vmalloc_user(sz) : |
2534 | + vzalloc_node(sz, rdi->dparms.node); |
2535 | if (!wc) { |
2536 | ret = ERR_PTR(-ENOMEM); |
2537 | goto bail_cq; |
2538 | @@ -368,7 +370,9 @@ int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata) |
2539 | sz += sizeof(struct ib_uverbs_wc) * (cqe + 1); |
2540 | else |
2541 | sz += sizeof(struct ib_wc) * (cqe + 1); |
2542 | - wc = vmalloc_user(sz); |
2543 | + wc = udata ? |
2544 | + vmalloc_user(sz) : |
2545 | + vzalloc_node(sz, rdi->dparms.node); |
2546 | if (!wc) |
2547 | return -ENOMEM; |
2548 | |
2549 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2550 | index 29ab814693fc..876f438aa048 100644 |
2551 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
2552 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2553 | @@ -2292,12 +2292,8 @@ static void srpt_queue_response(struct se_cmd *cmd) |
2554 | } |
2555 | spin_unlock_irqrestore(&ioctx->spinlock, flags); |
2556 | |
2557 | - if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) |
2558 | - || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { |
2559 | - atomic_inc(&ch->req_lim_delta); |
2560 | - srpt_abort_cmd(ioctx); |
2561 | + if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) |
2562 | return; |
2563 | - } |
2564 | |
2565 | /* For read commands, transfer the data to the initiator. */ |
2566 | if (ioctx->cmd.data_direction == DMA_FROM_DEVICE && |
2567 | @@ -2670,7 +2666,8 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) |
2568 | struct srpt_rdma_ch *ch = ioctx->ch; |
2569 | unsigned long flags; |
2570 | |
2571 | - WARN_ON(ioctx->state != SRPT_STATE_DONE); |
2572 | + WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE && |
2573 | + !(ioctx->cmd.transport_state & CMD_T_ABORTED)); |
2574 | |
2575 | if (ioctx->n_rw_ctx) { |
2576 | srpt_free_rw_ctxs(ch, ioctx); |
2577 | diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c |
2578 | index c9d491bc85e0..3851d5715772 100644 |
2579 | --- a/drivers/input/mouse/elan_i2c_core.c |
2580 | +++ b/drivers/input/mouse/elan_i2c_core.c |
2581 | @@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client, |
2582 | return error; |
2583 | } |
2584 | |
2585 | + /* Make sure there is something at this address */ |
2586 | + error = i2c_smbus_read_byte(client); |
2587 | + if (error < 0) { |
2588 | + dev_dbg(&client->dev, "nothing at this address: %d\n", error); |
2589 | + return -ENXIO; |
2590 | + } |
2591 | + |
2592 | /* Initialize the touchpad. */ |
2593 | error = elan_initialize(data); |
2594 | if (error) |
2595 | diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c |
2596 | index a679e56c44cd..765879dcaf85 100644 |
2597 | --- a/drivers/input/mouse/elan_i2c_i2c.c |
2598 | +++ b/drivers/input/mouse/elan_i2c_i2c.c |
2599 | @@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, |
2600 | long ret; |
2601 | int error; |
2602 | int len; |
2603 | - u8 buffer[ETP_I2C_INF_LENGTH]; |
2604 | + u8 buffer[ETP_I2C_REPORT_LEN]; |
2605 | + |
2606 | + len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); |
2607 | + if (len != ETP_I2C_REPORT_LEN) { |
2608 | + error = len < 0 ? len : -EIO; |
2609 | + dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", |
2610 | + error, len); |
2611 | + } |
2612 | |
2613 | reinit_completion(completion); |
2614 | enable_irq(client->irq); |
2615 | diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c |
2616 | index 59603a5728f7..c519c0b09568 100644 |
2617 | --- a/drivers/input/mouse/elantech.c |
2618 | +++ b/drivers/input/mouse/elantech.c |
2619 | @@ -1711,6 +1711,17 @@ int elantech_init(struct psmouse *psmouse) |
2620 | etd->samples[0], etd->samples[1], etd->samples[2]); |
2621 | } |
2622 | |
2623 | + if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) { |
2624 | + /* |
2625 | + * This module has a bug which makes absolute mode |
2626 | + * unusable, so let's abort so we'll be using standard |
2627 | + * PS/2 protocol. |
2628 | + */ |
2629 | + psmouse_info(psmouse, |
2630 | + "absolute mode broken, forcing standard PS/2 protocol\n"); |
2631 | + goto init_fail; |
2632 | + } |
2633 | + |
2634 | if (elantech_set_absolute_mode(psmouse)) { |
2635 | psmouse_err(psmouse, |
2636 | "failed to put touchpad into absolute mode.\n"); |
2637 | diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c |
2638 | index 240b16f3ee97..5907fddcc966 100644 |
2639 | --- a/drivers/input/touchscreen/goodix.c |
2640 | +++ b/drivers/input/touchscreen/goodix.c |
2641 | @@ -778,8 +778,10 @@ static int __maybe_unused goodix_suspend(struct device *dev) |
2642 | int error; |
2643 | |
2644 | /* We need gpio pins to suspend/resume */ |
2645 | - if (!ts->gpiod_int || !ts->gpiod_rst) |
2646 | + if (!ts->gpiod_int || !ts->gpiod_rst) { |
2647 | + disable_irq(client->irq); |
2648 | return 0; |
2649 | + } |
2650 | |
2651 | wait_for_completion(&ts->firmware_loading_complete); |
2652 | |
2653 | @@ -819,8 +821,10 @@ static int __maybe_unused goodix_resume(struct device *dev) |
2654 | struct goodix_ts_data *ts = i2c_get_clientdata(client); |
2655 | int error; |
2656 | |
2657 | - if (!ts->gpiod_int || !ts->gpiod_rst) |
2658 | + if (!ts->gpiod_int || !ts->gpiod_rst) { |
2659 | + enable_irq(client->irq); |
2660 | return 0; |
2661 | + } |
2662 | |
2663 | /* |
2664 | * Exit sleep mode by outputting HIGH level to INT pin |
2665 | diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c |
2666 | index fd4a78296b48..100c80e48190 100644 |
2667 | --- a/drivers/irqchip/irq-gic-v3.c |
2668 | +++ b/drivers/irqchip/irq-gic-v3.c |
2669 | @@ -1250,6 +1250,10 @@ gic_acpi_parse_madt_gicc(struct acpi_subtable_header *header, |
2670 | u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; |
2671 | void __iomem *redist_base; |
2672 | |
2673 | + /* GICC entry which has !ACPI_MADT_ENABLED is not unusable so skip */ |
2674 | + if (!(gicc->flags & ACPI_MADT_ENABLED)) |
2675 | + return 0; |
2676 | + |
2677 | redist_base = ioremap(gicc->gicr_base_address, size); |
2678 | if (!redist_base) |
2679 | return -ENOMEM; |
2680 | @@ -1299,6 +1303,13 @@ static int __init gic_acpi_match_gicc(struct acpi_subtable_header *header, |
2681 | if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) |
2682 | return 0; |
2683 | |
2684 | + /* |
2685 | + * It's perfectly valid firmware can pass disabled GICC entry, driver |
2686 | + * should not treat as errors, skip the entry instead of probe fail. |
2687 | + */ |
2688 | + if (!(gicc->flags & ACPI_MADT_ENABLED)) |
2689 | + return 0; |
2690 | + |
2691 | return -ENODEV; |
2692 | } |
2693 | |
2694 | diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c |
2695 | index 03b79b061d24..05d87f60d929 100644 |
2696 | --- a/drivers/irqchip/irq-mbigen.c |
2697 | +++ b/drivers/irqchip/irq-mbigen.c |
2698 | @@ -105,10 +105,7 @@ static inline void get_mbigen_type_reg(irq_hw_number_t hwirq, |
2699 | static inline void get_mbigen_clear_reg(irq_hw_number_t hwirq, |
2700 | u32 *mask, u32 *addr) |
2701 | { |
2702 | - unsigned int ofst; |
2703 | - |
2704 | - hwirq -= RESERVED_IRQ_PER_MBIGEN_CHIP; |
2705 | - ofst = hwirq / 32 * 4; |
2706 | + unsigned int ofst = (hwirq / 32) * 4; |
2707 | |
2708 | *mask = 1 << (hwirq % 32); |
2709 | *addr = ofst + REG_MBIGEN_CLEAR_OFFSET; |
2710 | diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c |
2711 | index 9cb4b621fbc3..b92a19a594a1 100644 |
2712 | --- a/drivers/isdn/mISDN/stack.c |
2713 | +++ b/drivers/isdn/mISDN/stack.c |
2714 | @@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) |
2715 | if (sk->sk_state != MISDN_BOUND) |
2716 | continue; |
2717 | if (!cskb) |
2718 | - cskb = skb_copy(skb, GFP_KERNEL); |
2719 | + cskb = skb_copy(skb, GFP_ATOMIC); |
2720 | if (!cskb) { |
2721 | printk(KERN_WARNING "%s no skb\n", __func__); |
2722 | break; |
2723 | diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c |
2724 | index 840401ae9a4e..f6726484a8a1 100644 |
2725 | --- a/drivers/leds/leds-pca955x.c |
2726 | +++ b/drivers/leds/leds-pca955x.c |
2727 | @@ -266,7 +266,7 @@ static int pca955x_probe(struct i2c_client *client, |
2728 | "slave address 0x%02x\n", |
2729 | id->name, chip->bits, client->addr); |
2730 | |
2731 | - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) |
2732 | + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) |
2733 | return -EIO; |
2734 | |
2735 | if (pdata) { |
2736 | diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c |
2737 | index 537903bf9add..d23337e8c4ee 100644 |
2738 | --- a/drivers/md/bcache/alloc.c |
2739 | +++ b/drivers/md/bcache/alloc.c |
2740 | @@ -512,15 +512,21 @@ struct open_bucket { |
2741 | |
2742 | /* |
2743 | * We keep multiple buckets open for writes, and try to segregate different |
2744 | - * write streams for better cache utilization: first we look for a bucket where |
2745 | - * the last write to it was sequential with the current write, and failing that |
2746 | - * we look for a bucket that was last used by the same task. |
2747 | + * write streams for better cache utilization: first we try to segregate flash |
2748 | + * only volume write streams from cached devices, secondly we look for a bucket |
2749 | + * where the last write to it was sequential with the current write, and |
2750 | + * failing that we look for a bucket that was last used by the same task. |
2751 | * |
2752 | * The ideas is if you've got multiple tasks pulling data into the cache at the |
2753 | * same time, you'll get better cache utilization if you try to segregate their |
2754 | * data and preserve locality. |
2755 | * |
2756 | - * For example, say you've starting Firefox at the same time you're copying a |
2757 | + * For example, dirty sectors of flash only volume is not reclaimable, if their |
2758 | + * dirty sectors mixed with dirty sectors of cached device, such buckets will |
2759 | + * be marked as dirty and won't be reclaimed, though the dirty data of cached |
2760 | + * device have been written back to backend device. |
2761 | + * |
2762 | + * And say you've starting Firefox at the same time you're copying a |
2763 | * bunch of files. Firefox will likely end up being fairly hot and stay in the |
2764 | * cache awhile, but the data you copied might not be; if you wrote all that |
2765 | * data to the same buckets it'd get invalidated at the same time. |
2766 | @@ -537,7 +543,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, |
2767 | struct open_bucket *ret, *ret_task = NULL; |
2768 | |
2769 | list_for_each_entry_reverse(ret, &c->data_buckets, list) |
2770 | - if (!bkey_cmp(&ret->key, search)) |
2771 | + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != |
2772 | + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) |
2773 | + continue; |
2774 | + else if (!bkey_cmp(&ret->key, search)) |
2775 | goto found; |
2776 | else if (ret->last_write_point == write_point) |
2777 | ret_task = ret; |
2778 | diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c |
2779 | index c61341c84d2d..4af7cd423c71 100644 |
2780 | --- a/drivers/md/bcache/super.c |
2781 | +++ b/drivers/md/bcache/super.c |
2782 | @@ -892,6 +892,12 @@ static void cached_dev_detach_finish(struct work_struct *w) |
2783 | |
2784 | mutex_lock(&bch_register_lock); |
2785 | |
2786 | + cancel_delayed_work_sync(&dc->writeback_rate_update); |
2787 | + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { |
2788 | + kthread_stop(dc->writeback_thread); |
2789 | + dc->writeback_thread = NULL; |
2790 | + } |
2791 | + |
2792 | memset(&dc->sb.set_uuid, 0, 16); |
2793 | SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); |
2794 | |
2795 | diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c |
2796 | index ba7edcdd09ce..fcc2b5746a9f 100644 |
2797 | --- a/drivers/md/md-cluster.c |
2798 | +++ b/drivers/md/md-cluster.c |
2799 | @@ -1122,8 +1122,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) |
2800 | cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); |
2801 | lock_comm(cinfo); |
2802 | ret = __sendmsg(cinfo, &cmsg); |
2803 | - if (ret) |
2804 | + if (ret) { |
2805 | + unlock_comm(cinfo); |
2806 | return ret; |
2807 | + } |
2808 | cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; |
2809 | ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); |
2810 | cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; |
2811 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
2812 | index 4493be50fc6a..86ba7851e881 100644 |
2813 | --- a/drivers/md/raid5.c |
2814 | +++ b/drivers/md/raid5.c |
2815 | @@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) |
2816 | static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) |
2817 | { |
2818 | int i; |
2819 | - local_irq_disable(); |
2820 | - spin_lock(conf->hash_locks); |
2821 | + spin_lock_irq(conf->hash_locks); |
2822 | for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) |
2823 | spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); |
2824 | spin_lock(&conf->device_lock); |
2825 | @@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) |
2826 | { |
2827 | int i; |
2828 | spin_unlock(&conf->device_lock); |
2829 | - for (i = NR_STRIPE_HASH_LOCKS; i; i--) |
2830 | - spin_unlock(conf->hash_locks + i - 1); |
2831 | - local_irq_enable(); |
2832 | + for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) |
2833 | + spin_unlock(conf->hash_locks + i); |
2834 | + spin_unlock_irq(conf->hash_locks); |
2835 | } |
2836 | |
2837 | /* bio's attached to a stripe+device for I/O are linked together in bi_sector |
2838 | @@ -732,12 +731,11 @@ static bool is_full_stripe_write(struct stripe_head *sh) |
2839 | |
2840 | static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
2841 | { |
2842 | - local_irq_disable(); |
2843 | if (sh1 > sh2) { |
2844 | - spin_lock(&sh2->stripe_lock); |
2845 | + spin_lock_irq(&sh2->stripe_lock); |
2846 | spin_lock_nested(&sh1->stripe_lock, 1); |
2847 | } else { |
2848 | - spin_lock(&sh1->stripe_lock); |
2849 | + spin_lock_irq(&sh1->stripe_lock); |
2850 | spin_lock_nested(&sh2->stripe_lock, 1); |
2851 | } |
2852 | } |
2853 | @@ -745,8 +743,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
2854 | static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) |
2855 | { |
2856 | spin_unlock(&sh1->stripe_lock); |
2857 | - spin_unlock(&sh2->stripe_lock); |
2858 | - local_irq_enable(); |
2859 | + spin_unlock_irq(&sh2->stripe_lock); |
2860 | } |
2861 | |
2862 | /* Only freshly new full stripe normal write stripe can be added to a batch list */ |
2863 | diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c |
2864 | index 142ae28803bb..d558ed3e59c6 100644 |
2865 | --- a/drivers/media/i2c/cx25840/cx25840-core.c |
2866 | +++ b/drivers/media/i2c/cx25840/cx25840-core.c |
2867 | @@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client) |
2868 | INIT_WORK(&state->fw_work, cx25840_work_handler); |
2869 | init_waitqueue_head(&state->fw_wait); |
2870 | q = create_singlethread_workqueue("cx25840_fw"); |
2871 | - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2872 | - queue_work(q, &state->fw_work); |
2873 | - schedule(); |
2874 | - finish_wait(&state->fw_wait, &wait); |
2875 | - destroy_workqueue(q); |
2876 | + if (q) { |
2877 | + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2878 | + queue_work(q, &state->fw_work); |
2879 | + schedule(); |
2880 | + finish_wait(&state->fw_wait, &wait); |
2881 | + destroy_workqueue(q); |
2882 | + } |
2883 | |
2884 | /* 6. */ |
2885 | cx25840_write(client, 0x115, 0x8c); |
2886 | @@ -634,11 +636,13 @@ static void cx23885_initialize(struct i2c_client *client) |
2887 | INIT_WORK(&state->fw_work, cx25840_work_handler); |
2888 | init_waitqueue_head(&state->fw_wait); |
2889 | q = create_singlethread_workqueue("cx25840_fw"); |
2890 | - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2891 | - queue_work(q, &state->fw_work); |
2892 | - schedule(); |
2893 | - finish_wait(&state->fw_wait, &wait); |
2894 | - destroy_workqueue(q); |
2895 | + if (q) { |
2896 | + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2897 | + queue_work(q, &state->fw_work); |
2898 | + schedule(); |
2899 | + finish_wait(&state->fw_wait, &wait); |
2900 | + destroy_workqueue(q); |
2901 | + } |
2902 | |
2903 | /* Call the cx23888 specific std setup func, we no longer rely on |
2904 | * the generic cx24840 func. |
2905 | @@ -752,11 +756,13 @@ static void cx231xx_initialize(struct i2c_client *client) |
2906 | INIT_WORK(&state->fw_work, cx25840_work_handler); |
2907 | init_waitqueue_head(&state->fw_wait); |
2908 | q = create_singlethread_workqueue("cx25840_fw"); |
2909 | - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2910 | - queue_work(q, &state->fw_work); |
2911 | - schedule(); |
2912 | - finish_wait(&state->fw_wait, &wait); |
2913 | - destroy_workqueue(q); |
2914 | + if (q) { |
2915 | + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); |
2916 | + queue_work(q, &state->fw_work); |
2917 | + schedule(); |
2918 | + finish_wait(&state->fw_wait, &wait); |
2919 | + destroy_workqueue(q); |
2920 | + } |
2921 | |
2922 | cx25840_std_setup(client); |
2923 | |
2924 | diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c |
2925 | index c12209c701d3..390d708c807a 100644 |
2926 | --- a/drivers/media/platform/pxa_camera.c |
2927 | +++ b/drivers/media/platform/pxa_camera.c |
2928 | @@ -2169,6 +2169,12 @@ static void pxa_camera_sensor_unbind(struct v4l2_async_notifier *notifier, |
2929 | pxa_dma_stop_channels(pcdev); |
2930 | |
2931 | pxa_camera_destroy_formats(pcdev); |
2932 | + |
2933 | + if (pcdev->mclk_clk) { |
2934 | + v4l2_clk_unregister(pcdev->mclk_clk); |
2935 | + pcdev->mclk_clk = NULL; |
2936 | + } |
2937 | + |
2938 | video_unregister_device(&pcdev->vdev); |
2939 | pcdev->sensor = NULL; |
2940 | |
2941 | @@ -2495,7 +2501,13 @@ static int pxa_camera_remove(struct platform_device *pdev) |
2942 | dma_release_channel(pcdev->dma_chans[1]); |
2943 | dma_release_channel(pcdev->dma_chans[2]); |
2944 | |
2945 | - v4l2_clk_unregister(pcdev->mclk_clk); |
2946 | + v4l2_async_notifier_unregister(&pcdev->notifier); |
2947 | + |
2948 | + if (pcdev->mclk_clk) { |
2949 | + v4l2_clk_unregister(pcdev->mclk_clk); |
2950 | + pcdev->mclk_clk = NULL; |
2951 | + } |
2952 | + |
2953 | v4l2_device_unregister(&pcdev->v4l2_dev); |
2954 | |
2955 | dev_info(&pdev->dev, "PXA Camera driver unloaded\n"); |
2956 | diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c |
2957 | index db525cdfac88..d9f88a4a96bd 100644 |
2958 | --- a/drivers/media/rc/mceusb.c |
2959 | +++ b/drivers/media/rc/mceusb.c |
2960 | @@ -1381,8 +1381,13 @@ static int mceusb_dev_probe(struct usb_interface *intf, |
2961 | goto rc_dev_fail; |
2962 | |
2963 | /* wire up inbound data handler */ |
2964 | - usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, |
2965 | - mceusb_dev_recv, ir, ep_in->bInterval); |
2966 | + if (usb_endpoint_xfer_int(ep_in)) |
2967 | + usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, |
2968 | + mceusb_dev_recv, ir, ep_in->bInterval); |
2969 | + else |
2970 | + usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, |
2971 | + mceusb_dev_recv, ir); |
2972 | + |
2973 | ir->urb_in->transfer_dma = ir->dma_in; |
2974 | ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; |
2975 | |
2976 | diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c |
2977 | index 9ccf7f5e0e2e..4299ce06c25b 100644 |
2978 | --- a/drivers/media/v4l2-core/videobuf2-core.c |
2979 | +++ b/drivers/media/v4l2-core/videobuf2-core.c |
2980 | @@ -334,6 +334,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, |
2981 | struct vb2_buffer *vb; |
2982 | int ret; |
2983 | |
2984 | + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ |
2985 | + num_buffers = min_t(unsigned int, num_buffers, |
2986 | + VB2_MAX_FRAME - q->num_buffers); |
2987 | + |
2988 | for (buffer = 0; buffer < num_buffers; ++buffer) { |
2989 | /* Allocate videobuf buffer structures */ |
2990 | vb = kzalloc(q->buf_struct_size, GFP_KERNEL); |
2991 | diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c |
2992 | index c63d61e17d56..381a9a166f93 100644 |
2993 | --- a/drivers/misc/cxl/flash.c |
2994 | +++ b/drivers/misc/cxl/flash.c |
2995 | @@ -401,8 +401,10 @@ static int device_open(struct inode *inode, struct file *file) |
2996 | if (down_interruptible(&sem) != 0) |
2997 | return -EPERM; |
2998 | |
2999 | - if (!(adapter = get_cxl_adapter(adapter_num))) |
3000 | - return -ENODEV; |
3001 | + if (!(adapter = get_cxl_adapter(adapter_num))) { |
3002 | + rc = -ENODEV; |
3003 | + goto err_unlock; |
3004 | + } |
3005 | |
3006 | file->private_data = adapter; |
3007 | continue_token = 0; |
3008 | @@ -446,6 +448,8 @@ static int device_open(struct inode *inode, struct file *file) |
3009 | free_page((unsigned long) le); |
3010 | err: |
3011 | put_device(&adapter->dev); |
3012 | +err_unlock: |
3013 | + up(&sem); |
3014 | |
3015 | return rc; |
3016 | } |
3017 | diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c |
3018 | index f84a4275ca29..f735ab4ba84e 100644 |
3019 | --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c |
3020 | +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c |
3021 | @@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags) |
3022 | size_t pas_size; |
3023 | size_t vas_size; |
3024 | size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); |
3025 | - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; |
3026 | + u64 num_pages; |
3027 | |
3028 | + if (size > SIZE_MAX - PAGE_SIZE) |
3029 | + return NULL; |
3030 | + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; |
3031 | if (num_pages > |
3032 | (SIZE_MAX - queue_size) / |
3033 | (sizeof(*queue->kernel_if->u.g.pas) + |
3034 | @@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) |
3035 | { |
3036 | struct vmci_queue *queue; |
3037 | size_t queue_page_size; |
3038 | - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; |
3039 | + u64 num_pages; |
3040 | const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); |
3041 | |
3042 | + if (size > SIZE_MAX - PAGE_SIZE) |
3043 | + return NULL; |
3044 | + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; |
3045 | if (num_pages > (SIZE_MAX - queue_size) / |
3046 | sizeof(*queue->kernel_if->u.h.page)) |
3047 | return NULL; |
3048 | diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c |
3049 | index b0b9ceb0ab01..cfb794766fea 100644 |
3050 | --- a/drivers/mmc/host/sdhci-pci-core.c |
3051 | +++ b/drivers/mmc/host/sdhci-pci-core.c |
3052 | @@ -492,6 +492,8 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot) |
3053 | slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; |
3054 | break; |
3055 | case INTEL_MRFLD_SDIO: |
3056 | + /* Advertise 2.0v for compatibility with the SDIO card's OCR */ |
3057 | + slot->host->ocr_mask = MMC_VDD_20_21 | MMC_VDD_165_195; |
3058 | slot->host->mmc->caps |= MMC_CAP_NONREMOVABLE | |
3059 | MMC_CAP_POWER_OFF_CARD; |
3060 | break; |
3061 | diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c |
3062 | index 7d275e72903a..44ea9d88651f 100644 |
3063 | --- a/drivers/mmc/host/sdhci.c |
3064 | +++ b/drivers/mmc/host/sdhci.c |
3065 | @@ -1404,6 +1404,13 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode, |
3066 | if (mode != MMC_POWER_OFF) { |
3067 | switch (1 << vdd) { |
3068 | case MMC_VDD_165_195: |
3069 | + /* |
3070 | + * Without a regulator, SDHCI does not support 2.0v |
3071 | + * so we only get here if the driver deliberately |
3072 | + * added the 2.0v range to ocr_avail. Map it to 1.8v |
3073 | + * for the purpose of turning on the power. |
3074 | + */ |
3075 | + case MMC_VDD_20_21: |
3076 | pwr = SDHCI_POWER_180; |
3077 | break; |
3078 | case MMC_VDD_29_30: |
3079 | diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
3080 | index 427039b77668..d9dab4275859 100644 |
3081 | --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
3082 | +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c |
3083 | @@ -2047,18 +2047,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) |
3084 | |
3085 | ret = nand_boot_init(this); |
3086 | if (ret) |
3087 | - goto err_out; |
3088 | + goto err_nand_cleanup; |
3089 | ret = chip->scan_bbt(mtd); |
3090 | if (ret) |
3091 | - goto err_out; |
3092 | + goto err_nand_cleanup; |
3093 | |
3094 | ret = mtd_device_register(mtd, NULL, 0); |
3095 | if (ret) |
3096 | - goto err_out; |
3097 | + goto err_nand_cleanup; |
3098 | return 0; |
3099 | |
3100 | +err_nand_cleanup: |
3101 | + nand_cleanup(chip); |
3102 | err_out: |
3103 | - gpmi_nand_exit(this); |
3104 | + gpmi_free_dma_buffer(this); |
3105 | return ret; |
3106 | } |
3107 | |
3108 | diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c |
3109 | index a3e86e52640a..5fb45161789c 100644 |
3110 | --- a/drivers/mtd/nand/nand_base.c |
3111 | +++ b/drivers/mtd/nand/nand_base.c |
3112 | @@ -4785,6 +4785,11 @@ int nand_scan_tail(struct mtd_info *mtd) |
3113 | goto err_free; |
3114 | } |
3115 | ecc->total = ecc->steps * ecc->bytes; |
3116 | + if (ecc->total > mtd->oobsize) { |
3117 | + WARN(1, "Total number of ECC bytes exceeded oobsize\n"); |
3118 | + ret = -EINVAL; |
3119 | + goto err_free; |
3120 | + } |
3121 | |
3122 | /* |
3123 | * The number of bytes available for a client to place data into |
3124 | diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c |
3125 | index 1cb3f7758fb6..766b2c385682 100644 |
3126 | --- a/drivers/mtd/tests/oobtest.c |
3127 | +++ b/drivers/mtd/tests/oobtest.c |
3128 | @@ -193,6 +193,9 @@ static int verify_eraseblock(int ebnum) |
3129 | ops.datbuf = NULL; |
3130 | ops.oobbuf = readbuf; |
3131 | err = mtd_read_oob(mtd, addr, &ops); |
3132 | + if (mtd_is_bitflip(err)) |
3133 | + err = 0; |
3134 | + |
3135 | if (err || ops.oobretlen != use_len) { |
3136 | pr_err("error: readoob failed at %#llx\n", |
3137 | (long long)addr); |
3138 | @@ -227,6 +230,9 @@ static int verify_eraseblock(int ebnum) |
3139 | ops.datbuf = NULL; |
3140 | ops.oobbuf = readbuf; |
3141 | err = mtd_read_oob(mtd, addr, &ops); |
3142 | + if (mtd_is_bitflip(err)) |
3143 | + err = 0; |
3144 | + |
3145 | if (err || ops.oobretlen != mtd->oobavail) { |
3146 | pr_err("error: readoob failed at %#llx\n", |
3147 | (long long)addr); |
3148 | @@ -286,6 +292,9 @@ static int verify_eraseblock_in_one_go(int ebnum) |
3149 | |
3150 | /* read entire block's OOB at one go */ |
3151 | err = mtd_read_oob(mtd, addr, &ops); |
3152 | + if (mtd_is_bitflip(err)) |
3153 | + err = 0; |
3154 | + |
3155 | if (err || ops.oobretlen != len) { |
3156 | pr_err("error: readoob failed at %#llx\n", |
3157 | (long long)addr); |
3158 | @@ -527,6 +536,9 @@ static int __init mtd_oobtest_init(void) |
3159 | pr_info("attempting to start read past end of OOB\n"); |
3160 | pr_info("an error is expected...\n"); |
3161 | err = mtd_read_oob(mtd, addr0, &ops); |
3162 | + if (mtd_is_bitflip(err)) |
3163 | + err = 0; |
3164 | + |
3165 | if (err) { |
3166 | pr_info("error occurred as expected\n"); |
3167 | err = 0; |
3168 | @@ -571,6 +583,9 @@ static int __init mtd_oobtest_init(void) |
3169 | pr_info("attempting to read past end of device\n"); |
3170 | pr_info("an error is expected...\n"); |
3171 | err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); |
3172 | + if (mtd_is_bitflip(err)) |
3173 | + err = 0; |
3174 | + |
3175 | if (err) { |
3176 | pr_info("error occurred as expected\n"); |
3177 | err = 0; |
3178 | @@ -615,6 +630,9 @@ static int __init mtd_oobtest_init(void) |
3179 | pr_info("attempting to read past end of device\n"); |
3180 | pr_info("an error is expected...\n"); |
3181 | err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops); |
3182 | + if (mtd_is_bitflip(err)) |
3183 | + err = 0; |
3184 | + |
3185 | if (err) { |
3186 | pr_info("error occurred as expected\n"); |
3187 | err = 0; |
3188 | @@ -684,6 +702,9 @@ static int __init mtd_oobtest_init(void) |
3189 | ops.datbuf = NULL; |
3190 | ops.oobbuf = readbuf; |
3191 | err = mtd_read_oob(mtd, addr, &ops); |
3192 | + if (mtd_is_bitflip(err)) |
3193 | + err = 0; |
3194 | + |
3195 | if (err) |
3196 | goto out; |
3197 | if (memcmpshow(addr, readbuf, writebuf, |
3198 | diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c |
3199 | index c1f5c29e458e..b44c8d348e78 100644 |
3200 | --- a/drivers/mtd/ubi/fastmap.c |
3201 | +++ b/drivers/mtd/ubi/fastmap.c |
3202 | @@ -828,6 +828,24 @@ static int find_fm_anchor(struct ubi_attach_info *ai) |
3203 | return ret; |
3204 | } |
3205 | |
3206 | +static struct ubi_ainf_peb *clone_aeb(struct ubi_attach_info *ai, |
3207 | + struct ubi_ainf_peb *old) |
3208 | +{ |
3209 | + struct ubi_ainf_peb *new; |
3210 | + |
3211 | + new = ubi_alloc_aeb(ai, old->pnum, old->ec); |
3212 | + if (!new) |
3213 | + return NULL; |
3214 | + |
3215 | + new->vol_id = old->vol_id; |
3216 | + new->sqnum = old->sqnum; |
3217 | + new->lnum = old->lnum; |
3218 | + new->scrub = old->scrub; |
3219 | + new->copy_flag = old->copy_flag; |
3220 | + |
3221 | + return new; |
3222 | +} |
3223 | + |
3224 | /** |
3225 | * ubi_scan_fastmap - scan the fastmap. |
3226 | * @ubi: UBI device object |
3227 | @@ -847,7 +865,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, |
3228 | struct ubi_vid_hdr *vh; |
3229 | struct ubi_ec_hdr *ech; |
3230 | struct ubi_fastmap_layout *fm; |
3231 | - struct ubi_ainf_peb *tmp_aeb, *aeb; |
3232 | + struct ubi_ainf_peb *aeb; |
3233 | int i, used_blocks, pnum, fm_anchor, ret = 0; |
3234 | size_t fm_size; |
3235 | __be32 crc, tmp_crc; |
3236 | @@ -857,9 +875,16 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, |
3237 | if (fm_anchor < 0) |
3238 | return UBI_NO_FASTMAP; |
3239 | |
3240 | - /* Move all (possible) fastmap blocks into our new attach structure. */ |
3241 | - list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list) |
3242 | - list_move_tail(&aeb->u.list, &ai->fastmap); |
3243 | + /* Copy all (possible) fastmap blocks into our new attach structure. */ |
3244 | + list_for_each_entry(aeb, &scan_ai->fastmap, u.list) { |
3245 | + struct ubi_ainf_peb *new; |
3246 | + |
3247 | + new = clone_aeb(ai, aeb); |
3248 | + if (!new) |
3249 | + return -ENOMEM; |
3250 | + |
3251 | + list_add(&new->u.list, &ai->fastmap); |
3252 | + } |
3253 | |
3254 | down_write(&ubi->fm_protect); |
3255 | memset(ubi->fm_buf, 0, ubi->fm_size); |
3256 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
3257 | index 4907c9b57565..513457a2a7bf 100644 |
3258 | --- a/drivers/net/bonding/bond_main.c |
3259 | +++ b/drivers/net/bonding/bond_main.c |
3260 | @@ -1524,39 +1524,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
3261 | goto err_close; |
3262 | } |
3263 | |
3264 | - /* If the mode uses primary, then the following is handled by |
3265 | - * bond_change_active_slave(). |
3266 | - */ |
3267 | - if (!bond_uses_primary(bond)) { |
3268 | - /* set promiscuity level to new slave */ |
3269 | - if (bond_dev->flags & IFF_PROMISC) { |
3270 | - res = dev_set_promiscuity(slave_dev, 1); |
3271 | - if (res) |
3272 | - goto err_close; |
3273 | - } |
3274 | - |
3275 | - /* set allmulti level to new slave */ |
3276 | - if (bond_dev->flags & IFF_ALLMULTI) { |
3277 | - res = dev_set_allmulti(slave_dev, 1); |
3278 | - if (res) |
3279 | - goto err_close; |
3280 | - } |
3281 | - |
3282 | - netif_addr_lock_bh(bond_dev); |
3283 | - |
3284 | - dev_mc_sync_multiple(slave_dev, bond_dev); |
3285 | - dev_uc_sync_multiple(slave_dev, bond_dev); |
3286 | - |
3287 | - netif_addr_unlock_bh(bond_dev); |
3288 | - } |
3289 | - |
3290 | - if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
3291 | - /* add lacpdu mc addr to mc list */ |
3292 | - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; |
3293 | - |
3294 | - dev_mc_add(slave_dev, lacpdu_multicast); |
3295 | - } |
3296 | - |
3297 | res = vlan_vids_add_by_dev(slave_dev, bond_dev); |
3298 | if (res) { |
3299 | netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", |
3300 | @@ -1719,6 +1686,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
3301 | goto err_upper_unlink; |
3302 | } |
3303 | |
3304 | + /* If the mode uses primary, then the following is handled by |
3305 | + * bond_change_active_slave(). |
3306 | + */ |
3307 | + if (!bond_uses_primary(bond)) { |
3308 | + /* set promiscuity level to new slave */ |
3309 | + if (bond_dev->flags & IFF_PROMISC) { |
3310 | + res = dev_set_promiscuity(slave_dev, 1); |
3311 | + if (res) |
3312 | + goto err_sysfs_del; |
3313 | + } |
3314 | + |
3315 | + /* set allmulti level to new slave */ |
3316 | + if (bond_dev->flags & IFF_ALLMULTI) { |
3317 | + res = dev_set_allmulti(slave_dev, 1); |
3318 | + if (res) { |
3319 | + if (bond_dev->flags & IFF_PROMISC) |
3320 | + dev_set_promiscuity(slave_dev, -1); |
3321 | + goto err_sysfs_del; |
3322 | + } |
3323 | + } |
3324 | + |
3325 | + netif_addr_lock_bh(bond_dev); |
3326 | + dev_mc_sync_multiple(slave_dev, bond_dev); |
3327 | + dev_uc_sync_multiple(slave_dev, bond_dev); |
3328 | + netif_addr_unlock_bh(bond_dev); |
3329 | + |
3330 | + if (BOND_MODE(bond) == BOND_MODE_8023AD) { |
3331 | + /* add lacpdu mc addr to mc list */ |
3332 | + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; |
3333 | + |
3334 | + dev_mc_add(slave_dev, lacpdu_multicast); |
3335 | + } |
3336 | + } |
3337 | + |
3338 | bond->slave_cnt++; |
3339 | bond_compute_features(bond); |
3340 | bond_set_carrier(bond); |
3341 | @@ -1742,6 +1743,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
3342 | return 0; |
3343 | |
3344 | /* Undo stages on error */ |
3345 | +err_sysfs_del: |
3346 | + bond_sysfs_slave_del(new_slave); |
3347 | + |
3348 | err_upper_unlink: |
3349 | bond_upper_dev_unlink(bond, new_slave); |
3350 | |
3351 | @@ -1749,9 +1753,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) |
3352 | netdev_rx_handler_unregister(slave_dev); |
3353 | |
3354 | err_detach: |
3355 | - if (!bond_uses_primary(bond)) |
3356 | - bond_hw_addr_flush(bond_dev, slave_dev); |
3357 | - |
3358 | vlan_vids_del_by_dev(slave_dev, bond_dev); |
3359 | if (rcu_access_pointer(bond->primary_slave) == new_slave) |
3360 | RCU_INIT_POINTER(bond->primary_slave, NULL); |
3361 | @@ -2605,11 +2606,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) |
3362 | bond_for_each_slave_rcu(bond, slave, iter) { |
3363 | unsigned long trans_start = dev_trans_start(slave->dev); |
3364 | |
3365 | + slave->new_link = BOND_LINK_NOCHANGE; |
3366 | + |
3367 | if (slave->link != BOND_LINK_UP) { |
3368 | if (bond_time_in_interval(bond, trans_start, 1) && |
3369 | bond_time_in_interval(bond, slave->last_rx, 1)) { |
3370 | |
3371 | - slave->link = BOND_LINK_UP; |
3372 | + slave->new_link = BOND_LINK_UP; |
3373 | slave_state_changed = 1; |
3374 | |
3375 | /* primary_slave has no meaning in round-robin |
3376 | @@ -2636,7 +2639,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) |
3377 | if (!bond_time_in_interval(bond, trans_start, 2) || |
3378 | !bond_time_in_interval(bond, slave->last_rx, 2)) { |
3379 | |
3380 | - slave->link = BOND_LINK_DOWN; |
3381 | + slave->new_link = BOND_LINK_DOWN; |
3382 | slave_state_changed = 1; |
3383 | |
3384 | if (slave->link_failure_count < UINT_MAX) |
3385 | @@ -2667,6 +2670,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) |
3386 | if (!rtnl_trylock()) |
3387 | goto re_arm; |
3388 | |
3389 | + bond_for_each_slave(bond, slave, iter) { |
3390 | + if (slave->new_link != BOND_LINK_NOCHANGE) |
3391 | + slave->link = slave->new_link; |
3392 | + } |
3393 | + |
3394 | if (slave_state_changed) { |
3395 | bond_slave_state_change(bond); |
3396 | if (BOND_MODE(bond) == BOND_MODE_XOR) |
3397 | diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c |
3398 | index e2512ab41168..e13c9cd45dc0 100644 |
3399 | --- a/drivers/net/ethernet/amazon/ena/ena_com.c |
3400 | +++ b/drivers/net/ethernet/amazon/ena/ena_com.c |
3401 | @@ -61,6 +61,8 @@ |
3402 | |
3403 | #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF |
3404 | |
3405 | +#define ENA_REGS_ADMIN_INTR_MASK 1 |
3406 | + |
3407 | /*****************************************************************************/ |
3408 | /*****************************************************************************/ |
3409 | /*****************************************************************************/ |
3410 | @@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu |
3411 | tail_masked = admin_queue->sq.tail & queue_size_mask; |
3412 | |
3413 | /* In case of queue FULL */ |
3414 | - cnt = admin_queue->sq.tail - admin_queue->sq.head; |
3415 | + cnt = atomic_read(&admin_queue->outstanding_cmds); |
3416 | if (cnt >= admin_queue->q_depth) { |
3417 | - pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", |
3418 | - admin_queue->sq.tail, admin_queue->sq.head, |
3419 | - admin_queue->q_depth); |
3420 | + pr_debug("admin queue is full.\n"); |
3421 | admin_queue->stats.out_of_space++; |
3422 | return ERR_PTR(-ENOSPC); |
3423 | } |
3424 | @@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status) |
3425 | static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, |
3426 | struct ena_com_admin_queue *admin_queue) |
3427 | { |
3428 | - unsigned long flags; |
3429 | - u32 start_time; |
3430 | + unsigned long flags, timeout; |
3431 | int ret; |
3432 | |
3433 | - start_time = ((u32)jiffies_to_usecs(jiffies)); |
3434 | + timeout = jiffies + ADMIN_CMD_TIMEOUT_US; |
3435 | + |
3436 | + while (1) { |
3437 | + spin_lock_irqsave(&admin_queue->q_lock, flags); |
3438 | + ena_com_handle_admin_completion(admin_queue); |
3439 | + spin_unlock_irqrestore(&admin_queue->q_lock, flags); |
3440 | + |
3441 | + if (comp_ctx->status != ENA_CMD_SUBMITTED) |
3442 | + break; |
3443 | |
3444 | - while (comp_ctx->status == ENA_CMD_SUBMITTED) { |
3445 | - if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > |
3446 | - ADMIN_CMD_TIMEOUT_US) { |
3447 | + if (time_is_before_jiffies(timeout)) { |
3448 | pr_err("Wait for completion (polling) timeout\n"); |
3449 | /* ENA didn't have any completion */ |
3450 | spin_lock_irqsave(&admin_queue->q_lock, flags); |
3451 | @@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c |
3452 | goto err; |
3453 | } |
3454 | |
3455 | - spin_lock_irqsave(&admin_queue->q_lock, flags); |
3456 | - ena_com_handle_admin_completion(admin_queue); |
3457 | - spin_unlock_irqrestore(&admin_queue->q_lock, flags); |
3458 | - |
3459 | msleep(100); |
3460 | } |
3461 | |
3462 | @@ -1449,6 +1450,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) |
3463 | |
3464 | void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) |
3465 | { |
3466 | + u32 mask_value = 0; |
3467 | + |
3468 | + if (polling) |
3469 | + mask_value = ENA_REGS_ADMIN_INTR_MASK; |
3470 | + |
3471 | + writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); |
3472 | ena_dev->admin_queue.polling = polling; |
3473 | } |
3474 | |
3475 | diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
3476 | index bfeaec5bd7b9..0d9ce08ee3a9 100644 |
3477 | --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c |
3478 | +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c |
3479 | @@ -1542,6 +1542,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) |
3480 | "Failed to get TX queue handlers. TX queue num %d rc: %d\n", |
3481 | qid, rc); |
3482 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
3483 | + return rc; |
3484 | } |
3485 | |
3486 | ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); |
3487 | @@ -1606,6 +1607,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) |
3488 | "Failed to get RX queue handlers. RX queue num %d rc: %d\n", |
3489 | qid, rc); |
3490 | ena_com_destroy_io_queue(ena_dev, ena_qid); |
3491 | + return rc; |
3492 | } |
3493 | |
3494 | ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); |
3495 | @@ -2806,6 +2808,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) |
3496 | { |
3497 | int release_bars; |
3498 | |
3499 | + if (ena_dev->mem_bar) |
3500 | + devm_iounmap(&pdev->dev, ena_dev->mem_bar); |
3501 | + |
3502 | + devm_iounmap(&pdev->dev, ena_dev->reg_bar); |
3503 | + |
3504 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
3505 | pci_release_selected_regions(pdev, release_bars); |
3506 | } |
3507 | @@ -2893,8 +2900,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
3508 | goto err_free_ena_dev; |
3509 | } |
3510 | |
3511 | - ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), |
3512 | - pci_resource_len(pdev, ENA_REG_BAR)); |
3513 | + ena_dev->reg_bar = devm_ioremap(&pdev->dev, |
3514 | + pci_resource_start(pdev, ENA_REG_BAR), |
3515 | + pci_resource_len(pdev, ENA_REG_BAR)); |
3516 | if (!ena_dev->reg_bar) { |
3517 | dev_err(&pdev->dev, "failed to remap regs bar\n"); |
3518 | rc = -EFAULT; |
3519 | @@ -2914,8 +2922,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
3520 | ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); |
3521 | |
3522 | if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { |
3523 | - ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), |
3524 | - pci_resource_len(pdev, ENA_MEM_BAR)); |
3525 | + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, |
3526 | + pci_resource_start(pdev, ENA_MEM_BAR), |
3527 | + pci_resource_len(pdev, ENA_MEM_BAR)); |
3528 | if (!ena_dev->mem_bar) { |
3529 | rc = -EFAULT; |
3530 | goto err_device_destroy; |
3531 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
3532 | index ca6c4718000f..31287cec6e3a 100644 |
3533 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
3534 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c |
3535 | @@ -3887,15 +3887,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3536 | /* when transmitting in a vf, start bd must hold the ethertype |
3537 | * for fw to enforce it |
3538 | */ |
3539 | + u16 vlan_tci = 0; |
3540 | #ifndef BNX2X_STOP_ON_ERROR |
3541 | - if (IS_VF(bp)) |
3542 | + if (IS_VF(bp)) { |
3543 | #endif |
3544 | - tx_start_bd->vlan_or_ethertype = |
3545 | - cpu_to_le16(ntohs(eth->h_proto)); |
3546 | + /* Still need to consider inband vlan for enforced */ |
3547 | + if (__vlan_get_tag(skb, &vlan_tci)) { |
3548 | + tx_start_bd->vlan_or_ethertype = |
3549 | + cpu_to_le16(ntohs(eth->h_proto)); |
3550 | + } else { |
3551 | + tx_start_bd->bd_flags.as_bitfield |= |
3552 | + (X_ETH_INBAND_VLAN << |
3553 | + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); |
3554 | + tx_start_bd->vlan_or_ethertype = |
3555 | + cpu_to_le16(vlan_tci); |
3556 | + } |
3557 | #ifndef BNX2X_STOP_ON_ERROR |
3558 | - else |
3559 | + } else { |
3560 | /* used by FW for packet accounting */ |
3561 | tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); |
3562 | + } |
3563 | #endif |
3564 | } |
3565 | |
3566 | diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c |
3567 | index 0f6811860ad5..a36e38676640 100644 |
3568 | --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c |
3569 | +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c |
3570 | @@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) |
3571 | static void |
3572 | bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) |
3573 | { |
3574 | - memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); |
3575 | + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); |
3576 | } |
3577 | |
3578 | static void |
3579 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
3580 | index 0c2a32a305bc..3ec32d7c5866 100644 |
3581 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
3582 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |
3583 | @@ -2742,6 +2742,16 @@ static int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto, |
3584 | return -EOPNOTSUPP; |
3585 | } |
3586 | |
3587 | +static netdev_features_t cxgb_fix_features(struct net_device *dev, |
3588 | + netdev_features_t features) |
3589 | +{ |
3590 | + /* Disable GRO, if RX_CSUM is disabled */ |
3591 | + if (!(features & NETIF_F_RXCSUM)) |
3592 | + features &= ~NETIF_F_GRO; |
3593 | + |
3594 | + return features; |
3595 | +} |
3596 | + |
3597 | static const struct net_device_ops cxgb4_netdev_ops = { |
3598 | .ndo_open = cxgb_open, |
3599 | .ndo_stop = cxgb_close, |
3600 | @@ -2766,6 +2776,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { |
3601 | #endif |
3602 | .ndo_set_tx_maxrate = cxgb_set_tx_maxrate, |
3603 | .ndo_setup_tc = cxgb_setup_tc, |
3604 | + .ndo_fix_features = cxgb_fix_features, |
3605 | }; |
3606 | |
3607 | #ifdef CONFIG_PCI_IOV |
3608 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c |
3609 | index 6fd3be69ff21..ebeeb3581b9c 100644 |
3610 | --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c |
3611 | +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c |
3612 | @@ -6185,13 +6185,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, |
3613 | if (!t4_fw_matches_chip(adap, fw_hdr)) |
3614 | return -EINVAL; |
3615 | |
3616 | + /* Disable FW_OK flag so that mbox commands with FW_OK flag set |
3617 | + * wont be sent when we are flashing FW. |
3618 | + */ |
3619 | + adap->flags &= ~FW_OK; |
3620 | + |
3621 | ret = t4_fw_halt(adap, mbox, force); |
3622 | if (ret < 0 && !force) |
3623 | - return ret; |
3624 | + goto out; |
3625 | |
3626 | ret = t4_load_fw(adap, fw_data, size); |
3627 | if (ret < 0) |
3628 | - return ret; |
3629 | + goto out; |
3630 | |
3631 | /* |
3632 | * Older versions of the firmware don't understand the new |
3633 | @@ -6202,7 +6207,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, |
3634 | * its header flags to see if it advertises the capability. |
3635 | */ |
3636 | reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); |
3637 | - return t4_fw_restart(adap, mbox, reset); |
3638 | + ret = t4_fw_restart(adap, mbox, reset); |
3639 | + |
3640 | + /* Grab potentially new Firmware Device Log parameters so we can see |
3641 | + * how healthy the new Firmware is. It's okay to contact the new |
3642 | + * Firmware for these parameters even though, as far as it's |
3643 | + * concerned, we've never said "HELLO" to it ... |
3644 | + */ |
3645 | + (void)t4_init_devlog_params(adap); |
3646 | +out: |
3647 | + adap->flags |= FW_OK; |
3648 | + return ret; |
3649 | } |
3650 | |
3651 | /** |
3652 | @@ -8073,7 +8088,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) |
3653 | ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); |
3654 | if (ret) |
3655 | break; |
3656 | - idx = (idx + 1) & UPDBGLARDPTR_M; |
3657 | + |
3658 | + /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to |
3659 | + * identify the 32-bit portion of the full 312-bit data |
3660 | + */ |
3661 | + if (is_t6(adap->params.chip) && (idx & 0xf) >= 9) |
3662 | + idx = (idx & 0xff0) + 0x10; |
3663 | + else |
3664 | + idx++; |
3665 | + /* address can't exceed 0xfff */ |
3666 | + idx &= UPDBGLARDPTR_M; |
3667 | } |
3668 | restart: |
3669 | if (cfg & UPDBGLAEN_F) { |
3670 | diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c |
3671 | index f3ed9ce99e5e..9d64e8e7c417 100644 |
3672 | --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c |
3673 | +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c |
3674 | @@ -2616,8 +2616,8 @@ void t4vf_sge_stop(struct adapter *adapter) |
3675 | int t4vf_sge_init(struct adapter *adapter) |
3676 | { |
3677 | struct sge_params *sge_params = &adapter->params.sge; |
3678 | - u32 fl0 = sge_params->sge_fl_buffer_size[0]; |
3679 | - u32 fl1 = sge_params->sge_fl_buffer_size[1]; |
3680 | + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; |
3681 | + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; |
3682 | struct sge *s = &adapter->sge; |
3683 | |
3684 | /* |
3685 | @@ -2625,9 +2625,20 @@ int t4vf_sge_init(struct adapter *adapter) |
3686 | * the Physical Function Driver. Ideally we should be able to deal |
3687 | * with _any_ configuration. Practice is different ... |
3688 | */ |
3689 | - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { |
3690 | + |
3691 | + /* We only bother using the Large Page logic if the Large Page Buffer |
3692 | + * is larger than our Page Size Buffer. |
3693 | + */ |
3694 | + if (fl_large_pg <= fl_small_pg) |
3695 | + fl_large_pg = 0; |
3696 | + |
3697 | + /* The Page Size Buffer must be exactly equal to our Page Size and the |
3698 | + * Large Page Size Buffer should be 0 (per above) or a power of 2. |
3699 | + */ |
3700 | + if (fl_small_pg != PAGE_SIZE || |
3701 | + (fl_large_pg & (fl_large_pg - 1)) != 0) { |
3702 | dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", |
3703 | - fl0, fl1); |
3704 | + fl_small_pg, fl_large_pg); |
3705 | return -EINVAL; |
3706 | } |
3707 | if ((sge_params->sge_control & RXPKTCPLMODE_F) != |
3708 | @@ -2639,8 +2650,8 @@ int t4vf_sge_init(struct adapter *adapter) |
3709 | /* |
3710 | * Now translate the adapter parameters into our internal forms. |
3711 | */ |
3712 | - if (fl1) |
3713 | - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; |
3714 | + if (fl_large_pg) |
3715 | + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; |
3716 | s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) |
3717 | ? 128 : 64); |
3718 | s->pktshift = PKTSHIFT_G(sge_params->sge_control); |
3719 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
3720 | index 05e5b38e4891..fe00f71bc6b4 100644 |
3721 | --- a/drivers/net/ethernet/freescale/fec_main.c |
3722 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
3723 | @@ -2371,6 +2371,10 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset) |
3724 | static inline void fec_enet_update_ethtool_stats(struct net_device *dev) |
3725 | { |
3726 | } |
3727 | + |
3728 | +static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) |
3729 | +{ |
3730 | +} |
3731 | #endif /* !defined(CONFIG_M5272) */ |
3732 | |
3733 | static int fec_enet_nway_reset(struct net_device *dev) |
3734 | diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c |
3735 | index 446c7b374ff5..a10de1e9c157 100644 |
3736 | --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c |
3737 | +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c |
3738 | @@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) |
3739 | { |
3740 | const struct of_device_id *id = |
3741 | of_match_device(fsl_pq_mdio_match, &pdev->dev); |
3742 | - const struct fsl_pq_mdio_data *data = id->data; |
3743 | + const struct fsl_pq_mdio_data *data; |
3744 | struct device_node *np = pdev->dev.of_node; |
3745 | struct resource res; |
3746 | struct device_node *tbi; |
3747 | @@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) |
3748 | struct mii_bus *new_bus; |
3749 | int err; |
3750 | |
3751 | + if (!id) { |
3752 | + dev_err(&pdev->dev, "Failed to match device\n"); |
3753 | + return -ENODEV; |
3754 | + } |
3755 | + |
3756 | + data = id->data; |
3757 | + |
3758 | dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); |
3759 | |
3760 | new_bus = mdiobus_alloc_size(sizeof(*priv)); |
3761 | diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c |
3762 | index 8f139197f1aa..5977b695d0fa 100644 |
3763 | --- a/drivers/net/ethernet/ibm/emac/core.c |
3764 | +++ b/drivers/net/ethernet/ibm/emac/core.c |
3765 | @@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev) |
3766 | { |
3767 | struct emac_regs __iomem *p = dev->emacp; |
3768 | int n = 20; |
3769 | + bool __maybe_unused try_internal_clock = false; |
3770 | |
3771 | DBG(dev, "reset" NL); |
3772 | |
3773 | @@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev) |
3774 | } |
3775 | |
3776 | #ifdef CONFIG_PPC_DCR_NATIVE |
3777 | +do_retry: |
3778 | /* |
3779 | * PPC460EX/GT Embedded Processor Advanced User's Manual |
3780 | * section 28.10.1 Mode Register 0 (EMACx_MR0) states: |
3781 | @@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev) |
3782 | * of the EMAC. If none is present, select the internal clock |
3783 | * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). |
3784 | * After a soft reset, select the external clock. |
3785 | + * |
3786 | + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the |
3787 | + * ethernet cable is not attached. This causes the reset to timeout |
3788 | + * and the PHY detection code in emac_init_phy() is unable to |
3789 | + * communicate and detect the AR8035-A PHY. As a result, the emac |
3790 | + * driver bails out early and the user has no ethernet. |
3791 | + * In order to stay compatible with existing configurations, the |
3792 | + * driver will temporarily switch to the internal clock, after |
3793 | + * the first reset fails. |
3794 | */ |
3795 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
3796 | - if (dev->phy_address == 0xffffffff && |
3797 | - dev->phy_map == 0xffffffff) { |
3798 | + if (try_internal_clock || (dev->phy_address == 0xffffffff && |
3799 | + dev->phy_map == 0xffffffff)) { |
3800 | /* No PHY: select internal loop clock before reset */ |
3801 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
3802 | 0, SDR0_ETH_CFG_ECS << dev->cell_index); |
3803 | @@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev) |
3804 | |
3805 | #ifdef CONFIG_PPC_DCR_NATIVE |
3806 | if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { |
3807 | - if (dev->phy_address == 0xffffffff && |
3808 | - dev->phy_map == 0xffffffff) { |
3809 | + if (!n && !try_internal_clock) { |
3810 | + /* first attempt has timed out. */ |
3811 | + n = 20; |
3812 | + try_internal_clock = true; |
3813 | + goto do_retry; |
3814 | + } |
3815 | + |
3816 | + if (try_internal_clock || (dev->phy_address == 0xffffffff && |
3817 | + dev->phy_map == 0xffffffff)) { |
3818 | /* No PHY: restore external clock source after reset */ |
3819 | dcri_clrset(SDR0, SDR0_ETH_CFG, |
3820 | SDR0_ETH_CFG_ECS << dev->cell_index, 0); |
3821 | diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c |
3822 | index 528a926dd979..825ec8f710e7 100644 |
3823 | --- a/drivers/net/ethernet/intel/e1000e/netdev.c |
3824 | +++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
3825 | @@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) |
3826 | struct e1000_hw *hw = &adapter->hw; |
3827 | |
3828 | if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { |
3829 | + struct sk_buff *skb = adapter->tx_hwtstamp_skb; |
3830 | struct skb_shared_hwtstamps shhwtstamps; |
3831 | u64 txstmp; |
3832 | |
3833 | @@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) |
3834 | |
3835 | e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); |
3836 | |
3837 | - skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); |
3838 | - dev_kfree_skb_any(adapter->tx_hwtstamp_skb); |
3839 | + /* Clear the global tx_hwtstamp_skb pointer and force writes |
3840 | + * prior to notifying the stack of a Tx timestamp. |
3841 | + */ |
3842 | adapter->tx_hwtstamp_skb = NULL; |
3843 | + wmb(); /* force write prior to skb_tstamp_tx */ |
3844 | + |
3845 | + skb_tstamp_tx(skb, &shhwtstamps); |
3846 | + dev_kfree_skb_any(skb); |
3847 | } else if (time_after(jiffies, adapter->tx_hwtstamp_start |
3848 | + adapter->tx_timeout_factor * HZ)) { |
3849 | dev_kfree_skb_any(adapter->tx_hwtstamp_skb); |
3850 | @@ -6645,12 +6651,17 @@ static int e1000e_pm_thaw(struct device *dev) |
3851 | static int e1000e_pm_suspend(struct device *dev) |
3852 | { |
3853 | struct pci_dev *pdev = to_pci_dev(dev); |
3854 | + int rc; |
3855 | |
3856 | e1000e_flush_lpic(pdev); |
3857 | |
3858 | e1000e_pm_freeze(dev); |
3859 | |
3860 | - return __e1000_shutdown(pdev, false); |
3861 | + rc = __e1000_shutdown(pdev, false); |
3862 | + if (rc) |
3863 | + e1000e_pm_thaw(dev); |
3864 | + |
3865 | + return rc; |
3866 | } |
3867 | |
3868 | static int e1000e_pm_resume(struct device *dev) |
3869 | diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c |
3870 | index ddf478d6322b..614f93e01500 100644 |
3871 | --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c |
3872 | +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c |
3873 | @@ -154,6 +154,7 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter) |
3874 | adapter->current_op = I40E_VIRTCHNL_OP_GET_VF_RESOURCES; |
3875 | adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG; |
3876 | caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 | |
3877 | + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF | |
3878 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | |
3879 | I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG | |
3880 | I40E_VIRTCHNL_VF_OFFLOAD_VLAN | |
3881 | diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c |
3882 | index a7895c4cbcc3..9eb9b68f8935 100644 |
3883 | --- a/drivers/net/ethernet/intel/igb/igb_ptp.c |
3884 | +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c |
3885 | @@ -721,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) |
3886 | **/ |
3887 | static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) |
3888 | { |
3889 | + struct sk_buff *skb = adapter->ptp_tx_skb; |
3890 | struct e1000_hw *hw = &adapter->hw; |
3891 | struct skb_shared_hwtstamps shhwtstamps; |
3892 | u64 regval; |
3893 | @@ -748,10 +749,17 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) |
3894 | shhwtstamps.hwtstamp = |
3895 | ktime_add_ns(shhwtstamps.hwtstamp, adjust); |
3896 | |
3897 | - skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); |
3898 | - dev_kfree_skb_any(adapter->ptp_tx_skb); |
3899 | + /* Clear the lock early before calling skb_tstamp_tx so that |
3900 | + * applications are not woken up before the lock bit is clear. We use |
3901 | + * a copy of the skb pointer to ensure other threads can't change it |
3902 | + * while we're notifying the stack. |
3903 | + */ |
3904 | adapter->ptp_tx_skb = NULL; |
3905 | clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); |
3906 | + |
3907 | + /* Notify the stack and free the skb after we've unlocked */ |
3908 | + skb_tstamp_tx(skb, &shhwtstamps); |
3909 | + dev_kfree_skb_any(skb); |
3910 | } |
3911 | |
3912 | /** |
3913 | diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c |
3914 | index 941c8e2c944e..93ab0b3ad393 100644 |
3915 | --- a/drivers/net/ethernet/marvell/sky2.c |
3916 | +++ b/drivers/net/ethernet/marvell/sky2.c |
3917 | @@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
3918 | INIT_WORK(&hw->restart_work, sky2_restart); |
3919 | |
3920 | pci_set_drvdata(pdev, hw); |
3921 | - pdev->d3_delay = 150; |
3922 | + pdev->d3_delay = 200; |
3923 | |
3924 | return 0; |
3925 | |
3926 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c |
3927 | index b04760a5034b..af2e6ea36eac 100644 |
3928 | --- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c |
3929 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c |
3930 | @@ -156,57 +156,63 @@ static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) |
3931 | static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev) |
3932 | { |
3933 | struct mlx4_en_priv *priv = netdev_priv(netdev); |
3934 | + struct mlx4_en_port_profile *prof = priv->prof; |
3935 | struct mlx4_en_dev *mdev = priv->mdev; |
3936 | + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; |
3937 | |
3938 | if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) |
3939 | return 1; |
3940 | |
3941 | if (priv->cee_config.pfc_state) { |
3942 | int tc; |
3943 | + rx_ppp = prof->rx_ppp; |
3944 | + tx_ppp = prof->tx_ppp; |
3945 | |
3946 | - priv->prof->rx_pause = 0; |
3947 | - priv->prof->tx_pause = 0; |
3948 | for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) { |
3949 | u8 tc_mask = 1 << tc; |
3950 | |
3951 | switch (priv->cee_config.dcb_pfc[tc]) { |
3952 | case pfc_disabled: |
3953 | - priv->prof->tx_ppp &= ~tc_mask; |
3954 | - priv->prof->rx_ppp &= ~tc_mask; |
3955 | + tx_ppp &= ~tc_mask; |
3956 | + rx_ppp &= ~tc_mask; |
3957 | break; |
3958 | case pfc_enabled_full: |
3959 | - priv->prof->tx_ppp |= tc_mask; |
3960 | - priv->prof->rx_ppp |= tc_mask; |
3961 | + tx_ppp |= tc_mask; |
3962 | + rx_ppp |= tc_mask; |
3963 | break; |
3964 | case pfc_enabled_tx: |
3965 | - priv->prof->tx_ppp |= tc_mask; |
3966 | - priv->prof->rx_ppp &= ~tc_mask; |
3967 | + tx_ppp |= tc_mask; |
3968 | + rx_ppp &= ~tc_mask; |
3969 | break; |
3970 | case pfc_enabled_rx: |
3971 | - priv->prof->tx_ppp &= ~tc_mask; |
3972 | - priv->prof->rx_ppp |= tc_mask; |
3973 | + tx_ppp &= ~tc_mask; |
3974 | + rx_ppp |= tc_mask; |
3975 | break; |
3976 | default: |
3977 | break; |
3978 | } |
3979 | } |
3980 | - en_dbg(DRV, priv, "Set pfc on\n"); |
3981 | + rx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->rx_pause; |
3982 | + tx_pause = !!(rx_ppp || tx_ppp) ? 0 : prof->tx_pause; |
3983 | } else { |
3984 | - priv->prof->rx_pause = 1; |
3985 | - priv->prof->tx_pause = 1; |
3986 | - en_dbg(DRV, priv, "Set pfc off\n"); |
3987 | + rx_ppp = 0; |
3988 | + tx_ppp = 0; |
3989 | + rx_pause = prof->rx_pause; |
3990 | + tx_pause = prof->tx_pause; |
3991 | } |
3992 | |
3993 | if (mlx4_SET_PORT_general(mdev->dev, priv->port, |
3994 | priv->rx_skb_size + ETH_FCS_LEN, |
3995 | - priv->prof->tx_pause, |
3996 | - priv->prof->tx_ppp, |
3997 | - priv->prof->rx_pause, |
3998 | - priv->prof->rx_ppp)) { |
3999 | + tx_pause, tx_ppp, rx_pause, rx_ppp)) { |
4000 | en_err(priv, "Failed setting pause params\n"); |
4001 | return 1; |
4002 | } |
4003 | |
4004 | + prof->tx_ppp = tx_ppp; |
4005 | + prof->rx_ppp = rx_ppp; |
4006 | + prof->tx_pause = tx_pause; |
4007 | + prof->rx_pause = rx_pause; |
4008 | + |
4009 | return 0; |
4010 | } |
4011 | |
4012 | @@ -310,6 +316,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets) |
4013 | } |
4014 | |
4015 | switch (ets->tc_tsa[i]) { |
4016 | + case IEEE_8021QAZ_TSA_VENDOR: |
4017 | case IEEE_8021QAZ_TSA_STRICT: |
4018 | break; |
4019 | case IEEE_8021QAZ_TSA_ETS: |
4020 | @@ -347,6 +354,10 @@ static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv, |
4021 | /* higher TC means higher priority => lower pg */ |
4022 | for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) { |
4023 | switch (ets->tc_tsa[i]) { |
4024 | + case IEEE_8021QAZ_TSA_VENDOR: |
4025 | + pg[i] = MLX4_EN_TC_VENDOR; |
4026 | + tc_tx_bw[i] = MLX4_EN_BW_MAX; |
4027 | + break; |
4028 | case IEEE_8021QAZ_TSA_STRICT: |
4029 | pg[i] = num_strict++; |
4030 | tc_tx_bw[i] = MLX4_EN_BW_MAX; |
4031 | @@ -403,6 +414,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, |
4032 | struct mlx4_en_priv *priv = netdev_priv(dev); |
4033 | struct mlx4_en_port_profile *prof = priv->prof; |
4034 | struct mlx4_en_dev *mdev = priv->mdev; |
4035 | + u32 tx_pause, tx_ppp, rx_pause, rx_ppp; |
4036 | int err; |
4037 | |
4038 | en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n", |
4039 | @@ -411,23 +423,26 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev, |
4040 | pfc->mbc, |
4041 | pfc->delay); |
4042 | |
4043 | - prof->rx_pause = !pfc->pfc_en; |
4044 | - prof->tx_pause = !pfc->pfc_en; |
4045 | - prof->rx_ppp = pfc->pfc_en; |
4046 | - prof->tx_ppp = pfc->pfc_en; |
4047 | + rx_pause = prof->rx_pause && !pfc->pfc_en; |
4048 | + tx_pause = prof->tx_pause && !pfc->pfc_en; |
4049 | + rx_ppp = pfc->pfc_en; |
4050 | + tx_ppp = pfc->pfc_en; |
4051 | |
4052 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
4053 | priv->rx_skb_size + ETH_FCS_LEN, |
4054 | - prof->tx_pause, |
4055 | - prof->tx_ppp, |
4056 | - prof->rx_pause, |
4057 | - prof->rx_ppp); |
4058 | - if (err) |
4059 | + tx_pause, tx_ppp, rx_pause, rx_ppp); |
4060 | + if (err) { |
4061 | en_err(priv, "Failed setting pause params\n"); |
4062 | - else |
4063 | - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, |
4064 | - prof->rx_ppp, prof->rx_pause, |
4065 | - prof->tx_ppp, prof->tx_pause); |
4066 | + return err; |
4067 | + } |
4068 | + |
4069 | + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, |
4070 | + rx_ppp, rx_pause, tx_ppp, tx_pause); |
4071 | + |
4072 | + prof->tx_ppp = tx_ppp; |
4073 | + prof->rx_ppp = rx_ppp; |
4074 | + prof->rx_pause = rx_pause; |
4075 | + prof->tx_pause = tx_pause; |
4076 | |
4077 | return err; |
4078 | } |
4079 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4080 | index bdda17d2ea0f..24977cc881d2 100644 |
4081 | --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4082 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c |
4083 | @@ -1003,27 +1003,32 @@ static int mlx4_en_set_pauseparam(struct net_device *dev, |
4084 | { |
4085 | struct mlx4_en_priv *priv = netdev_priv(dev); |
4086 | struct mlx4_en_dev *mdev = priv->mdev; |
4087 | + u8 tx_pause, tx_ppp, rx_pause, rx_ppp; |
4088 | int err; |
4089 | |
4090 | if (pause->autoneg) |
4091 | return -EINVAL; |
4092 | |
4093 | - priv->prof->tx_pause = pause->tx_pause != 0; |
4094 | - priv->prof->rx_pause = pause->rx_pause != 0; |
4095 | + tx_pause = !!(pause->tx_pause); |
4096 | + rx_pause = !!(pause->rx_pause); |
4097 | + rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause); |
4098 | + tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause); |
4099 | + |
4100 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, |
4101 | priv->rx_skb_size + ETH_FCS_LEN, |
4102 | - priv->prof->tx_pause, |
4103 | - priv->prof->tx_ppp, |
4104 | - priv->prof->rx_pause, |
4105 | - priv->prof->rx_ppp); |
4106 | - if (err) |
4107 | - en_err(priv, "Failed setting pause params\n"); |
4108 | - else |
4109 | - mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, |
4110 | - priv->prof->rx_ppp, |
4111 | - priv->prof->rx_pause, |
4112 | - priv->prof->tx_ppp, |
4113 | - priv->prof->tx_pause); |
4114 | + tx_pause, tx_ppp, rx_pause, rx_ppp); |
4115 | + if (err) { |
4116 | + en_err(priv, "Failed setting pause params, err = %d\n", err); |
4117 | + return err; |
4118 | + } |
4119 | + |
4120 | + mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap, |
4121 | + rx_ppp, rx_pause, tx_ppp, tx_pause); |
4122 | + |
4123 | + priv->prof->tx_pause = tx_pause; |
4124 | + priv->prof->rx_pause = rx_pause; |
4125 | + priv->prof->tx_ppp = tx_ppp; |
4126 | + priv->prof->rx_ppp = rx_ppp; |
4127 | |
4128 | return err; |
4129 | } |
4130 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c |
4131 | index bf7628db098a..22c3fdd5482a 100644 |
4132 | --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c |
4133 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c |
4134 | @@ -163,9 +163,9 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev) |
4135 | params->udp_rss = 0; |
4136 | } |
4137 | for (i = 1; i <= MLX4_MAX_PORTS; i++) { |
4138 | - params->prof[i].rx_pause = 1; |
4139 | + params->prof[i].rx_pause = !(pfcrx || pfctx); |
4140 | params->prof[i].rx_ppp = pfcrx; |
4141 | - params->prof[i].tx_pause = 1; |
4142 | + params->prof[i].tx_pause = !(pfcrx || pfctx); |
4143 | params->prof[i].tx_ppp = pfctx; |
4144 | params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; |
4145 | params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; |
4146 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
4147 | index d223e7cb68ba..0160c93de6d3 100644 |
4148 | --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
4149 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c |
4150 | @@ -3125,6 +3125,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, |
4151 | priv->msg_enable = MLX4_EN_MSG_LEVEL; |
4152 | #ifdef CONFIG_MLX4_EN_DCB |
4153 | if (!mlx4_is_slave(priv->mdev->dev)) { |
4154 | + u8 prio; |
4155 | + |
4156 | + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; ++prio) { |
4157 | + priv->ets.prio_tc[prio] = prio; |
4158 | + priv->ets.tc_tsa[prio] = IEEE_8021QAZ_TSA_VENDOR; |
4159 | + } |
4160 | + |
4161 | priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST | |
4162 | DCB_CAP_DCBX_VER_IEEE; |
4163 | priv->flags |= MLX4_EN_DCB_ENABLED; |
4164 | diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c |
4165 | index 1a670b681555..0710b3677464 100644 |
4166 | --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c |
4167 | +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c |
4168 | @@ -35,6 +35,7 @@ |
4169 | #include <linux/etherdevice.h> |
4170 | |
4171 | #include <linux/mlx4/cmd.h> |
4172 | +#include <linux/mlx4/qp.h> |
4173 | #include <linux/export.h> |
4174 | |
4175 | #include "mlx4.h" |
4176 | @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, |
4177 | if (IS_ERR(mailbox)) |
4178 | return PTR_ERR(mailbox); |
4179 | |
4180 | + if (!mlx4_qp_lookup(dev, rule->qpn)) { |
4181 | + mlx4_err_rule(dev, "QP doesn't exist\n", rule); |
4182 | + ret = -EINVAL; |
4183 | + goto out; |
4184 | + } |
4185 | + |
4186 | trans_rule_ctrl_to_hw(rule, mailbox->buf); |
4187 | |
4188 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); |
4189 | |
4190 | list_for_each_entry(cur, &rule->list, list) { |
4191 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); |
4192 | - if (ret < 0) { |
4193 | - mlx4_free_cmd_mailbox(dev, mailbox); |
4194 | - return ret; |
4195 | - } |
4196 | + if (ret < 0) |
4197 | + goto out; |
4198 | + |
4199 | size += ret; |
4200 | } |
4201 | |
4202 | @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, |
4203 | } |
4204 | } |
4205 | |
4206 | +out: |
4207 | mlx4_free_cmd_mailbox(dev, mailbox); |
4208 | |
4209 | return ret; |
4210 | diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
4211 | index df0f39611c5e..18f221d8a04d 100644 |
4212 | --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
4213 | +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h |
4214 | @@ -472,6 +472,7 @@ struct mlx4_en_frag_info { |
4215 | #define MLX4_EN_BW_MIN 1 |
4216 | #define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */ |
4217 | |
4218 | +#define MLX4_EN_TC_VENDOR 0 |
4219 | #define MLX4_EN_TC_ETS 7 |
4220 | |
4221 | enum dcb_pfc_type { |
4222 | diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c |
4223 | index 6143113a7fef..474ff36b9755 100644 |
4224 | --- a/drivers/net/ethernet/mellanox/mlx4/qp.c |
4225 | +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c |
4226 | @@ -387,6 +387,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) |
4227 | __mlx4_qp_free_icm(dev, qpn); |
4228 | } |
4229 | |
4230 | +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) |
4231 | +{ |
4232 | + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; |
4233 | + struct mlx4_qp *qp; |
4234 | + |
4235 | + spin_lock(&qp_table->lock); |
4236 | + |
4237 | + qp = __mlx4_qp_lookup(dev, qpn); |
4238 | + |
4239 | + spin_unlock(&qp_table->lock); |
4240 | + return qp; |
4241 | +} |
4242 | + |
4243 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) |
4244 | { |
4245 | struct mlx4_priv *priv = mlx4_priv(dev); |
4246 | @@ -474,6 +487,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
4247 | } |
4248 | |
4249 | if (attr & MLX4_UPDATE_QP_QOS_VPORT) { |
4250 | + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { |
4251 | + mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); |
4252 | + err = -EOPNOTSUPP; |
4253 | + goto out; |
4254 | + } |
4255 | + |
4256 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; |
4257 | cmd->qp_context.qos_vport = params->qos_vport; |
4258 | } |
4259 | diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
4260 | index 1822382212ee..d6b06bef1b69 100644 |
4261 | --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
4262 | +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
4263 | @@ -5048,6 +5048,7 @@ static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) |
4264 | &tracker->res_tree[RES_FS_RULE]); |
4265 | list_del(&fs_rule->com.list); |
4266 | spin_unlock_irq(mlx4_tlock(dev)); |
4267 | + kfree(fs_rule->mirr_mbox); |
4268 | kfree(fs_rule); |
4269 | state = 0; |
4270 | break; |
4271 | @@ -5214,6 +5215,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) |
4272 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); |
4273 | } |
4274 | |
4275 | +static void update_qos_vpp(struct mlx4_update_qp_context *ctx, |
4276 | + struct mlx4_vf_immed_vlan_work *work) |
4277 | +{ |
4278 | + ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); |
4279 | + ctx->qp_context.qos_vport = work->qos_vport; |
4280 | +} |
4281 | + |
4282 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) |
4283 | { |
4284 | struct mlx4_vf_immed_vlan_work *work = |
4285 | @@ -5328,11 +5336,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) |
4286 | qp->sched_queue & 0xC7; |
4287 | upd_context->qp_context.pri_path.sched_queue |= |
4288 | ((work->qos & 0x7) << 3); |
4289 | - upd_context->qp_mask |= |
4290 | - cpu_to_be64(1ULL << |
4291 | - MLX4_UPD_QP_MASK_QOS_VPP); |
4292 | - upd_context->qp_context.qos_vport = |
4293 | - work->qos_vport; |
4294 | + |
4295 | + if (dev->caps.flags2 & |
4296 | + MLX4_DEV_CAP_FLAG2_QOS_VPP) |
4297 | + update_qos_vpp(upd_context, work); |
4298 | } |
4299 | |
4300 | err = mlx4_cmd(dev, mailbox->dma, |
4301 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
4302 | index 38981db43bc3..2d235e8433be 100644 |
4303 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
4304 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c |
4305 | @@ -2741,6 +2741,9 @@ static int set_feature_lro(struct net_device *netdev, bool enable) |
4306 | |
4307 | mutex_unlock(&priv->state_lock); |
4308 | |
4309 | + if (mlx5e_vxlan_allowed(priv->mdev)) |
4310 | + udp_tunnel_get_rx_info(netdev); |
4311 | + |
4312 | return err; |
4313 | } |
4314 | |
4315 | @@ -3785,13 +3788,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) |
4316 | if (netdev->reg_state != NETREG_REGISTERED) |
4317 | return; |
4318 | |
4319 | - /* Device already registered: sync netdev system state */ |
4320 | - if (mlx5e_vxlan_allowed(mdev)) { |
4321 | - rtnl_lock(); |
4322 | - udp_tunnel_get_rx_info(netdev); |
4323 | - rtnl_unlock(); |
4324 | - } |
4325 | - |
4326 | queue_work(priv->wq, &priv->set_rx_mode_work); |
4327 | } |
4328 | |
4329 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
4330 | index 981cd1d84a5b..3c183b8c083a 100644 |
4331 | --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c |
4332 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c |
4333 | @@ -548,7 +548,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) |
4334 | struct mlx5_priv *priv = &mdev->priv; |
4335 | struct msix_entry *msix = priv->msix_arr; |
4336 | int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; |
4337 | - int err; |
4338 | |
4339 | if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { |
4340 | mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); |
4341 | @@ -558,18 +557,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) |
4342 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), |
4343 | priv->irq_info[i].mask); |
4344 | |
4345 | - err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); |
4346 | - if (err) { |
4347 | - mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", |
4348 | - irq); |
4349 | - goto err_clear_mask; |
4350 | - } |
4351 | + if (IS_ENABLED(CONFIG_SMP) && |
4352 | + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) |
4353 | + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); |
4354 | |
4355 | return 0; |
4356 | - |
4357 | -err_clear_mask: |
4358 | - free_cpumask_var(priv->irq_info[i].mask); |
4359 | - return err; |
4360 | } |
4361 | |
4362 | static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) |
4363 | diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c |
4364 | index bea9ae31a769..60e1edcbe573 100644 |
4365 | --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c |
4366 | +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c |
4367 | @@ -1448,8 +1448,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, |
4368 | err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, |
4369 | adding, true); |
4370 | if (err) { |
4371 | - if (net_ratelimit()) |
4372 | - netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); |
4373 | + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); |
4374 | return; |
4375 | } |
4376 | |
4377 | @@ -1509,8 +1508,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, |
4378 | err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, |
4379 | adding, true); |
4380 | if (err) { |
4381 | - if (net_ratelimit()) |
4382 | - netdev_err(mlxsw_sp_port->dev, "Failed to set FDB entry\n"); |
4383 | + dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n"); |
4384 | return; |
4385 | } |
4386 | |
4387 | diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c |
4388 | index b8d5270359cd..e30676515529 100644 |
4389 | --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c |
4390 | +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c |
4391 | @@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) |
4392 | cmd.req.arg3 = 0; |
4393 | |
4394 | if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) |
4395 | - netxen_issue_cmd(adapter, &cmd); |
4396 | + rcode = netxen_issue_cmd(adapter, &cmd); |
4397 | |
4398 | if (rcode != NX_RCODE_SUCCESS) |
4399 | return -EIO; |
4400 | diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c |
4401 | index afe5e57d9acb..d02313770fc2 100644 |
4402 | --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c |
4403 | +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c |
4404 | @@ -850,7 +850,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4405 | NULL) + |
4406 | qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, |
4407 | NULL); |
4408 | - norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096); |
4409 | + norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); |
4410 | min_addr_reg1 = norm_regsize / 4096; |
4411 | pwm_regsize = db_bar_size - norm_regsize; |
4412 | |
4413 | @@ -1628,6 +1628,9 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) |
4414 | DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); |
4415 | } |
4416 | |
4417 | + p_hwfn->mcp_info->link_capabilities.default_speed_autoneg = |
4418 | + link->speed.autoneg; |
4419 | + |
4420 | link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; |
4421 | link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; |
4422 | link->pause.autoneg = !!(link_temp & |
4423 | diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c |
4424 | index dba3fbe4800e..0b949c6d83fc 100644 |
4425 | --- a/drivers/net/ethernet/qlogic/qed/qed_main.c |
4426 | +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c |
4427 | @@ -1240,7 +1240,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn, |
4428 | |
4429 | /* TODO - at the moment assume supported and advertised speed equal */ |
4430 | if_link->supported_caps = QED_LM_FIBRE_BIT; |
4431 | - if (params.speed.autoneg) |
4432 | + if (link_caps.default_speed_autoneg) |
4433 | if_link->supported_caps |= QED_LM_Autoneg_BIT; |
4434 | if (params.pause.autoneg || |
4435 | (params.pause.forced_rx && params.pause.forced_tx)) |
4436 | @@ -1250,6 +1250,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn, |
4437 | if_link->supported_caps |= QED_LM_Pause_BIT; |
4438 | |
4439 | if_link->advertised_caps = if_link->supported_caps; |
4440 | + if (params.speed.autoneg) |
4441 | + if_link->advertised_caps |= QED_LM_Autoneg_BIT; |
4442 | + else |
4443 | + if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; |
4444 | if (params.speed.advertised_speeds & |
4445 | NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) |
4446 | if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | |
4447 | diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h |
4448 | index dff520ed069b..7b7a84d2c839 100644 |
4449 | --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h |
4450 | +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h |
4451 | @@ -35,6 +35,7 @@ struct qed_mcp_link_params { |
4452 | |
4453 | struct qed_mcp_link_capabilities { |
4454 | u32 speed_capabilities; |
4455 | + bool default_speed_autoneg; |
4456 | }; |
4457 | |
4458 | struct qed_mcp_link_state { |
4459 | diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c |
4460 | index 509b596cf1e8..bd1ec70fb736 100644 |
4461 | --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c |
4462 | +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c |
4463 | @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) |
4464 | } |
4465 | return -EIO; |
4466 | } |
4467 | - usleep_range(1000, 1500); |
4468 | + udelay(1200); |
4469 | } |
4470 | |
4471 | if (id_reg) |
4472 | diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c |
4473 | index be258d90de9e..e3223f2fe2ff 100644 |
4474 | --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c |
4475 | +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c |
4476 | @@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) |
4477 | sizeof(struct mpi_coredump_global_header); |
4478 | mpi_coredump->mpi_global_header.imageSize = |
4479 | sizeof(struct ql_mpi_coredump); |
4480 | - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", |
4481 | + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", |
4482 | sizeof(mpi_coredump->mpi_global_header.idString)); |
4483 | |
4484 | /* Get generic NIC reg dump */ |
4485 | @@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, |
4486 | sizeof(struct mpi_coredump_global_header); |
4487 | mpi_coredump->mpi_global_header.imageSize = |
4488 | sizeof(struct ql_reg_dump); |
4489 | - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", |
4490 | + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", |
4491 | sizeof(mpi_coredump->mpi_global_header.idString)); |
4492 | |
4493 | |
4494 | diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c |
4495 | index 6e2add979471..8bbb55f31909 100644 |
4496 | --- a/drivers/net/ethernet/qualcomm/qca_spi.c |
4497 | +++ b/drivers/net/ethernet/qualcomm/qca_spi.c |
4498 | @@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca) |
4499 | |
4500 | /* Allocate rx SKB if we don't have one available. */ |
4501 | if (!qca->rx_skb) { |
4502 | - qca->rx_skb = netdev_alloc_skb(net_dev, |
4503 | - net_dev->mtu + VLAN_ETH_HLEN); |
4504 | + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, |
4505 | + net_dev->mtu + |
4506 | + VLAN_ETH_HLEN); |
4507 | if (!qca->rx_skb) { |
4508 | netdev_dbg(net_dev, "out of RX resources\n"); |
4509 | qca->stats.out_of_mem++; |
4510 | @@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca) |
4511 | qca->rx_skb, qca->rx_skb->dev); |
4512 | qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; |
4513 | netif_rx_ni(qca->rx_skb); |
4514 | - qca->rx_skb = netdev_alloc_skb(net_dev, |
4515 | + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, |
4516 | net_dev->mtu + VLAN_ETH_HLEN); |
4517 | if (!qca->rx_skb) { |
4518 | netdev_dbg(net_dev, "out of RX resources\n"); |
4519 | @@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev) |
4520 | if (!qca->rx_buffer) |
4521 | return -ENOBUFS; |
4522 | |
4523 | - qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); |
4524 | + qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + |
4525 | + VLAN_ETH_HLEN); |
4526 | if (!qca->rx_skb) { |
4527 | kfree(qca->rx_buffer); |
4528 | netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); |
4529 | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
4530 | index 18e68c91e651..dbb63640bc6e 100644 |
4531 | --- a/drivers/net/ethernet/realtek/r8169.c |
4532 | +++ b/drivers/net/ethernet/realtek/r8169.c |
4533 | @@ -8446,12 +8446,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
4534 | goto err_out_msi_5; |
4535 | } |
4536 | |
4537 | + pci_set_drvdata(pdev, dev); |
4538 | + |
4539 | rc = register_netdev(dev); |
4540 | if (rc < 0) |
4541 | goto err_out_cnt_6; |
4542 | |
4543 | - pci_set_drvdata(pdev, dev); |
4544 | - |
4545 | netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", |
4546 | rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, |
4547 | (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); |
4548 | diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c |
4549 | index b6816ae00b7a..c8fd99b3ca29 100644 |
4550 | --- a/drivers/net/ethernet/renesas/sh_eth.c |
4551 | +++ b/drivers/net/ethernet/renesas/sh_eth.c |
4552 | @@ -3133,7 +3133,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) |
4553 | /* MDIO bus init */ |
4554 | ret = sh_mdio_init(mdp, pd); |
4555 | if (ret) { |
4556 | - dev_err(&ndev->dev, "failed to initialise MDIO\n"); |
4557 | + dev_err(&pdev->dev, "failed to initialise MDIO\n"); |
4558 | goto out_release; |
4559 | } |
4560 | |
4561 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
4562 | index 98bbb91336e4..c212d1dd8bfd 100644 |
4563 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
4564 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
4565 | @@ -478,7 +478,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) |
4566 | /* PTP v1, UDP, any kind of event packet */ |
4567 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
4568 | /* take time stamp for all event messages */ |
4569 | - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4570 | + if (priv->plat->has_gmac4) |
4571 | + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; |
4572 | + else |
4573 | + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4574 | |
4575 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
4576 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
4577 | @@ -510,7 +513,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) |
4578 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; |
4579 | ptp_v2 = PTP_TCR_TSVER2ENA; |
4580 | /* take time stamp for all event messages */ |
4581 | - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4582 | + if (priv->plat->has_gmac4) |
4583 | + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; |
4584 | + else |
4585 | + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4586 | |
4587 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
4588 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
4589 | @@ -544,7 +550,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) |
4590 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
4591 | ptp_v2 = PTP_TCR_TSVER2ENA; |
4592 | /* take time stamp for all event messages */ |
4593 | - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4594 | + if (priv->plat->has_gmac4) |
4595 | + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; |
4596 | + else |
4597 | + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; |
4598 | |
4599 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; |
4600 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; |
4601 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
4602 | index c06938c47af5..174777cd888e 100644 |
4603 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
4604 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h |
4605 | @@ -63,7 +63,8 @@ |
4606 | /* Enable Snapshot for Messages Relevant to Master */ |
4607 | #define PTP_TCR_TSMSTRENA BIT(15) |
4608 | /* Select PTP packets for Taking Snapshots */ |
4609 | -#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) |
4610 | +#define PTP_TCR_SNAPTYPSEL_1 BIT(16) |
4611 | +#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) |
4612 | /* Enable MAC address for PTP Frame Filtering */ |
4613 | #define PTP_TCR_TSENMACADDR BIT(18) |
4614 | |
4615 | diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c |
4616 | index 2bd1282735b0..552de9c490c6 100644 |
4617 | --- a/drivers/net/ethernet/ti/cpsw.c |
4618 | +++ b/drivers/net/ethernet/ti/cpsw.c |
4619 | @@ -282,6 +282,10 @@ struct cpsw_ss_regs { |
4620 | /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ |
4621 | #define CPSW_V1_SEQ_ID_OFS_SHIFT 16 |
4622 | |
4623 | +#define CPSW_MAX_BLKS_TX 15 |
4624 | +#define CPSW_MAX_BLKS_TX_SHIFT 4 |
4625 | +#define CPSW_MAX_BLKS_RX 5 |
4626 | + |
4627 | struct cpsw_host_regs { |
4628 | u32 max_blks; |
4629 | u32 blk_cnt; |
4630 | @@ -1160,11 +1164,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) |
4631 | switch (cpsw->version) { |
4632 | case CPSW_VERSION_1: |
4633 | slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); |
4634 | + /* Increase RX FIFO size to 5 for supporting fullduplex |
4635 | + * flow control mode |
4636 | + */ |
4637 | + slave_write(slave, |
4638 | + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | |
4639 | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); |
4640 | break; |
4641 | case CPSW_VERSION_2: |
4642 | case CPSW_VERSION_3: |
4643 | case CPSW_VERSION_4: |
4644 | slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); |
4645 | + /* Increase RX FIFO size to 5 for supporting fullduplex |
4646 | + * flow control mode |
4647 | + */ |
4648 | + slave_write(slave, |
4649 | + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | |
4650 | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); |
4651 | break; |
4652 | } |
4653 | |
4654 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
4655 | index 3c1f89ab0110..92ad43e53c72 100644 |
4656 | --- a/drivers/net/geneve.c |
4657 | +++ b/drivers/net/geneve.c |
4658 | @@ -209,6 +209,7 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, |
4659 | struct genevehdr *gnvh = geneve_hdr(skb); |
4660 | struct metadata_dst *tun_dst = NULL; |
4661 | struct pcpu_sw_netstats *stats; |
4662 | + unsigned int len; |
4663 | int err = 0; |
4664 | void *oiph; |
4665 | |
4666 | @@ -222,8 +223,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, |
4667 | tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, |
4668 | vni_to_tunnel_id(gnvh->vni), |
4669 | gnvh->opt_len * 4); |
4670 | - if (!tun_dst) |
4671 | + if (!tun_dst) { |
4672 | + geneve->dev->stats.rx_dropped++; |
4673 | goto drop; |
4674 | + } |
4675 | /* Update tunnel dst according to Geneve options. */ |
4676 | ip_tunnel_info_opts_set(&tun_dst->u.tun_info, |
4677 | gnvh->options, gnvh->opt_len * 4); |
4678 | @@ -231,8 +234,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, |
4679 | /* Drop packets w/ critical options, |
4680 | * since we don't support any... |
4681 | */ |
4682 | - if (gnvh->critical) |
4683 | + if (gnvh->critical) { |
4684 | + geneve->dev->stats.rx_frame_errors++; |
4685 | + geneve->dev->stats.rx_errors++; |
4686 | goto drop; |
4687 | + } |
4688 | } |
4689 | |
4690 | skb_reset_mac_header(skb); |
4691 | @@ -243,8 +249,10 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, |
4692 | skb_dst_set(skb, &tun_dst->dst); |
4693 | |
4694 | /* Ignore packet loops (and multicast echo) */ |
4695 | - if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) |
4696 | + if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) { |
4697 | + geneve->dev->stats.rx_errors++; |
4698 | goto drop; |
4699 | + } |
4700 | |
4701 | oiph = skb_network_header(skb); |
4702 | skb_reset_network_header(skb); |
4703 | @@ -276,13 +284,15 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, |
4704 | } |
4705 | } |
4706 | |
4707 | - stats = this_cpu_ptr(geneve->dev->tstats); |
4708 | - u64_stats_update_begin(&stats->syncp); |
4709 | - stats->rx_packets++; |
4710 | - stats->rx_bytes += skb->len; |
4711 | - u64_stats_update_end(&stats->syncp); |
4712 | - |
4713 | - gro_cells_receive(&geneve->gro_cells, skb); |
4714 | + len = skb->len; |
4715 | + err = gro_cells_receive(&geneve->gro_cells, skb); |
4716 | + if (likely(err == NET_RX_SUCCESS)) { |
4717 | + stats = this_cpu_ptr(geneve->dev->tstats); |
4718 | + u64_stats_update_begin(&stats->syncp); |
4719 | + stats->rx_packets++; |
4720 | + stats->rx_bytes += len; |
4721 | + u64_stats_update_end(&stats->syncp); |
4722 | + } |
4723 | return; |
4724 | drop: |
4725 | /* Consume bad packet */ |
4726 | @@ -332,7 +342,7 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) |
4727 | struct geneve_sock *gs; |
4728 | int opts_len; |
4729 | |
4730 | - /* Need Geneve and inner Ethernet header to be present */ |
4731 | + /* Need UDP and Geneve header to be present */ |
4732 | if (unlikely(!pskb_may_pull(skb, GENEVE_BASE_HLEN))) |
4733 | goto drop; |
4734 | |
4735 | @@ -355,8 +365,10 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) |
4736 | opts_len = geneveh->opt_len * 4; |
4737 | if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, |
4738 | htons(ETH_P_TEB), |
4739 | - !net_eq(geneve->net, dev_net(geneve->dev)))) |
4740 | + !net_eq(geneve->net, dev_net(geneve->dev)))) { |
4741 | + geneve->dev->stats.rx_dropped++; |
4742 | goto drop; |
4743 | + } |
4744 | |
4745 | geneve_rx(geneve, gs, skb); |
4746 | return 0; |
4747 | diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c |
4748 | index 4bad0b894e9c..27160d1870e1 100644 |
4749 | --- a/drivers/net/hamradio/hdlcdrv.c |
4750 | +++ b/drivers/net/hamradio/hdlcdrv.c |
4751 | @@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
4752 | case HDLCDRVCTL_CALIBRATE: |
4753 | if(!capable(CAP_SYS_RAWIO)) |
4754 | return -EPERM; |
4755 | + if (s->par.bitrate <= 0) |
4756 | + return -EINVAL; |
4757 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) |
4758 | return -EINVAL; |
4759 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; |
4760 | diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c |
4761 | index 2caac0c37059..365a48cfcbbf 100644 |
4762 | --- a/drivers/net/macsec.c |
4763 | +++ b/drivers/net/macsec.c |
4764 | @@ -742,7 +742,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, |
4765 | macsec_fill_iv(iv, secy->sci, pn); |
4766 | |
4767 | sg_init_table(sg, ret); |
4768 | - skb_to_sgvec(skb, sg, 0, skb->len); |
4769 | + ret = skb_to_sgvec(skb, sg, 0, skb->len); |
4770 | + if (unlikely(ret < 0)) { |
4771 | + macsec_txsa_put(tx_sa); |
4772 | + kfree_skb(skb); |
4773 | + return ERR_PTR(ret); |
4774 | + } |
4775 | |
4776 | if (tx_sc->encrypt) { |
4777 | int len = skb->len - macsec_hdr_len(sci_present) - |
4778 | @@ -949,7 +954,11 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, |
4779 | macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); |
4780 | |
4781 | sg_init_table(sg, ret); |
4782 | - skb_to_sgvec(skb, sg, 0, skb->len); |
4783 | + ret = skb_to_sgvec(skb, sg, 0, skb->len); |
4784 | + if (unlikely(ret < 0)) { |
4785 | + kfree_skb(skb); |
4786 | + return ERR_PTR(ret); |
4787 | + } |
4788 | |
4789 | if (hdr->tci_an & MACSEC_TCI_E) { |
4790 | /* confidentiality: ethernet + macsec header |
4791 | diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c |
4792 | index 963838d4fac1..599ce24c514f 100644 |
4793 | --- a/drivers/net/phy/mdio-mux.c |
4794 | +++ b/drivers/net/phy/mdio-mux.c |
4795 | @@ -122,10 +122,9 @@ int mdio_mux_init(struct device *dev, |
4796 | pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL); |
4797 | if (pb == NULL) { |
4798 | ret_val = -ENOMEM; |
4799 | - goto err_parent_bus; |
4800 | + goto err_pb_kz; |
4801 | } |
4802 | |
4803 | - |
4804 | pb->switch_data = data; |
4805 | pb->switch_fn = switch_fn; |
4806 | pb->current_child = -1; |
4807 | @@ -154,6 +153,7 @@ int mdio_mux_init(struct device *dev, |
4808 | cb->mii_bus = mdiobus_alloc(); |
4809 | if (!cb->mii_bus) { |
4810 | ret_val = -ENOMEM; |
4811 | + devm_kfree(dev, cb); |
4812 | of_node_put(child_bus_node); |
4813 | break; |
4814 | } |
4815 | @@ -170,7 +170,6 @@ int mdio_mux_init(struct device *dev, |
4816 | mdiobus_free(cb->mii_bus); |
4817 | devm_kfree(dev, cb); |
4818 | } else { |
4819 | - of_node_get(child_bus_node); |
4820 | cb->next = pb->children; |
4821 | pb->children = cb; |
4822 | } |
4823 | @@ -181,9 +180,11 @@ int mdio_mux_init(struct device *dev, |
4824 | return 0; |
4825 | } |
4826 | |
4827 | + devm_kfree(dev, pb); |
4828 | +err_pb_kz: |
4829 | /* balance the reference of_mdio_find_bus() took */ |
4830 | - put_device(&pb->mii_bus->dev); |
4831 | - |
4832 | + if (!mux_bus) |
4833 | + put_device(&parent_bus->dev); |
4834 | err_parent_bus: |
4835 | of_node_put(parent_bus_node); |
4836 | return ret_val; |
4837 | diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c |
4838 | index 2032a6de026b..4da73e2c37cf 100644 |
4839 | --- a/drivers/net/phy/micrel.c |
4840 | +++ b/drivers/net/phy/micrel.c |
4841 | @@ -268,23 +268,12 @@ static int kszphy_nand_tree_disable(struct phy_device *phydev) |
4842 | return ret; |
4843 | } |
4844 | |
4845 | -static int kszphy_config_init(struct phy_device *phydev) |
4846 | +/* Some config bits need to be set again on resume, handle them here. */ |
4847 | +static int kszphy_config_reset(struct phy_device *phydev) |
4848 | { |
4849 | struct kszphy_priv *priv = phydev->priv; |
4850 | - const struct kszphy_type *type; |
4851 | int ret; |
4852 | |
4853 | - if (!priv) |
4854 | - return 0; |
4855 | - |
4856 | - type = priv->type; |
4857 | - |
4858 | - if (type->has_broadcast_disable) |
4859 | - kszphy_broadcast_disable(phydev); |
4860 | - |
4861 | - if (type->has_nand_tree_disable) |
4862 | - kszphy_nand_tree_disable(phydev); |
4863 | - |
4864 | if (priv->rmii_ref_clk_sel) { |
4865 | ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); |
4866 | if (ret) { |
4867 | @@ -295,7 +284,7 @@ static int kszphy_config_init(struct phy_device *phydev) |
4868 | } |
4869 | |
4870 | if (priv->led_mode >= 0) |
4871 | - kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); |
4872 | + kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode); |
4873 | |
4874 | if (phy_interrupt_is_valid(phydev)) { |
4875 | int ctl = phy_read(phydev, MII_BMCR); |
4876 | @@ -311,6 +300,25 @@ static int kszphy_config_init(struct phy_device *phydev) |
4877 | return 0; |
4878 | } |
4879 | |
4880 | +static int kszphy_config_init(struct phy_device *phydev) |
4881 | +{ |
4882 | + struct kszphy_priv *priv = phydev->priv; |
4883 | + const struct kszphy_type *type; |
4884 | + |
4885 | + if (!priv) |
4886 | + return 0; |
4887 | + |
4888 | + type = priv->type; |
4889 | + |
4890 | + if (type->has_broadcast_disable) |
4891 | + kszphy_broadcast_disable(phydev); |
4892 | + |
4893 | + if (type->has_nand_tree_disable) |
4894 | + kszphy_nand_tree_disable(phydev); |
4895 | + |
4896 | + return kszphy_config_reset(phydev); |
4897 | +} |
4898 | + |
4899 | static int ksz8041_config_init(struct phy_device *phydev) |
4900 | { |
4901 | struct device_node *of_node = phydev->mdio.dev.of_node; |
4902 | @@ -715,8 +723,14 @@ static int kszphy_suspend(struct phy_device *phydev) |
4903 | |
4904 | static int kszphy_resume(struct phy_device *phydev) |
4905 | { |
4906 | + int ret; |
4907 | + |
4908 | genphy_resume(phydev); |
4909 | |
4910 | + ret = kszphy_config_reset(phydev); |
4911 | + if (ret) |
4912 | + return ret; |
4913 | + |
4914 | /* Enable PHY Interrupts */ |
4915 | if (phy_interrupt_is_valid(phydev)) { |
4916 | phydev->interrupts = PHY_INTERRUPT_ENABLED; |
4917 | diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c |
4918 | index e2d9ca60e467..4d217649c8b1 100644 |
4919 | --- a/drivers/net/phy/phy.c |
4920 | +++ b/drivers/net/phy/phy.c |
4921 | @@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev) |
4922 | if (phydev->drv->aneg_done) |
4923 | return phydev->drv->aneg_done(phydev); |
4924 | |
4925 | + /* Avoid genphy_aneg_done() if the Clause 45 PHY does not |
4926 | + * implement Clause 22 registers |
4927 | + */ |
4928 | + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) |
4929 | + return -EINVAL; |
4930 | + |
4931 | return genphy_aneg_done(phydev); |
4932 | } |
4933 | |
4934 | diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c |
4935 | index 1951b1085cb8..3045c9662ed6 100644 |
4936 | --- a/drivers/net/ppp/pptp.c |
4937 | +++ b/drivers/net/ppp/pptp.c |
4938 | @@ -465,7 +465,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, |
4939 | po->chan.mtu = dst_mtu(&rt->dst); |
4940 | if (!po->chan.mtu) |
4941 | po->chan.mtu = PPP_MRU; |
4942 | - ip_rt_put(rt); |
4943 | po->chan.mtu -= PPTP_HEADER_OVERHEAD; |
4944 | |
4945 | po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); |
4946 | diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c |
4947 | index a0a9c9d39f01..8673ef3c9cdc 100644 |
4948 | --- a/drivers/net/team/team.c |
4949 | +++ b/drivers/net/team/team.c |
4950 | @@ -1203,11 +1203,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) |
4951 | goto err_dev_open; |
4952 | } |
4953 | |
4954 | - netif_addr_lock_bh(dev); |
4955 | - dev_uc_sync_multiple(port_dev, dev); |
4956 | - dev_mc_sync_multiple(port_dev, dev); |
4957 | - netif_addr_unlock_bh(dev); |
4958 | - |
4959 | err = vlan_vids_add_by_dev(port_dev, dev); |
4960 | if (err) { |
4961 | netdev_err(dev, "Failed to add vlan ids to device %s\n", |
4962 | @@ -1247,6 +1242,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev) |
4963 | goto err_option_port_add; |
4964 | } |
4965 | |
4966 | + netif_addr_lock_bh(dev); |
4967 | + dev_uc_sync_multiple(port_dev, dev); |
4968 | + dev_mc_sync_multiple(port_dev, dev); |
4969 | + netif_addr_unlock_bh(dev); |
4970 | + |
4971 | port->index = -1; |
4972 | list_add_tail_rcu(&port->list, &team->port_list); |
4973 | team_port_enable(team, port); |
4974 | @@ -1271,8 +1271,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) |
4975 | vlan_vids_del_by_dev(port_dev, dev); |
4976 | |
4977 | err_vids_add: |
4978 | - dev_uc_unsync(port_dev, dev); |
4979 | - dev_mc_unsync(port_dev, dev); |
4980 | dev_close(port_dev); |
4981 | |
4982 | err_dev_open: |
4983 | diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c |
4984 | index dc6d3b0a0be8..feb61eaffe32 100644 |
4985 | --- a/drivers/net/usb/cdc_ncm.c |
4986 | +++ b/drivers/net/usb/cdc_ncm.c |
4987 | @@ -1118,6 +1118,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
4988 | u16 n = 0, index, ndplen; |
4989 | u8 ready2send = 0; |
4990 | u32 delayed_ndp_size; |
4991 | + size_t padding_count; |
4992 | |
4993 | /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated |
4994 | * accordingly. Otherwise, we should check here. |
4995 | @@ -1274,11 +1275,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) |
4996 | * a ZLP after full sized NTBs. |
4997 | */ |
4998 | if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && |
4999 | - skb_out->len > ctx->min_tx_pkt) |
5000 | - memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, |
5001 | - ctx->tx_max - skb_out->len); |
5002 | - else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) |
5003 | + skb_out->len > ctx->min_tx_pkt) { |
5004 | + padding_count = ctx->tx_max - skb_out->len; |
5005 | + memset(skb_put(skb_out, padding_count), 0, padding_count); |
5006 | + } else if (skb_out->len < ctx->tx_max && |
5007 | + (skb_out->len % dev->maxpacket) == 0) { |
5008 | *skb_put(skb_out, 1) = 0; /* force short packet */ |
5009 | + } |
5010 | |
5011 | /* set final frame length */ |
5012 | nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; |
5013 | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
5014 | index 1568aedddfc9..472ed6df2221 100644 |
5015 | --- a/drivers/net/virtio_net.c |
5016 | +++ b/drivers/net/virtio_net.c |
5017 | @@ -529,7 +529,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, |
5018 | hdr = skb_vnet_hdr(skb); |
5019 | sg_init_table(rq->sg, 2); |
5020 | sg_set_buf(rq->sg, hdr, vi->hdr_len); |
5021 | - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); |
5022 | + |
5023 | + err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); |
5024 | + if (unlikely(err < 0)) { |
5025 | + dev_kfree_skb(skb); |
5026 | + return err; |
5027 | + } |
5028 | |
5029 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); |
5030 | if (err < 0) |
5031 | @@ -831,7 +836,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
5032 | struct virtio_net_hdr_mrg_rxbuf *hdr; |
5033 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
5034 | struct virtnet_info *vi = sq->vq->vdev->priv; |
5035 | - unsigned num_sg; |
5036 | + int num_sg; |
5037 | unsigned hdr_len = vi->hdr_len; |
5038 | bool can_push; |
5039 | |
5040 | @@ -858,11 +863,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
5041 | if (can_push) { |
5042 | __skb_push(skb, hdr_len); |
5043 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
5044 | + if (unlikely(num_sg < 0)) |
5045 | + return num_sg; |
5046 | /* Pull header back to avoid skew in tx bytes calculations. */ |
5047 | __skb_pull(skb, hdr_len); |
5048 | } else { |
5049 | sg_set_buf(sq->sg, hdr, hdr_len); |
5050 | - num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; |
5051 | + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); |
5052 | + if (unlikely(num_sg < 0)) |
5053 | + return num_sg; |
5054 | + num_sg++; |
5055 | } |
5056 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
5057 | } |
5058 | diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c |
5059 | index 4afba17e2403..f809eed0343c 100644 |
5060 | --- a/drivers/net/vmxnet3/vmxnet3_drv.c |
5061 | +++ b/drivers/net/vmxnet3/vmxnet3_drv.c |
5062 | @@ -2962,6 +2962,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter) |
5063 | /* we need to enable NAPI, otherwise dev_close will deadlock */ |
5064 | for (i = 0; i < adapter->num_rx_queues; i++) |
5065 | napi_enable(&adapter->rx_queue[i].napi); |
5066 | + /* |
5067 | + * Need to clear the quiesce bit to ensure that vmxnet3_close |
5068 | + * can quiesce the device properly |
5069 | + */ |
5070 | + clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); |
5071 | dev_close(adapter->netdev); |
5072 | } |
5073 | |
5074 | diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c |
5075 | index 346e48698555..42c9480acdc7 100644 |
5076 | --- a/drivers/net/vrf.c |
5077 | +++ b/drivers/net/vrf.c |
5078 | @@ -585,13 +585,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s |
5079 | neigh = __ipv4_neigh_lookup_noref(dev, nexthop); |
5080 | if (unlikely(!neigh)) |
5081 | neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); |
5082 | - if (!IS_ERR(neigh)) |
5083 | + if (!IS_ERR(neigh)) { |
5084 | ret = dst_neigh_output(dst, neigh, skb); |
5085 | + rcu_read_unlock_bh(); |
5086 | + return ret; |
5087 | + } |
5088 | |
5089 | rcu_read_unlock_bh(); |
5090 | err: |
5091 | - if (unlikely(ret < 0)) |
5092 | - vrf_tx_error(skb->dev, skb); |
5093 | + vrf_tx_error(skb->dev, skb); |
5094 | return ret; |
5095 | } |
5096 | |
5097 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
5098 | index 0f5dfb8a545d..28afdf22b88f 100644 |
5099 | --- a/drivers/net/vxlan.c |
5100 | +++ b/drivers/net/vxlan.c |
5101 | @@ -930,7 +930,7 @@ static bool vxlan_snoop(struct net_device *dev, |
5102 | return false; |
5103 | |
5104 | /* Don't migrate static entries, drop packets */ |
5105 | - if (f->state & NUD_NOARP) |
5106 | + if (f->state & (NUD_PERMANENT | NUD_NOARP)) |
5107 | return true; |
5108 | |
5109 | if (net_ratelimit()) |
5110 | diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c |
5111 | index 65647533b401..a8bd68f252e9 100644 |
5112 | --- a/drivers/net/wan/fsl_ucc_hdlc.c |
5113 | +++ b/drivers/net/wan/fsl_ucc_hdlc.c |
5114 | @@ -137,7 +137,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) |
5115 | priv->tx_ring_size = TX_BD_RING_LEN; |
5116 | /* Alloc Rx BD */ |
5117 | priv->rx_bd_base = dma_alloc_coherent(priv->dev, |
5118 | - RX_BD_RING_LEN * sizeof(struct qe_bd *), |
5119 | + RX_BD_RING_LEN * sizeof(struct qe_bd), |
5120 | &priv->dma_rx_bd, GFP_KERNEL); |
5121 | |
5122 | if (!priv->rx_bd_base) { |
5123 | @@ -148,7 +148,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) |
5124 | |
5125 | /* Alloc Tx BD */ |
5126 | priv->tx_bd_base = dma_alloc_coherent(priv->dev, |
5127 | - TX_BD_RING_LEN * sizeof(struct qe_bd *), |
5128 | + TX_BD_RING_LEN * sizeof(struct qe_bd), |
5129 | &priv->dma_tx_bd, GFP_KERNEL); |
5130 | |
5131 | if (!priv->tx_bd_base) { |
5132 | @@ -158,7 +158,7 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) |
5133 | } |
5134 | |
5135 | /* Alloc parameter ram for ucc hdlc */ |
5136 | - priv->ucc_pram_offset = qe_muram_alloc(sizeof(priv->ucc_pram), |
5137 | + priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param), |
5138 | ALIGNMENT_OF_UCC_HDLC_PRAM); |
5139 | |
5140 | if (priv->ucc_pram_offset < 0) { |
5141 | @@ -295,11 +295,11 @@ static int uhdlc_init(struct ucc_hdlc_private *priv) |
5142 | qe_muram_free(priv->ucc_pram_offset); |
5143 | free_tx_bd: |
5144 | dma_free_coherent(priv->dev, |
5145 | - TX_BD_RING_LEN * sizeof(struct qe_bd *), |
5146 | + TX_BD_RING_LEN * sizeof(struct qe_bd), |
5147 | priv->tx_bd_base, priv->dma_tx_bd); |
5148 | free_rx_bd: |
5149 | dma_free_coherent(priv->dev, |
5150 | - RX_BD_RING_LEN * sizeof(struct qe_bd *), |
5151 | + RX_BD_RING_LEN * sizeof(struct qe_bd), |
5152 | priv->rx_bd_base, priv->dma_rx_bd); |
5153 | free_uccf: |
5154 | ucc_fast_free(priv->uccf); |
5155 | @@ -454,7 +454,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) |
5156 | static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit) |
5157 | { |
5158 | struct net_device *dev = priv->ndev; |
5159 | - struct sk_buff *skb; |
5160 | + struct sk_buff *skb = NULL; |
5161 | hdlc_device *hdlc = dev_to_hdlc(dev); |
5162 | struct qe_bd *bd; |
5163 | u32 bd_status; |
5164 | @@ -688,7 +688,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv) |
5165 | |
5166 | if (priv->rx_bd_base) { |
5167 | dma_free_coherent(priv->dev, |
5168 | - RX_BD_RING_LEN * sizeof(struct qe_bd *), |
5169 | + RX_BD_RING_LEN * sizeof(struct qe_bd), |
5170 | priv->rx_bd_base, priv->dma_rx_bd); |
5171 | |
5172 | priv->rx_bd_base = NULL; |
5173 | @@ -697,7 +697,7 @@ static void uhdlc_memclean(struct ucc_hdlc_private *priv) |
5174 | |
5175 | if (priv->tx_bd_base) { |
5176 | dma_free_coherent(priv->dev, |
5177 | - TX_BD_RING_LEN * sizeof(struct qe_bd *), |
5178 | + TX_BD_RING_LEN * sizeof(struct qe_bd), |
5179 | priv->tx_bd_base, priv->dma_tx_bd); |
5180 | |
5181 | priv->tx_bd_base = NULL; |
5182 | @@ -1002,7 +1002,7 @@ static int ucc_hdlc_probe(struct platform_device *pdev) |
5183 | struct device_node *np = pdev->dev.of_node; |
5184 | struct ucc_hdlc_private *uhdlc_priv = NULL; |
5185 | struct ucc_tdm_info *ut_info; |
5186 | - struct ucc_tdm *utdm; |
5187 | + struct ucc_tdm *utdm = NULL; |
5188 | struct resource res; |
5189 | struct net_device *dev; |
5190 | hdlc_device *hdlc; |
5191 | diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h |
5192 | index 7d3231acfb24..82bdec744055 100644 |
5193 | --- a/drivers/net/wireless/ath/ath10k/bmi.h |
5194 | +++ b/drivers/net/wireless/ath/ath10k/bmi.h |
5195 | @@ -83,6 +83,8 @@ enum bmi_cmd_id { |
5196 | #define BMI_NVRAM_SEG_NAME_SZ 16 |
5197 | |
5198 | #define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10 |
5199 | +#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000 |
5200 | +#define BMI_PARAM_FLASH_SECTION_ALL 0x10000 |
5201 | |
5202 | #define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00 |
5203 | #define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10 |
5204 | diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c |
5205 | index 7b3017f55e3d..65ad7a130ca1 100644 |
5206 | --- a/drivers/net/wireless/ath/ath10k/core.c |
5207 | +++ b/drivers/net/wireless/ath/ath10k/core.c |
5208 | @@ -652,7 +652,7 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) |
5209 | { |
5210 | u32 result, address; |
5211 | u8 board_id, chip_id; |
5212 | - int ret; |
5213 | + int ret, bmi_board_id_param; |
5214 | |
5215 | address = ar->hw_params.patch_load_addr; |
5216 | |
5217 | @@ -676,8 +676,13 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar) |
5218 | return ret; |
5219 | } |
5220 | |
5221 | - ret = ath10k_bmi_execute(ar, address, BMI_PARAM_GET_EEPROM_BOARD_ID, |
5222 | - &result); |
5223 | + if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || |
5224 | + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) |
5225 | + bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID; |
5226 | + else |
5227 | + bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID; |
5228 | + |
5229 | + ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result); |
5230 | if (ret) { |
5231 | ath10k_err(ar, "could not execute otp for board id check: %d\n", |
5232 | ret); |
5233 | @@ -739,6 +744,11 @@ static int ath10k_download_and_run_otp(struct ath10k *ar) |
5234 | return ret; |
5235 | } |
5236 | |
5237 | + /* As of now pre-cal is valid for 10_4 variants */ |
5238 | + if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT || |
5239 | + ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE) |
5240 | + bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL; |
5241 | + |
5242 | ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result); |
5243 | if (ret) { |
5244 | ath10k_err(ar, "could not execute otp (%d)\n", ret); |
5245 | diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c |
5246 | index 4f8d9ed04f5e..4a1054408f1a 100644 |
5247 | --- a/drivers/net/wireless/ath/ath5k/debug.c |
5248 | +++ b/drivers/net/wireless/ath/ath5k/debug.c |
5249 | @@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file) |
5250 | } |
5251 | |
5252 | for (i = 0; i < eesize; ++i) { |
5253 | - AR5K_EEPROM_READ(i, val); |
5254 | + if (!ath5k_hw_nvram_read(ah, i, &val)) { |
5255 | + ret = -EIO; |
5256 | + goto freebuf; |
5257 | + } |
5258 | buf[i] = val; |
5259 | } |
5260 | |
5261 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c |
5262 | index d4b73dedf89b..b35adbca7a5d 100644 |
5263 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c |
5264 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c |
5265 | @@ -79,8 +79,8 @@ |
5266 | /* Lowest firmware API version supported */ |
5267 | #define IWL7260_UCODE_API_MIN 17 |
5268 | #define IWL7265_UCODE_API_MIN 17 |
5269 | -#define IWL7265D_UCODE_API_MIN 17 |
5270 | -#define IWL3168_UCODE_API_MIN 20 |
5271 | +#define IWL7265D_UCODE_API_MIN 22 |
5272 | +#define IWL3168_UCODE_API_MIN 22 |
5273 | |
5274 | /* NVM versions */ |
5275 | #define IWL7260_NVM_VERSION 0x0a1d |
5276 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c |
5277 | index 8d3e53fac1da..20d08ddb4388 100644 |
5278 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c |
5279 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c |
5280 | @@ -74,8 +74,8 @@ |
5281 | #define IWL8265_UCODE_API_MAX 26 |
5282 | |
5283 | /* Lowest firmware API version supported */ |
5284 | -#define IWL8000_UCODE_API_MIN 17 |
5285 | -#define IWL8265_UCODE_API_MIN 20 |
5286 | +#define IWL8000_UCODE_API_MIN 22 |
5287 | +#define IWL8265_UCODE_API_MIN 22 |
5288 | |
5289 | /* NVM versions */ |
5290 | #define IWL8000_NVM_VERSION 0x0a1d |
5291 | diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h |
5292 | index 406ef301b8ab..da8234b762bf 100644 |
5293 | --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h |
5294 | +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h |
5295 | @@ -369,6 +369,7 @@ |
5296 | #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) |
5297 | |
5298 | #define DBGC_IN_SAMPLE (0xa03c00) |
5299 | +#define DBGC_OUT_CTRL (0xa03c0c) |
5300 | |
5301 | /* enable the ID buf for read */ |
5302 | #define WFPM_PS_CTL_CLR 0xA0300C |
5303 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c |
5304 | index 700d244df34b..2642d8e477b8 100644 |
5305 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c |
5306 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c |
5307 | @@ -914,14 +914,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, |
5308 | return 0; |
5309 | } |
5310 | |
5311 | -static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) |
5312 | -{ |
5313 | - if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) |
5314 | - iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); |
5315 | - else |
5316 | - iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); |
5317 | -} |
5318 | - |
5319 | int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) |
5320 | { |
5321 | u8 *ptr; |
5322 | @@ -935,10 +927,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) |
5323 | /* EARLY START - firmware's configuration is hard coded */ |
5324 | if ((!mvm->fw->dbg_conf_tlv[conf_id] || |
5325 | !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && |
5326 | - conf_id == FW_DBG_START_FROM_ALIVE) { |
5327 | - iwl_mvm_restart_early_start(mvm); |
5328 | + conf_id == FW_DBG_START_FROM_ALIVE) |
5329 | return 0; |
5330 | - } |
5331 | |
5332 | if (!mvm->fw->dbg_conf_tlv[conf_id]) |
5333 | return -EINVAL; |
5334 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h |
5335 | index c60703e0c246..2b1c691bb4b2 100644 |
5336 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h |
5337 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h |
5338 | @@ -1666,8 +1666,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); |
5339 | */ |
5340 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) |
5341 | { |
5342 | + u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : |
5343 | + IWL_MVM_CMD_QUEUE; |
5344 | + |
5345 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & |
5346 | - ~BIT(IWL_MVM_CMD_QUEUE)); |
5347 | + ~BIT(cmd_queue)); |
5348 | } |
5349 | |
5350 | static inline |
5351 | @@ -1687,6 +1690,7 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, |
5352 | static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) |
5353 | { |
5354 | mvm->ucode_loaded = false; |
5355 | + mvm->fw_dbg_conf = FW_DBG_INVALID; |
5356 | iwl_trans_stop_device(mvm->trans); |
5357 | } |
5358 | |
5359 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c |
5360 | index 4d35deb628bc..6d38eec3f9d3 100644 |
5361 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c |
5362 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c |
5363 | @@ -1118,21 +1118,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) |
5364 | |
5365 | mutex_lock(&mvm->mutex); |
5366 | |
5367 | - /* stop recording */ |
5368 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
5369 | + /* stop recording */ |
5370 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); |
5371 | + |
5372 | + iwl_mvm_fw_error_dump(mvm); |
5373 | + |
5374 | + /* start recording again if the firmware is not crashed */ |
5375 | + if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && |
5376 | + mvm->fw->dbg_dest_tlv) |
5377 | + iwl_clear_bits_prph(mvm->trans, |
5378 | + MON_BUFF_SAMPLE_CTL, 0x100); |
5379 | } else { |
5380 | + u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); |
5381 | + u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); |
5382 | + |
5383 | + /* stop recording */ |
5384 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); |
5385 | - /* wait before we collect the data till the DBGC stop */ |
5386 | udelay(100); |
5387 | - } |
5388 | + iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); |
5389 | + /* wait before we collect the data till the DBGC stop */ |
5390 | + udelay(500); |
5391 | |
5392 | - iwl_mvm_fw_error_dump(mvm); |
5393 | + iwl_mvm_fw_error_dump(mvm); |
5394 | |
5395 | - /* start recording again if the firmware is not crashed */ |
5396 | - WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && |
5397 | - mvm->fw->dbg_dest_tlv && |
5398 | - iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); |
5399 | + /* start recording again if the firmware is not crashed */ |
5400 | + if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && |
5401 | + mvm->fw->dbg_dest_tlv) { |
5402 | + iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); |
5403 | + iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); |
5404 | + } |
5405 | + } |
5406 | |
5407 | mutex_unlock(&mvm->mutex); |
5408 | |
5409 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c |
5410 | index bec7d9c46087..c5203568a47a 100644 |
5411 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c |
5412 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c |
5413 | @@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, |
5414 | struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); |
5415 | int ret; |
5416 | |
5417 | - if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) |
5418 | - return -EIO; |
5419 | - |
5420 | mutex_lock(&mvm->mutex); |
5421 | |
5422 | + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { |
5423 | + ret = -EIO; |
5424 | + goto unlock; |
5425 | + } |
5426 | + |
5427 | if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { |
5428 | ret = -EINVAL; |
5429 | goto unlock; |
5430 | diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
5431 | index 10ef44e8ecd5..fe32de252e6b 100644 |
5432 | --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
5433 | +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c |
5434 | @@ -2824,7 +2824,8 @@ static struct iwl_trans_dump_data |
5435 | #ifdef CONFIG_PM_SLEEP |
5436 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
5437 | { |
5438 | - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) |
5439 | + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
5440 | + (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) |
5441 | return iwl_pci_fw_enter_d0i3(trans); |
5442 | |
5443 | return 0; |
5444 | @@ -2832,7 +2833,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
5445 | |
5446 | static void iwl_trans_pcie_resume(struct iwl_trans *trans) |
5447 | { |
5448 | - if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) |
5449 | + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
5450 | + (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) |
5451 | iwl_pci_fw_exit_d0i3(trans); |
5452 | } |
5453 | #endif /* CONFIG_PM_SLEEP */ |
5454 | diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c |
5455 | index 13da95a24cf7..987c7c4f43cd 100644 |
5456 | --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c |
5457 | +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c |
5458 | @@ -142,15 +142,25 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, |
5459 | if (!rt2x00dev->ops->hw->set_rts_threshold && |
5460 | (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | |
5461 | IEEE80211_TX_RC_USE_CTS_PROTECT))) { |
5462 | - if (rt2x00queue_available(queue) <= 1) |
5463 | - goto exit_fail; |
5464 | + if (rt2x00queue_available(queue) <= 1) { |
5465 | + /* |
5466 | + * Recheck for full queue under lock to avoid race |
5467 | + * conditions with rt2x00lib_txdone(). |
5468 | + */ |
5469 | + spin_lock(&queue->tx_lock); |
5470 | + if (rt2x00queue_threshold(queue)) |
5471 | + rt2x00queue_pause_queue(queue); |
5472 | + spin_unlock(&queue->tx_lock); |
5473 | + |
5474 | + goto exit_free_skb; |
5475 | + } |
5476 | |
5477 | if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) |
5478 | - goto exit_fail; |
5479 | + goto exit_free_skb; |
5480 | } |
5481 | |
5482 | if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) |
5483 | - goto exit_fail; |
5484 | + goto exit_free_skb; |
5485 | |
5486 | /* |
5487 | * Pausing queue has to be serialized with rt2x00lib_txdone(). Note |
5488 | @@ -164,10 +174,6 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, |
5489 | |
5490 | return; |
5491 | |
5492 | - exit_fail: |
5493 | - spin_lock(&queue->tx_lock); |
5494 | - rt2x00queue_pause_queue(queue); |
5495 | - spin_unlock(&queue->tx_lock); |
5496 | exit_free_skb: |
5497 | ieee80211_free_txskb(hw, skb); |
5498 | } |
5499 | diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c |
5500 | index 0881ba8535f4..c78abfc7bd96 100644 |
5501 | --- a/drivers/net/wireless/ray_cs.c |
5502 | +++ b/drivers/net/wireless/ray_cs.c |
5503 | @@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = { |
5504 | 0x04, 0x08, /* Noise gain, limit offset */ |
5505 | 0x28, 0x28, /* det rssi, med busy offsets */ |
5506 | 7, /* det sync thresh */ |
5507 | - 0, 2, 2 /* test mode, min, max */ |
5508 | + 0, 2, 2, /* test mode, min, max */ |
5509 | + 0, /* rx/tx delay */ |
5510 | + 0, 0, 0, 0, 0, 0, /* current BSS id */ |
5511 | + 0 /* hop set */ |
5512 | }; |
5513 | |
5514 | /*===========================================================================*/ |
5515 | @@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local) |
5516 | * a_beacon_period = hops a_beacon_period = KuS |
5517 | *//* 64ms = 010000 */ |
5518 | if (local->fw_ver == 0x55) { |
5519 | - memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms, |
5520 | + memcpy(&local->sparm.b4, b4_default_startup_parms, |
5521 | sizeof(struct b4_startup_params)); |
5522 | /* Translate sane kus input values to old build 4/5 format */ |
5523 | /* i = hop time in uS truncated to 3 bytes */ |
5524 | diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c |
5525 | index 1c539c83e8cf..5e41bf04ef61 100644 |
5526 | --- a/drivers/net/wireless/ti/wl1251/main.c |
5527 | +++ b/drivers/net/wireless/ti/wl1251/main.c |
5528 | @@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, |
5529 | WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); |
5530 | |
5531 | enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; |
5532 | - wl1251_acx_arp_ip_filter(wl, enable, addr); |
5533 | - |
5534 | + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); |
5535 | if (ret < 0) |
5536 | goto out_sleep; |
5537 | } |
5538 | diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c |
5539 | index ad9d82eb2aed..c823e9346389 100644 |
5540 | --- a/drivers/nvme/host/core.c |
5541 | +++ b/drivers/nvme/host/core.c |
5542 | @@ -2040,6 +2040,10 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) |
5543 | struct nvme_ns *ns; |
5544 | |
5545 | mutex_lock(&ctrl->namespaces_mutex); |
5546 | + |
5547 | + /* Forcibly start all queues to avoid having stuck requests */ |
5548 | + blk_mq_start_hw_queues(ctrl->admin_q); |
5549 | + |
5550 | list_for_each_entry(ns, &ctrl->namespaces, list) { |
5551 | /* |
5552 | * Revalidating a dead namespace sets capacity to 0. This will |
5553 | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c |
5554 | index e48ecb9303ca..8cc856ecec95 100644 |
5555 | --- a/drivers/nvme/host/pci.c |
5556 | +++ b/drivers/nvme/host/pci.c |
5557 | @@ -1263,7 +1263,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) |
5558 | bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); |
5559 | |
5560 | /* If there is a reset ongoing, we shouldn't reset again. */ |
5561 | - if (work_busy(&dev->reset_work)) |
5562 | + if (dev->ctrl.state == NVME_CTRL_RESETTING) |
5563 | return false; |
5564 | |
5565 | /* We shouldn't reset unless the controller is on fatal error state |
5566 | @@ -1755,7 +1755,7 @@ static void nvme_reset_work(struct work_struct *work) |
5567 | struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); |
5568 | int result = -ENODEV; |
5569 | |
5570 | - if (WARN_ON(dev->ctrl.state == NVME_CTRL_RESETTING)) |
5571 | + if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) |
5572 | goto out; |
5573 | |
5574 | /* |
5575 | @@ -1765,9 +1765,6 @@ static void nvme_reset_work(struct work_struct *work) |
5576 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
5577 | nvme_dev_disable(dev, false); |
5578 | |
5579 | - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) |
5580 | - goto out; |
5581 | - |
5582 | result = nvme_pci_enable(dev); |
5583 | if (result) |
5584 | goto out; |
5585 | @@ -1841,8 +1838,8 @@ static int nvme_reset(struct nvme_dev *dev) |
5586 | { |
5587 | if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) |
5588 | return -ENODEV; |
5589 | - if (work_busy(&dev->reset_work)) |
5590 | - return -ENODEV; |
5591 | + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) |
5592 | + return -EBUSY; |
5593 | if (!queue_work(nvme_workq, &dev->reset_work)) |
5594 | return -EBUSY; |
5595 | return 0; |
5596 | @@ -1944,6 +1941,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
5597 | if (result) |
5598 | goto release_pools; |
5599 | |
5600 | + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING); |
5601 | dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); |
5602 | |
5603 | queue_work(nvme_workq, &dev->reset_work); |
5604 | @@ -1987,6 +1985,7 @@ static void nvme_remove(struct pci_dev *pdev) |
5605 | |
5606 | nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); |
5607 | |
5608 | + cancel_work_sync(&dev->reset_work); |
5609 | pci_set_drvdata(pdev, NULL); |
5610 | |
5611 | if (!pci_device_is_present(pdev)) { |
5612 | diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c |
5613 | index 0a965026b134..fc5b18d3db20 100644 |
5614 | --- a/drivers/pinctrl/intel/pinctrl-baytrail.c |
5615 | +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c |
5616 | @@ -46,6 +46,9 @@ |
5617 | #define BYT_TRIG_POS BIT(25) |
5618 | #define BYT_TRIG_LVL BIT(24) |
5619 | #define BYT_DEBOUNCE_EN BIT(20) |
5620 | +#define BYT_GLITCH_FILTER_EN BIT(19) |
5621 | +#define BYT_GLITCH_F_SLOW_CLK BIT(17) |
5622 | +#define BYT_GLITCH_F_FAST_CLK BIT(16) |
5623 | #define BYT_PULL_STR_SHIFT 9 |
5624 | #define BYT_PULL_STR_MASK (3 << BYT_PULL_STR_SHIFT) |
5625 | #define BYT_PULL_STR_2K (0 << BYT_PULL_STR_SHIFT) |
5626 | @@ -1579,6 +1582,9 @@ static int byt_irq_type(struct irq_data *d, unsigned int type) |
5627 | */ |
5628 | value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG | |
5629 | BYT_TRIG_LVL); |
5630 | + /* Enable glitch filtering */ |
5631 | + value |= BYT_GLITCH_FILTER_EN | BYT_GLITCH_F_SLOW_CLK | |
5632 | + BYT_GLITCH_F_FAST_CLK; |
5633 | |
5634 | writel(value, reg); |
5635 | |
5636 | diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c |
5637 | index 7511723c6b05..257c1c2e5888 100644 |
5638 | --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c |
5639 | +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c |
5640 | @@ -138,7 +138,6 @@ static const struct pinctrl_pin_desc meson_gxbb_periphs_pins[] = { |
5641 | MESON_PIN(GPIOX_19, EE_OFF), |
5642 | MESON_PIN(GPIOX_20, EE_OFF), |
5643 | MESON_PIN(GPIOX_21, EE_OFF), |
5644 | - MESON_PIN(GPIOX_22, EE_OFF), |
5645 | |
5646 | MESON_PIN(GPIOCLK_0, EE_OFF), |
5647 | MESON_PIN(GPIOCLK_1, EE_OFF), |
5648 | diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c |
5649 | index 14bde0db8c24..5b10b50f8686 100644 |
5650 | --- a/drivers/powercap/powercap_sys.c |
5651 | +++ b/drivers/powercap/powercap_sys.c |
5652 | @@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone( |
5653 | |
5654 | power_zone->id = result; |
5655 | idr_init(&power_zone->idr); |
5656 | + result = -ENOMEM; |
5657 | power_zone->name = kstrdup(name, GFP_KERNEL); |
5658 | if (!power_zone->name) |
5659 | goto err_name_alloc; |
5660 | diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c |
5661 | index 6ebd42aad291..25cf3069e2e7 100644 |
5662 | --- a/drivers/rtc/interface.c |
5663 | +++ b/drivers/rtc/interface.c |
5664 | @@ -227,6 +227,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) |
5665 | missing = year; |
5666 | } |
5667 | |
5668 | + /* Can't proceed if alarm is still invalid after replacing |
5669 | + * missing fields. |
5670 | + */ |
5671 | + err = rtc_valid_tm(&alarm->time); |
5672 | + if (err) |
5673 | + goto done; |
5674 | + |
5675 | /* with luck, no rollover is needed */ |
5676 | t_now = rtc_tm_to_time64(&now); |
5677 | t_alm = rtc_tm_to_time64(&alarm->time); |
5678 | @@ -278,9 +285,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) |
5679 | dev_warn(&rtc->dev, "alarm rollover not handled\n"); |
5680 | } |
5681 | |
5682 | -done: |
5683 | err = rtc_valid_tm(&alarm->time); |
5684 | |
5685 | +done: |
5686 | if (err) { |
5687 | dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", |
5688 | alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, |
5689 | diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c |
5690 | index 58698d21c2c3..c4ca6a385790 100644 |
5691 | --- a/drivers/rtc/rtc-m41t80.c |
5692 | +++ b/drivers/rtc/rtc-m41t80.c |
5693 | @@ -168,6 +168,7 @@ static int m41t80_get_datetime(struct i2c_client *client, |
5694 | /* Sets the given date and time to the real time clock. */ |
5695 | static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) |
5696 | { |
5697 | + struct m41t80_data *clientdata = i2c_get_clientdata(client); |
5698 | unsigned char buf[8]; |
5699 | int err, flags; |
5700 | |
5701 | @@ -183,6 +184,17 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) |
5702 | buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100); |
5703 | buf[M41T80_REG_WDAY] = tm->tm_wday; |
5704 | |
5705 | + /* If the square wave output is controlled in the weekday register */ |
5706 | + if (clientdata->features & M41T80_FEATURE_SQ_ALT) { |
5707 | + int val; |
5708 | + |
5709 | + val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY); |
5710 | + if (val < 0) |
5711 | + return val; |
5712 | + |
5713 | + buf[M41T80_REG_WDAY] |= (val & 0xf0); |
5714 | + } |
5715 | + |
5716 | err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC, |
5717 | sizeof(buf), buf); |
5718 | if (err < 0) { |
5719 | diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c |
5720 | index e4324dcf9508..aa53fceaa5e0 100644 |
5721 | --- a/drivers/rtc/rtc-opal.c |
5722 | +++ b/drivers/rtc/rtc-opal.c |
5723 | @@ -150,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) |
5724 | |
5725 | y_m_d = be32_to_cpu(__y_m_d); |
5726 | h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); |
5727 | + |
5728 | + /* check if no alarm is set */ |
5729 | + if (y_m_d == 0 && h_m_s_ms == 0) { |
5730 | + pr_debug("No alarm is set\n"); |
5731 | + rc = -ENOENT; |
5732 | + goto exit; |
5733 | + } else { |
5734 | + pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms); |
5735 | + } |
5736 | + |
5737 | opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); |
5738 | |
5739 | exit: |
5740 | diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c |
5741 | index 0f11c2a228e3..a753ef9c1459 100644 |
5742 | --- a/drivers/rtc/rtc-snvs.c |
5743 | +++ b/drivers/rtc/rtc-snvs.c |
5744 | @@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev) |
5745 | of_property_read_u32(pdev->dev.of_node, "offset", &data->offset); |
5746 | } |
5747 | |
5748 | - if (!data->regmap) { |
5749 | + if (IS_ERR(data->regmap)) { |
5750 | dev_err(&pdev->dev, "Can't find snvs syscon\n"); |
5751 | return -ENODEV; |
5752 | } |
5753 | diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c |
5754 | index 5ecd40884f01..0da246505f70 100644 |
5755 | --- a/drivers/s390/block/dasd.c |
5756 | +++ b/drivers/s390/block/dasd.c |
5757 | @@ -1950,8 +1950,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device, |
5758 | { |
5759 | int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); |
5760 | |
5761 | - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { |
5762 | - /* dasd is being set offline. */ |
5763 | + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && |
5764 | + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { |
5765 | + /* |
5766 | + * dasd is being set offline |
5767 | + * but it is no safe offline where we have to allow I/O |
5768 | + */ |
5769 | return 1; |
5770 | } |
5771 | if (device->stopped) { |
5772 | diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h |
5773 | index fdd4eb4e41b2..e58786f797d9 100644 |
5774 | --- a/drivers/scsi/bnx2fc/bnx2fc.h |
5775 | +++ b/drivers/scsi/bnx2fc/bnx2fc.h |
5776 | @@ -191,6 +191,7 @@ struct bnx2fc_hba { |
5777 | struct bnx2fc_cmd_mgr *cmd_mgr; |
5778 | spinlock_t hba_lock; |
5779 | struct mutex hba_mutex; |
5780 | + struct mutex hba_stats_mutex; |
5781 | unsigned long adapter_state; |
5782 | #define ADAPTER_STATE_UP 0 |
5783 | #define ADAPTER_STATE_GOING_DOWN 1 |
5784 | diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
5785 | index f9ddb6156f14..bee7d37367ca 100644 |
5786 | --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
5787 | +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c |
5788 | @@ -670,15 +670,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) |
5789 | if (!fw_stats) |
5790 | return NULL; |
5791 | |
5792 | + mutex_lock(&hba->hba_stats_mutex); |
5793 | + |
5794 | bnx2fc_stats = fc_get_host_stats(shost); |
5795 | |
5796 | init_completion(&hba->stat_req_done); |
5797 | if (bnx2fc_send_stat_req(hba)) |
5798 | - return bnx2fc_stats; |
5799 | + goto unlock_stats_mutex; |
5800 | rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); |
5801 | if (!rc) { |
5802 | BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); |
5803 | - return bnx2fc_stats; |
5804 | + goto unlock_stats_mutex; |
5805 | } |
5806 | BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); |
5807 | bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; |
5808 | @@ -700,6 +702,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) |
5809 | |
5810 | memcpy(&hba->prev_stats, hba->stats_buffer, |
5811 | sizeof(struct fcoe_statistics_params)); |
5812 | + |
5813 | +unlock_stats_mutex: |
5814 | + mutex_unlock(&hba->hba_stats_mutex); |
5815 | return bnx2fc_stats; |
5816 | } |
5817 | |
5818 | @@ -1348,6 +1353,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) |
5819 | } |
5820 | spin_lock_init(&hba->hba_lock); |
5821 | mutex_init(&hba->hba_mutex); |
5822 | + mutex_init(&hba->hba_stats_mutex); |
5823 | |
5824 | hba->cnic = cnic; |
5825 | |
5826 | diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c |
5827 | index 622bdabc8894..dab195f04da7 100644 |
5828 | --- a/drivers/scsi/csiostor/csio_hw.c |
5829 | +++ b/drivers/scsi/csiostor/csio_hw.c |
5830 | @@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) |
5831 | goto bye; |
5832 | } |
5833 | |
5834 | - mempool_free(mbp, hw->mb_mempool); |
5835 | if (finicsum != cfcsum) { |
5836 | csio_warn(hw, |
5837 | "Config File checksum mismatch: csum=%#x, computed=%#x\n", |
5838 | @@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) |
5839 | rv = csio_hw_validate_caps(hw, mbp); |
5840 | if (rv != 0) |
5841 | goto bye; |
5842 | + |
5843 | + mempool_free(mbp, hw->mb_mempool); |
5844 | + mbp = NULL; |
5845 | + |
5846 | /* |
5847 | * Note that we're operating with parameters |
5848 | * not supplied by the driver, rather than from hard-wired |
5849 | diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c |
5850 | index 4abd3fce5ab6..c2b682916337 100644 |
5851 | --- a/drivers/scsi/libiscsi.c |
5852 | +++ b/drivers/scsi/libiscsi.c |
5853 | @@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) |
5854 | */ |
5855 | switch (session->state) { |
5856 | case ISCSI_STATE_FAILED: |
5857 | + /* |
5858 | + * cmds should fail during shutdown, if the session |
5859 | + * state is bad, allowing completion to happen |
5860 | + */ |
5861 | + if (unlikely(system_state != SYSTEM_RUNNING)) { |
5862 | + reason = FAILURE_SESSION_FAILED; |
5863 | + sc->result = DID_NO_CONNECT << 16; |
5864 | + break; |
5865 | + } |
5866 | case ISCSI_STATE_IN_RECOVERY: |
5867 | reason = FAILURE_SESSION_IN_RECOVERY; |
5868 | sc->result = DID_IMM_RETRY << 16; |
5869 | @@ -1979,6 +1988,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) |
5870 | } |
5871 | |
5872 | if (session->state != ISCSI_STATE_LOGGED_IN) { |
5873 | + /* |
5874 | + * During shutdown, if session is prematurely disconnected, |
5875 | + * recovery won't happen and there will be hung cmds. Not |
5876 | + * handling cmds would trigger EH, also bad in this case. |
5877 | + * Instead, handle cmd, allow completion to happen and let |
5878 | + * upper layer to deal with the result. |
5879 | + */ |
5880 | + if (unlikely(system_state != SYSTEM_RUNNING)) { |
5881 | + sc->result = DID_NO_CONNECT << 16; |
5882 | + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); |
5883 | + rc = BLK_EH_HANDLED; |
5884 | + goto done; |
5885 | + } |
5886 | /* |
5887 | * We are probably in the middle of iscsi recovery so let |
5888 | * that complete and handle the error. |
5889 | @@ -2083,7 +2105,7 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) |
5890 | task->last_timeout = jiffies; |
5891 | spin_unlock(&session->frwd_lock); |
5892 | ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? |
5893 | - "timer reset" : "nh"); |
5894 | + "timer reset" : "shutdown or nh"); |
5895 | return rc; |
5896 | } |
5897 | |
5898 | diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c |
5899 | index 022bb6e10d98..12886f96b286 100644 |
5900 | --- a/drivers/scsi/libsas/sas_expander.c |
5901 | +++ b/drivers/scsi/libsas/sas_expander.c |
5902 | @@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) |
5903 | phy->phy->minimum_linkrate = dr->pmin_linkrate; |
5904 | phy->phy->maximum_linkrate = dr->pmax_linkrate; |
5905 | phy->phy->negotiated_linkrate = phy->linkrate; |
5906 | + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); |
5907 | |
5908 | skip: |
5909 | if (new_phy) |
5910 | @@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) |
5911 | res = smp_execute_task(dev, req, RPEL_REQ_SIZE, |
5912 | resp, RPEL_RESP_SIZE); |
5913 | |
5914 | - if (!res) |
5915 | + if (res) |
5916 | goto out; |
5917 | |
5918 | phy->invalid_dword_count = scsi_to_u32(&resp[12]); |
5919 | @@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) |
5920 | phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); |
5921 | |
5922 | out: |
5923 | + kfree(req); |
5924 | kfree(resp); |
5925 | return res; |
5926 | |
5927 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5928 | index 468acab04d3d..b8589068d175 100644 |
5929 | --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5930 | +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c |
5931 | @@ -4065,19 +4065,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
5932 | return 0; |
5933 | } |
5934 | |
5935 | - /* |
5936 | - * Bug work around for firmware SATL handling. The loop |
5937 | - * is based on atomic operations and ensures consistency |
5938 | - * since we're lockless at this point |
5939 | - */ |
5940 | - do { |
5941 | - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { |
5942 | - scmd->result = SAM_STAT_BUSY; |
5943 | - scmd->scsi_done(scmd); |
5944 | - return 0; |
5945 | - } |
5946 | - } while (_scsih_set_satl_pending(scmd, true)); |
5947 | - |
5948 | sas_target_priv_data = sas_device_priv_data->sas_target; |
5949 | |
5950 | /* invalid device handle */ |
5951 | @@ -4103,6 +4090,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
5952 | sas_device_priv_data->block) |
5953 | return SCSI_MLQUEUE_DEVICE_BUSY; |
5954 | |
5955 | + /* |
5956 | + * Bug work around for firmware SATL handling. The loop |
5957 | + * is based on atomic operations and ensures consistency |
5958 | + * since we're lockless at this point |
5959 | + */ |
5960 | + do { |
5961 | + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { |
5962 | + scmd->result = SAM_STAT_BUSY; |
5963 | + scmd->scsi_done(scmd); |
5964 | + return 0; |
5965 | + } |
5966 | + } while (_scsih_set_satl_pending(scmd, true)); |
5967 | + |
5968 | if (scmd->sc_data_direction == DMA_FROM_DEVICE) |
5969 | mpi_control = MPI2_SCSIIO_CONTROL_READ; |
5970 | else if (scmd->sc_data_direction == DMA_TO_DEVICE) |
5971 | @@ -4124,6 +4124,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
5972 | if (!smid) { |
5973 | pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", |
5974 | ioc->name, __func__); |
5975 | + _scsih_set_satl_pending(scmd, false); |
5976 | goto out; |
5977 | } |
5978 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
5979 | @@ -4154,6 +4155,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) |
5980 | if (mpi_request->DataLength) { |
5981 | if (ioc->build_sg_scmd(ioc, scmd, smid)) { |
5982 | mpt3sas_base_free_smid(ioc, smid); |
5983 | + _scsih_set_satl_pending(scmd, false); |
5984 | goto out; |
5985 | } |
5986 | } else |
5987 | diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c |
5988 | index 170de1c9eac4..f815f9d5045f 100644 |
5989 | --- a/drivers/staging/wlan-ng/prism2mgmt.c |
5990 | +++ b/drivers/staging/wlan-ng/prism2mgmt.c |
5991 | @@ -169,7 +169,7 @@ int prism2mgmt_scan(struct wlandevice *wlandev, void *msgp) |
5992 | hw->ident_sta_fw.variant) > |
5993 | HFA384x_FIRMWARE_VERSION(1, 5, 0)) { |
5994 | if (msg->scantype.data != P80211ENUM_scantype_active) |
5995 | - word = cpu_to_le16(msg->maxchanneltime.data); |
5996 | + word = msg->maxchanneltime.data; |
5997 | else |
5998 | word = 0; |
5999 | |
6000 | diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c |
6001 | index b4d3116cfdaf..3055f9a12a17 100644 |
6002 | --- a/drivers/thermal/power_allocator.c |
6003 | +++ b/drivers/thermal/power_allocator.c |
6004 | @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) |
6005 | struct thermal_instance *instance; |
6006 | struct power_allocator_params *params = tz->governor_data; |
6007 | |
6008 | + mutex_lock(&tz->lock); |
6009 | list_for_each_entry(instance, &tz->thermal_instances, tz_node) { |
6010 | if ((instance->trip != params->trip_max_desired_temperature) || |
6011 | (!cdev_is_power_actor(instance->cdev))) |
6012 | @@ -534,6 +535,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) |
6013 | mutex_unlock(&instance->cdev->lock); |
6014 | thermal_cdev_update(instance->cdev); |
6015 | } |
6016 | + mutex_unlock(&tz->lock); |
6017 | } |
6018 | |
6019 | /** |
6020 | diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c |
6021 | index 54cab59e20ed..fe2291795d2f 100644 |
6022 | --- a/drivers/tty/n_gsm.c |
6023 | +++ b/drivers/tty/n_gsm.c |
6024 | @@ -1467,6 +1467,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) |
6025 | * in which case an opening port goes back to closed and a closing port |
6026 | * is simply put into closed state (any further frames from the other |
6027 | * end will get a DM response) |
6028 | + * |
6029 | + * Some control dlci can stay in ADM mode with other dlci working just |
6030 | + * fine. In that case we can just keep the control dlci open after the |
6031 | + * DLCI_OPENING retries time out. |
6032 | */ |
6033 | |
6034 | static void gsm_dlci_t1(unsigned long data) |
6035 | @@ -1480,8 +1484,15 @@ static void gsm_dlci_t1(unsigned long data) |
6036 | if (dlci->retries) { |
6037 | gsm_command(dlci->gsm, dlci->addr, SABM|PF); |
6038 | mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); |
6039 | - } else |
6040 | + } else if (!dlci->addr && gsm->control == (DM | PF)) { |
6041 | + if (debug & 8) |
6042 | + pr_info("DLCI %d opening in ADM mode.\n", |
6043 | + dlci->addr); |
6044 | + gsm_dlci_open(dlci); |
6045 | + } else { |
6046 | gsm_dlci_close(dlci); |
6047 | + } |
6048 | + |
6049 | break; |
6050 | case DLCI_CLOSING: |
6051 | dlci->retries--; |
6052 | @@ -1499,8 +1510,8 @@ static void gsm_dlci_t1(unsigned long data) |
6053 | * @dlci: DLCI to open |
6054 | * |
6055 | * Commence opening a DLCI from the Linux side. We issue SABM messages |
6056 | - * to the modem which should then reply with a UA, at which point we |
6057 | - * will move into open state. Opening is done asynchronously with retry |
6058 | + * to the modem which should then reply with a UA or ADM, at which point |
6059 | + * we will move into open state. Opening is done asynchronously with retry |
6060 | * running off timers and the responses. |
6061 | */ |
6062 | |
6063 | diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c |
6064 | index da31159a03ec..e8b34f16ba2c 100644 |
6065 | --- a/drivers/tty/serial/8250/8250_omap.c |
6066 | +++ b/drivers/tty/serial/8250/8250_omap.c |
6067 | @@ -613,6 +613,10 @@ static int omap_8250_startup(struct uart_port *port) |
6068 | up->lsr_saved_flags = 0; |
6069 | up->msr_saved_flags = 0; |
6070 | |
6071 | + /* Disable DMA for console UART */ |
6072 | + if (uart_console(port)) |
6073 | + up->dma = NULL; |
6074 | + |
6075 | if (up->dma) { |
6076 | ret = serial8250_request_dma(up); |
6077 | if (ret) { |
6078 | diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c |
6079 | index fcf803ffad19..cdd2f942317c 100644 |
6080 | --- a/drivers/tty/serial/sccnxp.c |
6081 | +++ b/drivers/tty/serial/sccnxp.c |
6082 | @@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev) |
6083 | |
6084 | clk = devm_clk_get(&pdev->dev, NULL); |
6085 | if (IS_ERR(clk)) { |
6086 | - if (PTR_ERR(clk) == -EPROBE_DEFER) { |
6087 | - ret = -EPROBE_DEFER; |
6088 | + ret = PTR_ERR(clk); |
6089 | + if (ret == -EPROBE_DEFER) |
6090 | goto err_out; |
6091 | - } |
6092 | + uartclk = 0; |
6093 | + } else { |
6094 | + clk_prepare_enable(clk); |
6095 | + uartclk = clk_get_rate(clk); |
6096 | + } |
6097 | + |
6098 | + if (!uartclk) { |
6099 | dev_notice(&pdev->dev, "Using default clock frequency\n"); |
6100 | uartclk = s->chip->freq_std; |
6101 | - } else |
6102 | - uartclk = clk_get_rate(clk); |
6103 | + } |
6104 | |
6105 | /* Check input frequency */ |
6106 | if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { |
6107 | diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c |
6108 | index a55c94dfefd2..107f0d194ac5 100644 |
6109 | --- a/drivers/tty/serial/sh-sci.c |
6110 | +++ b/drivers/tty/serial/sh-sci.c |
6111 | @@ -1545,7 +1545,16 @@ static void sci_free_dma(struct uart_port *port) |
6112 | if (s->chan_rx) |
6113 | sci_rx_dma_release(s, false); |
6114 | } |
6115 | -#else |
6116 | + |
6117 | +static void sci_flush_buffer(struct uart_port *port) |
6118 | +{ |
6119 | + /* |
6120 | + * In uart_flush_buffer(), the xmit circular buffer has just been |
6121 | + * cleared, so we have to reset tx_dma_len accordingly. |
6122 | + */ |
6123 | + to_sci_port(port)->tx_dma_len = 0; |
6124 | +} |
6125 | +#else /* !CONFIG_SERIAL_SH_SCI_DMA */ |
6126 | static inline void sci_request_dma(struct uart_port *port) |
6127 | { |
6128 | } |
6129 | @@ -1553,7 +1562,9 @@ static inline void sci_request_dma(struct uart_port *port) |
6130 | static inline void sci_free_dma(struct uart_port *port) |
6131 | { |
6132 | } |
6133 | -#endif |
6134 | + |
6135 | +#define sci_flush_buffer NULL |
6136 | +#endif /* !CONFIG_SERIAL_SH_SCI_DMA */ |
6137 | |
6138 | static irqreturn_t sci_rx_interrupt(int irq, void *ptr) |
6139 | { |
6140 | @@ -2551,6 +2562,7 @@ static const struct uart_ops sci_uart_ops = { |
6141 | .break_ctl = sci_break_ctl, |
6142 | .startup = sci_startup, |
6143 | .shutdown = sci_shutdown, |
6144 | + .flush_buffer = sci_flush_buffer, |
6145 | .set_termios = sci_set_termios, |
6146 | .pm = sci_pm, |
6147 | .type = sci_type, |
6148 | diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c |
6149 | index fba021f5736a..208bc52fc84d 100644 |
6150 | --- a/drivers/uio/uio.c |
6151 | +++ b/drivers/uio/uio.c |
6152 | @@ -279,7 +279,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) |
6153 | map = kzalloc(sizeof(*map), GFP_KERNEL); |
6154 | if (!map) { |
6155 | ret = -ENOMEM; |
6156 | - goto err_map_kobj; |
6157 | + goto err_map; |
6158 | } |
6159 | kobject_init(&map->kobj, &map_attr_type); |
6160 | map->mem = mem; |
6161 | @@ -289,7 +289,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) |
6162 | goto err_map_kobj; |
6163 | ret = kobject_uevent(&map->kobj, KOBJ_ADD); |
6164 | if (ret) |
6165 | - goto err_map; |
6166 | + goto err_map_kobj; |
6167 | } |
6168 | |
6169 | for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { |
6170 | @@ -308,7 +308,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) |
6171 | portio = kzalloc(sizeof(*portio), GFP_KERNEL); |
6172 | if (!portio) { |
6173 | ret = -ENOMEM; |
6174 | - goto err_portio_kobj; |
6175 | + goto err_portio; |
6176 | } |
6177 | kobject_init(&portio->kobj, &portio_attr_type); |
6178 | portio->port = port; |
6179 | @@ -319,7 +319,7 @@ static int uio_dev_add_attributes(struct uio_device *idev) |
6180 | goto err_portio_kobj; |
6181 | ret = kobject_uevent(&portio->kobj, KOBJ_ADD); |
6182 | if (ret) |
6183 | - goto err_portio; |
6184 | + goto err_portio_kobj; |
6185 | } |
6186 | |
6187 | return 0; |
6188 | diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c |
6189 | index 6e0d614a8075..64c6af2c8559 100644 |
6190 | --- a/drivers/usb/chipidea/core.c |
6191 | +++ b/drivers/usb/chipidea/core.c |
6192 | @@ -839,7 +839,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci) |
6193 | { |
6194 | ci_hdrc_gadget_destroy(ci); |
6195 | ci_hdrc_host_destroy(ci); |
6196 | - if (ci->is_otg) |
6197 | + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) |
6198 | ci_hdrc_otg_destroy(ci); |
6199 | } |
6200 | |
6201 | @@ -939,27 +939,35 @@ static int ci_hdrc_probe(struct platform_device *pdev) |
6202 | /* initialize role(s) before the interrupt is requested */ |
6203 | if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { |
6204 | ret = ci_hdrc_host_init(ci); |
6205 | - if (ret) |
6206 | - dev_info(dev, "doesn't support host\n"); |
6207 | + if (ret) { |
6208 | + if (ret == -ENXIO) |
6209 | + dev_info(dev, "doesn't support host\n"); |
6210 | + else |
6211 | + goto deinit_phy; |
6212 | + } |
6213 | } |
6214 | |
6215 | if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { |
6216 | ret = ci_hdrc_gadget_init(ci); |
6217 | - if (ret) |
6218 | - dev_info(dev, "doesn't support gadget\n"); |
6219 | + if (ret) { |
6220 | + if (ret == -ENXIO) |
6221 | + dev_info(dev, "doesn't support gadget\n"); |
6222 | + else |
6223 | + goto deinit_host; |
6224 | + } |
6225 | } |
6226 | |
6227 | if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) { |
6228 | dev_err(dev, "no supported roles\n"); |
6229 | ret = -ENODEV; |
6230 | - goto deinit_phy; |
6231 | + goto deinit_gadget; |
6232 | } |
6233 | |
6234 | if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) { |
6235 | ret = ci_hdrc_otg_init(ci); |
6236 | if (ret) { |
6237 | dev_err(dev, "init otg fails, ret = %d\n", ret); |
6238 | - goto stop; |
6239 | + goto deinit_gadget; |
6240 | } |
6241 | } |
6242 | |
6243 | @@ -1024,7 +1032,12 @@ static int ci_hdrc_probe(struct platform_device *pdev) |
6244 | |
6245 | ci_extcon_unregister(ci); |
6246 | stop: |
6247 | - ci_role_destroy(ci); |
6248 | + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) |
6249 | + ci_hdrc_otg_destroy(ci); |
6250 | +deinit_gadget: |
6251 | + ci_hdrc_gadget_destroy(ci); |
6252 | +deinit_host: |
6253 | + ci_hdrc_host_destroy(ci); |
6254 | deinit_phy: |
6255 | ci_usb_phy_exit(ci); |
6256 | |
6257 | diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c |
6258 | index 72664700b8a2..12ee23f53cdd 100644 |
6259 | --- a/drivers/usb/dwc3/dwc3-keystone.c |
6260 | +++ b/drivers/usb/dwc3/dwc3-keystone.c |
6261 | @@ -107,6 +107,10 @@ static int kdwc3_probe(struct platform_device *pdev) |
6262 | return PTR_ERR(kdwc->usbss); |
6263 | |
6264 | kdwc->clk = devm_clk_get(kdwc->dev, "usb"); |
6265 | + if (IS_ERR(kdwc->clk)) { |
6266 | + dev_err(kdwc->dev, "unable to get usb clock\n"); |
6267 | + return PTR_ERR(kdwc->clk); |
6268 | + } |
6269 | |
6270 | error = clk_prepare_enable(kdwc->clk); |
6271 | if (error < 0) { |
6272 | diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c |
6273 | index dec100811946..ca8b0b1ae37d 100644 |
6274 | --- a/drivers/usb/host/xhci-plat.c |
6275 | +++ b/drivers/usb/host/xhci-plat.c |
6276 | @@ -335,7 +335,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); |
6277 | static struct platform_driver usb_xhci_driver = { |
6278 | .probe = xhci_plat_probe, |
6279 | .remove = xhci_plat_remove, |
6280 | - .shutdown = usb_hcd_platform_shutdown, |
6281 | .driver = { |
6282 | .name = "xhci-hcd", |
6283 | .pm = DEV_PM_OPS, |
6284 | diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c |
6285 | index 4340b4925daa..4d6eb48b2c45 100644 |
6286 | --- a/drivers/usb/storage/ene_ub6250.c |
6287 | +++ b/drivers/usb/storage/ene_ub6250.c |
6288 | @@ -1942,6 +1942,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag) |
6289 | bcb->CDB[0] = 0xEF; |
6290 | |
6291 | result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); |
6292 | + if (us->srb != NULL) |
6293 | + scsi_set_resid(us->srb, 0); |
6294 | info->BIN_FLAG = flag; |
6295 | kfree(buf); |
6296 | |
6297 | @@ -2295,21 +2297,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb) |
6298 | |
6299 | static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) |
6300 | { |
6301 | - int result = 0; |
6302 | + int result = USB_STOR_XFER_GOOD; |
6303 | struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); |
6304 | |
6305 | /*US_DEBUG(usb_stor_show_command(us, srb)); */ |
6306 | scsi_set_resid(srb, 0); |
6307 | - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) { |
6308 | + if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) |
6309 | result = ene_init(us); |
6310 | - } else { |
6311 | + if (result == USB_STOR_XFER_GOOD) { |
6312 | + result = USB_STOR_TRANSPORT_ERROR; |
6313 | if (info->SD_Status.Ready) |
6314 | result = sd_scsi_irp(us, srb); |
6315 | |
6316 | if (info->MS_Status.Ready) |
6317 | result = ms_scsi_irp(us, srb); |
6318 | } |
6319 | - return 0; |
6320 | + return result; |
6321 | } |
6322 | |
6323 | static struct scsi_host_template ene_ub6250_host_template; |
6324 | diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c |
6325 | index e5b7652234fc..487586e2d8b9 100644 |
6326 | --- a/drivers/vhost/net.c |
6327 | +++ b/drivers/vhost/net.c |
6328 | @@ -524,7 +524,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk) |
6329 | |
6330 | if (!len && vq->busyloop_timeout) { |
6331 | /* Both tx vq and rx socket were polled here */ |
6332 | - mutex_lock(&vq->mutex); |
6333 | + mutex_lock_nested(&vq->mutex, 1); |
6334 | vhost_disable_notify(&net->dev, vq); |
6335 | |
6336 | preempt_disable(); |
6337 | @@ -657,7 +657,7 @@ static void handle_rx(struct vhost_net *net) |
6338 | struct iov_iter fixup; |
6339 | __virtio16 num_buffers; |
6340 | |
6341 | - mutex_lock(&vq->mutex); |
6342 | + mutex_lock_nested(&vq->mutex, 0); |
6343 | sock = vq->private_data; |
6344 | if (!sock) |
6345 | goto out; |
6346 | diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c |
6347 | index cd38f5add254..e2c37aeed45a 100644 |
6348 | --- a/drivers/vhost/vhost.c |
6349 | +++ b/drivers/vhost/vhost.c |
6350 | @@ -211,8 +211,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) |
6351 | if (mask) |
6352 | vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); |
6353 | if (mask & POLLERR) { |
6354 | - if (poll->wqh) |
6355 | - remove_wait_queue(poll->wqh, &poll->wait); |
6356 | + vhost_poll_stop(poll); |
6357 | ret = -EINVAL; |
6358 | } |
6359 | |
6360 | @@ -1176,14 +1175,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq, |
6361 | /* Caller should have vq mutex and device mutex */ |
6362 | int vhost_vq_access_ok(struct vhost_virtqueue *vq) |
6363 | { |
6364 | - if (vq->iotlb) { |
6365 | - /* When device IOTLB was used, the access validation |
6366 | - * will be validated during prefetching. |
6367 | - */ |
6368 | - return 1; |
6369 | - } |
6370 | - return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) && |
6371 | - vq_log_access_ok(vq, vq->log_base); |
6372 | + int ret = vq_log_access_ok(vq, vq->log_base); |
6373 | + |
6374 | + if (ret || vq->iotlb) |
6375 | + return ret; |
6376 | + |
6377 | + return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used); |
6378 | } |
6379 | EXPORT_SYMBOL_GPL(vhost_vq_access_ok); |
6380 | |
6381 | diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c |
6382 | index 288318ad21dd..8049e7656daa 100644 |
6383 | --- a/drivers/video/backlight/backlight.c |
6384 | +++ b/drivers/video/backlight/backlight.c |
6385 | @@ -134,7 +134,7 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr, |
6386 | { |
6387 | int rc; |
6388 | struct backlight_device *bd = to_backlight_device(dev); |
6389 | - unsigned long power; |
6390 | + unsigned long power, old_power; |
6391 | |
6392 | rc = kstrtoul(buf, 0, &power); |
6393 | if (rc) |
6394 | @@ -145,10 +145,16 @@ static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr, |
6395 | if (bd->ops) { |
6396 | pr_debug("set power to %lu\n", power); |
6397 | if (bd->props.power != power) { |
6398 | + old_power = bd->props.power; |
6399 | bd->props.power = power; |
6400 | - backlight_update_status(bd); |
6401 | + rc = backlight_update_status(bd); |
6402 | + if (rc) |
6403 | + bd->props.power = old_power; |
6404 | + else |
6405 | + rc = count; |
6406 | + } else { |
6407 | + rc = count; |
6408 | } |
6409 | - rc = count; |
6410 | } |
6411 | mutex_unlock(&bd->ops_lock); |
6412 | |
6413 | @@ -176,8 +182,7 @@ int backlight_device_set_brightness(struct backlight_device *bd, |
6414 | else { |
6415 | pr_debug("set brightness to %lu\n", brightness); |
6416 | bd->props.brightness = brightness; |
6417 | - backlight_update_status(bd); |
6418 | - rc = 0; |
6419 | + rc = backlight_update_status(bd); |
6420 | } |
6421 | } |
6422 | mutex_unlock(&bd->ops_lock); |
6423 | diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c |
6424 | index d7c239ea3d09..f5574060f9c8 100644 |
6425 | --- a/drivers/video/backlight/corgi_lcd.c |
6426 | +++ b/drivers/video/backlight/corgi_lcd.c |
6427 | @@ -177,7 +177,7 @@ static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data) |
6428 | struct spi_message msg; |
6429 | struct spi_transfer xfer = { |
6430 | .len = 1, |
6431 | - .cs_change = 1, |
6432 | + .cs_change = 0, |
6433 | .tx_buf = lcd->buf, |
6434 | }; |
6435 | |
6436 | diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c |
6437 | index eab1f842f9c0..e4bd63e9db6b 100644 |
6438 | --- a/drivers/video/backlight/tdo24m.c |
6439 | +++ b/drivers/video/backlight/tdo24m.c |
6440 | @@ -369,7 +369,7 @@ static int tdo24m_probe(struct spi_device *spi) |
6441 | |
6442 | spi_message_init(m); |
6443 | |
6444 | - x->cs_change = 1; |
6445 | + x->cs_change = 0; |
6446 | x->tx_buf = &lcd->buf[0]; |
6447 | spi_message_add_tail(x, m); |
6448 | |
6449 | diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c |
6450 | index 6a41ea92737a..4dc5ee8debeb 100644 |
6451 | --- a/drivers/video/backlight/tosa_lcd.c |
6452 | +++ b/drivers/video/backlight/tosa_lcd.c |
6453 | @@ -49,7 +49,7 @@ static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data) |
6454 | struct spi_message msg; |
6455 | struct spi_transfer xfer = { |
6456 | .len = 1, |
6457 | - .cs_change = 1, |
6458 | + .cs_change = 0, |
6459 | .tx_buf = buf, |
6460 | }; |
6461 | |
6462 | diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c |
6463 | index da653a080394..54127905bfe7 100644 |
6464 | --- a/drivers/video/fbdev/vfb.c |
6465 | +++ b/drivers/video/fbdev/vfb.c |
6466 | @@ -239,8 +239,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var, |
6467 | */ |
6468 | static int vfb_set_par(struct fb_info *info) |
6469 | { |
6470 | + switch (info->var.bits_per_pixel) { |
6471 | + case 1: |
6472 | + info->fix.visual = FB_VISUAL_MONO01; |
6473 | + break; |
6474 | + case 8: |
6475 | + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; |
6476 | + break; |
6477 | + case 16: |
6478 | + case 24: |
6479 | + case 32: |
6480 | + info->fix.visual = FB_VISUAL_TRUECOLOR; |
6481 | + break; |
6482 | + } |
6483 | + |
6484 | info->fix.line_length = get_line_length(info->var.xres_virtual, |
6485 | info->var.bits_per_pixel); |
6486 | + |
6487 | return 0; |
6488 | } |
6489 | |
6490 | @@ -450,6 +465,8 @@ static int vfb_probe(struct platform_device *dev) |
6491 | goto err2; |
6492 | platform_set_drvdata(dev, info); |
6493 | |
6494 | + vfb_set_par(info); |
6495 | + |
6496 | fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n", |
6497 | videomemorysize >> 10); |
6498 | return 0; |
6499 | diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig |
6500 | index 3eb58cb51e56..8f8909a668d7 100644 |
6501 | --- a/drivers/watchdog/Kconfig |
6502 | +++ b/drivers/watchdog/Kconfig |
6503 | @@ -799,11 +799,12 @@ config EBC_C384_WDT |
6504 | the timeout module parameter. |
6505 | |
6506 | config F71808E_WDT |
6507 | - tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog" |
6508 | + tristate "Fintek F718xx, F818xx Super I/O Watchdog" |
6509 | depends on X86 |
6510 | help |
6511 | - This is the driver for the hardware watchdog on the Fintek |
6512 | - F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers. |
6513 | + This is the driver for the hardware watchdog on the Fintek F71808E, |
6514 | + F71862FG, F71868, F71869, F71882FG, F71889FG, F81865 and F81866 |
6515 | + Super I/O controllers. |
6516 | |
6517 | You can compile this driver directly into the kernel, or use |
6518 | it as a module. The module will be called f71808e_wdt. |
6519 | diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c |
6520 | index 1b7e9169072f..8658dba21768 100644 |
6521 | --- a/drivers/watchdog/f71808e_wdt.c |
6522 | +++ b/drivers/watchdog/f71808e_wdt.c |
6523 | @@ -57,6 +57,7 @@ |
6524 | #define SIO_F71808_ID 0x0901 /* Chipset ID */ |
6525 | #define SIO_F71858_ID 0x0507 /* Chipset ID */ |
6526 | #define SIO_F71862_ID 0x0601 /* Chipset ID */ |
6527 | +#define SIO_F71868_ID 0x1106 /* Chipset ID */ |
6528 | #define SIO_F71869_ID 0x0814 /* Chipset ID */ |
6529 | #define SIO_F71869A_ID 0x1007 /* Chipset ID */ |
6530 | #define SIO_F71882_ID 0x0541 /* Chipset ID */ |
6531 | @@ -101,7 +102,7 @@ MODULE_PARM_DESC(timeout, |
6532 | static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH; |
6533 | module_param(pulse_width, uint, 0); |
6534 | MODULE_PARM_DESC(pulse_width, |
6535 | - "Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms" |
6536 | + "Watchdog signal pulse width. 0(=level), 1, 25, 30, 125, 150, 5000 or 6000 ms" |
6537 | " (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")"); |
6538 | |
6539 | static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN; |
6540 | @@ -119,13 +120,14 @@ module_param(start_withtimeout, uint, 0); |
6541 | MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with" |
6542 | " given initial timeout. Zero (default) disables this feature."); |
6543 | |
6544 | -enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865, |
6545 | - f81866}; |
6546 | +enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg, |
6547 | + f81865, f81866}; |
6548 | |
6549 | static const char *f71808e_names[] = { |
6550 | "f71808fg", |
6551 | "f71858fg", |
6552 | "f71862fg", |
6553 | + "f71868", |
6554 | "f71869", |
6555 | "f71882fg", |
6556 | "f71889fg", |
6557 | @@ -252,16 +254,23 @@ static int watchdog_set_timeout(int timeout) |
6558 | static int watchdog_set_pulse_width(unsigned int pw) |
6559 | { |
6560 | int err = 0; |
6561 | + unsigned int t1 = 25, t2 = 125, t3 = 5000; |
6562 | + |
6563 | + if (watchdog.type == f71868) { |
6564 | + t1 = 30; |
6565 | + t2 = 150; |
6566 | + t3 = 6000; |
6567 | + } |
6568 | |
6569 | mutex_lock(&watchdog.lock); |
6570 | |
6571 | - if (pw <= 1) { |
6572 | + if (pw <= 1) { |
6573 | watchdog.pulse_val = 0; |
6574 | - } else if (pw <= 25) { |
6575 | + } else if (pw <= t1) { |
6576 | watchdog.pulse_val = 1; |
6577 | - } else if (pw <= 125) { |
6578 | + } else if (pw <= t2) { |
6579 | watchdog.pulse_val = 2; |
6580 | - } else if (pw <= 5000) { |
6581 | + } else if (pw <= t3) { |
6582 | watchdog.pulse_val = 3; |
6583 | } else { |
6584 | pr_err("pulse width out of range\n"); |
6585 | @@ -354,6 +363,7 @@ static int watchdog_start(void) |
6586 | goto exit_superio; |
6587 | break; |
6588 | |
6589 | + case f71868: |
6590 | case f71869: |
6591 | /* GPIO14 --> WDTRST# */ |
6592 | superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4); |
6593 | @@ -792,6 +802,9 @@ static int __init f71808e_find(int sioaddr) |
6594 | watchdog.type = f71862fg; |
6595 | err = f71862fg_pin_configure(0); /* validate module parameter */ |
6596 | break; |
6597 | + case SIO_F71868_ID: |
6598 | + watchdog.type = f71868; |
6599 | + break; |
6600 | case SIO_F71869_ID: |
6601 | case SIO_F71869A_ID: |
6602 | watchdog.type = f71869; |
6603 | diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
6604 | index 8ed05d95584a..03ac3ab4b3b4 100644 |
6605 | --- a/fs/btrfs/extent_io.c |
6606 | +++ b/fs/btrfs/extent_io.c |
6607 | @@ -2453,7 +2453,7 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) |
6608 | if (!uptodate) { |
6609 | ClearPageUptodate(page); |
6610 | SetPageError(page); |
6611 | - ret = ret < 0 ? ret : -EIO; |
6612 | + ret = err < 0 ? err : -EIO; |
6613 | mapping_set_error(page->mapping, ret); |
6614 | } |
6615 | } |
6616 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
6617 | index 02e403af9518..49eeed25f200 100644 |
6618 | --- a/fs/cifs/file.c |
6619 | +++ b/fs/cifs/file.c |
6620 | @@ -589,7 +589,7 @@ cifs_relock_file(struct cifsFileInfo *cfile) |
6621 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
6622 | int rc = 0; |
6623 | |
6624 | - down_read(&cinode->lock_sem); |
6625 | + down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); |
6626 | if (cinode->can_cache_brlcks) { |
6627 | /* can cache locks - no need to relock */ |
6628 | up_read(&cinode->lock_sem); |
6629 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
6630 | index 7c26286a525d..44b7ccbe4b08 100644 |
6631 | --- a/fs/cifs/smb2pdu.c |
6632 | +++ b/fs/cifs/smb2pdu.c |
6633 | @@ -1151,15 +1151,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, |
6634 | goto tcon_exit; |
6635 | } |
6636 | |
6637 | - if (rsp->ShareType & SMB2_SHARE_TYPE_DISK) |
6638 | + switch (rsp->ShareType) { |
6639 | + case SMB2_SHARE_TYPE_DISK: |
6640 | cifs_dbg(FYI, "connection to disk share\n"); |
6641 | - else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) { |
6642 | + break; |
6643 | + case SMB2_SHARE_TYPE_PIPE: |
6644 | tcon->ipc = true; |
6645 | cifs_dbg(FYI, "connection to pipe share\n"); |
6646 | - } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) { |
6647 | - tcon->print = true; |
6648 | + break; |
6649 | + case SMB2_SHARE_TYPE_PRINT: |
6650 | + tcon->ipc = true; |
6651 | cifs_dbg(FYI, "connection to printer\n"); |
6652 | - } else { |
6653 | + break; |
6654 | + default: |
6655 | cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType); |
6656 | rc = -EOPNOTSUPP; |
6657 | goto tcon_error_exit; |
6658 | diff --git a/fs/dcache.c b/fs/dcache.c |
6659 | index c0c7fa8224ba..2225b9855c5f 100644 |
6660 | --- a/fs/dcache.c |
6661 | +++ b/fs/dcache.c |
6662 | @@ -461,9 +461,11 @@ static void dentry_lru_add(struct dentry *dentry) |
6663 | * d_drop() is used mainly for stuff that wants to invalidate a dentry for some |
6664 | * reason (NFS timeouts or autofs deletes). |
6665 | * |
6666 | - * __d_drop requires dentry->d_lock. |
6667 | + * __d_drop requires dentry->d_lock |
6668 | + * ___d_drop doesn't mark dentry as "unhashed" |
6669 | + * (dentry->d_hash.pprev will be LIST_POISON2, not NULL). |
6670 | */ |
6671 | -void __d_drop(struct dentry *dentry) |
6672 | +static void ___d_drop(struct dentry *dentry) |
6673 | { |
6674 | if (!d_unhashed(dentry)) { |
6675 | struct hlist_bl_head *b; |
6676 | @@ -479,12 +481,17 @@ void __d_drop(struct dentry *dentry) |
6677 | |
6678 | hlist_bl_lock(b); |
6679 | __hlist_bl_del(&dentry->d_hash); |
6680 | - dentry->d_hash.pprev = NULL; |
6681 | hlist_bl_unlock(b); |
6682 | /* After this call, in-progress rcu-walk path lookup will fail. */ |
6683 | write_seqcount_invalidate(&dentry->d_seq); |
6684 | } |
6685 | } |
6686 | + |
6687 | +void __d_drop(struct dentry *dentry) |
6688 | +{ |
6689 | + ___d_drop(dentry); |
6690 | + dentry->d_hash.pprev = NULL; |
6691 | +} |
6692 | EXPORT_SYMBOL(__d_drop); |
6693 | |
6694 | void d_drop(struct dentry *dentry) |
6695 | @@ -2378,7 +2385,7 @@ EXPORT_SYMBOL(d_delete); |
6696 | static void __d_rehash(struct dentry *entry) |
6697 | { |
6698 | struct hlist_bl_head *b = d_hash(entry->d_name.hash); |
6699 | - BUG_ON(!d_unhashed(entry)); |
6700 | + |
6701 | hlist_bl_lock(b); |
6702 | hlist_bl_add_head_rcu(&entry->d_hash, b); |
6703 | hlist_bl_unlock(b); |
6704 | @@ -2815,9 +2822,9 @@ static void __d_move(struct dentry *dentry, struct dentry *target, |
6705 | write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); |
6706 | |
6707 | /* unhash both */ |
6708 | - /* __d_drop does write_seqcount_barrier, but they're OK to nest. */ |
6709 | - __d_drop(dentry); |
6710 | - __d_drop(target); |
6711 | + /* ___d_drop does write_seqcount_barrier, but they're OK to nest. */ |
6712 | + ___d_drop(dentry); |
6713 | + ___d_drop(target); |
6714 | |
6715 | /* Switch the names.. */ |
6716 | if (exchange) |
6717 | @@ -2829,6 +2836,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target, |
6718 | __d_rehash(dentry); |
6719 | if (exchange) |
6720 | __d_rehash(target); |
6721 | + else |
6722 | + target->d_hash.pprev = NULL; |
6723 | |
6724 | /* ... and switch them in the tree */ |
6725 | if (IS_ROOT(dentry)) { |
6726 | diff --git a/fs/ext4/file.c b/fs/ext4/file.c |
6727 | index 510e66422f04..08fca4add1e2 100644 |
6728 | --- a/fs/ext4/file.c |
6729 | +++ b/fs/ext4/file.c |
6730 | @@ -429,7 +429,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
6731 | int i, num; |
6732 | unsigned long nr_pages; |
6733 | |
6734 | - num = min_t(pgoff_t, end - index, PAGEVEC_SIZE); |
6735 | + num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1; |
6736 | nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, |
6737 | (pgoff_t)num); |
6738 | if (nr_pages == 0) |
6739 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
6740 | index 64056c6eb857..14bd37041e1a 100644 |
6741 | --- a/fs/ext4/mballoc.c |
6742 | +++ b/fs/ext4/mballoc.c |
6743 | @@ -3877,7 +3877,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, |
6744 | |
6745 | err = ext4_mb_load_buddy(sb, group, &e4b); |
6746 | if (err) { |
6747 | - ext4_error(sb, "Error loading buddy information for %u", group); |
6748 | + ext4_warning(sb, "Error %d loading buddy information for %u", |
6749 | + err, group); |
6750 | put_bh(bitmap_bh); |
6751 | return 0; |
6752 | } |
6753 | @@ -4034,10 +4035,11 @@ void ext4_discard_preallocations(struct inode *inode) |
6754 | BUG_ON(pa->pa_type != MB_INODE_PA); |
6755 | group = ext4_get_group_number(sb, pa->pa_pstart); |
6756 | |
6757 | - err = ext4_mb_load_buddy(sb, group, &e4b); |
6758 | + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, |
6759 | + GFP_NOFS|__GFP_NOFAIL); |
6760 | if (err) { |
6761 | - ext4_error(sb, "Error loading buddy information for %u", |
6762 | - group); |
6763 | + ext4_error(sb, "Error %d loading buddy information for %u", |
6764 | + err, group); |
6765 | continue; |
6766 | } |
6767 | |
6768 | @@ -4293,11 +4295,14 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, |
6769 | spin_unlock(&lg->lg_prealloc_lock); |
6770 | |
6771 | list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { |
6772 | + int err; |
6773 | |
6774 | group = ext4_get_group_number(sb, pa->pa_pstart); |
6775 | - if (ext4_mb_load_buddy(sb, group, &e4b)) { |
6776 | - ext4_error(sb, "Error loading buddy information for %u", |
6777 | - group); |
6778 | + err = ext4_mb_load_buddy_gfp(sb, group, &e4b, |
6779 | + GFP_NOFS|__GFP_NOFAIL); |
6780 | + if (err) { |
6781 | + ext4_error(sb, "Error %d loading buddy information for %u", |
6782 | + err, group); |
6783 | continue; |
6784 | } |
6785 | ext4_lock_group(sb, group); |
6786 | @@ -5117,8 +5122,8 @@ ext4_trim_all_free(struct super_block *sb, ext4_group_t group, |
6787 | |
6788 | ret = ext4_mb_load_buddy(sb, group, &e4b); |
6789 | if (ret) { |
6790 | - ext4_error(sb, "Error in loading buddy " |
6791 | - "information for %u", group); |
6792 | + ext4_warning(sb, "Error %d loading buddy information for %u", |
6793 | + ret, group); |
6794 | return ret; |
6795 | } |
6796 | bitmap = e4b.bd_bitmap; |
6797 | diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c |
6798 | index 9d373247222c..85135df0eb34 100644 |
6799 | --- a/fs/lockd/svc.c |
6800 | +++ b/fs/lockd/svc.c |
6801 | @@ -132,6 +132,8 @@ lockd(void *vrqstp) |
6802 | { |
6803 | int err = 0; |
6804 | struct svc_rqst *rqstp = vrqstp; |
6805 | + struct net *net = &init_net; |
6806 | + struct lockd_net *ln = net_generic(net, lockd_net_id); |
6807 | |
6808 | /* try_to_freeze() is called from svc_recv() */ |
6809 | set_freezable(); |
6810 | @@ -176,6 +178,8 @@ lockd(void *vrqstp) |
6811 | if (nlmsvc_ops) |
6812 | nlmsvc_invalidate_all(); |
6813 | nlm_shutdown_hosts(); |
6814 | + cancel_delayed_work_sync(&ln->grace_period_end); |
6815 | + locks_end_grace(&ln->lockd_manager); |
6816 | return 0; |
6817 | } |
6818 | |
6819 | @@ -270,8 +274,6 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net) |
6820 | if (ln->nlmsvc_users) { |
6821 | if (--ln->nlmsvc_users == 0) { |
6822 | nlm_shutdown_hosts_net(net); |
6823 | - cancel_delayed_work_sync(&ln->grace_period_end); |
6824 | - locks_end_grace(&ln->lockd_manager); |
6825 | svc_shutdown_net(serv, net); |
6826 | dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net); |
6827 | } |
6828 | diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c |
6829 | index 13abd608af0f..4539008502ce 100644 |
6830 | --- a/fs/nfs/flexfilelayout/flexfilelayout.c |
6831 | +++ b/fs/nfs/flexfilelayout/flexfilelayout.c |
6832 | @@ -475,6 +475,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, |
6833 | goto out_err_free; |
6834 | |
6835 | /* fh */ |
6836 | + rc = -EIO; |
6837 | p = xdr_inline_decode(&stream, 4); |
6838 | if (!p) |
6839 | goto out_err_free; |
6840 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
6841 | index 4638654e26f3..1b1b616a6171 100644 |
6842 | --- a/fs/nfs/nfs4proc.c |
6843 | +++ b/fs/nfs/nfs4proc.c |
6844 | @@ -3300,6 +3300,7 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f |
6845 | .rpc_resp = &res, |
6846 | }; |
6847 | int status; |
6848 | + int i; |
6849 | |
6850 | bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS | |
6851 | FATTR4_WORD0_FH_EXPIRE_TYPE | |
6852 | @@ -3365,8 +3366,13 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f |
6853 | server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE; |
6854 | server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY; |
6855 | server->cache_consistency_bitmask[2] = 0; |
6856 | + |
6857 | + /* Avoid a regression due to buggy server */ |
6858 | + for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++) |
6859 | + res.exclcreat_bitmask[i] &= res.attr_bitmask[i]; |
6860 | memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask, |
6861 | sizeof(server->exclcreat_bitmask)); |
6862 | + |
6863 | server->acl_bitmask = res.acl_bitmask; |
6864 | server->fh_expire_type = res.fh_expire_type; |
6865 | } |
6866 | @@ -8173,6 +8179,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf |
6867 | /* fall through */ |
6868 | case -NFS4ERR_RETRY_UNCACHED_REP: |
6869 | return -EAGAIN; |
6870 | + case -NFS4ERR_BADSESSION: |
6871 | + case -NFS4ERR_DEADSESSION: |
6872 | + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
6873 | + nfs4_schedule_session_recovery(clp->cl_session, |
6874 | + task->tk_status); |
6875 | + break; |
6876 | default: |
6877 | nfs4_schedule_lease_recovery(clp); |
6878 | } |
6879 | @@ -8251,7 +8263,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp, |
6880 | if (status == 0) |
6881 | status = task->tk_status; |
6882 | rpc_put_task(task); |
6883 | - return 0; |
6884 | out: |
6885 | dprintk("<-- %s status=%d\n", __func__, status); |
6886 | return status; |
6887 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
6888 | index 71deeae6eefd..0bb0e620cf42 100644 |
6889 | --- a/fs/nfs/nfs4state.c |
6890 | +++ b/fs/nfs/nfs4state.c |
6891 | @@ -1637,13 +1637,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp) |
6892 | nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot); |
6893 | } |
6894 | |
6895 | -static void nfs4_reclaim_complete(struct nfs_client *clp, |
6896 | +static int nfs4_reclaim_complete(struct nfs_client *clp, |
6897 | const struct nfs4_state_recovery_ops *ops, |
6898 | struct rpc_cred *cred) |
6899 | { |
6900 | /* Notify the server we're done reclaiming our state */ |
6901 | if (ops->reclaim_complete) |
6902 | - (void)ops->reclaim_complete(clp, cred); |
6903 | + return ops->reclaim_complete(clp, cred); |
6904 | + return 0; |
6905 | } |
6906 | |
6907 | static void nfs4_clear_reclaim_server(struct nfs_server *server) |
6908 | @@ -1690,13 +1691,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp) |
6909 | { |
6910 | const struct nfs4_state_recovery_ops *ops; |
6911 | struct rpc_cred *cred; |
6912 | + int err; |
6913 | |
6914 | if (!nfs4_state_clear_reclaim_reboot(clp)) |
6915 | return; |
6916 | ops = clp->cl_mvops->reboot_recovery_ops; |
6917 | cred = nfs4_get_clid_cred(clp); |
6918 | - nfs4_reclaim_complete(clp, ops, cred); |
6919 | + err = nfs4_reclaim_complete(clp, ops, cred); |
6920 | put_rpccred(cred); |
6921 | + if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION) |
6922 | + set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state); |
6923 | } |
6924 | |
6925 | static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp) |
6926 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c |
6927 | index 306b6c161840..8546384a5fdf 100644 |
6928 | --- a/fs/overlayfs/dir.c |
6929 | +++ b/fs/overlayfs/dir.c |
6930 | @@ -180,6 +180,9 @@ static void ovl_instantiate(struct dentry *dentry, struct inode *inode, |
6931 | inc_nlink(inode); |
6932 | } |
6933 | d_instantiate(dentry, inode); |
6934 | + /* Force lookup of new upper hardlink to find its lower */ |
6935 | + if (hardlink) |
6936 | + d_drop(dentry); |
6937 | } |
6938 | |
6939 | static int ovl_create_upper(struct dentry *dentry, struct inode *inode, |
6940 | diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c |
6941 | index 7fb53d055537..16f6db88c8e5 100644 |
6942 | --- a/fs/overlayfs/inode.c |
6943 | +++ b/fs/overlayfs/inode.c |
6944 | @@ -227,6 +227,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name, |
6945 | return res; |
6946 | } |
6947 | |
6948 | +static bool ovl_can_list(const char *s) |
6949 | +{ |
6950 | + /* List all non-trusted xatts */ |
6951 | + if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) |
6952 | + return true; |
6953 | + |
6954 | + /* Never list trusted.overlay, list other trusted for superuser only */ |
6955 | + return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); |
6956 | +} |
6957 | + |
6958 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) |
6959 | { |
6960 | struct dentry *realdentry = ovl_dentry_real(dentry); |
6961 | @@ -250,7 +260,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) |
6962 | return -EIO; |
6963 | |
6964 | len -= slen; |
6965 | - if (ovl_is_private_xattr(s)) { |
6966 | + if (!ovl_can_list(s)) { |
6967 | res -= slen; |
6968 | memmove(s, s + slen, len); |
6969 | } else { |
6970 | diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h |
6971 | index 8f66aaabadf7..9e3f7618593f 100644 |
6972 | --- a/include/acpi/platform/acgcc.h |
6973 | +++ b/include/acpi/platform/acgcc.h |
6974 | @@ -48,7 +48,17 @@ |
6975 | * Use compiler specific <stdarg.h> is a good practice for even when |
6976 | * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. |
6977 | */ |
6978 | +#ifndef va_arg |
6979 | +#ifdef ACPI_USE_BUILTIN_STDARG |
6980 | +typedef __builtin_va_list va_list; |
6981 | +#define va_start(v, l) __builtin_va_start(v, l) |
6982 | +#define va_end(v) __builtin_va_end(v) |
6983 | +#define va_arg(v, l) __builtin_va_arg(v, l) |
6984 | +#define va_copy(d, s) __builtin_va_copy(d, s) |
6985 | +#else |
6986 | #include <stdarg.h> |
6987 | +#endif |
6988 | +#endif |
6989 | |
6990 | #define ACPI_INLINE __inline__ |
6991 | |
6992 | diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h |
6993 | index 17bd3b7b4e5a..bdb6858e2458 100644 |
6994 | --- a/include/acpi/platform/acintel.h |
6995 | +++ b/include/acpi/platform/acintel.h |
6996 | @@ -48,7 +48,9 @@ |
6997 | * Use compiler specific <stdarg.h> is a good practice for even when |
6998 | * -nostdinc is specified (i.e., ACPI_USE_STANDARD_HEADERS undefined. |
6999 | */ |
7000 | +#ifndef va_arg |
7001 | #include <stdarg.h> |
7002 | +#endif |
7003 | |
7004 | /* Configuration specific to Intel 64-bit C compiler */ |
7005 | |
7006 | diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h |
7007 | index b4ee8f62ce8d..8e2828d48d7f 100644 |
7008 | --- a/include/linux/mlx4/qp.h |
7009 | +++ b/include/linux/mlx4/qp.h |
7010 | @@ -470,6 +470,7 @@ struct mlx4_update_qp_params { |
7011 | u16 rate_val; |
7012 | }; |
7013 | |
7014 | +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); |
7015 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
7016 | enum mlx4_update_qp_attr attr, |
7017 | struct mlx4_update_qp_params *params); |
7018 | diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h |
7019 | index 58276144ba81..5f3e62253e2f 100644 |
7020 | --- a/include/linux/mlx5/device.h |
7021 | +++ b/include/linux/mlx5/device.h |
7022 | @@ -750,8 +750,14 @@ enum { |
7023 | }; |
7024 | |
7025 | enum { |
7026 | - CQE_RSS_HTYPE_IP = 0x3 << 6, |
7027 | - CQE_RSS_HTYPE_L4 = 0x3 << 2, |
7028 | + CQE_RSS_HTYPE_IP = 0x3 << 2, |
7029 | + /* cqe->rss_hash_type[3:2] - IP destination selected for hash |
7030 | + * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved) |
7031 | + */ |
7032 | + CQE_RSS_HTYPE_L4 = 0x3 << 6, |
7033 | + /* cqe->rss_hash_type[7:6] - L4 destination selected for hash |
7034 | + * (00 = none, 01 = TCP. 10 = UDP, 11 = IPSEC.SPI |
7035 | + */ |
7036 | }; |
7037 | |
7038 | enum { |
7039 | diff --git a/include/linux/pci.h b/include/linux/pci.h |
7040 | index 1b711796d989..36522905685b 100644 |
7041 | --- a/include/linux/pci.h |
7042 | +++ b/include/linux/pci.h |
7043 | @@ -1348,9 +1348,9 @@ static inline int pci_alloc_irq_vectors(struct pci_dev *dev, |
7044 | unsigned int min_vecs, unsigned int max_vecs, |
7045 | unsigned int flags) |
7046 | { |
7047 | - if (min_vecs > 1) |
7048 | - return -EINVAL; |
7049 | - return 1; |
7050 | + if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) |
7051 | + return 1; |
7052 | + return -ENOSPC; |
7053 | } |
7054 | static inline void pci_free_irq_vectors(struct pci_dev *dev) |
7055 | { |
7056 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
7057 | index a4d0afc009a7..c549c8c9245c 100644 |
7058 | --- a/include/linux/sched.h |
7059 | +++ b/include/linux/sched.h |
7060 | @@ -1412,6 +1412,7 @@ struct sched_dl_entity { |
7061 | u64 dl_deadline; /* relative deadline of each instance */ |
7062 | u64 dl_period; /* separation of two instances (period) */ |
7063 | u64 dl_bw; /* dl_runtime / dl_deadline */ |
7064 | + u64 dl_density; /* dl_runtime / dl_deadline */ |
7065 | |
7066 | /* |
7067 | * Actual scheduling parameters. Initialized with the values above, |
7068 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
7069 | index 601dfa849d30..1b3a2f95503d 100644 |
7070 | --- a/include/linux/skbuff.h |
7071 | +++ b/include/linux/skbuff.h |
7072 | @@ -984,10 +984,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, |
7073 | unsigned int headroom); |
7074 | struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, |
7075 | int newtailroom, gfp_t priority); |
7076 | -int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
7077 | - int offset, int len); |
7078 | -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, |
7079 | - int len); |
7080 | +int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
7081 | + int offset, int len); |
7082 | +int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, |
7083 | + int offset, int len); |
7084 | int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); |
7085 | int skb_pad(struct sk_buff *skb, int pad); |
7086 | #define dev_kfree_skb(a) consume_skb(a) |
7087 | diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h |
7088 | index 66167138120a..9d57639223c3 100644 |
7089 | --- a/include/net/cfg80211.h |
7090 | +++ b/include/net/cfg80211.h |
7091 | @@ -947,9 +947,9 @@ enum rate_info_flags { |
7092 | * @RATE_INFO_BW_160: 160 MHz bandwidth |
7093 | */ |
7094 | enum rate_info_bw { |
7095 | + RATE_INFO_BW_20 = 0, |
7096 | RATE_INFO_BW_5, |
7097 | RATE_INFO_BW_10, |
7098 | - RATE_INFO_BW_20, |
7099 | RATE_INFO_BW_40, |
7100 | RATE_INFO_BW_80, |
7101 | RATE_INFO_BW_160, |
7102 | diff --git a/include/net/x25.h b/include/net/x25.h |
7103 | index c383aa4edbf0..6d30a01d281d 100644 |
7104 | --- a/include/net/x25.h |
7105 | +++ b/include/net/x25.h |
7106 | @@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *); |
7107 | |
7108 | /* sysctl_net_x25.c */ |
7109 | #ifdef CONFIG_SYSCTL |
7110 | -void x25_register_sysctl(void); |
7111 | +int x25_register_sysctl(void); |
7112 | void x25_unregister_sysctl(void); |
7113 | #else |
7114 | -static inline void x25_register_sysctl(void) {}; |
7115 | +static inline int x25_register_sysctl(void) { return 0; }; |
7116 | static inline void x25_unregister_sysctl(void) {}; |
7117 | #endif /* CONFIG_SYSCTL */ |
7118 | |
7119 | diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h |
7120 | index 0cd4c11479b1..226f915a68c2 100644 |
7121 | --- a/include/soc/fsl/qe/qe.h |
7122 | +++ b/include/soc/fsl/qe/qe.h |
7123 | @@ -668,6 +668,10 @@ struct ucc_slow_pram { |
7124 | #define UCC_FAST_GUMR_CTSS 0x00800000 |
7125 | #define UCC_FAST_GUMR_TXSY 0x00020000 |
7126 | #define UCC_FAST_GUMR_RSYN 0x00010000 |
7127 | +#define UCC_FAST_GUMR_SYNL_MASK 0x0000C000 |
7128 | +#define UCC_FAST_GUMR_SYNL_16 0x0000C000 |
7129 | +#define UCC_FAST_GUMR_SYNL_8 0x00008000 |
7130 | +#define UCC_FAST_GUMR_SYNL_AUTO 0x00004000 |
7131 | #define UCC_FAST_GUMR_RTSM 0x00002000 |
7132 | #define UCC_FAST_GUMR_REVD 0x00000400 |
7133 | #define UCC_FAST_GUMR_ENR 0x00000020 |
7134 | diff --git a/kernel/cpu.c b/kernel/cpu.c |
7135 | index 802eb3361a0a..967163fb90a8 100644 |
7136 | --- a/kernel/cpu.c |
7137 | +++ b/kernel/cpu.c |
7138 | @@ -63,6 +63,12 @@ struct cpuhp_cpu_state { |
7139 | |
7140 | static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state); |
7141 | |
7142 | +#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) |
7143 | +static struct lock_class_key cpuhp_state_key; |
7144 | +static struct lockdep_map cpuhp_state_lock_map = |
7145 | + STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key); |
7146 | +#endif |
7147 | + |
7148 | /** |
7149 | * cpuhp_step - Hotplug state machine step |
7150 | * @name: Name of the step |
7151 | @@ -563,6 +569,7 @@ static void cpuhp_thread_fun(unsigned int cpu) |
7152 | |
7153 | st->should_run = false; |
7154 | |
7155 | + lock_map_acquire(&cpuhp_state_lock_map); |
7156 | /* Single callback invocation for [un]install ? */ |
7157 | if (st->single) { |
7158 | if (st->cb_state < CPUHP_AP_ONLINE) { |
7159 | @@ -594,6 +601,7 @@ static void cpuhp_thread_fun(unsigned int cpu) |
7160 | else if (st->state > st->target) |
7161 | ret = cpuhp_ap_offline(cpu, st); |
7162 | } |
7163 | + lock_map_release(&cpuhp_state_lock_map); |
7164 | st->result = ret; |
7165 | complete(&st->done); |
7166 | } |
7167 | @@ -608,6 +616,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, |
7168 | if (!cpu_online(cpu)) |
7169 | return 0; |
7170 | |
7171 | + lock_map_acquire(&cpuhp_state_lock_map); |
7172 | + lock_map_release(&cpuhp_state_lock_map); |
7173 | + |
7174 | /* |
7175 | * If we are up and running, use the hotplug thread. For early calls |
7176 | * we invoke the thread function directly. |
7177 | @@ -651,6 +662,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu) |
7178 | enum cpuhp_state state = st->state; |
7179 | |
7180 | trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work); |
7181 | + lock_map_acquire(&cpuhp_state_lock_map); |
7182 | + lock_map_release(&cpuhp_state_lock_map); |
7183 | __cpuhp_kick_ap_work(st); |
7184 | wait_for_completion(&st->done); |
7185 | trace_cpuhp_exit(cpu, st->state, state, st->result); |
7186 | diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c |
7187 | index e9fdb5203de5..411226b26bca 100644 |
7188 | --- a/kernel/events/callchain.c |
7189 | +++ b/kernel/events/callchain.c |
7190 | @@ -227,12 +227,18 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user, |
7191 | } |
7192 | |
7193 | if (regs) { |
7194 | + mm_segment_t fs; |
7195 | + |
7196 | if (crosstask) |
7197 | goto exit_put; |
7198 | |
7199 | if (add_mark) |
7200 | perf_callchain_store_context(&ctx, PERF_CONTEXT_USER); |
7201 | + |
7202 | + fs = get_fs(); |
7203 | + set_fs(USER_DS); |
7204 | perf_callchain_user(&ctx, regs); |
7205 | + set_fs(fs); |
7206 | } |
7207 | } |
7208 | |
7209 | diff --git a/kernel/events/core.c b/kernel/events/core.c |
7210 | index 13b9784427b0..c4100c38a467 100644 |
7211 | --- a/kernel/events/core.c |
7212 | +++ b/kernel/events/core.c |
7213 | @@ -5669,9 +5669,6 @@ static void perf_output_read_one(struct perf_output_handle *handle, |
7214 | __output_copy(handle, values, n * sizeof(u64)); |
7215 | } |
7216 | |
7217 | -/* |
7218 | - * XXX PERF_FORMAT_GROUP vs inherited events seems difficult. |
7219 | - */ |
7220 | static void perf_output_read_group(struct perf_output_handle *handle, |
7221 | struct perf_event *event, |
7222 | u64 enabled, u64 running) |
7223 | @@ -5716,6 +5713,13 @@ static void perf_output_read_group(struct perf_output_handle *handle, |
7224 | #define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\ |
7225 | PERF_FORMAT_TOTAL_TIME_RUNNING) |
7226 | |
7227 | +/* |
7228 | + * XXX PERF_SAMPLE_READ vs inherited events seems difficult. |
7229 | + * |
7230 | + * The problem is that its both hard and excessively expensive to iterate the |
7231 | + * child list, not to mention that its impossible to IPI the children running |
7232 | + * on another CPU, from interrupt/NMI context. |
7233 | + */ |
7234 | static void perf_output_read(struct perf_output_handle *handle, |
7235 | struct perf_event *event) |
7236 | { |
7237 | @@ -9259,9 +9263,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, |
7238 | local64_set(&hwc->period_left, hwc->sample_period); |
7239 | |
7240 | /* |
7241 | - * we currently do not support PERF_FORMAT_GROUP on inherited events |
7242 | + * We currently do not support PERF_SAMPLE_READ on inherited events. |
7243 | + * See perf_output_read(). |
7244 | */ |
7245 | - if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) |
7246 | + if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)) |
7247 | goto err_ns; |
7248 | |
7249 | if (!has_branch_stack(event)) |
7250 | @@ -9289,8 +9294,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, |
7251 | event->addr_filters_offs = kcalloc(pmu->nr_addr_filters, |
7252 | sizeof(unsigned long), |
7253 | GFP_KERNEL); |
7254 | - if (!event->addr_filters_offs) |
7255 | + if (!event->addr_filters_offs) { |
7256 | + err = -ENOMEM; |
7257 | goto err_per_task; |
7258 | + } |
7259 | |
7260 | /* force hw sync on the address filters */ |
7261 | event->addr_filters_gen = 1; |
7262 | diff --git a/kernel/pid.c b/kernel/pid.c |
7263 | index 693a64385d59..fa704f88ff8e 100644 |
7264 | --- a/kernel/pid.c |
7265 | +++ b/kernel/pid.c |
7266 | @@ -322,8 +322,10 @@ struct pid *alloc_pid(struct pid_namespace *ns) |
7267 | } |
7268 | |
7269 | if (unlikely(is_child_reaper(pid))) { |
7270 | - if (pid_ns_prepare_proc(ns)) |
7271 | + if (pid_ns_prepare_proc(ns)) { |
7272 | + disable_pid_allocation(ns); |
7273 | goto out_free; |
7274 | + } |
7275 | } |
7276 | |
7277 | get_pid_ns(ns); |
7278 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
7279 | index 291ea6fa7ee6..917be221438b 100644 |
7280 | --- a/kernel/sched/core.c |
7281 | +++ b/kernel/sched/core.c |
7282 | @@ -2184,6 +2184,7 @@ void __dl_clear_params(struct task_struct *p) |
7283 | dl_se->dl_period = 0; |
7284 | dl_se->flags = 0; |
7285 | dl_se->dl_bw = 0; |
7286 | + dl_se->dl_density = 0; |
7287 | |
7288 | dl_se->dl_throttled = 0; |
7289 | dl_se->dl_yielded = 0; |
7290 | @@ -3912,6 +3913,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) |
7291 | dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; |
7292 | dl_se->flags = attr->sched_flags; |
7293 | dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); |
7294 | + dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); |
7295 | |
7296 | /* |
7297 | * Changing the parameters of a task is 'tricky' and we're not doing |
7298 | diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c |
7299 | index 3042881169b4..3042927c8b8a 100644 |
7300 | --- a/kernel/sched/deadline.c |
7301 | +++ b/kernel/sched/deadline.c |
7302 | @@ -484,13 +484,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se, |
7303 | } |
7304 | |
7305 | /* |
7306 | - * When a -deadline entity is queued back on the runqueue, its runtime and |
7307 | - * deadline might need updating. |
7308 | + * Revised wakeup rule [1]: For self-suspending tasks, rather then |
7309 | + * re-initializing task's runtime and deadline, the revised wakeup |
7310 | + * rule adjusts the task's runtime to avoid the task to overrun its |
7311 | + * density. |
7312 | * |
7313 | - * The policy here is that we update the deadline of the entity only if: |
7314 | - * - the current deadline is in the past, |
7315 | - * - using the remaining runtime with the current deadline would make |
7316 | - * the entity exceed its bandwidth. |
7317 | + * Reasoning: a task may overrun the density if: |
7318 | + * runtime / (deadline - t) > dl_runtime / dl_deadline |
7319 | + * |
7320 | + * Therefore, runtime can be adjusted to: |
7321 | + * runtime = (dl_runtime / dl_deadline) * (deadline - t) |
7322 | + * |
7323 | + * In such way that runtime will be equal to the maximum density |
7324 | + * the task can use without breaking any rule. |
7325 | + * |
7326 | + * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant |
7327 | + * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24. |
7328 | + */ |
7329 | +static void |
7330 | +update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq) |
7331 | +{ |
7332 | + u64 laxity = dl_se->deadline - rq_clock(rq); |
7333 | + |
7334 | + /* |
7335 | + * If the task has deadline < period, and the deadline is in the past, |
7336 | + * it should already be throttled before this check. |
7337 | + * |
7338 | + * See update_dl_entity() comments for further details. |
7339 | + */ |
7340 | + WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq))); |
7341 | + |
7342 | + dl_se->runtime = (dl_se->dl_density * laxity) >> 20; |
7343 | +} |
7344 | + |
7345 | +/* |
7346 | + * Regarding the deadline, a task with implicit deadline has a relative |
7347 | + * deadline == relative period. A task with constrained deadline has a |
7348 | + * relative deadline <= relative period. |
7349 | + * |
7350 | + * We support constrained deadline tasks. However, there are some restrictions |
7351 | + * applied only for tasks which do not have an implicit deadline. See |
7352 | + * update_dl_entity() to know more about such restrictions. |
7353 | + * |
7354 | + * The dl_is_implicit() returns true if the task has an implicit deadline. |
7355 | + */ |
7356 | +static inline bool dl_is_implicit(struct sched_dl_entity *dl_se) |
7357 | +{ |
7358 | + return dl_se->dl_deadline == dl_se->dl_period; |
7359 | +} |
7360 | + |
7361 | +/* |
7362 | + * When a deadline entity is placed in the runqueue, its runtime and deadline |
7363 | + * might need to be updated. This is done by a CBS wake up rule. There are two |
7364 | + * different rules: 1) the original CBS; and 2) the Revisited CBS. |
7365 | + * |
7366 | + * When the task is starting a new period, the Original CBS is used. In this |
7367 | + * case, the runtime is replenished and a new absolute deadline is set. |
7368 | + * |
7369 | + * When a task is queued before the begin of the next period, using the |
7370 | + * remaining runtime and deadline could make the entity to overflow, see |
7371 | + * dl_entity_overflow() to find more about runtime overflow. When such case |
7372 | + * is detected, the runtime and deadline need to be updated. |
7373 | + * |
7374 | + * If the task has an implicit deadline, i.e., deadline == period, the Original |
7375 | + * CBS is applied. the runtime is replenished and a new absolute deadline is |
7376 | + * set, as in the previous cases. |
7377 | + * |
7378 | + * However, the Original CBS does not work properly for tasks with |
7379 | + * deadline < period, which are said to have a constrained deadline. By |
7380 | + * applying the Original CBS, a constrained deadline task would be able to run |
7381 | + * runtime/deadline in a period. With deadline < period, the task would |
7382 | + * overrun the runtime/period allowed bandwidth, breaking the admission test. |
7383 | + * |
7384 | + * In order to prevent this misbehave, the Revisited CBS is used for |
7385 | + * constrained deadline tasks when a runtime overflow is detected. In the |
7386 | + * Revisited CBS, rather than replenishing & setting a new absolute deadline, |
7387 | + * the remaining runtime of the task is reduced to avoid runtime overflow. |
7388 | + * Please refer to the comments update_dl_revised_wakeup() function to find |
7389 | + * more about the Revised CBS rule. |
7390 | */ |
7391 | static void update_dl_entity(struct sched_dl_entity *dl_se, |
7392 | struct sched_dl_entity *pi_se) |
7393 | @@ -500,6 +571,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, |
7394 | |
7395 | if (dl_time_before(dl_se->deadline, rq_clock(rq)) || |
7396 | dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { |
7397 | + |
7398 | + if (unlikely(!dl_is_implicit(dl_se) && |
7399 | + !dl_time_before(dl_se->deadline, rq_clock(rq)) && |
7400 | + !dl_se->dl_boosted)){ |
7401 | + update_dl_revised_wakeup(dl_se, rq); |
7402 | + return; |
7403 | + } |
7404 | + |
7405 | dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; |
7406 | dl_se->runtime = pi_se->dl_runtime; |
7407 | } |
7408 | @@ -961,11 +1040,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se) |
7409 | __dequeue_dl_entity(dl_se); |
7410 | } |
7411 | |
7412 | -static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) |
7413 | -{ |
7414 | - return dl_se->dl_deadline < dl_se->dl_period; |
7415 | -} |
7416 | - |
7417 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
7418 | { |
7419 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
7420 | @@ -997,7 +1071,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
7421 | * If that is the case, the task will be throttled and |
7422 | * the replenishment timer will be set to the next period. |
7423 | */ |
7424 | - if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) |
7425 | + if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl)) |
7426 | dl_check_constrained_dl(&p->dl); |
7427 | |
7428 | /* |
7429 | diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c |
7430 | index 3d862f5b0331..f6e8727f7fa3 100644 |
7431 | --- a/kernel/sched/fair.c |
7432 | +++ b/kernel/sched/fair.c |
7433 | @@ -2429,7 +2429,8 @@ void task_numa_work(struct callback_head *work) |
7434 | return; |
7435 | |
7436 | |
7437 | - down_read(&mm->mmap_sem); |
7438 | + if (!down_read_trylock(&mm->mmap_sem)) |
7439 | + return; |
7440 | vma = find_vma(mm, start); |
7441 | if (!vma) { |
7442 | reset_ptenuma_scan(p); |
7443 | diff --git a/mm/vmstat.c b/mm/vmstat.c |
7444 | index 68b3193e4493..5f658b6a684f 100644 |
7445 | --- a/mm/vmstat.c |
7446 | +++ b/mm/vmstat.c |
7447 | @@ -1351,8 +1351,6 @@ static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone) |
7448 | return zone == compare; |
7449 | } |
7450 | |
7451 | - /* The zone must be somewhere! */ |
7452 | - WARN_ON_ONCE(1); |
7453 | return false; |
7454 | } |
7455 | |
7456 | diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c |
7457 | index 767144128b95..fb3e2a50d76e 100644 |
7458 | --- a/net/8021q/vlan_dev.c |
7459 | +++ b/net/8021q/vlan_dev.c |
7460 | @@ -29,6 +29,7 @@ |
7461 | #include <linux/net_tstamp.h> |
7462 | #include <linux/etherdevice.h> |
7463 | #include <linux/ethtool.h> |
7464 | +#include <linux/phy.h> |
7465 | #include <net/arp.h> |
7466 | #include <net/switchdev.h> |
7467 | |
7468 | @@ -658,8 +659,11 @@ static int vlan_ethtool_get_ts_info(struct net_device *dev, |
7469 | { |
7470 | const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); |
7471 | const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; |
7472 | + struct phy_device *phydev = vlan->real_dev->phydev; |
7473 | |
7474 | - if (ops->get_ts_info) { |
7475 | + if (phydev && phydev->drv && phydev->drv->ts_info) { |
7476 | + return phydev->drv->ts_info(phydev, info); |
7477 | + } else if (ops->get_ts_info) { |
7478 | return ops->get_ts_info(vlan->real_dev, info); |
7479 | } else { |
7480 | info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | |
7481 | diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c |
7482 | index 3ac89e9ace71..4bd72d2fe415 100644 |
7483 | --- a/net/bluetooth/hci_core.c |
7484 | +++ b/net/bluetooth/hci_core.c |
7485 | @@ -548,6 +548,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) |
7486 | { |
7487 | struct hci_dev *hdev = req->hdev; |
7488 | u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
7489 | + bool changed = false; |
7490 | |
7491 | /* If Connectionless Slave Broadcast master role is supported |
7492 | * enable all necessary events for it. |
7493 | @@ -557,6 +558,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req) |
7494 | events[1] |= 0x80; /* Synchronization Train Complete */ |
7495 | events[2] |= 0x10; /* Slave Page Response Timeout */ |
7496 | events[2] |= 0x20; /* CSB Channel Map Change */ |
7497 | + changed = true; |
7498 | } |
7499 | |
7500 | /* If Connectionless Slave Broadcast slave role is supported |
7501 | @@ -567,13 +569,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req) |
7502 | events[2] |= 0x02; /* CSB Receive */ |
7503 | events[2] |= 0x04; /* CSB Timeout */ |
7504 | events[2] |= 0x08; /* Truncated Page Complete */ |
7505 | + changed = true; |
7506 | } |
7507 | |
7508 | /* Enable Authenticated Payload Timeout Expired event if supported */ |
7509 | - if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) |
7510 | + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { |
7511 | events[2] |= 0x80; |
7512 | + changed = true; |
7513 | + } |
7514 | |
7515 | - hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events); |
7516 | + /* Some Broadcom based controllers indicate support for Set Event |
7517 | + * Mask Page 2 command, but then actually do not support it. Since |
7518 | + * the default value is all bits set to zero, the command is only |
7519 | + * required if the event mask has to be changed. In case no change |
7520 | + * to the event mask is needed, skip this command. |
7521 | + */ |
7522 | + if (changed) |
7523 | + hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, |
7524 | + sizeof(events), events); |
7525 | } |
7526 | |
7527 | static int hci_init3_req(struct hci_request *req, unsigned long opt) |
7528 | diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c |
7529 | index d3f6c26425b3..255c0a075e49 100644 |
7530 | --- a/net/ceph/osdmap.c |
7531 | +++ b/net/ceph/osdmap.c |
7532 | @@ -295,6 +295,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end) |
7533 | u32 yes; |
7534 | struct crush_rule *r; |
7535 | |
7536 | + err = -EINVAL; |
7537 | ceph_decode_32_safe(p, end, yes, bad); |
7538 | if (!yes) { |
7539 | dout("crush_decode NO rule %d off %x %p to %p\n", |
7540 | diff --git a/net/core/dev.c b/net/core/dev.c |
7541 | index 07d2c93c9636..3d9190c2940d 100644 |
7542 | --- a/net/core/dev.c |
7543 | +++ b/net/core/dev.c |
7544 | @@ -993,7 +993,7 @@ bool dev_valid_name(const char *name) |
7545 | { |
7546 | if (*name == '\0') |
7547 | return false; |
7548 | - if (strlen(name) >= IFNAMSIZ) |
7549 | + if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) |
7550 | return false; |
7551 | if (!strcmp(name, ".") || !strcmp(name, "..")) |
7552 | return false; |
7553 | @@ -2667,7 +2667,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth) |
7554 | if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) |
7555 | return 0; |
7556 | |
7557 | - eth = (struct ethhdr *)skb_mac_header(skb); |
7558 | + eth = (struct ethhdr *)skb->data; |
7559 | type = eth->h_proto; |
7560 | } |
7561 | |
7562 | diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
7563 | index 7b315663f840..a426790b0688 100644 |
7564 | --- a/net/core/neighbour.c |
7565 | +++ b/net/core/neighbour.c |
7566 | @@ -1130,10 +1130,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, |
7567 | lladdr = neigh->ha; |
7568 | } |
7569 | |
7570 | - if (new & NUD_CONNECTED) |
7571 | - neigh->confirmed = jiffies; |
7572 | - neigh->updated = jiffies; |
7573 | - |
7574 | /* If entry was valid and address is not changed, |
7575 | do not change entry state, if new one is STALE. |
7576 | */ |
7577 | @@ -1155,6 +1151,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, |
7578 | } |
7579 | } |
7580 | |
7581 | + /* Update timestamps only once we know we will make a change to the |
7582 | + * neighbour entry. Otherwise we risk to move the locktime window with |
7583 | + * noop updates and ignore relevant ARP updates. |
7584 | + */ |
7585 | + if (new != old || lladdr != neigh->ha) { |
7586 | + if (new & NUD_CONNECTED) |
7587 | + neigh->confirmed = jiffies; |
7588 | + neigh->updated = jiffies; |
7589 | + } |
7590 | + |
7591 | if (new != old) { |
7592 | neigh_del_timer(neigh); |
7593 | if (new & NUD_PROBE) |
7594 | diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c |
7595 | index b7efe2f19f83..04fd04ccaa04 100644 |
7596 | --- a/net/core/net_namespace.c |
7597 | +++ b/net/core/net_namespace.c |
7598 | @@ -312,6 +312,25 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) |
7599 | goto out; |
7600 | } |
7601 | |
7602 | +static int __net_init net_defaults_init_net(struct net *net) |
7603 | +{ |
7604 | + net->core.sysctl_somaxconn = SOMAXCONN; |
7605 | + return 0; |
7606 | +} |
7607 | + |
7608 | +static struct pernet_operations net_defaults_ops = { |
7609 | + .init = net_defaults_init_net, |
7610 | +}; |
7611 | + |
7612 | +static __init int net_defaults_init(void) |
7613 | +{ |
7614 | + if (register_pernet_subsys(&net_defaults_ops)) |
7615 | + panic("Cannot initialize net default settings"); |
7616 | + |
7617 | + return 0; |
7618 | +} |
7619 | + |
7620 | +core_initcall(net_defaults_init); |
7621 | |
7622 | #ifdef CONFIG_NET_NS |
7623 | static struct ucounts *inc_net_namespaces(struct user_namespace *ns) |
7624 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
7625 | index c5ac9f48f058..fb422dfec848 100644 |
7626 | --- a/net/core/skbuff.c |
7627 | +++ b/net/core/skbuff.c |
7628 | @@ -2615,7 +2615,8 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) |
7629 | { |
7630 | int pos = skb_headlen(skb); |
7631 | |
7632 | - skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; |
7633 | + skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & |
7634 | + SKBTX_SHARED_FRAG; |
7635 | if (len < pos) /* Split line is inside header. */ |
7636 | skb_split_inside_header(skb, skb1, len, pos); |
7637 | else /* Second chunk has no header, nothing to copy. */ |
7638 | @@ -3228,8 +3229,8 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, |
7639 | skb_copy_from_linear_data_offset(head_skb, offset, |
7640 | skb_put(nskb, hsize), hsize); |
7641 | |
7642 | - skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & |
7643 | - SKBTX_SHARED_FRAG; |
7644 | + skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags & |
7645 | + SKBTX_SHARED_FRAG; |
7646 | |
7647 | while (pos < offset + len) { |
7648 | if (i >= nfrags) { |
7649 | @@ -3475,24 +3476,18 @@ void __init skb_init(void) |
7650 | NULL); |
7651 | } |
7652 | |
7653 | -/** |
7654 | - * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
7655 | - * @skb: Socket buffer containing the buffers to be mapped |
7656 | - * @sg: The scatter-gather list to map into |
7657 | - * @offset: The offset into the buffer's contents to start mapping |
7658 | - * @len: Length of buffer space to be mapped |
7659 | - * |
7660 | - * Fill the specified scatter-gather list with mappings/pointers into a |
7661 | - * region of the buffer space attached to a socket buffer. |
7662 | - */ |
7663 | static int |
7664 | -__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7665 | +__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, |
7666 | + unsigned int recursion_level) |
7667 | { |
7668 | int start = skb_headlen(skb); |
7669 | int i, copy = start - offset; |
7670 | struct sk_buff *frag_iter; |
7671 | int elt = 0; |
7672 | |
7673 | + if (unlikely(recursion_level >= 24)) |
7674 | + return -EMSGSIZE; |
7675 | + |
7676 | if (copy > 0) { |
7677 | if (copy > len) |
7678 | copy = len; |
7679 | @@ -3511,6 +3506,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7680 | end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); |
7681 | if ((copy = end - offset) > 0) { |
7682 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
7683 | + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
7684 | + return -EMSGSIZE; |
7685 | |
7686 | if (copy > len) |
7687 | copy = len; |
7688 | @@ -3525,16 +3522,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7689 | } |
7690 | |
7691 | skb_walk_frags(skb, frag_iter) { |
7692 | - int end; |
7693 | + int end, ret; |
7694 | |
7695 | WARN_ON(start > offset + len); |
7696 | |
7697 | end = start + frag_iter->len; |
7698 | if ((copy = end - offset) > 0) { |
7699 | + if (unlikely(elt && sg_is_last(&sg[elt - 1]))) |
7700 | + return -EMSGSIZE; |
7701 | + |
7702 | if (copy > len) |
7703 | copy = len; |
7704 | - elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
7705 | - copy); |
7706 | + ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, |
7707 | + copy, recursion_level + 1); |
7708 | + if (unlikely(ret < 0)) |
7709 | + return ret; |
7710 | + elt += ret; |
7711 | if ((len -= copy) == 0) |
7712 | return elt; |
7713 | offset += copy; |
7714 | @@ -3545,6 +3548,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7715 | return elt; |
7716 | } |
7717 | |
7718 | +/** |
7719 | + * skb_to_sgvec - Fill a scatter-gather list from a socket buffer |
7720 | + * @skb: Socket buffer containing the buffers to be mapped |
7721 | + * @sg: The scatter-gather list to map into |
7722 | + * @offset: The offset into the buffer's contents to start mapping |
7723 | + * @len: Length of buffer space to be mapped |
7724 | + * |
7725 | + * Fill the specified scatter-gather list with mappings/pointers into a |
7726 | + * region of the buffer space attached to a socket buffer. Returns either |
7727 | + * the number of scatterlist items used, or -EMSGSIZE if the contents |
7728 | + * could not fit. |
7729 | + */ |
7730 | +int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7731 | +{ |
7732 | + int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); |
7733 | + |
7734 | + if (nsg <= 0) |
7735 | + return nsg; |
7736 | + |
7737 | + sg_mark_end(&sg[nsg - 1]); |
7738 | + |
7739 | + return nsg; |
7740 | +} |
7741 | +EXPORT_SYMBOL_GPL(skb_to_sgvec); |
7742 | + |
7743 | /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given |
7744 | * sglist without mark the sg which contain last skb data as the end. |
7745 | * So the caller can mannipulate sg list as will when padding new data after |
7746 | @@ -3567,19 +3595,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7747 | int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, |
7748 | int offset, int len) |
7749 | { |
7750 | - return __skb_to_sgvec(skb, sg, offset, len); |
7751 | + return __skb_to_sgvec(skb, sg, offset, len, 0); |
7752 | } |
7753 | EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark); |
7754 | |
7755 | -int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
7756 | -{ |
7757 | - int nsg = __skb_to_sgvec(skb, sg, offset, len); |
7758 | |
7759 | - sg_mark_end(&sg[nsg - 1]); |
7760 | - |
7761 | - return nsg; |
7762 | -} |
7763 | -EXPORT_SYMBOL_GPL(skb_to_sgvec); |
7764 | |
7765 | /** |
7766 | * skb_cow_data - Check that a socket buffer's data buffers are writable |
7767 | @@ -3862,7 +3882,8 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, |
7768 | return; |
7769 | |
7770 | if (tsonly) { |
7771 | - skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags; |
7772 | + skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & |
7773 | + SKBTX_ANY_TSTAMP; |
7774 | skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; |
7775 | } |
7776 | |
7777 | diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c |
7778 | index 1b4619008c4e..546ba76b35a5 100644 |
7779 | --- a/net/core/sysctl_net_core.c |
7780 | +++ b/net/core/sysctl_net_core.c |
7781 | @@ -438,8 +438,6 @@ static __net_init int sysctl_core_net_init(struct net *net) |
7782 | { |
7783 | struct ctl_table *tbl; |
7784 | |
7785 | - net->core.sysctl_somaxconn = SOMAXCONN; |
7786 | - |
7787 | tbl = netns_core_table; |
7788 | if (!net_eq(net, &init_net)) { |
7789 | tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); |
7790 | diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c |
7791 | index 4ebe2aa3e7d3..04b5450c5a55 100644 |
7792 | --- a/net/hsr/hsr_forward.c |
7793 | +++ b/net/hsr/hsr_forward.c |
7794 | @@ -324,8 +324,7 @@ static int hsr_fill_frame_info(struct hsr_frame_info *frame, |
7795 | unsigned long irqflags; |
7796 | |
7797 | frame->is_supervision = is_supervision_frame(port->hsr, skb); |
7798 | - frame->node_src = hsr_get_node(&port->hsr->node_db, skb, |
7799 | - frame->is_supervision); |
7800 | + frame->node_src = hsr_get_node(port, skb, frame->is_supervision); |
7801 | if (frame->node_src == NULL) |
7802 | return -1; /* Unknown node and !is_supervision, or no mem */ |
7803 | |
7804 | diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c |
7805 | index 7ea925816f79..284a9b820df8 100644 |
7806 | --- a/net/hsr/hsr_framereg.c |
7807 | +++ b/net/hsr/hsr_framereg.c |
7808 | @@ -158,9 +158,10 @@ struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], |
7809 | |
7810 | /* Get the hsr_node from which 'skb' was sent. |
7811 | */ |
7812 | -struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, |
7813 | +struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, |
7814 | bool is_sup) |
7815 | { |
7816 | + struct list_head *node_db = &port->hsr->node_db; |
7817 | struct hsr_node *node; |
7818 | struct ethhdr *ethhdr; |
7819 | u16 seq_out; |
7820 | @@ -186,7 +187,11 @@ struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, |
7821 | */ |
7822 | seq_out = hsr_get_skb_sequence_nr(skb) - 1; |
7823 | } else { |
7824 | - WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); |
7825 | + /* this is called also for frames from master port and |
7826 | + * so warn only for non master ports |
7827 | + */ |
7828 | + if (port->type != HSR_PT_MASTER) |
7829 | + WARN_ONCE(1, "%s: Non-HSR frame\n", __func__); |
7830 | seq_out = HSR_SEQNR_START; |
7831 | } |
7832 | |
7833 | diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h |
7834 | index 438b40f98f5a..4e04f0e868e9 100644 |
7835 | --- a/net/hsr/hsr_framereg.h |
7836 | +++ b/net/hsr/hsr_framereg.h |
7837 | @@ -18,7 +18,7 @@ struct hsr_node; |
7838 | |
7839 | struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], |
7840 | u16 seq_out); |
7841 | -struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb, |
7842 | +struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, |
7843 | bool is_sup); |
7844 | void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, |
7845 | struct hsr_port *port); |
7846 | diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c |
7847 | index e0bd013a1e5e..bf5d26d83af0 100644 |
7848 | --- a/net/ieee802154/socket.c |
7849 | +++ b/net/ieee802154/socket.c |
7850 | @@ -304,12 +304,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
7851 | skb->sk = sk; |
7852 | skb->protocol = htons(ETH_P_IEEE802154); |
7853 | |
7854 | - dev_put(dev); |
7855 | - |
7856 | err = dev_queue_xmit(skb); |
7857 | if (err > 0) |
7858 | err = net_xmit_errno(err); |
7859 | |
7860 | + dev_put(dev); |
7861 | + |
7862 | return err ?: size; |
7863 | |
7864 | out_skb: |
7865 | @@ -693,12 +693,12 @@ static int dgram_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) |
7866 | skb->sk = sk; |
7867 | skb->protocol = htons(ETH_P_IEEE802154); |
7868 | |
7869 | - dev_put(dev); |
7870 | - |
7871 | err = dev_queue_xmit(skb); |
7872 | if (err > 0) |
7873 | err = net_xmit_errno(err); |
7874 | |
7875 | + dev_put(dev); |
7876 | + |
7877 | return err ?: size; |
7878 | |
7879 | out_skb: |
7880 | diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c |
7881 | index 22377c8ff14b..e8f862358518 100644 |
7882 | --- a/net/ipv4/ah4.c |
7883 | +++ b/net/ipv4/ah4.c |
7884 | @@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb) |
7885 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); |
7886 | |
7887 | sg_init_table(sg, nfrags + sglists); |
7888 | - skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
7889 | + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
7890 | + if (unlikely(err < 0)) |
7891 | + goto out_free; |
7892 | |
7893 | if (x->props.flags & XFRM_STATE_ESN) { |
7894 | /* Attach seqhi sg right after packet payload */ |
7895 | @@ -393,7 +395,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb) |
7896 | skb_push(skb, ihl); |
7897 | |
7898 | sg_init_table(sg, nfrags + sglists); |
7899 | - skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
7900 | + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
7901 | + if (unlikely(err < 0)) |
7902 | + goto out_free; |
7903 | |
7904 | if (x->props.flags & XFRM_STATE_ESN) { |
7905 | /* Attach seqhi sg right after packet payload */ |
7906 | diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c |
7907 | index e60517eb1c3a..8cae791a7ace 100644 |
7908 | --- a/net/ipv4/arp.c |
7909 | +++ b/net/ipv4/arp.c |
7910 | @@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev) |
7911 | /*unsigned long now; */ |
7912 | struct net *net = dev_net(dev); |
7913 | |
7914 | - rt = ip_route_output(net, sip, tip, 0, 0); |
7915 | + rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev)); |
7916 | if (IS_ERR(rt)) |
7917 | return 1; |
7918 | if (rt->dst.dev != dev) { |
7919 | @@ -658,6 +658,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) |
7920 | unsigned char *arp_ptr; |
7921 | struct rtable *rt; |
7922 | unsigned char *sha; |
7923 | + unsigned char *tha = NULL; |
7924 | __be32 sip, tip; |
7925 | u16 dev_type = dev->type; |
7926 | int addr_type; |
7927 | @@ -729,6 +730,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) |
7928 | break; |
7929 | #endif |
7930 | default: |
7931 | + tha = arp_ptr; |
7932 | arp_ptr += dev->addr_len; |
7933 | } |
7934 | memcpy(&tip, arp_ptr, 4); |
7935 | @@ -847,8 +849,18 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb) |
7936 | It is possible, that this option should be enabled for some |
7937 | devices (strip is candidate) |
7938 | */ |
7939 | - is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip && |
7940 | - addr_type == RTN_UNICAST; |
7941 | + is_garp = tip == sip && addr_type == RTN_UNICAST; |
7942 | + |
7943 | + /* Unsolicited ARP _replies_ also require target hwaddr to be |
7944 | + * the same as source. |
7945 | + */ |
7946 | + if (is_garp && arp->ar_op == htons(ARPOP_REPLY)) |
7947 | + is_garp = |
7948 | + /* IPv4 over IEEE 1394 doesn't provide target |
7949 | + * hardware address field in its ARP payload. |
7950 | + */ |
7951 | + tha && |
7952 | + !memcmp(tha, sha, dev->addr_len); |
7953 | |
7954 | if (!n && |
7955 | ((arp->ar_op == htons(ARPOP_REPLY) && |
7956 | diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c |
7957 | index 20fb25e3027b..3d8021d55336 100644 |
7958 | --- a/net/ipv4/esp4.c |
7959 | +++ b/net/ipv4/esp4.c |
7960 | @@ -268,10 +268,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) |
7961 | esph->spi = x->id.spi; |
7962 | |
7963 | sg_init_table(sg, nfrags); |
7964 | - skb_to_sgvec(skb, sg, |
7965 | - (unsigned char *)esph - skb->data, |
7966 | - assoclen + ivlen + clen + alen); |
7967 | - |
7968 | + err = skb_to_sgvec(skb, sg, |
7969 | + (unsigned char *)esph - skb->data, |
7970 | + assoclen + ivlen + clen + alen); |
7971 | + if (unlikely(err < 0)) |
7972 | + goto error; |
7973 | aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); |
7974 | aead_request_set_ad(req, assoclen); |
7975 | |
7976 | @@ -481,7 +482,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) |
7977 | } |
7978 | |
7979 | sg_init_table(sg, nfrags); |
7980 | - skb_to_sgvec(skb, sg, 0, skb->len); |
7981 | + err = skb_to_sgvec(skb, sg, 0, skb->len); |
7982 | + if (unlikely(err < 0)) |
7983 | + goto out; |
7984 | |
7985 | aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); |
7986 | aead_request_set_ad(req, assoclen); |
7987 | diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c |
7988 | index 7e7b7a3efa99..e1be24416c0e 100644 |
7989 | --- a/net/ipv4/fib_semantics.c |
7990 | +++ b/net/ipv4/fib_semantics.c |
7991 | @@ -1611,18 +1611,20 @@ void fib_select_multipath(struct fib_result *res, int hash) |
7992 | bool first = false; |
7993 | |
7994 | for_nexthops(fi) { |
7995 | + if (net->ipv4.sysctl_fib_multipath_use_neigh) { |
7996 | + if (!fib_good_nh(nh)) |
7997 | + continue; |
7998 | + if (!first) { |
7999 | + res->nh_sel = nhsel; |
8000 | + first = true; |
8001 | + } |
8002 | + } |
8003 | + |
8004 | if (hash > atomic_read(&nh->nh_upper_bound)) |
8005 | continue; |
8006 | |
8007 | - if (!net->ipv4.sysctl_fib_multipath_use_neigh || |
8008 | - fib_good_nh(nh)) { |
8009 | - res->nh_sel = nhsel; |
8010 | - return; |
8011 | - } |
8012 | - if (!first) { |
8013 | - res->nh_sel = nhsel; |
8014 | - first = true; |
8015 | - } |
8016 | + res->nh_sel = nhsel; |
8017 | + return; |
8018 | } endfor_nexthops(fi); |
8019 | } |
8020 | #endif |
8021 | diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c |
8022 | index 96536a0d6e2d..e1271e75e107 100644 |
8023 | --- a/net/ipv4/ip_tunnel.c |
8024 | +++ b/net/ipv4/ip_tunnel.c |
8025 | @@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net, |
8026 | struct net_device *dev; |
8027 | char name[IFNAMSIZ]; |
8028 | |
8029 | - if (parms->name[0]) |
8030 | + err = -E2BIG; |
8031 | + if (parms->name[0]) { |
8032 | + if (!dev_valid_name(parms->name)) |
8033 | + goto failed; |
8034 | strlcpy(name, parms->name, IFNAMSIZ); |
8035 | - else { |
8036 | - if (strlen(ops->kind) > (IFNAMSIZ - 3)) { |
8037 | - err = -E2BIG; |
8038 | + } else { |
8039 | + if (strlen(ops->kind) > (IFNAMSIZ - 3)) |
8040 | goto failed; |
8041 | - } |
8042 | strlcpy(name, ops->kind, IFNAMSIZ); |
8043 | strncat(name, "%d", 2); |
8044 | } |
8045 | diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c |
8046 | index 27089f5ebbb1..742a3432c3ea 100644 |
8047 | --- a/net/ipv4/ipmr.c |
8048 | +++ b/net/ipv4/ipmr.c |
8049 | @@ -1929,6 +1929,20 @@ int ip_mr_input(struct sk_buff *skb) |
8050 | struct net *net = dev_net(skb->dev); |
8051 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
8052 | struct mr_table *mrt; |
8053 | + struct net_device *dev; |
8054 | + |
8055 | + /* skb->dev passed in is the loX master dev for vrfs. |
8056 | + * As there are no vifs associated with loopback devices, |
8057 | + * get the proper interface that does have a vif associated with it. |
8058 | + */ |
8059 | + dev = skb->dev; |
8060 | + if (netif_is_l3_master(skb->dev)) { |
8061 | + dev = dev_get_by_index_rcu(net, IPCB(skb)->iif); |
8062 | + if (!dev) { |
8063 | + kfree_skb(skb); |
8064 | + return -ENODEV; |
8065 | + } |
8066 | + } |
8067 | |
8068 | /* Packet is looped back after forward, it should not be |
8069 | * forwarded second time, but still can be delivered locally. |
8070 | @@ -1966,7 +1980,7 @@ int ip_mr_input(struct sk_buff *skb) |
8071 | /* already under rcu_read_lock() */ |
8072 | cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); |
8073 | if (!cache) { |
8074 | - int vif = ipmr_find_vif(mrt, skb->dev); |
8075 | + int vif = ipmr_find_vif(mrt, dev); |
8076 | |
8077 | if (vif >= 0) |
8078 | cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, |
8079 | @@ -1986,7 +2000,7 @@ int ip_mr_input(struct sk_buff *skb) |
8080 | } |
8081 | |
8082 | read_lock(&mrt_lock); |
8083 | - vif = ipmr_find_vif(mrt, skb->dev); |
8084 | + vif = ipmr_find_vif(mrt, dev); |
8085 | if (vif >= 0) { |
8086 | int err2 = ipmr_cache_unresolved(mrt, vif, skb); |
8087 | read_unlock(&mrt_lock); |
8088 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
8089 | index b5f264ae3bff..eb05ad940e37 100644 |
8090 | --- a/net/ipv4/tcp_input.c |
8091 | +++ b/net/ipv4/tcp_input.c |
8092 | @@ -115,6 +115,7 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2; |
8093 | #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ |
8094 | #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ |
8095 | #define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ |
8096 | +#define FLAG_NO_CHALLENGE_ACK 0x8000 /* do not call tcp_send_challenge_ack() */ |
8097 | |
8098 | #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) |
8099 | #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) |
8100 | @@ -3618,7 +3619,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) |
8101 | if (before(ack, prior_snd_una)) { |
8102 | /* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */ |
8103 | if (before(ack, prior_snd_una - tp->max_window)) { |
8104 | - tcp_send_challenge_ack(sk, skb); |
8105 | + if (!(flag & FLAG_NO_CHALLENGE_ACK)) |
8106 | + tcp_send_challenge_ack(sk, skb); |
8107 | return -1; |
8108 | } |
8109 | goto old_ack; |
8110 | @@ -5969,13 +5971,17 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) |
8111 | |
8112 | /* step 5: check the ACK field */ |
8113 | acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | |
8114 | - FLAG_UPDATE_TS_RECENT) > 0; |
8115 | + FLAG_UPDATE_TS_RECENT | |
8116 | + FLAG_NO_CHALLENGE_ACK) > 0; |
8117 | |
8118 | + if (!acceptable) { |
8119 | + if (sk->sk_state == TCP_SYN_RECV) |
8120 | + return 1; /* send one RST */ |
8121 | + tcp_send_challenge_ack(sk, skb); |
8122 | + goto discard; |
8123 | + } |
8124 | switch (sk->sk_state) { |
8125 | case TCP_SYN_RECV: |
8126 | - if (!acceptable) |
8127 | - return 1; |
8128 | - |
8129 | if (!tp->srtt_us) |
8130 | tcp_synack_rtt_meas(sk, req); |
8131 | |
8132 | @@ -6045,14 +6051,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) |
8133 | * our SYNACK so stop the SYNACK timer. |
8134 | */ |
8135 | if (req) { |
8136 | - /* Return RST if ack_seq is invalid. |
8137 | - * Note that RFC793 only says to generate a |
8138 | - * DUPACK for it but for TCP Fast Open it seems |
8139 | - * better to treat this case like TCP_SYN_RECV |
8140 | - * above. |
8141 | - */ |
8142 | - if (!acceptable) |
8143 | - return 1; |
8144 | /* We no longer need the request sock. */ |
8145 | reqsk_fastopen_remove(sk, req, false); |
8146 | tcp_rearm_rto(sk); |
8147 | diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c |
8148 | index 1594d9fc9c92..3a27cf762da1 100644 |
8149 | --- a/net/ipv6/addrconf.c |
8150 | +++ b/net/ipv6/addrconf.c |
8151 | @@ -988,7 +988,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, |
8152 | INIT_HLIST_NODE(&ifa->addr_lst); |
8153 | ifa->scope = scope; |
8154 | ifa->prefix_len = pfxlen; |
8155 | - ifa->flags = flags | IFA_F_TENTATIVE; |
8156 | + ifa->flags = flags; |
8157 | + /* No need to add the TENTATIVE flag for addresses with NODAD */ |
8158 | + if (!(flags & IFA_F_NODAD)) |
8159 | + ifa->flags |= IFA_F_TENTATIVE; |
8160 | ifa->valid_lft = valid_lft; |
8161 | ifa->prefered_lft = prefered_lft; |
8162 | ifa->cstamp = ifa->tstamp = jiffies; |
8163 | diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c |
8164 | index 0630a4d5daaa..0edc44cb254e 100644 |
8165 | --- a/net/ipv6/ah6.c |
8166 | +++ b/net/ipv6/ah6.c |
8167 | @@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) |
8168 | ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); |
8169 | |
8170 | sg_init_table(sg, nfrags + sglists); |
8171 | - skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
8172 | + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
8173 | + if (unlikely(err < 0)) |
8174 | + goto out_free; |
8175 | |
8176 | if (x->props.flags & XFRM_STATE_ESN) { |
8177 | /* Attach seqhi sg right after packet payload */ |
8178 | @@ -603,7 +605,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb) |
8179 | ip6h->hop_limit = 0; |
8180 | |
8181 | sg_init_table(sg, nfrags + sglists); |
8182 | - skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
8183 | + err = skb_to_sgvec_nomark(skb, sg, 0, skb->len); |
8184 | + if (unlikely(err < 0)) |
8185 | + goto out_free; |
8186 | |
8187 | if (x->props.flags & XFRM_STATE_ESN) { |
8188 | /* Attach seqhi sg right after packet payload */ |
8189 | diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c |
8190 | index 111ba55fd512..6a924be66e37 100644 |
8191 | --- a/net/ipv6/esp6.c |
8192 | +++ b/net/ipv6/esp6.c |
8193 | @@ -248,9 +248,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) |
8194 | esph->spi = x->id.spi; |
8195 | |
8196 | sg_init_table(sg, nfrags); |
8197 | - skb_to_sgvec(skb, sg, |
8198 | - (unsigned char *)esph - skb->data, |
8199 | - assoclen + ivlen + clen + alen); |
8200 | + err = skb_to_sgvec(skb, sg, |
8201 | + (unsigned char *)esph - skb->data, |
8202 | + assoclen + ivlen + clen + alen); |
8203 | + if (unlikely(err < 0)) |
8204 | + goto error; |
8205 | |
8206 | aead_request_set_crypt(req, sg, sg, ivlen + clen, iv); |
8207 | aead_request_set_ad(req, assoclen); |
8208 | @@ -423,7 +425,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb) |
8209 | } |
8210 | |
8211 | sg_init_table(sg, nfrags); |
8212 | - skb_to_sgvec(skb, sg, 0, skb->len); |
8213 | + ret = skb_to_sgvec(skb, sg, 0, skb->len); |
8214 | + if (unlikely(ret < 0)) |
8215 | + goto out; |
8216 | |
8217 | aead_request_set_crypt(req, sg, sg, elen + ivlen, iv); |
8218 | aead_request_set_ad(req, assoclen); |
8219 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
8220 | index db2613b4a049..caee5530ae2c 100644 |
8221 | --- a/net/ipv6/ip6_gre.c |
8222 | +++ b/net/ipv6/ip6_gre.c |
8223 | @@ -319,11 +319,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net, |
8224 | if (t || !create) |
8225 | return t; |
8226 | |
8227 | - if (parms->name[0]) |
8228 | + if (parms->name[0]) { |
8229 | + if (!dev_valid_name(parms->name)) |
8230 | + return NULL; |
8231 | strlcpy(name, parms->name, IFNAMSIZ); |
8232 | - else |
8233 | + } else { |
8234 | strcpy(name, "ip6gre%d"); |
8235 | - |
8236 | + } |
8237 | dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, |
8238 | ip6gre_tunnel_setup); |
8239 | if (!dev) |
8240 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
8241 | index 2e3db3619858..58a6eeeacbf7 100644 |
8242 | --- a/net/ipv6/ip6_output.c |
8243 | +++ b/net/ipv6/ip6_output.c |
8244 | @@ -356,6 +356,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb) |
8245 | static inline int ip6_forward_finish(struct net *net, struct sock *sk, |
8246 | struct sk_buff *skb) |
8247 | { |
8248 | + struct dst_entry *dst = skb_dst(skb); |
8249 | + |
8250 | + __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
8251 | + __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); |
8252 | + |
8253 | return dst_output(net, sk, skb); |
8254 | } |
8255 | |
8256 | @@ -549,8 +554,6 @@ int ip6_forward(struct sk_buff *skb) |
8257 | |
8258 | hdr->hop_limit--; |
8259 | |
8260 | - __IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); |
8261 | - __IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len); |
8262 | return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, |
8263 | net, NULL, skb, skb->dev, dst->dev, |
8264 | ip6_forward_finish); |
8265 | @@ -1291,7 +1294,7 @@ static int __ip6_append_data(struct sock *sk, |
8266 | const struct sockcm_cookie *sockc) |
8267 | { |
8268 | struct sk_buff *skb, *skb_prev = NULL; |
8269 | - unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; |
8270 | + unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu; |
8271 | int exthdrlen = 0; |
8272 | int dst_exthdrlen = 0; |
8273 | int hh_len; |
8274 | @@ -1327,6 +1330,12 @@ static int __ip6_append_data(struct sock *sk, |
8275 | sizeof(struct frag_hdr) : 0) + |
8276 | rt->rt6i_nfheader_len; |
8277 | |
8278 | + /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit |
8279 | + * the first fragment |
8280 | + */ |
8281 | + if (headersize + transhdrlen > mtu) |
8282 | + goto emsgsize; |
8283 | + |
8284 | if (cork->length + length > mtu - headersize && ipc6->dontfrag && |
8285 | (sk->sk_protocol == IPPROTO_UDP || |
8286 | sk->sk_protocol == IPPROTO_RAW)) { |
8287 | @@ -1342,9 +1351,8 @@ static int __ip6_append_data(struct sock *sk, |
8288 | |
8289 | if (cork->length + length > maxnonfragsize - headersize) { |
8290 | emsgsize: |
8291 | - ipv6_local_error(sk, EMSGSIZE, fl6, |
8292 | - mtu - headersize + |
8293 | - sizeof(struct ipv6hdr)); |
8294 | + pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0); |
8295 | + ipv6_local_error(sk, EMSGSIZE, fl6, pmtu); |
8296 | return -EMSGSIZE; |
8297 | } |
8298 | |
8299 | diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c |
8300 | index a2fcf7bdb597..417af5ea2509 100644 |
8301 | --- a/net/ipv6/ip6_tunnel.c |
8302 | +++ b/net/ipv6/ip6_tunnel.c |
8303 | @@ -298,13 +298,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p) |
8304 | struct net_device *dev; |
8305 | struct ip6_tnl *t; |
8306 | char name[IFNAMSIZ]; |
8307 | - int err = -ENOMEM; |
8308 | + int err = -E2BIG; |
8309 | |
8310 | - if (p->name[0]) |
8311 | + if (p->name[0]) { |
8312 | + if (!dev_valid_name(p->name)) |
8313 | + goto failed; |
8314 | strlcpy(name, p->name, IFNAMSIZ); |
8315 | - else |
8316 | + } else { |
8317 | sprintf(name, "ip6tnl%%d"); |
8318 | - |
8319 | + } |
8320 | + err = -ENOMEM; |
8321 | dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, |
8322 | ip6_tnl_dev_setup); |
8323 | if (!dev) |
8324 | @@ -1097,6 +1100,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, |
8325 | |
8326 | if (!dst) { |
8327 | route_lookup: |
8328 | + /* add dsfield to flowlabel for route lookup */ |
8329 | + fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel); |
8330 | + |
8331 | dst = ip6_route_output(net, NULL, fl6); |
8332 | |
8333 | if (dst->error) |
8334 | diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c |
8335 | index 912333586de6..beae93fd66d5 100644 |
8336 | --- a/net/ipv6/ip6_vti.c |
8337 | +++ b/net/ipv6/ip6_vti.c |
8338 | @@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p |
8339 | char name[IFNAMSIZ]; |
8340 | int err; |
8341 | |
8342 | - if (p->name[0]) |
8343 | + if (p->name[0]) { |
8344 | + if (!dev_valid_name(p->name)) |
8345 | + goto failed; |
8346 | strlcpy(name, p->name, IFNAMSIZ); |
8347 | - else |
8348 | + } else { |
8349 | sprintf(name, "ip6_vti%%d"); |
8350 | + } |
8351 | |
8352 | dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup); |
8353 | if (!dev) |
8354 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
8355 | index a8f80bd20c55..d6a4b2c73a7c 100644 |
8356 | --- a/net/ipv6/route.c |
8357 | +++ b/net/ipv6/route.c |
8358 | @@ -856,6 +856,9 @@ static struct rt6_info *ip6_pol_route_lookup(struct net *net, |
8359 | struct fib6_node *fn; |
8360 | struct rt6_info *rt; |
8361 | |
8362 | + if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) |
8363 | + flags &= ~RT6_LOOKUP_F_IFACE; |
8364 | + |
8365 | read_lock_bh(&table->tb6_lock); |
8366 | fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); |
8367 | restart: |
8368 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
8369 | index d4d84da28672..dcb292134c21 100644 |
8370 | --- a/net/ipv6/sit.c |
8371 | +++ b/net/ipv6/sit.c |
8372 | @@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, |
8373 | if (!create) |
8374 | goto failed; |
8375 | |
8376 | - if (parms->name[0]) |
8377 | + if (parms->name[0]) { |
8378 | + if (!dev_valid_name(parms->name)) |
8379 | + goto failed; |
8380 | strlcpy(name, parms->name, IFNAMSIZ); |
8381 | - else |
8382 | + } else { |
8383 | strcpy(name, "sit%d"); |
8384 | - |
8385 | + } |
8386 | dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, |
8387 | ipip6_tunnel_setup); |
8388 | if (!dev) |
8389 | @@ -657,6 +659,7 @@ static int ipip6_rcv(struct sk_buff *skb) |
8390 | if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6), |
8391 | !net_eq(tunnel->net, dev_net(tunnel->dev)))) |
8392 | goto out; |
8393 | + iph = ip_hdr(skb); |
8394 | |
8395 | err = IP_ECN_decapsulate(iph, skb); |
8396 | if (unlikely(err)) { |
8397 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
8398 | index 6482b001f19a..15150b412930 100644 |
8399 | --- a/net/key/af_key.c |
8400 | +++ b/net/key/af_key.c |
8401 | @@ -3305,7 +3305,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt, |
8402 | p += pol->sadb_x_policy_len*8; |
8403 | sec_ctx = (struct sadb_x_sec_ctx *)p; |
8404 | if (len < pol->sadb_x_policy_len*8 + |
8405 | - sec_ctx->sadb_x_sec_len) { |
8406 | + sec_ctx->sadb_x_sec_len*8) { |
8407 | *dir = -EINVAL; |
8408 | goto out; |
8409 | } |
8410 | diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c |
8411 | index ee03bc866d1b..ce1238492c0f 100644 |
8412 | --- a/net/l2tp/l2tp_netlink.c |
8413 | +++ b/net/l2tp/l2tp_netlink.c |
8414 | @@ -750,6 +750,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl |
8415 | |
8416 | if ((session->ifname[0] && |
8417 | nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) || |
8418 | + (session->offset && |
8419 | + nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) || |
8420 | (session->cookie_len && |
8421 | nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len, |
8422 | &session->cookie[0])) || |
8423 | diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c |
8424 | index db916cf51ffe..f7caf0f5d9c8 100644 |
8425 | --- a/net/llc/af_llc.c |
8426 | +++ b/net/llc/af_llc.c |
8427 | @@ -309,6 +309,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) |
8428 | int rc = -EINVAL; |
8429 | |
8430 | dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); |
8431 | + |
8432 | + lock_sock(sk); |
8433 | if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) |
8434 | goto out; |
8435 | rc = -EAFNOSUPPORT; |
8436 | @@ -380,6 +382,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) |
8437 | out_put: |
8438 | llc_sap_put(sap); |
8439 | out: |
8440 | + release_sock(sk); |
8441 | return rc; |
8442 | } |
8443 | |
8444 | diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c |
8445 | index efa2a2fcae72..d7801f6877af 100644 |
8446 | --- a/net/mac80211/cfg.c |
8447 | +++ b/net/mac80211/cfg.c |
8448 | @@ -2341,10 +2341,17 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, |
8449 | struct ieee80211_sub_if_data *sdata; |
8450 | enum nl80211_tx_power_setting txp_type = type; |
8451 | bool update_txp_type = false; |
8452 | + bool has_monitor = false; |
8453 | |
8454 | if (wdev) { |
8455 | sdata = IEEE80211_WDEV_TO_SUB_IF(wdev); |
8456 | |
8457 | + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { |
8458 | + sdata = rtnl_dereference(local->monitor_sdata); |
8459 | + if (!sdata) |
8460 | + return -EOPNOTSUPP; |
8461 | + } |
8462 | + |
8463 | switch (type) { |
8464 | case NL80211_TX_POWER_AUTOMATIC: |
8465 | sdata->user_power_level = IEEE80211_UNSET_POWER_LEVEL; |
8466 | @@ -2383,15 +2390,34 @@ static int ieee80211_set_tx_power(struct wiphy *wiphy, |
8467 | |
8468 | mutex_lock(&local->iflist_mtx); |
8469 | list_for_each_entry(sdata, &local->interfaces, list) { |
8470 | + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) { |
8471 | + has_monitor = true; |
8472 | + continue; |
8473 | + } |
8474 | sdata->user_power_level = local->user_power_level; |
8475 | if (txp_type != sdata->vif.bss_conf.txpower_type) |
8476 | update_txp_type = true; |
8477 | sdata->vif.bss_conf.txpower_type = txp_type; |
8478 | } |
8479 | - list_for_each_entry(sdata, &local->interfaces, list) |
8480 | + list_for_each_entry(sdata, &local->interfaces, list) { |
8481 | + if (sdata->vif.type == NL80211_IFTYPE_MONITOR) |
8482 | + continue; |
8483 | ieee80211_recalc_txpower(sdata, update_txp_type); |
8484 | + } |
8485 | mutex_unlock(&local->iflist_mtx); |
8486 | |
8487 | + if (has_monitor) { |
8488 | + sdata = rtnl_dereference(local->monitor_sdata); |
8489 | + if (sdata) { |
8490 | + sdata->user_power_level = local->user_power_level; |
8491 | + if (txp_type != sdata->vif.bss_conf.txpower_type) |
8492 | + update_txp_type = true; |
8493 | + sdata->vif.bss_conf.txpower_type = txp_type; |
8494 | + |
8495 | + ieee80211_recalc_txpower(sdata, update_txp_type); |
8496 | + } |
8497 | + } |
8498 | + |
8499 | return 0; |
8500 | } |
8501 | |
8502 | diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h |
8503 | index 09f77e4a8a79..49c8a9c9b91f 100644 |
8504 | --- a/net/mac80211/driver-ops.h |
8505 | +++ b/net/mac80211/driver-ops.h |
8506 | @@ -164,7 +164,8 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, |
8507 | if (WARN_ON_ONCE(sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE || |
8508 | sdata->vif.type == NL80211_IFTYPE_NAN || |
8509 | (sdata->vif.type == NL80211_IFTYPE_MONITOR && |
8510 | - !sdata->vif.mu_mimo_owner))) |
8511 | + !sdata->vif.mu_mimo_owner && |
8512 | + !(changed & BSS_CHANGED_TXPOWER)))) |
8513 | return; |
8514 | |
8515 | if (!check_sdata_in_driver(sdata)) |
8516 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
8517 | index fdab4f1390d2..e6f42d12222e 100644 |
8518 | --- a/net/mac80211/mlme.c |
8519 | +++ b/net/mac80211/mlme.c |
8520 | @@ -4332,6 +4332,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, |
8521 | if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) |
8522 | return -EINVAL; |
8523 | |
8524 | + /* If a reconfig is happening, bail out */ |
8525 | + if (local->in_reconfig) |
8526 | + return -EBUSY; |
8527 | + |
8528 | if (assoc) { |
8529 | rcu_read_lock(); |
8530 | have_sta = sta_info_get(sdata, cbss->bssid); |
8531 | diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c |
8532 | index 750b8bf13e60..2039fd7daf4e 100644 |
8533 | --- a/net/netfilter/nf_conntrack_core.c |
8534 | +++ b/net/netfilter/nf_conntrack_core.c |
8535 | @@ -1542,7 +1542,6 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), |
8536 | struct nf_conntrack_tuple_hash *h; |
8537 | struct nf_conn *ct; |
8538 | struct hlist_nulls_node *n; |
8539 | - int cpu; |
8540 | spinlock_t *lockp; |
8541 | |
8542 | for (; *bucket < nf_conntrack_htable_size; (*bucket)++) { |
8543 | @@ -1564,24 +1563,40 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), |
8544 | cond_resched(); |
8545 | } |
8546 | |
8547 | + return NULL; |
8548 | +found: |
8549 | + atomic_inc(&ct->ct_general.use); |
8550 | + spin_unlock(lockp); |
8551 | + local_bh_enable(); |
8552 | + return ct; |
8553 | +} |
8554 | + |
8555 | +static void |
8556 | +__nf_ct_unconfirmed_destroy(struct net *net) |
8557 | +{ |
8558 | + int cpu; |
8559 | + |
8560 | for_each_possible_cpu(cpu) { |
8561 | - struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); |
8562 | + struct nf_conntrack_tuple_hash *h; |
8563 | + struct hlist_nulls_node *n; |
8564 | + struct ct_pcpu *pcpu; |
8565 | + |
8566 | + pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); |
8567 | |
8568 | spin_lock_bh(&pcpu->lock); |
8569 | hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) { |
8570 | + struct nf_conn *ct; |
8571 | + |
8572 | ct = nf_ct_tuplehash_to_ctrack(h); |
8573 | - if (iter(ct, data)) |
8574 | - set_bit(IPS_DYING_BIT, &ct->status); |
8575 | + |
8576 | + /* we cannot call iter() on unconfirmed list, the |
8577 | + * owning cpu can reallocate ct->ext at any time. |
8578 | + */ |
8579 | + set_bit(IPS_DYING_BIT, &ct->status); |
8580 | } |
8581 | spin_unlock_bh(&pcpu->lock); |
8582 | cond_resched(); |
8583 | } |
8584 | - return NULL; |
8585 | -found: |
8586 | - atomic_inc(&ct->ct_general.use); |
8587 | - spin_unlock(lockp); |
8588 | - local_bh_enable(); |
8589 | - return ct; |
8590 | } |
8591 | |
8592 | void nf_ct_iterate_cleanup(struct net *net, |
8593 | @@ -1596,6 +1611,10 @@ void nf_ct_iterate_cleanup(struct net *net, |
8594 | if (atomic_read(&net->ct.count) == 0) |
8595 | return; |
8596 | |
8597 | + __nf_ct_unconfirmed_destroy(net); |
8598 | + |
8599 | + synchronize_net(); |
8600 | + |
8601 | while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) { |
8602 | /* Time to push up daises... */ |
8603 | |
8604 | diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c |
8605 | index d49a4639465f..9e30fd0ab227 100644 |
8606 | --- a/net/netfilter/nf_conntrack_netlink.c |
8607 | +++ b/net/netfilter/nf_conntrack_netlink.c |
8608 | @@ -890,8 +890,13 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) |
8609 | } |
8610 | out: |
8611 | local_bh_enable(); |
8612 | - if (last) |
8613 | + if (last) { |
8614 | + /* nf ct hash resize happened, now clear the leftover. */ |
8615 | + if ((struct nf_conn *)cb->args[1] == last) |
8616 | + cb->args[1] = 0; |
8617 | + |
8618 | nf_ct_put(last); |
8619 | + } |
8620 | |
8621 | while (i) { |
8622 | i--; |
8623 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
8624 | index c1f59a06da6f..1e97b8d9a159 100644 |
8625 | --- a/net/netlink/af_netlink.c |
8626 | +++ b/net/netlink/af_netlink.c |
8627 | @@ -1054,6 +1054,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, |
8628 | if (addr->sa_family != AF_NETLINK) |
8629 | return -EINVAL; |
8630 | |
8631 | + if (alen < sizeof(struct sockaddr_nl)) |
8632 | + return -EINVAL; |
8633 | + |
8634 | if ((nladdr->nl_groups || nladdr->nl_pid) && |
8635 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) |
8636 | return -EPERM; |
8637 | diff --git a/net/rds/bind.c b/net/rds/bind.c |
8638 | index 095f6ce583fe..adb53ae97a02 100644 |
8639 | --- a/net/rds/bind.c |
8640 | +++ b/net/rds/bind.c |
8641 | @@ -114,6 +114,7 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port) |
8642 | rs, &addr, (int)ntohs(*port)); |
8643 | break; |
8644 | } else { |
8645 | + rs->rs_bound_addr = 0; |
8646 | rds_sock_put(rs); |
8647 | ret = -ENOMEM; |
8648 | break; |
8649 | diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c |
8650 | index 4374e7b9c7bf..90df95e18ea4 100644 |
8651 | --- a/net/rxrpc/rxkad.c |
8652 | +++ b/net/rxrpc/rxkad.c |
8653 | @@ -229,7 +229,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call, |
8654 | len &= ~(call->conn->size_align - 1); |
8655 | |
8656 | sg_init_table(sg, nsg); |
8657 | - skb_to_sgvec(skb, sg, 0, len); |
8658 | + err = skb_to_sgvec(skb, sg, 0, len); |
8659 | + if (unlikely(err < 0)) |
8660 | + goto out; |
8661 | skcipher_request_set_crypt(req, sg, sg, len, iv.x); |
8662 | crypto_skcipher_encrypt(req); |
8663 | |
8664 | @@ -325,7 +327,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, |
8665 | struct sk_buff *trailer; |
8666 | u32 data_size, buf; |
8667 | u16 check; |
8668 | - int nsg; |
8669 | + int nsg, ret; |
8670 | |
8671 | _enter(""); |
8672 | |
8673 | @@ -342,7 +344,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb, |
8674 | goto nomem; |
8675 | |
8676 | sg_init_table(sg, nsg); |
8677 | - skb_to_sgvec(skb, sg, offset, 8); |
8678 | + ret = skb_to_sgvec(skb, sg, offset, 8); |
8679 | + if (unlikely(ret < 0)) |
8680 | + return ret; |
8681 | |
8682 | /* start the decryption afresh */ |
8683 | memset(&iv, 0, sizeof(iv)); |
8684 | @@ -405,7 +409,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, |
8685 | struct sk_buff *trailer; |
8686 | u32 data_size, buf; |
8687 | u16 check; |
8688 | - int nsg; |
8689 | + int nsg, ret; |
8690 | |
8691 | _enter(",{%d}", skb->len); |
8692 | |
8693 | @@ -429,7 +433,12 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb, |
8694 | } |
8695 | |
8696 | sg_init_table(sg, nsg); |
8697 | - skb_to_sgvec(skb, sg, offset, len); |
8698 | + ret = skb_to_sgvec(skb, sg, offset, len); |
8699 | + if (unlikely(ret < 0)) { |
8700 | + if (sg != _sg) |
8701 | + kfree(sg); |
8702 | + return ret; |
8703 | + } |
8704 | |
8705 | /* decrypt from the session key */ |
8706 | token = call->conn->params.key->payload.data[0]; |
8707 | diff --git a/net/sched/act_api.c b/net/sched/act_api.c |
8708 | index f3117324146a..67adb4ecded2 100644 |
8709 | --- a/net/sched/act_api.c |
8710 | +++ b/net/sched/act_api.c |
8711 | @@ -95,8 +95,10 @@ static int tcf_dump_walker(struct tcf_hashinfo *hinfo, struct sk_buff *skb, |
8712 | continue; |
8713 | |
8714 | nest = nla_nest_start(skb, n_i); |
8715 | - if (nest == NULL) |
8716 | + if (nest == NULL) { |
8717 | + index--; |
8718 | goto nla_put_failure; |
8719 | + } |
8720 | err = tcf_action_dump_1(skb, p, 0, 0); |
8721 | if (err < 0) { |
8722 | index--; |
8723 | diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c |
8724 | index 1d3960033f61..40496f34bdb3 100644 |
8725 | --- a/net/sched/act_bpf.c |
8726 | +++ b/net/sched/act_bpf.c |
8727 | @@ -245,10 +245,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg) |
8728 | |
8729 | static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg) |
8730 | { |
8731 | - if (cfg->is_ebpf) |
8732 | - bpf_prog_put(cfg->filter); |
8733 | - else |
8734 | - bpf_prog_destroy(cfg->filter); |
8735 | + struct bpf_prog *filter = cfg->filter; |
8736 | + |
8737 | + if (filter) { |
8738 | + if (cfg->is_ebpf) |
8739 | + bpf_prog_put(filter); |
8740 | + else |
8741 | + bpf_prog_destroy(filter); |
8742 | + } |
8743 | |
8744 | kfree(cfg->bpf_ops); |
8745 | kfree(cfg->bpf_name); |
8746 | diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c |
8747 | index f85313d60a4d..bd8e86cb8743 100644 |
8748 | --- a/net/sched/act_skbmod.c |
8749 | +++ b/net/sched/act_skbmod.c |
8750 | @@ -192,7 +192,8 @@ static void tcf_skbmod_cleanup(struct tc_action *a, int bind) |
8751 | struct tcf_skbmod_params *p; |
8752 | |
8753 | p = rcu_dereference_protected(d->skbmod_p, 1); |
8754 | - kfree_rcu(p, rcu); |
8755 | + if (p) |
8756 | + kfree_rcu(p, rcu); |
8757 | } |
8758 | |
8759 | static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, |
8760 | diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c |
8761 | index b6e3abe505ac..901fb8bb9dce 100644 |
8762 | --- a/net/sched/act_tunnel_key.c |
8763 | +++ b/net/sched/act_tunnel_key.c |
8764 | @@ -196,11 +196,12 @@ static void tunnel_key_release(struct tc_action *a, int bind) |
8765 | struct tcf_tunnel_key_params *params; |
8766 | |
8767 | params = rcu_dereference_protected(t->params, 1); |
8768 | + if (params) { |
8769 | + if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) |
8770 | + dst_release(¶ms->tcft_enc_metadata->dst); |
8771 | |
8772 | - if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET) |
8773 | - dst_release(¶ms->tcft_enc_metadata->dst); |
8774 | - |
8775 | - kfree_rcu(params, rcu); |
8776 | + kfree_rcu(params, rcu); |
8777 | + } |
8778 | } |
8779 | |
8780 | static int tunnel_key_dump_addresses(struct sk_buff *skb, |
8781 | diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c |
8782 | index 11f69d4c5619..355d95a7cd81 100644 |
8783 | --- a/net/sctp/ipv6.c |
8784 | +++ b/net/sctp/ipv6.c |
8785 | @@ -727,8 +727,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) |
8786 | sctp_v6_map_v4(addr); |
8787 | } |
8788 | |
8789 | - if (addr->sa.sa_family == AF_INET) |
8790 | + if (addr->sa.sa_family == AF_INET) { |
8791 | + memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); |
8792 | return sizeof(struct sockaddr_in); |
8793 | + } |
8794 | return sizeof(struct sockaddr_in6); |
8795 | } |
8796 | |
8797 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
8798 | index 8cdd6bbe2efa..78f38056fca6 100644 |
8799 | --- a/net/sctp/socket.c |
8800 | +++ b/net/sctp/socket.c |
8801 | @@ -335,11 +335,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, |
8802 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) |
8803 | return NULL; |
8804 | |
8805 | - /* V4 mapped address are really of AF_INET family */ |
8806 | - if (addr->sa.sa_family == AF_INET6 && |
8807 | - ipv6_addr_v4mapped(&addr->v6.sin6_addr) && |
8808 | - !opt->pf->af_supported(AF_INET, opt)) |
8809 | - return NULL; |
8810 | + if (addr->sa.sa_family == AF_INET6) { |
8811 | + if (len < SIN6_LEN_RFC2133) |
8812 | + return NULL; |
8813 | + /* V4 mapped address are really of AF_INET family */ |
8814 | + if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) && |
8815 | + !opt->pf->af_supported(AF_INET, opt)) |
8816 | + return NULL; |
8817 | + } |
8818 | |
8819 | /* If we get this far, af is valid. */ |
8820 | af = sctp_get_af_specific(addr->sa.sa_family); |
8821 | @@ -1519,7 +1522,7 @@ static void sctp_close(struct sock *sk, long timeout) |
8822 | |
8823 | pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); |
8824 | |
8825 | - lock_sock(sk); |
8826 | + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); |
8827 | sk->sk_shutdown = SHUTDOWN_MASK; |
8828 | sk->sk_state = SCTP_SS_CLOSING; |
8829 | |
8830 | @@ -1569,7 +1572,7 @@ static void sctp_close(struct sock *sk, long timeout) |
8831 | * held and that should be grabbed before socket lock. |
8832 | */ |
8833 | spin_lock_bh(&net->sctp.addr_wq_lock); |
8834 | - bh_lock_sock(sk); |
8835 | + bh_lock_sock_nested(sk); |
8836 | |
8837 | /* Hold the sock, since sk_common_release() will put sock_put() |
8838 | * and we have just a little more cleanup. |
8839 | diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c |
8840 | index b5c279b22680..6cbc935ddd96 100644 |
8841 | --- a/net/strparser/strparser.c |
8842 | +++ b/net/strparser/strparser.c |
8843 | @@ -59,7 +59,7 @@ static void strp_abort_rx_strp(struct strparser *strp, int err) |
8844 | strp->rx_stopped = 1; |
8845 | |
8846 | /* Report an error on the lower socket */ |
8847 | - csk->sk_err = err; |
8848 | + csk->sk_err = -err; |
8849 | csk->sk_error_report(csk); |
8850 | } |
8851 | |
8852 | @@ -422,7 +422,7 @@ static void strp_rx_msg_timeout(unsigned long arg) |
8853 | /* Message assembly timed out */ |
8854 | STRP_STATS_INCR(strp->stats.rx_msg_timeouts); |
8855 | lock_sock(strp->sk); |
8856 | - strp->cb.abort_parser(strp, ETIMEDOUT); |
8857 | + strp->cb.abort_parser(strp, -ETIMEDOUT); |
8858 | release_sock(strp->sk); |
8859 | } |
8860 | |
8861 | diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c |
8862 | index d24d14ea8ba4..1bf9153004cd 100644 |
8863 | --- a/net/sunrpc/xprtsock.c |
8864 | +++ b/net/sunrpc/xprtsock.c |
8865 | @@ -2384,7 +2384,12 @@ static void xs_tcp_setup_socket(struct work_struct *work) |
8866 | case -EHOSTUNREACH: |
8867 | case -EADDRINUSE: |
8868 | case -ENOBUFS: |
8869 | - /* retry with existing socket, after a delay */ |
8870 | + /* |
8871 | + * xs_tcp_force_close() wakes tasks with -EIO. |
8872 | + * We need to wake them first to ensure the |
8873 | + * correct error code. |
8874 | + */ |
8875 | + xprt_wake_pending_tasks(xprt, status); |
8876 | xs_tcp_force_close(xprt); |
8877 | goto out; |
8878 | } |
8879 | diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c |
8880 | index f83b74d3e2ac..007721632b07 100644 |
8881 | --- a/net/x25/af_x25.c |
8882 | +++ b/net/x25/af_x25.c |
8883 | @@ -1790,32 +1790,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb) |
8884 | |
8885 | static int __init x25_init(void) |
8886 | { |
8887 | - int rc = proto_register(&x25_proto, 0); |
8888 | + int rc; |
8889 | |
8890 | - if (rc != 0) |
8891 | + rc = proto_register(&x25_proto, 0); |
8892 | + if (rc) |
8893 | goto out; |
8894 | |
8895 | rc = sock_register(&x25_family_ops); |
8896 | - if (rc != 0) |
8897 | + if (rc) |
8898 | goto out_proto; |
8899 | |
8900 | dev_add_pack(&x25_packet_type); |
8901 | |
8902 | rc = register_netdevice_notifier(&x25_dev_notifier); |
8903 | - if (rc != 0) |
8904 | + if (rc) |
8905 | goto out_sock; |
8906 | |
8907 | - pr_info("Linux Version 0.2\n"); |
8908 | + rc = x25_register_sysctl(); |
8909 | + if (rc) |
8910 | + goto out_dev; |
8911 | |
8912 | - x25_register_sysctl(); |
8913 | rc = x25_proc_init(); |
8914 | - if (rc != 0) |
8915 | - goto out_dev; |
8916 | + if (rc) |
8917 | + goto out_sysctl; |
8918 | + |
8919 | + pr_info("Linux Version 0.2\n"); |
8920 | + |
8921 | out: |
8922 | return rc; |
8923 | +out_sysctl: |
8924 | + x25_unregister_sysctl(); |
8925 | out_dev: |
8926 | unregister_netdevice_notifier(&x25_dev_notifier); |
8927 | out_sock: |
8928 | + dev_remove_pack(&x25_packet_type); |
8929 | sock_unregister(AF_X25); |
8930 | out_proto: |
8931 | proto_unregister(&x25_proto); |
8932 | diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c |
8933 | index 43239527a205..703d46aae7a2 100644 |
8934 | --- a/net/x25/sysctl_net_x25.c |
8935 | +++ b/net/x25/sysctl_net_x25.c |
8936 | @@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = { |
8937 | { 0, }, |
8938 | }; |
8939 | |
8940 | -void __init x25_register_sysctl(void) |
8941 | +int __init x25_register_sysctl(void) |
8942 | { |
8943 | x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table); |
8944 | + if (!x25_table_header) |
8945 | + return -ENOMEM; |
8946 | + return 0; |
8947 | } |
8948 | |
8949 | void x25_unregister_sysctl(void) |
8950 | diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c |
8951 | index fdb9742d934e..6f5635770d6a 100644 |
8952 | --- a/net/xfrm/xfrm_state.c |
8953 | +++ b/net/xfrm/xfrm_state.c |
8954 | @@ -1246,6 +1246,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig) |
8955 | x->curlft.add_time = orig->curlft.add_time; |
8956 | x->km.state = orig->km.state; |
8957 | x->km.seq = orig->km.seq; |
8958 | + x->replay = orig->replay; |
8959 | + x->preplay = orig->preplay; |
8960 | |
8961 | return x; |
8962 | |
8963 | diff --git a/scripts/tags.sh b/scripts/tags.sh |
8964 | index a2ff3388e5ea..2a61db329adf 100755 |
8965 | --- a/scripts/tags.sh |
8966 | +++ b/scripts/tags.sh |
8967 | @@ -106,6 +106,7 @@ all_compiled_sources() |
8968 | case "$i" in |
8969 | *.[cS]) |
8970 | j=${i/\.[cS]/\.o} |
8971 | + j="${j#$tree}" |
8972 | if [ -e $j ]; then |
8973 | echo $i |
8974 | fi |
8975 | diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c |
8976 | index 17627d8d5a26..8ded80867b92 100644 |
8977 | --- a/security/selinux/hooks.c |
8978 | +++ b/security/selinux/hooks.c |
8979 | @@ -2033,8 +2033,9 @@ static inline u32 file_to_av(struct file *file) |
8980 | static inline u32 open_file_to_av(struct file *file) |
8981 | { |
8982 | u32 av = file_to_av(file); |
8983 | + struct inode *inode = file_inode(file); |
8984 | |
8985 | - if (selinux_policycap_openperm) |
8986 | + if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC) |
8987 | av |= FILE__OPEN; |
8988 | |
8989 | return av; |
8990 | @@ -3031,6 +3032,7 @@ static int selinux_inode_permission(struct inode *inode, int mask) |
8991 | static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) |
8992 | { |
8993 | const struct cred *cred = current_cred(); |
8994 | + struct inode *inode = d_backing_inode(dentry); |
8995 | unsigned int ia_valid = iattr->ia_valid; |
8996 | __u32 av = FILE__WRITE; |
8997 | |
8998 | @@ -3046,8 +3048,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) |
8999 | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) |
9000 | return dentry_has_perm(cred, dentry, FILE__SETATTR); |
9001 | |
9002 | - if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE) |
9003 | - && !(ia_valid & ATTR_FILE)) |
9004 | + if (selinux_policycap_openperm && |
9005 | + inode->i_sb->s_magic != SOCKFS_MAGIC && |
9006 | + (ia_valid & ATTR_SIZE) && |
9007 | + !(ia_valid & ATTR_FILE)) |
9008 | av |= FILE__OPEN; |
9009 | |
9010 | return dentry_has_perm(cred, dentry, av); |
9011 | diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c |
9012 | index dd88c2cb6470..48804e4ab530 100644 |
9013 | --- a/sound/soc/generic/simple-card.c |
9014 | +++ b/sound/soc/generic/simple-card.c |
9015 | @@ -201,7 +201,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd) |
9016 | if (ret < 0) |
9017 | return ret; |
9018 | |
9019 | - ret = asoc_simple_card_init_mic(rtd->card, &priv->hp_jack, PREFIX); |
9020 | + ret = asoc_simple_card_init_mic(rtd->card, &priv->mic_jack, PREFIX); |
9021 | if (ret < 0) |
9022 | return ret; |
9023 | |
9024 | diff --git a/sound/soc/intel/atom/sst/sst_stream.c b/sound/soc/intel/atom/sst/sst_stream.c |
9025 | index 4ccc80e5e8cc..c798f8d4ae43 100644 |
9026 | --- a/sound/soc/intel/atom/sst/sst_stream.c |
9027 | +++ b/sound/soc/intel/atom/sst/sst_stream.c |
9028 | @@ -221,7 +221,7 @@ int sst_send_byte_stream_mrfld(struct intel_sst_drv *sst_drv_ctx, |
9029 | sst_free_block(sst_drv_ctx, block); |
9030 | out: |
9031 | test_and_clear_bit(pvt_id, &sst_drv_ctx->pvt_id); |
9032 | - return 0; |
9033 | + return ret; |
9034 | } |
9035 | |
9036 | /* |
9037 | diff --git a/sound/soc/intel/boards/cht_bsw_rt5645.c b/sound/soc/intel/boards/cht_bsw_rt5645.c |
9038 | index 90525614c20a..b1eb696f33b6 100644 |
9039 | --- a/sound/soc/intel/boards/cht_bsw_rt5645.c |
9040 | +++ b/sound/soc/intel/boards/cht_bsw_rt5645.c |
9041 | @@ -111,6 +111,7 @@ static const struct snd_soc_dapm_widget cht_dapm_widgets[] = { |
9042 | SND_SOC_DAPM_HP("Headphone", NULL), |
9043 | SND_SOC_DAPM_MIC("Headset Mic", NULL), |
9044 | SND_SOC_DAPM_MIC("Int Mic", NULL), |
9045 | + SND_SOC_DAPM_MIC("Int Analog Mic", NULL), |
9046 | SND_SOC_DAPM_SPK("Ext Spk", NULL), |
9047 | SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, |
9048 | platform_clock_control, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), |
9049 | @@ -121,6 +122,8 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = { |
9050 | {"IN1N", NULL, "Headset Mic"}, |
9051 | {"DMIC L1", NULL, "Int Mic"}, |
9052 | {"DMIC R1", NULL, "Int Mic"}, |
9053 | + {"IN2P", NULL, "Int Analog Mic"}, |
9054 | + {"IN2N", NULL, "Int Analog Mic"}, |
9055 | {"Headphone", NULL, "HPOL"}, |
9056 | {"Headphone", NULL, "HPOR"}, |
9057 | {"Ext Spk", NULL, "SPOL"}, |
9058 | @@ -134,6 +137,9 @@ static const struct snd_soc_dapm_route cht_rt5645_audio_map[] = { |
9059 | {"Headphone", NULL, "Platform Clock"}, |
9060 | {"Headset Mic", NULL, "Platform Clock"}, |
9061 | {"Int Mic", NULL, "Platform Clock"}, |
9062 | + {"Int Analog Mic", NULL, "Platform Clock"}, |
9063 | + {"Int Analog Mic", NULL, "micbias1"}, |
9064 | + {"Int Analog Mic", NULL, "micbias2"}, |
9065 | {"Ext Spk", NULL, "Platform Clock"}, |
9066 | }; |
9067 | |
9068 | @@ -162,6 +168,7 @@ static const struct snd_kcontrol_new cht_mc_controls[] = { |
9069 | SOC_DAPM_PIN_SWITCH("Headphone"), |
9070 | SOC_DAPM_PIN_SWITCH("Headset Mic"), |
9071 | SOC_DAPM_PIN_SWITCH("Int Mic"), |
9072 | + SOC_DAPM_PIN_SWITCH("Int Analog Mic"), |
9073 | SOC_DAPM_PIN_SWITCH("Ext Spk"), |
9074 | }; |
9075 | |
9076 | diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c |
9077 | index 805b7f2173f3..78472c908ae9 100644 |
9078 | --- a/sound/soc/intel/skylake/skl-messages.c |
9079 | +++ b/sound/soc/intel/skylake/skl-messages.c |
9080 | @@ -331,7 +331,11 @@ int skl_resume_dsp(struct skl *skl) |
9081 | if (skl->skl_sst->is_first_boot == true) |
9082 | return 0; |
9083 | |
9084 | + /* disable dynamic clock gating during fw and lib download */ |
9085 | + ctx->enable_miscbdcge(ctx->dev, false); |
9086 | + |
9087 | ret = skl_dsp_wake(ctx->dsp); |
9088 | + ctx->enable_miscbdcge(ctx->dev, true); |
9089 | if (ret < 0) |
9090 | return ret; |
9091 | |
9092 | diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c |
9093 | index 58c728662600..2fd213cd9a40 100644 |
9094 | --- a/sound/soc/intel/skylake/skl-pcm.c |
9095 | +++ b/sound/soc/intel/skylake/skl-pcm.c |
9096 | @@ -1191,7 +1191,11 @@ static int skl_platform_soc_probe(struct snd_soc_platform *platform) |
9097 | return -EIO; |
9098 | } |
9099 | |
9100 | + /* disable dynamic clock gating during fw and lib download */ |
9101 | + skl->skl_sst->enable_miscbdcge(platform->dev, false); |
9102 | + |
9103 | ret = ops->init_fw(platform->dev, skl->skl_sst); |
9104 | + skl->skl_sst->enable_miscbdcge(platform->dev, true); |
9105 | if (ret < 0) { |
9106 | dev_err(platform->dev, "Failed to boot first fw: %d\n", ret); |
9107 | return ret; |
9108 | diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c |
9109 | index fefa6ad5de8b..17e305b71fd9 100644 |
9110 | --- a/sound/soc/sh/rcar/ssi.c |
9111 | +++ b/sound/soc/sh/rcar/ssi.c |
9112 | @@ -552,6 +552,13 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, |
9113 | struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); |
9114 | u32 *buf = (u32 *)(runtime->dma_area + |
9115 | rsnd_dai_pointer_offset(io, 0)); |
9116 | + int shift = 0; |
9117 | + |
9118 | + switch (runtime->sample_bits) { |
9119 | + case 32: |
9120 | + shift = 8; |
9121 | + break; |
9122 | + } |
9123 | |
9124 | /* |
9125 | * 8/16/32 data can be assesse to TDR/RDR register |
9126 | @@ -559,9 +566,9 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod, |
9127 | * see rsnd_ssi_init() |
9128 | */ |
9129 | if (rsnd_io_is_play(io)) |
9130 | - rsnd_mod_write(mod, SSITDR, *buf); |
9131 | + rsnd_mod_write(mod, SSITDR, (*buf) << shift); |
9132 | else |
9133 | - *buf = rsnd_mod_read(mod, SSIRDR); |
9134 | + *buf = (rsnd_mod_read(mod, SSIRDR) >> shift); |
9135 | |
9136 | elapsed = rsnd_dai_pointer_update(io, sizeof(*buf)); |
9137 | } |
9138 | diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c |
9139 | index 4c596ba310cb..0fd8bfb77f65 100644 |
9140 | --- a/tools/perf/builtin-trace.c |
9141 | +++ b/tools/perf/builtin-trace.c |
9142 | @@ -679,6 +679,10 @@ static struct syscall_fmt { |
9143 | { .name = "mlockall", .errmsg = true, |
9144 | .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, }, |
9145 | { .name = "mmap", .hexret = true, |
9146 | +/* The standard mmap maps to old_mmap on s390x */ |
9147 | +#if defined(__s390x__) |
9148 | + .alias = "old_mmap", |
9149 | +#endif |
9150 | .arg_scnprintf = { [0] = SCA_HEX, /* addr */ |
9151 | [2] = SCA_MMAP_PROT, /* prot */ |
9152 | [3] = SCA_MMAP_FLAGS, /* flags */ }, }, |
9153 | diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c |
9154 | index ff5bc6363a79..150334064071 100644 |
9155 | --- a/tools/perf/tests/code-reading.c |
9156 | +++ b/tools/perf/tests/code-reading.c |
9157 | @@ -224,6 +224,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, |
9158 | unsigned char buf2[BUFSZ]; |
9159 | size_t ret_len; |
9160 | u64 objdump_addr; |
9161 | + const char *objdump_name; |
9162 | + char decomp_name[KMOD_DECOMP_LEN]; |
9163 | int ret; |
9164 | |
9165 | pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr); |
9166 | @@ -284,9 +286,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode, |
9167 | state->done[state->done_cnt++] = al.map->start; |
9168 | } |
9169 | |
9170 | + objdump_name = al.map->dso->long_name; |
9171 | + if (dso__needs_decompress(al.map->dso)) { |
9172 | + if (dso__decompress_kmodule_path(al.map->dso, objdump_name, |
9173 | + decomp_name, |
9174 | + sizeof(decomp_name)) < 0) { |
9175 | + pr_debug("decompression failed\n"); |
9176 | + return -1; |
9177 | + } |
9178 | + |
9179 | + objdump_name = decomp_name; |
9180 | + } |
9181 | + |
9182 | /* Read the object code using objdump */ |
9183 | objdump_addr = map__rip_2objdump(al.map, al.addr); |
9184 | - ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len); |
9185 | + ret = read_via_objdump(objdump_name, objdump_addr, buf2, len); |
9186 | + |
9187 | + if (dso__needs_decompress(al.map->dso)) |
9188 | + unlink(objdump_name); |
9189 | + |
9190 | if (ret > 0) { |
9191 | /* |
9192 | * The kernel maps are inaccurate - assume objdump is right in |
9193 | diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c |
9194 | index d2c6cdd9d42b..4bc58822416c 100644 |
9195 | --- a/tools/perf/util/dso.c |
9196 | +++ b/tools/perf/util/dso.c |
9197 | @@ -366,7 +366,23 @@ static int __open_dso(struct dso *dso, struct machine *machine) |
9198 | if (!is_regular_file(name)) |
9199 | return -EINVAL; |
9200 | |
9201 | + if (dso__needs_decompress(dso)) { |
9202 | + char newpath[KMOD_DECOMP_LEN]; |
9203 | + size_t len = sizeof(newpath); |
9204 | + |
9205 | + if (dso__decompress_kmodule_path(dso, name, newpath, len) < 0) { |
9206 | + free(name); |
9207 | + return -dso->load_errno; |
9208 | + } |
9209 | + |
9210 | + strcpy(name, newpath); |
9211 | + } |
9212 | + |
9213 | fd = do_open(name); |
9214 | + |
9215 | + if (dso__needs_decompress(dso)) |
9216 | + unlink(name); |
9217 | + |
9218 | free(name); |
9219 | return fd; |
9220 | } |
9221 | diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c |
9222 | index 28bdb48357f0..ab36aa5585b4 100644 |
9223 | --- a/tools/perf/util/header.c |
9224 | +++ b/tools/perf/util/header.c |
9225 | @@ -1454,8 +1454,16 @@ static int __event_process_build_id(struct build_id_event *bev, |
9226 | |
9227 | dso__set_build_id(dso, &bev->build_id); |
9228 | |
9229 | - if (!is_kernel_module(filename, cpumode)) |
9230 | - dso->kernel = dso_type; |
9231 | + if (dso_type != DSO_TYPE_USER) { |
9232 | + struct kmod_path m = { .name = NULL, }; |
9233 | + |
9234 | + if (!kmod_path__parse_name(&m, filename) && m.kmod) |
9235 | + dso__set_short_name(dso, strdup(m.name), true); |
9236 | + else |
9237 | + dso->kernel = dso_type; |
9238 | + |
9239 | + free(m.name); |
9240 | + } |
9241 | |
9242 | build_id__sprintf(dso->build_id, sizeof(dso->build_id), |
9243 | sbuild_id); |
9244 | diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c |
9245 | index 4d2e22f8bd94..c93daccec755 100644 |
9246 | --- a/tools/perf/util/probe-event.c |
9247 | +++ b/tools/perf/util/probe-event.c |
9248 | @@ -2609,6 +2609,14 @@ static int get_new_event_name(char *buf, size_t len, const char *base, |
9249 | |
9250 | out: |
9251 | free(nbase); |
9252 | + |
9253 | + /* Final validation */ |
9254 | + if (ret >= 0 && !is_c_func_name(buf)) { |
9255 | + pr_warning("Internal error: \"%s\" is an invalid event name.\n", |
9256 | + buf); |
9257 | + ret = -EINVAL; |
9258 | + } |
9259 | + |
9260 | return ret; |
9261 | } |
9262 | |
9263 | diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c |
9264 | index 783a53fb7a4e..b46e1cf347e5 100644 |
9265 | --- a/tools/perf/util/unwind-libdw.c |
9266 | +++ b/tools/perf/util/unwind-libdw.c |
9267 | @@ -38,6 +38,14 @@ static int __report_module(struct addr_location *al, u64 ip, |
9268 | return 0; |
9269 | |
9270 | mod = dwfl_addrmodule(ui->dwfl, ip); |
9271 | + if (mod) { |
9272 | + Dwarf_Addr s; |
9273 | + |
9274 | + dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); |
9275 | + if (s != al->map->start) |
9276 | + mod = 0; |
9277 | + } |
9278 | + |
9279 | if (!mod) |
9280 | mod = dwfl_report_elf(ui->dwfl, dso->short_name, |
9281 | dso->long_name, -1, al->map->start, |
9282 | @@ -167,12 +175,16 @@ frame_callback(Dwfl_Frame *state, void *arg) |
9283 | { |
9284 | struct unwind_info *ui = arg; |
9285 | Dwarf_Addr pc; |
9286 | + bool isactivation; |
9287 | |
9288 | - if (!dwfl_frame_pc(state, &pc, NULL)) { |
9289 | + if (!dwfl_frame_pc(state, &pc, &isactivation)) { |
9290 | pr_err("%s", dwfl_errmsg(-1)); |
9291 | return DWARF_CB_ABORT; |
9292 | } |
9293 | |
9294 | + if (!isactivation) |
9295 | + --pc; |
9296 | + |
9297 | return entry(pc, ui) || !(--ui->max_stack) ? |
9298 | DWARF_CB_ABORT : DWARF_CB_OK; |
9299 | } |
9300 | diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c |
9301 | index 20c2e5743903..120383494ff2 100644 |
9302 | --- a/tools/perf/util/unwind-libunwind-local.c |
9303 | +++ b/tools/perf/util/unwind-libunwind-local.c |
9304 | @@ -646,6 +646,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, |
9305 | |
9306 | while (!ret && (unw_step(&c) > 0) && i < max_stack) { |
9307 | unw_get_reg(&c, UNW_REG_IP, &ips[i]); |
9308 | + |
9309 | + /* |
9310 | + * Decrement the IP for any non-activation frames. |
9311 | + * this is required to properly find the srcline |
9312 | + * for caller frames. |
9313 | + * See also the documentation for dwfl_frame_pc(), |
9314 | + * which this code tries to replicate. |
9315 | + */ |
9316 | + if (unw_is_signal_frame(&c) <= 0) |
9317 | + --ips[i]; |
9318 | + |
9319 | ++i; |
9320 | } |
9321 | |
9322 | diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c |
9323 | index 85c56800f17a..dfb010bd29f2 100644 |
9324 | --- a/tools/perf/util/util.c |
9325 | +++ b/tools/perf/util/util.c |
9326 | @@ -207,7 +207,7 @@ int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size) |
9327 | |
9328 | size -= ret; |
9329 | off_in += ret; |
9330 | - off_out -= ret; |
9331 | + off_out += ret; |
9332 | } |
9333 | munmap(ptr, off_in + size); |
9334 | |
9335 | diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c |
9336 | index d9c49f41515e..e79ccd6aada1 100644 |
9337 | --- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c |
9338 | +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c |
9339 | @@ -42,12 +42,12 @@ int test_body(void) |
9340 | printf("Check DSCR TM context switch: "); |
9341 | fflush(stdout); |
9342 | for (;;) { |
9343 | - rv = 1; |
9344 | asm __volatile__ ( |
9345 | /* set a known value into the DSCR */ |
9346 | "ld 3, %[dscr1];" |
9347 | "mtspr %[sprn_dscr], 3;" |
9348 | |
9349 | + "li %[rv], 1;" |
9350 | /* start and suspend a transaction */ |
9351 | "tbegin.;" |
9352 | "beq 1f;" |
9353 | diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c |
9354 | index cbb0564c0ec4..f68998149351 100644 |
9355 | --- a/tools/testing/selftests/seccomp/seccomp_bpf.c |
9356 | +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c |
9357 | @@ -1318,7 +1318,7 @@ void change_syscall(struct __test_metadata *_metadata, |
9358 | iov.iov_len = sizeof(regs); |
9359 | ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); |
9360 | #endif |
9361 | - EXPECT_EQ(0, ret); |
9362 | + EXPECT_EQ(0, ret) {} |
9363 | |
9364 | #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \ |
9365 | defined(__s390__) || defined(__hppa__) |