Contents of /trunk/kernel-alx/patches-5.4/0112-5.4.13-all-fixes.patch
Parent Directory | Revision Log
Revision 3493 -
(show annotations)
(download)
Mon May 11 14:36:15 2020 UTC (4 years, 4 months ago) by niro
File size: 268563 byte(s)
-linux-5.4.13
1 | diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io |
2 | index 8ca498447aeb..05601a90a9b6 100644 |
3 | --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io |
4 | +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io |
5 | @@ -29,13 +29,13 @@ Description: This file shows the system fans direction: |
6 | |
7 | The files are read only. |
8 | |
9 | -What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable |
10 | +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version |
11 | |
12 | Date: November 2018 |
13 | KernelVersion: 5.0 |
14 | Contact: Vadim Pasternak <vadimp@mellanox.com> |
15 | Description: These files show with which CPLD versions have been burned |
16 | - on LED board. |
17 | + on LED or Gearbox board. |
18 | |
19 | The files are read only. |
20 | |
21 | @@ -121,6 +121,15 @@ Description: These files show the system reset cause, as following: ComEx |
22 | |
23 | The files are read only. |
24 | |
25 | +What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version |
26 | +Date: November 2018 |
27 | +KernelVersion: 5.0 |
28 | +Contact: Vadim Pasternak <vadimp@mellanox.com> |
29 | +Description: These files show with which CPLD versions have been burned |
30 | + on LED board. |
31 | + |
32 | + The files are read only. |
33 | + |
34 | Date: June 2019 |
35 | KernelVersion: 5.3 |
36 | Contact: Vadim Pasternak <vadimp@mellanox.com> |
37 | diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei |
38 | index 6bd45346ac7e..3f8701e8fa24 100644 |
39 | --- a/Documentation/ABI/testing/sysfs-bus-mei |
40 | +++ b/Documentation/ABI/testing/sysfs-bus-mei |
41 | @@ -4,7 +4,7 @@ KernelVersion: 3.10 |
42 | Contact: Samuel Ortiz <sameo@linux.intel.com> |
43 | linux-mei@linux.intel.com |
44 | Description: Stores the same MODALIAS value emitted by uevent |
45 | - Format: mei:<mei device name>:<device uuid>: |
46 | + Format: mei:<mei device name>:<device uuid>:<protocol version> |
47 | |
48 | What: /sys/bus/mei/devices/.../name |
49 | Date: May 2015 |
50 | diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst |
51 | index c77c58b8f67b..d8dec8911eb3 100644 |
52 | --- a/Documentation/admin-guide/device-mapper/index.rst |
53 | +++ b/Documentation/admin-guide/device-mapper/index.rst |
54 | @@ -8,6 +8,7 @@ Device Mapper |
55 | cache-policies |
56 | cache |
57 | delay |
58 | + dm-clone |
59 | dm-crypt |
60 | dm-flakey |
61 | dm-init |
62 | diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt |
63 | index 6e5341b4f891..ee59409640f2 100644 |
64 | --- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt |
65 | +++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt |
66 | @@ -22,6 +22,6 @@ Example: |
67 | }; |
68 | |
69 | &ethernet_switch { |
70 | - resets = <&reset>; |
71 | + resets = <&reset 26>; |
72 | reset-names = "switch"; |
73 | }; |
74 | diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt |
75 | index d6d5207fa996..17ff3892f439 100644 |
76 | --- a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt |
77 | +++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt |
78 | @@ -2,9 +2,11 @@ MT8183 with MT6358, TS3A227 and MAX98357 CODECS |
79 | |
80 | Required properties: |
81 | - compatible : "mediatek,mt8183_mt6358_ts3a227_max98357" |
82 | -- mediatek,headset-codec: the phandles of ts3a227 codecs |
83 | - mediatek,platform: the phandle of MT8183 ASoC platform |
84 | |
85 | +Optional properties: |
86 | +- mediatek,headset-codec: the phandles of ts3a227 codecs |
87 | + |
88 | Example: |
89 | |
90 | sound { |
91 | diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst |
92 | index dc60b13fcd09..f5be243d250a 100644 |
93 | --- a/Documentation/networking/j1939.rst |
94 | +++ b/Documentation/networking/j1939.rst |
95 | @@ -339,7 +339,7 @@ To claim an address following code example can be used: |
96 | .pgn = J1939_PGN_ADDRESS_CLAIMED, |
97 | .pgn_mask = J1939_PGN_PDU1_MAX, |
98 | }, { |
99 | - .pgn = J1939_PGN_ADDRESS_REQUEST, |
100 | + .pgn = J1939_PGN_REQUEST, |
101 | .pgn_mask = J1939_PGN_PDU1_MAX, |
102 | }, { |
103 | .pgn = J1939_PGN_ADDRESS_COMMANDED, |
104 | diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt |
105 | index 201f80c7c050..df129f55ace5 100644 |
106 | --- a/Documentation/scsi/smartpqi.txt |
107 | +++ b/Documentation/scsi/smartpqi.txt |
108 | @@ -29,7 +29,7 @@ smartpqi specific entries in /sys |
109 | smartpqi host attributes: |
110 | ------------------------- |
111 | /sys/class/scsi_host/host*/rescan |
112 | - /sys/class/scsi_host/host*/version |
113 | + /sys/class/scsi_host/host*/driver_version |
114 | |
115 | The host rescan attribute is a write only attribute. Writing to this |
116 | attribute will trigger the driver to scan for new, changed, or removed |
117 | diff --git a/MAINTAINERS b/MAINTAINERS |
118 | index 9d3a5c54a41d..4f7ac27d8651 100644 |
119 | --- a/MAINTAINERS |
120 | +++ b/MAINTAINERS |
121 | @@ -6973,6 +6973,7 @@ L: linux-acpi@vger.kernel.org |
122 | S: Maintained |
123 | F: Documentation/firmware-guide/acpi/gpio-properties.rst |
124 | F: drivers/gpio/gpiolib-acpi.c |
125 | +F: drivers/gpio/gpiolib-acpi.h |
126 | |
127 | GPIO IR Transmitter |
128 | M: Sean Young <sean@mess.org> |
129 | diff --git a/Makefile b/Makefile |
130 | index 45c6264f1108..d4cf4700ae3f 100644 |
131 | --- a/Makefile |
132 | +++ b/Makefile |
133 | @@ -1,7 +1,7 @@ |
134 | # SPDX-License-Identifier: GPL-2.0 |
135 | VERSION = 5 |
136 | PATCHLEVEL = 4 |
137 | -SUBLEVEL = 12 |
138 | +SUBLEVEL = 13 |
139 | EXTRAVERSION = |
140 | NAME = Kleptomaniac Octopus |
141 | |
142 | diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c |
143 | index 4b0bab2607e4..46e1be9e57a8 100644 |
144 | --- a/arch/arm/kernel/smp.c |
145 | +++ b/arch/arm/kernel/smp.c |
146 | @@ -240,6 +240,10 @@ int __cpu_disable(void) |
147 | if (ret) |
148 | return ret; |
149 | |
150 | +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY |
151 | + remove_cpu_topology(cpu); |
152 | +#endif |
153 | + |
154 | /* |
155 | * Take this CPU offline. Once we clear this, we can't return, |
156 | * and we must not schedule until we're ready to give up the cpu. |
157 | diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c |
158 | index 5b9faba03afb..8d2e61d9e7a6 100644 |
159 | --- a/arch/arm/kernel/topology.c |
160 | +++ b/arch/arm/kernel/topology.c |
161 | @@ -196,9 +196,8 @@ void store_cpu_topology(unsigned int cpuid) |
162 | struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; |
163 | unsigned int mpidr; |
164 | |
165 | - /* If the cpu topology has been already set, just return */ |
166 | - if (cpuid_topo->core_id != -1) |
167 | - return; |
168 | + if (cpuid_topo->package_id != -1) |
169 | + goto topology_populated; |
170 | |
171 | mpidr = read_cpuid_mpidr(); |
172 | |
173 | @@ -231,14 +230,15 @@ void store_cpu_topology(unsigned int cpuid) |
174 | cpuid_topo->package_id = -1; |
175 | } |
176 | |
177 | - update_siblings_masks(cpuid); |
178 | - |
179 | update_cpu_capacity(cpuid); |
180 | |
181 | pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", |
182 | cpuid, cpu_topology[cpuid].thread_id, |
183 | cpu_topology[cpuid].core_id, |
184 | cpu_topology[cpuid].package_id, mpidr); |
185 | + |
186 | +topology_populated: |
187 | + update_siblings_masks(cpuid); |
188 | } |
189 | |
190 | static inline int cpu_corepower_flags(void) |
191 | diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi |
192 | index 04ad2fb22b9a..dba3488492f1 100644 |
193 | --- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi |
194 | +++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi |
195 | @@ -623,6 +623,8 @@ |
196 | l21 { |
197 | regulator-min-microvolt = <2950000>; |
198 | regulator-max-microvolt = <2950000>; |
199 | + regulator-allow-set-load; |
200 | + regulator-system-load = <200000>; |
201 | }; |
202 | l22 { |
203 | regulator-min-microvolt = <3300000>; |
204 | diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c |
205 | index ea873b8904c4..e3e27349a9fe 100644 |
206 | --- a/arch/arm64/crypto/aes-neonbs-glue.c |
207 | +++ b/arch/arm64/crypto/aes-neonbs-glue.c |
208 | @@ -384,7 +384,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt, |
209 | goto xts_tail; |
210 | |
211 | kernel_neon_end(); |
212 | - skcipher_walk_done(&walk, nbytes); |
213 | + err = skcipher_walk_done(&walk, nbytes); |
214 | } |
215 | |
216 | if (err || likely(!tail)) |
217 | diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h |
218 | index 12cd9231c4b8..0231d69c8bf2 100644 |
219 | --- a/arch/hexagon/include/asm/atomic.h |
220 | +++ b/arch/hexagon/include/asm/atomic.h |
221 | @@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ |
222 | "1: %0 = memw_locked(%1);\n" \ |
223 | " %0 = "#op "(%0,%2);\n" \ |
224 | " memw_locked(%1,P3)=%0;\n" \ |
225 | - " if !P3 jump 1b;\n" \ |
226 | + " if (!P3) jump 1b;\n" \ |
227 | : "=&r" (output) \ |
228 | : "r" (&v->counter), "r" (i) \ |
229 | : "memory", "p3" \ |
230 | @@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ |
231 | "1: %0 = memw_locked(%1);\n" \ |
232 | " %0 = "#op "(%0,%2);\n" \ |
233 | " memw_locked(%1,P3)=%0;\n" \ |
234 | - " if !P3 jump 1b;\n" \ |
235 | + " if (!P3) jump 1b;\n" \ |
236 | : "=&r" (output) \ |
237 | : "r" (&v->counter), "r" (i) \ |
238 | : "memory", "p3" \ |
239 | @@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \ |
240 | "1: %0 = memw_locked(%2);\n" \ |
241 | " %1 = "#op "(%0,%3);\n" \ |
242 | " memw_locked(%2,P3)=%1;\n" \ |
243 | - " if !P3 jump 1b;\n" \ |
244 | + " if (!P3) jump 1b;\n" \ |
245 | : "=&r" (output), "=&r" (val) \ |
246 | : "r" (&v->counter), "r" (i) \ |
247 | : "memory", "p3" \ |
248 | @@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) |
249 | " }" |
250 | " memw_locked(%2, p3) = %1;" |
251 | " {" |
252 | - " if !p3 jump 1b;" |
253 | + " if (!p3) jump 1b;" |
254 | " }" |
255 | "2:" |
256 | : "=&r" (__oldval), "=&r" (tmp) |
257 | diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h |
258 | index 47384b094b94..71429f756af0 100644 |
259 | --- a/arch/hexagon/include/asm/bitops.h |
260 | +++ b/arch/hexagon/include/asm/bitops.h |
261 | @@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr) |
262 | "1: R12 = memw_locked(R10);\n" |
263 | " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n" |
264 | " memw_locked(R10,P1) = R12;\n" |
265 | - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" |
266 | + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" |
267 | : "=&r" (oldval) |
268 | : "r" (addr), "r" (nr) |
269 | : "r10", "r11", "r12", "p0", "p1", "memory" |
270 | @@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr) |
271 | "1: R12 = memw_locked(R10);\n" |
272 | " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n" |
273 | " memw_locked(R10,P1) = R12;\n" |
274 | - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" |
275 | + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" |
276 | : "=&r" (oldval) |
277 | : "r" (addr), "r" (nr) |
278 | : "r10", "r11", "r12", "p0", "p1", "memory" |
279 | @@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr) |
280 | "1: R12 = memw_locked(R10);\n" |
281 | " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n" |
282 | " memw_locked(R10,P1) = R12;\n" |
283 | - " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n" |
284 | + " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n" |
285 | : "=&r" (oldval) |
286 | : "r" (addr), "r" (nr) |
287 | : "r10", "r11", "r12", "p0", "p1", "memory" |
288 | @@ -223,7 +223,7 @@ static inline int ffs(int x) |
289 | int r; |
290 | |
291 | asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n" |
292 | - "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n" |
293 | + "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n" |
294 | : "=&r" (r) |
295 | : "r" (x) |
296 | : "p0"); |
297 | diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h |
298 | index 6091322c3af9..92b8a02e588a 100644 |
299 | --- a/arch/hexagon/include/asm/cmpxchg.h |
300 | +++ b/arch/hexagon/include/asm/cmpxchg.h |
301 | @@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, |
302 | __asm__ __volatile__ ( |
303 | "1: %0 = memw_locked(%1);\n" /* load into retval */ |
304 | " memw_locked(%1,P0) = %2;\n" /* store into memory */ |
305 | - " if !P0 jump 1b;\n" |
306 | + " if (!P0) jump 1b;\n" |
307 | : "=&r" (retval) |
308 | : "r" (ptr), "r" (x) |
309 | : "memory", "p0" |
310 | diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h |
311 | index cb635216a732..0191f7c7193e 100644 |
312 | --- a/arch/hexagon/include/asm/futex.h |
313 | +++ b/arch/hexagon/include/asm/futex.h |
314 | @@ -16,7 +16,7 @@ |
315 | /* For example: %1 = %4 */ \ |
316 | insn \ |
317 | "2: memw_locked(%3,p2) = %1;\n" \ |
318 | - " if !p2 jump 1b;\n" \ |
319 | + " if (!p2) jump 1b;\n" \ |
320 | " %1 = #0;\n" \ |
321 | "3:\n" \ |
322 | ".section .fixup,\"ax\"\n" \ |
323 | @@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, |
324 | "1: %1 = memw_locked(%3)\n" |
325 | " {\n" |
326 | " p2 = cmp.eq(%1,%4)\n" |
327 | - " if !p2.new jump:NT 3f\n" |
328 | + " if (!p2.new) jump:NT 3f\n" |
329 | " }\n" |
330 | "2: memw_locked(%3,p2) = %5\n" |
331 | - " if !p2 jump 1b\n" |
332 | + " if (!p2) jump 1b\n" |
333 | "3:\n" |
334 | ".section .fixup,\"ax\"\n" |
335 | "4: %0 = #%6\n" |
336 | diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h |
337 | index bfe07d842ff3..ef103b73bec8 100644 |
338 | --- a/arch/hexagon/include/asm/spinlock.h |
339 | +++ b/arch/hexagon/include/asm/spinlock.h |
340 | @@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock) |
341 | __asm__ __volatile__( |
342 | "1: R6 = memw_locked(%0);\n" |
343 | " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" |
344 | - " { if !P3 jump 1b; }\n" |
345 | + " { if (!P3) jump 1b; }\n" |
346 | " memw_locked(%0,P3) = R6;\n" |
347 | - " { if !P3 jump 1b; }\n" |
348 | + " { if (!P3) jump 1b; }\n" |
349 | : |
350 | : "r" (&lock->lock) |
351 | : "memory", "r6", "p3" |
352 | @@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock) |
353 | "1: R6 = memw_locked(%0);\n" |
354 | " R6 = add(R6,#-1);\n" |
355 | " memw_locked(%0,P3) = R6\n" |
356 | - " if !P3 jump 1b;\n" |
357 | + " if (!P3) jump 1b;\n" |
358 | : |
359 | : "r" (&lock->lock) |
360 | : "memory", "r6", "p3" |
361 | @@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock) |
362 | __asm__ __volatile__( |
363 | " R6 = memw_locked(%1);\n" |
364 | " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n" |
365 | - " { if !P3 jump 1f; }\n" |
366 | + " { if (!P3) jump 1f; }\n" |
367 | " memw_locked(%1,P3) = R6;\n" |
368 | " { %0 = P3 }\n" |
369 | "1:\n" |
370 | @@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock) |
371 | __asm__ __volatile__( |
372 | "1: R6 = memw_locked(%0)\n" |
373 | " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n" |
374 | - " { if !P3 jump 1b; }\n" |
375 | + " { if (!P3) jump 1b; }\n" |
376 | " memw_locked(%0,P3) = R6;\n" |
377 | - " { if !P3 jump 1b; }\n" |
378 | + " { if (!P3) jump 1b; }\n" |
379 | : |
380 | : "r" (&lock->lock) |
381 | : "memory", "r6", "p3" |
382 | @@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock) |
383 | __asm__ __volatile__( |
384 | " R6 = memw_locked(%1)\n" |
385 | " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n" |
386 | - " { if !P3 jump 1f; }\n" |
387 | + " { if (!P3) jump 1f; }\n" |
388 | " memw_locked(%1,P3) = R6;\n" |
389 | " %0 = P3;\n" |
390 | "1:\n" |
391 | @@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) |
392 | __asm__ __volatile__( |
393 | "1: R6 = memw_locked(%0);\n" |
394 | " P3 = cmp.eq(R6,#0);\n" |
395 | - " { if !P3 jump 1b; R6 = #1; }\n" |
396 | + " { if (!P3) jump 1b; R6 = #1; }\n" |
397 | " memw_locked(%0,P3) = R6;\n" |
398 | - " { if !P3 jump 1b; }\n" |
399 | + " { if (!P3) jump 1b; }\n" |
400 | : |
401 | : "r" (&lock->lock) |
402 | : "memory", "r6", "p3" |
403 | @@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) |
404 | __asm__ __volatile__( |
405 | " R6 = memw_locked(%1);\n" |
406 | " P3 = cmp.eq(R6,#0);\n" |
407 | - " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n" |
408 | + " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n" |
409 | " memw_locked(%1,P3) = R6;\n" |
410 | " %0 = P3;\n" |
411 | "1:\n" |
412 | diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c |
413 | index 35f29423fda8..5ed02f699479 100644 |
414 | --- a/arch/hexagon/kernel/stacktrace.c |
415 | +++ b/arch/hexagon/kernel/stacktrace.c |
416 | @@ -11,8 +11,6 @@ |
417 | #include <linux/thread_info.h> |
418 | #include <linux/module.h> |
419 | |
420 | -register unsigned long current_frame_pointer asm("r30"); |
421 | - |
422 | struct stackframe { |
423 | unsigned long fp; |
424 | unsigned long rets; |
425 | @@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace) |
426 | |
427 | low = (unsigned long)task_stack_page(current); |
428 | high = low + THREAD_SIZE; |
429 | - fp = current_frame_pointer; |
430 | + fp = (unsigned long)__builtin_frame_address(0); |
431 | |
432 | while (fp >= low && fp <= (high - sizeof(*frame))) { |
433 | frame = (struct stackframe *)fp; |
434 | diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S |
435 | index 12242c27e2df..4023fdbea490 100644 |
436 | --- a/arch/hexagon/kernel/vm_entry.S |
437 | +++ b/arch/hexagon/kernel/vm_entry.S |
438 | @@ -369,7 +369,7 @@ ret_from_fork: |
439 | R26.L = #LO(do_work_pending); |
440 | R0 = #VM_INT_DISABLE; |
441 | } |
442 | - if P0 jump check_work_pending |
443 | + if (P0) jump check_work_pending |
444 | { |
445 | R0 = R25; |
446 | callr R24 |
447 | diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile |
448 | index 172801ed35b8..d859f079b771 100644 |
449 | --- a/arch/mips/boot/compressed/Makefile |
450 | +++ b/arch/mips/boot/compressed/Makefile |
451 | @@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \ |
452 | -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \ |
453 | -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS) |
454 | |
455 | +# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. |
456 | +KCOV_INSTRUMENT := n |
457 | + |
458 | # decompressor objects (linked with vmlinuz) |
459 | vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o |
460 | |
461 | diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h |
462 | index b08825531e9f..0ae9b4cbc153 100644 |
463 | --- a/arch/mips/include/asm/vdso/gettimeofday.h |
464 | +++ b/arch/mips/include/asm/vdso/gettimeofday.h |
465 | @@ -26,8 +26,6 @@ |
466 | |
467 | #define __VDSO_USE_SYSCALL ULLONG_MAX |
468 | |
469 | -#ifdef CONFIG_MIPS_CLOCK_VSYSCALL |
470 | - |
471 | static __always_inline long gettimeofday_fallback( |
472 | struct __kernel_old_timeval *_tv, |
473 | struct timezone *_tz) |
474 | @@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback( |
475 | return error ? -ret : ret; |
476 | } |
477 | |
478 | -#else |
479 | - |
480 | -static __always_inline long gettimeofday_fallback( |
481 | - struct __kernel_old_timeval *_tv, |
482 | - struct timezone *_tz) |
483 | -{ |
484 | - return -1; |
485 | -} |
486 | - |
487 | -#endif |
488 | - |
489 | static __always_inline long clock_gettime_fallback( |
490 | clockid_t _clkid, |
491 | struct __kernel_timespec *_ts) |
492 | diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c |
493 | index f777e44653d5..47312c529410 100644 |
494 | --- a/arch/mips/kernel/cacheinfo.c |
495 | +++ b/arch/mips/kernel/cacheinfo.c |
496 | @@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu) |
497 | return 0; |
498 | } |
499 | |
500 | +static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) |
501 | +{ |
502 | + int cpu1; |
503 | + |
504 | + for_each_possible_cpu(cpu1) |
505 | + if (cpus_are_siblings(cpu, cpu1)) |
506 | + cpumask_set_cpu(cpu1, cpu_map); |
507 | +} |
508 | + |
509 | +static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) |
510 | +{ |
511 | + int cpu1; |
512 | + int cluster = cpu_cluster(&cpu_data[cpu]); |
513 | + |
514 | + for_each_possible_cpu(cpu1) |
515 | + if (cpu_cluster(&cpu_data[cpu1]) == cluster) |
516 | + cpumask_set_cpu(cpu1, cpu_map); |
517 | +} |
518 | + |
519 | static int __populate_cache_leaves(unsigned int cpu) |
520 | { |
521 | struct cpuinfo_mips *c = &current_cpu_data; |
522 | @@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu) |
523 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; |
524 | |
525 | if (c->icache.waysize) { |
526 | + /* L1 caches are per core */ |
527 | + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); |
528 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); |
529 | + fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); |
530 | populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); |
531 | } else { |
532 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); |
533 | } |
534 | |
535 | - if (c->scache.waysize) |
536 | + if (c->scache.waysize) { |
537 | + /* L2 cache is per cluster */ |
538 | + fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); |
539 | populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); |
540 | + } |
541 | |
542 | if (c->tcache.waysize) |
543 | populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); |
544 | diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c |
545 | index 7b4d40354ee7..30017d5945bc 100644 |
546 | --- a/arch/mips/pci/pci-xtalk-bridge.c |
547 | +++ b/arch/mips/pci/pci-xtalk-bridge.c |
548 | @@ -279,16 +279,15 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask, |
549 | struct bridge_irq_chip_data *data = d->chip_data; |
550 | int bit = d->parent_data->hwirq; |
551 | int pin = d->hwirq; |
552 | - nasid_t nasid; |
553 | int ret, cpu; |
554 | |
555 | ret = irq_chip_set_affinity_parent(d, mask, force); |
556 | if (ret >= 0) { |
557 | cpu = cpumask_first_and(mask, cpu_online_mask); |
558 | - nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); |
559 | + data->nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); |
560 | bridge_write(data->bc, b_int_addr[pin].addr, |
561 | (((data->bc->intr_addr >> 30) & 0x30000) | |
562 | - bit | (nasid << 8))); |
563 | + bit | (data->nasid << 8))); |
564 | bridge_read(data->bc, b_wid_tflush); |
565 | } |
566 | return ret; |
567 | diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c |
568 | index 37be04975831..79a2f6bd2b5a 100644 |
569 | --- a/arch/mips/sgi-ip27/ip27-irq.c |
570 | +++ b/arch/mips/sgi-ip27/ip27-irq.c |
571 | @@ -73,6 +73,9 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) |
572 | int cpu; |
573 | |
574 | cpu = cpumask_first_and(mask, cpu_online_mask); |
575 | + if (cpu >= nr_cpu_ids) |
576 | + cpu = cpumask_any(cpu_online_mask); |
577 | + |
578 | nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu)); |
579 | hd->cpu = cpu; |
580 | if (!cputoslice(cpu)) { |
581 | @@ -139,6 +142,7 @@ static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq, |
582 | /* use CPU connected to nearest hub */ |
583 | hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid)); |
584 | setup_hub_mask(hd, &hub->h_cpus); |
585 | + info->nasid = cpu_to_node(hd->cpu); |
586 | |
587 | /* Make sure it's not already pending when we connect it. */ |
588 | REMOTE_HUB_CLR_INTR(info->nasid, swlevel); |
589 | diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c |
590 | index 6ebdc37c89fc..6b83b6376a4b 100644 |
591 | --- a/arch/mips/vdso/vgettimeofday.c |
592 | +++ b/arch/mips/vdso/vgettimeofday.c |
593 | @@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock, |
594 | return __cvdso_clock_gettime32(clock, ts); |
595 | } |
596 | |
597 | +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL |
598 | + |
599 | +/* |
600 | + * This is behind the ifdef so that we don't provide the symbol when there's no |
601 | + * possibility of there being a usable clocksource, because there's nothing we |
602 | + * can do without it. When libc fails the symbol lookup it should fall back on |
603 | + * the standard syscall path. |
604 | + */ |
605 | int __vdso_gettimeofday(struct __kernel_old_timeval *tv, |
606 | struct timezone *tz) |
607 | { |
608 | return __cvdso_gettimeofday(tv, tz); |
609 | } |
610 | |
611 | +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ |
612 | + |
613 | int __vdso_clock_getres(clockid_t clock_id, |
614 | struct old_timespec32 *res) |
615 | { |
616 | @@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock, |
617 | return __cvdso_clock_gettime(clock, ts); |
618 | } |
619 | |
620 | +#ifdef CONFIG_MIPS_CLOCK_VSYSCALL |
621 | + |
622 | +/* |
623 | + * This is behind the ifdef so that we don't provide the symbol when there's no |
624 | + * possibility of there being a usable clocksource, because there's nothing we |
625 | + * can do without it. When libc fails the symbol lookup it should fall back on |
626 | + * the standard syscall path. |
627 | + */ |
628 | int __vdso_gettimeofday(struct __kernel_old_timeval *tv, |
629 | struct timezone *tz) |
630 | { |
631 | return __cvdso_gettimeofday(tv, tz); |
632 | } |
633 | |
634 | +#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */ |
635 | + |
636 | int __vdso_clock_getres(clockid_t clock_id, |
637 | struct __kernel_timespec *res) |
638 | { |
639 | diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h |
640 | index d9ac7e6408ef..caddded56e77 100644 |
641 | --- a/arch/nds32/include/asm/cacheflush.h |
642 | +++ b/arch/nds32/include/asm/cacheflush.h |
643 | @@ -9,7 +9,11 @@ |
644 | #define PG_dcache_dirty PG_arch_1 |
645 | |
646 | void flush_icache_range(unsigned long start, unsigned long end); |
647 | +#define flush_icache_range flush_icache_range |
648 | + |
649 | void flush_icache_page(struct vm_area_struct *vma, struct page *page); |
650 | +#define flush_icache_page flush_icache_page |
651 | + |
652 | #ifdef CONFIG_CPU_CACHE_ALIASING |
653 | void flush_cache_mm(struct mm_struct *mm); |
654 | void flush_cache_dup_mm(struct mm_struct *mm); |
655 | @@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size); |
656 | #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages) |
657 | |
658 | #else |
659 | -#include <asm-generic/cacheflush.h> |
660 | -#undef flush_icache_range |
661 | -#undef flush_icache_page |
662 | -#undef flush_icache_user_range |
663 | void flush_icache_user_range(struct vm_area_struct *vma, struct page *page, |
664 | unsigned long addr, int len); |
665 | +#define flush_icache_user_range flush_icache_user_range |
666 | + |
667 | +#include <asm-generic/cacheflush.h> |
668 | #endif |
669 | |
670 | #endif /* __NDS32_CACHEFLUSH_H__ */ |
671 | diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c |
672 | index 2825d004dece..c0bea75ac27b 100644 |
673 | --- a/arch/powerpc/platforms/powernv/pci.c |
674 | +++ b/arch/powerpc/platforms/powernv/pci.c |
675 | @@ -945,6 +945,23 @@ void __init pnv_pci_init(void) |
676 | if (!firmware_has_feature(FW_FEATURE_OPAL)) |
677 | return; |
678 | |
679 | +#ifdef CONFIG_PCIEPORTBUS |
680 | + /* |
681 | + * On PowerNV PCIe devices are (currently) managed in cooperation |
682 | + * with firmware. This isn't *strictly* required, but there's enough |
683 | + * assumptions baked into both firmware and the platform code that |
684 | + * it's unwise to allow the portbus services to be used. |
685 | + * |
686 | + * We need to fix this eventually, but for now set this flag to disable |
687 | + * the portbus driver. The AER service isn't required since that AER |
688 | + * events are handled via EEH. The pciehp hotplug driver can't work |
689 | + * without kernel changes (and portbus binding breaks pnv_php). The |
690 | + * other services also require some thinking about how we're going |
691 | + * to integrate them. |
692 | + */ |
693 | + pcie_ports_disabled = true; |
694 | +#endif |
695 | + |
696 | /* Look for IODA IO-Hubs. */ |
697 | for_each_compatible_node(np, NULL, "ibm,ioda-hub") { |
698 | pnv_pci_init_ioda_hub(np); |
699 | diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c |
700 | index 3f15938dec89..c54bd3c79955 100644 |
701 | --- a/arch/riscv/mm/cacheflush.c |
702 | +++ b/arch/riscv/mm/cacheflush.c |
703 | @@ -14,6 +14,7 @@ void flush_icache_all(void) |
704 | { |
705 | sbi_remote_fence_i(NULL); |
706 | } |
707 | +EXPORT_SYMBOL(flush_icache_all); |
708 | |
709 | /* |
710 | * Performs an icache flush for the given MM context. RISC-V has no direct |
711 | diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c |
712 | index aa3336a7cb15..7d17b3addbbb 100644 |
713 | --- a/arch/x86/entry/syscall_32.c |
714 | +++ b/arch/x86/entry/syscall_32.c |
715 | @@ -10,13 +10,11 @@ |
716 | #ifdef CONFIG_IA32_EMULATION |
717 | /* On X86_64, we use struct pt_regs * to pass parameters to syscalls */ |
718 | #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); |
719 | - |
720 | -/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */ |
721 | -extern asmlinkage long sys_ni_syscall(const struct pt_regs *); |
722 | - |
723 | +#define __sys_ni_syscall __ia32_sys_ni_syscall |
724 | #else /* CONFIG_IA32_EMULATION */ |
725 | #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); |
726 | extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long); |
727 | +#define __sys_ni_syscall sys_ni_syscall |
728 | #endif /* CONFIG_IA32_EMULATION */ |
729 | |
730 | #include <asm/syscalls_32.h> |
731 | @@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = |
732 | * Smells like a compiler bug -- it doesn't work |
733 | * when the & below is removed. |
734 | */ |
735 | - [0 ... __NR_syscall_compat_max] = &sys_ni_syscall, |
736 | + [0 ... __NR_syscall_compat_max] = &__sys_ni_syscall, |
737 | #include <asm/syscalls_32.h> |
738 | }; |
739 | diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c |
740 | index b1bf31713374..adf619a856e8 100644 |
741 | --- a/arch/x86/entry/syscall_64.c |
742 | +++ b/arch/x86/entry/syscall_64.c |
743 | @@ -4,11 +4,17 @@ |
744 | #include <linux/linkage.h> |
745 | #include <linux/sys.h> |
746 | #include <linux/cache.h> |
747 | +#include <linux/syscalls.h> |
748 | #include <asm/asm-offsets.h> |
749 | #include <asm/syscall.h> |
750 | |
751 | -/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */ |
752 | -extern asmlinkage long sys_ni_syscall(const struct pt_regs *); |
753 | +extern asmlinkage long sys_ni_syscall(void); |
754 | + |
755 | +SYSCALL_DEFINE0(ni_syscall) |
756 | +{ |
757 | + return sys_ni_syscall(); |
758 | +} |
759 | + |
760 | #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *); |
761 | #define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual) |
762 | #include <asm/syscalls_64.h> |
763 | @@ -23,7 +29,7 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { |
764 | * Smells like a compiler bug -- it doesn't work |
765 | * when the & below is removed. |
766 | */ |
767 | - [0 ... __NR_syscall_max] = &sys_ni_syscall, |
768 | + [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall, |
769 | #include <asm/syscalls_64.h> |
770 | }; |
771 | |
772 | @@ -40,7 +46,7 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = { |
773 | * Smells like a compiler bug -- it doesn't work |
774 | * when the & below is removed. |
775 | */ |
776 | - [0 ... __NR_syscall_x32_max] = &sys_ni_syscall, |
777 | + [0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall, |
778 | #include <asm/syscalls_64.h> |
779 | }; |
780 | |
781 | diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl |
782 | index 3fe02546aed3..15908eb9b17e 100644 |
783 | --- a/arch/x86/entry/syscalls/syscall_32.tbl |
784 | +++ b/arch/x86/entry/syscalls/syscall_32.tbl |
785 | @@ -124,13 +124,13 @@ |
786 | 110 i386 iopl sys_iopl __ia32_sys_iopl |
787 | 111 i386 vhangup sys_vhangup __ia32_sys_vhangup |
788 | 112 i386 idle |
789 | -113 i386 vm86old sys_vm86old sys_ni_syscall |
790 | +113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall |
791 | 114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4 |
792 | 115 i386 swapoff sys_swapoff __ia32_sys_swapoff |
793 | 116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo |
794 | 117 i386 ipc sys_ipc __ia32_compat_sys_ipc |
795 | 118 i386 fsync sys_fsync __ia32_sys_fsync |
796 | -119 i386 sigreturn sys_sigreturn sys32_sigreturn |
797 | +119 i386 sigreturn sys_sigreturn __ia32_compat_sys_sigreturn |
798 | 120 i386 clone sys_clone __ia32_compat_sys_x86_clone |
799 | 121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname |
800 | 122 i386 uname sys_newuname __ia32_sys_newuname |
801 | @@ -177,14 +177,14 @@ |
802 | 163 i386 mremap sys_mremap __ia32_sys_mremap |
803 | 164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16 |
804 | 165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16 |
805 | -166 i386 vm86 sys_vm86 sys_ni_syscall |
806 | +166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall |
807 | 167 i386 query_module |
808 | 168 i386 poll sys_poll __ia32_sys_poll |
809 | 169 i386 nfsservctl |
810 | 170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16 |
811 | 171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16 |
812 | 172 i386 prctl sys_prctl __ia32_sys_prctl |
813 | -173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn |
814 | +173 i386 rt_sigreturn sys_rt_sigreturn __ia32_compat_sys_rt_sigreturn |
815 | 174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction |
816 | 175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_compat_sys_rt_sigprocmask |
817 | 176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending |
818 | diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c |
819 | index 1cee10091b9f..30416d7f19d4 100644 |
820 | --- a/arch/x86/ia32/ia32_signal.c |
821 | +++ b/arch/x86/ia32/ia32_signal.c |
822 | @@ -21,6 +21,7 @@ |
823 | #include <linux/personality.h> |
824 | #include <linux/compat.h> |
825 | #include <linux/binfmts.h> |
826 | +#include <linux/syscalls.h> |
827 | #include <asm/ucontext.h> |
828 | #include <linux/uaccess.h> |
829 | #include <asm/fpu/internal.h> |
830 | @@ -118,7 +119,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, |
831 | return err; |
832 | } |
833 | |
834 | -asmlinkage long sys32_sigreturn(void) |
835 | +COMPAT_SYSCALL_DEFINE0(sigreturn) |
836 | { |
837 | struct pt_regs *regs = current_pt_regs(); |
838 | struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8); |
839 | @@ -144,7 +145,7 @@ badframe: |
840 | return 0; |
841 | } |
842 | |
843 | -asmlinkage long sys32_rt_sigreturn(void) |
844 | +COMPAT_SYSCALL_DEFINE0(rt_sigreturn) |
845 | { |
846 | struct pt_regs *regs = current_pt_regs(); |
847 | struct rt_sigframe_ia32 __user *frame; |
848 | diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h |
849 | index 90eb70df0b18..e2389ce9bf58 100644 |
850 | --- a/arch/x86/include/asm/syscall_wrapper.h |
851 | +++ b/arch/x86/include/asm/syscall_wrapper.h |
852 | @@ -6,6 +6,8 @@ |
853 | #ifndef _ASM_X86_SYSCALL_WRAPPER_H |
854 | #define _ASM_X86_SYSCALL_WRAPPER_H |
855 | |
856 | +struct pt_regs; |
857 | + |
858 | /* Mapping of registers to parameters for syscalls on x86-64 and x32 */ |
859 | #define SC_X86_64_REGS_TO_ARGS(x, ...) \ |
860 | __MAP(x,__SC_ARGS \ |
861 | @@ -28,13 +30,21 @@ |
862 | * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this |
863 | * case as well. |
864 | */ |
865 | +#define __IA32_COMPAT_SYS_STUB0(x, name) \ |
866 | + asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\ |
867 | + ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \ |
868 | + asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\ |
869 | + { \ |
870 | + return __se_compat_sys_##name(); \ |
871 | + } |
872 | + |
873 | #define __IA32_COMPAT_SYS_STUBx(x, name, ...) \ |
874 | asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\ |
875 | ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \ |
876 | asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\ |
877 | { \ |
878 | return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\ |
879 | - } \ |
880 | + } |
881 | |
882 | #define __IA32_SYS_STUBx(x, name, ...) \ |
883 | asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \ |
884 | @@ -56,9 +66,15 @@ |
885 | SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \ |
886 | asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused) |
887 | |
888 | -#define COND_SYSCALL(name) \ |
889 | - cond_syscall(__x64_sys_##name); \ |
890 | - cond_syscall(__ia32_sys_##name) |
891 | +#define COND_SYSCALL(name) \ |
892 | + asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \ |
893 | + { \ |
894 | + return sys_ni_syscall(); \ |
895 | + } \ |
896 | + asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\ |
897 | + { \ |
898 | + return sys_ni_syscall(); \ |
899 | + } |
900 | |
901 | #define SYS_NI(name) \ |
902 | SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \ |
903 | @@ -76,15 +92,24 @@ |
904 | * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common |
905 | * with x86_64 obviously do not need such care. |
906 | */ |
907 | +#define __X32_COMPAT_SYS_STUB0(x, name, ...) \ |
908 | + asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\ |
909 | + ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \ |
910 | + asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\ |
911 | + { \ |
912 | + return __se_compat_sys_##name();\ |
913 | + } |
914 | + |
915 | #define __X32_COMPAT_SYS_STUBx(x, name, ...) \ |
916 | asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\ |
917 | ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \ |
918 | asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\ |
919 | { \ |
920 | return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\ |
921 | - } \ |
922 | + } |
923 | |
924 | #else /* CONFIG_X86_X32 */ |
925 | +#define __X32_COMPAT_SYS_STUB0(x, name) |
926 | #define __X32_COMPAT_SYS_STUBx(x, name, ...) |
927 | #endif /* CONFIG_X86_X32 */ |
928 | |
929 | @@ -95,6 +120,17 @@ |
930 | * mapping of registers to parameters, we need to generate stubs for each |
931 | * of them. |
932 | */ |
933 | +#define COMPAT_SYSCALL_DEFINE0(name) \ |
934 | + static long __se_compat_sys_##name(void); \ |
935 | + static inline long __do_compat_sys_##name(void); \ |
936 | + __IA32_COMPAT_SYS_STUB0(x, name) \ |
937 | + __X32_COMPAT_SYS_STUB0(x, name) \ |
938 | + static long __se_compat_sys_##name(void) \ |
939 | + { \ |
940 | + return __do_compat_sys_##name(); \ |
941 | + } \ |
942 | + static inline long __do_compat_sys_##name(void) |
943 | + |
944 | #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \ |
945 | static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \ |
946 | static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\ |
947 | @@ -190,7 +226,11 @@ |
948 | #endif |
949 | |
950 | #ifndef COND_SYSCALL |
951 | -#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name) |
952 | +#define COND_SYSCALL(name) \ |
953 | + asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \ |
954 | + { \ |
955 | + return sys_ni_syscall(); \ |
956 | + } |
957 | #endif |
958 | |
959 | #ifndef SYS_NI |
960 | @@ -202,7 +242,6 @@ |
961 | * For VSYSCALLS, we need to declare these three syscalls with the new |
962 | * pt_regs-based calling convention for in-kernel use. |
963 | */ |
964 | -struct pt_regs; |
965 | asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs); |
966 | asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs); |
967 | asmlinkage long __x64_sys_time(const struct pt_regs *regs); |
968 | diff --git a/block/bio.c b/block/bio.c |
969 | index c822ceb7c4de..906da3581a3e 100644 |
970 | --- a/block/bio.c |
971 | +++ b/block/bio.c |
972 | @@ -535,6 +535,16 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) |
973 | } |
974 | EXPORT_SYMBOL(zero_fill_bio_iter); |
975 | |
976 | +/** |
977 | + * bio_truncate - truncate the bio to small size of @new_size |
978 | + * @bio: the bio to be truncated |
979 | + * @new_size: new size for truncating the bio |
980 | + * |
981 | + * Description: |
982 | + * Truncate the bio to new size of @new_size. If bio_op(bio) is |
983 | + * REQ_OP_READ, zero the truncated part. This function should only |
984 | + * be used for handling corner cases, such as bio eod. |
985 | + */ |
986 | void bio_truncate(struct bio *bio, unsigned new_size) |
987 | { |
988 | struct bio_vec bv; |
989 | @@ -545,7 +555,7 @@ void bio_truncate(struct bio *bio, unsigned new_size) |
990 | if (new_size >= bio->bi_iter.bi_size) |
991 | return; |
992 | |
993 | - if (bio_data_dir(bio) != READ) |
994 | + if (bio_op(bio) != REQ_OP_READ) |
995 | goto exit; |
996 | |
997 | bio_for_each_segment(bv, bio, iter) { |
998 | diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c |
999 | index c1601edd70e3..e2c8ab408bed 100644 |
1000 | --- a/crypto/algif_skcipher.c |
1001 | +++ b/crypto/algif_skcipher.c |
1002 | @@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg, |
1003 | struct alg_sock *pask = alg_sk(psk); |
1004 | struct af_alg_ctx *ctx = ask->private; |
1005 | struct crypto_skcipher *tfm = pask->private; |
1006 | - unsigned int bs = crypto_skcipher_blocksize(tfm); |
1007 | + unsigned int bs = crypto_skcipher_chunksize(tfm); |
1008 | struct af_alg_async_req *areq; |
1009 | int err = 0; |
1010 | size_t len = 0; |
1011 | diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c |
1012 | index 27a95c86a80b..4fc294c2f9e8 100644 |
1013 | --- a/drivers/clk/clk.c |
1014 | +++ b/drivers/clk/clk.c |
1015 | @@ -3886,6 +3886,7 @@ void clk_unregister(struct clk *clk) |
1016 | __func__, clk->core->name); |
1017 | |
1018 | kref_put(&clk->core->ref, __clk_release); |
1019 | + free_clk(clk); |
1020 | unlock: |
1021 | clk_prepare_unlock(); |
1022 | } |
1023 | diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c |
1024 | index d43b4a3c0de8..047f1d8fe323 100644 |
1025 | --- a/drivers/clk/imx/clk-pll14xx.c |
1026 | +++ b/drivers/clk/imx/clk-pll14xx.c |
1027 | @@ -112,43 +112,17 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw, |
1028 | return fvco; |
1029 | } |
1030 | |
1031 | -static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate, |
1032 | +static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate, |
1033 | u32 pll_div) |
1034 | { |
1035 | u32 old_mdiv, old_pdiv; |
1036 | |
1037 | - old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK; |
1038 | - old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK; |
1039 | + old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT; |
1040 | + old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT; |
1041 | |
1042 | return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv; |
1043 | } |
1044 | |
1045 | -static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate, |
1046 | - u32 pll_div_ctl0, u32 pll_div_ctl1) |
1047 | -{ |
1048 | - u32 old_mdiv, old_pdiv, old_kdiv; |
1049 | - |
1050 | - old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK; |
1051 | - old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK; |
1052 | - old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK; |
1053 | - |
1054 | - return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || |
1055 | - rate->kdiv != old_kdiv; |
1056 | -} |
1057 | - |
1058 | -static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate, |
1059 | - u32 pll_div_ctl0, u32 pll_div_ctl1) |
1060 | -{ |
1061 | - u32 old_mdiv, old_pdiv, old_kdiv; |
1062 | - |
1063 | - old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK; |
1064 | - old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK; |
1065 | - old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK; |
1066 | - |
1067 | - return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv || |
1068 | - rate->kdiv != old_kdiv; |
1069 | -} |
1070 | - |
1071 | static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll) |
1072 | { |
1073 | u32 val; |
1074 | @@ -174,7 +148,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate, |
1075 | |
1076 | tmp = readl_relaxed(pll->base + 4); |
1077 | |
1078 | - if (!clk_pll1416x_mp_change(rate, tmp)) { |
1079 | + if (!clk_pll14xx_mp_change(rate, tmp)) { |
1080 | tmp &= ~(SDIV_MASK) << SDIV_SHIFT; |
1081 | tmp |= rate->sdiv << SDIV_SHIFT; |
1082 | writel_relaxed(tmp, pll->base + 4); |
1083 | @@ -239,13 +213,15 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate, |
1084 | } |
1085 | |
1086 | tmp = readl_relaxed(pll->base + 4); |
1087 | - div_val = readl_relaxed(pll->base + 8); |
1088 | |
1089 | - if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) { |
1090 | + if (!clk_pll14xx_mp_change(rate, tmp)) { |
1091 | tmp &= ~(SDIV_MASK) << SDIV_SHIFT; |
1092 | tmp |= rate->sdiv << SDIV_SHIFT; |
1093 | writel_relaxed(tmp, pll->base + 4); |
1094 | |
1095 | + tmp = rate->kdiv << KDIV_SHIFT; |
1096 | + writel_relaxed(tmp, pll->base + 8); |
1097 | + |
1098 | return 0; |
1099 | } |
1100 | |
1101 | diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c |
1102 | index 18b23cdf679c..aa2522624fd3 100644 |
1103 | --- a/drivers/clk/meson/axg-audio.c |
1104 | +++ b/drivers/clk/meson/axg-audio.c |
1105 | @@ -1001,7 +1001,7 @@ static const struct regmap_config axg_audio_regmap_cfg = { |
1106 | .reg_bits = 32, |
1107 | .val_bits = 32, |
1108 | .reg_stride = 4, |
1109 | - .max_register = AUDIO_CLK_PDMIN_CTRL1, |
1110 | + .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL, |
1111 | }; |
1112 | |
1113 | struct audioclk_data { |
1114 | diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c |
1115 | index 31466cd1842f..3b7601647d7b 100644 |
1116 | --- a/drivers/clk/samsung/clk-exynos5420.c |
1117 | +++ b/drivers/clk/samsung/clk-exynos5420.c |
1118 | @@ -165,6 +165,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = { |
1119 | GATE_BUS_CPU, |
1120 | GATE_SCLK_CPU, |
1121 | CLKOUT_CMU_CPU, |
1122 | + APLL_CON0, |
1123 | + KPLL_CON0, |
1124 | CPLL_CON0, |
1125 | DPLL_CON0, |
1126 | EPLL_CON0, |
1127 | diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c |
1128 | index bc924980e10c..c4632d84c9a1 100644 |
1129 | --- a/drivers/crypto/cavium/nitrox/nitrox_main.c |
1130 | +++ b/drivers/crypto/cavium/nitrox/nitrox_main.c |
1131 | @@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size, |
1132 | offset = UCD_UCODE_LOAD_BLOCK_NUM; |
1133 | nitrox_write_csr(ndev, offset, block_num); |
1134 | |
1135 | - code_size = ucode_size; |
1136 | - code_size = roundup(code_size, 8); |
1137 | + code_size = roundup(ucode_size, 16); |
1138 | while (code_size) { |
1139 | data = ucode_data[i]; |
1140 | /* write 8 bytes at a time */ |
1141 | @@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev) |
1142 | |
1143 | /* write block number and firmware length |
1144 | * bit:<2:0> block number |
1145 | - * bit:3 is set SE uses 32KB microcode |
1146 | - * bit:3 is clear SE uses 64KB microcode |
1147 | + * bit:3 is set AE uses 32KB microcode |
1148 | + * bit:3 is clear AE uses 64KB microcode |
1149 | */ |
1150 | core_2_eid_val.value = 0ULL; |
1151 | - core_2_eid_val.ucode_blk = 0; |
1152 | + core_2_eid_val.ucode_blk = 2; |
1153 | if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) |
1154 | core_2_eid_val.ucode_len = 1; |
1155 | else |
1156 | diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c |
1157 | index 940485112d15..73a899e6f837 100644 |
1158 | --- a/drivers/crypto/geode-aes.c |
1159 | +++ b/drivers/crypto/geode-aes.c |
1160 | @@ -10,7 +10,7 @@ |
1161 | #include <linux/spinlock.h> |
1162 | #include <crypto/algapi.h> |
1163 | #include <crypto/aes.h> |
1164 | -#include <crypto/skcipher.h> |
1165 | +#include <crypto/internal/skcipher.h> |
1166 | |
1167 | #include <linux/io.h> |
1168 | #include <linux/delay.h> |
1169 | @@ -24,12 +24,12 @@ static spinlock_t lock; |
1170 | |
1171 | /* Write a 128 bit field (either a writable key or IV) */ |
1172 | static inline void |
1173 | -_writefield(u32 offset, void *value) |
1174 | +_writefield(u32 offset, const void *value) |
1175 | { |
1176 | int i; |
1177 | |
1178 | for (i = 0; i < 4; i++) |
1179 | - iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4)); |
1180 | + iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4)); |
1181 | } |
1182 | |
1183 | /* Read a 128 bit field (either a writable key or IV) */ |
1184 | @@ -43,12 +43,12 @@ _readfield(u32 offset, void *value) |
1185 | } |
1186 | |
1187 | static int |
1188 | -do_crypt(void *src, void *dst, int len, u32 flags) |
1189 | +do_crypt(const void *src, void *dst, u32 len, u32 flags) |
1190 | { |
1191 | u32 status; |
1192 | u32 counter = AES_OP_TIMEOUT; |
1193 | |
1194 | - iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG); |
1195 | + iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG); |
1196 | iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG); |
1197 | iowrite32(len, _iobase + AES_LENA_REG); |
1198 | |
1199 | @@ -65,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags) |
1200 | return counter ? 0 : 1; |
1201 | } |
1202 | |
1203 | -static unsigned int |
1204 | -geode_aes_crypt(struct geode_aes_op *op) |
1205 | +static void |
1206 | +geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src, |
1207 | + void *dst, u32 len, u8 *iv, int mode, int dir) |
1208 | { |
1209 | u32 flags = 0; |
1210 | unsigned long iflags; |
1211 | int ret; |
1212 | |
1213 | - if (op->len == 0) |
1214 | - return 0; |
1215 | - |
1216 | /* If the source and destination is the same, then |
1217 | * we need to turn on the coherent flags, otherwise |
1218 | * we don't need to worry |
1219 | @@ -82,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op) |
1220 | |
1221 | flags |= (AES_CTRL_DCA | AES_CTRL_SCA); |
1222 | |
1223 | - if (op->dir == AES_DIR_ENCRYPT) |
1224 | + if (dir == AES_DIR_ENCRYPT) |
1225 | flags |= AES_CTRL_ENCRYPT; |
1226 | |
1227 | /* Start the critical section */ |
1228 | |
1229 | spin_lock_irqsave(&lock, iflags); |
1230 | |
1231 | - if (op->mode == AES_MODE_CBC) { |
1232 | + if (mode == AES_MODE_CBC) { |
1233 | flags |= AES_CTRL_CBC; |
1234 | - _writefield(AES_WRITEIV0_REG, op->iv); |
1235 | + _writefield(AES_WRITEIV0_REG, iv); |
1236 | } |
1237 | |
1238 | - if (!(op->flags & AES_FLAGS_HIDDENKEY)) { |
1239 | - flags |= AES_CTRL_WRKEY; |
1240 | - _writefield(AES_WRITEKEY0_REG, op->key); |
1241 | - } |
1242 | + flags |= AES_CTRL_WRKEY; |
1243 | + _writefield(AES_WRITEKEY0_REG, tctx->key); |
1244 | |
1245 | - ret = do_crypt(op->src, op->dst, op->len, flags); |
1246 | + ret = do_crypt(src, dst, len, flags); |
1247 | BUG_ON(ret); |
1248 | |
1249 | - if (op->mode == AES_MODE_CBC) |
1250 | - _readfield(AES_WRITEIV0_REG, op->iv); |
1251 | + if (mode == AES_MODE_CBC) |
1252 | + _readfield(AES_WRITEIV0_REG, iv); |
1253 | |
1254 | spin_unlock_irqrestore(&lock, iflags); |
1255 | - |
1256 | - return op->len; |
1257 | } |
1258 | |
1259 | /* CRYPTO-API Functions */ |
1260 | @@ -115,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op) |
1261 | static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, |
1262 | unsigned int len) |
1263 | { |
1264 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1265 | + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); |
1266 | unsigned int ret; |
1267 | |
1268 | - op->keylen = len; |
1269 | + tctx->keylen = len; |
1270 | |
1271 | if (len == AES_KEYSIZE_128) { |
1272 | - memcpy(op->key, key, len); |
1273 | + memcpy(tctx->key, key, len); |
1274 | return 0; |
1275 | } |
1276 | |
1277 | @@ -134,132 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key, |
1278 | /* |
1279 | * The requested key size is not supported by HW, do a fallback |
1280 | */ |
1281 | - op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
1282 | - op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); |
1283 | + tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; |
1284 | + tctx->fallback.cip->base.crt_flags |= |
1285 | + (tfm->crt_flags & CRYPTO_TFM_REQ_MASK); |
1286 | |
1287 | - ret = crypto_cipher_setkey(op->fallback.cip, key, len); |
1288 | + ret = crypto_cipher_setkey(tctx->fallback.cip, key, len); |
1289 | if (ret) { |
1290 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
1291 | - tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK); |
1292 | + tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags & |
1293 | + CRYPTO_TFM_RES_MASK); |
1294 | } |
1295 | return ret; |
1296 | } |
1297 | |
1298 | -static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key, |
1299 | - unsigned int len) |
1300 | +static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key, |
1301 | + unsigned int len) |
1302 | { |
1303 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1304 | + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); |
1305 | unsigned int ret; |
1306 | |
1307 | - op->keylen = len; |
1308 | + tctx->keylen = len; |
1309 | |
1310 | if (len == AES_KEYSIZE_128) { |
1311 | - memcpy(op->key, key, len); |
1312 | + memcpy(tctx->key, key, len); |
1313 | return 0; |
1314 | } |
1315 | |
1316 | if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) { |
1317 | /* not supported at all */ |
1318 | - tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN; |
1319 | + crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); |
1320 | return -EINVAL; |
1321 | } |
1322 | |
1323 | /* |
1324 | * The requested key size is not supported by HW, do a fallback |
1325 | */ |
1326 | - crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK); |
1327 | - crypto_sync_skcipher_set_flags(op->fallback.blk, |
1328 | - tfm->crt_flags & CRYPTO_TFM_REQ_MASK); |
1329 | - |
1330 | - ret = crypto_sync_skcipher_setkey(op->fallback.blk, key, len); |
1331 | - if (ret) { |
1332 | - tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; |
1333 | - tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) & |
1334 | - CRYPTO_TFM_RES_MASK; |
1335 | - } |
1336 | + crypto_skcipher_clear_flags(tctx->fallback.skcipher, |
1337 | + CRYPTO_TFM_REQ_MASK); |
1338 | + crypto_skcipher_set_flags(tctx->fallback.skcipher, |
1339 | + crypto_skcipher_get_flags(tfm) & |
1340 | + CRYPTO_TFM_REQ_MASK); |
1341 | + ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len); |
1342 | + crypto_skcipher_set_flags(tfm, |
1343 | + crypto_skcipher_get_flags(tctx->fallback.skcipher) & |
1344 | + CRYPTO_TFM_RES_MASK); |
1345 | return ret; |
1346 | } |
1347 | |
1348 | -static int fallback_blk_dec(struct blkcipher_desc *desc, |
1349 | - struct scatterlist *dst, struct scatterlist *src, |
1350 | - unsigned int nbytes) |
1351 | -{ |
1352 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1353 | - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk); |
1354 | - |
1355 | - skcipher_request_set_sync_tfm(req, op->fallback.blk); |
1356 | - skcipher_request_set_callback(req, 0, NULL, NULL); |
1357 | - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); |
1358 | - |
1359 | - return crypto_skcipher_decrypt(req); |
1360 | -} |
1361 | - |
1362 | -static int fallback_blk_enc(struct blkcipher_desc *desc, |
1363 | - struct scatterlist *dst, struct scatterlist *src, |
1364 | - unsigned int nbytes) |
1365 | -{ |
1366 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1367 | - SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk); |
1368 | - |
1369 | - skcipher_request_set_sync_tfm(req, op->fallback.blk); |
1370 | - skcipher_request_set_callback(req, 0, NULL, NULL); |
1371 | - skcipher_request_set_crypt(req, src, dst, nbytes, desc->info); |
1372 | - |
1373 | - return crypto_skcipher_encrypt(req); |
1374 | -} |
1375 | - |
1376 | static void |
1377 | geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
1378 | { |
1379 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1380 | + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); |
1381 | |
1382 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) { |
1383 | - crypto_cipher_encrypt_one(op->fallback.cip, out, in); |
1384 | + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { |
1385 | + crypto_cipher_encrypt_one(tctx->fallback.cip, out, in); |
1386 | return; |
1387 | } |
1388 | |
1389 | - op->src = (void *) in; |
1390 | - op->dst = (void *) out; |
1391 | - op->mode = AES_MODE_ECB; |
1392 | - op->flags = 0; |
1393 | - op->len = AES_BLOCK_SIZE; |
1394 | - op->dir = AES_DIR_ENCRYPT; |
1395 | - |
1396 | - geode_aes_crypt(op); |
1397 | + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, |
1398 | + AES_MODE_ECB, AES_DIR_ENCRYPT); |
1399 | } |
1400 | |
1401 | |
1402 | static void |
1403 | geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) |
1404 | { |
1405 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1406 | + const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); |
1407 | |
1408 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) { |
1409 | - crypto_cipher_decrypt_one(op->fallback.cip, out, in); |
1410 | + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { |
1411 | + crypto_cipher_decrypt_one(tctx->fallback.cip, out, in); |
1412 | return; |
1413 | } |
1414 | |
1415 | - op->src = (void *) in; |
1416 | - op->dst = (void *) out; |
1417 | - op->mode = AES_MODE_ECB; |
1418 | - op->flags = 0; |
1419 | - op->len = AES_BLOCK_SIZE; |
1420 | - op->dir = AES_DIR_DECRYPT; |
1421 | - |
1422 | - geode_aes_crypt(op); |
1423 | + geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL, |
1424 | + AES_MODE_ECB, AES_DIR_DECRYPT); |
1425 | } |
1426 | |
1427 | static int fallback_init_cip(struct crypto_tfm *tfm) |
1428 | { |
1429 | const char *name = crypto_tfm_alg_name(tfm); |
1430 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1431 | + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); |
1432 | |
1433 | - op->fallback.cip = crypto_alloc_cipher(name, 0, |
1434 | - CRYPTO_ALG_NEED_FALLBACK); |
1435 | + tctx->fallback.cip = crypto_alloc_cipher(name, 0, |
1436 | + CRYPTO_ALG_NEED_FALLBACK); |
1437 | |
1438 | - if (IS_ERR(op->fallback.cip)) { |
1439 | + if (IS_ERR(tctx->fallback.cip)) { |
1440 | printk(KERN_ERR "Error allocating fallback algo %s\n", name); |
1441 | - return PTR_ERR(op->fallback.cip); |
1442 | + return PTR_ERR(tctx->fallback.cip); |
1443 | } |
1444 | |
1445 | return 0; |
1446 | @@ -267,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm) |
1447 | |
1448 | static void fallback_exit_cip(struct crypto_tfm *tfm) |
1449 | { |
1450 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1451 | + struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm); |
1452 | |
1453 | - crypto_free_cipher(op->fallback.cip); |
1454 | - op->fallback.cip = NULL; |
1455 | + crypto_free_cipher(tctx->fallback.cip); |
1456 | } |
1457 | |
1458 | static struct crypto_alg geode_alg = { |
1459 | @@ -283,7 +237,7 @@ static struct crypto_alg geode_alg = { |
1460 | .cra_init = fallback_init_cip, |
1461 | .cra_exit = fallback_exit_cip, |
1462 | .cra_blocksize = AES_BLOCK_SIZE, |
1463 | - .cra_ctxsize = sizeof(struct geode_aes_op), |
1464 | + .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), |
1465 | .cra_module = THIS_MODULE, |
1466 | .cra_u = { |
1467 | .cipher = { |
1468 | @@ -296,220 +250,126 @@ static struct crypto_alg geode_alg = { |
1469 | } |
1470 | }; |
1471 | |
1472 | -static int |
1473 | -geode_cbc_decrypt(struct blkcipher_desc *desc, |
1474 | - struct scatterlist *dst, struct scatterlist *src, |
1475 | - unsigned int nbytes) |
1476 | +static int geode_init_skcipher(struct crypto_skcipher *tfm) |
1477 | { |
1478 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1479 | - struct blkcipher_walk walk; |
1480 | - int err, ret; |
1481 | - |
1482 | - if (nbytes % AES_BLOCK_SIZE) |
1483 | - return -EINVAL; |
1484 | - |
1485 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) |
1486 | - return fallback_blk_dec(desc, dst, src, nbytes); |
1487 | + const char *name = crypto_tfm_alg_name(&tfm->base); |
1488 | + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); |
1489 | |
1490 | - blkcipher_walk_init(&walk, dst, src, nbytes); |
1491 | - err = blkcipher_walk_virt(desc, &walk); |
1492 | - op->iv = walk.iv; |
1493 | - |
1494 | - while ((nbytes = walk.nbytes)) { |
1495 | - op->src = walk.src.virt.addr, |
1496 | - op->dst = walk.dst.virt.addr; |
1497 | - op->mode = AES_MODE_CBC; |
1498 | - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); |
1499 | - op->dir = AES_DIR_DECRYPT; |
1500 | - |
1501 | - ret = geode_aes_crypt(op); |
1502 | - |
1503 | - nbytes -= ret; |
1504 | - err = blkcipher_walk_done(desc, &walk, nbytes); |
1505 | + tctx->fallback.skcipher = |
1506 | + crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | |
1507 | + CRYPTO_ALG_ASYNC); |
1508 | + if (IS_ERR(tctx->fallback.skcipher)) { |
1509 | + printk(KERN_ERR "Error allocating fallback algo %s\n", name); |
1510 | + return PTR_ERR(tctx->fallback.skcipher); |
1511 | } |
1512 | |
1513 | - return err; |
1514 | + crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + |
1515 | + crypto_skcipher_reqsize(tctx->fallback.skcipher)); |
1516 | + return 0; |
1517 | } |
1518 | |
1519 | -static int |
1520 | -geode_cbc_encrypt(struct blkcipher_desc *desc, |
1521 | - struct scatterlist *dst, struct scatterlist *src, |
1522 | - unsigned int nbytes) |
1523 | +static void geode_exit_skcipher(struct crypto_skcipher *tfm) |
1524 | { |
1525 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1526 | - struct blkcipher_walk walk; |
1527 | - int err, ret; |
1528 | + struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); |
1529 | |
1530 | - if (nbytes % AES_BLOCK_SIZE) |
1531 | - return -EINVAL; |
1532 | - |
1533 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) |
1534 | - return fallback_blk_enc(desc, dst, src, nbytes); |
1535 | + crypto_free_skcipher(tctx->fallback.skcipher); |
1536 | +} |
1537 | |
1538 | - blkcipher_walk_init(&walk, dst, src, nbytes); |
1539 | - err = blkcipher_walk_virt(desc, &walk); |
1540 | - op->iv = walk.iv; |
1541 | +static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir) |
1542 | +{ |
1543 | + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
1544 | + const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm); |
1545 | + struct skcipher_walk walk; |
1546 | + unsigned int nbytes; |
1547 | + int err; |
1548 | + |
1549 | + if (unlikely(tctx->keylen != AES_KEYSIZE_128)) { |
1550 | + struct skcipher_request *subreq = skcipher_request_ctx(req); |
1551 | + |
1552 | + *subreq = *req; |
1553 | + skcipher_request_set_tfm(subreq, tctx->fallback.skcipher); |
1554 | + if (dir == AES_DIR_DECRYPT) |
1555 | + return crypto_skcipher_decrypt(subreq); |
1556 | + else |
1557 | + return crypto_skcipher_encrypt(subreq); |
1558 | + } |
1559 | |
1560 | - while ((nbytes = walk.nbytes)) { |
1561 | - op->src = walk.src.virt.addr, |
1562 | - op->dst = walk.dst.virt.addr; |
1563 | - op->mode = AES_MODE_CBC; |
1564 | - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); |
1565 | - op->dir = AES_DIR_ENCRYPT; |
1566 | + err = skcipher_walk_virt(&walk, req, false); |
1567 | |
1568 | - ret = geode_aes_crypt(op); |
1569 | - nbytes -= ret; |
1570 | - err = blkcipher_walk_done(desc, &walk, nbytes); |
1571 | + while ((nbytes = walk.nbytes) != 0) { |
1572 | + geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr, |
1573 | + round_down(nbytes, AES_BLOCK_SIZE), |
1574 | + walk.iv, mode, dir); |
1575 | + err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); |
1576 | } |
1577 | |
1578 | return err; |
1579 | } |
1580 | |
1581 | -static int fallback_init_blk(struct crypto_tfm *tfm) |
1582 | +static int geode_cbc_encrypt(struct skcipher_request *req) |
1583 | { |
1584 | - const char *name = crypto_tfm_alg_name(tfm); |
1585 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1586 | - |
1587 | - op->fallback.blk = crypto_alloc_sync_skcipher(name, 0, |
1588 | - CRYPTO_ALG_NEED_FALLBACK); |
1589 | - if (IS_ERR(op->fallback.blk)) { |
1590 | - printk(KERN_ERR "Error allocating fallback algo %s\n", name); |
1591 | - return PTR_ERR(op->fallback.blk); |
1592 | - } |
1593 | - |
1594 | - return 0; |
1595 | + return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT); |
1596 | } |
1597 | |
1598 | -static void fallback_exit_blk(struct crypto_tfm *tfm) |
1599 | +static int geode_cbc_decrypt(struct skcipher_request *req) |
1600 | { |
1601 | - struct geode_aes_op *op = crypto_tfm_ctx(tfm); |
1602 | - |
1603 | - crypto_free_sync_skcipher(op->fallback.blk); |
1604 | - op->fallback.blk = NULL; |
1605 | + return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT); |
1606 | } |
1607 | |
1608 | -static struct crypto_alg geode_cbc_alg = { |
1609 | - .cra_name = "cbc(aes)", |
1610 | - .cra_driver_name = "cbc-aes-geode", |
1611 | - .cra_priority = 400, |
1612 | - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
1613 | - CRYPTO_ALG_KERN_DRIVER_ONLY | |
1614 | - CRYPTO_ALG_NEED_FALLBACK, |
1615 | - .cra_init = fallback_init_blk, |
1616 | - .cra_exit = fallback_exit_blk, |
1617 | - .cra_blocksize = AES_BLOCK_SIZE, |
1618 | - .cra_ctxsize = sizeof(struct geode_aes_op), |
1619 | - .cra_alignmask = 15, |
1620 | - .cra_type = &crypto_blkcipher_type, |
1621 | - .cra_module = THIS_MODULE, |
1622 | - .cra_u = { |
1623 | - .blkcipher = { |
1624 | - .min_keysize = AES_MIN_KEY_SIZE, |
1625 | - .max_keysize = AES_MAX_KEY_SIZE, |
1626 | - .setkey = geode_setkey_blk, |
1627 | - .encrypt = geode_cbc_encrypt, |
1628 | - .decrypt = geode_cbc_decrypt, |
1629 | - .ivsize = AES_BLOCK_SIZE, |
1630 | - } |
1631 | - } |
1632 | -}; |
1633 | - |
1634 | -static int |
1635 | -geode_ecb_decrypt(struct blkcipher_desc *desc, |
1636 | - struct scatterlist *dst, struct scatterlist *src, |
1637 | - unsigned int nbytes) |
1638 | +static int geode_ecb_encrypt(struct skcipher_request *req) |
1639 | { |
1640 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1641 | - struct blkcipher_walk walk; |
1642 | - int err, ret; |
1643 | - |
1644 | - if (nbytes % AES_BLOCK_SIZE) |
1645 | - return -EINVAL; |
1646 | - |
1647 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) |
1648 | - return fallback_blk_dec(desc, dst, src, nbytes); |
1649 | - |
1650 | - blkcipher_walk_init(&walk, dst, src, nbytes); |
1651 | - err = blkcipher_walk_virt(desc, &walk); |
1652 | - |
1653 | - while ((nbytes = walk.nbytes)) { |
1654 | - op->src = walk.src.virt.addr, |
1655 | - op->dst = walk.dst.virt.addr; |
1656 | - op->mode = AES_MODE_ECB; |
1657 | - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); |
1658 | - op->dir = AES_DIR_DECRYPT; |
1659 | - |
1660 | - ret = geode_aes_crypt(op); |
1661 | - nbytes -= ret; |
1662 | - err = blkcipher_walk_done(desc, &walk, nbytes); |
1663 | - } |
1664 | - |
1665 | - return err; |
1666 | + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT); |
1667 | } |
1668 | |
1669 | -static int |
1670 | -geode_ecb_encrypt(struct blkcipher_desc *desc, |
1671 | - struct scatterlist *dst, struct scatterlist *src, |
1672 | - unsigned int nbytes) |
1673 | +static int geode_ecb_decrypt(struct skcipher_request *req) |
1674 | { |
1675 | - struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); |
1676 | - struct blkcipher_walk walk; |
1677 | - int err, ret; |
1678 | - |
1679 | - if (nbytes % AES_BLOCK_SIZE) |
1680 | - return -EINVAL; |
1681 | - |
1682 | - if (unlikely(op->keylen != AES_KEYSIZE_128)) |
1683 | - return fallback_blk_enc(desc, dst, src, nbytes); |
1684 | - |
1685 | - blkcipher_walk_init(&walk, dst, src, nbytes); |
1686 | - err = blkcipher_walk_virt(desc, &walk); |
1687 | - |
1688 | - while ((nbytes = walk.nbytes)) { |
1689 | - op->src = walk.src.virt.addr, |
1690 | - op->dst = walk.dst.virt.addr; |
1691 | - op->mode = AES_MODE_ECB; |
1692 | - op->len = nbytes - (nbytes % AES_BLOCK_SIZE); |
1693 | - op->dir = AES_DIR_ENCRYPT; |
1694 | - |
1695 | - ret = geode_aes_crypt(op); |
1696 | - nbytes -= ret; |
1697 | - ret = blkcipher_walk_done(desc, &walk, nbytes); |
1698 | - } |
1699 | - |
1700 | - return err; |
1701 | + return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT); |
1702 | } |
1703 | |
1704 | -static struct crypto_alg geode_ecb_alg = { |
1705 | - .cra_name = "ecb(aes)", |
1706 | - .cra_driver_name = "ecb-aes-geode", |
1707 | - .cra_priority = 400, |
1708 | - .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | |
1709 | - CRYPTO_ALG_KERN_DRIVER_ONLY | |
1710 | - CRYPTO_ALG_NEED_FALLBACK, |
1711 | - .cra_init = fallback_init_blk, |
1712 | - .cra_exit = fallback_exit_blk, |
1713 | - .cra_blocksize = AES_BLOCK_SIZE, |
1714 | - .cra_ctxsize = sizeof(struct geode_aes_op), |
1715 | - .cra_alignmask = 15, |
1716 | - .cra_type = &crypto_blkcipher_type, |
1717 | - .cra_module = THIS_MODULE, |
1718 | - .cra_u = { |
1719 | - .blkcipher = { |
1720 | - .min_keysize = AES_MIN_KEY_SIZE, |
1721 | - .max_keysize = AES_MAX_KEY_SIZE, |
1722 | - .setkey = geode_setkey_blk, |
1723 | - .encrypt = geode_ecb_encrypt, |
1724 | - .decrypt = geode_ecb_decrypt, |
1725 | - } |
1726 | - } |
1727 | +static struct skcipher_alg geode_skcipher_algs[] = { |
1728 | + { |
1729 | + .base.cra_name = "cbc(aes)", |
1730 | + .base.cra_driver_name = "cbc-aes-geode", |
1731 | + .base.cra_priority = 400, |
1732 | + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | |
1733 | + CRYPTO_ALG_NEED_FALLBACK, |
1734 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
1735 | + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), |
1736 | + .base.cra_alignmask = 15, |
1737 | + .base.cra_module = THIS_MODULE, |
1738 | + .init = geode_init_skcipher, |
1739 | + .exit = geode_exit_skcipher, |
1740 | + .setkey = geode_setkey_skcipher, |
1741 | + .encrypt = geode_cbc_encrypt, |
1742 | + .decrypt = geode_cbc_decrypt, |
1743 | + .min_keysize = AES_MIN_KEY_SIZE, |
1744 | + .max_keysize = AES_MAX_KEY_SIZE, |
1745 | + .ivsize = AES_BLOCK_SIZE, |
1746 | + }, { |
1747 | + .base.cra_name = "ecb(aes)", |
1748 | + .base.cra_driver_name = "ecb-aes-geode", |
1749 | + .base.cra_priority = 400, |
1750 | + .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | |
1751 | + CRYPTO_ALG_NEED_FALLBACK, |
1752 | + .base.cra_blocksize = AES_BLOCK_SIZE, |
1753 | + .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx), |
1754 | + .base.cra_alignmask = 15, |
1755 | + .base.cra_module = THIS_MODULE, |
1756 | + .init = geode_init_skcipher, |
1757 | + .exit = geode_exit_skcipher, |
1758 | + .setkey = geode_setkey_skcipher, |
1759 | + .encrypt = geode_ecb_encrypt, |
1760 | + .decrypt = geode_ecb_decrypt, |
1761 | + .min_keysize = AES_MIN_KEY_SIZE, |
1762 | + .max_keysize = AES_MAX_KEY_SIZE, |
1763 | + }, |
1764 | }; |
1765 | |
1766 | static void geode_aes_remove(struct pci_dev *dev) |
1767 | { |
1768 | crypto_unregister_alg(&geode_alg); |
1769 | - crypto_unregister_alg(&geode_ecb_alg); |
1770 | - crypto_unregister_alg(&geode_cbc_alg); |
1771 | + crypto_unregister_skciphers(geode_skcipher_algs, |
1772 | + ARRAY_SIZE(geode_skcipher_algs)); |
1773 | |
1774 | pci_iounmap(dev, _iobase); |
1775 | _iobase = NULL; |
1776 | @@ -547,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id) |
1777 | if (ret) |
1778 | goto eiomap; |
1779 | |
1780 | - ret = crypto_register_alg(&geode_ecb_alg); |
1781 | + ret = crypto_register_skciphers(geode_skcipher_algs, |
1782 | + ARRAY_SIZE(geode_skcipher_algs)); |
1783 | if (ret) |
1784 | goto ealg; |
1785 | |
1786 | - ret = crypto_register_alg(&geode_cbc_alg); |
1787 | - if (ret) |
1788 | - goto eecb; |
1789 | - |
1790 | dev_notice(&dev->dev, "GEODE AES engine enabled.\n"); |
1791 | return 0; |
1792 | |
1793 | - eecb: |
1794 | - crypto_unregister_alg(&geode_ecb_alg); |
1795 | - |
1796 | ealg: |
1797 | crypto_unregister_alg(&geode_alg); |
1798 | |
1799 | diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h |
1800 | index f8a86898ac22..6d0a0cdc7647 100644 |
1801 | --- a/drivers/crypto/geode-aes.h |
1802 | +++ b/drivers/crypto/geode-aes.h |
1803 | @@ -46,21 +46,10 @@ |
1804 | |
1805 | #define AES_OP_TIMEOUT 0x50000 |
1806 | |
1807 | -struct geode_aes_op { |
1808 | - |
1809 | - void *src; |
1810 | - void *dst; |
1811 | - |
1812 | - u32 mode; |
1813 | - u32 dir; |
1814 | - u32 flags; |
1815 | - int len; |
1816 | - |
1817 | +struct geode_aes_tfm_ctx { |
1818 | u8 key[AES_KEYSIZE_128]; |
1819 | - u8 *iv; |
1820 | - |
1821 | union { |
1822 | - struct crypto_sync_skcipher *blk; |
1823 | + struct crypto_skcipher *skcipher; |
1824 | struct crypto_cipher *cip; |
1825 | } fallback; |
1826 | u32 keylen; |
1827 | diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig |
1828 | index ebaf91e0146d..504daff7687d 100644 |
1829 | --- a/drivers/crypto/hisilicon/Kconfig |
1830 | +++ b/drivers/crypto/hisilicon/Kconfig |
1831 | @@ -17,6 +17,7 @@ config CRYPTO_DEV_HISI_SEC |
1832 | config CRYPTO_DEV_HISI_QM |
1833 | tristate |
1834 | depends on ARM64 && PCI && PCI_MSI |
1835 | + select NEED_SG_DMA_LENGTH |
1836 | help |
1837 | HiSilicon accelerator engines use a common queue management |
1838 | interface. Specific engine driver may use this module. |
1839 | diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c |
1840 | index 673fb29fda53..82b316b2f537 100644 |
1841 | --- a/drivers/crypto/virtio/virtio_crypto_algs.c |
1842 | +++ b/drivers/crypto/virtio/virtio_crypto_algs.c |
1843 | @@ -435,6 +435,11 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req, |
1844 | goto free; |
1845 | } |
1846 | memcpy(iv, req->info, ivsize); |
1847 | + if (!vc_sym_req->encrypt) |
1848 | + scatterwalk_map_and_copy(req->info, req->src, |
1849 | + req->nbytes - AES_BLOCK_SIZE, |
1850 | + AES_BLOCK_SIZE, 0); |
1851 | + |
1852 | sg_init_one(&iv_sg, iv, ivsize); |
1853 | sgs[num_out++] = &iv_sg; |
1854 | vc_sym_req->iv = iv; |
1855 | @@ -571,6 +576,10 @@ static void virtio_crypto_ablkcipher_finalize_req( |
1856 | struct ablkcipher_request *req, |
1857 | int err) |
1858 | { |
1859 | + if (vc_sym_req->encrypt) |
1860 | + scatterwalk_map_and_copy(req->info, req->dst, |
1861 | + req->nbytes - AES_BLOCK_SIZE, |
1862 | + AES_BLOCK_SIZE, 0); |
1863 | crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine, |
1864 | req, err); |
1865 | kzfree(vc_sym_req->iv); |
1866 | diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig |
1867 | index defe1d438710..af4a3ccb96b3 100644 |
1868 | --- a/drivers/devfreq/Kconfig |
1869 | +++ b/drivers/devfreq/Kconfig |
1870 | @@ -99,6 +99,7 @@ config ARM_TEGRA_DEVFREQ |
1871 | ARCH_TEGRA_210_SOC || \ |
1872 | COMPILE_TEST |
1873 | select PM_OPP |
1874 | + depends on COMMON_CLK |
1875 | help |
1876 | This adds the DEVFREQ driver for the Tegra family of SoCs. |
1877 | It reads ACTMON counters of memory controllers and adjusts the |
1878 | diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c |
1879 | index c90c798e5ec3..0585d749d935 100644 |
1880 | --- a/drivers/dma/dw/platform.c |
1881 | +++ b/drivers/dma/dw/platform.c |
1882 | @@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev) |
1883 | |
1884 | data->chip = chip; |
1885 | |
1886 | - chip->clk = devm_clk_get(chip->dev, "hclk"); |
1887 | + chip->clk = devm_clk_get_optional(chip->dev, "hclk"); |
1888 | if (IS_ERR(chip->clk)) |
1889 | return PTR_ERR(chip->clk); |
1890 | err = clk_prepare_enable(chip->clk); |
1891 | diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c |
1892 | index 1a422a8b43cf..18c011e57592 100644 |
1893 | --- a/drivers/dma/ioat/dma.c |
1894 | +++ b/drivers/dma/ioat/dma.c |
1895 | @@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags) |
1896 | |
1897 | descs->virt = dma_alloc_coherent(to_dev(ioat_chan), |
1898 | SZ_2M, &descs->hw, flags); |
1899 | - if (!descs->virt && (i > 0)) { |
1900 | + if (!descs->virt) { |
1901 | int idx; |
1902 | |
1903 | for (idx = 0; idx < i; idx++) { |
1904 | + descs = &ioat_chan->descs[idx]; |
1905 | dma_free_coherent(to_dev(ioat_chan), SZ_2M, |
1906 | descs->virt, descs->hw); |
1907 | descs->virt = NULL; |
1908 | diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c |
1909 | index 4b36c8810517..d05471653224 100644 |
1910 | --- a/drivers/dma/k3dma.c |
1911 | +++ b/drivers/dma/k3dma.c |
1912 | @@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id) |
1913 | c = p->vchan; |
1914 | if (c && (tc1 & BIT(i))) { |
1915 | spin_lock_irqsave(&c->vc.lock, flags); |
1916 | - vchan_cookie_complete(&p->ds_run->vd); |
1917 | - p->ds_done = p->ds_run; |
1918 | - p->ds_run = NULL; |
1919 | + if (p->ds_run != NULL) { |
1920 | + vchan_cookie_complete(&p->ds_run->vd); |
1921 | + p->ds_done = p->ds_run; |
1922 | + p->ds_run = NULL; |
1923 | + } |
1924 | spin_unlock_irqrestore(&c->vc.lock, flags); |
1925 | } |
1926 | if (c && (tc2 & BIT(i))) { |
1927 | @@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c) |
1928 | if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d)) |
1929 | return -EAGAIN; |
1930 | |
1931 | + /* Avoid losing track of ds_run if a transaction is in flight */ |
1932 | + if (c->phy->ds_run) |
1933 | + return -EAGAIN; |
1934 | + |
1935 | if (vd) { |
1936 | struct k3_dma_desc_sw *ds = |
1937 | container_of(vd, struct k3_dma_desc_sw, vd); |
1938 | diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c |
1939 | index a031cbcdf6ef..d72a3a5507b0 100644 |
1940 | --- a/drivers/gpio/gpio-mpc8xxx.c |
1941 | +++ b/drivers/gpio/gpio-mpc8xxx.c |
1942 | @@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev) |
1943 | return -ENOMEM; |
1944 | |
1945 | gc = &mpc8xxx_gc->gc; |
1946 | + gc->parent = &pdev->dev; |
1947 | |
1948 | if (of_property_read_bool(np, "little-endian")) { |
1949 | ret = bgpio_init(gc, &pdev->dev, 4, |
1950 | diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c |
1951 | index cd475ff4bcad..7835aad6d162 100644 |
1952 | --- a/drivers/gpio/gpio-zynq.c |
1953 | +++ b/drivers/gpio/gpio-zynq.c |
1954 | @@ -681,6 +681,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) |
1955 | unsigned int bank_num; |
1956 | |
1957 | for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) { |
1958 | + writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr + |
1959 | + ZYNQ_GPIO_INTDIS_OFFSET(bank_num)); |
1960 | writel_relaxed(gpio->context.datalsw[bank_num], |
1961 | gpio->base_addr + |
1962 | ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num)); |
1963 | @@ -690,9 +692,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) |
1964 | writel_relaxed(gpio->context.dirm[bank_num], |
1965 | gpio->base_addr + |
1966 | ZYNQ_GPIO_DIRM_OFFSET(bank_num)); |
1967 | - writel_relaxed(gpio->context.int_en[bank_num], |
1968 | - gpio->base_addr + |
1969 | - ZYNQ_GPIO_INTEN_OFFSET(bank_num)); |
1970 | writel_relaxed(gpio->context.int_type[bank_num], |
1971 | gpio->base_addr + |
1972 | ZYNQ_GPIO_INTTYPE_OFFSET(bank_num)); |
1973 | @@ -702,6 +701,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio) |
1974 | writel_relaxed(gpio->context.int_any[bank_num], |
1975 | gpio->base_addr + |
1976 | ZYNQ_GPIO_INTANY_OFFSET(bank_num)); |
1977 | + writel_relaxed(~(gpio->context.int_en[bank_num]), |
1978 | + gpio->base_addr + |
1979 | + ZYNQ_GPIO_INTEN_OFFSET(bank_num)); |
1980 | } |
1981 | } |
1982 | |
1983 | diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c |
1984 | index 99d19f80440e..3d9524a2abc4 100644 |
1985 | --- a/drivers/gpio/gpiolib.c |
1986 | +++ b/drivers/gpio/gpiolib.c |
1987 | @@ -4328,8 +4328,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, |
1988 | |
1989 | if (chip->ngpio <= p->chip_hwnum) { |
1990 | dev_err(dev, |
1991 | - "requested GPIO %d is out of range [0..%d] for chip %s\n", |
1992 | - idx, chip->ngpio, chip->label); |
1993 | + "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", |
1994 | + idx, p->chip_hwnum, chip->ngpio - 1, |
1995 | + chip->label); |
1996 | return ERR_PTR(-EINVAL); |
1997 | } |
1998 | |
1999 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
2000 | index bd37df5dd6d0..d1e278e999ee 100644 |
2001 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
2002 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h |
2003 | @@ -813,6 +813,7 @@ struct amdgpu_device { |
2004 | uint8_t *bios; |
2005 | uint32_t bios_size; |
2006 | struct amdgpu_bo *stolen_vga_memory; |
2007 | + struct amdgpu_bo *discovery_memory; |
2008 | uint32_t bios_scratch_reg_offset; |
2009 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; |
2010 | |
2011 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c |
2012 | index 1481899f86c1..71198c5318e1 100644 |
2013 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c |
2014 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c |
2015 | @@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin |
2016 | { |
2017 | uint32_t *p = (uint32_t *)binary; |
2018 | uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; |
2019 | - uint64_t pos = vram_size - BINARY_MAX_SIZE; |
2020 | + uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; |
2021 | unsigned long flags; |
2022 | |
2023 | while (pos < vram_size) { |
2024 | @@ -179,7 +179,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) |
2025 | uint16_t checksum; |
2026 | int r; |
2027 | |
2028 | - adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL); |
2029 | + adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); |
2030 | if (!adev->discovery) |
2031 | return -ENOMEM; |
2032 | |
2033 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h |
2034 | index 85b8c4d4d576..5a6693d7d269 100644 |
2035 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h |
2036 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h |
2037 | @@ -24,6 +24,8 @@ |
2038 | #ifndef __AMDGPU_DISCOVERY__ |
2039 | #define __AMDGPU_DISCOVERY__ |
2040 | |
2041 | +#define DISCOVERY_TMR_SIZE (64 << 10) |
2042 | + |
2043 | int amdgpu_discovery_init(struct amdgpu_device *adev); |
2044 | void amdgpu_discovery_fini(struct amdgpu_device *adev); |
2045 | int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); |
2046 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
2047 | index 7289e1b4fb60..28361a9c5add 100644 |
2048 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
2049 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c |
2050 | @@ -342,6 +342,67 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
2051 | return 0; |
2052 | } |
2053 | |
2054 | +/** |
2055 | + * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location |
2056 | + * |
2057 | + * @adev: amdgpu device object |
2058 | + * @offset: offset of the BO |
2059 | + * @size: size of the BO |
2060 | + * @domain: where to place it |
2061 | + * @bo_ptr: used to initialize BOs in structures |
2062 | + * @cpu_addr: optional CPU address mapping |
2063 | + * |
2064 | + * Creates a kernel BO at a specific offset in the address space of the domain. |
2065 | + * |
2066 | + * Returns: |
2067 | + * 0 on success, negative error code otherwise. |
2068 | + */ |
2069 | +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, |
2070 | + uint64_t offset, uint64_t size, uint32_t domain, |
2071 | + struct amdgpu_bo **bo_ptr, void **cpu_addr) |
2072 | +{ |
2073 | + struct ttm_operation_ctx ctx = { false, false }; |
2074 | + unsigned int i; |
2075 | + int r; |
2076 | + |
2077 | + offset &= PAGE_MASK; |
2078 | + size = ALIGN(size, PAGE_SIZE); |
2079 | + |
2080 | + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, |
2081 | + NULL, NULL); |
2082 | + if (r) |
2083 | + return r; |
2084 | + |
2085 | + /* |
2086 | + * Remove the original mem node and create a new one at the request |
2087 | + * position. |
2088 | + */ |
2089 | + for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { |
2090 | + (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; |
2091 | + (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
2092 | + } |
2093 | + |
2094 | + ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); |
2095 | + r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, |
2096 | + &(*bo_ptr)->tbo.mem, &ctx); |
2097 | + if (r) |
2098 | + goto error; |
2099 | + |
2100 | + if (cpu_addr) { |
2101 | + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); |
2102 | + if (r) |
2103 | + goto error; |
2104 | + } |
2105 | + |
2106 | + amdgpu_bo_unreserve(*bo_ptr); |
2107 | + return 0; |
2108 | + |
2109 | +error: |
2110 | + amdgpu_bo_unreserve(*bo_ptr); |
2111 | + amdgpu_bo_unref(bo_ptr); |
2112 | + return r; |
2113 | +} |
2114 | + |
2115 | /** |
2116 | * amdgpu_bo_free_kernel - free BO for kernel use |
2117 | * |
2118 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |
2119 | index 658f4c9779b7..4fcea23ee516 100644 |
2120 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |
2121 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h |
2122 | @@ -237,6 +237,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
2123 | unsigned long size, int align, |
2124 | u32 domain, struct amdgpu_bo **bo_ptr, |
2125 | u64 *gpu_addr, void **cpu_addr); |
2126 | +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, |
2127 | + uint64_t offset, uint64_t size, uint32_t domain, |
2128 | + struct amdgpu_bo **bo_ptr, void **cpu_addr); |
2129 | void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, |
2130 | void **cpu_addr); |
2131 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); |
2132 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c |
2133 | index 016ea274b955..9c5cbc47edf1 100644 |
2134 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c |
2135 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c |
2136 | @@ -65,12 +65,6 @@ const char *ras_block_string[] = { |
2137 | /* inject address is 52 bits */ |
2138 | #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) |
2139 | |
2140 | -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, |
2141 | - uint64_t offset, uint64_t size, |
2142 | - struct amdgpu_bo **bo_ptr); |
2143 | -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, |
2144 | - struct amdgpu_bo **bo_ptr); |
2145 | - |
2146 | static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, |
2147 | size_t size, loff_t *pos) |
2148 | { |
2149 | @@ -1214,75 +1208,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) |
2150 | atomic_set(&ras->in_recovery, 0); |
2151 | } |
2152 | |
2153 | -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, |
2154 | - struct amdgpu_bo **bo_ptr) |
2155 | -{ |
2156 | - /* no need to free it actually. */ |
2157 | - amdgpu_bo_free_kernel(bo_ptr, NULL, NULL); |
2158 | - return 0; |
2159 | -} |
2160 | - |
2161 | -/* reserve vram with size@offset */ |
2162 | -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, |
2163 | - uint64_t offset, uint64_t size, |
2164 | - struct amdgpu_bo **bo_ptr) |
2165 | -{ |
2166 | - struct ttm_operation_ctx ctx = { false, false }; |
2167 | - struct amdgpu_bo_param bp; |
2168 | - int r = 0; |
2169 | - int i; |
2170 | - struct amdgpu_bo *bo; |
2171 | - |
2172 | - if (bo_ptr) |
2173 | - *bo_ptr = NULL; |
2174 | - memset(&bp, 0, sizeof(bp)); |
2175 | - bp.size = size; |
2176 | - bp.byte_align = PAGE_SIZE; |
2177 | - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; |
2178 | - bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | |
2179 | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; |
2180 | - bp.type = ttm_bo_type_kernel; |
2181 | - bp.resv = NULL; |
2182 | - |
2183 | - r = amdgpu_bo_create(adev, &bp, &bo); |
2184 | - if (r) |
2185 | - return -EINVAL; |
2186 | - |
2187 | - r = amdgpu_bo_reserve(bo, false); |
2188 | - if (r) |
2189 | - goto error_reserve; |
2190 | - |
2191 | - offset = ALIGN(offset, PAGE_SIZE); |
2192 | - for (i = 0; i < bo->placement.num_placement; ++i) { |
2193 | - bo->placements[i].fpfn = offset >> PAGE_SHIFT; |
2194 | - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
2195 | - } |
2196 | - |
2197 | - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); |
2198 | - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx); |
2199 | - if (r) |
2200 | - goto error_pin; |
2201 | - |
2202 | - r = amdgpu_bo_pin_restricted(bo, |
2203 | - AMDGPU_GEM_DOMAIN_VRAM, |
2204 | - offset, |
2205 | - offset + size); |
2206 | - if (r) |
2207 | - goto error_pin; |
2208 | - |
2209 | - if (bo_ptr) |
2210 | - *bo_ptr = bo; |
2211 | - |
2212 | - amdgpu_bo_unreserve(bo); |
2213 | - return r; |
2214 | - |
2215 | -error_pin: |
2216 | - amdgpu_bo_unreserve(bo); |
2217 | -error_reserve: |
2218 | - amdgpu_bo_unref(&bo); |
2219 | - return r; |
2220 | -} |
2221 | - |
2222 | /* alloc/realloc bps array */ |
2223 | static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, |
2224 | struct ras_err_handler_data *data, int pages) |
2225 | @@ -1345,7 +1270,7 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) |
2226 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2227 | struct ras_err_handler_data *data; |
2228 | uint64_t bp; |
2229 | - struct amdgpu_bo *bo; |
2230 | + struct amdgpu_bo *bo = NULL; |
2231 | int i; |
2232 | |
2233 | if (!con || !con->eh_data) |
2234 | @@ -1359,12 +1284,14 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) |
2235 | for (i = data->last_reserved; i < data->count; i++) { |
2236 | bp = data->bps[i].bp; |
2237 | |
2238 | - if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT, |
2239 | - PAGE_SIZE, &bo)) |
2240 | + if (amdgpu_bo_create_kernel_at(adev, bp << PAGE_SHIFT, PAGE_SIZE, |
2241 | + AMDGPU_GEM_DOMAIN_VRAM, |
2242 | + &bo, NULL)) |
2243 | DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp); |
2244 | |
2245 | data->bps[i].bo = bo; |
2246 | data->last_reserved = i + 1; |
2247 | + bo = NULL; |
2248 | } |
2249 | out: |
2250 | mutex_unlock(&con->recovery_lock); |
2251 | @@ -1390,7 +1317,7 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) |
2252 | for (i = data->last_reserved - 1; i >= 0; i--) { |
2253 | bo = data->bps[i].bo; |
2254 | |
2255 | - amdgpu_ras_release_vram(adev, &bo); |
2256 | + amdgpu_bo_free_kernel(&bo, NULL, NULL); |
2257 | |
2258 | data->bps[i].bo = bo; |
2259 | data->last_reserved = i; |
2260 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |
2261 | index c0e41f1f0c23..f15ded1ce905 100644 |
2262 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |
2263 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |
2264 | @@ -1639,81 +1639,25 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) |
2265 | */ |
2266 | static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) |
2267 | { |
2268 | - struct ttm_operation_ctx ctx = { false, false }; |
2269 | - struct amdgpu_bo_param bp; |
2270 | - int r = 0; |
2271 | - int i; |
2272 | - u64 vram_size = adev->gmc.visible_vram_size; |
2273 | - u64 offset = adev->fw_vram_usage.start_offset; |
2274 | - u64 size = adev->fw_vram_usage.size; |
2275 | - struct amdgpu_bo *bo; |
2276 | - |
2277 | - memset(&bp, 0, sizeof(bp)); |
2278 | - bp.size = adev->fw_vram_usage.size; |
2279 | - bp.byte_align = PAGE_SIZE; |
2280 | - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; |
2281 | - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
2282 | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; |
2283 | - bp.type = ttm_bo_type_kernel; |
2284 | - bp.resv = NULL; |
2285 | + uint64_t vram_size = adev->gmc.visible_vram_size; |
2286 | + int r; |
2287 | + |
2288 | adev->fw_vram_usage.va = NULL; |
2289 | adev->fw_vram_usage.reserved_bo = NULL; |
2290 | |
2291 | - if (adev->fw_vram_usage.size > 0 && |
2292 | - adev->fw_vram_usage.size <= vram_size) { |
2293 | - |
2294 | - r = amdgpu_bo_create(adev, &bp, |
2295 | - &adev->fw_vram_usage.reserved_bo); |
2296 | - if (r) |
2297 | - goto error_create; |
2298 | - |
2299 | - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); |
2300 | - if (r) |
2301 | - goto error_reserve; |
2302 | - |
2303 | - /* remove the original mem node and create a new one at the |
2304 | - * request position |
2305 | - */ |
2306 | - bo = adev->fw_vram_usage.reserved_bo; |
2307 | - offset = ALIGN(offset, PAGE_SIZE); |
2308 | - for (i = 0; i < bo->placement.num_placement; ++i) { |
2309 | - bo->placements[i].fpfn = offset >> PAGE_SHIFT; |
2310 | - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; |
2311 | - } |
2312 | - |
2313 | - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); |
2314 | - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, |
2315 | - &bo->tbo.mem, &ctx); |
2316 | - if (r) |
2317 | - goto error_pin; |
2318 | - |
2319 | - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, |
2320 | - AMDGPU_GEM_DOMAIN_VRAM, |
2321 | - adev->fw_vram_usage.start_offset, |
2322 | - (adev->fw_vram_usage.start_offset + |
2323 | - adev->fw_vram_usage.size)); |
2324 | - if (r) |
2325 | - goto error_pin; |
2326 | - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, |
2327 | - &adev->fw_vram_usage.va); |
2328 | - if (r) |
2329 | - goto error_kmap; |
2330 | - |
2331 | - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
2332 | - } |
2333 | - return r; |
2334 | + if (adev->fw_vram_usage.size == 0 || |
2335 | + adev->fw_vram_usage.size > vram_size) |
2336 | + return 0; |
2337 | |
2338 | -error_kmap: |
2339 | - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); |
2340 | -error_pin: |
2341 | - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); |
2342 | -error_reserve: |
2343 | - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); |
2344 | -error_create: |
2345 | - adev->fw_vram_usage.va = NULL; |
2346 | - adev->fw_vram_usage.reserved_bo = NULL; |
2347 | + return amdgpu_bo_create_kernel_at(adev, |
2348 | + adev->fw_vram_usage.start_offset, |
2349 | + adev->fw_vram_usage.size, |
2350 | + AMDGPU_GEM_DOMAIN_VRAM, |
2351 | + &adev->fw_vram_usage.reserved_bo, |
2352 | + &adev->fw_vram_usage.va); |
2353 | return r; |
2354 | } |
2355 | + |
2356 | /** |
2357 | * amdgpu_ttm_init - Init the memory management (ttm) as well as various |
2358 | * gtt/vram related fields. |
2359 | @@ -1786,6 +1730,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) |
2360 | NULL, &stolen_vga_buf); |
2361 | if (r) |
2362 | return r; |
2363 | + |
2364 | + /* |
2365 | + * reserve one TMR (64K) memory at the top of VRAM which holds |
2366 | + * IP Discovery data and is protected by PSP. |
2367 | + */ |
2368 | + r = amdgpu_bo_create_kernel_at(adev, |
2369 | + adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, |
2370 | + DISCOVERY_TMR_SIZE, |
2371 | + AMDGPU_GEM_DOMAIN_VRAM, |
2372 | + &adev->discovery_memory, |
2373 | + NULL); |
2374 | + if (r) |
2375 | + return r; |
2376 | + |
2377 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
2378 | (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); |
2379 | |
2380 | @@ -1850,6 +1808,9 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev) |
2381 | void *stolen_vga_buf; |
2382 | /* return the VGA stolen memory (if any) back to VRAM */ |
2383 | amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); |
2384 | + |
2385 | + /* return the IP Discovery TMR memory back to VRAM */ |
2386 | + amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); |
2387 | } |
2388 | |
2389 | /** |
2390 | diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c |
2391 | index c9ba2ec6d038..ab4a0d8545dc 100644 |
2392 | --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c |
2393 | +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c |
2394 | @@ -1038,17 +1038,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) |
2395 | case CHIP_VEGA20: |
2396 | break; |
2397 | case CHIP_RAVEN: |
2398 | - /* Disable GFXOFF on original raven. There are combinations |
2399 | - * of sbios and platforms that are not stable. |
2400 | - */ |
2401 | - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) |
2402 | - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; |
2403 | - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) |
2404 | - &&((adev->gfx.rlc_fw_version != 106 && |
2405 | - adev->gfx.rlc_fw_version < 531) || |
2406 | - (adev->gfx.rlc_fw_version == 53815) || |
2407 | - (adev->gfx.rlc_feature_version < 1) || |
2408 | - !adev->gfx.rlc.is_rlc_v2_1)) |
2409 | + if (!(adev->rev_id >= 0x8 || |
2410 | + adev->pdev->device == 0x15d8) && |
2411 | + (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ |
2412 | + !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ |
2413 | adev->pm.pp_feature &= ~PP_GFXOFF_MASK; |
2414 | |
2415 | if (adev->pm.pp_feature & PP_GFXOFF_MASK) |
2416 | diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h |
2417 | index 5dcb776548d8..7ec4331e67f2 100644 |
2418 | --- a/drivers/gpu/drm/amd/include/discovery.h |
2419 | +++ b/drivers/gpu/drm/amd/include/discovery.h |
2420 | @@ -25,7 +25,6 @@ |
2421 | #define _DISCOVERY_H_ |
2422 | |
2423 | #define PSP_HEADER_SIZE 256 |
2424 | -#define BINARY_MAX_SIZE (64 << 10) |
2425 | #define BINARY_SIGNATURE 0x28211407 |
2426 | #define DISCOVERY_TABLE_SIGNATURE 0x53445049 |
2427 | |
2428 | diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c |
2429 | index 875a3a9eabfa..7d0e7b031e44 100644 |
2430 | --- a/drivers/gpu/drm/arm/malidp_mw.c |
2431 | +++ b/drivers/gpu/drm/arm/malidp_mw.c |
2432 | @@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector, |
2433 | return MODE_OK; |
2434 | } |
2435 | |
2436 | -const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { |
2437 | +static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = { |
2438 | .get_modes = malidp_mw_connector_get_modes, |
2439 | .mode_valid = malidp_mw_connector_mode_valid, |
2440 | }; |
2441 | diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c |
2442 | index 6fb7d74ff553..bc7cc32140f8 100644 |
2443 | --- a/drivers/gpu/drm/tegra/drm.c |
2444 | +++ b/drivers/gpu/drm/tegra/drm.c |
2445 | @@ -201,19 +201,19 @@ hub: |
2446 | if (tegra->hub) |
2447 | tegra_display_hub_cleanup(tegra->hub); |
2448 | device: |
2449 | - host1x_device_exit(device); |
2450 | -fbdev: |
2451 | - drm_kms_helper_poll_fini(drm); |
2452 | - tegra_drm_fb_free(drm); |
2453 | -config: |
2454 | - drm_mode_config_cleanup(drm); |
2455 | - |
2456 | if (tegra->domain) { |
2457 | mutex_destroy(&tegra->mm_lock); |
2458 | drm_mm_takedown(&tegra->mm); |
2459 | put_iova_domain(&tegra->carveout.domain); |
2460 | iova_cache_put(); |
2461 | } |
2462 | + |
2463 | + host1x_device_exit(device); |
2464 | +fbdev: |
2465 | + drm_kms_helper_poll_fini(drm); |
2466 | + tegra_drm_fb_free(drm); |
2467 | +config: |
2468 | + drm_mode_config_cleanup(drm); |
2469 | domain: |
2470 | if (tegra->domain) |
2471 | iommu_domain_free(tegra->domain); |
2472 | diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2473 | index a662394f6892..0a88ef11b9d3 100644 |
2474 | --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2475 | +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c |
2476 | @@ -463,29 +463,25 @@ out: |
2477 | } |
2478 | |
2479 | static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, |
2480 | - struct drm_file *file) |
2481 | + struct drm_file *file) |
2482 | { |
2483 | struct drm_virtgpu_3d_wait *args = data; |
2484 | - struct drm_gem_object *obj; |
2485 | - long timeout = 15 * HZ; |
2486 | + struct drm_gem_object *gobj = NULL; |
2487 | + struct virtio_gpu_object *qobj = NULL; |
2488 | int ret; |
2489 | + bool nowait = false; |
2490 | |
2491 | - obj = drm_gem_object_lookup(file, args->handle); |
2492 | - if (obj == NULL) |
2493 | + gobj = drm_gem_object_lookup(file, args->handle); |
2494 | + if (gobj == NULL) |
2495 | return -ENOENT; |
2496 | |
2497 | - if (args->flags & VIRTGPU_WAIT_NOWAIT) { |
2498 | - ret = dma_resv_test_signaled_rcu(obj->resv, true); |
2499 | - } else { |
2500 | - ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, |
2501 | - timeout); |
2502 | - } |
2503 | - if (ret == 0) |
2504 | - ret = -EBUSY; |
2505 | - else if (ret > 0) |
2506 | - ret = 0; |
2507 | + qobj = gem_to_virtio_gpu_obj(gobj); |
2508 | |
2509 | - drm_gem_object_put_unlocked(obj); |
2510 | + if (args->flags & VIRTGPU_WAIT_NOWAIT) |
2511 | + nowait = true; |
2512 | + ret = virtio_gpu_object_wait(qobj, nowait); |
2513 | + |
2514 | + drm_gem_object_put_unlocked(gobj); |
2515 | return ret; |
2516 | } |
2517 | |
2518 | diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c |
2519 | index c25e95c19cad..b382c6bf2c5c 100644 |
2520 | --- a/drivers/hid/hidraw.c |
2521 | +++ b/drivers/hid/hidraw.c |
2522 | @@ -249,13 +249,14 @@ out: |
2523 | static __poll_t hidraw_poll(struct file *file, poll_table *wait) |
2524 | { |
2525 | struct hidraw_list *list = file->private_data; |
2526 | + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */ |
2527 | |
2528 | poll_wait(file, &list->hidraw->wait, wait); |
2529 | if (list->head != list->tail) |
2530 | - return EPOLLIN | EPOLLRDNORM; |
2531 | + mask |= EPOLLIN | EPOLLRDNORM; |
2532 | if (!list->hidraw->exist) |
2533 | - return EPOLLERR | EPOLLHUP; |
2534 | - return EPOLLOUT | EPOLLWRNORM; |
2535 | + mask |= EPOLLERR | EPOLLHUP; |
2536 | + return mask; |
2537 | } |
2538 | |
2539 | static int hidraw_open(struct inode *inode, struct file *file) |
2540 | diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c |
2541 | index 935c3d0a3b63..8fe3efcb8327 100644 |
2542 | --- a/drivers/hid/uhid.c |
2543 | +++ b/drivers/hid/uhid.c |
2544 | @@ -766,13 +766,14 @@ unlock: |
2545 | static __poll_t uhid_char_poll(struct file *file, poll_table *wait) |
2546 | { |
2547 | struct uhid_device *uhid = file->private_data; |
2548 | + __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */ |
2549 | |
2550 | poll_wait(file, &uhid->waitq, wait); |
2551 | |
2552 | if (uhid->head != uhid->tail) |
2553 | - return EPOLLIN | EPOLLRDNORM; |
2554 | + mask |= EPOLLIN | EPOLLRDNORM; |
2555 | |
2556 | - return EPOLLOUT | EPOLLWRNORM; |
2557 | + return mask; |
2558 | } |
2559 | |
2560 | static const struct file_operations uhid_fops = { |
2561 | diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c |
2562 | index e01b2b57e724..5ab901ad615d 100644 |
2563 | --- a/drivers/i2c/busses/i2c-bcm2835.c |
2564 | +++ b/drivers/i2c/busses/i2c-bcm2835.c |
2565 | @@ -58,6 +58,7 @@ struct bcm2835_i2c_dev { |
2566 | struct i2c_adapter adapter; |
2567 | struct completion completion; |
2568 | struct i2c_msg *curr_msg; |
2569 | + struct clk *bus_clk; |
2570 | int num_msgs; |
2571 | u32 msg_err; |
2572 | u8 *msg_buf; |
2573 | @@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) |
2574 | struct resource *mem, *irq; |
2575 | int ret; |
2576 | struct i2c_adapter *adap; |
2577 | - struct clk *bus_clk; |
2578 | struct clk *mclk; |
2579 | u32 bus_clk_rate; |
2580 | |
2581 | @@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) |
2582 | return PTR_ERR(mclk); |
2583 | } |
2584 | |
2585 | - bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); |
2586 | + i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev); |
2587 | |
2588 | - if (IS_ERR(bus_clk)) { |
2589 | + if (IS_ERR(i2c_dev->bus_clk)) { |
2590 | dev_err(&pdev->dev, "Could not register clock\n"); |
2591 | - return PTR_ERR(bus_clk); |
2592 | + return PTR_ERR(i2c_dev->bus_clk); |
2593 | } |
2594 | |
2595 | ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", |
2596 | @@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) |
2597 | bus_clk_rate = 100000; |
2598 | } |
2599 | |
2600 | - ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate); |
2601 | + ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate); |
2602 | if (ret < 0) { |
2603 | dev_err(&pdev->dev, "Could not set clock frequency\n"); |
2604 | return ret; |
2605 | } |
2606 | |
2607 | - ret = clk_prepare_enable(bus_clk); |
2608 | + ret = clk_prepare_enable(i2c_dev->bus_clk); |
2609 | if (ret) { |
2610 | dev_err(&pdev->dev, "Couldn't prepare clock"); |
2611 | return ret; |
2612 | @@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev) |
2613 | static int bcm2835_i2c_remove(struct platform_device *pdev) |
2614 | { |
2615 | struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev); |
2616 | - struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div"); |
2617 | |
2618 | - clk_rate_exclusive_put(bus_clk); |
2619 | - clk_disable_unprepare(bus_clk); |
2620 | + clk_rate_exclusive_put(i2c_dev->bus_clk); |
2621 | + clk_disable_unprepare(i2c_dev->bus_clk); |
2622 | |
2623 | free_irq(i2c_dev->irq, i2c_dev); |
2624 | i2c_del_adapter(&i2c_dev->adapter); |
2625 | diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c |
2626 | index 7b966a41d623..cf7b59d97802 100644 |
2627 | --- a/drivers/iio/imu/adis16480.c |
2628 | +++ b/drivers/iio/imu/adis16480.c |
2629 | @@ -454,12 +454,14 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev, |
2630 | case IIO_MAGN: |
2631 | case IIO_PRESSURE: |
2632 | ret = adis_read_reg_16(&st->adis, reg, &val16); |
2633 | - *bias = sign_extend32(val16, 15); |
2634 | + if (ret == 0) |
2635 | + *bias = sign_extend32(val16, 15); |
2636 | break; |
2637 | case IIO_ANGL_VEL: |
2638 | case IIO_ACCEL: |
2639 | ret = adis_read_reg_32(&st->adis, reg, &val32); |
2640 | - *bias = sign_extend32(val32, 31); |
2641 | + if (ret == 0) |
2642 | + *bias = sign_extend32(val32, 31); |
2643 | break; |
2644 | default: |
2645 | ret = -EINVAL; |
2646 | diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c |
2647 | index 28e011b35f21..3e0528793d95 100644 |
2648 | --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c |
2649 | +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c |
2650 | @@ -152,9 +152,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = { |
2651 | .addr = 0x10, |
2652 | .mask = GENMASK(4, 3), |
2653 | }, |
2654 | - .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 }, |
2655 | - .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 }, |
2656 | - .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 }, |
2657 | + |
2658 | + .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 }, |
2659 | + .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 }, |
2660 | + .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 }, |
2661 | .fs_len = 3, |
2662 | }, |
2663 | }, |
2664 | diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c |
2665 | index 023478107f0e..46dd50ff7c85 100644 |
2666 | --- a/drivers/infiniband/core/counters.c |
2667 | +++ b/drivers/infiniband/core/counters.c |
2668 | @@ -466,10 +466,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev, |
2669 | int rdma_counter_bind_qpn(struct ib_device *dev, u8 port, |
2670 | u32 qp_num, u32 counter_id) |
2671 | { |
2672 | + struct rdma_port_counter *port_counter; |
2673 | struct rdma_counter *counter; |
2674 | struct ib_qp *qp; |
2675 | int ret; |
2676 | |
2677 | + port_counter = &dev->port_data[port].port_counter; |
2678 | + if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) |
2679 | + return -EINVAL; |
2680 | + |
2681 | qp = rdma_counter_get_qp(dev, qp_num); |
2682 | if (!qp) |
2683 | return -ENOENT; |
2684 | @@ -506,6 +511,7 @@ err: |
2685 | int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, |
2686 | u32 qp_num, u32 *counter_id) |
2687 | { |
2688 | + struct rdma_port_counter *port_counter; |
2689 | struct rdma_counter *counter; |
2690 | struct ib_qp *qp; |
2691 | int ret; |
2692 | @@ -513,9 +519,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port, |
2693 | if (!rdma_is_port_valid(dev, port)) |
2694 | return -EINVAL; |
2695 | |
2696 | - if (!dev->port_data[port].port_counter.hstats) |
2697 | + port_counter = &dev->port_data[port].port_counter; |
2698 | + if (!port_counter->hstats) |
2699 | return -EOPNOTSUPP; |
2700 | |
2701 | + if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO) |
2702 | + return -EINVAL; |
2703 | + |
2704 | qp = rdma_counter_get_qp(dev, qp_num); |
2705 | if (!qp) |
2706 | return -ENOENT; |
2707 | diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c |
2708 | index b4149dc9e824..ebc3e3d4a6e2 100644 |
2709 | --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c |
2710 | +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c |
2711 | @@ -3323,8 +3323,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata) |
2712 | int rc; |
2713 | |
2714 | rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr); |
2715 | - if (rc) |
2716 | + if (rc) { |
2717 | dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc); |
2718 | + return rc; |
2719 | + } |
2720 | |
2721 | if (mr->pages) { |
2722 | rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, |
2723 | diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c |
2724 | index 958c1ff9c515..4d07d22bfa7b 100644 |
2725 | --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c |
2726 | +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c |
2727 | @@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, |
2728 | /* Add qp to flush list of the CQ */ |
2729 | bnxt_qplib_add_flush_qp(qp); |
2730 | } else { |
2731 | + /* Before we complete, do WA 9060 */ |
2732 | + if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, |
2733 | + cqe_sq_cons)) { |
2734 | + *lib_qp = qp; |
2735 | + goto out; |
2736 | + } |
2737 | if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) { |
2738 | - /* Before we complete, do WA 9060 */ |
2739 | - if (do_wa9060(qp, cq, cq_cons, sw_sq_cons, |
2740 | - cqe_sq_cons)) { |
2741 | - *lib_qp = qp; |
2742 | - goto out; |
2743 | - } |
2744 | cqe->status = CQ_REQ_STATUS_OK; |
2745 | cqe++; |
2746 | (*budget)--; |
2747 | diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c |
2748 | index adb4a1ba921b..5836fe7b2817 100644 |
2749 | --- a/drivers/infiniband/hw/hfi1/iowait.c |
2750 | +++ b/drivers/infiniband/hw/hfi1/iowait.c |
2751 | @@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit, |
2752 | void iowait_cancel_work(struct iowait *w) |
2753 | { |
2754 | cancel_work_sync(&iowait_get_ib_work(w)->iowork); |
2755 | - cancel_work_sync(&iowait_get_tid_work(w)->iowork); |
2756 | + /* Make sure that the iowork for TID RDMA is used */ |
2757 | + if (iowait_get_tid_work(w)->iowork.func) |
2758 | + cancel_work_sync(&iowait_get_tid_work(w)->iowork); |
2759 | } |
2760 | |
2761 | /** |
2762 | diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig |
2763 | index d602b698b57e..4921c1e40ccd 100644 |
2764 | --- a/drivers/infiniband/hw/hns/Kconfig |
2765 | +++ b/drivers/infiniband/hw/hns/Kconfig |
2766 | @@ -1,23 +1,34 @@ |
2767 | # SPDX-License-Identifier: GPL-2.0-only |
2768 | config INFINIBAND_HNS |
2769 | - bool "HNS RoCE Driver" |
2770 | + tristate "HNS RoCE Driver" |
2771 | depends on NET_VENDOR_HISILICON |
2772 | depends on ARM64 || (COMPILE_TEST && 64BIT) |
2773 | + depends on (HNS_DSAF && HNS_ENET) || HNS3 |
2774 | ---help--- |
2775 | This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine |
2776 | is used in Hisilicon Hip06 and more further ICT SoC based on |
2777 | platform device. |
2778 | |
2779 | + To compile HIP06 or HIP08 driver as module, choose M here. |
2780 | + |
2781 | config INFINIBAND_HNS_HIP06 |
2782 | - tristate "Hisilicon Hip06 Family RoCE support" |
2783 | + bool "Hisilicon Hip06 Family RoCE support" |
2784 | depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET |
2785 | + depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) |
2786 | ---help--- |
2787 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and |
2788 | Hip07 SoC. These RoCE engines are platform devices. |
2789 | |
2790 | + To compile this driver, choose Y here: if INFINIBAND_HNS is m, this |
2791 | + module will be called hns-roce-hw-v1 |
2792 | + |
2793 | config INFINIBAND_HNS_HIP08 |
2794 | - tristate "Hisilicon Hip08 Family RoCE support" |
2795 | + bool "Hisilicon Hip08 Family RoCE support" |
2796 | depends on INFINIBAND_HNS && PCI && HNS3 |
2797 | + depends on INFINIBAND_HNS=m || HNS3=y |
2798 | ---help--- |
2799 | RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC. |
2800 | The RoCE engine is a PCI device. |
2801 | + |
2802 | + To compile this driver, choose Y here: if INFINIBAND_HNS is m, this |
2803 | + module will be called hns-roce-hw-v2. |
2804 | diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile |
2805 | index 449a2d81319d..e105945b94a1 100644 |
2806 | --- a/drivers/infiniband/hw/hns/Makefile |
2807 | +++ b/drivers/infiniband/hw/hns/Makefile |
2808 | @@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ |
2809 | hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ |
2810 | hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o |
2811 | |
2812 | +ifdef CONFIG_INFINIBAND_HNS_HIP06 |
2813 | hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) |
2814 | -obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o |
2815 | +obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o |
2816 | +endif |
2817 | |
2818 | +ifdef CONFIG_INFINIBAND_HNS_HIP08 |
2819 | hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) |
2820 | -obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o |
2821 | +obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o |
2822 | +endif |
2823 | diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
2824 | index e82567fcdeb7..79294f278b26 100644 |
2825 | --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
2826 | +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c |
2827 | @@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, |
2828 | roce_set_field(ud_sq_wqe->byte_36, |
2829 | V2_UD_SEND_WQE_BYTE_36_VLAN_M, |
2830 | V2_UD_SEND_WQE_BYTE_36_VLAN_S, |
2831 | - le16_to_cpu(ah->av.vlan)); |
2832 | + ah->av.vlan); |
2833 | roce_set_field(ud_sq_wqe->byte_36, |
2834 | V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M, |
2835 | V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, |
2836 | @@ -4650,16 +4650,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, |
2837 | { |
2838 | struct hns_roce_cq *send_cq, *recv_cq; |
2839 | struct ib_device *ibdev = &hr_dev->ib_dev; |
2840 | - int ret; |
2841 | + int ret = 0; |
2842 | |
2843 | if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) { |
2844 | /* Modify qp to reset before destroying qp */ |
2845 | ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, |
2846 | hr_qp->state, IB_QPS_RESET); |
2847 | - if (ret) { |
2848 | + if (ret) |
2849 | ibdev_err(ibdev, "modify QP to Reset failed.\n"); |
2850 | - return ret; |
2851 | - } |
2852 | } |
2853 | |
2854 | send_cq = to_hr_cq(hr_qp->ibqp.send_cq); |
2855 | @@ -4715,7 +4713,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, |
2856 | kfree(hr_qp->rq_inl_buf.wqe_list); |
2857 | } |
2858 | |
2859 | - return 0; |
2860 | + return ret; |
2861 | } |
2862 | |
2863 | static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
2864 | @@ -4725,11 +4723,9 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) |
2865 | int ret; |
2866 | |
2867 | ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); |
2868 | - if (ret) { |
2869 | + if (ret) |
2870 | ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", |
2871 | hr_qp->qpn, ret); |
2872 | - return ret; |
2873 | - } |
2874 | |
2875 | if (hr_qp->ibqp.qp_type == IB_QPT_GSI) |
2876 | kfree(hr_to_hr_sqp(hr_qp)); |
2877 | @@ -6092,11 +6088,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, |
2878 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, |
2879 | SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M, |
2880 | SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S, |
2881 | - hr_dev->caps.idx_ba_pg_sz); |
2882 | + hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET); |
2883 | roce_set_field(srq_context->byte_44_idxbufpgsz_addr, |
2884 | SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M, |
2885 | SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S, |
2886 | - hr_dev->caps.idx_buf_pg_sz); |
2887 | + hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET); |
2888 | |
2889 | srq_context->idx_nxt_blk_addr = |
2890 | cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT); |
2891 | diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
2892 | index 43219d2f7de0..76a14db7028d 100644 |
2893 | --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
2894 | +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h |
2895 | @@ -87,8 +87,8 @@ |
2896 | #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 |
2897 | #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 |
2898 | #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 |
2899 | -#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096 |
2900 | -#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096 |
2901 | +#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE |
2902 | +#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE |
2903 | #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 |
2904 | #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 |
2905 | #define HNS_ROCE_INVALID_LKEY 0x100 |
2906 | diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c |
2907 | index bd78ff90d998..8dd2d666f687 100644 |
2908 | --- a/drivers/infiniband/hw/hns/hns_roce_qp.c |
2909 | +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c |
2910 | @@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev, |
2911 | u8 max_sq_stride = ilog2(roundup_sq_stride); |
2912 | |
2913 | /* Sanity check SQ size before proceeding */ |
2914 | - if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes || |
2915 | - ucmd->log_sq_stride > max_sq_stride || |
2916 | - ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { |
2917 | + if (ucmd->log_sq_stride > max_sq_stride || |
2918 | + ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) { |
2919 | ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n"); |
2920 | return -EINVAL; |
2921 | } |
2922 | @@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev, |
2923 | u32 max_cnt; |
2924 | int ret; |
2925 | |
2926 | + if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) || |
2927 | + hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) |
2928 | + return -EINVAL; |
2929 | + |
2930 | ret = check_sq_size_with_integrity(hr_dev, cap, ucmd); |
2931 | if (ret) { |
2932 | ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n"); |
2933 | return ret; |
2934 | } |
2935 | |
2936 | - hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count; |
2937 | hr_qp->sq.wqe_shift = ucmd->log_sq_stride; |
2938 | |
2939 | max_cnt = max(1U, cap->max_send_sge); |
2940 | diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c |
2941 | index 06871731ac43..39c08217e861 100644 |
2942 | --- a/drivers/infiniband/hw/hns/hns_roce_restrack.c |
2943 | +++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c |
2944 | @@ -95,7 +95,7 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg, |
2945 | |
2946 | ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context); |
2947 | if (ret) |
2948 | - goto err; |
2949 | + return -EINVAL; |
2950 | |
2951 | table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER); |
2952 | if (!table_attr) { |
2953 | diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c |
2954 | index 7019c12005f4..99d563dba91b 100644 |
2955 | --- a/drivers/infiniband/hw/mlx5/mr.c |
2956 | +++ b/drivers/infiniband/hw/mlx5/mr.c |
2957 | @@ -428,7 +428,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry) |
2958 | |
2959 | if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) { |
2960 | mlx5_ib_err(dev, "cache entry %d is out of range\n", entry); |
2961 | - return NULL; |
2962 | + return ERR_PTR(-EINVAL); |
2963 | } |
2964 | |
2965 | ent = &cache->ent[entry]; |
2966 | diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c |
2967 | index 8c1931a57f4a..0454561718d9 100644 |
2968 | --- a/drivers/infiniband/sw/siw/siw_cm.c |
2969 | +++ b/drivers/infiniband/sw/siw/siw_cm.c |
2970 | @@ -1867,14 +1867,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog, |
2971 | list_add_tail(&cep->listenq, (struct list_head *)id->provider_data); |
2972 | cep->state = SIW_EPSTATE_LISTENING; |
2973 | |
2974 | - if (addr_family == AF_INET) |
2975 | - siw_dbg(id->device, "Listen at laddr %pI4 %u\n", |
2976 | - &(((struct sockaddr_in *)laddr)->sin_addr), |
2977 | - ((struct sockaddr_in *)laddr)->sin_port); |
2978 | - else |
2979 | - siw_dbg(id->device, "Listen at laddr %pI6 %u\n", |
2980 | - &(((struct sockaddr_in6 *)laddr)->sin6_addr), |
2981 | - ((struct sockaddr_in6 *)laddr)->sin6_port); |
2982 | + siw_dbg(id->device, "Listen at laddr %pISp\n", laddr); |
2983 | |
2984 | return 0; |
2985 | |
2986 | diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2987 | index e25c70a56be6..02b92e3cd9a8 100644 |
2988 | --- a/drivers/infiniband/ulp/srpt/ib_srpt.c |
2989 | +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c |
2990 | @@ -1364,9 +1364,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, |
2991 | struct srpt_send_ioctx *ioctx, u64 tag, |
2992 | int status) |
2993 | { |
2994 | + struct se_cmd *cmd = &ioctx->cmd; |
2995 | struct srp_rsp *srp_rsp; |
2996 | const u8 *sense_data; |
2997 | int sense_data_len, max_sense_len; |
2998 | + u32 resid = cmd->residual_count; |
2999 | |
3000 | /* |
3001 | * The lowest bit of all SAM-3 status codes is zero (see also |
3002 | @@ -1388,6 +1390,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch, |
3003 | srp_rsp->tag = tag; |
3004 | srp_rsp->status = status; |
3005 | |
3006 | + if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { |
3007 | + if (cmd->data_direction == DMA_TO_DEVICE) { |
3008 | + /* residual data from an underflow write */ |
3009 | + srp_rsp->flags = SRP_RSP_FLAG_DOUNDER; |
3010 | + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); |
3011 | + } else if (cmd->data_direction == DMA_FROM_DEVICE) { |
3012 | + /* residual data from an underflow read */ |
3013 | + srp_rsp->flags = SRP_RSP_FLAG_DIUNDER; |
3014 | + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); |
3015 | + } |
3016 | + } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { |
3017 | + if (cmd->data_direction == DMA_TO_DEVICE) { |
3018 | + /* residual data from an overflow write */ |
3019 | + srp_rsp->flags = SRP_RSP_FLAG_DOOVER; |
3020 | + srp_rsp->data_out_res_cnt = cpu_to_be32(resid); |
3021 | + } else if (cmd->data_direction == DMA_FROM_DEVICE) { |
3022 | + /* residual data from an overflow read */ |
3023 | + srp_rsp->flags = SRP_RSP_FLAG_DIOVER; |
3024 | + srp_rsp->data_in_res_cnt = cpu_to_be32(resid); |
3025 | + } |
3026 | + } |
3027 | + |
3028 | if (sense_data_len) { |
3029 | BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp)); |
3030 | max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp); |
3031 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
3032 | index 67aa317de6db..e84c5dfe146f 100644 |
3033 | --- a/drivers/iommu/intel-iommu.c |
3034 | +++ b/drivers/iommu/intel-iommu.c |
3035 | @@ -5593,8 +5593,10 @@ static int intel_iommu_add_device(struct device *dev) |
3036 | |
3037 | group = iommu_group_get_for_dev(dev); |
3038 | |
3039 | - if (IS_ERR(group)) |
3040 | - return PTR_ERR(group); |
3041 | + if (IS_ERR(group)) { |
3042 | + ret = PTR_ERR(group); |
3043 | + goto unlink; |
3044 | + } |
3045 | |
3046 | iommu_group_put(group); |
3047 | |
3048 | @@ -5620,7 +5622,8 @@ static int intel_iommu_add_device(struct device *dev) |
3049 | if (!get_private_domain_for_dev(dev)) { |
3050 | dev_warn(dev, |
3051 | "Failed to get a private domain.\n"); |
3052 | - return -ENOMEM; |
3053 | + ret = -ENOMEM; |
3054 | + goto unlink; |
3055 | } |
3056 | |
3057 | dev_info(dev, |
3058 | @@ -5635,6 +5638,10 @@ static int intel_iommu_add_device(struct device *dev) |
3059 | } |
3060 | |
3061 | return 0; |
3062 | + |
3063 | +unlink: |
3064 | + iommu_device_unlink(&iommu->iommu, dev); |
3065 | + return ret; |
3066 | } |
3067 | |
3068 | static void intel_iommu_remove_device(struct device *dev) |
3069 | diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c |
3070 | index 24248aa8a7e5..cd3c0ea56657 100644 |
3071 | --- a/drivers/iommu/iommu.c |
3072 | +++ b/drivers/iommu/iommu.c |
3073 | @@ -751,6 +751,7 @@ err_put_group: |
3074 | mutex_unlock(&group->mutex); |
3075 | dev->iommu_group = NULL; |
3076 | kobject_put(group->devices_kobj); |
3077 | + sysfs_remove_link(group->devices_kobj, device->name); |
3078 | err_free_name: |
3079 | kfree(device->name); |
3080 | err_remove_link: |
3081 | diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c |
3082 | index 67a483c1a935..c2f6c78fee44 100644 |
3083 | --- a/drivers/iommu/mtk_iommu.c |
3084 | +++ b/drivers/iommu/mtk_iommu.c |
3085 | @@ -219,22 +219,37 @@ static void mtk_iommu_tlb_sync(void *cookie) |
3086 | static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size, |
3087 | size_t granule, void *cookie) |
3088 | { |
3089 | + struct mtk_iommu_data *data = cookie; |
3090 | + unsigned long flags; |
3091 | + |
3092 | + spin_lock_irqsave(&data->tlb_lock, flags); |
3093 | mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie); |
3094 | mtk_iommu_tlb_sync(cookie); |
3095 | + spin_unlock_irqrestore(&data->tlb_lock, flags); |
3096 | } |
3097 | |
3098 | static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size, |
3099 | size_t granule, void *cookie) |
3100 | { |
3101 | + struct mtk_iommu_data *data = cookie; |
3102 | + unsigned long flags; |
3103 | + |
3104 | + spin_lock_irqsave(&data->tlb_lock, flags); |
3105 | mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie); |
3106 | mtk_iommu_tlb_sync(cookie); |
3107 | + spin_unlock_irqrestore(&data->tlb_lock, flags); |
3108 | } |
3109 | |
3110 | static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, |
3111 | unsigned long iova, size_t granule, |
3112 | void *cookie) |
3113 | { |
3114 | + struct mtk_iommu_data *data = cookie; |
3115 | + unsigned long flags; |
3116 | + |
3117 | + spin_lock_irqsave(&data->tlb_lock, flags); |
3118 | mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie); |
3119 | + spin_unlock_irqrestore(&data->tlb_lock, flags); |
3120 | } |
3121 | |
3122 | static const struct iommu_flush_ops mtk_iommu_flush_ops = { |
3123 | @@ -447,13 +462,18 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain, |
3124 | |
3125 | static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain) |
3126 | { |
3127 | - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); |
3128 | + mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data()); |
3129 | } |
3130 | |
3131 | static void mtk_iommu_iotlb_sync(struct iommu_domain *domain, |
3132 | struct iommu_iotlb_gather *gather) |
3133 | { |
3134 | - mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data()); |
3135 | + struct mtk_iommu_data *data = mtk_iommu_get_m4u_data(); |
3136 | + unsigned long flags; |
3137 | + |
3138 | + spin_lock_irqsave(&data->tlb_lock, flags); |
3139 | + mtk_iommu_tlb_sync(data); |
3140 | + spin_unlock_irqrestore(&data->tlb_lock, flags); |
3141 | } |
3142 | |
3143 | static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain, |
3144 | @@ -733,6 +753,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) |
3145 | if (ret) |
3146 | return ret; |
3147 | |
3148 | + spin_lock_init(&data->tlb_lock); |
3149 | list_add_tail(&data->list, &m4ulist); |
3150 | |
3151 | if (!iommu_present(&platform_bus_type)) |
3152 | diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h |
3153 | index fc0f16eabacd..8cae22de7663 100644 |
3154 | --- a/drivers/iommu/mtk_iommu.h |
3155 | +++ b/drivers/iommu/mtk_iommu.h |
3156 | @@ -58,6 +58,7 @@ struct mtk_iommu_data { |
3157 | struct iommu_group *m4u_group; |
3158 | bool enable_4GB; |
3159 | bool tlb_flush_active; |
3160 | + spinlock_t tlb_lock; /* lock for tlb range flush */ |
3161 | |
3162 | struct iommu_device iommu; |
3163 | const struct mtk_iommu_plat_data *plat_data; |
3164 | diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c |
3165 | index a5b2448c0abc..af482620f94a 100644 |
3166 | --- a/drivers/media/i2c/ov6650.c |
3167 | +++ b/drivers/media/i2c/ov6650.c |
3168 | @@ -201,7 +201,6 @@ struct ov6650 { |
3169 | unsigned long pclk_max; /* from resolution and format */ |
3170 | struct v4l2_fract tpf; /* as requested with s_frame_interval */ |
3171 | u32 code; |
3172 | - enum v4l2_colorspace colorspace; |
3173 | }; |
3174 | |
3175 | |
3176 | @@ -214,6 +213,17 @@ static u32 ov6650_codes[] = { |
3177 | MEDIA_BUS_FMT_Y8_1X8, |
3178 | }; |
3179 | |
3180 | +static const struct v4l2_mbus_framefmt ov6650_def_fmt = { |
3181 | + .width = W_CIF, |
3182 | + .height = H_CIF, |
3183 | + .code = MEDIA_BUS_FMT_SBGGR8_1X8, |
3184 | + .colorspace = V4L2_COLORSPACE_SRGB, |
3185 | + .field = V4L2_FIELD_NONE, |
3186 | + .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT, |
3187 | + .quantization = V4L2_QUANTIZATION_DEFAULT, |
3188 | + .xfer_func = V4L2_XFER_FUNC_DEFAULT, |
3189 | +}; |
3190 | + |
3191 | /* read a register */ |
3192 | static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val) |
3193 | { |
3194 | @@ -514,12 +524,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd, |
3195 | if (format->pad) |
3196 | return -EINVAL; |
3197 | |
3198 | - mf->width = priv->rect.width >> priv->half_scale; |
3199 | - mf->height = priv->rect.height >> priv->half_scale; |
3200 | - mf->code = priv->code; |
3201 | - mf->colorspace = priv->colorspace; |
3202 | - mf->field = V4L2_FIELD_NONE; |
3203 | + /* initialize response with default media bus frame format */ |
3204 | + *mf = ov6650_def_fmt; |
3205 | + |
3206 | + /* update media bus format code and frame size */ |
3207 | + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { |
3208 | + mf->width = cfg->try_fmt.width; |
3209 | + mf->height = cfg->try_fmt.height; |
3210 | + mf->code = cfg->try_fmt.code; |
3211 | |
3212 | + } else { |
3213 | + mf->width = priv->rect.width >> priv->half_scale; |
3214 | + mf->height = priv->rect.height >> priv->half_scale; |
3215 | + mf->code = priv->code; |
3216 | + } |
3217 | return 0; |
3218 | } |
3219 | |
3220 | @@ -624,11 +642,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) |
3221 | priv->pclk_max = 8000000; |
3222 | } |
3223 | |
3224 | - if (code == MEDIA_BUS_FMT_SBGGR8_1X8) |
3225 | - priv->colorspace = V4L2_COLORSPACE_SRGB; |
3226 | - else if (code != 0) |
3227 | - priv->colorspace = V4L2_COLORSPACE_JPEG; |
3228 | - |
3229 | if (half_scale) { |
3230 | dev_dbg(&client->dev, "max resolution: QCIF\n"); |
3231 | coma_set |= COMA_QCIF; |
3232 | @@ -662,11 +675,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) |
3233 | if (!ret) |
3234 | priv->code = code; |
3235 | |
3236 | - if (!ret) { |
3237 | - mf->colorspace = priv->colorspace; |
3238 | - mf->width = priv->rect.width >> half_scale; |
3239 | - mf->height = priv->rect.height >> half_scale; |
3240 | - } |
3241 | return ret; |
3242 | } |
3243 | |
3244 | @@ -685,8 +693,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, |
3245 | v4l_bound_align_image(&mf->width, 2, W_CIF, 1, |
3246 | &mf->height, 2, H_CIF, 1, 0); |
3247 | |
3248 | - mf->field = V4L2_FIELD_NONE; |
3249 | - |
3250 | switch (mf->code) { |
3251 | case MEDIA_BUS_FMT_Y10_1X10: |
3252 | mf->code = MEDIA_BUS_FMT_Y8_1X8; |
3253 | @@ -696,20 +702,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd, |
3254 | case MEDIA_BUS_FMT_YUYV8_2X8: |
3255 | case MEDIA_BUS_FMT_VYUY8_2X8: |
3256 | case MEDIA_BUS_FMT_UYVY8_2X8: |
3257 | - mf->colorspace = V4L2_COLORSPACE_JPEG; |
3258 | break; |
3259 | default: |
3260 | mf->code = MEDIA_BUS_FMT_SBGGR8_1X8; |
3261 | /* fall through */ |
3262 | case MEDIA_BUS_FMT_SBGGR8_1X8: |
3263 | - mf->colorspace = V4L2_COLORSPACE_SRGB; |
3264 | break; |
3265 | } |
3266 | |
3267 | - if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) |
3268 | - return ov6650_s_fmt(sd, mf); |
3269 | - cfg->try_fmt = *mf; |
3270 | + if (format->which == V4L2_SUBDEV_FORMAT_TRY) { |
3271 | + /* store media bus format code and frame size in pad config */ |
3272 | + cfg->try_fmt.width = mf->width; |
3273 | + cfg->try_fmt.height = mf->height; |
3274 | + cfg->try_fmt.code = mf->code; |
3275 | |
3276 | + /* return default mbus frame format updated with pad config */ |
3277 | + *mf = ov6650_def_fmt; |
3278 | + mf->width = cfg->try_fmt.width; |
3279 | + mf->height = cfg->try_fmt.height; |
3280 | + mf->code = cfg->try_fmt.code; |
3281 | + |
3282 | + } else { |
3283 | + /* apply new media bus format code and frame size */ |
3284 | + int ret = ov6650_s_fmt(sd, mf); |
3285 | + |
3286 | + if (ret) |
3287 | + return ret; |
3288 | + |
3289 | + /* return default format updated with active size and code */ |
3290 | + *mf = ov6650_def_fmt; |
3291 | + mf->width = priv->rect.width >> priv->half_scale; |
3292 | + mf->height = priv->rect.height >> priv->half_scale; |
3293 | + mf->code = priv->code; |
3294 | + } |
3295 | return 0; |
3296 | } |
3297 | |
3298 | @@ -852,6 +877,11 @@ static int ov6650_video_probe(struct v4l2_subdev *sd) |
3299 | ret = ov6650_reset(client); |
3300 | if (!ret) |
3301 | ret = ov6650_prog_dflt(client); |
3302 | + if (!ret) { |
3303 | + struct v4l2_mbus_framefmt mf = ov6650_def_fmt; |
3304 | + |
3305 | + ret = ov6650_s_fmt(sd, &mf); |
3306 | + } |
3307 | if (!ret) |
3308 | ret = v4l2_ctrl_handler_setup(&priv->hdl); |
3309 | |
3310 | @@ -1006,9 +1036,6 @@ static int ov6650_probe(struct i2c_client *client, |
3311 | priv->rect.top = DEF_VSTRT << 1; |
3312 | priv->rect.width = W_CIF; |
3313 | priv->rect.height = H_CIF; |
3314 | - priv->half_scale = false; |
3315 | - priv->code = MEDIA_BUS_FMT_YUYV8_2X8; |
3316 | - priv->colorspace = V4L2_COLORSPACE_JPEG; |
3317 | |
3318 | /* Hardware default frame interval */ |
3319 | priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC); |
3320 | diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c |
3321 | index 096a7c9a8963..4eaaf39b9223 100644 |
3322 | --- a/drivers/media/platform/aspeed-video.c |
3323 | +++ b/drivers/media/platform/aspeed-video.c |
3324 | @@ -1658,7 +1658,8 @@ static int aspeed_video_probe(struct platform_device *pdev) |
3325 | { |
3326 | int rc; |
3327 | struct resource *res; |
3328 | - struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL); |
3329 | + struct aspeed_video *video = |
3330 | + devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL); |
3331 | |
3332 | if (!video) |
3333 | return -ENOMEM; |
3334 | diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c |
3335 | index 31ace114eda1..be9ec59774d6 100644 |
3336 | --- a/drivers/media/platform/cadence/cdns-csi2rx.c |
3337 | +++ b/drivers/media/platform/cadence/cdns-csi2rx.c |
3338 | @@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx) |
3339 | */ |
3340 | for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) { |
3341 | unsigned int idx = find_first_zero_bit(&lanes_used, |
3342 | - sizeof(lanes_used)); |
3343 | + csi2rx->max_lanes); |
3344 | set_bit(idx, &lanes_used); |
3345 | reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1); |
3346 | } |
3347 | diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c |
3348 | index 73222c0615c0..834f11fe9dc2 100644 |
3349 | --- a/drivers/media/platform/coda/coda-common.c |
3350 | +++ b/drivers/media/platform/coda/coda-common.c |
3351 | @@ -1084,16 +1084,16 @@ static int coda_decoder_cmd(struct file *file, void *fh, |
3352 | |
3353 | switch (dc->cmd) { |
3354 | case V4L2_DEC_CMD_START: |
3355 | - mutex_lock(&ctx->bitstream_mutex); |
3356 | mutex_lock(&dev->coda_mutex); |
3357 | + mutex_lock(&ctx->bitstream_mutex); |
3358 | coda_bitstream_flush(ctx); |
3359 | - mutex_unlock(&dev->coda_mutex); |
3360 | dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, |
3361 | V4L2_BUF_TYPE_VIDEO_CAPTURE); |
3362 | vb2_clear_last_buffer_dequeued(dst_vq); |
3363 | ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG; |
3364 | coda_fill_bitstream(ctx, NULL); |
3365 | mutex_unlock(&ctx->bitstream_mutex); |
3366 | + mutex_unlock(&dev->coda_mutex); |
3367 | break; |
3368 | case V4L2_DEC_CMD_STOP: |
3369 | stream_end = false; |
3370 | diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c |
3371 | index 378cc302e1f8..d2cbcdca0463 100644 |
3372 | --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c |
3373 | +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c |
3374 | @@ -313,7 +313,7 @@ static int isp_video_release(struct file *file) |
3375 | ivc->streaming = 0; |
3376 | } |
3377 | |
3378 | - vb2_fop_release(file); |
3379 | + _vb2_fop_release(file, NULL); |
3380 | |
3381 | if (v4l2_fh_is_singular_file(file)) { |
3382 | fimc_pipeline_call(&ivc->ve, close); |
3383 | diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c |
3384 | index cbc1c07f0a96..ec2796413e26 100644 |
3385 | --- a/drivers/media/platform/rcar-vin/rcar-v4l2.c |
3386 | +++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c |
3387 | @@ -208,6 +208,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which, |
3388 | ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format); |
3389 | if (ret < 0 && ret != -ENOIOCTLCMD) |
3390 | goto done; |
3391 | + ret = 0; |
3392 | |
3393 | v4l2_fill_pix_format(pix, &format.format); |
3394 | |
3395 | @@ -242,7 +243,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which, |
3396 | done: |
3397 | v4l2_subdev_free_pad_config(pad_cfg); |
3398 | |
3399 | - return 0; |
3400 | + return ret; |
3401 | } |
3402 | |
3403 | static int rvin_querycap(struct file *file, void *priv, |
3404 | diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c |
3405 | index 439d7d886873..a113e811faab 100644 |
3406 | --- a/drivers/memory/mtk-smi.c |
3407 | +++ b/drivers/memory/mtk-smi.c |
3408 | @@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev) |
3409 | |
3410 | static const struct dev_pm_ops smi_larb_pm_ops = { |
3411 | SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL) |
3412 | + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
3413 | + pm_runtime_force_resume) |
3414 | }; |
3415 | |
3416 | static struct platform_driver mtk_smi_larb_driver = { |
3417 | @@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev) |
3418 | |
3419 | static const struct dev_pm_ops smi_common_pm_ops = { |
3420 | SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL) |
3421 | + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, |
3422 | + pm_runtime_force_resume) |
3423 | }; |
3424 | |
3425 | static struct platform_driver mtk_smi_common_driver = { |
3426 | diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c |
3427 | index 6d27ccfe0680..3c2d405bc79b 100644 |
3428 | --- a/drivers/misc/enclosure.c |
3429 | +++ b/drivers/misc/enclosure.c |
3430 | @@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev) |
3431 | cdev = &edev->component[i]; |
3432 | if (cdev->dev == dev) { |
3433 | enclosure_remove_links(cdev); |
3434 | - device_del(&cdev->cdev); |
3435 | put_device(dev); |
3436 | cdev->dev = NULL; |
3437 | - return device_add(&cdev->cdev); |
3438 | + return 0; |
3439 | } |
3440 | } |
3441 | return -ENODEV; |
3442 | diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c |
3443 | index edf94ee54ec7..71a632b815aa 100644 |
3444 | --- a/drivers/mtd/nand/onenand/omap2.c |
3445 | +++ b/drivers/mtd/nand/onenand/omap2.c |
3446 | @@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c, |
3447 | struct dma_async_tx_descriptor *tx; |
3448 | dma_cookie_t cookie; |
3449 | |
3450 | - tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0); |
3451 | + tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, |
3452 | + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); |
3453 | if (!tx) { |
3454 | dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n"); |
3455 | return -EIO; |
3456 | diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c |
3457 | index 8cc852dc7d54..5c06e0b4d4ef 100644 |
3458 | --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c |
3459 | +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c |
3460 | @@ -37,6 +37,7 @@ |
3461 | /* Max ECC buffer length */ |
3462 | #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG) |
3463 | |
3464 | +#define FMC2_TIMEOUT_US 1000 |
3465 | #define FMC2_TIMEOUT_MS 1000 |
3466 | |
3467 | /* Timings */ |
3468 | @@ -53,6 +54,8 @@ |
3469 | #define FMC2_PMEM 0x88 |
3470 | #define FMC2_PATT 0x8c |
3471 | #define FMC2_HECCR 0x94 |
3472 | +#define FMC2_ISR 0x184 |
3473 | +#define FMC2_ICR 0x188 |
3474 | #define FMC2_CSQCR 0x200 |
3475 | #define FMC2_CSQCFGR1 0x204 |
3476 | #define FMC2_CSQCFGR2 0x208 |
3477 | @@ -118,6 +121,12 @@ |
3478 | #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24) |
3479 | #define FMC2_PATT_DEFAULT 0x0a0a0a0a |
3480 | |
3481 | +/* Register: FMC2_ISR */ |
3482 | +#define FMC2_ISR_IHLF BIT(1) |
3483 | + |
3484 | +/* Register: FMC2_ICR */ |
3485 | +#define FMC2_ICR_CIHLF BIT(1) |
3486 | + |
3487 | /* Register: FMC2_CSQCR */ |
3488 | #define FMC2_CSQCR_CSQSTART BIT(0) |
3489 | |
3490 | @@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf, |
3491 | stm32_fmc2_set_buswidth_16(fmc2, true); |
3492 | } |
3493 | |
3494 | +static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) |
3495 | +{ |
3496 | + struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller); |
3497 | + const struct nand_sdr_timings *timings; |
3498 | + u32 isr, sr; |
3499 | + |
3500 | + /* Check if there is no pending requests to the NAND flash */ |
3501 | + if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr, |
3502 | + sr & FMC2_SR_NWRF, 1, |
3503 | + FMC2_TIMEOUT_US)) |
3504 | + dev_warn(fmc2->dev, "Waitrdy timeout\n"); |
3505 | + |
3506 | + /* Wait tWB before R/B# signal is low */ |
3507 | + timings = nand_get_sdr_timings(&chip->data_interface); |
3508 | + ndelay(PSEC_TO_NSEC(timings->tWB_max)); |
3509 | + |
3510 | + /* R/B# signal is low, clear high level flag */ |
3511 | + writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR); |
3512 | + |
3513 | + /* Wait R/B# signal is high */ |
3514 | + return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR, |
3515 | + isr, isr & FMC2_ISR_IHLF, |
3516 | + 5, 1000 * timeout_ms); |
3517 | +} |
3518 | + |
3519 | static int stm32_fmc2_exec_op(struct nand_chip *chip, |
3520 | const struct nand_operation *op, |
3521 | bool check_only) |
3522 | @@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip, |
3523 | break; |
3524 | |
3525 | case NAND_OP_WAITRDY_INSTR: |
3526 | - ret = nand_soft_waitrdy(chip, |
3527 | - instr->ctx.waitrdy.timeout_ms); |
3528 | + ret = stm32_fmc2_waitrdy(chip, |
3529 | + instr->ctx.waitrdy.timeout_ms); |
3530 | break; |
3531 | } |
3532 | } |
3533 | diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c |
3534 | index 7acf4a93b592..1548e0f7f5f4 100644 |
3535 | --- a/drivers/mtd/spi-nor/spi-nor.c |
3536 | +++ b/drivers/mtd/spi-nor/spi-nor.c |
3537 | @@ -2544,7 +2544,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, |
3538 | size_t *retlen, u_char *buf) |
3539 | { |
3540 | struct spi_nor *nor = mtd_to_spi_nor(mtd); |
3541 | - int ret; |
3542 | + ssize_t ret; |
3543 | |
3544 | dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); |
3545 | |
3546 | @@ -2865,7 +2865,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) |
3547 | */ |
3548 | static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf) |
3549 | { |
3550 | - int ret; |
3551 | + ssize_t ret; |
3552 | |
3553 | while (len) { |
3554 | ret = spi_nor_read_data(nor, addr, len, buf); |
3555 | diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c |
3556 | index 159490f5a111..60731e07f681 100644 |
3557 | --- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c |
3558 | +++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c |
3559 | @@ -84,7 +84,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data, |
3560 | val = swahb32(val); |
3561 | } |
3562 | |
3563 | - __raw_writel(val, mem + reg); |
3564 | + iowrite32(val, mem + reg); |
3565 | usleep_range(100, 120); |
3566 | } |
3567 | |
3568 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3569 | index d9eb2b286438..c59cbb8cbdd7 100644 |
3570 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3571 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c |
3572 | @@ -514,6 +514,18 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm) |
3573 | struct iwl_phy_cfg_cmd phy_cfg_cmd; |
3574 | enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img; |
3575 | |
3576 | + if (iwl_mvm_has_unified_ucode(mvm) && |
3577 | + !mvm->trans->cfg->tx_with_siso_diversity) { |
3578 | + return 0; |
3579 | + } else if (mvm->trans->cfg->tx_with_siso_diversity) { |
3580 | + /* |
3581 | + * TODO: currently we don't set the antenna but letting the NIC |
3582 | + * to decide which antenna to use. This should come from BIOS. |
3583 | + */ |
3584 | + phy_cfg_cmd.phy_cfg = |
3585 | + cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED); |
3586 | + } |
3587 | + |
3588 | /* Set parameters */ |
3589 | phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm)); |
3590 | |
3591 | @@ -1344,12 +1356,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm) |
3592 | ret = iwl_send_phy_db_data(mvm->phy_db); |
3593 | if (ret) |
3594 | goto error; |
3595 | - |
3596 | - ret = iwl_send_phy_cfg_cmd(mvm); |
3597 | - if (ret) |
3598 | - goto error; |
3599 | } |
3600 | |
3601 | + ret = iwl_send_phy_cfg_cmd(mvm); |
3602 | + if (ret) |
3603 | + goto error; |
3604 | + |
3605 | ret = iwl_mvm_send_bt_init_conf(mvm); |
3606 | if (ret) |
3607 | goto error; |
3608 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c |
3609 | index 8f50e2b121bd..098d48153a38 100644 |
3610 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c |
3611 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c |
3612 | @@ -350,7 +350,13 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, |
3613 | u16 size = le32_to_cpu(notif->amsdu_size); |
3614 | int i; |
3615 | |
3616 | - if (WARN_ON(sta->max_amsdu_len < size)) |
3617 | + /* |
3618 | + * In debug sta->max_amsdu_len < size |
3619 | + * so also check with orig_amsdu_len which holds the original |
3620 | + * data before debugfs changed the value |
3621 | + */ |
3622 | + if (WARN_ON(sta->max_amsdu_len < size && |
3623 | + mvmsta->orig_amsdu_len < size)) |
3624 | goto out; |
3625 | |
3626 | mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled); |
3627 | diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3628 | index 8a059da7a1fa..e3b2a2bf3863 100644 |
3629 | --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3630 | +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c |
3631 | @@ -935,7 +935,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, |
3632 | !(mvmsta->amsdu_enabled & BIT(tid))) |
3633 | return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb); |
3634 | |
3635 | - max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid); |
3636 | + /* |
3637 | + * Take the min of ieee80211 station and mvm station |
3638 | + */ |
3639 | + max_amsdu_len = |
3640 | + min_t(unsigned int, sta->max_amsdu_len, |
3641 | + iwl_mvm_max_amsdu_size(mvm, sta, tid)); |
3642 | |
3643 | /* |
3644 | * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not |
3645 | diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c |
3646 | index c10432cd703e..8be31e0ad878 100644 |
3647 | --- a/drivers/net/wireless/realtek/rtlwifi/regd.c |
3648 | +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c |
3649 | @@ -386,7 +386,7 @@ int rtl_regd_init(struct ieee80211_hw *hw, |
3650 | struct wiphy *wiphy = hw->wiphy; |
3651 | struct country_code_to_enum_rd *country = NULL; |
3652 | |
3653 | - if (wiphy == NULL || &rtlpriv->regd == NULL) |
3654 | + if (!wiphy) |
3655 | return -EINVAL; |
3656 | |
3657 | /* init country_code from efuse channel plan */ |
3658 | diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c |
3659 | index 760eaffeebd6..23a1d00b5f38 100644 |
3660 | --- a/drivers/net/wireless/rsi/rsi_91x_usb.c |
3661 | +++ b/drivers/net/wireless/rsi/rsi_91x_usb.c |
3662 | @@ -793,7 +793,7 @@ static int rsi_probe(struct usb_interface *pfunction, |
3663 | adapter->device_model = RSI_DEV_9116; |
3664 | } else { |
3665 | rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n", |
3666 | - __func__, id->idProduct); |
3667 | + __func__, id ? id->idProduct : 0x0); |
3668 | goto err1; |
3669 | } |
3670 | |
3671 | diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c |
3672 | index e35e9eaa50ee..b927a92e3463 100644 |
3673 | --- a/drivers/pci/controller/dwc/pci-meson.c |
3674 | +++ b/drivers/pci/controller/dwc/pci-meson.c |
3675 | @@ -250,15 +250,15 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp) |
3676 | if (IS_ERR(res->port_clk)) |
3677 | return PTR_ERR(res->port_clk); |
3678 | |
3679 | - res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0); |
3680 | + res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0); |
3681 | if (IS_ERR(res->mipi_gate)) |
3682 | return PTR_ERR(res->mipi_gate); |
3683 | |
3684 | - res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0); |
3685 | + res->general_clk = meson_pcie_probe_clock(dev, "general", 0); |
3686 | if (IS_ERR(res->general_clk)) |
3687 | return PTR_ERR(res->general_clk); |
3688 | |
3689 | - res->clk = meson_pcie_probe_clock(dev, "pcie", 0); |
3690 | + res->clk = meson_pcie_probe_clock(dev, "pclk", 0); |
3691 | if (IS_ERR(res->clk)) |
3692 | return PTR_ERR(res->clk); |
3693 | |
3694 | diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c |
3695 | index 0f36a926059a..8615f1548882 100644 |
3696 | --- a/drivers/pci/controller/dwc/pcie-designware-host.c |
3697 | +++ b/drivers/pci/controller/dwc/pcie-designware-host.c |
3698 | @@ -78,7 +78,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = { |
3699 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) |
3700 | { |
3701 | int i, pos, irq; |
3702 | - u32 val, num_ctrls; |
3703 | + unsigned long val; |
3704 | + u32 status, num_ctrls; |
3705 | irqreturn_t ret = IRQ_NONE; |
3706 | |
3707 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; |
3708 | @@ -86,14 +87,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) |
3709 | for (i = 0; i < num_ctrls; i++) { |
3710 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + |
3711 | (i * MSI_REG_CTRL_BLOCK_SIZE), |
3712 | - 4, &val); |
3713 | - if (!val) |
3714 | + 4, &status); |
3715 | + if (!status) |
3716 | continue; |
3717 | |
3718 | ret = IRQ_HANDLED; |
3719 | + val = status; |
3720 | pos = 0; |
3721 | - while ((pos = find_next_bit((unsigned long *) &val, |
3722 | - MAX_MSI_IRQS_PER_CTRL, |
3723 | + while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, |
3724 | pos)) != MAX_MSI_IRQS_PER_CTRL) { |
3725 | irq = irq_find_mapping(pp->irq_domain, |
3726 | (i * MAX_MSI_IRQS_PER_CTRL) + |
3727 | diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c |
3728 | index fc0fe4d4de49..97245e076548 100644 |
3729 | --- a/drivers/pci/controller/pci-aardvark.c |
3730 | +++ b/drivers/pci/controller/pci-aardvark.c |
3731 | @@ -180,6 +180,8 @@ |
3732 | #define LINK_WAIT_MAX_RETRIES 10 |
3733 | #define LINK_WAIT_USLEEP_MIN 90000 |
3734 | #define LINK_WAIT_USLEEP_MAX 100000 |
3735 | +#define RETRAIN_WAIT_MAX_RETRIES 10 |
3736 | +#define RETRAIN_WAIT_USLEEP_US 2000 |
3737 | |
3738 | #define MSI_IRQ_NUM 32 |
3739 | |
3740 | @@ -239,6 +241,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie) |
3741 | return -ETIMEDOUT; |
3742 | } |
3743 | |
3744 | +static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie) |
3745 | +{ |
3746 | + size_t retries; |
3747 | + |
3748 | + for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) { |
3749 | + if (!advk_pcie_link_up(pcie)) |
3750 | + break; |
3751 | + udelay(RETRAIN_WAIT_USLEEP_US); |
3752 | + } |
3753 | +} |
3754 | + |
3755 | static void advk_pcie_setup_hw(struct advk_pcie *pcie) |
3756 | { |
3757 | u32 reg; |
3758 | @@ -415,7 +428,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, |
3759 | |
3760 | case PCI_EXP_RTCTL: { |
3761 | u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG); |
3762 | - *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0; |
3763 | + *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE; |
3764 | return PCI_BRIDGE_EMUL_HANDLED; |
3765 | } |
3766 | |
3767 | @@ -426,11 +439,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge, |
3768 | return PCI_BRIDGE_EMUL_HANDLED; |
3769 | } |
3770 | |
3771 | + case PCI_EXP_LNKCTL: { |
3772 | + /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */ |
3773 | + u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) & |
3774 | + ~(PCI_EXP_LNKSTA_LT << 16); |
3775 | + if (!advk_pcie_link_up(pcie)) |
3776 | + val |= (PCI_EXP_LNKSTA_LT << 16); |
3777 | + *value = val; |
3778 | + return PCI_BRIDGE_EMUL_HANDLED; |
3779 | + } |
3780 | + |
3781 | case PCI_CAP_LIST_ID: |
3782 | case PCI_EXP_DEVCAP: |
3783 | case PCI_EXP_DEVCTL: |
3784 | case PCI_EXP_LNKCAP: |
3785 | - case PCI_EXP_LNKCTL: |
3786 | *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg); |
3787 | return PCI_BRIDGE_EMUL_HANDLED; |
3788 | default: |
3789 | @@ -447,14 +469,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge, |
3790 | |
3791 | switch (reg) { |
3792 | case PCI_EXP_DEVCTL: |
3793 | + advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); |
3794 | + break; |
3795 | + |
3796 | case PCI_EXP_LNKCTL: |
3797 | advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg); |
3798 | + if (new & PCI_EXP_LNKCTL_RL) |
3799 | + advk_pcie_wait_for_retrain(pcie); |
3800 | break; |
3801 | |
3802 | - case PCI_EXP_RTCTL: |
3803 | - new = (new & PCI_EXP_RTCTL_PMEIE) << 3; |
3804 | - advk_writel(pcie, new, PCIE_ISR0_MASK_REG); |
3805 | + case PCI_EXP_RTCTL: { |
3806 | + /* Only mask/unmask PME interrupt */ |
3807 | + u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) & |
3808 | + ~PCIE_MSG_PM_PME_MASK; |
3809 | + if ((new & PCI_EXP_RTCTL_PMEIE) == 0) |
3810 | + val |= PCIE_MSG_PM_PME_MASK; |
3811 | + advk_writel(pcie, val, PCIE_ISR0_MASK_REG); |
3812 | break; |
3813 | + } |
3814 | |
3815 | case PCI_EXP_RTSTA: |
3816 | new = (new & PCI_EXP_RTSTA_PME) >> 9; |
3817 | diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c |
3818 | index b3122c151b80..56daad828c9e 100644 |
3819 | --- a/drivers/pci/hotplug/pciehp_core.c |
3820 | +++ b/drivers/pci/hotplug/pciehp_core.c |
3821 | @@ -253,7 +253,7 @@ static bool pme_is_native(struct pcie_device *dev) |
3822 | return pcie_ports_native || host->native_pme; |
3823 | } |
3824 | |
3825 | -static int pciehp_suspend(struct pcie_device *dev) |
3826 | +static void pciehp_disable_interrupt(struct pcie_device *dev) |
3827 | { |
3828 | /* |
3829 | * Disable hotplug interrupt so that it does not trigger |
3830 | @@ -261,7 +261,19 @@ static int pciehp_suspend(struct pcie_device *dev) |
3831 | */ |
3832 | if (pme_is_native(dev)) |
3833 | pcie_disable_interrupt(get_service_data(dev)); |
3834 | +} |
3835 | |
3836 | +#ifdef CONFIG_PM_SLEEP |
3837 | +static int pciehp_suspend(struct pcie_device *dev) |
3838 | +{ |
3839 | + /* |
3840 | + * If the port is already runtime suspended we can keep it that |
3841 | + * way. |
3842 | + */ |
3843 | + if (dev_pm_smart_suspend_and_suspended(&dev->port->dev)) |
3844 | + return 0; |
3845 | + |
3846 | + pciehp_disable_interrupt(dev); |
3847 | return 0; |
3848 | } |
3849 | |
3850 | @@ -279,6 +291,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev) |
3851 | |
3852 | return 0; |
3853 | } |
3854 | +#endif |
3855 | |
3856 | static int pciehp_resume(struct pcie_device *dev) |
3857 | { |
3858 | @@ -292,6 +305,12 @@ static int pciehp_resume(struct pcie_device *dev) |
3859 | return 0; |
3860 | } |
3861 | |
3862 | +static int pciehp_runtime_suspend(struct pcie_device *dev) |
3863 | +{ |
3864 | + pciehp_disable_interrupt(dev); |
3865 | + return 0; |
3866 | +} |
3867 | + |
3868 | static int pciehp_runtime_resume(struct pcie_device *dev) |
3869 | { |
3870 | struct controller *ctrl = get_service_data(dev); |
3871 | @@ -318,10 +337,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = { |
3872 | .remove = pciehp_remove, |
3873 | |
3874 | #ifdef CONFIG_PM |
3875 | +#ifdef CONFIG_PM_SLEEP |
3876 | .suspend = pciehp_suspend, |
3877 | .resume_noirq = pciehp_resume_noirq, |
3878 | .resume = pciehp_resume, |
3879 | - .runtime_suspend = pciehp_suspend, |
3880 | +#endif |
3881 | + .runtime_suspend = pciehp_runtime_suspend, |
3882 | .runtime_resume = pciehp_runtime_resume, |
3883 | #endif /* PM */ |
3884 | }; |
3885 | diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c |
3886 | index d4ac8ce8c1f9..0c3086793e4e 100644 |
3887 | --- a/drivers/pci/pci-driver.c |
3888 | +++ b/drivers/pci/pci-driver.c |
3889 | @@ -941,12 +941,11 @@ static int pci_pm_resume_noirq(struct device *dev) |
3890 | pci_pm_default_resume_early(pci_dev); |
3891 | |
3892 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
3893 | + pcie_pme_root_status_cleanup(pci_dev); |
3894 | |
3895 | if (pci_has_legacy_pm_support(pci_dev)) |
3896 | return pci_legacy_resume_early(dev); |
3897 | |
3898 | - pcie_pme_root_status_cleanup(pci_dev); |
3899 | - |
3900 | if (drv && drv->pm && drv->pm->resume_noirq) |
3901 | error = drv->pm->resume_noirq(dev); |
3902 | |
3903 | diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c |
3904 | index 98cfa30f3fae..9361f3aa26ab 100644 |
3905 | --- a/drivers/pci/pcie/ptm.c |
3906 | +++ b/drivers/pci/pcie/ptm.c |
3907 | @@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev) |
3908 | snprintf(clock_desc, sizeof(clock_desc), ">254ns"); |
3909 | break; |
3910 | default: |
3911 | - snprintf(clock_desc, sizeof(clock_desc), "%udns", |
3912 | + snprintf(clock_desc, sizeof(clock_desc), "%uns", |
3913 | dev->ptm_granularity); |
3914 | break; |
3915 | } |
3916 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
3917 | index 64ebe3e5e611..d3033873395d 100644 |
3918 | --- a/drivers/pci/probe.c |
3919 | +++ b/drivers/pci/probe.c |
3920 | @@ -572,6 +572,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev) |
3921 | bridge->release_fn(bridge); |
3922 | |
3923 | pci_free_resource_list(&bridge->windows); |
3924 | + pci_free_resource_list(&bridge->dma_ranges); |
3925 | } |
3926 | |
3927 | static void pci_release_host_bridge_dev(struct device *dev) |
3928 | diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c |
3929 | index ee184d5607bd..f20524f0c21d 100644 |
3930 | --- a/drivers/phy/motorola/phy-mapphone-mdm6600.c |
3931 | +++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c |
3932 | @@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work) |
3933 | struct phy_mdm6600 *ddata; |
3934 | struct device *dev; |
3935 | DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES); |
3936 | - int error, i, val = 0; |
3937 | + int error; |
3938 | |
3939 | ddata = container_of(work, struct phy_mdm6600, status_work.work); |
3940 | dev = ddata->dev; |
3941 | @@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work) |
3942 | if (error) |
3943 | return; |
3944 | |
3945 | - for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) { |
3946 | - val |= test_bit(i, values) << i; |
3947 | - dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n", |
3948 | - __func__, i, test_bit(i, values), val); |
3949 | - } |
3950 | - ddata->status = values[0]; |
3951 | + ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1); |
3952 | |
3953 | dev_info(dev, "modem status: %i %s\n", |
3954 | ddata->status, |
3955 | - phy_mdm6600_status_name[ddata->status & 7]); |
3956 | + phy_mdm6600_status_name[ddata->status]); |
3957 | complete(&ddata->ack); |
3958 | } |
3959 | |
3960 | diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig |
3961 | index f1806fd781a0..530426a74f75 100644 |
3962 | --- a/drivers/pinctrl/cirrus/Kconfig |
3963 | +++ b/drivers/pinctrl/cirrus/Kconfig |
3964 | @@ -2,6 +2,7 @@ |
3965 | config PINCTRL_LOCHNAGAR |
3966 | tristate "Cirrus Logic Lochnagar pinctrl driver" |
3967 | depends on MFD_LOCHNAGAR |
3968 | + select GPIOLIB |
3969 | select PINMUX |
3970 | select PINCONF |
3971 | select GENERIC_PINCONF |
3972 | diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c |
3973 | index 2e06fb1464ab..7fdf4257df1e 100644 |
3974 | --- a/drivers/pinctrl/intel/pinctrl-lewisburg.c |
3975 | +++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c |
3976 | @@ -33,6 +33,7 @@ |
3977 | .npins = ((e) - (s) + 1), \ |
3978 | } |
3979 | |
3980 | +/* Lewisburg */ |
3981 | static const struct pinctrl_pin_desc lbg_pins[] = { |
3982 | /* GPP_A */ |
3983 | PINCTRL_PIN(0, "RCINB"), |
3984 | @@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = { |
3985 | PINCTRL_PIN(33, "SRCCLKREQB_4"), |
3986 | PINCTRL_PIN(34, "SRCCLKREQB_5"), |
3987 | PINCTRL_PIN(35, "GPP_B_11"), |
3988 | - PINCTRL_PIN(36, "GLB_RST_WARN_N"), |
3989 | + PINCTRL_PIN(36, "SLP_S0B"), |
3990 | PINCTRL_PIN(37, "PLTRSTB"), |
3991 | PINCTRL_PIN(38, "SPKR"), |
3992 | PINCTRL_PIN(39, "GPP_B_15"), |
3993 | @@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = { |
3994 | PINCTRL_PIN(141, "GBE_PCI_DIS"), |
3995 | PINCTRL_PIN(142, "GBE_LAN_DIS"), |
3996 | PINCTRL_PIN(143, "GPP_I_10"), |
3997 | - PINCTRL_PIN(144, "GPIO_RCOMP_3P3"), |
3998 | /* GPP_J */ |
3999 | - PINCTRL_PIN(145, "GBE_LED_0_0"), |
4000 | - PINCTRL_PIN(146, "GBE_LED_0_1"), |
4001 | - PINCTRL_PIN(147, "GBE_LED_1_0"), |
4002 | - PINCTRL_PIN(148, "GBE_LED_1_1"), |
4003 | - PINCTRL_PIN(149, "GBE_LED_2_0"), |
4004 | - PINCTRL_PIN(150, "GBE_LED_2_1"), |
4005 | - PINCTRL_PIN(151, "GBE_LED_3_0"), |
4006 | - PINCTRL_PIN(152, "GBE_LED_3_1"), |
4007 | - PINCTRL_PIN(153, "GBE_SCL_0"), |
4008 | - PINCTRL_PIN(154, "GBE_SDA_0"), |
4009 | - PINCTRL_PIN(155, "GBE_SCL_1"), |
4010 | - PINCTRL_PIN(156, "GBE_SDA_1"), |
4011 | - PINCTRL_PIN(157, "GBE_SCL_2"), |
4012 | - PINCTRL_PIN(158, "GBE_SDA_2"), |
4013 | - PINCTRL_PIN(159, "GBE_SCL_3"), |
4014 | - PINCTRL_PIN(160, "GBE_SDA_3"), |
4015 | - PINCTRL_PIN(161, "GBE_SDP_0_0"), |
4016 | - PINCTRL_PIN(162, "GBE_SDP_0_1"), |
4017 | - PINCTRL_PIN(163, "GBE_SDP_1_0"), |
4018 | - PINCTRL_PIN(164, "GBE_SDP_1_1"), |
4019 | - PINCTRL_PIN(165, "GBE_SDP_2_0"), |
4020 | - PINCTRL_PIN(166, "GBE_SDP_2_1"), |
4021 | - PINCTRL_PIN(167, "GBE_SDP_3_0"), |
4022 | - PINCTRL_PIN(168, "GBE_SDP_3_1"), |
4023 | + PINCTRL_PIN(144, "GBE_LED_0_0"), |
4024 | + PINCTRL_PIN(145, "GBE_LED_0_1"), |
4025 | + PINCTRL_PIN(146, "GBE_LED_1_0"), |
4026 | + PINCTRL_PIN(147, "GBE_LED_1_1"), |
4027 | + PINCTRL_PIN(148, "GBE_LED_2_0"), |
4028 | + PINCTRL_PIN(149, "GBE_LED_2_1"), |
4029 | + PINCTRL_PIN(150, "GBE_LED_3_0"), |
4030 | + PINCTRL_PIN(151, "GBE_LED_3_1"), |
4031 | + PINCTRL_PIN(152, "GBE_SCL_0"), |
4032 | + PINCTRL_PIN(153, "GBE_SDA_0"), |
4033 | + PINCTRL_PIN(154, "GBE_SCL_1"), |
4034 | + PINCTRL_PIN(155, "GBE_SDA_1"), |
4035 | + PINCTRL_PIN(156, "GBE_SCL_2"), |
4036 | + PINCTRL_PIN(157, "GBE_SDA_2"), |
4037 | + PINCTRL_PIN(158, "GBE_SCL_3"), |
4038 | + PINCTRL_PIN(159, "GBE_SDA_3"), |
4039 | + PINCTRL_PIN(160, "GBE_SDP_0_0"), |
4040 | + PINCTRL_PIN(161, "GBE_SDP_0_1"), |
4041 | + PINCTRL_PIN(162, "GBE_SDP_1_0"), |
4042 | + PINCTRL_PIN(163, "GBE_SDP_1_1"), |
4043 | + PINCTRL_PIN(164, "GBE_SDP_2_0"), |
4044 | + PINCTRL_PIN(165, "GBE_SDP_2_1"), |
4045 | + PINCTRL_PIN(166, "GBE_SDP_3_0"), |
4046 | + PINCTRL_PIN(167, "GBE_SDP_3_1"), |
4047 | /* GPP_K */ |
4048 | - PINCTRL_PIN(169, "GBE_RMIICLK"), |
4049 | - PINCTRL_PIN(170, "GBE_RMII_TXD_0"), |
4050 | - PINCTRL_PIN(171, "GBE_RMII_TXD_1"), |
4051 | + PINCTRL_PIN(168, "GBE_RMIICLK"), |
4052 | + PINCTRL_PIN(169, "GBE_RMII_RXD_0"), |
4053 | + PINCTRL_PIN(170, "GBE_RMII_RXD_1"), |
4054 | + PINCTRL_PIN(171, "GBE_RMII_CRS_DV"), |
4055 | PINCTRL_PIN(172, "GBE_RMII_TX_EN"), |
4056 | - PINCTRL_PIN(173, "GBE_RMII_CRS_DV"), |
4057 | - PINCTRL_PIN(174, "GBE_RMII_RXD_0"), |
4058 | - PINCTRL_PIN(175, "GBE_RMII_RXD_1"), |
4059 | - PINCTRL_PIN(176, "GBE_RMII_RX_ER"), |
4060 | - PINCTRL_PIN(177, "GBE_RMII_ARBIN"), |
4061 | - PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"), |
4062 | - PINCTRL_PIN(179, "PE_RST_N"), |
4063 | - PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"), |
4064 | + PINCTRL_PIN(173, "GBE_RMII_TXD_0"), |
4065 | + PINCTRL_PIN(174, "GBE_RMII_TXD_1"), |
4066 | + PINCTRL_PIN(175, "GBE_RMII_RX_ER"), |
4067 | + PINCTRL_PIN(176, "GBE_RMII_ARBIN"), |
4068 | + PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"), |
4069 | + PINCTRL_PIN(178, "PE_RST_N"), |
4070 | /* GPP_G */ |
4071 | - PINCTRL_PIN(181, "FAN_TACH_0"), |
4072 | - PINCTRL_PIN(182, "FAN_TACH_1"), |
4073 | - PINCTRL_PIN(183, "FAN_TACH_2"), |
4074 | - PINCTRL_PIN(184, "FAN_TACH_3"), |
4075 | - PINCTRL_PIN(185, "FAN_TACH_4"), |
4076 | - PINCTRL_PIN(186, "FAN_TACH_5"), |
4077 | - PINCTRL_PIN(187, "FAN_TACH_6"), |
4078 | - PINCTRL_PIN(188, "FAN_TACH_7"), |
4079 | - PINCTRL_PIN(189, "FAN_PWM_0"), |
4080 | - PINCTRL_PIN(190, "FAN_PWM_1"), |
4081 | - PINCTRL_PIN(191, "FAN_PWM_2"), |
4082 | - PINCTRL_PIN(192, "FAN_PWM_3"), |
4083 | - PINCTRL_PIN(193, "GSXDOUT"), |
4084 | - PINCTRL_PIN(194, "GSXSLOAD"), |
4085 | - PINCTRL_PIN(195, "GSXDIN"), |
4086 | - PINCTRL_PIN(196, "GSXSRESETB"), |
4087 | - PINCTRL_PIN(197, "GSXCLK"), |
4088 | - PINCTRL_PIN(198, "ADR_COMPLETE"), |
4089 | - PINCTRL_PIN(199, "NMIB"), |
4090 | - PINCTRL_PIN(200, "SMIB"), |
4091 | - PINCTRL_PIN(201, "SSATA_DEVSLP_0"), |
4092 | - PINCTRL_PIN(202, "SSATA_DEVSLP_1"), |
4093 | - PINCTRL_PIN(203, "SSATA_DEVSLP_2"), |
4094 | - PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"), |
4095 | + PINCTRL_PIN(179, "FAN_TACH_0"), |
4096 | + PINCTRL_PIN(180, "FAN_TACH_1"), |
4097 | + PINCTRL_PIN(181, "FAN_TACH_2"), |
4098 | + PINCTRL_PIN(182, "FAN_TACH_3"), |
4099 | + PINCTRL_PIN(183, "FAN_TACH_4"), |
4100 | + PINCTRL_PIN(184, "FAN_TACH_5"), |
4101 | + PINCTRL_PIN(185, "FAN_TACH_6"), |
4102 | + PINCTRL_PIN(186, "FAN_TACH_7"), |
4103 | + PINCTRL_PIN(187, "FAN_PWM_0"), |
4104 | + PINCTRL_PIN(188, "FAN_PWM_1"), |
4105 | + PINCTRL_PIN(189, "FAN_PWM_2"), |
4106 | + PINCTRL_PIN(190, "FAN_PWM_3"), |
4107 | + PINCTRL_PIN(191, "GSXDOUT"), |
4108 | + PINCTRL_PIN(192, "GSXSLOAD"), |
4109 | + PINCTRL_PIN(193, "GSXDIN"), |
4110 | + PINCTRL_PIN(194, "GSXSRESETB"), |
4111 | + PINCTRL_PIN(195, "GSXCLK"), |
4112 | + PINCTRL_PIN(196, "ADR_COMPLETE"), |
4113 | + PINCTRL_PIN(197, "NMIB"), |
4114 | + PINCTRL_PIN(198, "SMIB"), |
4115 | + PINCTRL_PIN(199, "SSATA_DEVSLP_0"), |
4116 | + PINCTRL_PIN(200, "SSATA_DEVSLP_1"), |
4117 | + PINCTRL_PIN(201, "SSATA_DEVSLP_2"), |
4118 | + PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"), |
4119 | /* GPP_H */ |
4120 | - PINCTRL_PIN(205, "SRCCLKREQB_6"), |
4121 | - PINCTRL_PIN(206, "SRCCLKREQB_7"), |
4122 | - PINCTRL_PIN(207, "SRCCLKREQB_8"), |
4123 | - PINCTRL_PIN(208, "SRCCLKREQB_9"), |
4124 | - PINCTRL_PIN(209, "SRCCLKREQB_10"), |
4125 | - PINCTRL_PIN(210, "SRCCLKREQB_11"), |
4126 | - PINCTRL_PIN(211, "SRCCLKREQB_12"), |
4127 | - PINCTRL_PIN(212, "SRCCLKREQB_13"), |
4128 | - PINCTRL_PIN(213, "SRCCLKREQB_14"), |
4129 | - PINCTRL_PIN(214, "SRCCLKREQB_15"), |
4130 | - PINCTRL_PIN(215, "SML2CLK"), |
4131 | - PINCTRL_PIN(216, "SML2DATA"), |
4132 | - PINCTRL_PIN(217, "SML2ALERTB"), |
4133 | - PINCTRL_PIN(218, "SML3CLK"), |
4134 | - PINCTRL_PIN(219, "SML3DATA"), |
4135 | - PINCTRL_PIN(220, "SML3ALERTB"), |
4136 | - PINCTRL_PIN(221, "SML4CLK"), |
4137 | - PINCTRL_PIN(222, "SML4DATA"), |
4138 | - PINCTRL_PIN(223, "SML4ALERTB"), |
4139 | - PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"), |
4140 | - PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"), |
4141 | - PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"), |
4142 | - PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"), |
4143 | - PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"), |
4144 | + PINCTRL_PIN(203, "SRCCLKREQB_6"), |
4145 | + PINCTRL_PIN(204, "SRCCLKREQB_7"), |
4146 | + PINCTRL_PIN(205, "SRCCLKREQB_8"), |
4147 | + PINCTRL_PIN(206, "SRCCLKREQB_9"), |
4148 | + PINCTRL_PIN(207, "SRCCLKREQB_10"), |
4149 | + PINCTRL_PIN(208, "SRCCLKREQB_11"), |
4150 | + PINCTRL_PIN(209, "SRCCLKREQB_12"), |
4151 | + PINCTRL_PIN(210, "SRCCLKREQB_13"), |
4152 | + PINCTRL_PIN(211, "SRCCLKREQB_14"), |
4153 | + PINCTRL_PIN(212, "SRCCLKREQB_15"), |
4154 | + PINCTRL_PIN(213, "SML2CLK"), |
4155 | + PINCTRL_PIN(214, "SML2DATA"), |
4156 | + PINCTRL_PIN(215, "SML2ALERTB"), |
4157 | + PINCTRL_PIN(216, "SML3CLK"), |
4158 | + PINCTRL_PIN(217, "SML3DATA"), |
4159 | + PINCTRL_PIN(218, "SML3ALERTB"), |
4160 | + PINCTRL_PIN(219, "SML4CLK"), |
4161 | + PINCTRL_PIN(220, "SML4DATA"), |
4162 | + PINCTRL_PIN(221, "SML4ALERTB"), |
4163 | + PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"), |
4164 | + PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"), |
4165 | + PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"), |
4166 | + PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"), |
4167 | + PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"), |
4168 | /* GPP_L */ |
4169 | + PINCTRL_PIN(227, "GPP_L_0"), |
4170 | + PINCTRL_PIN(228, "EC_CSME_INTR_OUT"), |
4171 | PINCTRL_PIN(229, "VISA2CH0_D0"), |
4172 | PINCTRL_PIN(230, "VISA2CH0_D1"), |
4173 | PINCTRL_PIN(231, "VISA2CH0_D2"), |
4174 | diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c |
4175 | index 8bba9d053d9f..aba479a1150c 100644 |
4176 | --- a/drivers/pinctrl/meson/pinctrl-meson.c |
4177 | +++ b/drivers/pinctrl/meson/pinctrl-meson.c |
4178 | @@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, |
4179 | return ret; |
4180 | |
4181 | meson_calc_reg_and_bit(bank, pin, REG_DS, ®, &bit); |
4182 | + bit = bit << 1; |
4183 | |
4184 | ret = regmap_read(pc->reg_ds, reg, &val); |
4185 | if (ret) |
4186 | diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c |
4187 | index b8640ad41bef..ce983247c9e2 100644 |
4188 | --- a/drivers/pinctrl/sh-pfc/core.c |
4189 | +++ b/drivers/pinctrl/sh-pfc/core.c |
4190 | @@ -29,12 +29,12 @@ |
4191 | static int sh_pfc_map_resources(struct sh_pfc *pfc, |
4192 | struct platform_device *pdev) |
4193 | { |
4194 | - unsigned int num_windows, num_irqs; |
4195 | struct sh_pfc_window *windows; |
4196 | unsigned int *irqs = NULL; |
4197 | + unsigned int num_windows; |
4198 | struct resource *res; |
4199 | unsigned int i; |
4200 | - int irq; |
4201 | + int num_irqs; |
4202 | |
4203 | /* Count the MEM and IRQ resources. */ |
4204 | for (num_windows = 0;; num_windows++) { |
4205 | @@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc, |
4206 | if (!res) |
4207 | break; |
4208 | } |
4209 | - for (num_irqs = 0;; num_irqs++) { |
4210 | - irq = platform_get_irq(pdev, num_irqs); |
4211 | - if (irq == -EPROBE_DEFER) |
4212 | - return irq; |
4213 | - if (irq < 0) |
4214 | - break; |
4215 | - } |
4216 | - |
4217 | if (num_windows == 0) |
4218 | return -EINVAL; |
4219 | |
4220 | + num_irqs = platform_irq_count(pdev); |
4221 | + if (num_irqs < 0) |
4222 | + return num_irqs; |
4223 | + |
4224 | /* Allocate memory windows and IRQs arrays. */ |
4225 | windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows), |
4226 | GFP_KERNEL); |
4227 | diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h |
4228 | index 835148fc0f28..cab7da130925 100644 |
4229 | --- a/drivers/pinctrl/sh-pfc/sh_pfc.h |
4230 | +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h |
4231 | @@ -422,12 +422,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info; |
4232 | /* |
4233 | * Describe a pinmux configuration in which a pin is physically multiplexed |
4234 | * with other pins. |
4235 | - * - ipsr: IPSR field (unused, for documentation purposes only) |
4236 | + * - ipsr: IPSR field |
4237 | * - fn: Function name |
4238 | * - psel: Physical multiplexing selector |
4239 | */ |
4240 | #define PINMUX_IPSR_PHYS(ipsr, fn, psel) \ |
4241 | - PINMUX_DATA(fn##_MARK, FN_##psel) |
4242 | + PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr) |
4243 | |
4244 | /* |
4245 | * Describe a pinmux configuration for a single-function pin with GPIO |
4246 | diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c |
4247 | index e5e7f1f22813..b522ca010332 100644 |
4248 | --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c |
4249 | +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c |
4250 | @@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev, |
4251 | return -EINVAL; |
4252 | |
4253 | rows = pinctrl_count_index_with_args(np, name); |
4254 | - if (rows == -EINVAL) |
4255 | + if (rows < 0) |
4256 | return rows; |
4257 | |
4258 | *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL); |
4259 | diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c |
4260 | index 9a5c9fd2dbc6..5739a9669b29 100644 |
4261 | --- a/drivers/platform/mellanox/mlxbf-tmfifo.c |
4262 | +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c |
4263 | @@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info { |
4264 | * @work: work struct for deferred process |
4265 | * @timer: background timer |
4266 | * @vring: Tx/Rx ring |
4267 | - * @spin_lock: spin lock |
4268 | + * @spin_lock: Tx/Rx spin lock |
4269 | * @is_ready: ready flag |
4270 | */ |
4271 | struct mlxbf_tmfifo { |
4272 | @@ -164,7 +164,7 @@ struct mlxbf_tmfifo { |
4273 | struct work_struct work; |
4274 | struct timer_list timer; |
4275 | struct mlxbf_tmfifo_vring *vring[2]; |
4276 | - spinlock_t spin_lock; /* spin lock */ |
4277 | + spinlock_t spin_lock[2]; /* spin lock */ |
4278 | bool is_ready; |
4279 | }; |
4280 | |
4281 | @@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) |
4282 | writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); |
4283 | |
4284 | /* Use spin-lock to protect the 'cons->tx_buf'. */ |
4285 | - spin_lock_irqsave(&fifo->spin_lock, flags); |
4286 | + spin_lock_irqsave(&fifo->spin_lock[0], flags); |
4287 | |
4288 | while (size > 0) { |
4289 | addr = cons->tx_buf.buf + cons->tx_buf.tail; |
4290 | @@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) |
4291 | } |
4292 | } |
4293 | |
4294 | - spin_unlock_irqrestore(&fifo->spin_lock, flags); |
4295 | + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); |
4296 | } |
4297 | |
4298 | /* Rx/Tx one word in the descriptor buffer. */ |
4299 | @@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, |
4300 | fifo->vring[is_rx] = NULL; |
4301 | |
4302 | /* Notify upper layer that packet is done. */ |
4303 | - spin_lock_irqsave(&fifo->spin_lock, flags); |
4304 | + spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); |
4305 | vring_interrupt(0, vring->vq); |
4306 | - spin_unlock_irqrestore(&fifo->spin_lock, flags); |
4307 | + spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); |
4308 | } |
4309 | |
4310 | mlxbf_tmfifo_desc_done: |
4311 | @@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) |
4312 | * worker handler. |
4313 | */ |
4314 | if (vring->vdev_id == VIRTIO_ID_CONSOLE) { |
4315 | - spin_lock_irqsave(&fifo->spin_lock, flags); |
4316 | + spin_lock_irqsave(&fifo->spin_lock[0], flags); |
4317 | tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; |
4318 | mlxbf_tmfifo_console_output(tm_vdev, vring); |
4319 | - spin_unlock_irqrestore(&fifo->spin_lock, flags); |
4320 | + spin_unlock_irqrestore(&fifo->spin_lock[0], flags); |
4321 | } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, |
4322 | &fifo->pend_events)) { |
4323 | return true; |
4324 | @@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) |
4325 | if (!fifo) |
4326 | return -ENOMEM; |
4327 | |
4328 | - spin_lock_init(&fifo->spin_lock); |
4329 | + spin_lock_init(&fifo->spin_lock[0]); |
4330 | + spin_lock_init(&fifo->spin_lock[1]); |
4331 | INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler); |
4332 | mutex_init(&fifo->lock); |
4333 | |
4334 | diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c |
4335 | index a7f184bb47e0..3d29a11c1d6b 100644 |
4336 | --- a/drivers/platform/mips/cpu_hwmon.c |
4337 | +++ b/drivers/platform/mips/cpu_hwmon.c |
4338 | @@ -161,7 +161,7 @@ static int __init loongson_hwmon_init(void) |
4339 | |
4340 | cpu_hwmon_dev = hwmon_device_register(NULL); |
4341 | if (IS_ERR(cpu_hwmon_dev)) { |
4342 | - ret = -ENOMEM; |
4343 | + ret = PTR_ERR(cpu_hwmon_dev); |
4344 | pr_err("hwmon_device_register fail!\n"); |
4345 | goto fail_hwmon_device_register; |
4346 | } |
4347 | diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c |
4348 | index 821b08e01635..982f0cc8270c 100644 |
4349 | --- a/drivers/platform/x86/asus-wmi.c |
4350 | +++ b/drivers/platform/x86/asus-wmi.c |
4351 | @@ -512,13 +512,7 @@ static void kbd_led_update(struct asus_wmi *asus) |
4352 | { |
4353 | int ctrl_param = 0; |
4354 | |
4355 | - /* |
4356 | - * bits 0-2: level |
4357 | - * bit 7: light on/off |
4358 | - */ |
4359 | - if (asus->kbd_led_wk > 0) |
4360 | - ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); |
4361 | - |
4362 | + ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F); |
4363 | asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL); |
4364 | } |
4365 | |
4366 | diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c |
4367 | index be85ed966bf3..73eb1572b966 100644 |
4368 | --- a/drivers/platform/x86/gpd-pocket-fan.c |
4369 | +++ b/drivers/platform/x86/gpd-pocket-fan.c |
4370 | @@ -16,17 +16,27 @@ |
4371 | |
4372 | #define MAX_SPEED 3 |
4373 | |
4374 | -static int temp_limits[3] = { 55000, 60000, 65000 }; |
4375 | +#define TEMP_LIMIT0_DEFAULT 55000 |
4376 | +#define TEMP_LIMIT1_DEFAULT 60000 |
4377 | +#define TEMP_LIMIT2_DEFAULT 65000 |
4378 | + |
4379 | +#define HYSTERESIS_DEFAULT 3000 |
4380 | + |
4381 | +#define SPEED_ON_AC_DEFAULT 2 |
4382 | + |
4383 | +static int temp_limits[3] = { |
4384 | + TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT, |
4385 | +}; |
4386 | module_param_array(temp_limits, int, NULL, 0444); |
4387 | MODULE_PARM_DESC(temp_limits, |
4388 | "Millicelsius values above which the fan speed increases"); |
4389 | |
4390 | -static int hysteresis = 3000; |
4391 | +static int hysteresis = HYSTERESIS_DEFAULT; |
4392 | module_param(hysteresis, int, 0444); |
4393 | MODULE_PARM_DESC(hysteresis, |
4394 | "Hysteresis in millicelsius before lowering the fan speed"); |
4395 | |
4396 | -static int speed_on_ac = 2; |
4397 | +static int speed_on_ac = SPEED_ON_AC_DEFAULT; |
4398 | module_param(speed_on_ac, int, 0444); |
4399 | MODULE_PARM_DESC(speed_on_ac, |
4400 | "minimum fan speed to allow when system is powered by AC"); |
4401 | @@ -120,18 +130,21 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev) |
4402 | if (temp_limits[i] < 40000 || temp_limits[i] > 70000) { |
4403 | dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n", |
4404 | temp_limits[i]); |
4405 | - return -EINVAL; |
4406 | + temp_limits[0] = TEMP_LIMIT0_DEFAULT; |
4407 | + temp_limits[1] = TEMP_LIMIT1_DEFAULT; |
4408 | + temp_limits[2] = TEMP_LIMIT2_DEFAULT; |
4409 | + break; |
4410 | } |
4411 | } |
4412 | if (hysteresis < 1000 || hysteresis > 10000) { |
4413 | dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n", |
4414 | hysteresis); |
4415 | - return -EINVAL; |
4416 | + hysteresis = HYSTERESIS_DEFAULT; |
4417 | } |
4418 | if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) { |
4419 | dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n", |
4420 | speed_on_ac); |
4421 | - return -EINVAL; |
4422 | + speed_on_ac = SPEED_ON_AC_DEFAULT; |
4423 | } |
4424 | |
4425 | fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL); |
4426 | diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c |
4427 | index a608f445dad6..f213264c8567 100644 |
4428 | --- a/drivers/reset/reset-brcmstb.c |
4429 | +++ b/drivers/reset/reset-brcmstb.c |
4430 | @@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev) |
4431 | return -ENOMEM; |
4432 | |
4433 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
4434 | - if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) || |
4435 | - !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) { |
4436 | - dev_err(kdev, "incorrect register range\n"); |
4437 | - return -EINVAL; |
4438 | - } |
4439 | - |
4440 | priv->base = devm_ioremap_resource(kdev, res); |
4441 | if (IS_ERR(priv->base)) |
4442 | return PTR_ERR(priv->base); |
4443 | diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c |
4444 | index 7744333b0f40..ddfef4d43bab 100644 |
4445 | --- a/drivers/rtc/rtc-bd70528.c |
4446 | +++ b/drivers/rtc/rtc-bd70528.c |
4447 | @@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc); |
4448 | MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); |
4449 | MODULE_DESCRIPTION("BD70528 RTC driver"); |
4450 | MODULE_LICENSE("GPL"); |
4451 | +MODULE_ALIAS("platofrm:bd70528-rtc"); |
4452 | diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c |
4453 | index 3e9800f9878a..82d2ab0b3e9c 100644 |
4454 | --- a/drivers/rtc/rtc-brcmstb-waketimer.c |
4455 | +++ b/drivers/rtc/rtc-brcmstb-waketimer.c |
4456 | @@ -277,6 +277,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev) |
4457 | struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev); |
4458 | |
4459 | unregister_reboot_notifier(&timer->reboot_notifier); |
4460 | + clk_disable_unprepare(timer->clk); |
4461 | |
4462 | return 0; |
4463 | } |
4464 | diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c |
4465 | index 1c2d3c4a4963..b1f2bedee77e 100644 |
4466 | --- a/drivers/rtc/rtc-msm6242.c |
4467 | +++ b/drivers/rtc/rtc-msm6242.c |
4468 | @@ -133,7 +133,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm) |
4469 | msm6242_read(priv, MSM6242_SECOND1); |
4470 | tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 + |
4471 | msm6242_read(priv, MSM6242_MINUTE1); |
4472 | - tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 + |
4473 | + tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) & |
4474 | + MSM6242_HOUR10_HR_MASK) * 10 + |
4475 | msm6242_read(priv, MSM6242_HOUR1); |
4476 | tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 + |
4477 | msm6242_read(priv, MSM6242_DAY1); |
4478 | diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c |
4479 | index 704229eb0cac..b216bdcba0da 100644 |
4480 | --- a/drivers/rtc/rtc-mt6397.c |
4481 | +++ b/drivers/rtc/rtc-mt6397.c |
4482 | @@ -47,6 +47,14 @@ |
4483 | |
4484 | #define RTC_AL_SEC 0x0018 |
4485 | |
4486 | +#define RTC_AL_SEC_MASK 0x003f |
4487 | +#define RTC_AL_MIN_MASK 0x003f |
4488 | +#define RTC_AL_HOU_MASK 0x001f |
4489 | +#define RTC_AL_DOM_MASK 0x001f |
4490 | +#define RTC_AL_DOW_MASK 0x0007 |
4491 | +#define RTC_AL_MTH_MASK 0x000f |
4492 | +#define RTC_AL_YEA_MASK 0x007f |
4493 | + |
4494 | #define RTC_PDN2 0x002e |
4495 | #define RTC_PDN2_PWRON_ALARM BIT(4) |
4496 | |
4497 | @@ -103,7 +111,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data) |
4498 | irqen = irqsta & ~RTC_IRQ_EN_AL; |
4499 | mutex_lock(&rtc->lock); |
4500 | if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN, |
4501 | - irqen) < 0) |
4502 | + irqen) == 0) |
4503 | mtk_rtc_write_trigger(rtc); |
4504 | mutex_unlock(&rtc->lock); |
4505 | |
4506 | @@ -225,12 +233,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) |
4507 | alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM); |
4508 | mutex_unlock(&rtc->lock); |
4509 | |
4510 | - tm->tm_sec = data[RTC_OFFSET_SEC]; |
4511 | - tm->tm_min = data[RTC_OFFSET_MIN]; |
4512 | - tm->tm_hour = data[RTC_OFFSET_HOUR]; |
4513 | - tm->tm_mday = data[RTC_OFFSET_DOM]; |
4514 | - tm->tm_mon = data[RTC_OFFSET_MTH]; |
4515 | - tm->tm_year = data[RTC_OFFSET_YEAR]; |
4516 | + tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK; |
4517 | + tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK; |
4518 | + tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK; |
4519 | + tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK; |
4520 | + tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK; |
4521 | + tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK; |
4522 | |
4523 | tm->tm_year += RTC_MIN_YEAR_OFFSET; |
4524 | tm->tm_mon--; |
4525 | @@ -251,14 +259,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) |
4526 | tm->tm_year -= RTC_MIN_YEAR_OFFSET; |
4527 | tm->tm_mon++; |
4528 | |
4529 | - data[RTC_OFFSET_SEC] = tm->tm_sec; |
4530 | - data[RTC_OFFSET_MIN] = tm->tm_min; |
4531 | - data[RTC_OFFSET_HOUR] = tm->tm_hour; |
4532 | - data[RTC_OFFSET_DOM] = tm->tm_mday; |
4533 | - data[RTC_OFFSET_MTH] = tm->tm_mon; |
4534 | - data[RTC_OFFSET_YEAR] = tm->tm_year; |
4535 | - |
4536 | mutex_lock(&rtc->lock); |
4537 | + ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC, |
4538 | + data, RTC_OFFSET_COUNT); |
4539 | + if (ret < 0) |
4540 | + goto exit; |
4541 | + |
4542 | + data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) | |
4543 | + (tm->tm_sec & RTC_AL_SEC_MASK)); |
4544 | + data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) | |
4545 | + (tm->tm_min & RTC_AL_MIN_MASK)); |
4546 | + data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) | |
4547 | + (tm->tm_hour & RTC_AL_HOU_MASK)); |
4548 | + data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) | |
4549 | + (tm->tm_mday & RTC_AL_DOM_MASK)); |
4550 | + data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) | |
4551 | + (tm->tm_mon & RTC_AL_MTH_MASK)); |
4552 | + data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) | |
4553 | + (tm->tm_year & RTC_AL_YEA_MASK)); |
4554 | + |
4555 | if (alm->enabled) { |
4556 | ret = regmap_bulk_write(rtc->regmap, |
4557 | rtc->addr_base + RTC_AL_SEC, |
4558 | diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c |
4559 | index 9df47421d69c..5be4d800e4ba 100644 |
4560 | --- a/drivers/s390/net/qeth_core_main.c |
4561 | +++ b/drivers/s390/net/qeth_core_main.c |
4562 | @@ -2451,50 +2451,46 @@ static int qeth_mpc_initialize(struct qeth_card *card) |
4563 | rc = qeth_cm_enable(card); |
4564 | if (rc) { |
4565 | QETH_CARD_TEXT_(card, 2, "2err%d", rc); |
4566 | - goto out_qdio; |
4567 | + return rc; |
4568 | } |
4569 | rc = qeth_cm_setup(card); |
4570 | if (rc) { |
4571 | QETH_CARD_TEXT_(card, 2, "3err%d", rc); |
4572 | - goto out_qdio; |
4573 | + return rc; |
4574 | } |
4575 | rc = qeth_ulp_enable(card); |
4576 | if (rc) { |
4577 | QETH_CARD_TEXT_(card, 2, "4err%d", rc); |
4578 | - goto out_qdio; |
4579 | + return rc; |
4580 | } |
4581 | rc = qeth_ulp_setup(card); |
4582 | if (rc) { |
4583 | QETH_CARD_TEXT_(card, 2, "5err%d", rc); |
4584 | - goto out_qdio; |
4585 | + return rc; |
4586 | } |
4587 | rc = qeth_alloc_qdio_queues(card); |
4588 | if (rc) { |
4589 | QETH_CARD_TEXT_(card, 2, "5err%d", rc); |
4590 | - goto out_qdio; |
4591 | + return rc; |
4592 | } |
4593 | rc = qeth_qdio_establish(card); |
4594 | if (rc) { |
4595 | QETH_CARD_TEXT_(card, 2, "6err%d", rc); |
4596 | qeth_free_qdio_queues(card); |
4597 | - goto out_qdio; |
4598 | + return rc; |
4599 | } |
4600 | rc = qeth_qdio_activate(card); |
4601 | if (rc) { |
4602 | QETH_CARD_TEXT_(card, 2, "7err%d", rc); |
4603 | - goto out_qdio; |
4604 | + return rc; |
4605 | } |
4606 | rc = qeth_dm_act(card); |
4607 | if (rc) { |
4608 | QETH_CARD_TEXT_(card, 2, "8err%d", rc); |
4609 | - goto out_qdio; |
4610 | + return rc; |
4611 | } |
4612 | |
4613 | return 0; |
4614 | -out_qdio: |
4615 | - qeth_qdio_clear_card(card, !IS_IQD(card)); |
4616 | - qdio_free(CARD_DDEV(card)); |
4617 | - return rc; |
4618 | } |
4619 | |
4620 | void qeth_print_status_message(struct qeth_card *card) |
4621 | @@ -3382,11 +3378,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) |
4622 | goto out; |
4623 | } |
4624 | |
4625 | - if (card->state != CARD_STATE_DOWN) { |
4626 | - rc = -1; |
4627 | - goto out; |
4628 | - } |
4629 | - |
4630 | qeth_free_qdio_queues(card); |
4631 | card->options.cq = cq; |
4632 | rc = 0; |
4633 | @@ -4972,10 +4963,8 @@ retriable: |
4634 | } |
4635 | if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { |
4636 | rc = qeth_query_setdiagass(card); |
4637 | - if (rc < 0) { |
4638 | + if (rc) |
4639 | QETH_CARD_TEXT_(card, 2, "8err%d", rc); |
4640 | - goto out; |
4641 | - } |
4642 | } |
4643 | return 0; |
4644 | out: |
4645 | diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c |
4646 | index 8b7d911dccd8..11e3292c0adf 100644 |
4647 | --- a/drivers/s390/net/qeth_l2_main.c |
4648 | +++ b/drivers/s390/net/qeth_l2_main.c |
4649 | @@ -287,12 +287,12 @@ static void qeth_l2_stop_card(struct qeth_card *card) |
4650 | card->state = CARD_STATE_HARDSETUP; |
4651 | } |
4652 | if (card->state == CARD_STATE_HARDSETUP) { |
4653 | - qeth_qdio_clear_card(card, 0); |
4654 | qeth_drain_output_queues(card); |
4655 | qeth_clear_working_pool_list(card); |
4656 | card->state = CARD_STATE_DOWN; |
4657 | } |
4658 | |
4659 | + qeth_qdio_clear_card(card, 0); |
4660 | flush_workqueue(card->event_wq); |
4661 | card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; |
4662 | card->info.promisc_mode = 0; |
4663 | @@ -1983,8 +1983,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) |
4664 | /* check if VNICC is currently enabled */ |
4665 | bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) |
4666 | { |
4667 | - /* if everything is turned off, VNICC is not active */ |
4668 | - if (!card->options.vnicc.cur_chars) |
4669 | + if (!card->options.vnicc.sup_chars) |
4670 | return false; |
4671 | /* default values are only OK if rx_bcast was not enabled by user |
4672 | * or the card is offline. |
4673 | @@ -2071,8 +2070,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card) |
4674 | /* enforce assumed default values and recover settings, if changed */ |
4675 | error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, |
4676 | timeout); |
4677 | - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; |
4678 | - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; |
4679 | + /* Change chars, if necessary */ |
4680 | + chars_tmp = card->options.vnicc.wanted_chars ^ |
4681 | + card->options.vnicc.cur_chars; |
4682 | chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; |
4683 | for_each_set_bit(i, &chars_tmp, chars_len) { |
4684 | vnicc = BIT(i); |
4685 | diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c |
4686 | index 32385327539b..5152970a9aa4 100644 |
4687 | --- a/drivers/s390/net/qeth_l3_main.c |
4688 | +++ b/drivers/s390/net/qeth_l3_main.c |
4689 | @@ -1426,12 +1426,12 @@ static void qeth_l3_stop_card(struct qeth_card *card) |
4690 | card->state = CARD_STATE_HARDSETUP; |
4691 | } |
4692 | if (card->state == CARD_STATE_HARDSETUP) { |
4693 | - qeth_qdio_clear_card(card, 0); |
4694 | qeth_drain_output_queues(card); |
4695 | qeth_clear_working_pool_list(card); |
4696 | card->state = CARD_STATE_DOWN; |
4697 | } |
4698 | |
4699 | + qeth_qdio_clear_card(card, 0); |
4700 | flush_workqueue(card->event_wq); |
4701 | card->info.promisc_mode = 0; |
4702 | } |
4703 | diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c |
4704 | index 2f73b33c9347..333fd4619dc6 100644 |
4705 | --- a/drivers/s390/net/qeth_l3_sys.c |
4706 | +++ b/drivers/s390/net/qeth_l3_sys.c |
4707 | @@ -270,24 +270,36 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, |
4708 | struct device_attribute *attr, const char *buf, size_t count) |
4709 | { |
4710 | struct qeth_card *card = dev_get_drvdata(dev); |
4711 | + int rc = 0; |
4712 | char *tmp; |
4713 | - int rc; |
4714 | |
4715 | if (!card) |
4716 | return -EINVAL; |
4717 | |
4718 | if (!IS_IQD(card)) |
4719 | return -EPERM; |
4720 | - if (card->state != CARD_STATE_DOWN) |
4721 | - return -EPERM; |
4722 | - if (card->options.sniffer) |
4723 | - return -EPERM; |
4724 | - if (card->options.cq == QETH_CQ_NOTAVAILABLE) |
4725 | - return -EPERM; |
4726 | + |
4727 | + mutex_lock(&card->conf_mutex); |
4728 | + if (card->state != CARD_STATE_DOWN) { |
4729 | + rc = -EPERM; |
4730 | + goto out; |
4731 | + } |
4732 | + |
4733 | + if (card->options.sniffer) { |
4734 | + rc = -EPERM; |
4735 | + goto out; |
4736 | + } |
4737 | + |
4738 | + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { |
4739 | + rc = -EPERM; |
4740 | + goto out; |
4741 | + } |
4742 | |
4743 | tmp = strsep((char **)&buf, "\n"); |
4744 | - if (strlen(tmp) > 8) |
4745 | - return -EINVAL; |
4746 | + if (strlen(tmp) > 8) { |
4747 | + rc = -EINVAL; |
4748 | + goto out; |
4749 | + } |
4750 | |
4751 | if (card->options.hsuid[0]) |
4752 | /* delete old ip address */ |
4753 | @@ -298,11 +310,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, |
4754 | card->options.hsuid[0] = '\0'; |
4755 | memcpy(card->dev->perm_addr, card->options.hsuid, 9); |
4756 | qeth_configure_cq(card, QETH_CQ_DISABLED); |
4757 | - return count; |
4758 | + goto out; |
4759 | } |
4760 | |
4761 | - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) |
4762 | - return -EPERM; |
4763 | + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { |
4764 | + rc = -EPERM; |
4765 | + goto out; |
4766 | + } |
4767 | |
4768 | snprintf(card->options.hsuid, sizeof(card->options.hsuid), |
4769 | "%-8s", tmp); |
4770 | @@ -311,6 +325,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, |
4771 | |
4772 | rc = qeth_l3_modify_hsuid(card, true); |
4773 | |
4774 | +out: |
4775 | + mutex_unlock(&card->conf_mutex); |
4776 | return rc ? rc : count; |
4777 | } |
4778 | |
4779 | diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c |
4780 | index 3e17af8aedeb..2cd2761bd249 100644 |
4781 | --- a/drivers/scsi/cxgbi/libcxgbi.c |
4782 | +++ b/drivers/scsi/cxgbi/libcxgbi.c |
4783 | @@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) |
4784 | "cdev 0x%p, p# %u.\n", cdev, cdev->nports); |
4785 | cxgbi_hbas_remove(cdev); |
4786 | cxgbi_device_portmap_cleanup(cdev); |
4787 | - cxgbi_ppm_release(cdev->cdev2ppm(cdev)); |
4788 | + if (cdev->cdev2ppm) |
4789 | + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); |
4790 | if (cdev->pmap.max_connect) |
4791 | cxgbi_free_big_mem(cdev->pmap.port_csk); |
4792 | kfree(cdev); |
4793 | diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c |
4794 | index fea3cb6a090b..752b71cfbe12 100644 |
4795 | --- a/drivers/scsi/mpt3sas/mpt3sas_base.c |
4796 | +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c |
4797 | @@ -5234,7 +5234,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) |
4798 | &ct->chain_buffer_dma); |
4799 | if (!ct->chain_buffer) { |
4800 | ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n"); |
4801 | - _base_release_memory_pools(ioc); |
4802 | goto out; |
4803 | } |
4804 | } |
4805 | diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c |
4806 | index ebb40160539f..ac2e88ec1190 100644 |
4807 | --- a/drivers/scsi/sd.c |
4808 | +++ b/drivers/scsi/sd.c |
4809 | @@ -1694,20 +1694,30 @@ static void sd_rescan(struct device *dev) |
4810 | static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, |
4811 | unsigned int cmd, unsigned long arg) |
4812 | { |
4813 | - struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; |
4814 | + struct gendisk *disk = bdev->bd_disk; |
4815 | + struct scsi_disk *sdkp = scsi_disk(disk); |
4816 | + struct scsi_device *sdev = sdkp->device; |
4817 | + void __user *p = compat_ptr(arg); |
4818 | int error; |
4819 | |
4820 | + error = scsi_verify_blk_ioctl(bdev, cmd); |
4821 | + if (error < 0) |
4822 | + return error; |
4823 | + |
4824 | error = scsi_ioctl_block_when_processing_errors(sdev, cmd, |
4825 | (mode & FMODE_NDELAY) != 0); |
4826 | if (error) |
4827 | return error; |
4828 | + |
4829 | + if (is_sed_ioctl(cmd)) |
4830 | + return sed_ioctl(sdkp->opal_dev, cmd, p); |
4831 | |
4832 | /* |
4833 | * Let the static ioctl translation table take care of it. |
4834 | */ |
4835 | if (!sdev->host->hostt->compat_ioctl) |
4836 | return -ENOIOCTLCMD; |
4837 | - return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); |
4838 | + return sdev->host->hostt->compat_ioctl(sdev, cmd, p); |
4839 | } |
4840 | #endif |
4841 | |
4842 | @@ -2192,8 +2202,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer |
4843 | u8 type; |
4844 | int ret = 0; |
4845 | |
4846 | - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) |
4847 | + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { |
4848 | + sdkp->protection_type = 0; |
4849 | return ret; |
4850 | + } |
4851 | |
4852 | type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ |
4853 | |
4854 | diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c |
4855 | index dc2f6d2b46ed..d2197a31abe5 100644 |
4856 | --- a/drivers/scsi/ufs/ufs_bsg.c |
4857 | +++ b/drivers/scsi/ufs/ufs_bsg.c |
4858 | @@ -202,7 +202,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) |
4859 | bsg_dev->parent = get_device(parent); |
4860 | bsg_dev->release = ufs_bsg_node_release; |
4861 | |
4862 | - dev_set_name(bsg_dev, "ufs-bsg"); |
4863 | + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); |
4864 | |
4865 | ret = device_add(bsg_dev); |
4866 | if (ret) |
4867 | diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c |
4868 | index ba8eff41b746..abbc1582f457 100644 |
4869 | --- a/drivers/spi/spi-atmel.c |
4870 | +++ b/drivers/spi/spi-atmel.c |
4871 | @@ -302,7 +302,6 @@ struct atmel_spi { |
4872 | bool use_cs_gpios; |
4873 | |
4874 | bool keep_cs; |
4875 | - bool cs_active; |
4876 | |
4877 | u32 fifo_size; |
4878 | }; |
4879 | @@ -1374,11 +1373,9 @@ static int atmel_spi_one_transfer(struct spi_master *master, |
4880 | &msg->transfers)) { |
4881 | as->keep_cs = true; |
4882 | } else { |
4883 | - as->cs_active = !as->cs_active; |
4884 | - if (as->cs_active) |
4885 | - cs_activate(as, msg->spi); |
4886 | - else |
4887 | - cs_deactivate(as, msg->spi); |
4888 | + cs_deactivate(as, msg->spi); |
4889 | + udelay(10); |
4890 | + cs_activate(as, msg->spi); |
4891 | } |
4892 | } |
4893 | |
4894 | @@ -1401,7 +1398,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master, |
4895 | atmel_spi_lock(as); |
4896 | cs_activate(as, spi); |
4897 | |
4898 | - as->cs_active = true; |
4899 | as->keep_cs = false; |
4900 | |
4901 | msg->status = 0; |
4902 | diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c |
4903 | index d08e9324140e..3528ed5eea9b 100644 |
4904 | --- a/drivers/spi/spi-fsl-lpspi.c |
4905 | +++ b/drivers/spi/spi-fsl-lpspi.c |
4906 | @@ -938,7 +938,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) |
4907 | ret = pm_runtime_get_sync(fsl_lpspi->dev); |
4908 | if (ret < 0) { |
4909 | dev_err(fsl_lpspi->dev, "failed to enable clock\n"); |
4910 | - return ret; |
4911 | + goto out_controller_put; |
4912 | } |
4913 | |
4914 | temp = readl(fsl_lpspi->base + IMX7ULP_PARAM); |
4915 | diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
4916 | index ae95ec0bc964..9f92165fe09f 100644 |
4917 | --- a/drivers/spi/spi-pxa2xx.c |
4918 | +++ b/drivers/spi/spi-pxa2xx.c |
4919 | @@ -1612,6 +1612,11 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller, |
4920 | return cs; |
4921 | } |
4922 | |
4923 | +static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi) |
4924 | +{ |
4925 | + return MAX_DMA_LEN; |
4926 | +} |
4927 | + |
4928 | static int pxa2xx_spi_probe(struct platform_device *pdev) |
4929 | { |
4930 | struct device *dev = &pdev->dev; |
4931 | @@ -1717,6 +1722,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) |
4932 | } else { |
4933 | controller->can_dma = pxa2xx_spi_can_dma; |
4934 | controller->max_dma_len = MAX_DMA_LEN; |
4935 | + controller->max_transfer_size = |
4936 | + pxa2xx_spi_max_dma_transfer_size; |
4937 | } |
4938 | } |
4939 | |
4940 | diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c |
4941 | index 15f5723d9f95..7222c7689c3c 100644 |
4942 | --- a/drivers/spi/spi-rspi.c |
4943 | +++ b/drivers/spi/spi-rspi.c |
4944 | @@ -1257,9 +1257,9 @@ static int rspi_probe(struct platform_device *pdev) |
4945 | ctlr->flags = ops->flags; |
4946 | ctlr->dev.of_node = pdev->dev.of_node; |
4947 | |
4948 | - ret = platform_get_irq_byname(pdev, "rx"); |
4949 | + ret = platform_get_irq_byname_optional(pdev, "rx"); |
4950 | if (ret < 0) { |
4951 | - ret = platform_get_irq_byname(pdev, "mux"); |
4952 | + ret = platform_get_irq_byname_optional(pdev, "mux"); |
4953 | if (ret < 0) |
4954 | ret = platform_get_irq(pdev, 0); |
4955 | if (ret >= 0) |
4956 | @@ -1270,10 +1270,6 @@ static int rspi_probe(struct platform_device *pdev) |
4957 | if (ret >= 0) |
4958 | rspi->tx_irq = ret; |
4959 | } |
4960 | - if (ret < 0) { |
4961 | - dev_err(&pdev->dev, "platform_get_irq error\n"); |
4962 | - goto error2; |
4963 | - } |
4964 | |
4965 | if (rspi->rx_irq == rspi->tx_irq) { |
4966 | /* Single multiplexed interrupt */ |
4967 | diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c |
4968 | index 8c9021b7f7a9..fa597e27be17 100644 |
4969 | --- a/drivers/spi/spi-sprd.c |
4970 | +++ b/drivers/spi/spi-sprd.c |
4971 | @@ -674,7 +674,7 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t) |
4972 | u16 word_delay, interval; |
4973 | u32 val; |
4974 | |
4975 | - val = readl_relaxed(ss->base + SPRD_SPI_CTL7); |
4976 | + val = readl_relaxed(ss->base + SPRD_SPI_CTL0); |
4977 | val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX); |
4978 | /* Set default chip selection, clock phase and clock polarity */ |
4979 | val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX; |
4980 | diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c |
4981 | index 636bf972adcf..5f29b7a836db 100644 |
4982 | --- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c |
4983 | +++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c |
4984 | @@ -63,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx) |
4985 | /* always use the matrix sent from userspace */ |
4986 | reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E; |
4987 | |
4988 | - if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC) |
4989 | + if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY)) |
4990 | reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E; |
4991 | vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2); |
4992 | |
4993 | diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c |
4994 | index 0d758e0c0f99..a9c134204351 100644 |
4995 | --- a/drivers/staging/media/hantro/hantro_h264.c |
4996 | +++ b/drivers/staging/media/hantro/hantro_h264.c |
4997 | @@ -20,7 +20,7 @@ |
4998 | /* Size with u32 units. */ |
4999 | #define CABAC_INIT_BUFFER_SIZE (460 * 2) |
5000 | #define POC_BUFFER_SIZE 34 |
5001 | -#define SCALING_LIST_SIZE (6 * 16 + 6 * 64) |
5002 | +#define SCALING_LIST_SIZE (6 * 16 + 2 * 64) |
5003 | |
5004 | #define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1) |
5005 | |
5006 | @@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = { |
5007 | 0x1f0c2517, 0x1f261440 |
5008 | }; |
5009 | |
5010 | -/* |
5011 | - * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process |
5012 | - * to get the values in matrix order. In addition, the hardware requires bytes |
5013 | - * swapped within each subsequent 4 bytes. Both arrays below include both |
5014 | - * transformations. |
5015 | - */ |
5016 | -static const u32 zig_zag_4x4[] = { |
5017 | - 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12 |
5018 | -}; |
5019 | - |
5020 | -static const u32 zig_zag_8x8[] = { |
5021 | - 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6, |
5022 | - 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31, |
5023 | - 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48, |
5024 | - 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60 |
5025 | -}; |
5026 | - |
5027 | static void |
5028 | reorder_scaling_list(struct hantro_ctx *ctx) |
5029 | { |
5030 | @@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx) |
5031 | const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling; |
5032 | const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4); |
5033 | const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]); |
5034 | - const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8); |
5035 | const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]); |
5036 | struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu; |
5037 | - u8 *dst = tbl->scaling_list; |
5038 | - const u8 *src; |
5039 | + u32 *dst = (u32 *)tbl->scaling_list; |
5040 | + const u32 *src; |
5041 | int i, j; |
5042 | |
5043 | - BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4); |
5044 | - BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8); |
5045 | - BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) != |
5046 | - num_list_4x4 * list_len_4x4 + |
5047 | - num_list_8x8 * list_len_8x8); |
5048 | - |
5049 | - src = &scaling->scaling_list_4x4[0][0]; |
5050 | - for (i = 0; i < num_list_4x4; ++i) { |
5051 | - for (j = 0; j < list_len_4x4; ++j) |
5052 | - dst[zig_zag_4x4[j]] = src[j]; |
5053 | - src += list_len_4x4; |
5054 | - dst += list_len_4x4; |
5055 | + for (i = 0; i < num_list_4x4; i++) { |
5056 | + src = (u32 *)&scaling->scaling_list_4x4[i]; |
5057 | + for (j = 0; j < list_len_4x4 / 4; j++) |
5058 | + *dst++ = swab32(src[j]); |
5059 | } |
5060 | |
5061 | - src = &scaling->scaling_list_8x8[0][0]; |
5062 | - for (i = 0; i < num_list_8x8; ++i) { |
5063 | - for (j = 0; j < list_len_8x8; ++j) |
5064 | - dst[zig_zag_8x8[j]] = src[j]; |
5065 | - src += list_len_8x8; |
5066 | - dst += list_len_8x8; |
5067 | + /* Only Intra/Inter Y lists */ |
5068 | + for (i = 0; i < 2; i++) { |
5069 | + src = (u32 *)&scaling->scaling_list_8x8[i]; |
5070 | + for (j = 0; j < list_len_8x8 / 4; j++) |
5071 | + *dst++ = swab32(src[j]); |
5072 | } |
5073 | } |
5074 | |
5075 | @@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder { |
5076 | const struct v4l2_h264_dpb_entry *dpb; |
5077 | s32 pocs[HANTRO_H264_DPB_SIZE]; |
5078 | u8 unordered_reflist[HANTRO_H264_DPB_SIZE]; |
5079 | + int frame_nums[HANTRO_H264_DPB_SIZE]; |
5080 | s32 curpoc; |
5081 | u8 num_valid; |
5082 | }; |
5083 | @@ -294,13 +268,20 @@ static void |
5084 | init_reflist_builder(struct hantro_ctx *ctx, |
5085 | struct hantro_h264_reflist_builder *b) |
5086 | { |
5087 | + const struct v4l2_ctrl_h264_slice_params *slice_params; |
5088 | const struct v4l2_ctrl_h264_decode_params *dec_param; |
5089 | + const struct v4l2_ctrl_h264_sps *sps; |
5090 | struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx); |
5091 | const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb; |
5092 | struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; |
5093 | + int cur_frame_num, max_frame_num; |
5094 | unsigned int i; |
5095 | |
5096 | dec_param = ctx->h264_dec.ctrls.decode; |
5097 | + slice_params = &ctx->h264_dec.ctrls.slices[0]; |
5098 | + sps = ctx->h264_dec.ctrls.sps; |
5099 | + max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4); |
5100 | + cur_frame_num = slice_params->frame_num; |
5101 | |
5102 | memset(b, 0, sizeof(*b)); |
5103 | b->dpb = dpb; |
5104 | @@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx, |
5105 | continue; |
5106 | |
5107 | buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx)); |
5108 | + |
5109 | + /* |
5110 | + * Handle frame_num wraparound as described in section |
5111 | + * '8.2.4.1 Decoding process for picture numbers' of the spec. |
5112 | + * TODO: This logic will have to be adjusted when we start |
5113 | + * supporting interlaced content. |
5114 | + */ |
5115 | + if (dpb[i].frame_num > cur_frame_num) |
5116 | + b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num; |
5117 | + else |
5118 | + b->frame_nums[i] = dpb[i].frame_num; |
5119 | + |
5120 | b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt, |
5121 | dpb[i].bottom_field_order_cnt); |
5122 | b->unordered_reflist[b->num_valid] = i; |
5123 | @@ -353,7 +346,7 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data) |
5124 | * ascending order. |
5125 | */ |
5126 | if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)) |
5127 | - return b->frame_num - a->frame_num; |
5128 | + return builder->frame_nums[idxb] - builder->frame_nums[idxa]; |
5129 | |
5130 | return a->pic_num - b->pic_num; |
5131 | } |
5132 | diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h |
5133 | index c7cd27efac8a..0b1cb9f9cbd1 100644 |
5134 | --- a/drivers/staging/media/ipu3/include/intel-ipu3.h |
5135 | +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h |
5136 | @@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s { |
5137 | __u16 reserved1; |
5138 | __u32 bayer_sign; |
5139 | __u8 bayer_nf; |
5140 | - __u8 reserved2[3]; |
5141 | + __u8 reserved2[7]; |
5142 | } __attribute__((aligned(32))) __packed; |
5143 | |
5144 | /** |
5145 | diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c |
5146 | index 08c6c9c410cc..c07526c12629 100644 |
5147 | --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c |
5148 | +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c |
5149 | @@ -244,8 +244,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx, |
5150 | sizeof(scaling->scaling_list_8x8[0])); |
5151 | |
5152 | cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1, |
5153 | - scaling->scaling_list_8x8[3], |
5154 | - sizeof(scaling->scaling_list_8x8[3])); |
5155 | + scaling->scaling_list_8x8[1], |
5156 | + sizeof(scaling->scaling_list_8x8[1])); |
5157 | |
5158 | cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4, |
5159 | scaling->scaling_list_4x4, |
5160 | diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c |
5161 | index 6949ea8bc387..51ffd5c002de 100644 |
5162 | --- a/drivers/target/target_core_iblock.c |
5163 | +++ b/drivers/target/target_core_iblock.c |
5164 | @@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio, |
5165 | } |
5166 | |
5167 | bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); |
5168 | - bip_set_seed(bip, bio->bi_iter.bi_sector); |
5169 | + /* virtual start sector must be in integrity interval units */ |
5170 | + bip_set_seed(bip, bio->bi_iter.bi_sector >> |
5171 | + (bi->interval_exp - SECTOR_SHIFT)); |
5172 | |
5173 | pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size, |
5174 | (unsigned long long)bip->bip_iter.bi_sector); |
5175 | diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c |
5176 | index 5e08f2657b90..34f602c3a882 100644 |
5177 | --- a/drivers/tty/serial/imx.c |
5178 | +++ b/drivers/tty/serial/imx.c |
5179 | @@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport) |
5180 | dev_err(dev, "DMA mapping error for TX.\n"); |
5181 | return; |
5182 | } |
5183 | - desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, |
5184 | + desc = dmaengine_prep_slave_sg(chan, sgl, ret, |
5185 | DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); |
5186 | if (!desc) { |
5187 | dma_unmap_sg(dev, sgl, sport->dma_tx_nents, |
5188 | diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c |
5189 | index 6157213a8359..c16234bca78f 100644 |
5190 | --- a/drivers/tty/serial/pch_uart.c |
5191 | +++ b/drivers/tty/serial/pch_uart.c |
5192 | @@ -233,6 +233,7 @@ struct eg20t_port { |
5193 | struct dma_chan *chan_rx; |
5194 | struct scatterlist *sg_tx_p; |
5195 | int nent; |
5196 | + int orig_nent; |
5197 | struct scatterlist sg_rx; |
5198 | int tx_dma_use; |
5199 | void *rx_buf_virt; |
5200 | @@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg) |
5201 | } |
5202 | xmit->tail &= UART_XMIT_SIZE - 1; |
5203 | async_tx_ack(priv->desc_tx); |
5204 | - dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE); |
5205 | + dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE); |
5206 | priv->tx_dma_use = 0; |
5207 | priv->nent = 0; |
5208 | + priv->orig_nent = 0; |
5209 | kfree(priv->sg_tx_p); |
5210 | pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT); |
5211 | } |
5212 | @@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) |
5213 | dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__); |
5214 | return 0; |
5215 | } |
5216 | + priv->orig_nent = num; |
5217 | priv->nent = nent; |
5218 | |
5219 | for (i = 0; i < nent; i++, sg++) { |
5220 | diff --git a/fs/affs/super.c b/fs/affs/super.c |
5221 | index cc463ae47c12..3812f7bc3a7f 100644 |
5222 | --- a/fs/affs/super.c |
5223 | +++ b/fs/affs/super.c |
5224 | @@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data) |
5225 | int root_block; |
5226 | unsigned long mount_flags; |
5227 | int res = 0; |
5228 | - char *new_opts; |
5229 | char volume[32]; |
5230 | char *prefix = NULL; |
5231 | |
5232 | - new_opts = kstrdup(data, GFP_KERNEL); |
5233 | - if (data && !new_opts) |
5234 | - return -ENOMEM; |
5235 | - |
5236 | pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data); |
5237 | |
5238 | sync_filesystem(sb); |
5239 | @@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data) |
5240 | &blocksize, &prefix, volume, |
5241 | &mount_flags)) { |
5242 | kfree(prefix); |
5243 | - kfree(new_opts); |
5244 | return -EINVAL; |
5245 | } |
5246 | |
5247 | diff --git a/fs/afs/dir.c b/fs/afs/dir.c |
5248 | index 497f979018c2..5c794f4b051a 100644 |
5249 | --- a/fs/afs/dir.c |
5250 | +++ b/fs/afs/dir.c |
5251 | @@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, |
5252 | unsigned int flags) |
5253 | { |
5254 | struct afs_vnode *dvnode = AFS_FS_I(dir); |
5255 | + struct afs_fid fid = {}; |
5256 | struct inode *inode; |
5257 | struct dentry *d; |
5258 | struct key *key; |
5259 | @@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry, |
5260 | afs_stat_v(dvnode, n_lookup); |
5261 | inode = afs_do_lookup(dir, dentry, key); |
5262 | key_put(key); |
5263 | - if (inode == ERR_PTR(-ENOENT)) { |
5264 | + if (inode == ERR_PTR(-ENOENT)) |
5265 | inode = afs_try_auto_mntpt(dentry, dir); |
5266 | - } else { |
5267 | - dentry->d_fsdata = |
5268 | - (void *)(unsigned long)dvnode->status.data_version; |
5269 | - } |
5270 | + |
5271 | + if (!IS_ERR_OR_NULL(inode)) |
5272 | + fid = AFS_FS_I(inode)->fid; |
5273 | + |
5274 | d = d_splice_alias(inode, dentry); |
5275 | if (!IS_ERR_OR_NULL(d)) { |
5276 | d->d_fsdata = dentry->d_fsdata; |
5277 | - trace_afs_lookup(dvnode, &d->d_name, |
5278 | - inode ? AFS_FS_I(inode) : NULL); |
5279 | + trace_afs_lookup(dvnode, &d->d_name, &fid); |
5280 | } else { |
5281 | - trace_afs_lookup(dvnode, &dentry->d_name, |
5282 | - IS_ERR_OR_NULL(inode) ? NULL |
5283 | - : AFS_FS_I(inode)); |
5284 | + trace_afs_lookup(dvnode, &dentry->d_name, &fid); |
5285 | } |
5286 | return d; |
5287 | } |
5288 | diff --git a/fs/afs/super.c b/fs/afs/super.c |
5289 | index d9a6036b70b9..7f8a9b3137bf 100644 |
5290 | --- a/fs/afs/super.c |
5291 | +++ b/fs/afs/super.c |
5292 | @@ -404,6 +404,7 @@ static int afs_test_super(struct super_block *sb, struct fs_context *fc) |
5293 | return (as->net_ns == fc->net_ns && |
5294 | as->volume && |
5295 | as->volume->vid == ctx->volume->vid && |
5296 | + as->cell == ctx->cell && |
5297 | !as->dyn_root); |
5298 | } |
5299 | |
5300 | diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
5301 | index eaafd00f93d4..5739b8fc7fff 100644 |
5302 | --- a/fs/btrfs/file.c |
5303 | +++ b/fs/btrfs/file.c |
5304 | @@ -1903,9 +1903,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, |
5305 | (iocb->ki_flags & IOCB_NOWAIT)) |
5306 | return -EOPNOTSUPP; |
5307 | |
5308 | - if (!inode_trylock(inode)) { |
5309 | - if (iocb->ki_flags & IOCB_NOWAIT) |
5310 | + if (iocb->ki_flags & IOCB_NOWAIT) { |
5311 | + if (!inode_trylock(inode)) |
5312 | return -EAGAIN; |
5313 | + } else { |
5314 | inode_lock(inode); |
5315 | } |
5316 | |
5317 | diff --git a/fs/buffer.c b/fs/buffer.c |
5318 | index 7744488f7bde..91ceca52d14f 100644 |
5319 | --- a/fs/buffer.c |
5320 | +++ b/fs/buffer.c |
5321 | @@ -2991,7 +2991,7 @@ static void end_bio_bh_io_sync(struct bio *bio) |
5322 | * errors, this only handles the "we need to be able to |
5323 | * do IO at the final sector" case. |
5324 | */ |
5325 | -void guard_bio_eod(int op, struct bio *bio) |
5326 | +void guard_bio_eod(struct bio *bio) |
5327 | { |
5328 | sector_t maxsector; |
5329 | struct hd_struct *part; |
5330 | @@ -3055,15 +3055,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, |
5331 | bio->bi_end_io = end_bio_bh_io_sync; |
5332 | bio->bi_private = bh; |
5333 | |
5334 | - /* Take care of bh's that straddle the end of the device */ |
5335 | - guard_bio_eod(op, bio); |
5336 | - |
5337 | if (buffer_meta(bh)) |
5338 | op_flags |= REQ_META; |
5339 | if (buffer_prio(bh)) |
5340 | op_flags |= REQ_PRIO; |
5341 | bio_set_op_attrs(bio, op, op_flags); |
5342 | |
5343 | + /* Take care of bh's that straddle the end of the device */ |
5344 | + guard_bio_eod(bio); |
5345 | + |
5346 | if (wbc) { |
5347 | wbc_init_bio(wbc, bio); |
5348 | wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); |
5349 | diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c |
5350 | index 8b0b512c5792..afe1f03aabe3 100644 |
5351 | --- a/fs/cifs/smb2file.c |
5352 | +++ b/fs/cifs/smb2file.c |
5353 | @@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, |
5354 | goto out; |
5355 | |
5356 | |
5357 | - if (oparms->tcon->use_resilient) { |
5358 | + if (oparms->tcon->use_resilient) { |
5359 | /* default timeout is 0, servers pick default (120 seconds) */ |
5360 | nr_ioctl_req.Timeout = |
5361 | cpu_to_le32(oparms->tcon->handle_timeout); |
5362 | diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c |
5363 | index 5755e897a5f0..2e9c73165800 100644 |
5364 | --- a/fs/f2fs/data.c |
5365 | +++ b/fs/f2fs/data.c |
5366 | @@ -2098,7 +2098,7 @@ static int __write_data_page(struct page *page, bool *submitted, |
5367 | loff_t i_size = i_size_read(inode); |
5368 | const pgoff_t end_index = ((unsigned long long) i_size) |
5369 | >> PAGE_SHIFT; |
5370 | - loff_t psize = (page->index + 1) << PAGE_SHIFT; |
5371 | + loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT; |
5372 | unsigned offset = 0; |
5373 | bool need_balance_fs = false; |
5374 | int err = 0; |
5375 | diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c |
5376 | index 8ed8e4328bd1..fae665691481 100644 |
5377 | --- a/fs/f2fs/file.c |
5378 | +++ b/fs/f2fs/file.c |
5379 | @@ -1139,7 +1139,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, |
5380 | } |
5381 | dn.ofs_in_node++; |
5382 | i++; |
5383 | - new_size = (dst + i) << PAGE_SHIFT; |
5384 | + new_size = (loff_t)(dst + i) << PAGE_SHIFT; |
5385 | if (dst_inode->i_size < new_size) |
5386 | f2fs_i_size_write(dst_inode, new_size); |
5387 | } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR)); |
5388 | diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c |
5389 | index c53e3b892210..01ff37b76652 100644 |
5390 | --- a/fs/gfs2/file.c |
5391 | +++ b/fs/gfs2/file.c |
5392 | @@ -6,6 +6,7 @@ |
5393 | |
5394 | #include <linux/slab.h> |
5395 | #include <linux/spinlock.h> |
5396 | +#include <linux/compat.h> |
5397 | #include <linux/completion.h> |
5398 | #include <linux/buffer_head.h> |
5399 | #include <linux/pagemap.h> |
5400 | @@ -354,6 +355,31 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
5401 | return -ENOTTY; |
5402 | } |
5403 | |
5404 | +#ifdef CONFIG_COMPAT |
5405 | +static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
5406 | +{ |
5407 | + switch(cmd) { |
5408 | + /* These are just misnamed, they actually get/put from/to user an int */ |
5409 | + case FS_IOC32_GETFLAGS: |
5410 | + cmd = FS_IOC_GETFLAGS; |
5411 | + break; |
5412 | + case FS_IOC32_SETFLAGS: |
5413 | + cmd = FS_IOC_SETFLAGS; |
5414 | + break; |
5415 | + /* Keep this list in sync with gfs2_ioctl */ |
5416 | + case FITRIM: |
5417 | + case FS_IOC_GETFSLABEL: |
5418 | + break; |
5419 | + default: |
5420 | + return -ENOIOCTLCMD; |
5421 | + } |
5422 | + |
5423 | + return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); |
5424 | +} |
5425 | +#else |
5426 | +#define gfs2_compat_ioctl NULL |
5427 | +#endif |
5428 | + |
5429 | /** |
5430 | * gfs2_size_hint - Give a hint to the size of a write request |
5431 | * @filep: The struct file |
5432 | @@ -1294,6 +1320,7 @@ const struct file_operations gfs2_file_fops = { |
5433 | .write_iter = gfs2_file_write_iter, |
5434 | .iopoll = iomap_dio_iopoll, |
5435 | .unlocked_ioctl = gfs2_ioctl, |
5436 | + .compat_ioctl = gfs2_compat_ioctl, |
5437 | .mmap = gfs2_mmap, |
5438 | .open = gfs2_open, |
5439 | .release = gfs2_release, |
5440 | @@ -1309,6 +1336,7 @@ const struct file_operations gfs2_file_fops = { |
5441 | const struct file_operations gfs2_dir_fops = { |
5442 | .iterate_shared = gfs2_readdir, |
5443 | .unlocked_ioctl = gfs2_ioctl, |
5444 | + .compat_ioctl = gfs2_compat_ioctl, |
5445 | .open = gfs2_open, |
5446 | .release = gfs2_release, |
5447 | .fsync = gfs2_fsync, |
5448 | @@ -1325,6 +1353,7 @@ const struct file_operations gfs2_file_fops_nolock = { |
5449 | .write_iter = gfs2_file_write_iter, |
5450 | .iopoll = iomap_dio_iopoll, |
5451 | .unlocked_ioctl = gfs2_ioctl, |
5452 | + .compat_ioctl = gfs2_compat_ioctl, |
5453 | .mmap = gfs2_mmap, |
5454 | .open = gfs2_open, |
5455 | .release = gfs2_release, |
5456 | @@ -1338,6 +1367,7 @@ const struct file_operations gfs2_file_fops_nolock = { |
5457 | const struct file_operations gfs2_dir_fops_nolock = { |
5458 | .iterate_shared = gfs2_readdir, |
5459 | .unlocked_ioctl = gfs2_ioctl, |
5460 | + .compat_ioctl = gfs2_compat_ioctl, |
5461 | .open = gfs2_open, |
5462 | .release = gfs2_release, |
5463 | .fsync = gfs2_fsync, |
5464 | diff --git a/fs/internal.h b/fs/internal.h |
5465 | index 315fcd8d237c..7651e8b8ef13 100644 |
5466 | --- a/fs/internal.h |
5467 | +++ b/fs/internal.h |
5468 | @@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) |
5469 | /* |
5470 | * buffer.c |
5471 | */ |
5472 | -extern void guard_bio_eod(int rw, struct bio *bio); |
5473 | +extern void guard_bio_eod(struct bio *bio); |
5474 | extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, |
5475 | get_block_t *get_block, struct iomap *iomap); |
5476 | |
5477 | diff --git a/fs/mpage.c b/fs/mpage.c |
5478 | index a63620cdb73a..ccba3c4c4479 100644 |
5479 | --- a/fs/mpage.c |
5480 | +++ b/fs/mpage.c |
5481 | @@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) |
5482 | { |
5483 | bio->bi_end_io = mpage_end_io; |
5484 | bio_set_op_attrs(bio, op, op_flags); |
5485 | - guard_bio_eod(op, bio); |
5486 | + guard_bio_eod(bio); |
5487 | submit_bio(bio); |
5488 | return NULL; |
5489 | } |
5490 | diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c |
5491 | index cbc17a203248..887f9136a9db 100644 |
5492 | --- a/fs/nfs/nfs2xdr.c |
5493 | +++ b/fs/nfs/nfs2xdr.c |
5494 | @@ -370,7 +370,7 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr, |
5495 | } else |
5496 | p = xdr_time_not_set(p); |
5497 | if (attr->ia_valid & ATTR_MTIME_SET) { |
5498 | - ts = timespec64_to_timespec(attr->ia_atime); |
5499 | + ts = timespec64_to_timespec(attr->ia_mtime); |
5500 | xdr_encode_time(p, &ts); |
5501 | } else if (attr->ia_valid & ATTR_MTIME) { |
5502 | ts = timespec64_to_timespec(attr->ia_mtime); |
5503 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
5504 | index caacf5e7f5e1..f26d714f9f28 100644 |
5505 | --- a/fs/nfs/nfs4proc.c |
5506 | +++ b/fs/nfs/nfs4proc.c |
5507 | @@ -521,9 +521,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server, |
5508 | case -NFS4ERR_DEADSESSION: |
5509 | case -NFS4ERR_SEQ_FALSE_RETRY: |
5510 | case -NFS4ERR_SEQ_MISORDERED: |
5511 | - dprintk("%s ERROR: %d Reset session\n", __func__, |
5512 | - errorcode); |
5513 | - nfs4_schedule_session_recovery(clp->cl_session, errorcode); |
5514 | + /* Handled in nfs41_sequence_process() */ |
5515 | goto wait_on_recovery; |
5516 | #endif /* defined(CONFIG_NFS_V4_1) */ |
5517 | case -NFS4ERR_FILE_OPEN: |
5518 | @@ -782,6 +780,7 @@ static int nfs41_sequence_process(struct rpc_task *task, |
5519 | struct nfs4_session *session; |
5520 | struct nfs4_slot *slot = res->sr_slot; |
5521 | struct nfs_client *clp; |
5522 | + int status; |
5523 | int ret = 1; |
5524 | |
5525 | if (slot == NULL) |
5526 | @@ -793,8 +792,13 @@ static int nfs41_sequence_process(struct rpc_task *task, |
5527 | session = slot->table->session; |
5528 | |
5529 | trace_nfs4_sequence_done(session, res); |
5530 | + |
5531 | + status = res->sr_status; |
5532 | + if (task->tk_status == -NFS4ERR_DEADSESSION) |
5533 | + status = -NFS4ERR_DEADSESSION; |
5534 | + |
5535 | /* Check the SEQUENCE operation status */ |
5536 | - switch (res->sr_status) { |
5537 | + switch (status) { |
5538 | case 0: |
5539 | /* Mark this sequence number as having been acked */ |
5540 | nfs4_slot_sequence_acked(slot, slot->seq_nr); |
5541 | @@ -866,6 +870,10 @@ static int nfs41_sequence_process(struct rpc_task *task, |
5542 | */ |
5543 | slot->seq_nr = slot->seq_nr_highest_sent; |
5544 | goto out_retry; |
5545 | + case -NFS4ERR_BADSESSION: |
5546 | + case -NFS4ERR_DEADSESSION: |
5547 | + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
5548 | + goto session_recover; |
5549 | default: |
5550 | /* Just update the slot sequence no. */ |
5551 | slot->seq_done = 1; |
5552 | @@ -876,8 +884,10 @@ out: |
5553 | out_noaction: |
5554 | return ret; |
5555 | session_recover: |
5556 | - nfs4_schedule_session_recovery(session, res->sr_status); |
5557 | - goto retry_nowait; |
5558 | + nfs4_schedule_session_recovery(session, status); |
5559 | + dprintk("%s ERROR: %d Reset session\n", __func__, status); |
5560 | + nfs41_sequence_free_slot(res); |
5561 | + goto out; |
5562 | retry_new_seq: |
5563 | ++slot->seq_nr; |
5564 | retry_nowait: |
5565 | @@ -2188,7 +2198,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct |
5566 | case -NFS4ERR_BAD_HIGH_SLOT: |
5567 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
5568 | case -NFS4ERR_DEADSESSION: |
5569 | - nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); |
5570 | return -EAGAIN; |
5571 | case -NFS4ERR_STALE_CLIENTID: |
5572 | case -NFS4ERR_STALE_STATEID: |
5573 | @@ -6243,8 +6252,10 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data) |
5574 | |
5575 | d_data = (struct nfs4_delegreturndata *)data; |
5576 | |
5577 | - if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) |
5578 | + if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) { |
5579 | + nfs4_sequence_done(task, &d_data->res.seq_res); |
5580 | return; |
5581 | + } |
5582 | |
5583 | lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL; |
5584 | if (lo && !pnfs_layout_is_valid(lo)) { |
5585 | @@ -7820,6 +7831,15 @@ nfs41_same_server_scope(struct nfs41_server_scope *a, |
5586 | static void |
5587 | nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata) |
5588 | { |
5589 | + struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp; |
5590 | + struct nfs_client *clp = args->client; |
5591 | + |
5592 | + switch (task->tk_status) { |
5593 | + case -NFS4ERR_BADSESSION: |
5594 | + case -NFS4ERR_DEADSESSION: |
5595 | + nfs4_schedule_session_recovery(clp->cl_session, |
5596 | + task->tk_status); |
5597 | + } |
5598 | } |
5599 | |
5600 | static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = { |
5601 | @@ -8867,8 +8887,6 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf |
5602 | case -NFS4ERR_BADSESSION: |
5603 | case -NFS4ERR_DEADSESSION: |
5604 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
5605 | - nfs4_schedule_session_recovery(clp->cl_session, |
5606 | - task->tk_status); |
5607 | break; |
5608 | default: |
5609 | nfs4_schedule_lease_recovery(clp); |
5610 | diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig |
5611 | index 10cefb0c07c7..c4b1a89b8845 100644 |
5612 | --- a/fs/nfsd/Kconfig |
5613 | +++ b/fs/nfsd/Kconfig |
5614 | @@ -73,7 +73,7 @@ config NFSD_V4 |
5615 | select NFSD_V3 |
5616 | select FS_POSIX_ACL |
5617 | select SUNRPC_GSS |
5618 | - select CRYPTO |
5619 | + select CRYPTO_SHA256 |
5620 | select GRACE_PERIOD |
5621 | help |
5622 | This option enables support in your system's NFS server for |
5623 | diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c |
5624 | index 38c0aeda500e..4798667af647 100644 |
5625 | --- a/fs/nfsd/nfs4proc.c |
5626 | +++ b/fs/nfsd/nfs4proc.c |
5627 | @@ -1298,7 +1298,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
5628 | out: |
5629 | return status; |
5630 | out_err: |
5631 | - cleanup_async_copy(async_copy); |
5632 | + if (async_copy) |
5633 | + cleanup_async_copy(async_copy); |
5634 | goto out; |
5635 | } |
5636 | |
5637 | diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c |
5638 | index cdc75ad4438b..c35c0ebaf722 100644 |
5639 | --- a/fs/nfsd/nfs4recover.c |
5640 | +++ b/fs/nfsd/nfs4recover.c |
5641 | @@ -1578,6 +1578,7 @@ nfsd4_cld_tracking_init(struct net *net) |
5642 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
5643 | bool running; |
5644 | int retries = 10; |
5645 | + struct crypto_shash *tfm; |
5646 | |
5647 | status = nfs4_cld_state_init(net); |
5648 | if (status) |
5649 | @@ -1586,11 +1587,6 @@ nfsd4_cld_tracking_init(struct net *net) |
5650 | status = __nfsd4_init_cld_pipe(net); |
5651 | if (status) |
5652 | goto err_shutdown; |
5653 | - nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0); |
5654 | - if (IS_ERR(nn->cld_net->cn_tfm)) { |
5655 | - status = PTR_ERR(nn->cld_net->cn_tfm); |
5656 | - goto err_remove; |
5657 | - } |
5658 | |
5659 | /* |
5660 | * rpc pipe upcalls take 30 seconds to time out, so we don't want to |
5661 | @@ -1607,6 +1603,12 @@ nfsd4_cld_tracking_init(struct net *net) |
5662 | status = -ETIMEDOUT; |
5663 | goto err_remove; |
5664 | } |
5665 | + tfm = crypto_alloc_shash("sha256", 0, 0); |
5666 | + if (IS_ERR(tfm)) { |
5667 | + status = PTR_ERR(tfm); |
5668 | + goto err_remove; |
5669 | + } |
5670 | + nn->cld_net->cn_tfm = tfm; |
5671 | |
5672 | status = nfsd4_cld_get_version(nn); |
5673 | if (status == -EOPNOTSUPP) |
5674 | diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c |
5675 | index 699a560efbb0..900e4ef686bf 100644 |
5676 | --- a/fs/ocfs2/journal.c |
5677 | +++ b/fs/ocfs2/journal.c |
5678 | @@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed) |
5679 | |
5680 | ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num); |
5681 | |
5682 | + if (replayed) { |
5683 | + jbd2_journal_lock_updates(journal->j_journal); |
5684 | + status = jbd2_journal_flush(journal->j_journal); |
5685 | + jbd2_journal_unlock_updates(journal->j_journal); |
5686 | + if (status < 0) |
5687 | + mlog_errno(status); |
5688 | + } |
5689 | + |
5690 | status = ocfs2_journal_toggle_dirty(osb, 1, replayed); |
5691 | if (status < 0) { |
5692 | mlog_errno(status); |
5693 | diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c |
5694 | index 4fd9683b8245..826dad0243dc 100644 |
5695 | --- a/fs/ubifs/journal.c |
5696 | +++ b/fs/ubifs/journal.c |
5697 | @@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode) |
5698 | fname_name(&nm) = xent->name; |
5699 | fname_len(&nm) = le16_to_cpu(xent->nlen); |
5700 | |
5701 | - xino = ubifs_iget(c->vfs_sb, xent->inum); |
5702 | + xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum)); |
5703 | if (IS_ERR(xino)) { |
5704 | err = PTR_ERR(xino); |
5705 | ubifs_err(c, "dead directory entry '%s', error %d", |
5706 | diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c |
5707 | index 3b4b4114f208..54d6db61106f 100644 |
5708 | --- a/fs/ubifs/orphan.c |
5709 | +++ b/fs/ubifs/orphan.c |
5710 | @@ -631,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, |
5711 | ino_t inum; |
5712 | int i, n, err, first = 1; |
5713 | |
5714 | + ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS); |
5715 | + if (!ino) |
5716 | + return -ENOMEM; |
5717 | + |
5718 | list_for_each_entry(snod, &sleb->nodes, list) { |
5719 | if (snod->type != UBIFS_ORPH_NODE) { |
5720 | ubifs_err(c, "invalid node type %d in orphan area at %d:%d", |
5721 | snod->type, sleb->lnum, snod->offs); |
5722 | ubifs_dump_node(c, snod->node); |
5723 | - return -EINVAL; |
5724 | + err = -EINVAL; |
5725 | + goto out_free; |
5726 | } |
5727 | |
5728 | orph = snod->node; |
5729 | @@ -663,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb, |
5730 | ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d", |
5731 | cmt_no, sleb->lnum, snod->offs); |
5732 | ubifs_dump_node(c, snod->node); |
5733 | - return -EINVAL; |
5734 | + err = -EINVAL; |
5735 | + goto out_free; |
5736 | } |
5737 | dbg_rcvry("out of date LEB %d", sleb->lnum); |
5738 | *outofdate = 1; |
5739 | - return 0; |
5740 | + err = 0; |
5741 | + goto out_free; |
5742 | } |
5743 | |
5744 | if (first) |
5745 | first = 0; |
5746 | |
5747 | - ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS); |
5748 | - if (!ino) |
5749 | - return -ENOMEM; |
5750 | - |
5751 | n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; |
5752 | for (i = 0; i < n; i++) { |
5753 | union ubifs_key key1, key2; |
5754 | diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c |
5755 | index 7d4547e5202d..5e1e8ec0589e 100644 |
5756 | --- a/fs/ubifs/super.c |
5757 | +++ b/fs/ubifs/super.c |
5758 | @@ -2267,10 +2267,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, |
5759 | } |
5760 | } else { |
5761 | err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0); |
5762 | - if (err) { |
5763 | - kfree(c); |
5764 | + if (err) |
5765 | goto out_deact; |
5766 | - } |
5767 | /* We do not support atime */ |
5768 | sb->s_flags |= SB_ACTIVE; |
5769 | if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT)) |
5770 | diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h |
5771 | index a950a22c4890..cac7404b2bdd 100644 |
5772 | --- a/include/asm-generic/cacheflush.h |
5773 | +++ b/include/asm-generic/cacheflush.h |
5774 | @@ -11,71 +11,102 @@ |
5775 | * The cache doesn't need to be flushed when TLB entries change when |
5776 | * the cache is mapped to physical memory, not virtual memory |
5777 | */ |
5778 | +#ifndef flush_cache_all |
5779 | static inline void flush_cache_all(void) |
5780 | { |
5781 | } |
5782 | +#endif |
5783 | |
5784 | +#ifndef flush_cache_mm |
5785 | static inline void flush_cache_mm(struct mm_struct *mm) |
5786 | { |
5787 | } |
5788 | +#endif |
5789 | |
5790 | +#ifndef flush_cache_dup_mm |
5791 | static inline void flush_cache_dup_mm(struct mm_struct *mm) |
5792 | { |
5793 | } |
5794 | +#endif |
5795 | |
5796 | +#ifndef flush_cache_range |
5797 | static inline void flush_cache_range(struct vm_area_struct *vma, |
5798 | unsigned long start, |
5799 | unsigned long end) |
5800 | { |
5801 | } |
5802 | +#endif |
5803 | |
5804 | +#ifndef flush_cache_page |
5805 | static inline void flush_cache_page(struct vm_area_struct *vma, |
5806 | unsigned long vmaddr, |
5807 | unsigned long pfn) |
5808 | { |
5809 | } |
5810 | +#endif |
5811 | |
5812 | +#ifndef flush_dcache_page |
5813 | static inline void flush_dcache_page(struct page *page) |
5814 | { |
5815 | } |
5816 | +#endif |
5817 | |
5818 | +#ifndef flush_dcache_mmap_lock |
5819 | static inline void flush_dcache_mmap_lock(struct address_space *mapping) |
5820 | { |
5821 | } |
5822 | +#endif |
5823 | |
5824 | +#ifndef flush_dcache_mmap_unlock |
5825 | static inline void flush_dcache_mmap_unlock(struct address_space *mapping) |
5826 | { |
5827 | } |
5828 | +#endif |
5829 | |
5830 | +#ifndef flush_icache_range |
5831 | static inline void flush_icache_range(unsigned long start, unsigned long end) |
5832 | { |
5833 | } |
5834 | +#endif |
5835 | |
5836 | +#ifndef flush_icache_page |
5837 | static inline void flush_icache_page(struct vm_area_struct *vma, |
5838 | struct page *page) |
5839 | { |
5840 | } |
5841 | +#endif |
5842 | |
5843 | +#ifndef flush_icache_user_range |
5844 | static inline void flush_icache_user_range(struct vm_area_struct *vma, |
5845 | struct page *page, |
5846 | unsigned long addr, int len) |
5847 | { |
5848 | } |
5849 | +#endif |
5850 | |
5851 | +#ifndef flush_cache_vmap |
5852 | static inline void flush_cache_vmap(unsigned long start, unsigned long end) |
5853 | { |
5854 | } |
5855 | +#endif |
5856 | |
5857 | +#ifndef flush_cache_vunmap |
5858 | static inline void flush_cache_vunmap(unsigned long start, unsigned long end) |
5859 | { |
5860 | } |
5861 | +#endif |
5862 | |
5863 | -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
5864 | +#ifndef copy_to_user_page |
5865 | +#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
5866 | do { \ |
5867 | memcpy(dst, src, len); \ |
5868 | flush_icache_user_range(vma, page, vaddr, len); \ |
5869 | } while (0) |
5870 | +#endif |
5871 | + |
5872 | +#ifndef copy_from_user_page |
5873 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
5874 | memcpy(dst, src, len) |
5875 | +#endif |
5876 | |
5877 | #endif /* __ASM_CACHEFLUSH_H */ |
5878 | diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h |
5879 | index 734b6f7081b8..3175dfeaed2c 100644 |
5880 | --- a/include/crypto/internal/skcipher.h |
5881 | +++ b/include/crypto/internal/skcipher.h |
5882 | @@ -205,19 +205,6 @@ static inline unsigned int crypto_skcipher_alg_max_keysize( |
5883 | return alg->max_keysize; |
5884 | } |
5885 | |
5886 | -static inline unsigned int crypto_skcipher_alg_chunksize( |
5887 | - struct skcipher_alg *alg) |
5888 | -{ |
5889 | - if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == |
5890 | - CRYPTO_ALG_TYPE_BLKCIPHER) |
5891 | - return alg->base.cra_blocksize; |
5892 | - |
5893 | - if (alg->base.cra_ablkcipher.encrypt) |
5894 | - return alg->base.cra_blocksize; |
5895 | - |
5896 | - return alg->chunksize; |
5897 | -} |
5898 | - |
5899 | static inline unsigned int crypto_skcipher_alg_walksize( |
5900 | struct skcipher_alg *alg) |
5901 | { |
5902 | @@ -231,23 +218,6 @@ static inline unsigned int crypto_skcipher_alg_walksize( |
5903 | return alg->walksize; |
5904 | } |
5905 | |
5906 | -/** |
5907 | - * crypto_skcipher_chunksize() - obtain chunk size |
5908 | - * @tfm: cipher handle |
5909 | - * |
5910 | - * The block size is set to one for ciphers such as CTR. However, |
5911 | - * you still need to provide incremental updates in multiples of |
5912 | - * the underlying block size as the IV does not have sub-block |
5913 | - * granularity. This is known in this API as the chunk size. |
5914 | - * |
5915 | - * Return: chunk size in bytes |
5916 | - */ |
5917 | -static inline unsigned int crypto_skcipher_chunksize( |
5918 | - struct crypto_skcipher *tfm) |
5919 | -{ |
5920 | - return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); |
5921 | -} |
5922 | - |
5923 | /** |
5924 | * crypto_skcipher_walksize() - obtain walk size |
5925 | * @tfm: cipher handle |
5926 | diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h |
5927 | index 37c164234d97..aada87916918 100644 |
5928 | --- a/include/crypto/skcipher.h |
5929 | +++ b/include/crypto/skcipher.h |
5930 | @@ -304,6 +304,36 @@ static inline unsigned int crypto_skcipher_blocksize( |
5931 | return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm)); |
5932 | } |
5933 | |
5934 | +static inline unsigned int crypto_skcipher_alg_chunksize( |
5935 | + struct skcipher_alg *alg) |
5936 | +{ |
5937 | + if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) == |
5938 | + CRYPTO_ALG_TYPE_BLKCIPHER) |
5939 | + return alg->base.cra_blocksize; |
5940 | + |
5941 | + if (alg->base.cra_ablkcipher.encrypt) |
5942 | + return alg->base.cra_blocksize; |
5943 | + |
5944 | + return alg->chunksize; |
5945 | +} |
5946 | + |
5947 | +/** |
5948 | + * crypto_skcipher_chunksize() - obtain chunk size |
5949 | + * @tfm: cipher handle |
5950 | + * |
5951 | + * The block size is set to one for ciphers such as CTR. However, |
5952 | + * you still need to provide incremental updates in multiples of |
5953 | + * the underlying block size as the IV does not have sub-block |
5954 | + * granularity. This is known in this API as the chunk size. |
5955 | + * |
5956 | + * Return: chunk size in bytes |
5957 | + */ |
5958 | +static inline unsigned int crypto_skcipher_chunksize( |
5959 | + struct crypto_skcipher *tfm) |
5960 | +{ |
5961 | + return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm)); |
5962 | +} |
5963 | + |
5964 | static inline unsigned int crypto_sync_skcipher_blocksize( |
5965 | struct crypto_sync_skcipher *tfm) |
5966 | { |
5967 | diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h |
5968 | index d4ee6e942562..38555435a64a 100644 |
5969 | --- a/include/linux/uaccess.h |
5970 | +++ b/include/linux/uaccess.h |
5971 | @@ -337,6 +337,18 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size); |
5972 | extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); |
5973 | extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); |
5974 | |
5975 | +/* |
5976 | + * probe_user_write(): safely attempt to write to a location in user space |
5977 | + * @dst: address to write to |
5978 | + * @src: pointer to the data that shall be written |
5979 | + * @size: size of the data chunk |
5980 | + * |
5981 | + * Safely write to address @dst from the buffer at @src. If a kernel fault |
5982 | + * happens, handle that and return -EFAULT. |
5983 | + */ |
5984 | +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); |
5985 | +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); |
5986 | + |
5987 | extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); |
5988 | extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, |
5989 | long count); |
5990 | diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h |
5991 | index 31f76b6abf71..bbdd1542d6f1 100644 |
5992 | --- a/include/sound/simple_card_utils.h |
5993 | +++ b/include/sound/simple_card_utils.h |
5994 | @@ -8,6 +8,7 @@ |
5995 | #ifndef __SIMPLE_CARD_UTILS_H |
5996 | #define __SIMPLE_CARD_UTILS_H |
5997 | |
5998 | +#include <linux/clk.h> |
5999 | #include <sound/soc.h> |
6000 | |
6001 | #define asoc_simple_init_hp(card, sjack, prefix) \ |
6002 | diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h |
6003 | index d5ec4fac82ae..564ba1b5cf57 100644 |
6004 | --- a/include/trace/events/afs.h |
6005 | +++ b/include/trace/events/afs.h |
6006 | @@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state, |
6007 | |
6008 | TRACE_EVENT(afs_lookup, |
6009 | TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name, |
6010 | - struct afs_vnode *vnode), |
6011 | + struct afs_fid *fid), |
6012 | |
6013 | - TP_ARGS(dvnode, name, vnode), |
6014 | + TP_ARGS(dvnode, name, fid), |
6015 | |
6016 | TP_STRUCT__entry( |
6017 | __field_struct(struct afs_fid, dfid ) |
6018 | @@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup, |
6019 | TP_fast_assign( |
6020 | int __len = min_t(int, name->len, 23); |
6021 | __entry->dfid = dvnode->fid; |
6022 | - if (vnode) { |
6023 | - __entry->fid = vnode->fid; |
6024 | - } else { |
6025 | - __entry->fid.vid = 0; |
6026 | - __entry->fid.vnode = 0; |
6027 | - __entry->fid.unique = 0; |
6028 | - } |
6029 | + __entry->fid = *fid; |
6030 | memcpy(__entry->name, name->name, __len); |
6031 | __entry->name[__len] = 0; |
6032 | ), |
6033 | diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h |
6034 | index a13830616107..7fd11ec1c9a4 100644 |
6035 | --- a/include/trace/events/rpcrdma.h |
6036 | +++ b/include/trace/events/rpcrdma.h |
6037 | @@ -735,6 +735,31 @@ TRACE_EVENT(xprtrdma_post_recvs, |
6038 | ) |
6039 | ); |
6040 | |
6041 | +TRACE_EVENT(xprtrdma_post_linv, |
6042 | + TP_PROTO( |
6043 | + const struct rpcrdma_req *req, |
6044 | + int status |
6045 | + ), |
6046 | + |
6047 | + TP_ARGS(req, status), |
6048 | + |
6049 | + TP_STRUCT__entry( |
6050 | + __field(const void *, req) |
6051 | + __field(int, status) |
6052 | + __field(u32, xid) |
6053 | + ), |
6054 | + |
6055 | + TP_fast_assign( |
6056 | + __entry->req = req; |
6057 | + __entry->status = status; |
6058 | + __entry->xid = be32_to_cpu(req->rl_slot.rq_xid); |
6059 | + ), |
6060 | + |
6061 | + TP_printk("req=%p xid=0x%08x status=%d", |
6062 | + __entry->req, __entry->xid, __entry->status |
6063 | + ) |
6064 | +); |
6065 | + |
6066 | /** |
6067 | ** Completion events |
6068 | **/ |
6069 | diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h |
6070 | deleted file mode 100644 |
6071 | index f80495baa969..000000000000 |
6072 | --- a/include/uapi/rdma/nes-abi.h |
6073 | +++ /dev/null |
6074 | @@ -1,115 +0,0 @@ |
6075 | -/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */ |
6076 | -/* |
6077 | - * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved. |
6078 | - * Copyright (c) 2005 Topspin Communications. All rights reserved. |
6079 | - * Copyright (c) 2005 Cisco Systems. All rights reserved. |
6080 | - * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved. |
6081 | - * |
6082 | - * This software is available to you under a choice of one of two |
6083 | - * licenses. You may choose to be licensed under the terms of the GNU |
6084 | - * General Public License (GPL) Version 2, available from the file |
6085 | - * COPYING in the main directory of this source tree, or the |
6086 | - * OpenIB.org BSD license below: |
6087 | - * |
6088 | - * Redistribution and use in source and binary forms, with or |
6089 | - * without modification, are permitted provided that the following |
6090 | - * conditions are met: |
6091 | - * |
6092 | - * - Redistributions of source code must retain the above |
6093 | - * copyright notice, this list of conditions and the following |
6094 | - * disclaimer. |
6095 | - * |
6096 | - * - Redistributions in binary form must reproduce the above |
6097 | - * copyright notice, this list of conditions and the following |
6098 | - * disclaimer in the documentation and/or other materials |
6099 | - * provided with the distribution. |
6100 | - * |
6101 | - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
6102 | - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
6103 | - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
6104 | - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
6105 | - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
6106 | - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
6107 | - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
6108 | - * SOFTWARE. |
6109 | - * |
6110 | - */ |
6111 | - |
6112 | -#ifndef NES_ABI_USER_H |
6113 | -#define NES_ABI_USER_H |
6114 | - |
6115 | -#include <linux/types.h> |
6116 | - |
6117 | -#define NES_ABI_USERSPACE_VER 2 |
6118 | -#define NES_ABI_KERNEL_VER 2 |
6119 | - |
6120 | -/* |
6121 | - * Make sure that all structs defined in this file remain laid out so |
6122 | - * that they pack the same way on 32-bit and 64-bit architectures (to |
6123 | - * avoid incompatibility between 32-bit userspace and 64-bit kernels). |
6124 | - * In particular do not use pointer types -- pass pointers in __u64 |
6125 | - * instead. |
6126 | - */ |
6127 | - |
6128 | -struct nes_alloc_ucontext_req { |
6129 | - __u32 reserved32; |
6130 | - __u8 userspace_ver; |
6131 | - __u8 reserved8[3]; |
6132 | -}; |
6133 | - |
6134 | -struct nes_alloc_ucontext_resp { |
6135 | - __u32 max_pds; /* maximum pds allowed for this user process */ |
6136 | - __u32 max_qps; /* maximum qps allowed for this user process */ |
6137 | - __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */ |
6138 | - __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */ |
6139 | - __u8 kernel_ver; |
6140 | - __u8 reserved[2]; |
6141 | -}; |
6142 | - |
6143 | -struct nes_alloc_pd_resp { |
6144 | - __u32 pd_id; |
6145 | - __u32 mmap_db_index; |
6146 | -}; |
6147 | - |
6148 | -struct nes_create_cq_req { |
6149 | - __aligned_u64 user_cq_buffer; |
6150 | - __u32 mcrqf; |
6151 | - __u8 reserved[4]; |
6152 | -}; |
6153 | - |
6154 | -struct nes_create_qp_req { |
6155 | - __aligned_u64 user_wqe_buffers; |
6156 | - __aligned_u64 user_qp_buffer; |
6157 | -}; |
6158 | - |
6159 | -enum iwnes_memreg_type { |
6160 | - IWNES_MEMREG_TYPE_MEM = 0x0000, |
6161 | - IWNES_MEMREG_TYPE_QP = 0x0001, |
6162 | - IWNES_MEMREG_TYPE_CQ = 0x0002, |
6163 | - IWNES_MEMREG_TYPE_MW = 0x0003, |
6164 | - IWNES_MEMREG_TYPE_FMR = 0x0004, |
6165 | - IWNES_MEMREG_TYPE_FMEM = 0x0005, |
6166 | -}; |
6167 | - |
6168 | -struct nes_mem_reg_req { |
6169 | - __u32 reg_type; /* indicates if id is memory, QP or CQ */ |
6170 | - __u32 reserved; |
6171 | -}; |
6172 | - |
6173 | -struct nes_create_cq_resp { |
6174 | - __u32 cq_id; |
6175 | - __u32 cq_size; |
6176 | - __u32 mmap_db_index; |
6177 | - __u32 reserved; |
6178 | -}; |
6179 | - |
6180 | -struct nes_create_qp_resp { |
6181 | - __u32 qp_id; |
6182 | - __u32 actual_sq_size; |
6183 | - __u32 actual_rq_size; |
6184 | - __u32 mmap_sq_db_index; |
6185 | - __u32 mmap_rq_db_index; |
6186 | - __u32 nes_drv_opt; |
6187 | -}; |
6188 | - |
6189 | -#endif /* NES_ABI_USER_H */ |
6190 | diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c |
6191 | index a3eaf08e7dd3..8bd69062fbe5 100644 |
6192 | --- a/kernel/bpf/cgroup.c |
6193 | +++ b/kernel/bpf/cgroup.c |
6194 | @@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp) |
6195 | */ |
6196 | static void cgroup_bpf_release(struct work_struct *work) |
6197 | { |
6198 | - struct cgroup *cgrp = container_of(work, struct cgroup, |
6199 | - bpf.release_work); |
6200 | + struct cgroup *p, *cgrp = container_of(work, struct cgroup, |
6201 | + bpf.release_work); |
6202 | enum bpf_cgroup_storage_type stype; |
6203 | struct bpf_prog_array *old_array; |
6204 | unsigned int type; |
6205 | @@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work) |
6206 | |
6207 | mutex_unlock(&cgroup_mutex); |
6208 | |
6209 | + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) |
6210 | + cgroup_bpf_put(p); |
6211 | + |
6212 | percpu_ref_exit(&cgrp->bpf.refcnt); |
6213 | cgroup_put(cgrp); |
6214 | } |
6215 | @@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) |
6216 | */ |
6217 | #define NR ARRAY_SIZE(cgrp->bpf.effective) |
6218 | struct bpf_prog_array *arrays[NR] = {}; |
6219 | + struct cgroup *p; |
6220 | int ret, i; |
6221 | |
6222 | ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, |
6223 | @@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp) |
6224 | if (ret) |
6225 | return ret; |
6226 | |
6227 | + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) |
6228 | + cgroup_bpf_get(p); |
6229 | + |
6230 | for (i = 0; i < NR; i++) |
6231 | INIT_LIST_HEAD(&cgrp->bpf.progs[i]); |
6232 | |
6233 | diff --git a/kernel/cred.c b/kernel/cred.c |
6234 | index 9ed51b70ed80..809a985b1793 100644 |
6235 | --- a/kernel/cred.c |
6236 | +++ b/kernel/cred.c |
6237 | @@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk) |
6238 | put_cred(cred); |
6239 | |
6240 | #ifdef CONFIG_KEYS_REQUEST_CACHE |
6241 | - key_put(current->cached_requested_key); |
6242 | - current->cached_requested_key = NULL; |
6243 | + key_put(tsk->cached_requested_key); |
6244 | + tsk->cached_requested_key = NULL; |
6245 | #endif |
6246 | } |
6247 | |
6248 | diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c |
6249 | index 44bd08f2443b..89bdac61233d 100644 |
6250 | --- a/kernel/trace/bpf_trace.c |
6251 | +++ b/kernel/trace/bpf_trace.c |
6252 | @@ -163,7 +163,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = { |
6253 | .arg3_type = ARG_ANYTHING, |
6254 | }; |
6255 | |
6256 | -BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, |
6257 | +BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, |
6258 | u32, size) |
6259 | { |
6260 | /* |
6261 | @@ -186,10 +186,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, |
6262 | return -EPERM; |
6263 | if (unlikely(!nmi_uaccess_okay())) |
6264 | return -EPERM; |
6265 | - if (!access_ok(unsafe_ptr, size)) |
6266 | - return -EPERM; |
6267 | |
6268 | - return probe_kernel_write(unsafe_ptr, src, size); |
6269 | + return probe_user_write(unsafe_ptr, src, size); |
6270 | } |
6271 | |
6272 | static const struct bpf_func_proto bpf_probe_write_user_proto = { |
6273 | diff --git a/mm/maccess.c b/mm/maccess.c |
6274 | index d065736f6b87..2d3c3d01064c 100644 |
6275 | --- a/mm/maccess.c |
6276 | +++ b/mm/maccess.c |
6277 | @@ -18,6 +18,18 @@ probe_read_common(void *dst, const void __user *src, size_t size) |
6278 | return ret ? -EFAULT : 0; |
6279 | } |
6280 | |
6281 | +static __always_inline long |
6282 | +probe_write_common(void __user *dst, const void *src, size_t size) |
6283 | +{ |
6284 | + long ret; |
6285 | + |
6286 | + pagefault_disable(); |
6287 | + ret = __copy_to_user_inatomic(dst, src, size); |
6288 | + pagefault_enable(); |
6289 | + |
6290 | + return ret ? -EFAULT : 0; |
6291 | +} |
6292 | + |
6293 | /** |
6294 | * probe_kernel_read(): safely attempt to read from a kernel-space location |
6295 | * @dst: pointer to the buffer that shall take the data |
6296 | @@ -85,6 +97,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); |
6297 | * Safely write to address @dst from the buffer at @src. If a kernel fault |
6298 | * happens, handle that and return -EFAULT. |
6299 | */ |
6300 | + |
6301 | long __weak probe_kernel_write(void *dst, const void *src, size_t size) |
6302 | __attribute__((alias("__probe_kernel_write"))); |
6303 | |
6304 | @@ -94,15 +107,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) |
6305 | mm_segment_t old_fs = get_fs(); |
6306 | |
6307 | set_fs(KERNEL_DS); |
6308 | - pagefault_disable(); |
6309 | - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); |
6310 | - pagefault_enable(); |
6311 | + ret = probe_write_common((__force void __user *)dst, src, size); |
6312 | set_fs(old_fs); |
6313 | |
6314 | - return ret ? -EFAULT : 0; |
6315 | + return ret; |
6316 | } |
6317 | EXPORT_SYMBOL_GPL(probe_kernel_write); |
6318 | |
6319 | +/** |
6320 | + * probe_user_write(): safely attempt to write to a user-space location |
6321 | + * @dst: address to write to |
6322 | + * @src: pointer to the data that shall be written |
6323 | + * @size: size of the data chunk |
6324 | + * |
6325 | + * Safely write to address @dst from the buffer at @src. If a kernel fault |
6326 | + * happens, handle that and return -EFAULT. |
6327 | + */ |
6328 | + |
6329 | +long __weak probe_user_write(void __user *dst, const void *src, size_t size) |
6330 | + __attribute__((alias("__probe_user_write"))); |
6331 | + |
6332 | +long __probe_user_write(void __user *dst, const void *src, size_t size) |
6333 | +{ |
6334 | + long ret = -EFAULT; |
6335 | + mm_segment_t old_fs = get_fs(); |
6336 | + |
6337 | + set_fs(USER_DS); |
6338 | + if (access_ok(dst, size)) |
6339 | + ret = probe_write_common(dst, src, size); |
6340 | + set_fs(old_fs); |
6341 | + |
6342 | + return ret; |
6343 | +} |
6344 | +EXPORT_SYMBOL_GPL(probe_user_write); |
6345 | |
6346 | /** |
6347 | * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. |
6348 | diff --git a/net/core/skmsg.c b/net/core/skmsg.c |
6349 | index 0675d022584e..ded2d5227678 100644 |
6350 | --- a/net/core/skmsg.c |
6351 | +++ b/net/core/skmsg.c |
6352 | @@ -793,15 +793,18 @@ static void sk_psock_strp_data_ready(struct sock *sk) |
6353 | static void sk_psock_write_space(struct sock *sk) |
6354 | { |
6355 | struct sk_psock *psock; |
6356 | - void (*write_space)(struct sock *sk); |
6357 | + void (*write_space)(struct sock *sk) = NULL; |
6358 | |
6359 | rcu_read_lock(); |
6360 | psock = sk_psock(sk); |
6361 | - if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))) |
6362 | - schedule_work(&psock->work); |
6363 | - write_space = psock->saved_write_space; |
6364 | + if (likely(psock)) { |
6365 | + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) |
6366 | + schedule_work(&psock->work); |
6367 | + write_space = psock->saved_write_space; |
6368 | + } |
6369 | rcu_read_unlock(); |
6370 | - write_space(sk); |
6371 | + if (write_space) |
6372 | + write_space(sk); |
6373 | } |
6374 | |
6375 | int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) |
6376 | diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c |
6377 | index 6618a9d8e58e..d5f709b940ff 100644 |
6378 | --- a/net/hsr/hsr_debugfs.c |
6379 | +++ b/net/hsr/hsr_debugfs.c |
6380 | @@ -20,6 +20,8 @@ |
6381 | #include "hsr_main.h" |
6382 | #include "hsr_framereg.h" |
6383 | |
6384 | +static struct dentry *hsr_debugfs_root_dir; |
6385 | + |
6386 | static void print_mac_address(struct seq_file *sfp, unsigned char *mac) |
6387 | { |
6388 | seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:", |
6389 | @@ -63,6 +65,19 @@ hsr_node_table_open(struct inode *inode, struct file *filp) |
6390 | return single_open(filp, hsr_node_table_show, inode->i_private); |
6391 | } |
6392 | |
6393 | +void hsr_debugfs_rename(struct net_device *dev) |
6394 | +{ |
6395 | + struct hsr_priv *priv = netdev_priv(dev); |
6396 | + struct dentry *d; |
6397 | + |
6398 | + d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root, |
6399 | + hsr_debugfs_root_dir, dev->name); |
6400 | + if (IS_ERR(d)) |
6401 | + netdev_warn(dev, "failed to rename\n"); |
6402 | + else |
6403 | + priv->node_tbl_root = d; |
6404 | +} |
6405 | + |
6406 | static const struct file_operations hsr_fops = { |
6407 | .open = hsr_node_table_open, |
6408 | .read = seq_read, |
6409 | @@ -81,9 +96,9 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) |
6410 | { |
6411 | struct dentry *de = NULL; |
6412 | |
6413 | - de = debugfs_create_dir(hsr_dev->name, NULL); |
6414 | + de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir); |
6415 | if (IS_ERR(de)) { |
6416 | - pr_err("Cannot create hsr debugfs root\n"); |
6417 | + pr_err("Cannot create hsr debugfs directory\n"); |
6418 | return; |
6419 | } |
6420 | |
6421 | @@ -93,7 +108,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) |
6422 | priv->node_tbl_root, priv, |
6423 | &hsr_fops); |
6424 | if (IS_ERR(de)) { |
6425 | - pr_err("Cannot create hsr node_table directory\n"); |
6426 | + pr_err("Cannot create hsr node_table file\n"); |
6427 | debugfs_remove(priv->node_tbl_root); |
6428 | priv->node_tbl_root = NULL; |
6429 | return; |
6430 | @@ -115,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv) |
6431 | debugfs_remove(priv->node_tbl_root); |
6432 | priv->node_tbl_root = NULL; |
6433 | } |
6434 | + |
6435 | +void hsr_debugfs_create_root(void) |
6436 | +{ |
6437 | + hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL); |
6438 | + if (IS_ERR(hsr_debugfs_root_dir)) { |
6439 | + pr_err("Cannot create hsr debugfs root directory\n"); |
6440 | + hsr_debugfs_root_dir = NULL; |
6441 | + } |
6442 | +} |
6443 | + |
6444 | +void hsr_debugfs_remove_root(void) |
6445 | +{ |
6446 | + /* debugfs_remove() internally checks NULL and ERROR */ |
6447 | + debugfs_remove(hsr_debugfs_root_dir); |
6448 | +} |
6449 | diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c |
6450 | index 62c03f0d0079..c7bd6c49fadf 100644 |
6451 | --- a/net/hsr/hsr_device.c |
6452 | +++ b/net/hsr/hsr_device.c |
6453 | @@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master, |
6454 | skb->dev->dev_addr, skb->len) <= 0) |
6455 | goto out; |
6456 | skb_reset_mac_header(skb); |
6457 | + skb_reset_network_header(skb); |
6458 | + skb_reset_transport_header(skb); |
6459 | |
6460 | if (hsr_ver > 0) { |
6461 | hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); |
6462 | diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c |
6463 | index 6deb8fa8d5c8..9e389accbfc7 100644 |
6464 | --- a/net/hsr/hsr_main.c |
6465 | +++ b/net/hsr/hsr_main.c |
6466 | @@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, |
6467 | case NETDEV_CHANGE: /* Link (carrier) state changes */ |
6468 | hsr_check_carrier_and_operstate(hsr); |
6469 | break; |
6470 | + case NETDEV_CHANGENAME: |
6471 | + if (is_hsr_master(dev)) |
6472 | + hsr_debugfs_rename(dev); |
6473 | + break; |
6474 | case NETDEV_CHANGEADDR: |
6475 | if (port->type == HSR_PT_MASTER) { |
6476 | /* This should not happen since there's no |
6477 | @@ -123,6 +127,7 @@ static void __exit hsr_exit(void) |
6478 | { |
6479 | unregister_netdevice_notifier(&hsr_nb); |
6480 | hsr_netlink_exit(); |
6481 | + hsr_debugfs_remove_root(); |
6482 | } |
6483 | |
6484 | module_init(hsr_init); |
6485 | diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h |
6486 | index 9ec38e33b8b1..d40de84a637f 100644 |
6487 | --- a/net/hsr/hsr_main.h |
6488 | +++ b/net/hsr/hsr_main.h |
6489 | @@ -185,14 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb) |
6490 | } |
6491 | |
6492 | #if IS_ENABLED(CONFIG_DEBUG_FS) |
6493 | +void hsr_debugfs_rename(struct net_device *dev); |
6494 | void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); |
6495 | void hsr_debugfs_term(struct hsr_priv *priv); |
6496 | +void hsr_debugfs_create_root(void); |
6497 | +void hsr_debugfs_remove_root(void); |
6498 | #else |
6499 | +static inline void hsr_debugfs_rename(struct net_device *dev) |
6500 | +{ |
6501 | +} |
6502 | static inline void hsr_debugfs_init(struct hsr_priv *priv, |
6503 | struct net_device *hsr_dev) |
6504 | {} |
6505 | static inline void hsr_debugfs_term(struct hsr_priv *priv) |
6506 | {} |
6507 | +static inline void hsr_debugfs_create_root(void) |
6508 | +{} |
6509 | +static inline void hsr_debugfs_remove_root(void) |
6510 | +{} |
6511 | #endif |
6512 | |
6513 | #endif /* __HSR_PRIVATE_H */ |
6514 | diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c |
6515 | index 8f8337f893ba..8dc0547f01d0 100644 |
6516 | --- a/net/hsr/hsr_netlink.c |
6517 | +++ b/net/hsr/hsr_netlink.c |
6518 | @@ -476,6 +476,7 @@ int __init hsr_netlink_init(void) |
6519 | if (rc) |
6520 | goto fail_genl_register_family; |
6521 | |
6522 | + hsr_debugfs_create_root(); |
6523 | return 0; |
6524 | |
6525 | fail_genl_register_family: |
6526 | diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c |
6527 | index e743f811245f..96a64e7594a5 100644 |
6528 | --- a/net/netfilter/nf_tables_offload.c |
6529 | +++ b/net/netfilter/nf_tables_offload.c |
6530 | @@ -358,14 +358,14 @@ int nft_flow_rule_offload_commit(struct net *net) |
6531 | continue; |
6532 | |
6533 | if (trans->ctx.flags & NLM_F_REPLACE || |
6534 | - !(trans->ctx.flags & NLM_F_APPEND)) |
6535 | - return -EOPNOTSUPP; |
6536 | - |
6537 | + !(trans->ctx.flags & NLM_F_APPEND)) { |
6538 | + err = -EOPNOTSUPP; |
6539 | + break; |
6540 | + } |
6541 | err = nft_flow_offload_rule(trans->ctx.chain, |
6542 | nft_trans_rule(trans), |
6543 | nft_trans_flow_rule(trans), |
6544 | FLOW_CLS_REPLACE); |
6545 | - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); |
6546 | break; |
6547 | case NFT_MSG_DELRULE: |
6548 | if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) |
6549 | @@ -379,7 +379,23 @@ int nft_flow_rule_offload_commit(struct net *net) |
6550 | } |
6551 | |
6552 | if (err) |
6553 | - return err; |
6554 | + break; |
6555 | + } |
6556 | + |
6557 | + list_for_each_entry(trans, &net->nft.commit_list, list) { |
6558 | + if (trans->ctx.family != NFPROTO_NETDEV) |
6559 | + continue; |
6560 | + |
6561 | + switch (trans->msg_type) { |
6562 | + case NFT_MSG_NEWRULE: |
6563 | + if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) |
6564 | + continue; |
6565 | + |
6566 | + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); |
6567 | + break; |
6568 | + default: |
6569 | + break; |
6570 | + } |
6571 | } |
6572 | |
6573 | return err; |
6574 | diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c |
6575 | index f29bbc74c4bf..ff5ac173e897 100644 |
6576 | --- a/net/netfilter/nft_flow_offload.c |
6577 | +++ b/net/netfilter/nft_flow_offload.c |
6578 | @@ -197,9 +197,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, |
6579 | static void nft_flow_offload_destroy(const struct nft_ctx *ctx, |
6580 | const struct nft_expr *expr) |
6581 | { |
6582 | - struct nft_flow_offload *priv = nft_expr_priv(expr); |
6583 | - |
6584 | - priv->flowtable->use--; |
6585 | nf_ct_netns_put(ctx->net, ctx->family); |
6586 | } |
6587 | |
6588 | diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c |
6589 | index 317e3a9e8c5b..dda1e55d5801 100644 |
6590 | --- a/net/netfilter/nft_meta.c |
6591 | +++ b/net/netfilter/nft_meta.c |
6592 | @@ -33,19 +33,19 @@ |
6593 | |
6594 | static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); |
6595 | |
6596 | -static u8 nft_meta_weekday(unsigned long secs) |
6597 | +static u8 nft_meta_weekday(time64_t secs) |
6598 | { |
6599 | unsigned int dse; |
6600 | u8 wday; |
6601 | |
6602 | secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest; |
6603 | - dse = secs / NFT_META_SECS_PER_DAY; |
6604 | + dse = div_u64(secs, NFT_META_SECS_PER_DAY); |
6605 | wday = (4 + dse) % NFT_META_DAYS_PER_WEEK; |
6606 | |
6607 | return wday; |
6608 | } |
6609 | |
6610 | -static u32 nft_meta_hour(unsigned long secs) |
6611 | +static u32 nft_meta_hour(time64_t secs) |
6612 | { |
6613 | struct tm tm; |
6614 | |
6615 | @@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr, |
6616 | nft_reg_store64(dest, ktime_get_real_ns()); |
6617 | break; |
6618 | case NFT_META_TIME_DAY: |
6619 | - nft_reg_store8(dest, nft_meta_weekday(get_seconds())); |
6620 | + nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds())); |
6621 | break; |
6622 | case NFT_META_TIME_HOUR: |
6623 | - *dest = nft_meta_hour(get_seconds()); |
6624 | + *dest = nft_meta_hour(ktime_get_real_seconds()); |
6625 | break; |
6626 | default: |
6627 | WARN_ON(1); |
6628 | diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h |
6629 | index 7c7d10f2e0c1..5e99df80e80a 100644 |
6630 | --- a/net/rxrpc/ar-internal.h |
6631 | +++ b/net/rxrpc/ar-internal.h |
6632 | @@ -209,6 +209,7 @@ struct rxrpc_skb_priv { |
6633 | struct rxrpc_security { |
6634 | const char *name; /* name of this service */ |
6635 | u8 security_index; /* security type provided */ |
6636 | + u32 no_key_abort; /* Abort code indicating no key */ |
6637 | |
6638 | /* Initialise a security service */ |
6639 | int (*init)(void); |
6640 | @@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, |
6641 | struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, |
6642 | struct sk_buff *); |
6643 | struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); |
6644 | -void rxrpc_new_incoming_connection(struct rxrpc_sock *, |
6645 | - struct rxrpc_connection *, struct sk_buff *); |
6646 | +void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *, |
6647 | + const struct rxrpc_security *, struct key *, |
6648 | + struct sk_buff *); |
6649 | void rxrpc_unpublish_service_conn(struct rxrpc_connection *); |
6650 | |
6651 | /* |
6652 | @@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad; |
6653 | int __init rxrpc_init_security(void); |
6654 | void rxrpc_exit_security(void); |
6655 | int rxrpc_init_client_conn_security(struct rxrpc_connection *); |
6656 | -int rxrpc_init_server_conn_security(struct rxrpc_connection *); |
6657 | +bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *, |
6658 | + const struct rxrpc_security **, struct key **, |
6659 | + struct sk_buff *); |
6660 | |
6661 | /* |
6662 | * sendmsg.c |
6663 | diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c |
6664 | index 135bf5cd8dd5..70e44abf106c 100644 |
6665 | --- a/net/rxrpc/call_accept.c |
6666 | +++ b/net/rxrpc/call_accept.c |
6667 | @@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx) |
6668 | kfree(b); |
6669 | } |
6670 | |
6671 | +/* |
6672 | + * Ping the other end to fill our RTT cache and to retrieve the rwind |
6673 | + * and MTU parameters. |
6674 | + */ |
6675 | +static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) |
6676 | +{ |
6677 | + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
6678 | + ktime_t now = skb->tstamp; |
6679 | + |
6680 | + if (call->peer->rtt_usage < 3 || |
6681 | + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) |
6682 | + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, |
6683 | + true, true, |
6684 | + rxrpc_propose_ack_ping_for_params); |
6685 | +} |
6686 | + |
6687 | /* |
6688 | * Allocate a new incoming call from the prealloc pool, along with a connection |
6689 | * and a peer as necessary. |
6690 | @@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, |
6691 | struct rxrpc_local *local, |
6692 | struct rxrpc_peer *peer, |
6693 | struct rxrpc_connection *conn, |
6694 | + const struct rxrpc_security *sec, |
6695 | + struct key *key, |
6696 | struct sk_buff *skb) |
6697 | { |
6698 | struct rxrpc_backlog *b = rx->backlog; |
6699 | @@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx, |
6700 | conn->params.local = rxrpc_get_local(local); |
6701 | conn->params.peer = peer; |
6702 | rxrpc_see_connection(conn); |
6703 | - rxrpc_new_incoming_connection(rx, conn, skb); |
6704 | + rxrpc_new_incoming_connection(rx, conn, sec, key, skb); |
6705 | } else { |
6706 | rxrpc_get_connection(conn); |
6707 | } |
6708 | @@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
6709 | struct sk_buff *skb) |
6710 | { |
6711 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
6712 | + const struct rxrpc_security *sec = NULL; |
6713 | struct rxrpc_connection *conn; |
6714 | struct rxrpc_peer *peer = NULL; |
6715 | - struct rxrpc_call *call; |
6716 | + struct rxrpc_call *call = NULL; |
6717 | + struct key *key = NULL; |
6718 | |
6719 | _enter(""); |
6720 | |
6721 | @@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
6722 | sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); |
6723 | skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; |
6724 | skb->priority = RX_INVALID_OPERATION; |
6725 | - _leave(" = NULL [close]"); |
6726 | - call = NULL; |
6727 | - goto out; |
6728 | + goto no_call; |
6729 | } |
6730 | |
6731 | /* The peer, connection and call may all have sprung into existence due |
6732 | @@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
6733 | */ |
6734 | conn = rxrpc_find_connection_rcu(local, skb, &peer); |
6735 | |
6736 | - call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); |
6737 | + if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb)) |
6738 | + goto no_call; |
6739 | + |
6740 | + call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb); |
6741 | + key_put(key); |
6742 | if (!call) { |
6743 | skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; |
6744 | - _leave(" = NULL [busy]"); |
6745 | - call = NULL; |
6746 | - goto out; |
6747 | + goto no_call; |
6748 | } |
6749 | |
6750 | trace_rxrpc_receive(call, rxrpc_receive_incoming, |
6751 | sp->hdr.serial, sp->hdr.seq); |
6752 | |
6753 | - /* Lock the call to prevent rxrpc_kernel_send/recv_data() and |
6754 | - * sendmsg()/recvmsg() inconveniently stealing the mutex once the |
6755 | - * notification is generated. |
6756 | - * |
6757 | - * The BUG should never happen because the kernel should be well |
6758 | - * behaved enough not to access the call before the first notification |
6759 | - * event and userspace is prevented from doing so until the state is |
6760 | - * appropriate. |
6761 | - */ |
6762 | - if (!mutex_trylock(&call->user_mutex)) |
6763 | - BUG(); |
6764 | - |
6765 | /* Make the call live. */ |
6766 | rxrpc_incoming_call(rx, call, skb); |
6767 | conn = call->conn; |
6768 | @@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
6769 | BUG(); |
6770 | } |
6771 | spin_unlock(&conn->state_lock); |
6772 | + spin_unlock(&rx->incoming_lock); |
6773 | + |
6774 | + rxrpc_send_ping(call, skb); |
6775 | |
6776 | if (call->state == RXRPC_CALL_SERVER_ACCEPTING) |
6777 | rxrpc_notify_socket(call); |
6778 | @@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, |
6779 | rxrpc_put_call(call, rxrpc_call_put); |
6780 | |
6781 | _leave(" = %p{%d}", call, call->debug_id); |
6782 | -out: |
6783 | - spin_unlock(&rx->incoming_lock); |
6784 | return call; |
6785 | + |
6786 | +no_call: |
6787 | + spin_unlock(&rx->incoming_lock); |
6788 | + _leave(" = NULL [%u]", skb->mark); |
6789 | + return NULL; |
6790 | } |
6791 | |
6792 | /* |
6793 | diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c |
6794 | index a1ceef4f5cd0..808a4723f868 100644 |
6795 | --- a/net/rxrpc/conn_event.c |
6796 | +++ b/net/rxrpc/conn_event.c |
6797 | @@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn) |
6798 | _enter("{%d}", conn->debug_id); |
6799 | |
6800 | ASSERT(conn->security_ix != 0); |
6801 | - |
6802 | - if (!conn->params.key) { |
6803 | - _debug("set up security"); |
6804 | - ret = rxrpc_init_server_conn_security(conn); |
6805 | - switch (ret) { |
6806 | - case 0: |
6807 | - break; |
6808 | - case -ENOENT: |
6809 | - abort_code = RX_CALL_DEAD; |
6810 | - goto abort; |
6811 | - default: |
6812 | - abort_code = RXKADNOAUTH; |
6813 | - goto abort; |
6814 | - } |
6815 | - } |
6816 | + ASSERT(conn->server_key); |
6817 | |
6818 | if (conn->security->issue_challenge(conn) < 0) { |
6819 | abort_code = RX_CALL_DEAD; |
6820 | diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c |
6821 | index 123d6ceab15c..21da48e3d2e5 100644 |
6822 | --- a/net/rxrpc/conn_service.c |
6823 | +++ b/net/rxrpc/conn_service.c |
6824 | @@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn |
6825 | */ |
6826 | void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, |
6827 | struct rxrpc_connection *conn, |
6828 | + const struct rxrpc_security *sec, |
6829 | + struct key *key, |
6830 | struct sk_buff *skb) |
6831 | { |
6832 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
6833 | @@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, |
6834 | conn->service_id = sp->hdr.serviceId; |
6835 | conn->security_ix = sp->hdr.securityIndex; |
6836 | conn->out_clientflag = 0; |
6837 | + conn->security = sec; |
6838 | + conn->server_key = key_get(key); |
6839 | if (conn->security_ix) |
6840 | conn->state = RXRPC_CONN_SERVICE_UNSECURED; |
6841 | else |
6842 | diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c |
6843 | index 157be1ff8697..86bd133b4fa0 100644 |
6844 | --- a/net/rxrpc/input.c |
6845 | +++ b/net/rxrpc/input.c |
6846 | @@ -192,22 +192,6 @@ send_extra_data: |
6847 | goto out_no_clear_ca; |
6848 | } |
6849 | |
6850 | -/* |
6851 | - * Ping the other end to fill our RTT cache and to retrieve the rwind |
6852 | - * and MTU parameters. |
6853 | - */ |
6854 | -static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) |
6855 | -{ |
6856 | - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
6857 | - ktime_t now = skb->tstamp; |
6858 | - |
6859 | - if (call->peer->rtt_usage < 3 || |
6860 | - ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) |
6861 | - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, |
6862 | - true, true, |
6863 | - rxrpc_propose_ack_ping_for_params); |
6864 | -} |
6865 | - |
6866 | /* |
6867 | * Apply a hard ACK by advancing the Tx window. |
6868 | */ |
6869 | @@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb) |
6870 | call = rxrpc_new_incoming_call(local, rx, skb); |
6871 | if (!call) |
6872 | goto reject_packet; |
6873 | - rxrpc_send_ping(call, skb); |
6874 | - mutex_unlock(&call->user_mutex); |
6875 | } |
6876 | |
6877 | /* Process a call packet; this either discards or passes on the ref |
6878 | diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c |
6879 | index 8d8aa3c230b5..098f1f9ec53b 100644 |
6880 | --- a/net/rxrpc/rxkad.c |
6881 | +++ b/net/rxrpc/rxkad.c |
6882 | @@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn) |
6883 | u32 serial; |
6884 | int ret; |
6885 | |
6886 | - _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); |
6887 | + _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); |
6888 | |
6889 | - ret = key_validate(conn->params.key); |
6890 | + ret = key_validate(conn->server_key); |
6891 | if (ret < 0) |
6892 | return ret; |
6893 | |
6894 | @@ -1293,6 +1293,7 @@ static void rxkad_exit(void) |
6895 | const struct rxrpc_security rxkad = { |
6896 | .name = "rxkad", |
6897 | .security_index = RXRPC_SECURITY_RXKAD, |
6898 | + .no_key_abort = RXKADUNKNOWNKEY, |
6899 | .init = rxkad_init, |
6900 | .exit = rxkad_exit, |
6901 | .init_connection_security = rxkad_init_connection_security, |
6902 | diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c |
6903 | index a4c47d2b7054..9b1fb9ed0717 100644 |
6904 | --- a/net/rxrpc/security.c |
6905 | +++ b/net/rxrpc/security.c |
6906 | @@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) |
6907 | } |
6908 | |
6909 | /* |
6910 | - * initialise the security on a server connection |
6911 | + * Find the security key for a server connection. |
6912 | */ |
6913 | -int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) |
6914 | +bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx, |
6915 | + const struct rxrpc_security **_sec, |
6916 | + struct key **_key, |
6917 | + struct sk_buff *skb) |
6918 | { |
6919 | const struct rxrpc_security *sec; |
6920 | - struct rxrpc_local *local = conn->params.local; |
6921 | - struct rxrpc_sock *rx; |
6922 | - struct key *key; |
6923 | - key_ref_t kref; |
6924 | + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
6925 | + key_ref_t kref = NULL; |
6926 | char kdesc[5 + 1 + 3 + 1]; |
6927 | |
6928 | _enter(""); |
6929 | |
6930 | - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); |
6931 | + sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex); |
6932 | |
6933 | - sec = rxrpc_security_lookup(conn->security_ix); |
6934 | + sec = rxrpc_security_lookup(sp->hdr.securityIndex); |
6935 | if (!sec) { |
6936 | - _leave(" = -ENOKEY [lookup]"); |
6937 | - return -ENOKEY; |
6938 | + trace_rxrpc_abort(0, "SVS", |
6939 | + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
6940 | + RX_INVALID_OPERATION, EKEYREJECTED); |
6941 | + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; |
6942 | + skb->priority = RX_INVALID_OPERATION; |
6943 | + return false; |
6944 | } |
6945 | |
6946 | - /* find the service */ |
6947 | - read_lock(&local->services_lock); |
6948 | - rx = rcu_dereference_protected(local->service, |
6949 | - lockdep_is_held(&local->services_lock)); |
6950 | - if (rx && (rx->srx.srx_service == conn->service_id || |
6951 | - rx->second_service == conn->service_id)) |
6952 | - goto found_service; |
6953 | + if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE) |
6954 | + goto out; |
6955 | |
6956 | - /* the service appears to have died */ |
6957 | - read_unlock(&local->services_lock); |
6958 | - _leave(" = -ENOENT"); |
6959 | - return -ENOENT; |
6960 | - |
6961 | -found_service: |
6962 | if (!rx->securities) { |
6963 | - read_unlock(&local->services_lock); |
6964 | - _leave(" = -ENOKEY"); |
6965 | - return -ENOKEY; |
6966 | + trace_rxrpc_abort(0, "SVR", |
6967 | + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
6968 | + RX_INVALID_OPERATION, EKEYREJECTED); |
6969 | + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; |
6970 | + skb->priority = RX_INVALID_OPERATION; |
6971 | + return false; |
6972 | } |
6973 | |
6974 | /* look through the service's keyring */ |
6975 | kref = keyring_search(make_key_ref(rx->securities, 1UL), |
6976 | &key_type_rxrpc_s, kdesc, true); |
6977 | if (IS_ERR(kref)) { |
6978 | - read_unlock(&local->services_lock); |
6979 | - _leave(" = %ld [search]", PTR_ERR(kref)); |
6980 | - return PTR_ERR(kref); |
6981 | + trace_rxrpc_abort(0, "SVK", |
6982 | + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, |
6983 | + sec->no_key_abort, EKEYREJECTED); |
6984 | + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; |
6985 | + skb->priority = sec->no_key_abort; |
6986 | + return false; |
6987 | } |
6988 | |
6989 | - key = key_ref_to_ptr(kref); |
6990 | - read_unlock(&local->services_lock); |
6991 | - |
6992 | - conn->server_key = key; |
6993 | - conn->security = sec; |
6994 | - |
6995 | - _leave(" = 0"); |
6996 | - return 0; |
6997 | +out: |
6998 | + *_sec = sec; |
6999 | + *_key = key_ref_to_ptr(kref); |
7000 | + return true; |
7001 | } |
7002 | diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c |
7003 | index dd0e8680b030..2277369feae5 100644 |
7004 | --- a/net/sched/sch_cake.c |
7005 | +++ b/net/sched/sch_cake.c |
7006 | @@ -2184,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = { |
7007 | [TCA_CAKE_MPU] = { .type = NLA_U32 }, |
7008 | [TCA_CAKE_INGRESS] = { .type = NLA_U32 }, |
7009 | [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 }, |
7010 | + [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 }, |
7011 | [TCA_CAKE_FWMARK] = { .type = NLA_U32 }, |
7012 | }; |
7013 | |
7014 | diff --git a/net/socket.c b/net/socket.c |
7015 | index ca8de9e1582d..432800b39ddb 100644 |
7016 | --- a/net/socket.c |
7017 | +++ b/net/socket.c |
7018 | @@ -3532,6 +3532,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, |
7019 | case SIOCSARP: |
7020 | case SIOCGARP: |
7021 | case SIOCDARP: |
7022 | + case SIOCOUTQNSD: |
7023 | case SIOCATMARK: |
7024 | return sock_do_ioctl(net, sock, cmd, arg); |
7025 | } |
7026 | diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c |
7027 | index 30065a28628c..9901a811f598 100644 |
7028 | --- a/net/sunrpc/xprtrdma/frwr_ops.c |
7029 | +++ b/net/sunrpc/xprtrdma/frwr_ops.c |
7030 | @@ -570,7 +570,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
7031 | */ |
7032 | bad_wr = NULL; |
7033 | rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr); |
7034 | - trace_xprtrdma_post_send(req, rc); |
7035 | |
7036 | /* The final LOCAL_INV WR in the chain is supposed to |
7037 | * do the wake. If it was never posted, the wake will |
7038 | @@ -583,6 +582,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
7039 | |
7040 | /* Recycle MRs in the LOCAL_INV chain that did not get posted. |
7041 | */ |
7042 | + trace_xprtrdma_post_linv(req, rc); |
7043 | while (bad_wr) { |
7044 | frwr = container_of(bad_wr, struct rpcrdma_frwr, |
7045 | fr_invwr); |
7046 | @@ -673,12 +673,12 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) |
7047 | */ |
7048 | bad_wr = NULL; |
7049 | rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr); |
7050 | - trace_xprtrdma_post_send(req, rc); |
7051 | if (!rc) |
7052 | return; |
7053 | |
7054 | /* Recycle MRs in the LOCAL_INV chain that did not get posted. |
7055 | */ |
7056 | + trace_xprtrdma_post_linv(req, rc); |
7057 | while (bad_wr) { |
7058 | frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr); |
7059 | mr = container_of(frwr, struct rpcrdma_mr, frwr); |
7060 | diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c |
7061 | index b86b5fd62d9f..ef5102b60589 100644 |
7062 | --- a/net/sunrpc/xprtrdma/rpc_rdma.c |
7063 | +++ b/net/sunrpc/xprtrdma/rpc_rdma.c |
7064 | @@ -1362,6 +1362,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) |
7065 | xprt->cwnd = credits << RPC_CWNDSHIFT; |
7066 | spin_unlock(&xprt->transport_lock); |
7067 | } |
7068 | + rpcrdma_post_recvs(r_xprt, false); |
7069 | |
7070 | req = rpcr_to_rdmar(rqst); |
7071 | if (req->rl_reply) { |
7072 | diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c |
7073 | index 160558b4135e..c67d465dc062 100644 |
7074 | --- a/net/sunrpc/xprtrdma/transport.c |
7075 | +++ b/net/sunrpc/xprtrdma/transport.c |
7076 | @@ -428,8 +428,11 @@ void xprt_rdma_close(struct rpc_xprt *xprt) |
7077 | /* Prepare @xprt for the next connection by reinitializing |
7078 | * its credit grant to one (see RFC 8166, Section 3.3.3). |
7079 | */ |
7080 | + spin_lock(&xprt->transport_lock); |
7081 | r_xprt->rx_buf.rb_credits = 1; |
7082 | + xprt->cong = 0; |
7083 | xprt->cwnd = RPC_CWNDSHIFT; |
7084 | + spin_unlock(&xprt->transport_lock); |
7085 | |
7086 | out: |
7087 | xprt->reestablish_timeout = 0; |
7088 | diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c |
7089 | index 3a907537e2cf..0f4d39fdb48f 100644 |
7090 | --- a/net/sunrpc/xprtrdma/verbs.c |
7091 | +++ b/net/sunrpc/xprtrdma/verbs.c |
7092 | @@ -75,16 +75,15 @@ |
7093 | * internal functions |
7094 | */ |
7095 | static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc); |
7096 | -static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf); |
7097 | +static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt); |
7098 | +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt); |
7099 | static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt); |
7100 | static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf); |
7101 | -static void rpcrdma_mr_free(struct rpcrdma_mr *mr); |
7102 | static struct rpcrdma_regbuf * |
7103 | rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction, |
7104 | gfp_t flags); |
7105 | static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb); |
7106 | static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb); |
7107 | -static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp); |
7108 | |
7109 | /* Wait for outstanding transport work to finish. ib_drain_qp |
7110 | * handles the drains in the wrong order for us, so open code |
7111 | @@ -170,7 +169,6 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) |
7112 | rdmab_addr(rep->rr_rdmabuf), |
7113 | wc->byte_len, DMA_FROM_DEVICE); |
7114 | |
7115 | - rpcrdma_post_recvs(r_xprt, false); |
7116 | rpcrdma_reply_handler(rep); |
7117 | return; |
7118 | |
7119 | @@ -247,6 +245,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) |
7120 | ia->ri_id->device->name, |
7121 | rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt)); |
7122 | #endif |
7123 | + init_completion(&ia->ri_remove_done); |
7124 | set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags); |
7125 | ep->rep_connected = -ENODEV; |
7126 | xprt_force_disconnect(xprt); |
7127 | @@ -301,7 +300,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia) |
7128 | trace_xprtrdma_conn_start(xprt); |
7129 | |
7130 | init_completion(&ia->ri_done); |
7131 | - init_completion(&ia->ri_remove_done); |
7132 | |
7133 | id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler, |
7134 | xprt, RDMA_PS_TCP, IB_QPT_RC); |
7135 | @@ -431,7 +429,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia) |
7136 | /* The ULP is responsible for ensuring all DMA |
7137 | * mappings and MRs are gone. |
7138 | */ |
7139 | - rpcrdma_reps_destroy(buf); |
7140 | + rpcrdma_reps_unmap(r_xprt); |
7141 | list_for_each_entry(req, &buf->rb_allreqs, rl_all) { |
7142 | rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf); |
7143 | rpcrdma_regbuf_dma_unmap(req->rl_sendbuf); |
7144 | @@ -609,6 +607,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, |
7145 | struct ib_qp_init_attr *qp_init_attr) |
7146 | { |
7147 | struct rpcrdma_ia *ia = &r_xprt->rx_ia; |
7148 | + struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
7149 | int rc, err; |
7150 | |
7151 | trace_xprtrdma_reinsert(r_xprt); |
7152 | @@ -623,6 +622,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt, |
7153 | pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err); |
7154 | goto out2; |
7155 | } |
7156 | + memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr)); |
7157 | |
7158 | rc = -ENETUNREACH; |
7159 | err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr); |
7160 | @@ -780,6 +780,7 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia) |
7161 | trace_xprtrdma_disconnect(r_xprt, rc); |
7162 | |
7163 | rpcrdma_xprt_drain(r_xprt); |
7164 | + rpcrdma_reqs_reset(r_xprt); |
7165 | } |
7166 | |
7167 | /* Fixed-size circular FIFO queue. This implementation is wait-free and |
7168 | @@ -965,7 +966,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt) |
7169 | mr->mr_xprt = r_xprt; |
7170 | |
7171 | spin_lock(&buf->rb_lock); |
7172 | - list_add(&mr->mr_list, &buf->rb_mrs); |
7173 | + rpcrdma_mr_push(mr, &buf->rb_mrs); |
7174 | list_add(&mr->mr_all, &buf->rb_all_mrs); |
7175 | spin_unlock(&buf->rb_lock); |
7176 | } |
7177 | @@ -1042,6 +1043,26 @@ out1: |
7178 | return NULL; |
7179 | } |
7180 | |
7181 | +/** |
7182 | + * rpcrdma_reqs_reset - Reset all reqs owned by a transport |
7183 | + * @r_xprt: controlling transport instance |
7184 | + * |
7185 | + * ASSUMPTION: the rb_allreqs list is stable for the duration, |
7186 | + * and thus can be walked without holding rb_lock. Eg. the |
7187 | + * caller is holding the transport send lock to exclude |
7188 | + * device removal or disconnection. |
7189 | + */ |
7190 | +static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt) |
7191 | +{ |
7192 | + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
7193 | + struct rpcrdma_req *req; |
7194 | + |
7195 | + list_for_each_entry(req, &buf->rb_allreqs, rl_all) { |
7196 | + /* Credits are valid only for one connection */ |
7197 | + req->rl_slot.rq_cong = 0; |
7198 | + } |
7199 | +} |
7200 | + |
7201 | static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, |
7202 | bool temp) |
7203 | { |
7204 | @@ -1065,6 +1086,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, |
7205 | rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; |
7206 | rep->rr_recv_wr.num_sge = 1; |
7207 | rep->rr_temp = temp; |
7208 | + list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps); |
7209 | return rep; |
7210 | |
7211 | out_free: |
7212 | @@ -1075,6 +1097,7 @@ out: |
7213 | |
7214 | static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep) |
7215 | { |
7216 | + list_del(&rep->rr_all); |
7217 | rpcrdma_regbuf_free(rep->rr_rdmabuf); |
7218 | kfree(rep); |
7219 | } |
7220 | @@ -1093,10 +1116,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf) |
7221 | static void rpcrdma_rep_put(struct rpcrdma_buffer *buf, |
7222 | struct rpcrdma_rep *rep) |
7223 | { |
7224 | - if (!rep->rr_temp) |
7225 | - llist_add(&rep->rr_node, &buf->rb_free_reps); |
7226 | - else |
7227 | - rpcrdma_rep_destroy(rep); |
7228 | + llist_add(&rep->rr_node, &buf->rb_free_reps); |
7229 | +} |
7230 | + |
7231 | +static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt) |
7232 | +{ |
7233 | + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
7234 | + struct rpcrdma_rep *rep; |
7235 | + |
7236 | + list_for_each_entry(rep, &buf->rb_all_reps, rr_all) |
7237 | + rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf); |
7238 | } |
7239 | |
7240 | static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf) |
7241 | @@ -1129,6 +1158,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt) |
7242 | |
7243 | INIT_LIST_HEAD(&buf->rb_send_bufs); |
7244 | INIT_LIST_HEAD(&buf->rb_allreqs); |
7245 | + INIT_LIST_HEAD(&buf->rb_all_reps); |
7246 | |
7247 | rc = -ENOMEM; |
7248 | for (i = 0; i < buf->rb_max_requests; i++) { |
7249 | @@ -1163,10 +1193,19 @@ out: |
7250 | */ |
7251 | void rpcrdma_req_destroy(struct rpcrdma_req *req) |
7252 | { |
7253 | + struct rpcrdma_mr *mr; |
7254 | + |
7255 | list_del(&req->rl_all); |
7256 | |
7257 | - while (!list_empty(&req->rl_free_mrs)) |
7258 | - rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs)); |
7259 | + while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) { |
7260 | + struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; |
7261 | + |
7262 | + spin_lock(&buf->rb_lock); |
7263 | + list_del(&mr->mr_all); |
7264 | + spin_unlock(&buf->rb_lock); |
7265 | + |
7266 | + frwr_release_mr(mr); |
7267 | + } |
7268 | |
7269 | rpcrdma_regbuf_free(req->rl_recvbuf); |
7270 | rpcrdma_regbuf_free(req->rl_sendbuf); |
7271 | @@ -1174,24 +1213,28 @@ void rpcrdma_req_destroy(struct rpcrdma_req *req) |
7272 | kfree(req); |
7273 | } |
7274 | |
7275 | -static void |
7276 | -rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) |
7277 | +/** |
7278 | + * rpcrdma_mrs_destroy - Release all of a transport's MRs |
7279 | + * @buf: controlling buffer instance |
7280 | + * |
7281 | + * Relies on caller holding the transport send lock to protect |
7282 | + * removing mr->mr_list from req->rl_free_mrs safely. |
7283 | + */ |
7284 | +static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf) |
7285 | { |
7286 | struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, |
7287 | rx_buf); |
7288 | struct rpcrdma_mr *mr; |
7289 | - unsigned int count; |
7290 | |
7291 | - count = 0; |
7292 | spin_lock(&buf->rb_lock); |
7293 | while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, |
7294 | struct rpcrdma_mr, |
7295 | mr_all)) != NULL) { |
7296 | + list_del(&mr->mr_list); |
7297 | list_del(&mr->mr_all); |
7298 | spin_unlock(&buf->rb_lock); |
7299 | |
7300 | frwr_release_mr(mr); |
7301 | - count++; |
7302 | spin_lock(&buf->rb_lock); |
7303 | } |
7304 | spin_unlock(&buf->rb_lock); |
7305 | @@ -1264,17 +1307,6 @@ void rpcrdma_mr_put(struct rpcrdma_mr *mr) |
7306 | rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs); |
7307 | } |
7308 | |
7309 | -static void rpcrdma_mr_free(struct rpcrdma_mr *mr) |
7310 | -{ |
7311 | - struct rpcrdma_xprt *r_xprt = mr->mr_xprt; |
7312 | - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
7313 | - |
7314 | - mr->mr_req = NULL; |
7315 | - spin_lock(&buf->rb_lock); |
7316 | - rpcrdma_mr_push(mr, &buf->rb_mrs); |
7317 | - spin_unlock(&buf->rb_lock); |
7318 | -} |
7319 | - |
7320 | /** |
7321 | * rpcrdma_buffer_get - Get a request buffer |
7322 | * @buffers: Buffer pool from which to obtain a buffer |
7323 | @@ -1455,8 +1487,13 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, |
7324 | return 0; |
7325 | } |
7326 | |
7327 | -static void |
7328 | -rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
7329 | +/** |
7330 | + * rpcrdma_post_recvs - Refill the Receive Queue |
7331 | + * @r_xprt: controlling transport instance |
7332 | + * @temp: mark Receive buffers to be deleted after use |
7333 | + * |
7334 | + */ |
7335 | +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
7336 | { |
7337 | struct rpcrdma_buffer *buf = &r_xprt->rx_buf; |
7338 | struct rpcrdma_ep *ep = &r_xprt->rx_ep; |
7339 | @@ -1478,6 +1515,10 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp) |
7340 | wr = NULL; |
7341 | while (needed) { |
7342 | rep = rpcrdma_rep_get_locked(buf); |
7343 | + if (rep && rep->rr_temp) { |
7344 | + rpcrdma_rep_destroy(rep); |
7345 | + continue; |
7346 | + } |
7347 | if (!rep) |
7348 | rep = rpcrdma_rep_create(r_xprt, temp); |
7349 | if (!rep) |
7350 | diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h |
7351 | index 65e6b0eb862e..fc761679487c 100644 |
7352 | --- a/net/sunrpc/xprtrdma/xprt_rdma.h |
7353 | +++ b/net/sunrpc/xprtrdma/xprt_rdma.h |
7354 | @@ -203,6 +203,7 @@ struct rpcrdma_rep { |
7355 | struct xdr_stream rr_stream; |
7356 | struct llist_node rr_node; |
7357 | struct ib_recv_wr rr_recv_wr; |
7358 | + struct list_head rr_all; |
7359 | }; |
7360 | |
7361 | /* To reduce the rate at which a transport invokes ib_post_recv |
7362 | @@ -372,6 +373,7 @@ struct rpcrdma_buffer { |
7363 | |
7364 | struct list_head rb_allreqs; |
7365 | struct list_head rb_all_mrs; |
7366 | + struct list_head rb_all_reps; |
7367 | |
7368 | struct llist_head rb_free_reps; |
7369 | |
7370 | @@ -474,6 +476,7 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *); |
7371 | |
7372 | int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *, |
7373 | struct rpcrdma_req *); |
7374 | +void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp); |
7375 | |
7376 | /* |
7377 | * Buffer calls - xprtrdma/verbs.c |
7378 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
7379 | index 0d8da809bea2..b3369d678f1a 100644 |
7380 | --- a/net/unix/af_unix.c |
7381 | +++ b/net/unix/af_unix.c |
7382 | @@ -646,6 +646,9 @@ static __poll_t unix_poll(struct file *, struct socket *, poll_table *); |
7383 | static __poll_t unix_dgram_poll(struct file *, struct socket *, |
7384 | poll_table *); |
7385 | static int unix_ioctl(struct socket *, unsigned int, unsigned long); |
7386 | +#ifdef CONFIG_COMPAT |
7387 | +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
7388 | +#endif |
7389 | static int unix_shutdown(struct socket *, int); |
7390 | static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t); |
7391 | static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int); |
7392 | @@ -687,6 +690,9 @@ static const struct proto_ops unix_stream_ops = { |
7393 | .getname = unix_getname, |
7394 | .poll = unix_poll, |
7395 | .ioctl = unix_ioctl, |
7396 | +#ifdef CONFIG_COMPAT |
7397 | + .compat_ioctl = unix_compat_ioctl, |
7398 | +#endif |
7399 | .listen = unix_listen, |
7400 | .shutdown = unix_shutdown, |
7401 | .setsockopt = sock_no_setsockopt, |
7402 | @@ -710,6 +716,9 @@ static const struct proto_ops unix_dgram_ops = { |
7403 | .getname = unix_getname, |
7404 | .poll = unix_dgram_poll, |
7405 | .ioctl = unix_ioctl, |
7406 | +#ifdef CONFIG_COMPAT |
7407 | + .compat_ioctl = unix_compat_ioctl, |
7408 | +#endif |
7409 | .listen = sock_no_listen, |
7410 | .shutdown = unix_shutdown, |
7411 | .setsockopt = sock_no_setsockopt, |
7412 | @@ -732,6 +741,9 @@ static const struct proto_ops unix_seqpacket_ops = { |
7413 | .getname = unix_getname, |
7414 | .poll = unix_dgram_poll, |
7415 | .ioctl = unix_ioctl, |
7416 | +#ifdef CONFIG_COMPAT |
7417 | + .compat_ioctl = unix_compat_ioctl, |
7418 | +#endif |
7419 | .listen = unix_listen, |
7420 | .shutdown = unix_shutdown, |
7421 | .setsockopt = sock_no_setsockopt, |
7422 | @@ -2582,6 +2594,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
7423 | return err; |
7424 | } |
7425 | |
7426 | +#ifdef CONFIG_COMPAT |
7427 | +static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
7428 | +{ |
7429 | + return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); |
7430 | +} |
7431 | +#endif |
7432 | + |
7433 | static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait) |
7434 | { |
7435 | struct sock *sk = sock->sk; |
7436 | diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh |
7437 | index 06495379fcd8..2998ddb323e3 100755 |
7438 | --- a/scripts/link-vmlinux.sh |
7439 | +++ b/scripts/link-vmlinux.sh |
7440 | @@ -127,7 +127,8 @@ gen_btf() |
7441 | cut -d, -f1 | cut -d' ' -f2) |
7442 | bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \ |
7443 | awk '{print $4}') |
7444 | - ${OBJCOPY} --dump-section .BTF=.btf.vmlinux.bin ${1} 2>/dev/null |
7445 | + ${OBJCOPY} --set-section-flags .BTF=alloc -O binary \ |
7446 | + --only-section=.BTF ${1} .btf.vmlinux.bin 2>/dev/null |
7447 | ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \ |
7448 | --rename-section .data=.BTF .btf.vmlinux.bin ${2} |
7449 | } |
7450 | @@ -253,6 +254,10 @@ btf_vmlinux_bin_o="" |
7451 | if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then |
7452 | if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then |
7453 | btf_vmlinux_bin_o=.btf.vmlinux.bin.o |
7454 | + else |
7455 | + echo >&2 "Failed to generate BTF for vmlinux" |
7456 | + echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF" |
7457 | + exit 1 |
7458 | fi |
7459 | fi |
7460 | |
7461 | diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian |
7462 | index 7c230016b08d..357dc56bcf30 100755 |
7463 | --- a/scripts/package/mkdebian |
7464 | +++ b/scripts/package/mkdebian |
7465 | @@ -136,7 +136,7 @@ mkdir -p debian/source/ |
7466 | echo "1.0" > debian/source/format |
7467 | |
7468 | echo $debarch > debian/arch |
7469 | -extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)" |
7470 | +extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)" |
7471 | extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)" |
7472 | |
7473 | # Generate a simple changelog template |
7474 | diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c |
7475 | index dd3d5942e669..c36bafbcd77e 100644 |
7476 | --- a/security/tomoyo/common.c |
7477 | +++ b/security/tomoyo/common.c |
7478 | @@ -951,7 +951,8 @@ static bool tomoyo_manager(void) |
7479 | exe = tomoyo_get_exe(); |
7480 | if (!exe) |
7481 | return false; |
7482 | - list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) { |
7483 | + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list, |
7484 | + srcu_read_lock_held(&tomoyo_ss)) { |
7485 | if (!ptr->head.is_deleted && |
7486 | (!tomoyo_pathcmp(domainname, ptr->manager) || |
7487 | !strcmp(exe, ptr->manager->name))) { |
7488 | @@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname) |
7489 | if (mutex_lock_interruptible(&tomoyo_policy_lock)) |
7490 | return -EINTR; |
7491 | /* Is there an active domain? */ |
7492 | - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { |
7493 | + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, |
7494 | + srcu_read_lock_held(&tomoyo_ss)) { |
7495 | /* Never delete tomoyo_kernel_domain */ |
7496 | if (domain == &tomoyo_kernel_domain) |
7497 | continue; |
7498 | @@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void) |
7499 | |
7500 | tomoyo_policy_loaded = true; |
7501 | pr_info("TOMOYO: 2.6.0\n"); |
7502 | - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { |
7503 | + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, |
7504 | + srcu_read_lock_held(&tomoyo_ss)) { |
7505 | const u8 profile = domain->profile; |
7506 | struct tomoyo_policy_namespace *ns = domain->ns; |
7507 | |
7508 | diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c |
7509 | index 8526a0a74023..7869d6a9980b 100644 |
7510 | --- a/security/tomoyo/domain.c |
7511 | +++ b/security/tomoyo/domain.c |
7512 | @@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, |
7513 | |
7514 | if (mutex_lock_interruptible(&tomoyo_policy_lock)) |
7515 | return -ENOMEM; |
7516 | - list_for_each_entry_rcu(entry, list, list) { |
7517 | + list_for_each_entry_rcu(entry, list, list, |
7518 | + srcu_read_lock_held(&tomoyo_ss)) { |
7519 | if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) |
7520 | continue; |
7521 | if (!check_duplicate(entry, new_entry)) |
7522 | @@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, |
7523 | } |
7524 | if (mutex_lock_interruptible(&tomoyo_policy_lock)) |
7525 | goto out; |
7526 | - list_for_each_entry_rcu(entry, list, list) { |
7527 | + list_for_each_entry_rcu(entry, list, list, |
7528 | + srcu_read_lock_held(&tomoyo_ss)) { |
7529 | if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) |
7530 | continue; |
7531 | if (!tomoyo_same_acl_head(entry, new_entry) || |
7532 | @@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, |
7533 | u16 i = 0; |
7534 | |
7535 | retry: |
7536 | - list_for_each_entry_rcu(ptr, list, list) { |
7537 | + list_for_each_entry_rcu(ptr, list, list, |
7538 | + srcu_read_lock_held(&tomoyo_ss)) { |
7539 | if (ptr->is_deleted || ptr->type != r->param_type) |
7540 | continue; |
7541 | if (!check_entry(r, ptr)) |
7542 | @@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition |
7543 | { |
7544 | const struct tomoyo_transition_control *ptr; |
7545 | |
7546 | - list_for_each_entry_rcu(ptr, list, head.list) { |
7547 | + list_for_each_entry_rcu(ptr, list, head.list, |
7548 | + srcu_read_lock_held(&tomoyo_ss)) { |
7549 | if (ptr->head.is_deleted || ptr->type != type) |
7550 | continue; |
7551 | if (ptr->domainname) { |
7552 | @@ -735,7 +739,8 @@ retry: |
7553 | |
7554 | /* Check 'aggregator' directive. */ |
7555 | candidate = &exename; |
7556 | - list_for_each_entry_rcu(ptr, list, head.list) { |
7557 | + list_for_each_entry_rcu(ptr, list, head.list, |
7558 | + srcu_read_lock_held(&tomoyo_ss)) { |
7559 | if (ptr->head.is_deleted || |
7560 | !tomoyo_path_matches_pattern(&exename, |
7561 | ptr->original_name)) |
7562 | diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c |
7563 | index a37c7dc66e44..1cecdd797597 100644 |
7564 | --- a/security/tomoyo/group.c |
7565 | +++ b/security/tomoyo/group.c |
7566 | @@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, |
7567 | { |
7568 | struct tomoyo_path_group *member; |
7569 | |
7570 | - list_for_each_entry_rcu(member, &group->member_list, head.list) { |
7571 | + list_for_each_entry_rcu(member, &group->member_list, head.list, |
7572 | + srcu_read_lock_held(&tomoyo_ss)) { |
7573 | if (member->head.is_deleted) |
7574 | continue; |
7575 | if (!tomoyo_path_matches_pattern(pathname, member->member_name)) |
7576 | @@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min, |
7577 | struct tomoyo_number_group *member; |
7578 | bool matched = false; |
7579 | |
7580 | - list_for_each_entry_rcu(member, &group->member_list, head.list) { |
7581 | + list_for_each_entry_rcu(member, &group->member_list, head.list, |
7582 | + srcu_read_lock_held(&tomoyo_ss)) { |
7583 | if (member->head.is_deleted) |
7584 | continue; |
7585 | if (min > member->number.values[1] || |
7586 | @@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, |
7587 | bool matched = false; |
7588 | const u8 size = is_ipv6 ? 16 : 4; |
7589 | |
7590 | - list_for_each_entry_rcu(member, &group->member_list, head.list) { |
7591 | + list_for_each_entry_rcu(member, &group->member_list, head.list, |
7592 | + srcu_read_lock_held(&tomoyo_ss)) { |
7593 | if (member->head.is_deleted) |
7594 | continue; |
7595 | if (member->address.is_ipv6 != is_ipv6) |
7596 | diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c |
7597 | index 52752e1a84ed..eba0b3395851 100644 |
7598 | --- a/security/tomoyo/util.c |
7599 | +++ b/security/tomoyo/util.c |
7600 | @@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname) |
7601 | |
7602 | name.name = domainname; |
7603 | tomoyo_fill_path_info(&name); |
7604 | - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { |
7605 | + list_for_each_entry_rcu(domain, &tomoyo_domain_list, list, |
7606 | + srcu_read_lock_held(&tomoyo_ss)) { |
7607 | if (!domain->is_deleted && |
7608 | !tomoyo_pathcmp(&name, domain->domainname)) |
7609 | return domain; |
7610 | @@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) |
7611 | return false; |
7612 | if (!domain) |
7613 | return true; |
7614 | - list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { |
7615 | + list_for_each_entry_rcu(ptr, &domain->acl_info_list, list, |
7616 | + srcu_read_lock_held(&tomoyo_ss)) { |
7617 | u16 perm; |
7618 | u8 i; |
7619 | |
7620 | diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c |
7621 | index a78e4ab478df..c7a49d03463a 100644 |
7622 | --- a/sound/soc/fsl/fsl_esai.c |
7623 | +++ b/sound/soc/fsl/fsl_esai.c |
7624 | @@ -33,6 +33,7 @@ |
7625 | * @fsysclk: system clock source to derive HCK, SCK and FS |
7626 | * @spbaclk: SPBA clock (optional, depending on SoC design) |
7627 | * @task: tasklet to handle the reset operation |
7628 | + * @lock: spin lock between hw_reset() and trigger() |
7629 | * @fifo_depth: depth of tx/rx FIFO |
7630 | * @slot_width: width of each DAI slot |
7631 | * @slots: number of slots |
7632 | @@ -56,6 +57,7 @@ struct fsl_esai { |
7633 | struct clk *fsysclk; |
7634 | struct clk *spbaclk; |
7635 | struct tasklet_struct task; |
7636 | + spinlock_t lock; /* Protect hw_reset and trigger */ |
7637 | u32 fifo_depth; |
7638 | u32 slot_width; |
7639 | u32 slots; |
7640 | @@ -676,8 +678,10 @@ static void fsl_esai_hw_reset(unsigned long arg) |
7641 | { |
7642 | struct fsl_esai *esai_priv = (struct fsl_esai *)arg; |
7643 | bool tx = true, rx = false, enabled[2]; |
7644 | + unsigned long lock_flags; |
7645 | u32 tfcr, rfcr; |
7646 | |
7647 | + spin_lock_irqsave(&esai_priv->lock, lock_flags); |
7648 | /* Save the registers */ |
7649 | regmap_read(esai_priv->regmap, REG_ESAI_TFCR, &tfcr); |
7650 | regmap_read(esai_priv->regmap, REG_ESAI_RFCR, &rfcr); |
7651 | @@ -715,6 +719,8 @@ static void fsl_esai_hw_reset(unsigned long arg) |
7652 | fsl_esai_trigger_start(esai_priv, tx); |
7653 | if (enabled[rx]) |
7654 | fsl_esai_trigger_start(esai_priv, rx); |
7655 | + |
7656 | + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); |
7657 | } |
7658 | |
7659 | static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, |
7660 | @@ -722,6 +728,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, |
7661 | { |
7662 | struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai); |
7663 | bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; |
7664 | + unsigned long lock_flags; |
7665 | |
7666 | esai_priv->channels[tx] = substream->runtime->channels; |
7667 | |
7668 | @@ -729,12 +736,16 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd, |
7669 | case SNDRV_PCM_TRIGGER_START: |
7670 | case SNDRV_PCM_TRIGGER_RESUME: |
7671 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
7672 | + spin_lock_irqsave(&esai_priv->lock, lock_flags); |
7673 | fsl_esai_trigger_start(esai_priv, tx); |
7674 | + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); |
7675 | break; |
7676 | case SNDRV_PCM_TRIGGER_SUSPEND: |
7677 | case SNDRV_PCM_TRIGGER_STOP: |
7678 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
7679 | + spin_lock_irqsave(&esai_priv->lock, lock_flags); |
7680 | fsl_esai_trigger_stop(esai_priv, tx); |
7681 | + spin_unlock_irqrestore(&esai_priv->lock, lock_flags); |
7682 | break; |
7683 | default: |
7684 | return -EINVAL; |
7685 | @@ -1002,6 +1013,7 @@ static int fsl_esai_probe(struct platform_device *pdev) |
7686 | |
7687 | dev_set_drvdata(&pdev->dev, esai_priv); |
7688 | |
7689 | + spin_lock_init(&esai_priv->lock); |
7690 | ret = fsl_esai_hw_init(esai_priv); |
7691 | if (ret) |
7692 | return ret; |
7693 | diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig |
7694 | index 01c99750212a..ef493cae78ff 100644 |
7695 | --- a/sound/soc/intel/Kconfig |
7696 | +++ b/sound/soc/intel/Kconfig |
7697 | @@ -59,6 +59,9 @@ config SND_SOC_INTEL_HASWELL |
7698 | If you have a Intel Haswell or Broadwell platform connected to |
7699 | an I2S codec, then enable this option by saying Y or m. This is |
7700 | typically used for Chromebooks. This is a recommended option. |
7701 | + This option is mutually exclusive with the SOF support on |
7702 | + Broadwell. If you want to enable SOF on Broadwell, you need to |
7703 | + deselect this option first. |
7704 | |
7705 | config SND_SOC_INTEL_BAYTRAIL |
7706 | tristate "Baytrail (legacy) Platforms" |
7707 | diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c |
7708 | index e9596c2096cd..a6c1cf987e6e 100644 |
7709 | --- a/sound/soc/sh/rcar/core.c |
7710 | +++ b/sound/soc/sh/rcar/core.c |
7711 | @@ -376,6 +376,17 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io) |
7712 | */ |
7713 | u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io) |
7714 | { |
7715 | + static const u32 dalign_values[8][2] = { |
7716 | + {0x76543210, 0x67452301}, |
7717 | + {0x00000032, 0x00000023}, |
7718 | + {0x00007654, 0x00006745}, |
7719 | + {0x00000076, 0x00000067}, |
7720 | + {0xfedcba98, 0xefcdab89}, |
7721 | + {0x000000ba, 0x000000ab}, |
7722 | + {0x0000fedc, 0x0000efcd}, |
7723 | + {0x000000fe, 0x000000ef}, |
7724 | + }; |
7725 | + int id = 0, inv; |
7726 | struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io); |
7727 | struct rsnd_mod *target; |
7728 | struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); |
7729 | @@ -411,13 +422,18 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io) |
7730 | target = cmd ? cmd : ssiu; |
7731 | } |
7732 | |
7733 | + if (mod == ssiu) |
7734 | + id = rsnd_mod_id_sub(mod); |
7735 | + |
7736 | /* Non target mod or non 16bit needs normal DALIGN */ |
7737 | if ((snd_pcm_format_width(runtime->format) != 16) || |
7738 | (mod != target)) |
7739 | - return 0x76543210; |
7740 | + inv = 0; |
7741 | /* Target mod needs inverted DALIGN when 16bit */ |
7742 | else |
7743 | - return 0x67452301; |
7744 | + inv = 1; |
7745 | + |
7746 | + return dalign_values[id][inv]; |
7747 | } |
7748 | |
7749 | u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod) |
7750 | diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c |
7751 | index 88978a3036c4..9d3b546bae7b 100644 |
7752 | --- a/sound/soc/soc-core.c |
7753 | +++ b/sound/soc/soc-core.c |
7754 | @@ -1886,6 +1886,8 @@ match: |
7755 | |
7756 | /* convert non BE into BE */ |
7757 | dai_link->no_pcm = 1; |
7758 | + dai_link->dpcm_playback = 1; |
7759 | + dai_link->dpcm_capture = 1; |
7760 | |
7761 | /* override any BE fixups */ |
7762 | dai_link->be_hw_params_fixup = |
7763 | diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c |
7764 | index a6e96cf1d8ff..d07026a846b9 100644 |
7765 | --- a/sound/soc/soc-pcm.c |
7766 | +++ b/sound/soc/soc-pcm.c |
7767 | @@ -1148,7 +1148,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe, |
7768 | { |
7769 | struct snd_soc_dpcm *dpcm; |
7770 | unsigned long flags; |
7771 | +#ifdef CONFIG_DEBUG_FS |
7772 | char *name; |
7773 | +#endif |
7774 | |
7775 | /* only add new dpcms */ |
7776 | for_each_dpcm_be(fe, stream, dpcm) { |
7777 | diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c |
7778 | index 2a22b18e5ec0..69785f688ddf 100644 |
7779 | --- a/sound/soc/sof/imx/imx8.c |
7780 | +++ b/sound/soc/sof/imx/imx8.c |
7781 | @@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev) |
7782 | |
7783 | priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains, |
7784 | sizeof(*priv->pd_dev), GFP_KERNEL); |
7785 | - if (!priv) |
7786 | + if (!priv->pd_dev) |
7787 | return -ENOMEM; |
7788 | |
7789 | priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains, |
7790 | @@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev) |
7791 | } |
7792 | sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM; |
7793 | |
7794 | + /* set default mailbox offset for FW ready message */ |
7795 | + sdev->dsp_box.offset = MBOX_OFFSET; |
7796 | + |
7797 | return 0; |
7798 | |
7799 | exit_pdev_unregister: |
7800 | diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig |
7801 | index d62f51d33be1..8421b97d949e 100644 |
7802 | --- a/sound/soc/sof/intel/Kconfig |
7803 | +++ b/sound/soc/sof/intel/Kconfig |
7804 | @@ -76,10 +76,18 @@ config SND_SOC_SOF_BAYTRAIL |
7805 | |
7806 | config SND_SOC_SOF_BROADWELL_SUPPORT |
7807 | bool "SOF support for Broadwell" |
7808 | + depends on SND_SOC_INTEL_HASWELL=n |
7809 | help |
7810 | This adds support for Sound Open Firmware for Intel(R) platforms |
7811 | using the Broadwell processors. |
7812 | - Say Y if you have such a device. |
7813 | + This option is mutually exclusive with the Haswell/Broadwell legacy |
7814 | + driver. If you want to enable SOF on Broadwell you need to deselect |
7815 | + the legacy driver first. |
7816 | + SOF does fully support Broadwell yet, so this option is not |
7817 | + recommended for distros. At some point all legacy drivers will be |
7818 | + deprecated but not before all userspace firmware/topology/UCM files |
7819 | + are made available to downstream distros. |
7820 | + Say Y if you want to enable SOF on Broadwell |
7821 | If unsure select "N". |
7822 | |
7823 | config SND_SOC_SOF_BROADWELL |
7824 | diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c |
7825 | index cd4b235fce57..e53fb4bd66b3 100644 |
7826 | --- a/sound/soc/stm/stm32_spdifrx.c |
7827 | +++ b/sound/soc/stm/stm32_spdifrx.c |
7828 | @@ -12,7 +12,6 @@ |
7829 | #include <linux/delay.h> |
7830 | #include <linux/module.h> |
7831 | #include <linux/of_platform.h> |
7832 | -#include <linux/pinctrl/consumer.h> |
7833 | #include <linux/regmap.h> |
7834 | #include <linux/reset.h> |
7835 | |
7836 | @@ -220,6 +219,7 @@ |
7837 | * @slave_config: dma slave channel runtime config pointer |
7838 | * @phys_addr: SPDIFRX registers physical base address |
7839 | * @lock: synchronization enabling lock |
7840 | + * @irq_lock: prevent race condition with IRQ on stream state |
7841 | * @cs: channel status buffer |
7842 | * @ub: user data buffer |
7843 | * @irq: SPDIFRX interrupt line |
7844 | @@ -240,6 +240,7 @@ struct stm32_spdifrx_data { |
7845 | struct dma_slave_config slave_config; |
7846 | dma_addr_t phys_addr; |
7847 | spinlock_t lock; /* Sync enabling lock */ |
7848 | + spinlock_t irq_lock; /* Prevent race condition on stream state */ |
7849 | unsigned char cs[SPDIFRX_CS_BYTES_NB]; |
7850 | unsigned char ub[SPDIFRX_UB_BYTES_NB]; |
7851 | int irq; |
7852 | @@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx) |
7853 | static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) |
7854 | { |
7855 | int cr, cr_mask, imr, ret; |
7856 | + unsigned long flags; |
7857 | |
7858 | /* Enable IRQs */ |
7859 | imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE; |
7860 | @@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) |
7861 | if (ret) |
7862 | return ret; |
7863 | |
7864 | - spin_lock(&spdifrx->lock); |
7865 | + spin_lock_irqsave(&spdifrx->lock, flags); |
7866 | |
7867 | spdifrx->refcount++; |
7868 | |
7869 | @@ -360,7 +362,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) |
7870 | "Failed to start synchronization\n"); |
7871 | } |
7872 | |
7873 | - spin_unlock(&spdifrx->lock); |
7874 | + spin_unlock_irqrestore(&spdifrx->lock, flags); |
7875 | |
7876 | return ret; |
7877 | } |
7878 | @@ -368,11 +370,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx) |
7879 | static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) |
7880 | { |
7881 | int cr, cr_mask, reg; |
7882 | + unsigned long flags; |
7883 | |
7884 | - spin_lock(&spdifrx->lock); |
7885 | + spin_lock_irqsave(&spdifrx->lock, flags); |
7886 | |
7887 | if (--spdifrx->refcount) { |
7888 | - spin_unlock(&spdifrx->lock); |
7889 | + spin_unlock_irqrestore(&spdifrx->lock, flags); |
7890 | return; |
7891 | } |
7892 | |
7893 | @@ -391,7 +394,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx) |
7894 | regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, ®); |
7895 | regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, ®); |
7896 | |
7897 | - spin_unlock(&spdifrx->lock); |
7898 | + spin_unlock_irqrestore(&spdifrx->lock, flags); |
7899 | } |
7900 | |
7901 | static int stm32_spdifrx_dma_ctrl_register(struct device *dev, |
7902 | @@ -478,8 +481,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) |
7903 | memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB); |
7904 | memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB); |
7905 | |
7906 | - pinctrl_pm_select_default_state(&spdifrx->pdev->dev); |
7907 | - |
7908 | ret = stm32_spdifrx_dma_ctrl_start(spdifrx); |
7909 | if (ret < 0) |
7910 | return ret; |
7911 | @@ -511,7 +512,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx) |
7912 | |
7913 | end: |
7914 | clk_disable_unprepare(spdifrx->kclk); |
7915 | - pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev); |
7916 | |
7917 | return ret; |
7918 | } |
7919 | @@ -663,7 +663,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = { |
7920 | static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) |
7921 | { |
7922 | struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid; |
7923 | - struct snd_pcm_substream *substream = spdifrx->substream; |
7924 | struct platform_device *pdev = spdifrx->pdev; |
7925 | unsigned int cr, mask, sr, imr; |
7926 | unsigned int flags; |
7927 | @@ -731,14 +730,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid) |
7928 | regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR, |
7929 | SPDIFRX_CR_SPDIFEN_MASK, cr); |
7930 | |
7931 | - if (substream) |
7932 | - snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED); |
7933 | + spin_lock(&spdifrx->irq_lock); |
7934 | + if (spdifrx->substream) |
7935 | + snd_pcm_stop(spdifrx->substream, |
7936 | + SNDRV_PCM_STATE_DISCONNECTED); |
7937 | + spin_unlock(&spdifrx->irq_lock); |
7938 | |
7939 | return IRQ_HANDLED; |
7940 | } |
7941 | |
7942 | - if (err_xrun && substream) |
7943 | - snd_pcm_stop_xrun(substream); |
7944 | + spin_lock(&spdifrx->irq_lock); |
7945 | + if (err_xrun && spdifrx->substream) |
7946 | + snd_pcm_stop_xrun(spdifrx->substream); |
7947 | + spin_unlock(&spdifrx->irq_lock); |
7948 | |
7949 | return IRQ_HANDLED; |
7950 | } |
7951 | @@ -747,9 +751,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream, |
7952 | struct snd_soc_dai *cpu_dai) |
7953 | { |
7954 | struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); |
7955 | + unsigned long flags; |
7956 | int ret; |
7957 | |
7958 | + spin_lock_irqsave(&spdifrx->irq_lock, flags); |
7959 | spdifrx->substream = substream; |
7960 | + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); |
7961 | |
7962 | ret = clk_prepare_enable(spdifrx->kclk); |
7963 | if (ret) |
7964 | @@ -825,8 +832,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream, |
7965 | struct snd_soc_dai *cpu_dai) |
7966 | { |
7967 | struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai); |
7968 | + unsigned long flags; |
7969 | |
7970 | + spin_lock_irqsave(&spdifrx->irq_lock, flags); |
7971 | spdifrx->substream = NULL; |
7972 | + spin_unlock_irqrestore(&spdifrx->irq_lock, flags); |
7973 | + |
7974 | clk_disable_unprepare(spdifrx->kclk); |
7975 | } |
7976 | |
7977 | @@ -930,6 +941,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev) |
7978 | spdifrx->pdev = pdev; |
7979 | init_completion(&spdifrx->cs_completion); |
7980 | spin_lock_init(&spdifrx->lock); |
7981 | + spin_lock_init(&spdifrx->irq_lock); |
7982 | |
7983 | platform_set_drvdata(pdev, spdifrx); |
7984 | |
7985 | diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile |
7986 | index 56ce6292071b..33e2638ef7f0 100644 |
7987 | --- a/tools/lib/bpf/Makefile |
7988 | +++ b/tools/lib/bpf/Makefile |
7989 | @@ -215,7 +215,7 @@ check_abi: $(OUTPUT)libbpf.so |
7990 | "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ |
7991 | "Please make sure all LIBBPF_API symbols are" \ |
7992 | "versioned in $(VERSION_SCRIPT)." >&2; \ |
7993 | - readelf -s --wide $(OUTPUT)libbpf-in.o | \ |
7994 | + readelf -s --wide $(BPF_IN_SHARED) | \ |
7995 | cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ |
7996 | awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ |
7997 | sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ |
7998 | diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c |
7999 | index cb1e51fcc84e..32b7c6f9043d 100644 |
8000 | --- a/tools/pci/pcitest.c |
8001 | +++ b/tools/pci/pcitest.c |
8002 | @@ -129,6 +129,7 @@ static int run_test(struct pci_test *test) |
8003 | } |
8004 | |
8005 | fflush(stdout); |
8006 | + close(fd); |
8007 | return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */ |
8008 | } |
8009 | |
8010 | diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json |
8011 | index 68618152ea2c..89e070727e1b 100644 |
8012 | --- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json |
8013 | +++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json |
8014 | @@ -4,7 +4,7 @@ |
8015 | "EventCode": "128", |
8016 | "EventName": "L1D_RO_EXCL_WRITES", |
8017 | "BriefDescription": "L1D Read-only Exclusive Writes", |
8018 | - "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" |
8019 | + "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line" |
8020 | }, |
8021 | { |
8022 | "Unit": "CPU-M-CF", |
8023 | diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh |
8024 | index b879305a766d..5b8c0fedee76 100755 |
8025 | --- a/tools/testing/selftests/firmware/fw_lib.sh |
8026 | +++ b/tools/testing/selftests/firmware/fw_lib.sh |
8027 | @@ -34,6 +34,12 @@ test_modprobe() |
8028 | |
8029 | check_mods() |
8030 | { |
8031 | + local uid=$(id -u) |
8032 | + if [ $uid -ne 0 ]; then |
8033 | + echo "skip all tests: must be run as root" >&2 |
8034 | + exit $ksft_skip |
8035 | + fi |
8036 | + |
8037 | trap "test_modprobe" EXIT |
8038 | if [ ! -d $DIR ]; then |
8039 | modprobe test_firmware |
8040 | diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh |
8041 | index 6e4626ae71b0..8f4057310b5b 100755 |
8042 | --- a/tools/testing/selftests/net/forwarding/loopback.sh |
8043 | +++ b/tools/testing/selftests/net/forwarding/loopback.sh |
8044 | @@ -1,6 +1,9 @@ |
8045 | #!/bin/bash |
8046 | # SPDX-License-Identifier: GPL-2.0 |
8047 | |
8048 | +# Kselftest framework requirement - SKIP code is 4. |
8049 | +ksft_skip=4 |
8050 | + |
8051 | ALL_TESTS="loopback_test" |
8052 | NUM_NETIFS=2 |
8053 | source tc_common.sh |
8054 | @@ -72,6 +75,11 @@ setup_prepare() |
8055 | |
8056 | h1_create |
8057 | h2_create |
8058 | + |
8059 | + if ethtool -k $h1 | grep loopback | grep -q fixed; then |
8060 | + log_test "SKIP: dev $h1 does not support loopback feature" |
8061 | + exit $ksft_skip |
8062 | + fi |
8063 | } |
8064 | |
8065 | cleanup() |
8066 | diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings |
8067 | new file mode 100644 |
8068 | index 000000000000..e7b9417537fb |
8069 | --- /dev/null |
8070 | +++ b/tools/testing/selftests/rseq/settings |
8071 | @@ -0,0 +1 @@ |
8072 | +timeout=0 |