Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0160-4.19.61-all-fixes.patch

Parent Directory | Revision Log


Revision 3439 - (show annotations) (download)
Fri Aug 2 11:48:06 2019 UTC (4 years, 9 months ago) by niro
File size: 422861 byte(s)
-linux-4.19.61
1 diff --git a/Documentation/atomic_t.txt b/Documentation/atomic_t.txt
2 index 913396ac5824..ed0d814df7e0 100644
3 --- a/Documentation/atomic_t.txt
4 +++ b/Documentation/atomic_t.txt
5 @@ -177,6 +177,9 @@ These helper barriers exist because architectures have varying implicit
6 ordering on their SMP atomic primitives. For example our TSO architectures
7 provide full ordered atomics and these barriers are no-ops.
8
9 +NOTE: when the atomic RmW ops are fully ordered, they should also imply a
10 +compiler barrier.
11 +
12 Thus:
13
14 atomic_fetch_add();
15 diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
16 index 42cd81090a2c..3f3cfc1d8d4d 100644
17 --- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
18 +++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
19 @@ -16,7 +16,7 @@ Required properties:
20
21 Optional properties:
22 - interrupts: interrupt line number for the SMI error/done interrupt
23 -- clocks: phandle for up to three required clocks for the MDIO instance
24 +- clocks: phandle for up to four required clocks for the MDIO instance
25
26 The child nodes of the MDIO driver are the individual PHY devices
27 connected to this MDIO bus. They must have a "reg" property given the
28 diff --git a/Documentation/scheduler/sched-pelt.c b/Documentation/scheduler/sched-pelt.c
29 index e4219139386a..7238b355919c 100644
30 --- a/Documentation/scheduler/sched-pelt.c
31 +++ b/Documentation/scheduler/sched-pelt.c
32 @@ -20,7 +20,8 @@ void calc_runnable_avg_yN_inv(void)
33 int i;
34 unsigned int x;
35
36 - printf("static const u32 runnable_avg_yN_inv[] = {");
37 + /* To silence -Wunused-but-set-variable warnings. */
38 + printf("static const u32 runnable_avg_yN_inv[] __maybe_unused = {");
39 for (i = 0; i < HALFLIFE; i++) {
40 x = ((1UL<<32)-1)*pow(y, i);
41
42 diff --git a/Makefile b/Makefile
43 index 5fb79d493012..b16485c580d7 100644
44 --- a/Makefile
45 +++ b/Makefile
46 @@ -1,7 +1,7 @@
47 # SPDX-License-Identifier: GPL-2.0
48 VERSION = 4
49 PATCHLEVEL = 19
50 -SUBLEVEL = 60
51 +SUBLEVEL = 61
52 EXTRAVERSION =
53 NAME = "People's Front"
54
55 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
56 index 8790a29d0af4..e3ebece79617 100644
57 --- a/arch/arm64/Kconfig
58 +++ b/arch/arm64/Kconfig
59 @@ -251,7 +251,8 @@ config GENERIC_CALIBRATE_DELAY
60 def_bool y
61
62 config ZONE_DMA32
63 - def_bool y
64 + bool "Support DMA32 zone" if EXPERT
65 + default y
66
67 config HAVE_GENERIC_GUP
68 def_bool y
69 diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
70 index 212e6634c9ba..7398ae8856dc 100644
71 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
72 +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2180.dtsi
73 @@ -330,7 +330,8 @@
74 regulator-max-microvolt = <1320000>;
75 enable-gpios = <&pmic 6 GPIO_ACTIVE_HIGH>;
76 regulator-ramp-delay = <80>;
77 - regulator-enable-ramp-delay = <1000>;
78 + regulator-enable-ramp-delay = <2000>;
79 + regulator-settling-time-us = <160>;
80 };
81 };
82 };
83 diff --git a/arch/arm64/boot/dts/nvidia/tegra210.dtsi b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
84 index 3be920efee82..6597c0894137 100644
85 --- a/arch/arm64/boot/dts/nvidia/tegra210.dtsi
86 +++ b/arch/arm64/boot/dts/nvidia/tegra210.dtsi
87 @@ -1119,7 +1119,7 @@
88 compatible = "nvidia,tegra210-agic";
89 #interrupt-cells = <3>;
90 interrupt-controller;
91 - reg = <0x702f9000 0x2000>,
92 + reg = <0x702f9000 0x1000>,
93 <0x702fa000 0x2000>;
94 interrupts = <GIC_SPI 102 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
95 clocks = <&tegra_car TEGRA210_CLK_APE>;
96 diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
97 index 17fac2889f56..d8c521c757e8 100644
98 --- a/arch/arm64/crypto/sha1-ce-glue.c
99 +++ b/arch/arm64/crypto/sha1-ce-glue.c
100 @@ -54,7 +54,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
101 unsigned int len, u8 *out)
102 {
103 struct sha1_ce_state *sctx = shash_desc_ctx(desc);
104 - bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
105 + bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE) && len;
106
107 if (!may_use_simd())
108 return crypto_sha1_finup(desc, data, len, out);
109 diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
110 index 261f5195cab7..c47d1a28ff6b 100644
111 --- a/arch/arm64/crypto/sha2-ce-glue.c
112 +++ b/arch/arm64/crypto/sha2-ce-glue.c
113 @@ -59,7 +59,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
114 unsigned int len, u8 *out)
115 {
116 struct sha256_ce_state *sctx = shash_desc_ctx(desc);
117 - bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
118 + bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE) && len;
119
120 if (!may_use_simd()) {
121 if (len)
122 diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
123 index ed46dc188b22..970f15c76bac 100644
124 --- a/arch/arm64/kernel/acpi.c
125 +++ b/arch/arm64/kernel/acpi.c
126 @@ -154,10 +154,14 @@ static int __init acpi_fadt_sanity_check(void)
127 */
128 if (table->revision < 5 ||
129 (table->revision == 5 && fadt->minor_revision < 1)) {
130 - pr_err("Unsupported FADT revision %d.%d, should be 5.1+\n",
131 + pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 5.1+\n",
132 table->revision, fadt->minor_revision);
133 - ret = -EINVAL;
134 - goto out;
135 +
136 + if (!fadt->arm_boot_flags) {
137 + ret = -EINVAL;
138 + goto out;
139 + }
140 + pr_err("FADT has ARM boot flags set, assuming 5.1\n");
141 }
142
143 if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
144 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
145 index 8556876c9109..5f800384cb9a 100644
146 --- a/arch/arm64/kernel/entry.S
147 +++ b/arch/arm64/kernel/entry.S
148 @@ -824,7 +824,7 @@ el0_dbg:
149 mov x1, x25
150 mov x2, sp
151 bl do_debug_exception
152 - enable_daif
153 + enable_da_f
154 ct_user_exit
155 b ret_to_user
156 el0_inv:
157 @@ -876,7 +876,7 @@ el0_error_naked:
158 enable_dbg
159 mov x0, sp
160 bl do_serror
161 - enable_daif
162 + enable_da_f
163 ct_user_exit
164 b ret_to_user
165 ENDPROC(el0_error)
166 diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
167 index 8da289dc843a..eff6a564ab80 100644
168 --- a/arch/arm64/kernel/image.h
169 +++ b/arch/arm64/kernel/image.h
170 @@ -73,7 +73,11 @@
171
172 #ifdef CONFIG_EFI
173
174 -__efistub_stext_offset = stext - _text;
175 +/*
176 + * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol:
177 + * https://github.com/ClangBuiltLinux/linux/issues/561
178 + */
179 +__efistub_stext_offset = ABSOLUTE(stext - _text);
180
181 /*
182 * The EFI stub has its own symbol namespace prefixed by __efistub_, to
183 diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
184 index 774c3e17c798..29d2f425806e 100644
185 --- a/arch/arm64/mm/init.c
186 +++ b/arch/arm64/mm/init.c
187 @@ -233,8 +233,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
188 {
189 unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
190
191 - if (IS_ENABLED(CONFIG_ZONE_DMA32))
192 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
193 +#ifdef CONFIG_ZONE_DMA32
194 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
195 +#endif
196 max_zone_pfns[ZONE_NORMAL] = max;
197
198 free_area_init_nodes(max_zone_pfns);
199 diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
200 index 3c453a1f1ff1..172801ed35b8 100644
201 --- a/arch/mips/boot/compressed/Makefile
202 +++ b/arch/mips/boot/compressed/Makefile
203 @@ -78,6 +78,8 @@ OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
204 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
205 $(call if_changed,objcopy)
206
207 +HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
208 +
209 # Calculate the load address of the compressed kernel image
210 hostprogs-y := calc_vmlinuz_load_addr
211
212 diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
213 index 542c3ede9722..d14f75ec8273 100644
214 --- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
215 +++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
216 @@ -13,7 +13,7 @@
217 #include <stdint.h>
218 #include <stdio.h>
219 #include <stdlib.h>
220 -#include "../../../../include/linux/sizes.h"
221 +#include <linux/sizes.h>
222
223 int main(int argc, char *argv[])
224 {
225 diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
226 index c2917b39966b..bba2c8837951 100644
227 --- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
228 +++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
229 @@ -27,8 +27,8 @@
230 #define AR933X_UART_CS_PARITY_S 0
231 #define AR933X_UART_CS_PARITY_M 0x3
232 #define AR933X_UART_CS_PARITY_NONE 0
233 -#define AR933X_UART_CS_PARITY_ODD 1
234 -#define AR933X_UART_CS_PARITY_EVEN 2
235 +#define AR933X_UART_CS_PARITY_ODD 2
236 +#define AR933X_UART_CS_PARITY_EVEN 3
237 #define AR933X_UART_CS_IF_MODE_S 2
238 #define AR933X_UART_CS_IF_MODE_M 0x3
239 #define AR933X_UART_CS_IF_MODE_NONE 0
240 diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
241 index 0964c236e3e5..de2998cb189e 100644
242 --- a/arch/parisc/kernel/ptrace.c
243 +++ b/arch/parisc/kernel/ptrace.c
244 @@ -167,6 +167,9 @@ long arch_ptrace(struct task_struct *child, long request,
245 if ((addr & (sizeof(unsigned long)-1)) ||
246 addr >= sizeof(struct pt_regs))
247 break;
248 + if (addr == PT_IAOQ0 || addr == PT_IAOQ1) {
249 + data |= 3; /* ensure userspace privilege */
250 + }
251 if ((addr >= PT_GR1 && addr <= PT_GR31) ||
252 addr == PT_IAOQ0 || addr == PT_IAOQ1 ||
253 (addr >= PT_FR0 && addr <= PT_FR31 + 4) ||
254 @@ -228,16 +231,18 @@ long arch_ptrace(struct task_struct *child, long request,
255
256 static compat_ulong_t translate_usr_offset(compat_ulong_t offset)
257 {
258 - if (offset < 0)
259 - return sizeof(struct pt_regs);
260 - else if (offset <= 32*4) /* gr[0..31] */
261 - return offset * 2 + 4;
262 - else if (offset <= 32*4+32*8) /* gr[0..31] + fr[0..31] */
263 - return offset + 32*4;
264 - else if (offset < sizeof(struct pt_regs)/2 + 32*4)
265 - return offset * 2 + 4 - 32*8;
266 + compat_ulong_t pos;
267 +
268 + if (offset < 32*4) /* gr[0..31] */
269 + pos = offset * 2 + 4;
270 + else if (offset < 32*4+32*8) /* fr[0] ... fr[31] */
271 + pos = (offset - 32*4) + PT_FR0;
272 + else if (offset < sizeof(struct pt_regs)/2 + 32*4) /* sr[0] ... ipsw */
273 + pos = (offset - 32*4 - 32*8) * 2 + PT_SR0 + 4;
274 else
275 - return sizeof(struct pt_regs);
276 + pos = sizeof(struct pt_regs);
277 +
278 + return pos;
279 }
280
281 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
282 @@ -281,9 +286,12 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
283 addr = translate_usr_offset(addr);
284 if (addr >= sizeof(struct pt_regs))
285 break;
286 + if (addr == PT_IAOQ0+4 || addr == PT_IAOQ1+4) {
287 + data |= 3; /* ensure userspace privilege */
288 + }
289 if (addr >= PT_FR0 && addr <= PT_FR31 + 4) {
290 /* Special case, fp regs are 64 bits anyway */
291 - *(__u64 *) ((char *) task_regs(child) + addr) = data;
292 + *(__u32 *) ((char *) task_regs(child) + addr) = data;
293 ret = 0;
294 }
295 else if ((addr >= PT_GR1+4 && addr <= PT_GR31+4) ||
296 @@ -496,7 +504,8 @@ static void set_reg(struct pt_regs *regs, int num, unsigned long val)
297 return;
298 case RI(iaoq[0]):
299 case RI(iaoq[1]):
300 - regs->iaoq[num - RI(iaoq[0])] = val;
301 + /* set 2 lowest bits to ensure userspace privilege: */
302 + regs->iaoq[num - RI(iaoq[0])] = val | 3;
303 return;
304 case RI(sar): regs->sar = val;
305 return;
306 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
307 index 2d8fc8c9da7a..06cc77813dbb 100644
308 --- a/arch/powerpc/kernel/exceptions-64s.S
309 +++ b/arch/powerpc/kernel/exceptions-64s.S
310 @@ -1745,7 +1745,7 @@ handle_page_fault:
311 addi r3,r1,STACK_FRAME_OVERHEAD
312 bl do_page_fault
313 cmpdi r3,0
314 - beq+ 12f
315 + beq+ ret_from_except_lite
316 bl save_nvgprs
317 mr r5,r3
318 addi r3,r1,STACK_FRAME_OVERHEAD
319 @@ -1760,7 +1760,12 @@ handle_dabr_fault:
320 ld r5,_DSISR(r1)
321 addi r3,r1,STACK_FRAME_OVERHEAD
322 bl do_break
323 -12: b ret_from_except_lite
324 + /*
325 + * do_break() may have changed the NV GPRS while handling a breakpoint.
326 + * If so, we need to restore them with their updated values. Don't use
327 + * ret_from_except_lite here.
328 + */
329 + b ret_from_except
330
331
332 #ifdef CONFIG_PPC_BOOK3S_64
333 diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
334 index 7a919e9a3400..cbdf86228eaa 100644
335 --- a/arch/powerpc/kernel/swsusp_32.S
336 +++ b/arch/powerpc/kernel/swsusp_32.S
337 @@ -25,11 +25,19 @@
338 #define SL_IBAT2 0x48
339 #define SL_DBAT3 0x50
340 #define SL_IBAT3 0x58
341 -#define SL_TB 0x60
342 -#define SL_R2 0x68
343 -#define SL_CR 0x6c
344 -#define SL_LR 0x70
345 -#define SL_R12 0x74 /* r12 to r31 */
346 +#define SL_DBAT4 0x60
347 +#define SL_IBAT4 0x68
348 +#define SL_DBAT5 0x70
349 +#define SL_IBAT5 0x78
350 +#define SL_DBAT6 0x80
351 +#define SL_IBAT6 0x88
352 +#define SL_DBAT7 0x90
353 +#define SL_IBAT7 0x98
354 +#define SL_TB 0xa0
355 +#define SL_R2 0xa8
356 +#define SL_CR 0xac
357 +#define SL_LR 0xb0
358 +#define SL_R12 0xb4 /* r12 to r31 */
359 #define SL_SIZE (SL_R12 + 80)
360
361 .section .data
362 @@ -114,6 +122,41 @@ _GLOBAL(swsusp_arch_suspend)
363 mfibatl r4,3
364 stw r4,SL_IBAT3+4(r11)
365
366 +BEGIN_MMU_FTR_SECTION
367 + mfspr r4,SPRN_DBAT4U
368 + stw r4,SL_DBAT4(r11)
369 + mfspr r4,SPRN_DBAT4L
370 + stw r4,SL_DBAT4+4(r11)
371 + mfspr r4,SPRN_DBAT5U
372 + stw r4,SL_DBAT5(r11)
373 + mfspr r4,SPRN_DBAT5L
374 + stw r4,SL_DBAT5+4(r11)
375 + mfspr r4,SPRN_DBAT6U
376 + stw r4,SL_DBAT6(r11)
377 + mfspr r4,SPRN_DBAT6L
378 + stw r4,SL_DBAT6+4(r11)
379 + mfspr r4,SPRN_DBAT7U
380 + stw r4,SL_DBAT7(r11)
381 + mfspr r4,SPRN_DBAT7L
382 + stw r4,SL_DBAT7+4(r11)
383 + mfspr r4,SPRN_IBAT4U
384 + stw r4,SL_IBAT4(r11)
385 + mfspr r4,SPRN_IBAT4L
386 + stw r4,SL_IBAT4+4(r11)
387 + mfspr r4,SPRN_IBAT5U
388 + stw r4,SL_IBAT5(r11)
389 + mfspr r4,SPRN_IBAT5L
390 + stw r4,SL_IBAT5+4(r11)
391 + mfspr r4,SPRN_IBAT6U
392 + stw r4,SL_IBAT6(r11)
393 + mfspr r4,SPRN_IBAT6L
394 + stw r4,SL_IBAT6+4(r11)
395 + mfspr r4,SPRN_IBAT7U
396 + stw r4,SL_IBAT7(r11)
397 + mfspr r4,SPRN_IBAT7L
398 + stw r4,SL_IBAT7+4(r11)
399 +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
400 +
401 #if 0
402 /* Backup various CPU config stuffs */
403 bl __save_cpu_setup
404 @@ -279,27 +322,41 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
405 mtibatu 3,r4
406 lwz r4,SL_IBAT3+4(r11)
407 mtibatl 3,r4
408 -#endif
409 -
410 BEGIN_MMU_FTR_SECTION
411 - li r4,0
412 + lwz r4,SL_DBAT4(r11)
413 mtspr SPRN_DBAT4U,r4
414 + lwz r4,SL_DBAT4+4(r11)
415 mtspr SPRN_DBAT4L,r4
416 + lwz r4,SL_DBAT5(r11)
417 mtspr SPRN_DBAT5U,r4
418 + lwz r4,SL_DBAT5+4(r11)
419 mtspr SPRN_DBAT5L,r4
420 + lwz r4,SL_DBAT6(r11)
421 mtspr SPRN_DBAT6U,r4
422 + lwz r4,SL_DBAT6+4(r11)
423 mtspr SPRN_DBAT6L,r4
424 + lwz r4,SL_DBAT7(r11)
425 mtspr SPRN_DBAT7U,r4
426 + lwz r4,SL_DBAT7+4(r11)
427 mtspr SPRN_DBAT7L,r4
428 + lwz r4,SL_IBAT4(r11)
429 mtspr SPRN_IBAT4U,r4
430 + lwz r4,SL_IBAT4+4(r11)
431 mtspr SPRN_IBAT4L,r4
432 + lwz r4,SL_IBAT5(r11)
433 mtspr SPRN_IBAT5U,r4
434 + lwz r4,SL_IBAT5+4(r11)
435 mtspr SPRN_IBAT5L,r4
436 + lwz r4,SL_IBAT6(r11)
437 mtspr SPRN_IBAT6U,r4
438 + lwz r4,SL_IBAT6+4(r11)
439 mtspr SPRN_IBAT6L,r4
440 + lwz r4,SL_IBAT7(r11)
441 mtspr SPRN_IBAT7U,r4
442 + lwz r4,SL_IBAT7+4(r11)
443 mtspr SPRN_IBAT7L,r4
444 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
445 +#endif
446
447 /* Flush all TLBs */
448 lis r4,0x1000
449 diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
450 index f89808b9713d..b0660ef69177 100644
451 --- a/arch/powerpc/platforms/powermac/sleep.S
452 +++ b/arch/powerpc/platforms/powermac/sleep.S
453 @@ -38,10 +38,18 @@
454 #define SL_IBAT2 0x48
455 #define SL_DBAT3 0x50
456 #define SL_IBAT3 0x58
457 -#define SL_TB 0x60
458 -#define SL_R2 0x68
459 -#define SL_CR 0x6c
460 -#define SL_R12 0x70 /* r12 to r31 */
461 +#define SL_DBAT4 0x60
462 +#define SL_IBAT4 0x68
463 +#define SL_DBAT5 0x70
464 +#define SL_IBAT5 0x78
465 +#define SL_DBAT6 0x80
466 +#define SL_IBAT6 0x88
467 +#define SL_DBAT7 0x90
468 +#define SL_IBAT7 0x98
469 +#define SL_TB 0xa0
470 +#define SL_R2 0xa8
471 +#define SL_CR 0xac
472 +#define SL_R12 0xb0 /* r12 to r31 */
473 #define SL_SIZE (SL_R12 + 80)
474
475 .section .text
476 @@ -126,6 +134,41 @@ _GLOBAL(low_sleep_handler)
477 mfibatl r4,3
478 stw r4,SL_IBAT3+4(r1)
479
480 +BEGIN_MMU_FTR_SECTION
481 + mfspr r4,SPRN_DBAT4U
482 + stw r4,SL_DBAT4(r1)
483 + mfspr r4,SPRN_DBAT4L
484 + stw r4,SL_DBAT4+4(r1)
485 + mfspr r4,SPRN_DBAT5U
486 + stw r4,SL_DBAT5(r1)
487 + mfspr r4,SPRN_DBAT5L
488 + stw r4,SL_DBAT5+4(r1)
489 + mfspr r4,SPRN_DBAT6U
490 + stw r4,SL_DBAT6(r1)
491 + mfspr r4,SPRN_DBAT6L
492 + stw r4,SL_DBAT6+4(r1)
493 + mfspr r4,SPRN_DBAT7U
494 + stw r4,SL_DBAT7(r1)
495 + mfspr r4,SPRN_DBAT7L
496 + stw r4,SL_DBAT7+4(r1)
497 + mfspr r4,SPRN_IBAT4U
498 + stw r4,SL_IBAT4(r1)
499 + mfspr r4,SPRN_IBAT4L
500 + stw r4,SL_IBAT4+4(r1)
501 + mfspr r4,SPRN_IBAT5U
502 + stw r4,SL_IBAT5(r1)
503 + mfspr r4,SPRN_IBAT5L
504 + stw r4,SL_IBAT5+4(r1)
505 + mfspr r4,SPRN_IBAT6U
506 + stw r4,SL_IBAT6(r1)
507 + mfspr r4,SPRN_IBAT6L
508 + stw r4,SL_IBAT6+4(r1)
509 + mfspr r4,SPRN_IBAT7U
510 + stw r4,SL_IBAT7(r1)
511 + mfspr r4,SPRN_IBAT7L
512 + stw r4,SL_IBAT7+4(r1)
513 +END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
514 +
515 /* Backup various CPU config stuffs */
516 bl __save_cpu_setup
517
518 @@ -326,22 +369,37 @@ grackle_wake_up:
519 mtibatl 3,r4
520
521 BEGIN_MMU_FTR_SECTION
522 - li r4,0
523 + lwz r4,SL_DBAT4(r1)
524 mtspr SPRN_DBAT4U,r4
525 + lwz r4,SL_DBAT4+4(r1)
526 mtspr SPRN_DBAT4L,r4
527 + lwz r4,SL_DBAT5(r1)
528 mtspr SPRN_DBAT5U,r4
529 + lwz r4,SL_DBAT5+4(r1)
530 mtspr SPRN_DBAT5L,r4
531 + lwz r4,SL_DBAT6(r1)
532 mtspr SPRN_DBAT6U,r4
533 + lwz r4,SL_DBAT6+4(r1)
534 mtspr SPRN_DBAT6L,r4
535 + lwz r4,SL_DBAT7(r1)
536 mtspr SPRN_DBAT7U,r4
537 + lwz r4,SL_DBAT7+4(r1)
538 mtspr SPRN_DBAT7L,r4
539 + lwz r4,SL_IBAT4(r1)
540 mtspr SPRN_IBAT4U,r4
541 + lwz r4,SL_IBAT4+4(r1)
542 mtspr SPRN_IBAT4L,r4
543 + lwz r4,SL_IBAT5(r1)
544 mtspr SPRN_IBAT5U,r4
545 + lwz r4,SL_IBAT5+4(r1)
546 mtspr SPRN_IBAT5L,r4
547 + lwz r4,SL_IBAT6(r1)
548 mtspr SPRN_IBAT6U,r4
549 + lwz r4,SL_IBAT6+4(r1)
550 mtspr SPRN_IBAT6L,r4
551 + lwz r4,SL_IBAT7(r1)
552 mtspr SPRN_IBAT7U,r4
553 + lwz r4,SL_IBAT7+4(r1)
554 mtspr SPRN_IBAT7L,r4
555 END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
556
557 diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
558 index 8006c54a91e3..fd8166ffbffa 100644
559 --- a/arch/powerpc/platforms/powernv/npu-dma.c
560 +++ b/arch/powerpc/platforms/powernv/npu-dma.c
561 @@ -56,9 +56,22 @@ static struct dentry *atsd_threshold_dentry;
562 static struct pci_dev *get_pci_dev(struct device_node *dn)
563 {
564 struct pci_dn *pdn = PCI_DN(dn);
565 + struct pci_dev *pdev;
566
567 - return pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
568 + pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
569 pdn->busno, pdn->devfn);
570 +
571 + /*
572 + * pci_get_domain_bus_and_slot() increased the reference count of
573 + * the PCI device, but callers don't need that actually as the PE
574 + * already holds a reference to the device. Since callers aren't
575 + * aware of the reference count change, call pci_dev_put() now to
576 + * avoid leaks.
577 + */
578 + if (pdev)
579 + pci_dev_put(pdev);
580 +
581 + return pdev;
582 }
583
584 /* Given a NPU device get the associated PCI device. */
585 diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
586 index e4c658cda3a7..f99cd31b6fd1 100644
587 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c
588 +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
589 @@ -1012,6 +1012,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
590 if (!memblock_size)
591 return -EINVAL;
592
593 + if (!pr->old_prop)
594 + return 0;
595 +
596 p = (__be32 *) pr->old_prop->value;
597 if (!p)
598 return -EINVAL;
599 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
600 index 8671de126eac..baa7e36073f9 100644
601 --- a/arch/x86/events/amd/uncore.c
602 +++ b/arch/x86/events/amd/uncore.c
603 @@ -210,15 +210,22 @@ static int amd_uncore_event_init(struct perf_event *event)
604 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
605 hwc->idx = -1;
606
607 + if (event->cpu < 0)
608 + return -EINVAL;
609 +
610 /*
611 * SliceMask and ThreadMask need to be set for certain L3 events in
612 * Family 17h. For other events, the two fields do not affect the count.
613 */
614 - if (l3_mask)
615 - hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
616 + if (l3_mask && is_llc_event(event)) {
617 + int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4);
618
619 - if (event->cpu < 0)
620 - return -EINVAL;
621 + if (smp_num_siblings > 1)
622 + thread += cpu_data(event->cpu).apicid & 1;
623 +
624 + hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) &
625 + AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK;
626 + }
627
628 uncore = event_to_amd_uncore(event);
629 if (!uncore)
630 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
631 index c8b0bf2b0d5e..db5a2ba61753 100644
632 --- a/arch/x86/events/intel/core.c
633 +++ b/arch/x86/events/intel/core.c
634 @@ -2074,12 +2074,10 @@ static void intel_pmu_disable_event(struct perf_event *event)
635 cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
636 cpuc->intel_cp_status &= ~(1ull << hwc->idx);
637
638 - if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
639 + if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
640 intel_pmu_disable_fixed(hwc);
641 - return;
642 - }
643 -
644 - x86_pmu_disable_event(event);
645 + else
646 + x86_pmu_disable_event(event);
647
648 /*
649 * Needs to be called after x86_pmu_disable_event,
650 diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
651 index cc6dd4f78158..42fa3974c421 100644
652 --- a/arch/x86/events/intel/uncore.h
653 +++ b/arch/x86/events/intel/uncore.h
654 @@ -402,6 +402,16 @@ static inline bool is_freerunning_event(struct perf_event *event)
655 (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
656 }
657
658 +/* Check and reject invalid config */
659 +static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
660 + struct perf_event *event)
661 +{
662 + if (is_freerunning_event(event))
663 + return 0;
664 +
665 + return -EINVAL;
666 +}
667 +
668 static inline void uncore_disable_box(struct intel_uncore_box *box)
669 {
670 if (box->pmu->type->ops->disable_box)
671 diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
672 index b10e04387f38..8e4e8e423839 100644
673 --- a/arch/x86/events/intel/uncore_snbep.c
674 +++ b/arch/x86/events/intel/uncore_snbep.c
675 @@ -3585,6 +3585,7 @@ static struct uncore_event_desc skx_uncore_iio_freerunning_events[] = {
676
677 static struct intel_uncore_ops skx_uncore_iio_freerunning_ops = {
678 .read_counter = uncore_msr_read_counter,
679 + .hw_config = uncore_freerunning_hw_config,
680 };
681
682 static struct attribute *skx_uncore_iio_freerunning_formats_attr[] = {
683 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
684 index ce84388e540c..d266a4066289 100644
685 --- a/arch/x86/include/asm/atomic.h
686 +++ b/arch/x86/include/asm/atomic.h
687 @@ -54,7 +54,7 @@ static __always_inline void arch_atomic_add(int i, atomic_t *v)
688 {
689 asm volatile(LOCK_PREFIX "addl %1,%0"
690 : "+m" (v->counter)
691 - : "ir" (i));
692 + : "ir" (i) : "memory");
693 }
694
695 /**
696 @@ -68,7 +68,7 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
697 {
698 asm volatile(LOCK_PREFIX "subl %1,%0"
699 : "+m" (v->counter)
700 - : "ir" (i));
701 + : "ir" (i) : "memory");
702 }
703
704 /**
705 @@ -95,7 +95,7 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
706 static __always_inline void arch_atomic_inc(atomic_t *v)
707 {
708 asm volatile(LOCK_PREFIX "incl %0"
709 - : "+m" (v->counter));
710 + : "+m" (v->counter) :: "memory");
711 }
712 #define arch_atomic_inc arch_atomic_inc
713
714 @@ -108,7 +108,7 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
715 static __always_inline void arch_atomic_dec(atomic_t *v)
716 {
717 asm volatile(LOCK_PREFIX "decl %0"
718 - : "+m" (v->counter));
719 + : "+m" (v->counter) :: "memory");
720 }
721 #define arch_atomic_dec arch_atomic_dec
722
723 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
724 index 5f851d92eecd..55ca027f8c1c 100644
725 --- a/arch/x86/include/asm/atomic64_64.h
726 +++ b/arch/x86/include/asm/atomic64_64.h
727 @@ -45,7 +45,7 @@ static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
728 {
729 asm volatile(LOCK_PREFIX "addq %1,%0"
730 : "=m" (v->counter)
731 - : "er" (i), "m" (v->counter));
732 + : "er" (i), "m" (v->counter) : "memory");
733 }
734
735 /**
736 @@ -59,7 +59,7 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
737 {
738 asm volatile(LOCK_PREFIX "subq %1,%0"
739 : "=m" (v->counter)
740 - : "er" (i), "m" (v->counter));
741 + : "er" (i), "m" (v->counter) : "memory");
742 }
743
744 /**
745 @@ -87,7 +87,7 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
746 {
747 asm volatile(LOCK_PREFIX "incq %0"
748 : "=m" (v->counter)
749 - : "m" (v->counter));
750 + : "m" (v->counter) : "memory");
751 }
752 #define arch_atomic64_inc arch_atomic64_inc
753
754 @@ -101,7 +101,7 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
755 {
756 asm volatile(LOCK_PREFIX "decq %0"
757 : "=m" (v->counter)
758 - : "m" (v->counter));
759 + : "m" (v->counter) : "memory");
760 }
761 #define arch_atomic64_dec arch_atomic64_dec
762
763 diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
764 index 14de0432d288..84f848c2541a 100644
765 --- a/arch/x86/include/asm/barrier.h
766 +++ b/arch/x86/include/asm/barrier.h
767 @@ -80,8 +80,8 @@ do { \
768 })
769
770 /* Atomic operations are already serializing on x86 */
771 -#define __smp_mb__before_atomic() barrier()
772 -#define __smp_mb__after_atomic() barrier()
773 +#define __smp_mb__before_atomic() do { } while (0)
774 +#define __smp_mb__after_atomic() do { } while (0)
775
776 #include <asm-generic/barrier.h>
777
778 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
779 index 69037da75ea0..0cf704933f23 100644
780 --- a/arch/x86/include/asm/cpufeatures.h
781 +++ b/arch/x86/include/asm/cpufeatures.h
782 @@ -239,12 +239,14 @@
783 #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
784 #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
785 #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
786 +#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
787 #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
788 #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
789 #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
790 #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
791 #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
792 #define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
793 +#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
794 #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
795 #define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
796 #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
797 diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
798 index 2e38fb82b91d..aebedbaf5260 100644
799 --- a/arch/x86/include/asm/intel-family.h
800 +++ b/arch/x86/include/asm/intel-family.h
801 @@ -56,6 +56,7 @@
802 #define INTEL_FAM6_ICELAKE_XEON_D 0x6C
803 #define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
804 #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
805 +#define INTEL_FAM6_ICELAKE_NNPI 0x9D
806
807 /* "Small Core" Processors (Atom) */
808
809 diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
810 index 0c5fcbd998cf..9d863e8f9b3f 100644
811 --- a/arch/x86/kernel/cpu/cacheinfo.c
812 +++ b/arch/x86/kernel/cpu/cacheinfo.c
813 @@ -651,8 +651,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
814 if (c->x86 < 0x17) {
815 /* LLC is at the node level. */
816 per_cpu(cpu_llc_id, cpu) = node_id;
817 - } else if (c->x86 == 0x17 &&
818 - c->x86_model >= 0 && c->x86_model <= 0x1F) {
819 + } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
820 /*
821 * LLC is at the core complex level.
822 * Core complex ID is ApicId[3] for these processors.
823 diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
824 index d0dfb892c72f..aed45b8895d5 100644
825 --- a/arch/x86/kernel/cpu/mkcapflags.sh
826 +++ b/arch/x86/kernel/cpu/mkcapflags.sh
827 @@ -4,6 +4,8 @@
828 # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
829 #
830
831 +set -e
832 +
833 IN=$1
834 OUT=$2
835
836 diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
837 index ddb1ca6923b1..5b4c32799094 100644
838 --- a/arch/x86/kernel/mpparse.c
839 +++ b/arch/x86/kernel/mpparse.c
840 @@ -547,17 +547,15 @@ void __init default_get_smp_config(unsigned int early)
841 * local APIC has default address
842 */
843 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
844 - return;
845 + goto out;
846 }
847
848 pr_info("Default MP configuration #%d\n", mpf->feature1);
849 construct_default_ISA_mptable(mpf->feature1);
850
851 } else if (mpf->physptr) {
852 - if (check_physptr(mpf, early)) {
853 - early_memunmap(mpf, sizeof(*mpf));
854 - return;
855 - }
856 + if (check_physptr(mpf, early))
857 + goto out;
858 } else
859 BUG();
860
861 @@ -566,7 +564,7 @@ void __init default_get_smp_config(unsigned int early)
862 /*
863 * Only use the first configuration found.
864 */
865 -
866 +out:
867 early_memunmap(mpf, sizeof(*mpf));
868 }
869
870 diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
871 index 952aebd0a8a3..acc8d217f656 100644
872 --- a/arch/x86/kvm/pmu.c
873 +++ b/arch/x86/kvm/pmu.c
874 @@ -131,8 +131,8 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
875 intr ? kvm_perf_overflow_intr :
876 kvm_perf_overflow, pmc);
877 if (IS_ERR(event)) {
878 - printk_once("kvm_pmu: event creation failed %ld\n",
879 - PTR_ERR(event));
880 + pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
881 + PTR_ERR(event), pmc->idx);
882 return;
883 }
884
885 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
886 index c630e02836a8..527524134693 100644
887 --- a/block/blk-cgroup.c
888 +++ b/block/blk-cgroup.c
889 @@ -1016,8 +1016,12 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
890 }
891 next:
892 if (has_stats) {
893 - off += scnprintf(buf+off, size-off, "\n");
894 - seq_commit(sf, off);
895 + if (off < size - 1) {
896 + off += scnprintf(buf+off, size-off, "\n");
897 + seq_commit(sf, off);
898 + } else {
899 + seq_commit(sf, -1);
900 + }
901 }
902 }
903
904 diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
905 index 6b8396ccb5c4..f4f7c73fb828 100644
906 --- a/block/blk-iolatency.c
907 +++ b/block/blk-iolatency.c
908 @@ -565,6 +565,10 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
909 if (!blkg)
910 return;
911
912 + /* We didn't actually submit this bio, don't account it. */
913 + if (bio->bi_status == BLK_STS_AGAIN)
914 + return;
915 +
916 iolat = blkg_to_lat(bio->bi_blkg);
917 if (!iolat)
918 return;
919 @@ -742,8 +746,10 @@ static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
920
921 if (!oldval && val)
922 return 1;
923 - if (oldval && !val)
924 + if (oldval && !val) {
925 + blkcg_clear_delay(blkg);
926 return -1;
927 + }
928 return 0;
929 }
930
931 diff --git a/block/blk-throttle.c b/block/blk-throttle.c
932 index 01d0620a4e4a..caee658609d7 100644
933 --- a/block/blk-throttle.c
934 +++ b/block/blk-throttle.c
935 @@ -892,13 +892,10 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
936 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
937 u64 tmp;
938
939 - jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
940 -
941 - /* Slice has just started. Consider one slice interval */
942 - if (!jiffy_elapsed)
943 - jiffy_elapsed_rnd = tg->td->throtl_slice;
944 + jiffy_elapsed = jiffies - tg->slice_start[rw];
945
946 - jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
947 + /* Round up to the next throttle slice, wait time must be nonzero */
948 + jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
949
950 /*
951 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
952 diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
953 index f3702e533ff4..d8a73d94bb30 100644
954 --- a/crypto/asymmetric_keys/Kconfig
955 +++ b/crypto/asymmetric_keys/Kconfig
956 @@ -15,6 +15,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
957 select MPILIB
958 select CRYPTO_HASH_INFO
959 select CRYPTO_AKCIPHER
960 + select CRYPTO_HASH
961 help
962 This option provides support for asymmetric public key type handling.
963 If signature generation and/or verification are to be used,
964 @@ -34,6 +35,7 @@ config X509_CERTIFICATE_PARSER
965 config PKCS7_MESSAGE_PARSER
966 tristate "PKCS#7 message parser"
967 depends on X509_CERTIFICATE_PARSER
968 + select CRYPTO_HASH
969 select ASN1
970 select OID_REGISTRY
971 help
972 @@ -56,6 +58,7 @@ config SIGNED_PE_FILE_VERIFICATION
973 bool "Support for PE file signature verification"
974 depends on PKCS7_MESSAGE_PARSER=y
975 depends on SYSTEM_DATA_VERIFICATION
976 + select CRYPTO_HASH
977 select ASN1
978 select OID_REGISTRY
979 help
980 diff --git a/crypto/chacha20poly1305.c b/crypto/chacha20poly1305.c
981 index 4d6f51bcdfab..af8afe5c06ea 100644
982 --- a/crypto/chacha20poly1305.c
983 +++ b/crypto/chacha20poly1305.c
984 @@ -67,6 +67,8 @@ struct chachapoly_req_ctx {
985 unsigned int cryptlen;
986 /* Actual AD, excluding IV */
987 unsigned int assoclen;
988 + /* request flags, with MAY_SLEEP cleared if needed */
989 + u32 flags;
990 union {
991 struct poly_req poly;
992 struct chacha_req chacha;
993 @@ -76,8 +78,12 @@ struct chachapoly_req_ctx {
994 static inline void async_done_continue(struct aead_request *req, int err,
995 int (*cont)(struct aead_request *))
996 {
997 - if (!err)
998 + if (!err) {
999 + struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
1000 +
1001 + rctx->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
1002 err = cont(req);
1003 + }
1004
1005 if (err != -EINPROGRESS && err != -EBUSY)
1006 aead_request_complete(req, err);
1007 @@ -144,7 +150,7 @@ static int chacha_decrypt(struct aead_request *req)
1008 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
1009 }
1010
1011 - skcipher_request_set_callback(&creq->req, aead_request_flags(req),
1012 + skcipher_request_set_callback(&creq->req, rctx->flags,
1013 chacha_decrypt_done, req);
1014 skcipher_request_set_tfm(&creq->req, ctx->chacha);
1015 skcipher_request_set_crypt(&creq->req, src, dst,
1016 @@ -188,7 +194,7 @@ static int poly_tail(struct aead_request *req)
1017 memcpy(&preq->tail.cryptlen, &len, sizeof(len));
1018 sg_set_buf(preq->src, &preq->tail, sizeof(preq->tail));
1019
1020 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1021 + ahash_request_set_callback(&preq->req, rctx->flags,
1022 poly_tail_done, req);
1023 ahash_request_set_tfm(&preq->req, ctx->poly);
1024 ahash_request_set_crypt(&preq->req, preq->src,
1025 @@ -219,7 +225,7 @@ static int poly_cipherpad(struct aead_request *req)
1026 sg_init_table(preq->src, 1);
1027 sg_set_buf(preq->src, &preq->pad, padlen);
1028
1029 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1030 + ahash_request_set_callback(&preq->req, rctx->flags,
1031 poly_cipherpad_done, req);
1032 ahash_request_set_tfm(&preq->req, ctx->poly);
1033 ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
1034 @@ -250,7 +256,7 @@ static int poly_cipher(struct aead_request *req)
1035 sg_init_table(rctx->src, 2);
1036 crypt = scatterwalk_ffwd(rctx->src, crypt, req->assoclen);
1037
1038 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1039 + ahash_request_set_callback(&preq->req, rctx->flags,
1040 poly_cipher_done, req);
1041 ahash_request_set_tfm(&preq->req, ctx->poly);
1042 ahash_request_set_crypt(&preq->req, crypt, NULL, rctx->cryptlen);
1043 @@ -280,7 +286,7 @@ static int poly_adpad(struct aead_request *req)
1044 sg_init_table(preq->src, 1);
1045 sg_set_buf(preq->src, preq->pad, padlen);
1046
1047 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1048 + ahash_request_set_callback(&preq->req, rctx->flags,
1049 poly_adpad_done, req);
1050 ahash_request_set_tfm(&preq->req, ctx->poly);
1051 ahash_request_set_crypt(&preq->req, preq->src, NULL, padlen);
1052 @@ -304,7 +310,7 @@ static int poly_ad(struct aead_request *req)
1053 struct poly_req *preq = &rctx->u.poly;
1054 int err;
1055
1056 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1057 + ahash_request_set_callback(&preq->req, rctx->flags,
1058 poly_ad_done, req);
1059 ahash_request_set_tfm(&preq->req, ctx->poly);
1060 ahash_request_set_crypt(&preq->req, req->src, NULL, rctx->assoclen);
1061 @@ -331,7 +337,7 @@ static int poly_setkey(struct aead_request *req)
1062 sg_init_table(preq->src, 1);
1063 sg_set_buf(preq->src, rctx->key, sizeof(rctx->key));
1064
1065 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1066 + ahash_request_set_callback(&preq->req, rctx->flags,
1067 poly_setkey_done, req);
1068 ahash_request_set_tfm(&preq->req, ctx->poly);
1069 ahash_request_set_crypt(&preq->req, preq->src, NULL, sizeof(rctx->key));
1070 @@ -355,7 +361,7 @@ static int poly_init(struct aead_request *req)
1071 struct poly_req *preq = &rctx->u.poly;
1072 int err;
1073
1074 - ahash_request_set_callback(&preq->req, aead_request_flags(req),
1075 + ahash_request_set_callback(&preq->req, rctx->flags,
1076 poly_init_done, req);
1077 ahash_request_set_tfm(&preq->req, ctx->poly);
1078
1079 @@ -393,7 +399,7 @@ static int poly_genkey(struct aead_request *req)
1080
1081 chacha_iv(creq->iv, req, 0);
1082
1083 - skcipher_request_set_callback(&creq->req, aead_request_flags(req),
1084 + skcipher_request_set_callback(&creq->req, rctx->flags,
1085 poly_genkey_done, req);
1086 skcipher_request_set_tfm(&creq->req, ctx->chacha);
1087 skcipher_request_set_crypt(&creq->req, creq->src, creq->src,
1088 @@ -433,7 +439,7 @@ static int chacha_encrypt(struct aead_request *req)
1089 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen);
1090 }
1091
1092 - skcipher_request_set_callback(&creq->req, aead_request_flags(req),
1093 + skcipher_request_set_callback(&creq->req, rctx->flags,
1094 chacha_encrypt_done, req);
1095 skcipher_request_set_tfm(&creq->req, ctx->chacha);
1096 skcipher_request_set_crypt(&creq->req, src, dst,
1097 @@ -451,6 +457,7 @@ static int chachapoly_encrypt(struct aead_request *req)
1098 struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
1099
1100 rctx->cryptlen = req->cryptlen;
1101 + rctx->flags = aead_request_flags(req);
1102
1103 /* encrypt call chain:
1104 * - chacha_encrypt/done()
1105 @@ -472,6 +479,7 @@ static int chachapoly_decrypt(struct aead_request *req)
1106 struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
1107
1108 rctx->cryptlen = req->cryptlen - POLY1305_DIGEST_SIZE;
1109 + rctx->flags = aead_request_flags(req);
1110
1111 /* decrypt call chain:
1112 * - poly_genkey/done()
1113 diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
1114 index d9f192b953b2..591b52d3bdca 100644
1115 --- a/crypto/ghash-generic.c
1116 +++ b/crypto/ghash-generic.c
1117 @@ -34,6 +34,7 @@ static int ghash_setkey(struct crypto_shash *tfm,
1118 const u8 *key, unsigned int keylen)
1119 {
1120 struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
1121 + be128 k;
1122
1123 if (keylen != GHASH_BLOCK_SIZE) {
1124 crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1125 @@ -42,7 +43,12 @@ static int ghash_setkey(struct crypto_shash *tfm,
1126
1127 if (ctx->gf128)
1128 gf128mul_free_4k(ctx->gf128);
1129 - ctx->gf128 = gf128mul_init_4k_lle((be128 *)key);
1130 +
1131 + BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
1132 + memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
1133 + ctx->gf128 = gf128mul_init_4k_lle(&k);
1134 + memzero_explicit(&k, GHASH_BLOCK_SIZE);
1135 +
1136 if (!ctx->gf128)
1137 return -ENOMEM;
1138
1139 diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
1140 index 7c3382facc82..600bd288881d 100644
1141 --- a/crypto/serpent_generic.c
1142 +++ b/crypto/serpent_generic.c
1143 @@ -229,7 +229,13 @@
1144 x4 ^= x2; \
1145 })
1146
1147 -static void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2, u32 r3, u32 r4, u32 *k)
1148 +/*
1149 + * both gcc and clang have misoptimized this function in the past,
1150 + * producing horrible object code from spilling temporary variables
1151 + * on the stack. Forcing this part out of line avoids that.
1152 + */
1153 +static noinline void __serpent_setkey_sbox(u32 r0, u32 r1, u32 r2,
1154 + u32 r3, u32 r4, u32 *k)
1155 {
1156 k += 100;
1157 S3(r3, r4, r0, r1, r2); store_and_load_keys(r1, r2, r4, r3, 28, 24);
1158 diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
1159 index 704bebbd35b0..298180bf7e3c 100644
1160 --- a/drivers/acpi/acpica/acevents.h
1161 +++ b/drivers/acpi/acpica/acevents.h
1162 @@ -69,7 +69,8 @@ acpi_status
1163 acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked);
1164
1165 acpi_status
1166 -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
1167 +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
1168 + u8 clear_on_enable);
1169
1170 acpi_status
1171 acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info);
1172 diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
1173 index e10fec99a182..4b5d3b4c627a 100644
1174 --- a/drivers/acpi/acpica/evgpe.c
1175 +++ b/drivers/acpi/acpica/evgpe.c
1176 @@ -146,6 +146,7 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
1177 * FUNCTION: acpi_ev_add_gpe_reference
1178 *
1179 * PARAMETERS: gpe_event_info - Add a reference to this GPE
1180 + * clear_on_enable - Clear GPE status before enabling it
1181 *
1182 * RETURN: Status
1183 *
1184 @@ -155,7 +156,8 @@ acpi_ev_mask_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 is_masked)
1185 ******************************************************************************/
1186
1187 acpi_status
1188 -acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
1189 +acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info,
1190 + u8 clear_on_enable)
1191 {
1192 acpi_status status = AE_OK;
1193
1194 @@ -170,6 +172,10 @@ acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
1195
1196 /* Enable on first reference */
1197
1198 + if (clear_on_enable) {
1199 + (void)acpi_hw_clear_gpe(gpe_event_info);
1200 + }
1201 +
1202 status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
1203 if (ACPI_SUCCESS(status)) {
1204 status = acpi_ev_enable_gpe(gpe_event_info);
1205 diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
1206 index b253063b09d3..8d96270ed8c7 100644
1207 --- a/drivers/acpi/acpica/evgpeblk.c
1208 +++ b/drivers/acpi/acpica/evgpeblk.c
1209 @@ -453,7 +453,7 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
1210 continue;
1211 }
1212
1213 - status = acpi_ev_add_gpe_reference(gpe_event_info);
1214 + status = acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
1215 if (ACPI_FAILURE(status)) {
1216 ACPI_EXCEPTION((AE_INFO, status,
1217 "Could not enable GPE 0x%02X",
1218 diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
1219 index febc332b00ac..841557bda641 100644
1220 --- a/drivers/acpi/acpica/evxface.c
1221 +++ b/drivers/acpi/acpica/evxface.c
1222 @@ -971,7 +971,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
1223 ACPI_GPE_DISPATCH_METHOD) ||
1224 (ACPI_GPE_DISPATCH_TYPE(handler->original_flags) ==
1225 ACPI_GPE_DISPATCH_NOTIFY)) && handler->originally_enabled) {
1226 - (void)acpi_ev_add_gpe_reference(gpe_event_info);
1227 + (void)acpi_ev_add_gpe_reference(gpe_event_info, FALSE);
1228 if (ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
1229
1230 /* Poll edge triggered GPEs to handle existing events */
1231 diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
1232 index b2d5f66cc1b0..4188731e7c40 100644
1233 --- a/drivers/acpi/acpica/evxfgpe.c
1234 +++ b/drivers/acpi/acpica/evxfgpe.c
1235 @@ -108,7 +108,7 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
1236 if (gpe_event_info) {
1237 if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
1238 ACPI_GPE_DISPATCH_NONE) {
1239 - status = acpi_ev_add_gpe_reference(gpe_event_info);
1240 + status = acpi_ev_add_gpe_reference(gpe_event_info, TRUE);
1241 if (ACPI_SUCCESS(status) &&
1242 ACPI_GPE_IS_POLLING_NEEDED(gpe_event_info)) {
1243
1244 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
1245 index 01306c018398..ccc80ff57eb2 100644
1246 --- a/drivers/ata/libata-eh.c
1247 +++ b/drivers/ata/libata-eh.c
1248 @@ -1490,7 +1490,7 @@ static int ata_eh_read_log_10h(struct ata_device *dev,
1249 tf->hob_lbah = buf[10];
1250 tf->nsect = buf[12];
1251 tf->hob_nsect = buf[13];
1252 - if (ata_id_has_ncq_autosense(dev->id))
1253 + if (dev->class == ATA_DEV_ZAC && ata_id_has_ncq_autosense(dev->id))
1254 tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];
1255
1256 return 0;
1257 @@ -1737,7 +1737,8 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1258 memcpy(&qc->result_tf, &tf, sizeof(tf));
1259 qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
1260 qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
1261 - if ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary) {
1262 + if (dev->class == ATA_DEV_ZAC &&
1263 + ((qc->result_tf.command & ATA_SENSE) || qc->result_tf.auxiliary)) {
1264 char sense_key, asc, ascq;
1265
1266 sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
1267 @@ -1791,10 +1792,11 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1268 }
1269
1270 switch (qc->dev->class) {
1271 - case ATA_DEV_ATA:
1272 case ATA_DEV_ZAC:
1273 if (stat & ATA_SENSE)
1274 ata_eh_request_sense(qc, qc->scsicmd);
1275 + /* fall through */
1276 + case ATA_DEV_ATA:
1277 if (err & ATA_ICRC)
1278 qc->err_mask |= AC_ERR_ATA_BUS;
1279 if (err & (ATA_UNC | ATA_AMNF))
1280 diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
1281 index 87b562e49a43..c9687c8b2347 100644
1282 --- a/drivers/base/regmap/regmap-debugfs.c
1283 +++ b/drivers/base/regmap/regmap-debugfs.c
1284 @@ -575,6 +575,8 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
1285 }
1286
1287 if (!strcmp(name, "dummy")) {
1288 + kfree(map->debugfs_name);
1289 +
1290 map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
1291 dummy_index);
1292 name = map->debugfs_name;
1293 diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1294 index 0360a90ad6b6..6c9f6988bc09 100644
1295 --- a/drivers/base/regmap/regmap.c
1296 +++ b/drivers/base/regmap/regmap.c
1297 @@ -1618,6 +1618,8 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1298 map->format.reg_bytes +
1299 map->format.pad_bytes,
1300 val, val_len);
1301 + else
1302 + ret = -ENOTSUPP;
1303
1304 /* If that didn't work fall back on linearising by hand. */
1305 if (ret == -ENOTSUPP) {
1306 diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
1307 index a8de56f1936d..4a9a4d12721a 100644
1308 --- a/drivers/block/floppy.c
1309 +++ b/drivers/block/floppy.c
1310 @@ -2119,6 +2119,9 @@ static void setup_format_params(int track)
1311 raw_cmd->kernel_data = floppy_track_buffer;
1312 raw_cmd->length = 4 * F_SECT_PER_TRACK;
1313
1314 + if (!F_SECT_PER_TRACK)
1315 + return;
1316 +
1317 /* allow for about 30ms for data transport per track */
1318 head_shift = (F_SECT_PER_TRACK + 5) / 6;
1319
1320 @@ -3241,8 +3244,12 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
1321 int cnt;
1322
1323 /* sanity checking for parameters. */
1324 - if (g->sect <= 0 ||
1325 - g->head <= 0 ||
1326 + if ((int)g->sect <= 0 ||
1327 + (int)g->head <= 0 ||
1328 + /* check for overflow in max_sector */
1329 + (int)(g->sect * g->head) <= 0 ||
1330 + /* check for zero in F_SECT_PER_TRACK */
1331 + (unsigned char)((g->sect << 2) >> FD_SIZECODE(g)) == 0 ||
1332 g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
1333 /* check if reserved bits are set */
1334 (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_SECTBASEMASK)) != 0)
1335 @@ -3386,6 +3393,24 @@ static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1336 return 0;
1337 }
1338
1339 +static bool valid_floppy_drive_params(const short autodetect[8],
1340 + int native_format)
1341 +{
1342 + size_t floppy_type_size = ARRAY_SIZE(floppy_type);
1343 + size_t i = 0;
1344 +
1345 + for (i = 0; i < 8; ++i) {
1346 + if (autodetect[i] < 0 ||
1347 + autodetect[i] >= floppy_type_size)
1348 + return false;
1349 + }
1350 +
1351 + if (native_format < 0 || native_format >= floppy_type_size)
1352 + return false;
1353 +
1354 + return true;
1355 +}
1356 +
1357 static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
1358 unsigned long param)
1359 {
1360 @@ -3512,6 +3537,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
1361 SUPBOUND(size, strlen((const char *)outparam) + 1);
1362 break;
1363 case FDSETDRVPRM:
1364 + if (!valid_floppy_drive_params(inparam.dp.autodetect,
1365 + inparam.dp.native_format))
1366 + return -EINVAL;
1367 *UDP = inparam.dp;
1368 break;
1369 case FDGETDRVPRM:
1370 @@ -3709,6 +3737,8 @@ static int compat_setdrvprm(int drive,
1371 return -EPERM;
1372 if (copy_from_user(&v, arg, sizeof(struct compat_floppy_drive_params)))
1373 return -EFAULT;
1374 + if (!valid_floppy_drive_params(v.autodetect, v.native_format))
1375 + return -EINVAL;
1376 mutex_lock(&floppy_mutex);
1377 UDP->cmos = v.cmos;
1378 UDP->max_dtr = v.max_dtr;
1379 diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
1380 index 093b614d6524..c5c0b7c89481 100644
1381 --- a/drivers/block/null_blk_main.c
1382 +++ b/drivers/block/null_blk_main.c
1383 @@ -321,11 +321,12 @@ static ssize_t nullb_device_power_store(struct config_item *item,
1384 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
1385 dev->power = newp;
1386 } else if (dev->power && !newp) {
1387 - mutex_lock(&lock);
1388 - dev->power = newp;
1389 - null_del_dev(dev->nullb);
1390 - mutex_unlock(&lock);
1391 - clear_bit(NULLB_DEV_FL_UP, &dev->flags);
1392 + if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
1393 + mutex_lock(&lock);
1394 + dev->power = newp;
1395 + null_del_dev(dev->nullb);
1396 + mutex_unlock(&lock);
1397 + }
1398 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
1399 }
1400
1401 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1402 index 40a4f95f6178..75cf605f54e5 100644
1403 --- a/drivers/bluetooth/btusb.c
1404 +++ b/drivers/bluetooth/btusb.c
1405 @@ -277,7 +277,9 @@ static const struct usb_device_id blacklist_table[] = {
1406 { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
1407 { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
1408 { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
1409 + { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
1410 { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
1411 + { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
1412
1413 /* Broadcom BCM2035 */
1414 { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
1415 diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
1416 index 1a7f0c82fb36..66fe1e6dc631 100644
1417 --- a/drivers/bluetooth/hci_bcsp.c
1418 +++ b/drivers/bluetooth/hci_bcsp.c
1419 @@ -759,6 +759,11 @@ static int bcsp_close(struct hci_uart *hu)
1420 skb_queue_purge(&bcsp->rel);
1421 skb_queue_purge(&bcsp->unrel);
1422
1423 + if (bcsp->rx_skb) {
1424 + kfree_skb(bcsp->rx_skb);
1425 + bcsp->rx_skb = NULL;
1426 + }
1427 +
1428 kfree(bcsp);
1429 return 0;
1430 }
1431 diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
1432 index d55c30f6981d..aaf5bfa9bd9c 100644
1433 --- a/drivers/clocksource/exynos_mct.c
1434 +++ b/drivers/clocksource/exynos_mct.c
1435 @@ -211,7 +211,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
1436
1437 static struct clocksource mct_frc = {
1438 .name = "mct-frc",
1439 - .rating = 400,
1440 + .rating = 450, /* use value higher than ARM arch timer */
1441 .read = exynos4_frc_read,
1442 .mask = CLOCKSOURCE_MASK(32),
1443 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
1444 @@ -466,7 +466,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
1445 evt->set_state_oneshot_stopped = set_state_shutdown;
1446 evt->tick_resume = set_state_shutdown;
1447 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
1448 - evt->rating = 450;
1449 + evt->rating = 500; /* use value higher than ARM arch timer */
1450
1451 exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
1452
1453 diff --git a/drivers/crypto/amcc/crypto4xx_alg.c b/drivers/crypto/amcc/crypto4xx_alg.c
1454 index 0c85a5123f85..1d87deca32ed 100644
1455 --- a/drivers/crypto/amcc/crypto4xx_alg.c
1456 +++ b/drivers/crypto/amcc/crypto4xx_alg.c
1457 @@ -76,12 +76,16 @@ static void set_dynamic_sa_command_1(struct dynamic_sa_ctl *sa, u32 cm,
1458 }
1459
1460 static inline int crypto4xx_crypt(struct skcipher_request *req,
1461 - const unsigned int ivlen, bool decrypt)
1462 + const unsigned int ivlen, bool decrypt,
1463 + bool check_blocksize)
1464 {
1465 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
1466 struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(cipher);
1467 __le32 iv[AES_IV_SIZE];
1468
1469 + if (check_blocksize && !IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
1470 + return -EINVAL;
1471 +
1472 if (ivlen)
1473 crypto4xx_memcpy_to_le32(iv, req->iv, ivlen);
1474
1475 @@ -90,24 +94,34 @@ static inline int crypto4xx_crypt(struct skcipher_request *req,
1476 ctx->sa_len, 0, NULL);
1477 }
1478
1479 -int crypto4xx_encrypt_noiv(struct skcipher_request *req)
1480 +int crypto4xx_encrypt_noiv_block(struct skcipher_request *req)
1481 +{
1482 + return crypto4xx_crypt(req, 0, false, true);
1483 +}
1484 +
1485 +int crypto4xx_encrypt_iv_stream(struct skcipher_request *req)
1486 +{
1487 + return crypto4xx_crypt(req, AES_IV_SIZE, false, false);
1488 +}
1489 +
1490 +int crypto4xx_decrypt_noiv_block(struct skcipher_request *req)
1491 {
1492 - return crypto4xx_crypt(req, 0, false);
1493 + return crypto4xx_crypt(req, 0, true, true);
1494 }
1495
1496 -int crypto4xx_encrypt_iv(struct skcipher_request *req)
1497 +int crypto4xx_decrypt_iv_stream(struct skcipher_request *req)
1498 {
1499 - return crypto4xx_crypt(req, AES_IV_SIZE, false);
1500 + return crypto4xx_crypt(req, AES_IV_SIZE, true, false);
1501 }
1502
1503 -int crypto4xx_decrypt_noiv(struct skcipher_request *req)
1504 +int crypto4xx_encrypt_iv_block(struct skcipher_request *req)
1505 {
1506 - return crypto4xx_crypt(req, 0, true);
1507 + return crypto4xx_crypt(req, AES_IV_SIZE, false, true);
1508 }
1509
1510 -int crypto4xx_decrypt_iv(struct skcipher_request *req)
1511 +int crypto4xx_decrypt_iv_block(struct skcipher_request *req)
1512 {
1513 - return crypto4xx_crypt(req, AES_IV_SIZE, true);
1514 + return crypto4xx_crypt(req, AES_IV_SIZE, true, true);
1515 }
1516
1517 /**
1518 @@ -278,8 +292,8 @@ crypto4xx_ctr_crypt(struct skcipher_request *req, bool encrypt)
1519 return ret;
1520 }
1521
1522 - return encrypt ? crypto4xx_encrypt_iv(req)
1523 - : crypto4xx_decrypt_iv(req);
1524 + return encrypt ? crypto4xx_encrypt_iv_stream(req)
1525 + : crypto4xx_decrypt_iv_stream(req);
1526 }
1527
1528 static int crypto4xx_sk_setup_fallback(struct crypto4xx_ctx *ctx,
1529 diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
1530 index d2ec9fd1b8bb..6386e1784fe4 100644
1531 --- a/drivers/crypto/amcc/crypto4xx_core.c
1532 +++ b/drivers/crypto/amcc/crypto4xx_core.c
1533 @@ -1153,8 +1153,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1534 .max_keysize = AES_MAX_KEY_SIZE,
1535 .ivsize = AES_IV_SIZE,
1536 .setkey = crypto4xx_setkey_aes_cbc,
1537 - .encrypt = crypto4xx_encrypt_iv,
1538 - .decrypt = crypto4xx_decrypt_iv,
1539 + .encrypt = crypto4xx_encrypt_iv_block,
1540 + .decrypt = crypto4xx_decrypt_iv_block,
1541 .init = crypto4xx_sk_init,
1542 .exit = crypto4xx_sk_exit,
1543 } },
1544 @@ -1173,8 +1173,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1545 .max_keysize = AES_MAX_KEY_SIZE,
1546 .ivsize = AES_IV_SIZE,
1547 .setkey = crypto4xx_setkey_aes_cfb,
1548 - .encrypt = crypto4xx_encrypt_iv,
1549 - .decrypt = crypto4xx_decrypt_iv,
1550 + .encrypt = crypto4xx_encrypt_iv_stream,
1551 + .decrypt = crypto4xx_decrypt_iv_stream,
1552 .init = crypto4xx_sk_init,
1553 .exit = crypto4xx_sk_exit,
1554 } },
1555 @@ -1186,7 +1186,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1556 .cra_flags = CRYPTO_ALG_NEED_FALLBACK |
1557 CRYPTO_ALG_ASYNC |
1558 CRYPTO_ALG_KERN_DRIVER_ONLY,
1559 - .cra_blocksize = AES_BLOCK_SIZE,
1560 + .cra_blocksize = 1,
1561 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1562 .cra_module = THIS_MODULE,
1563 },
1564 @@ -1206,7 +1206,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1565 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1566 .cra_flags = CRYPTO_ALG_ASYNC |
1567 CRYPTO_ALG_KERN_DRIVER_ONLY,
1568 - .cra_blocksize = AES_BLOCK_SIZE,
1569 + .cra_blocksize = 1,
1570 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1571 .cra_module = THIS_MODULE,
1572 },
1573 @@ -1226,15 +1226,15 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1574 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1575 .cra_flags = CRYPTO_ALG_ASYNC |
1576 CRYPTO_ALG_KERN_DRIVER_ONLY,
1577 - .cra_blocksize = AES_BLOCK_SIZE,
1578 + .cra_blocksize = 1,
1579 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1580 .cra_module = THIS_MODULE,
1581 },
1582 .min_keysize = AES_MIN_KEY_SIZE,
1583 .max_keysize = AES_MAX_KEY_SIZE,
1584 .setkey = crypto4xx_setkey_aes_ecb,
1585 - .encrypt = crypto4xx_encrypt_noiv,
1586 - .decrypt = crypto4xx_decrypt_noiv,
1587 + .encrypt = crypto4xx_encrypt_noiv_block,
1588 + .decrypt = crypto4xx_decrypt_noiv_block,
1589 .init = crypto4xx_sk_init,
1590 .exit = crypto4xx_sk_exit,
1591 } },
1592 @@ -1245,7 +1245,7 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1593 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1594 .cra_flags = CRYPTO_ALG_ASYNC |
1595 CRYPTO_ALG_KERN_DRIVER_ONLY,
1596 - .cra_blocksize = AES_BLOCK_SIZE,
1597 + .cra_blocksize = 1,
1598 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1599 .cra_module = THIS_MODULE,
1600 },
1601 @@ -1253,8 +1253,8 @@ static struct crypto4xx_alg_common crypto4xx_alg[] = {
1602 .max_keysize = AES_MAX_KEY_SIZE,
1603 .ivsize = AES_IV_SIZE,
1604 .setkey = crypto4xx_setkey_aes_ofb,
1605 - .encrypt = crypto4xx_encrypt_iv,
1606 - .decrypt = crypto4xx_decrypt_iv,
1607 + .encrypt = crypto4xx_encrypt_iv_stream,
1608 + .decrypt = crypto4xx_decrypt_iv_stream,
1609 .init = crypto4xx_sk_init,
1610 .exit = crypto4xx_sk_exit,
1611 } },
1612 diff --git a/drivers/crypto/amcc/crypto4xx_core.h b/drivers/crypto/amcc/crypto4xx_core.h
1613 index e2ca56722f07..21a6bbcedc55 100644
1614 --- a/drivers/crypto/amcc/crypto4xx_core.h
1615 +++ b/drivers/crypto/amcc/crypto4xx_core.h
1616 @@ -179,10 +179,12 @@ int crypto4xx_setkey_rfc3686(struct crypto_skcipher *cipher,
1617 const u8 *key, unsigned int keylen);
1618 int crypto4xx_encrypt_ctr(struct skcipher_request *req);
1619 int crypto4xx_decrypt_ctr(struct skcipher_request *req);
1620 -int crypto4xx_encrypt_iv(struct skcipher_request *req);
1621 -int crypto4xx_decrypt_iv(struct skcipher_request *req);
1622 -int crypto4xx_encrypt_noiv(struct skcipher_request *req);
1623 -int crypto4xx_decrypt_noiv(struct skcipher_request *req);
1624 +int crypto4xx_encrypt_iv_stream(struct skcipher_request *req);
1625 +int crypto4xx_decrypt_iv_stream(struct skcipher_request *req);
1626 +int crypto4xx_encrypt_iv_block(struct skcipher_request *req);
1627 +int crypto4xx_decrypt_iv_block(struct skcipher_request *req);
1628 +int crypto4xx_encrypt_noiv_block(struct skcipher_request *req);
1629 +int crypto4xx_decrypt_noiv_block(struct skcipher_request *req);
1630 int crypto4xx_rfc3686_encrypt(struct skcipher_request *req);
1631 int crypto4xx_rfc3686_decrypt(struct skcipher_request *req);
1632 int crypto4xx_sha1_alg_init(struct crypto_tfm *tfm);
1633 diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
1634 index 53ab1f140a26..8a3ed4031206 100644
1635 --- a/drivers/crypto/amcc/crypto4xx_trng.c
1636 +++ b/drivers/crypto/amcc/crypto4xx_trng.c
1637 @@ -111,7 +111,6 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
1638 return;
1639
1640 err_out:
1641 - of_node_put(trng);
1642 iounmap(dev->trng_base);
1643 kfree(rng);
1644 dev->trng_base = NULL;
1645 diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
1646 index 9bc54c3c2cb9..1907945f82b7 100644
1647 --- a/drivers/crypto/caam/caamalg.c
1648 +++ b/drivers/crypto/caam/caamalg.c
1649 @@ -887,6 +887,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1650 struct ablkcipher_request *req = context;
1651 struct ablkcipher_edesc *edesc;
1652 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
1653 + struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
1654 int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
1655
1656 #ifdef DEBUG
1657 @@ -911,10 +912,11 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
1658
1659 /*
1660 * The crypto API expects us to set the IV (req->info) to the last
1661 - * ciphertext block. This is used e.g. by the CTS mode.
1662 + * ciphertext block when running in CBC mode.
1663 */
1664 - scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
1665 - ivsize, 0);
1666 + if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
1667 + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
1668 + ivsize, ivsize, 0);
1669
1670 /* In case initial IV was generated, copy it in GIVCIPHER request */
1671 if (edesc->iv_dir == DMA_FROM_DEVICE) {
1672 @@ -1651,10 +1653,11 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
1673
1674 /*
1675 * The crypto API expects us to set the IV (req->info) to the last
1676 - * ciphertext block.
1677 + * ciphertext block when running in CBC mode.
1678 */
1679 - scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
1680 - ivsize, 0);
1681 + if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
1682 + scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
1683 + ivsize, ivsize, 0);
1684
1685 /* Create and submit job descriptor*/
1686 init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
1687 diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
1688 index 1b5035d56288..9b6d8972a565 100644
1689 --- a/drivers/crypto/ccp/ccp-dev.c
1690 +++ b/drivers/crypto/ccp/ccp-dev.c
1691 @@ -35,56 +35,62 @@ struct ccp_tasklet_data {
1692 };
1693
1694 /* Human-readable error strings */
1695 +#define CCP_MAX_ERROR_CODE 64
1696 static char *ccp_error_codes[] = {
1697 "",
1698 - "ERR 01: ILLEGAL_ENGINE",
1699 - "ERR 02: ILLEGAL_KEY_ID",
1700 - "ERR 03: ILLEGAL_FUNCTION_TYPE",
1701 - "ERR 04: ILLEGAL_FUNCTION_MODE",
1702 - "ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
1703 - "ERR 06: ILLEGAL_FUNCTION_SIZE",
1704 - "ERR 07: Zlib_MISSING_INIT_EOM",
1705 - "ERR 08: ILLEGAL_FUNCTION_RSVD",
1706 - "ERR 09: ILLEGAL_BUFFER_LENGTH",
1707 - "ERR 10: VLSB_FAULT",
1708 - "ERR 11: ILLEGAL_MEM_ADDR",
1709 - "ERR 12: ILLEGAL_MEM_SEL",
1710 - "ERR 13: ILLEGAL_CONTEXT_ID",
1711 - "ERR 14: ILLEGAL_KEY_ADDR",
1712 - "ERR 15: 0xF Reserved",
1713 - "ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
1714 - "ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
1715 - "ERR 18: CMD_TIMEOUT",
1716 - "ERR 19: IDMA0_AXI_SLVERR",
1717 - "ERR 20: IDMA0_AXI_DECERR",
1718 - "ERR 21: 0x15 Reserved",
1719 - "ERR 22: IDMA1_AXI_SLAVE_FAULT",
1720 - "ERR 23: IDMA1_AIXI_DECERR",
1721 - "ERR 24: 0x18 Reserved",
1722 - "ERR 25: ZLIBVHB_AXI_SLVERR",
1723 - "ERR 26: ZLIBVHB_AXI_DECERR",
1724 - "ERR 27: 0x1B Reserved",
1725 - "ERR 27: ZLIB_UNEXPECTED_EOM",
1726 - "ERR 27: ZLIB_EXTRA_DATA",
1727 - "ERR 30: ZLIB_BTYPE",
1728 - "ERR 31: ZLIB_UNDEFINED_SYMBOL",
1729 - "ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
1730 - "ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
1731 - "ERR 34: ZLIB _VHB_ILLEGAL_FETCH",
1732 - "ERR 35: ZLIB_UNCOMPRESSED_LEN",
1733 - "ERR 36: ZLIB_LIMIT_REACHED",
1734 - "ERR 37: ZLIB_CHECKSUM_MISMATCH0",
1735 - "ERR 38: ODMA0_AXI_SLVERR",
1736 - "ERR 39: ODMA0_AXI_DECERR",
1737 - "ERR 40: 0x28 Reserved",
1738 - "ERR 41: ODMA1_AXI_SLVERR",
1739 - "ERR 42: ODMA1_AXI_DECERR",
1740 - "ERR 43: LSB_PARITY_ERR",
1741 + "ILLEGAL_ENGINE",
1742 + "ILLEGAL_KEY_ID",
1743 + "ILLEGAL_FUNCTION_TYPE",
1744 + "ILLEGAL_FUNCTION_MODE",
1745 + "ILLEGAL_FUNCTION_ENCRYPT",
1746 + "ILLEGAL_FUNCTION_SIZE",
1747 + "Zlib_MISSING_INIT_EOM",
1748 + "ILLEGAL_FUNCTION_RSVD",
1749 + "ILLEGAL_BUFFER_LENGTH",
1750 + "VLSB_FAULT",
1751 + "ILLEGAL_MEM_ADDR",
1752 + "ILLEGAL_MEM_SEL",
1753 + "ILLEGAL_CONTEXT_ID",
1754 + "ILLEGAL_KEY_ADDR",
1755 + "0xF Reserved",
1756 + "Zlib_ILLEGAL_MULTI_QUEUE",
1757 + "Zlib_ILLEGAL_JOBID_CHANGE",
1758 + "CMD_TIMEOUT",
1759 + "IDMA0_AXI_SLVERR",
1760 + "IDMA0_AXI_DECERR",
1761 + "0x15 Reserved",
1762 + "IDMA1_AXI_SLAVE_FAULT",
1763 + "IDMA1_AIXI_DECERR",
1764 + "0x18 Reserved",
1765 + "ZLIBVHB_AXI_SLVERR",
1766 + "ZLIBVHB_AXI_DECERR",
1767 + "0x1B Reserved",
1768 + "ZLIB_UNEXPECTED_EOM",
1769 + "ZLIB_EXTRA_DATA",
1770 + "ZLIB_BTYPE",
1771 + "ZLIB_UNDEFINED_SYMBOL",
1772 + "ZLIB_UNDEFINED_DISTANCE_S",
1773 + "ZLIB_CODE_LENGTH_SYMBOL",
1774 + "ZLIB _VHB_ILLEGAL_FETCH",
1775 + "ZLIB_UNCOMPRESSED_LEN",
1776 + "ZLIB_LIMIT_REACHED",
1777 + "ZLIB_CHECKSUM_MISMATCH0",
1778 + "ODMA0_AXI_SLVERR",
1779 + "ODMA0_AXI_DECERR",
1780 + "0x28 Reserved",
1781 + "ODMA1_AXI_SLVERR",
1782 + "ODMA1_AXI_DECERR",
1783 };
1784
1785 -void ccp_log_error(struct ccp_device *d, int e)
1786 +void ccp_log_error(struct ccp_device *d, unsigned int e)
1787 {
1788 - dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
1789 + if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
1790 + return;
1791 +
1792 + if (e < ARRAY_SIZE(ccp_error_codes))
1793 + dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
1794 + else
1795 + dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
1796 }
1797
1798 /* List of CCPs, CCP count, read-write access lock, and access functions
1799 diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
1800 index 6810b65c1939..7442b0422f8a 100644
1801 --- a/drivers/crypto/ccp/ccp-dev.h
1802 +++ b/drivers/crypto/ccp/ccp-dev.h
1803 @@ -632,7 +632,7 @@ struct ccp5_desc {
1804 void ccp_add_device(struct ccp_device *ccp);
1805 void ccp_del_device(struct ccp_device *ccp);
1806
1807 -extern void ccp_log_error(struct ccp_device *, int);
1808 +extern void ccp_log_error(struct ccp_device *, unsigned int);
1809
1810 struct ccp_device *ccp_alloc_struct(struct sp_device *sp);
1811 bool ccp_queues_suspended(struct ccp_device *ccp);
1812 diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
1813 index 0ea43cdeb05f..e212badd39fa 100644
1814 --- a/drivers/crypto/ccp/ccp-ops.c
1815 +++ b/drivers/crypto/ccp/ccp-ops.c
1816 @@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
1817
1818 unsigned long long *final;
1819 unsigned int dm_offset;
1820 + unsigned int jobid;
1821 unsigned int ilen;
1822 bool in_place = true; /* Default value */
1823 int ret;
1824 @@ -663,9 +664,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
1825 p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
1826 }
1827
1828 + jobid = CCP_NEW_JOBID(cmd_q->ccp);
1829 +
1830 memset(&op, 0, sizeof(op));
1831 op.cmd_q = cmd_q;
1832 - op.jobid = CCP_NEW_JOBID(cmd_q->ccp);
1833 + op.jobid = jobid;
1834 op.sb_key = cmd_q->sb_key; /* Pre-allocated */
1835 op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
1836 op.init = 1;
1837 @@ -816,6 +819,13 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
1838 final[0] = cpu_to_be64(aes->aad_len * 8);
1839 final[1] = cpu_to_be64(ilen * 8);
1840
1841 + memset(&op, 0, sizeof(op));
1842 + op.cmd_q = cmd_q;
1843 + op.jobid = jobid;
1844 + op.sb_key = cmd_q->sb_key; /* Pre-allocated */
1845 + op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
1846 + op.init = 1;
1847 + op.u.aes.type = aes->type;
1848 op.u.aes.mode = CCP_AES_MODE_GHASH;
1849 op.u.aes.action = CCP_AES_GHASHFINAL;
1850 op.src.type = CCP_MEMTYPE_SYSTEM;
1851 @@ -843,7 +853,8 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
1852 if (ret)
1853 goto e_tag;
1854
1855 - ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
1856 + ret = crypto_memneq(tag.address, final_wa.address,
1857 + AES_BLOCK_SIZE) ? -EBADMSG : 0;
1858 ccp_dm_free(&tag);
1859 }
1860
1861 diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
1862 index 3aef1d43e435..42a3830fbd19 100644
1863 --- a/drivers/crypto/inside-secure/safexcel_cipher.c
1864 +++ b/drivers/crypto/inside-secure/safexcel_cipher.c
1865 @@ -51,6 +51,8 @@ struct safexcel_cipher_ctx {
1866
1867 struct safexcel_cipher_req {
1868 enum safexcel_cipher_direction direction;
1869 + /* Number of result descriptors associated to the request */
1870 + unsigned int rdescs;
1871 bool needs_inv;
1872 };
1873
1874 @@ -333,7 +335,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
1875
1876 *ret = 0;
1877
1878 - do {
1879 + if (unlikely(!sreq->rdescs))
1880 + return 0;
1881 +
1882 + while (sreq->rdescs--) {
1883 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
1884 if (IS_ERR(rdesc)) {
1885 dev_err(priv->dev,
1886 @@ -346,7 +351,7 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
1887 *ret = safexcel_rdesc_check_errors(priv, rdesc);
1888
1889 ndesc++;
1890 - } while (!rdesc->last_seg);
1891 + }
1892
1893 safexcel_complete(priv, ring);
1894
1895 @@ -501,6 +506,7 @@ cdesc_rollback:
1896 static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
1897 int ring,
1898 struct crypto_async_request *base,
1899 + struct safexcel_cipher_req *sreq,
1900 bool *should_complete, int *ret)
1901 {
1902 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
1903 @@ -509,7 +515,10 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
1904
1905 *ret = 0;
1906
1907 - do {
1908 + if (unlikely(!sreq->rdescs))
1909 + return 0;
1910 +
1911 + while (sreq->rdescs--) {
1912 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
1913 if (IS_ERR(rdesc)) {
1914 dev_err(priv->dev,
1915 @@ -522,7 +531,7 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
1916 *ret = safexcel_rdesc_check_errors(priv, rdesc);
1917
1918 ndesc++;
1919 - } while (!rdesc->last_seg);
1920 + }
1921
1922 safexcel_complete(priv, ring);
1923
1924 @@ -564,7 +573,7 @@ static int safexcel_skcipher_handle_result(struct safexcel_crypto_priv *priv,
1925
1926 if (sreq->needs_inv) {
1927 sreq->needs_inv = false;
1928 - err = safexcel_handle_inv_result(priv, ring, async,
1929 + err = safexcel_handle_inv_result(priv, ring, async, sreq,
1930 should_complete, ret);
1931 } else {
1932 err = safexcel_handle_req_result(priv, ring, async, req->src,
1933 @@ -587,7 +596,7 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
1934
1935 if (sreq->needs_inv) {
1936 sreq->needs_inv = false;
1937 - err = safexcel_handle_inv_result(priv, ring, async,
1938 + err = safexcel_handle_inv_result(priv, ring, async, sreq,
1939 should_complete, ret);
1940 } else {
1941 err = safexcel_handle_req_result(priv, ring, async, req->src,
1942 @@ -633,6 +642,8 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
1943 ret = safexcel_send_req(async, ring, sreq, req->src,
1944 req->dst, req->cryptlen, 0, 0, req->iv,
1945 commands, results);
1946 +
1947 + sreq->rdescs = *results;
1948 return ret;
1949 }
1950
1951 @@ -655,6 +666,7 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
1952 req->cryptlen, req->assoclen,
1953 crypto_aead_authsize(tfm), req->iv,
1954 commands, results);
1955 + sreq->rdescs = *results;
1956 return ret;
1957 }
1958
1959 diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1960 index 5849075d54c7..41b288bdcdbf 100644
1961 --- a/drivers/crypto/talitos.c
1962 +++ b/drivers/crypto/talitos.c
1963 @@ -1001,7 +1001,6 @@ static void ipsec_esp_encrypt_done(struct device *dev,
1964 unsigned int authsize = crypto_aead_authsize(authenc);
1965 unsigned int ivsize = crypto_aead_ivsize(authenc);
1966 struct talitos_edesc *edesc;
1967 - struct scatterlist *sg;
1968 void *icvdata;
1969
1970 edesc = container_of(desc, struct talitos_edesc, desc);
1971 @@ -1015,9 +1014,8 @@ static void ipsec_esp_encrypt_done(struct device *dev,
1972 else
1973 icvdata = &edesc->link_tbl[edesc->src_nents +
1974 edesc->dst_nents + 2];
1975 - sg = sg_last(areq->dst, edesc->dst_nents);
1976 - memcpy((char *)sg_virt(sg) + sg->length - authsize,
1977 - icvdata, authsize);
1978 + sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
1979 + authsize, areq->assoclen + areq->cryptlen);
1980 }
1981
1982 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1983 @@ -1035,7 +1033,6 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1984 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1985 unsigned int authsize = crypto_aead_authsize(authenc);
1986 struct talitos_edesc *edesc;
1987 - struct scatterlist *sg;
1988 char *oicv, *icv;
1989 struct talitos_private *priv = dev_get_drvdata(dev);
1990 bool is_sec1 = has_ftr_sec1(priv);
1991 @@ -1045,9 +1042,18 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1992 ipsec_esp_unmap(dev, edesc, req);
1993
1994 if (!err) {
1995 + char icvdata[SHA512_DIGEST_SIZE];
1996 + int nents = edesc->dst_nents ? : 1;
1997 + unsigned int len = req->assoclen + req->cryptlen;
1998 +
1999 /* auth check */
2000 - sg = sg_last(req->dst, edesc->dst_nents ? : 1);
2001 - icv = (char *)sg_virt(sg) + sg->length - authsize;
2002 + if (nents > 1) {
2003 + sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
2004 + len - authsize);
2005 + icv = icvdata;
2006 + } else {
2007 + icv = (char *)sg_virt(req->dst) + len - authsize;
2008 + }
2009
2010 if (edesc->dma_len) {
2011 if (is_sec1)
2012 @@ -1463,7 +1469,6 @@ static int aead_decrypt(struct aead_request *req)
2013 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
2014 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
2015 struct talitos_edesc *edesc;
2016 - struct scatterlist *sg;
2017 void *icvdata;
2018
2019 req->cryptlen -= authsize;
2020 @@ -1497,9 +1502,8 @@ static int aead_decrypt(struct aead_request *req)
2021 else
2022 icvdata = &edesc->link_tbl[0];
2023
2024 - sg = sg_last(req->src, edesc->src_nents ? : 1);
2025 -
2026 - memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
2027 + sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
2028 + req->assoclen + req->cryptlen - authsize);
2029
2030 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
2031 }
2032 @@ -1553,11 +1557,15 @@ static void ablkcipher_done(struct device *dev,
2033 int err)
2034 {
2035 struct ablkcipher_request *areq = context;
2036 + struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
2037 + struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
2038 + unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
2039 struct talitos_edesc *edesc;
2040
2041 edesc = container_of(desc, struct talitos_edesc, desc);
2042
2043 common_nonsnoop_unmap(dev, edesc, areq);
2044 + memcpy(areq->info, ctx->iv, ivsize);
2045
2046 kfree(edesc);
2047
2048 @@ -3185,7 +3193,10 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2049 alg->cra_priority = t_alg->algt.priority;
2050 else
2051 alg->cra_priority = TALITOS_CRA_PRIORITY;
2052 - alg->cra_alignmask = 0;
2053 + if (has_ftr_sec1(priv))
2054 + alg->cra_alignmask = 3;
2055 + else
2056 + alg->cra_alignmask = 0;
2057 alg->cra_ctxsize = sizeof(struct talitos_ctx);
2058 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2059
2060 diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
2061 index 1c658ec3cbf4..3f5a01cb4ab4 100644
2062 --- a/drivers/dma/imx-sdma.c
2063 +++ b/drivers/dma/imx-sdma.c
2064 @@ -2039,27 +2039,6 @@ static int sdma_probe(struct platform_device *pdev)
2065 if (pdata && pdata->script_addrs)
2066 sdma_add_scripts(sdma, pdata->script_addrs);
2067
2068 - if (pdata) {
2069 - ret = sdma_get_firmware(sdma, pdata->fw_name);
2070 - if (ret)
2071 - dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2072 - } else {
2073 - /*
2074 - * Because that device tree does not encode ROM script address,
2075 - * the RAM script in firmware is mandatory for device tree
2076 - * probe, otherwise it fails.
2077 - */
2078 - ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2079 - &fw_name);
2080 - if (ret)
2081 - dev_warn(&pdev->dev, "failed to get firmware name\n");
2082 - else {
2083 - ret = sdma_get_firmware(sdma, fw_name);
2084 - if (ret)
2085 - dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2086 - }
2087 - }
2088 -
2089 sdma->dma_device.dev = &pdev->dev;
2090
2091 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2092 @@ -2103,6 +2082,33 @@ static int sdma_probe(struct platform_device *pdev)
2093 of_node_put(spba_bus);
2094 }
2095
2096 + /*
2097 + * Kick off firmware loading as the very last step:
2098 + * attempt to load firmware only if we're not on the error path, because
2099 + * the firmware callback requires a fully functional and allocated sdma
2100 + * instance.
2101 + */
2102 + if (pdata) {
2103 + ret = sdma_get_firmware(sdma, pdata->fw_name);
2104 + if (ret)
2105 + dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2106 + } else {
2107 + /*
2108 + * Because that device tree does not encode ROM script address,
2109 + * the RAM script in firmware is mandatory for device tree
2110 + * probe, otherwise it fails.
2111 + */
2112 + ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2113 + &fw_name);
2114 + if (ret) {
2115 + dev_warn(&pdev->dev, "failed to get firmware name\n");
2116 + } else {
2117 + ret = sdma_get_firmware(sdma, fw_name);
2118 + if (ret)
2119 + dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2120 + }
2121 + }
2122 +
2123 return 0;
2124
2125 err_register:
2126 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
2127 index 20374b8248f0..d4545a9222a0 100644
2128 --- a/drivers/edac/edac_mc_sysfs.c
2129 +++ b/drivers/edac/edac_mc_sysfs.c
2130 @@ -26,7 +26,7 @@
2131 static int edac_mc_log_ue = 1;
2132 static int edac_mc_log_ce = 1;
2133 static int edac_mc_panic_on_ue;
2134 -static int edac_mc_poll_msec = 1000;
2135 +static unsigned int edac_mc_poll_msec = 1000;
2136
2137 /* Getter functions for above */
2138 int edac_mc_get_log_ue(void)
2139 @@ -45,30 +45,30 @@ int edac_mc_get_panic_on_ue(void)
2140 }
2141
2142 /* this is temporary */
2143 -int edac_mc_get_poll_msec(void)
2144 +unsigned int edac_mc_get_poll_msec(void)
2145 {
2146 return edac_mc_poll_msec;
2147 }
2148
2149 static int edac_set_poll_msec(const char *val, const struct kernel_param *kp)
2150 {
2151 - unsigned long l;
2152 + unsigned int i;
2153 int ret;
2154
2155 if (!val)
2156 return -EINVAL;
2157
2158 - ret = kstrtoul(val, 0, &l);
2159 + ret = kstrtouint(val, 0, &i);
2160 if (ret)
2161 return ret;
2162
2163 - if (l < 1000)
2164 + if (i < 1000)
2165 return -EINVAL;
2166
2167 - *((unsigned long *)kp->arg) = l;
2168 + *((unsigned int *)kp->arg) = i;
2169
2170 /* notify edac_mc engine to reset the poll period */
2171 - edac_mc_reset_delay_period(l);
2172 + edac_mc_reset_delay_period(i);
2173
2174 return 0;
2175 }
2176 @@ -82,7 +82,7 @@ MODULE_PARM_DESC(edac_mc_log_ue,
2177 module_param(edac_mc_log_ce, int, 0644);
2178 MODULE_PARM_DESC(edac_mc_log_ce,
2179 "Log correctable error to console: 0=off 1=on");
2180 -module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_int,
2181 +module_param_call(edac_mc_poll_msec, edac_set_poll_msec, param_get_uint,
2182 &edac_mc_poll_msec, 0644);
2183 MODULE_PARM_DESC(edac_mc_poll_msec, "Polling period in milliseconds");
2184
2185 @@ -404,6 +404,8 @@ static inline int nr_pages_per_csrow(struct csrow_info *csrow)
2186 static int edac_create_csrow_object(struct mem_ctl_info *mci,
2187 struct csrow_info *csrow, int index)
2188 {
2189 + int err;
2190 +
2191 csrow->dev.type = &csrow_attr_type;
2192 csrow->dev.bus = mci->bus;
2193 csrow->dev.groups = csrow_dev_groups;
2194 @@ -416,7 +418,11 @@ static int edac_create_csrow_object(struct mem_ctl_info *mci,
2195 edac_dbg(0, "creating (virtual) csrow node %s\n",
2196 dev_name(&csrow->dev));
2197
2198 - return device_add(&csrow->dev);
2199 + err = device_add(&csrow->dev);
2200 + if (err)
2201 + put_device(&csrow->dev);
2202 +
2203 + return err;
2204 }
2205
2206 /* Create a CSROW object under specifed edac_mc_device */
2207 diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
2208 index dec88dcea036..c9f0e73872a6 100644
2209 --- a/drivers/edac/edac_module.h
2210 +++ b/drivers/edac/edac_module.h
2211 @@ -36,7 +36,7 @@ extern int edac_mc_get_log_ue(void);
2212 extern int edac_mc_get_log_ce(void);
2213 extern int edac_mc_get_panic_on_ue(void);
2214 extern int edac_get_poll_msec(void);
2215 -extern int edac_mc_get_poll_msec(void);
2216 +extern unsigned int edac_mc_get_poll_msec(void);
2217
2218 unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf,
2219 unsigned len);
2220 diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
2221 index 6fa430d98517..feabac40743e 100644
2222 --- a/drivers/gpio/gpio-omap.c
2223 +++ b/drivers/gpio/gpio-omap.c
2224 @@ -837,9 +837,9 @@ static void omap_gpio_irq_shutdown(struct irq_data *d)
2225
2226 raw_spin_lock_irqsave(&bank->lock, flags);
2227 bank->irq_usage &= ~(BIT(offset));
2228 - omap_set_gpio_irqenable(bank, offset, 0);
2229 - omap_clear_gpio_irqstatus(bank, offset);
2230 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
2231 + omap_clear_gpio_irqstatus(bank, offset);
2232 + omap_set_gpio_irqenable(bank, offset, 0);
2233 if (!LINE_USED(bank->mod_usage, offset))
2234 omap_clear_gpio_debounce(bank, offset);
2235 omap_disable_gpio_module(bank, offset);
2236 @@ -881,8 +881,8 @@ static void omap_gpio_mask_irq(struct irq_data *d)
2237 unsigned long flags;
2238
2239 raw_spin_lock_irqsave(&bank->lock, flags);
2240 - omap_set_gpio_irqenable(bank, offset, 0);
2241 omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
2242 + omap_set_gpio_irqenable(bank, offset, 0);
2243 raw_spin_unlock_irqrestore(&bank->lock, flags);
2244 }
2245
2246 @@ -894,9 +894,6 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
2247 unsigned long flags;
2248
2249 raw_spin_lock_irqsave(&bank->lock, flags);
2250 - if (trigger)
2251 - omap_set_gpio_triggering(bank, offset, trigger);
2252 -
2253 omap_set_gpio_irqenable(bank, offset, 1);
2254
2255 /*
2256 @@ -904,9 +901,13 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
2257 * is cleared, thus after the handler has run. OMAP4 needs this done
2258 * after enabing the interrupt to clear the wakeup status.
2259 */
2260 - if (bank->level_mask & BIT(offset))
2261 + if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
2262 + trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
2263 omap_clear_gpio_irqstatus(bank, offset);
2264
2265 + if (trigger)
2266 + omap_set_gpio_triggering(bank, offset, trigger);
2267 +
2268 raw_spin_unlock_irqrestore(&bank->lock, flags);
2269 }
2270
2271 @@ -1687,6 +1688,8 @@ static struct omap_gpio_reg_offs omap4_gpio_regs = {
2272 .clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
2273 .irqstatus = OMAP4_GPIO_IRQSTATUS0,
2274 .irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
2275 + .irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
2276 + .irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
2277 .irqenable = OMAP4_GPIO_IRQSTATUSSET0,
2278 .irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
2279 .set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
2280 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
2281 index fd713326dcfc..4a48c7c47709 100644
2282 --- a/drivers/gpio/gpiolib.c
2283 +++ b/drivers/gpio/gpiolib.c
2284 @@ -2877,7 +2877,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
2285 int gpiod_get_raw_value(const struct gpio_desc *desc)
2286 {
2287 VALIDATE_DESC(desc);
2288 - /* Should be using gpio_get_value_cansleep() */
2289 + /* Should be using gpiod_get_raw_value_cansleep() */
2290 WARN_ON(desc->gdev->chip->can_sleep);
2291 return gpiod_get_raw_value_commit(desc);
2292 }
2293 @@ -2898,7 +2898,7 @@ int gpiod_get_value(const struct gpio_desc *desc)
2294 int value;
2295
2296 VALIDATE_DESC(desc);
2297 - /* Should be using gpio_get_value_cansleep() */
2298 + /* Should be using gpiod_get_value_cansleep() */
2299 WARN_ON(desc->gdev->chip->can_sleep);
2300
2301 value = gpiod_get_raw_value_commit(desc);
2302 @@ -3123,7 +3123,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
2303 void gpiod_set_raw_value(struct gpio_desc *desc, int value)
2304 {
2305 VALIDATE_DESC_VOID(desc);
2306 - /* Should be using gpiod_set_value_cansleep() */
2307 + /* Should be using gpiod_set_raw_value_cansleep() */
2308 WARN_ON(desc->gdev->chip->can_sleep);
2309 gpiod_set_raw_value_commit(desc, value);
2310 }
2311 @@ -3164,6 +3164,7 @@ static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
2312 void gpiod_set_value(struct gpio_desc *desc, int value)
2313 {
2314 VALIDATE_DESC_VOID(desc);
2315 + /* Should be using gpiod_set_value_cansleep() */
2316 WARN_ON(desc->gdev->chip->can_sleep);
2317 gpiod_set_value_nocheck(desc, value);
2318 }
2319 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
2320 index 5965f6383ada..e5e7e65934da 100644
2321 --- a/drivers/gpu/drm/drm_edid.c
2322 +++ b/drivers/gpu/drm/drm_edid.c
2323 @@ -1349,6 +1349,7 @@ MODULE_PARM_DESC(edid_fixup,
2324
2325 static void drm_get_displayid(struct drm_connector *connector,
2326 struct edid *edid);
2327 +static int validate_displayid(u8 *displayid, int length, int idx);
2328
2329 static int drm_edid_block_checksum(const u8 *raw_edid)
2330 {
2331 @@ -2932,16 +2933,46 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
2332 return edid_ext;
2333 }
2334
2335 -static u8 *drm_find_cea_extension(const struct edid *edid)
2336 -{
2337 - return drm_find_edid_extension(edid, CEA_EXT);
2338 -}
2339
2340 static u8 *drm_find_displayid_extension(const struct edid *edid)
2341 {
2342 return drm_find_edid_extension(edid, DISPLAYID_EXT);
2343 }
2344
2345 +static u8 *drm_find_cea_extension(const struct edid *edid)
2346 +{
2347 + int ret;
2348 + int idx = 1;
2349 + int length = EDID_LENGTH;
2350 + struct displayid_block *block;
2351 + u8 *cea;
2352 + u8 *displayid;
2353 +
2354 + /* Look for a top level CEA extension block */
2355 + cea = drm_find_edid_extension(edid, CEA_EXT);
2356 + if (cea)
2357 + return cea;
2358 +
2359 + /* CEA blocks can also be found embedded in a DisplayID block */
2360 + displayid = drm_find_displayid_extension(edid);
2361 + if (!displayid)
2362 + return NULL;
2363 +
2364 + ret = validate_displayid(displayid, length, idx);
2365 + if (ret)
2366 + return NULL;
2367 +
2368 + idx += sizeof(struct displayid_hdr);
2369 + for_each_displayid_db(displayid, block, idx, length) {
2370 + if (block->tag == DATA_BLOCK_CTA) {
2371 + cea = (u8 *)block;
2372 + break;
2373 + }
2374 + }
2375 +
2376 + return cea;
2377 +}
2378 +
2379 /*
2380 * Calculate the alternate clock for the CEA mode
2381 * (60Hz vs. 59.94Hz etc.)
2382 @@ -3665,13 +3696,38 @@ cea_revision(const u8 *cea)
2383 static int
2384 cea_db_offsets(const u8 *cea, int *start, int *end)
2385 {
2386 - /* Data block offset in CEA extension block */
2387 - *start = 4;
2388 - *end = cea[2];
2389 - if (*end == 0)
2390 - *end = 127;
2391 - if (*end < 4 || *end > 127)
2392 - return -ERANGE;
2393 + /* DisplayID CTA extension blocks and top-level CEA EDID
2394 + * block header definitions differ in the following bytes:
2395 + * 1) Byte 2 of the header specifies length differently,
2396 + * 2) Byte 3 is only present in the CEA top level block.
2397 + *
2398 + * The different definitions for byte 2 follow.
2399 + *
2400 + * DisplayID CTA extension block defines byte 2 as:
2401 + * Number of payload bytes
2402 + *
2403 + * CEA EDID block defines byte 2 as:
2404 + * Byte number (decimal) within this block where the 18-byte
2405 + * DTDs begin. If no non-DTD data is present in this extension
2406 + * block, the value should be set to 04h (the byte after next).
2407 + * If set to 00h, there are no DTDs present in this block and
2408 + * no non-DTD data.
2409 + */
2410 + if (cea[0] == DATA_BLOCK_CTA) {
2411 + *start = 3;
2412 + *end = *start + cea[2];
2413 + } else if (cea[0] == CEA_EXT) {
2414 + /* Data block offset in CEA extension block */
2415 + *start = 4;
2416 + *end = cea[2];
2417 + if (*end == 0)
2418 + *end = 127;
2419 + if (*end < 4 || *end > 127)
2420 + return -ERANGE;
2421 + } else {
2422 + return -ENOTSUPP;
2423 + }
2424 +
2425 return 0;
2426 }
2427
2428 @@ -5218,6 +5274,9 @@ static int drm_parse_display_id(struct drm_connector *connector,
2429 case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
2430 /* handled in mode gathering code. */
2431 break;
2432 + case DATA_BLOCK_CTA:
2433 + /* handled in the cea parser code. */
2434 + break;
2435 default:
2436 DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
2437 break;
2438 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
2439 index ecacb22834d7..719345074711 100644
2440 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
2441 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
2442 @@ -184,6 +184,25 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
2443 return 0;
2444 }
2445
2446 +static int
2447 +nvkm_i2c_preinit(struct nvkm_subdev *subdev)
2448 +{
2449 + struct nvkm_i2c *i2c = nvkm_i2c(subdev);
2450 + struct nvkm_i2c_bus *bus;
2451 + struct nvkm_i2c_pad *pad;
2452 +
2453 + /*
2454 + * We init our i2c busses as early as possible, since they may be
2455 + * needed by the vbios init scripts on some cards
2456 + */
2457 + list_for_each_entry(pad, &i2c->pad, head)
2458 + nvkm_i2c_pad_init(pad);
2459 + list_for_each_entry(bus, &i2c->bus, head)
2460 + nvkm_i2c_bus_init(bus);
2461 +
2462 + return 0;
2463 +}
2464 +
2465 static int
2466 nvkm_i2c_init(struct nvkm_subdev *subdev)
2467 {
2468 @@ -238,6 +257,7 @@ nvkm_i2c_dtor(struct nvkm_subdev *subdev)
2469 static const struct nvkm_subdev_func
2470 nvkm_i2c = {
2471 .dtor = nvkm_i2c_dtor,
2472 + .preinit = nvkm_i2c_preinit,
2473 .init = nvkm_i2c_init,
2474 .fini = nvkm_i2c_fini,
2475 .intr = nvkm_i2c_intr,
2476 diff --git a/drivers/gpu/ipu-v3/ipu-ic.c b/drivers/gpu/ipu-v3/ipu-ic.c
2477 index 67cc820253a9..fb79e118f26c 100644
2478 --- a/drivers/gpu/ipu-v3/ipu-ic.c
2479 +++ b/drivers/gpu/ipu-v3/ipu-ic.c
2480 @@ -257,7 +257,7 @@ static int init_csc(struct ipu_ic *ic,
2481 writel(param, base++);
2482
2483 param = ((a[0] & 0x1fe0) >> 5) | (params->scale << 8) |
2484 - (params->sat << 9);
2485 + (params->sat << 10);
2486 writel(param, base++);
2487
2488 param = ((a[1] & 0x1f) << 27) | ((c[0][1] & 0x1ff) << 18) |
2489 diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
2490 index 0bdd85d486fe..9cd4705b74bd 100644
2491 --- a/drivers/hid/wacom_sys.c
2492 +++ b/drivers/hid/wacom_sys.c
2493 @@ -275,6 +275,9 @@ static void wacom_feature_mapping(struct hid_device *hdev,
2494 wacom_hid_usage_quirk(hdev, field, usage);
2495
2496 switch (equivalent_usage) {
2497 + case WACOM_HID_WD_TOUCH_RING_SETTING:
2498 + wacom->generic_has_leds = true;
2499 + break;
2500 case HID_DG_CONTACTMAX:
2501 /* leave touch_max as is if predefined */
2502 if (!features->touch_max) {
2503 diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
2504 index d7c3f4ac2c04..0ae848369474 100644
2505 --- a/drivers/hid/wacom_wac.c
2506 +++ b/drivers/hid/wacom_wac.c
2507 @@ -1928,8 +1928,6 @@ static void wacom_wac_pad_usage_mapping(struct hid_device *hdev,
2508 features->device_type |= WACOM_DEVICETYPE_PAD;
2509 break;
2510 case WACOM_HID_WD_BUTTONCENTER:
2511 - wacom->generic_has_leds = true;
2512 - /* fall through */
2513 case WACOM_HID_WD_BUTTONHOME:
2514 case WACOM_HID_WD_BUTTONUP:
2515 case WACOM_HID_WD_BUTTONDOWN:
2516 @@ -2121,14 +2119,12 @@ static void wacom_wac_pad_report(struct hid_device *hdev,
2517 bool active = wacom_wac->hid_data.inrange_state != 0;
2518
2519 /* report prox for expresskey events */
2520 - if ((wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY) &&
2521 - wacom_wac->hid_data.pad_input_event_flag) {
2522 + if (wacom_wac->hid_data.pad_input_event_flag) {
2523 input_event(input, EV_ABS, ABS_MISC, active ? PAD_DEVICE_ID : 0);
2524 input_sync(input);
2525 if (!active)
2526 wacom_wac->hid_data.pad_input_event_flag = false;
2527 }
2528 -
2529 }
2530
2531 static void wacom_wac_pen_usage_mapping(struct hid_device *hdev,
2532 @@ -2725,9 +2721,7 @@ static int wacom_wac_collection(struct hid_device *hdev, struct hid_report *repo
2533 if (report->type != HID_INPUT_REPORT)
2534 return -1;
2535
2536 - if (WACOM_PAD_FIELD(field) && wacom->wacom_wac.pad_input)
2537 - wacom_wac_pad_report(hdev, report, field);
2538 - else if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
2539 + if (WACOM_PEN_FIELD(field) && wacom->wacom_wac.pen_input)
2540 wacom_wac_pen_report(hdev, report);
2541 else if (WACOM_FINGER_FIELD(field) && wacom->wacom_wac.touch_input)
2542 wacom_wac_finger_report(hdev, report);
2543 @@ -2741,7 +2735,7 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
2544 struct wacom_wac *wacom_wac = &wacom->wacom_wac;
2545 struct hid_field *field;
2546 bool pad_in_hid_field = false, pen_in_hid_field = false,
2547 - finger_in_hid_field = false;
2548 + finger_in_hid_field = false, true_pad = false;
2549 int r;
2550 int prev_collection = -1;
2551
2552 @@ -2757,6 +2751,8 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
2553 pen_in_hid_field = true;
2554 if (WACOM_FINGER_FIELD(field))
2555 finger_in_hid_field = true;
2556 + if (wacom_equivalent_usage(field->physical) == HID_DG_TABLETFUNCTIONKEY)
2557 + true_pad = true;
2558 }
2559
2560 wacom_wac_battery_pre_report(hdev, report);
2561 @@ -2780,6 +2776,9 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
2562 }
2563
2564 wacom_wac_battery_report(hdev, report);
2565 +
2566 + if (true_pad && wacom->wacom_wac.pad_input)
2567 + wacom_wac_pad_report(hdev, report, field);
2568 }
2569
2570 static int wacom_bpt_touch(struct wacom_wac *wacom)
2571 @@ -3735,7 +3734,7 @@ int wacom_setup_touch_input_capabilities(struct input_dev *input_dev,
2572 0, 5920, 4, 0);
2573 }
2574 input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
2575 - input_abs_set_res(input_dev, ABS_MT_POSITION_X, 40);
2576 + input_abs_set_res(input_dev, ABS_MT_POSITION_Y, 40);
2577
2578 /* fall through */
2579
2580 diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
2581 index 295fd3718caa..f67d871841c0 100644
2582 --- a/drivers/hid/wacom_wac.h
2583 +++ b/drivers/hid/wacom_wac.h
2584 @@ -145,6 +145,7 @@
2585 #define WACOM_HID_WD_OFFSETBOTTOM (WACOM_HID_UP_WACOMDIGITIZER | 0x0d33)
2586 #define WACOM_HID_WD_DATAMODE (WACOM_HID_UP_WACOMDIGITIZER | 0x1002)
2587 #define WACOM_HID_WD_DIGITIZERINFO (WACOM_HID_UP_WACOMDIGITIZER | 0x1013)
2588 +#define WACOM_HID_WD_TOUCH_RING_SETTING (WACOM_HID_UP_WACOMDIGITIZER | 0x1032)
2589 #define WACOM_HID_UP_G9 0xff090000
2590 #define WACOM_HID_G9_PEN (WACOM_HID_UP_G9 | 0x02)
2591 #define WACOM_HID_G9_TOUCHSCREEN (WACOM_HID_UP_G9 | 0x11)
2592 diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
2593 index 8ff326c0c406..3cdf85b1ce4f 100644
2594 --- a/drivers/hwtracing/intel_th/msu.c
2595 +++ b/drivers/hwtracing/intel_th/msu.c
2596 @@ -632,7 +632,7 @@ static int msc_buffer_contig_alloc(struct msc *msc, unsigned long size)
2597 goto err_out;
2598
2599 ret = -ENOMEM;
2600 - page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
2601 + page = alloc_pages(GFP_KERNEL | __GFP_ZERO | GFP_DMA32, order);
2602 if (!page)
2603 goto err_free_sgt;
2604
2605 diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
2606 index 70f2cb90adc5..e759ac0d48be 100644
2607 --- a/drivers/hwtracing/intel_th/pci.c
2608 +++ b/drivers/hwtracing/intel_th/pci.c
2609 @@ -170,6 +170,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
2610 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x02a6),
2611 .driver_data = (kernel_ulong_t)&intel_th_2x,
2612 },
2613 + {
2614 + /* Ice Lake NNPI */
2615 + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x45c5),
2616 + .driver_data = (kernel_ulong_t)&intel_th_2x,
2617 + },
2618 { 0 },
2619 };
2620
2621 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2622 index 8cc4da62f050..53eccc0da8fd 100644
2623 --- a/drivers/infiniband/hw/mlx5/main.c
2624 +++ b/drivers/infiniband/hw/mlx5/main.c
2625 @@ -939,15 +939,19 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
2626 }
2627
2628 if (MLX5_CAP_GEN(mdev, tag_matching)) {
2629 - props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
2630 props->tm_caps.max_num_tags =
2631 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
2632 - props->tm_caps.flags = IB_TM_CAP_RC;
2633 props->tm_caps.max_ops =
2634 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
2635 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
2636 }
2637
2638 + if (MLX5_CAP_GEN(mdev, tag_matching) &&
2639 + MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
2640 + props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
2641 + props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
2642 + }
2643 +
2644 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
2645 props->cq_caps.max_cq_moderation_count =
2646 MLX5_MAX_CQ_COUNT;
2647 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
2648 index 30f840f874b3..009615499b37 100644
2649 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
2650 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
2651 @@ -1997,6 +1997,7 @@ static int ipoib_get_vf_config(struct net_device *dev, int vf,
2652 return err;
2653
2654 ivf->vf = vf;
2655 + memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
2656
2657 return 0;
2658 }
2659 diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
2660 index 0a6f7ca883e7..dd80ff6cc427 100644
2661 --- a/drivers/input/mouse/alps.c
2662 +++ b/drivers/input/mouse/alps.c
2663 @@ -24,6 +24,7 @@
2664
2665 #include "psmouse.h"
2666 #include "alps.h"
2667 +#include "trackpoint.h"
2668
2669 /*
2670 * Definitions for ALPS version 3 and 4 command mode protocol
2671 @@ -2864,6 +2865,23 @@ static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
2672 return NULL;
2673 }
2674
2675 +static bool alps_is_cs19_trackpoint(struct psmouse *psmouse)
2676 +{
2677 + u8 param[2] = { 0 };
2678 +
2679 + if (ps2_command(&psmouse->ps2dev,
2680 + param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
2681 + return false;
2682 +
2683 + /*
2684 + * param[0] contains the trackpoint device variant_id while
2685 + * param[1] contains the firmware_id. So far all alps
2686 + * trackpoint-only devices have their variant_ids equal
2687 + * TP_VARIANT_ALPS and their firmware_ids are in 0x20~0x2f range.
2688 + */
2689 + return param[0] == TP_VARIANT_ALPS && ((param[1] & 0xf0) == 0x20);
2690 +}
2691 +
2692 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
2693 {
2694 const struct alps_protocol_info *protocol;
2695 @@ -3164,6 +3182,20 @@ int alps_detect(struct psmouse *psmouse, bool set_properties)
2696 if (error)
2697 return error;
2698
2699 + /*
2700 + * ALPS cs19 is a trackpoint-only device, and uses different
2701 + * protocol than DualPoint ones, so we return -EINVAL here and let
2702 + * trackpoint.c drive this device. If the trackpoint driver is not
2703 + * enabled, the device will fall back to a bare PS/2 mouse.
2704 + * If ps2_command() fails here, we depend on the immediately
2705 + * followed psmouse_reset() to reset the device to normal state.
2706 + */
2707 + if (alps_is_cs19_trackpoint(psmouse)) {
2708 + psmouse_dbg(psmouse,
2709 + "ALPS CS19 trackpoint-only device detected, ignoring\n");
2710 + return -EINVAL;
2711 + }
2712 +
2713 /*
2714 * Reset the device to make sure it is fully operational:
2715 * on some laptops, like certain Dell Latitudes, we may
2716 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
2717 index 68fd8232d44c..af7d48431b85 100644
2718 --- a/drivers/input/mouse/synaptics.c
2719 +++ b/drivers/input/mouse/synaptics.c
2720 @@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
2721 "LEN0093", /* T480 */
2722 "LEN0096", /* X280 */
2723 "LEN0097", /* X280 -> ALPS trackpoint */
2724 + "LEN009b", /* T580 */
2725 "LEN200f", /* T450s */
2726 "LEN2054", /* E480 */
2727 "LEN2055", /* E580 */
2728 diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
2729 index 4b8b9d7aa75e..35031228a6d0 100644
2730 --- a/drivers/input/tablet/gtco.c
2731 +++ b/drivers/input/tablet/gtco.c
2732 @@ -78,6 +78,7 @@ Scott Hill shill@gtcocalcomp.com
2733
2734 /* Max size of a single report */
2735 #define REPORT_MAX_SIZE 10
2736 +#define MAX_COLLECTION_LEVELS 10
2737
2738
2739 /* Bitmask whether pen is in range */
2740 @@ -223,8 +224,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
2741 char maintype = 'x';
2742 char globtype[12];
2743 int indent = 0;
2744 - char indentstr[10] = "";
2745 -
2746 + char indentstr[MAX_COLLECTION_LEVELS + 1] = { 0 };
2747
2748 dev_dbg(ddev, "======>>>>>>PARSE<<<<<<======\n");
2749
2750 @@ -350,6 +350,13 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
2751 case TAG_MAIN_COL_START:
2752 maintype = 'S';
2753
2754 + if (indent == MAX_COLLECTION_LEVELS) {
2755 + dev_err(ddev, "Collection level %d would exceed limit of %d\n",
2756 + indent + 1,
2757 + MAX_COLLECTION_LEVELS);
2758 + break;
2759 + }
2760 +
2761 if (data == 0) {
2762 dev_dbg(ddev, "======>>>>>> Physical\n");
2763 strcpy(globtype, "Physical");
2764 @@ -369,8 +376,15 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
2765 break;
2766
2767 case TAG_MAIN_COL_END:
2768 - dev_dbg(ddev, "<<<<<<======\n");
2769 maintype = 'E';
2770 +
2771 + if (indent == 0) {
2772 + dev_err(ddev, "Collection level already at zero\n");
2773 + break;
2774 + }
2775 +
2776 + dev_dbg(ddev, "<<<<<<======\n");
2777 +
2778 indent--;
2779 for (x = 0; x < indent; x++)
2780 indentstr[x] = '-';
2781 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
2782 index 8c15c5980299..bc14825edc9c 100644
2783 --- a/drivers/iommu/iommu.c
2784 +++ b/drivers/iommu/iommu.c
2785 @@ -211,18 +211,21 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new,
2786 pos = pos->next;
2787 } else if ((start >= a) && (end <= b)) {
2788 if (new->type == type)
2789 - goto done;
2790 + return 0;
2791 else
2792 pos = pos->next;
2793 } else {
2794 if (new->type == type) {
2795 phys_addr_t new_start = min(a, start);
2796 phys_addr_t new_end = max(b, end);
2797 + int ret;
2798
2799 list_del(&entry->list);
2800 entry->start = new_start;
2801 entry->length = new_end - new_start + 1;
2802 - iommu_insert_resv_region(entry, regions);
2803 + ret = iommu_insert_resv_region(entry, regions);
2804 + kfree(entry);
2805 + return ret;
2806 } else {
2807 pos = pos->next;
2808 }
2809 @@ -235,7 +238,6 @@ insert:
2810 return -ENOMEM;
2811
2812 list_add_tail(&region->list, pos);
2813 -done:
2814 return 0;
2815 }
2816
2817 diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
2818 index 7b531fd075b8..7599b10ecf09 100644
2819 --- a/drivers/irqchip/irq-meson-gpio.c
2820 +++ b/drivers/irqchip/irq-meson-gpio.c
2821 @@ -73,6 +73,7 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
2822 { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
2823 { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
2824 { .compatible = "amlogic,meson-axg-gpio-intc", .data = &axg_params },
2825 + { .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
2826 { }
2827 };
2828
2829 diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
2830 index 95be6e36c7dd..80710c62ac29 100644
2831 --- a/drivers/lightnvm/pblk-core.c
2832 +++ b/drivers/lightnvm/pblk-core.c
2833 @@ -288,14 +288,16 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
2834 void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
2835 int nr_pages)
2836 {
2837 - struct bio_vec bv;
2838 - int i;
2839 -
2840 - WARN_ON(off + nr_pages != bio->bi_vcnt);
2841 -
2842 - for (i = off; i < nr_pages + off; i++) {
2843 - bv = bio->bi_io_vec[i];
2844 - mempool_free(bv.bv_page, &pblk->page_bio_pool);
2845 + struct bio_vec *bv;
2846 + struct page *page;
2847 + int i, e, nbv = 0;
2848 +
2849 + for (i = 0; i < bio->bi_vcnt; i++) {
2850 + bv = &bio->bi_io_vec[i];
2851 + page = bv->bv_page;
2852 + for (e = 0; e < bv->bv_len; e += PBLK_EXPOSED_PAGE_SIZE, nbv++)
2853 + if (nbv >= off)
2854 + mempool_free(page++, &pblk->page_bio_pool);
2855 }
2856 }
2857
2858 diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
2859 index de85b3af3b39..9c3beb1e382b 100644
2860 --- a/drivers/md/bcache/alloc.c
2861 +++ b/drivers/md/bcache/alloc.c
2862 @@ -393,6 +393,11 @@ long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
2863 struct bucket *b;
2864 long r;
2865
2866 +
2867 + /* No allocation if CACHE_SET_IO_DISABLE bit is set */
2868 + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
2869 + return -1;
2870 +
2871 /* fastpath */
2872 if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
2873 fifo_pop(&ca->free[reserve], r))
2874 @@ -484,6 +489,10 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
2875 {
2876 int i;
2877
2878 + /* No allocation if CACHE_SET_IO_DISABLE bit is set */
2879 + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
2880 + return -1;
2881 +
2882 lockdep_assert_held(&c->bucket_lock);
2883 BUG_ON(!n || n > c->caches_loaded || n > 8);
2884
2885 diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
2886 index 954dad29e6e8..83f0b91aeb90 100644
2887 --- a/drivers/md/bcache/bcache.h
2888 +++ b/drivers/md/bcache/bcache.h
2889 @@ -708,8 +708,6 @@ struct cache_set {
2890
2891 #define BUCKET_HASH_BITS 12
2892 struct hlist_head bucket_hash[1 << BUCKET_HASH_BITS];
2893 -
2894 - DECLARE_HEAP(struct btree *, flush_btree);
2895 };
2896
2897 struct bbio {
2898 diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
2899 index c25097968319..4d93f07f63e5 100644
2900 --- a/drivers/md/bcache/io.c
2901 +++ b/drivers/md/bcache/io.c
2902 @@ -58,6 +58,18 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
2903
2904 WARN_ONCE(!dc, "NULL pointer of struct cached_dev");
2905
2906 + /*
2907 + * Read-ahead requests on a degrading and recovering md raid
2908 + * (e.g. raid6) device might be failured immediately by md
2909 + * raid code, which is not a real hardware media failure. So
2910 + * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
2911 + */
2912 + if (bio->bi_opf & REQ_RAHEAD) {
2913 + pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore",
2914 + dc->backing_dev_name);
2915 + return;
2916 + }
2917 +
2918 errors = atomic_add_return(1, &dc->io_errors);
2919 if (errors < dc->error_limit)
2920 pr_err("%s: IO error on backing device, unrecoverable",
2921 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
2922 index f880e5eba8dd..ec1e35a62934 100644
2923 --- a/drivers/md/bcache/journal.c
2924 +++ b/drivers/md/bcache/journal.c
2925 @@ -390,12 +390,6 @@ err:
2926 }
2927
2928 /* Journalling */
2929 -#define journal_max_cmp(l, r) \
2930 - (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
2931 - fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
2932 -#define journal_min_cmp(l, r) \
2933 - (fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
2934 - fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
2935
2936 static void btree_flush_write(struct cache_set *c)
2937 {
2938 @@ -403,35 +397,25 @@ static void btree_flush_write(struct cache_set *c)
2939 * Try to find the btree node with that references the oldest journal
2940 * entry, best is our current candidate and is locked if non NULL:
2941 */
2942 - struct btree *b;
2943 - int i;
2944 + struct btree *b, *best;
2945 + unsigned int i;
2946
2947 atomic_long_inc(&c->flush_write);
2948 -
2949 retry:
2950 - spin_lock(&c->journal.lock);
2951 - if (heap_empty(&c->flush_btree)) {
2952 - for_each_cached_btree(b, c, i)
2953 - if (btree_current_write(b)->journal) {
2954 - if (!heap_full(&c->flush_btree))
2955 - heap_add(&c->flush_btree, b,
2956 - journal_max_cmp);
2957 - else if (journal_max_cmp(b,
2958 - heap_peek(&c->flush_btree))) {
2959 - c->flush_btree.data[0] = b;
2960 - heap_sift(&c->flush_btree, 0,
2961 - journal_max_cmp);
2962 - }
2963 + best = NULL;
2964 +
2965 + for_each_cached_btree(b, c, i)
2966 + if (btree_current_write(b)->journal) {
2967 + if (!best)
2968 + best = b;
2969 + else if (journal_pin_cmp(c,
2970 + btree_current_write(best)->journal,
2971 + btree_current_write(b)->journal)) {
2972 + best = b;
2973 }
2974 + }
2975
2976 - for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
2977 - heap_sift(&c->flush_btree, i, journal_min_cmp);
2978 - }
2979 -
2980 - b = NULL;
2981 - heap_pop(&c->flush_btree, b, journal_min_cmp);
2982 - spin_unlock(&c->journal.lock);
2983 -
2984 + b = best;
2985 if (b) {
2986 mutex_lock(&b->write_lock);
2987 if (!btree_current_write(b)->journal) {
2988 @@ -810,6 +794,10 @@ atomic_t *bch_journal(struct cache_set *c,
2989 struct journal_write *w;
2990 atomic_t *ret;
2991
2992 + /* No journaling if CACHE_SET_IO_DISABLE set already */
2993 + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
2994 + return NULL;
2995 +
2996 if (!CACHE_SYNC(&c->sb))
2997 return NULL;
2998
2999 @@ -854,7 +842,6 @@ void bch_journal_free(struct cache_set *c)
3000 free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
3001 free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
3002 free_fifo(&c->journal.pin);
3003 - free_heap(&c->flush_btree);
3004 }
3005
3006 int bch_journal_alloc(struct cache_set *c)
3007 @@ -869,8 +856,7 @@ int bch_journal_alloc(struct cache_set *c)
3008 j->w[0].c = c;
3009 j->w[1].c = c;
3010
3011 - if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
3012 - !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
3013 + if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
3014 !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
3015 !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
3016 return -ENOMEM;
3017 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3018 index 2409507d7bff..e6c7a84bb1df 100644
3019 --- a/drivers/md/bcache/super.c
3020 +++ b/drivers/md/bcache/super.c
3021 @@ -1180,18 +1180,16 @@ static void cached_dev_free(struct closure *cl)
3022 {
3023 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
3024
3025 - mutex_lock(&bch_register_lock);
3026 -
3027 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
3028 cancel_writeback_rate_update_dwork(dc);
3029
3030 if (!IS_ERR_OR_NULL(dc->writeback_thread))
3031 kthread_stop(dc->writeback_thread);
3032 - if (dc->writeback_write_wq)
3033 - destroy_workqueue(dc->writeback_write_wq);
3034 if (!IS_ERR_OR_NULL(dc->status_update_thread))
3035 kthread_stop(dc->status_update_thread);
3036
3037 + mutex_lock(&bch_register_lock);
3038 +
3039 if (atomic_read(&dc->running))
3040 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
3041 bcache_device_free(&dc->disk);
3042 @@ -1425,8 +1423,6 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
3043
3044 bool bch_cached_dev_error(struct cached_dev *dc)
3045 {
3046 - struct cache_set *c;
3047 -
3048 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
3049 return false;
3050
3051 @@ -1437,21 +1433,6 @@ bool bch_cached_dev_error(struct cached_dev *dc)
3052 pr_err("stop %s: too many IO errors on backing device %s\n",
3053 dc->disk.disk->disk_name, dc->backing_dev_name);
3054
3055 - /*
3056 - * If the cached device is still attached to a cache set,
3057 - * even dc->io_disable is true and no more I/O requests
3058 - * accepted, cache device internal I/O (writeback scan or
3059 - * garbage collection) may still prevent bcache device from
3060 - * being stopped. So here CACHE_SET_IO_DISABLE should be
3061 - * set to c->flags too, to make the internal I/O to cache
3062 - * device rejected and stopped immediately.
3063 - * If c is NULL, that means the bcache device is not attached
3064 - * to any cache set, then no CACHE_SET_IO_DISABLE bit to set.
3065 - */
3066 - c = dc->disk.c;
3067 - if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
3068 - pr_info("CACHE_SET_IO_DISABLE already set");
3069 -
3070 bcache_device_stop(&dc->disk);
3071 return true;
3072 }
3073 @@ -1552,7 +1533,7 @@ static void cache_set_flush(struct closure *cl)
3074 kobject_put(&c->internal);
3075 kobject_del(&c->kobj);
3076
3077 - if (c->gc_thread)
3078 + if (!IS_ERR_OR_NULL(c->gc_thread))
3079 kthread_stop(c->gc_thread);
3080
3081 if (!IS_ERR_OR_NULL(c->root))
3082 diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
3083 index 541454b4f479..5bb81e564ce8 100644
3084 --- a/drivers/md/bcache/sysfs.c
3085 +++ b/drivers/md/bcache/sysfs.c
3086 @@ -175,7 +175,7 @@ SHOW(__bch_cached_dev)
3087 var_print(writeback_percent);
3088 sysfs_hprint(writeback_rate,
3089 wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
3090 - sysfs_hprint(io_errors, atomic_read(&dc->io_errors));
3091 + sysfs_printf(io_errors, "%i", atomic_read(&dc->io_errors));
3092 sysfs_printf(io_error_limit, "%i", dc->error_limit);
3093 sysfs_printf(io_disable, "%i", dc->io_disable);
3094 var_print(writeback_rate_update_seconds);
3095 @@ -426,7 +426,7 @@ static struct attribute *bch_cached_dev_files[] = {
3096 &sysfs_writeback_rate_p_term_inverse,
3097 &sysfs_writeback_rate_minimum,
3098 &sysfs_writeback_rate_debug,
3099 - &sysfs_errors,
3100 + &sysfs_io_errors,
3101 &sysfs_io_error_limit,
3102 &sysfs_io_disable,
3103 &sysfs_dirty_data,
3104 diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
3105 index 00aab6abcfe4..b1f5b7aea872 100644
3106 --- a/drivers/md/bcache/util.h
3107 +++ b/drivers/md/bcache/util.h
3108 @@ -113,8 +113,6 @@ do { \
3109
3110 #define heap_full(h) ((h)->used == (h)->size)
3111
3112 -#define heap_empty(h) ((h)->used == 0)
3113 -
3114 #define DECLARE_FIFO(type, name) \
3115 struct { \
3116 size_t front, back, size, mask; \
3117 diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
3118 index 08c3a9f9676c..ba5395fd386d 100644
3119 --- a/drivers/md/bcache/writeback.c
3120 +++ b/drivers/md/bcache/writeback.c
3121 @@ -708,6 +708,10 @@ static int bch_writeback_thread(void *arg)
3122 }
3123 }
3124
3125 + if (dc->writeback_write_wq) {
3126 + flush_workqueue(dc->writeback_write_wq);
3127 + destroy_workqueue(dc->writeback_write_wq);
3128 + }
3129 cached_dev_put(dc);
3130 wait_for_kthread_stop();
3131
3132 @@ -803,6 +807,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc)
3133 "bcache_writeback");
3134 if (IS_ERR(dc->writeback_thread)) {
3135 cached_dev_put(dc);
3136 + destroy_workqueue(dc->writeback_write_wq);
3137 return PTR_ERR(dc->writeback_thread);
3138 }
3139
3140 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
3141 index dc385b70e4c3..b1d0ae2dbd3d 100644
3142 --- a/drivers/md/dm-bufio.c
3143 +++ b/drivers/md/dm-bufio.c
3144 @@ -1602,9 +1602,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3145 unsigned long freed;
3146
3147 c = container_of(shrink, struct dm_bufio_client, shrinker);
3148 - if (sc->gfp_mask & __GFP_FS)
3149 - dm_bufio_lock(c);
3150 - else if (!dm_bufio_trylock(c))
3151 + if (!dm_bufio_trylock(c))
3152 return SHRINK_STOP;
3153
3154 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
3155 diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
3156 index d8334cd45d7c..4cdde7a02e94 100644
3157 --- a/drivers/md/dm-zoned-metadata.c
3158 +++ b/drivers/md/dm-zoned-metadata.c
3159 @@ -1593,30 +1593,6 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
3160 return zone;
3161 }
3162
3163 -/*
3164 - * Activate a zone (increment its reference count).
3165 - */
3166 -void dmz_activate_zone(struct dm_zone *zone)
3167 -{
3168 - set_bit(DMZ_ACTIVE, &zone->flags);
3169 - atomic_inc(&zone->refcount);
3170 -}
3171 -
3172 -/*
3173 - * Deactivate a zone. This decrement the zone reference counter
3174 - * and clears the active state of the zone once the count reaches 0,
3175 - * indicating that all BIOs to the zone have completed. Returns
3176 - * true if the zone was deactivated.
3177 - */
3178 -void dmz_deactivate_zone(struct dm_zone *zone)
3179 -{
3180 - if (atomic_dec_and_test(&zone->refcount)) {
3181 - WARN_ON(!test_bit(DMZ_ACTIVE, &zone->flags));
3182 - clear_bit_unlock(DMZ_ACTIVE, &zone->flags);
3183 - smp_mb__after_atomic();
3184 - }
3185 -}
3186 -
3187 /*
3188 * Get the zone mapping a chunk, if the chunk is mapped already.
3189 * If no mapping exist and the operation is WRITE, a zone is
3190 diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
3191 index 12419f0bfe78..ed8de49c9a08 100644
3192 --- a/drivers/md/dm-zoned.h
3193 +++ b/drivers/md/dm-zoned.h
3194 @@ -115,7 +115,6 @@ enum {
3195 DMZ_BUF,
3196
3197 /* Zone internal state */
3198 - DMZ_ACTIVE,
3199 DMZ_RECLAIM,
3200 DMZ_SEQ_WRITE_ERR,
3201 };
3202 @@ -128,7 +127,6 @@ enum {
3203 #define dmz_is_empty(z) ((z)->wp_block == 0)
3204 #define dmz_is_offline(z) test_bit(DMZ_OFFLINE, &(z)->flags)
3205 #define dmz_is_readonly(z) test_bit(DMZ_READ_ONLY, &(z)->flags)
3206 -#define dmz_is_active(z) test_bit(DMZ_ACTIVE, &(z)->flags)
3207 #define dmz_in_reclaim(z) test_bit(DMZ_RECLAIM, &(z)->flags)
3208 #define dmz_seq_write_err(z) test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)
3209
3210 @@ -188,8 +186,30 @@ void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
3211 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
3212 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
3213
3214 -void dmz_activate_zone(struct dm_zone *zone);
3215 -void dmz_deactivate_zone(struct dm_zone *zone);
3216 +/*
3217 + * Activate a zone (increment its reference count).
3218 + */
3219 +static inline void dmz_activate_zone(struct dm_zone *zone)
3220 +{
3221 + atomic_inc(&zone->refcount);
3222 +}
3223 +
3224 +/*
3225 + * Deactivate a zone. This decrement the zone reference counter
3226 + * indicating that all BIOs to the zone have completed when the count is 0.
3227 + */
3228 +static inline void dmz_deactivate_zone(struct dm_zone *zone)
3229 +{
3230 + atomic_dec(&zone->refcount);
3231 +}
3232 +
3233 +/*
3234 + * Test if a zone is active, that is, has a refcount > 0.
3235 + */
3236 +static inline bool dmz_is_active(struct dm_zone *zone)
3237 +{
3238 + return atomic_read(&zone->refcount);
3239 +}
3240
3241 int dmz_lock_zone_reclaim(struct dm_zone *zone);
3242 void dmz_unlock_zone_reclaim(struct dm_zone *zone);
3243 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
3244 index f237d6f30752..a147619498df 100644
3245 --- a/drivers/md/raid5.c
3246 +++ b/drivers/md/raid5.c
3247 @@ -7670,7 +7670,7 @@ abort:
3248 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
3249 {
3250 struct r5conf *conf = mddev->private;
3251 - int err = -EEXIST;
3252 + int ret, err = -EEXIST;
3253 int disk;
3254 struct disk_info *p;
3255 int first = 0;
3256 @@ -7685,7 +7685,14 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
3257 * The array is in readonly mode if journal is missing, so no
3258 * write requests running. We should be safe
3259 */
3260 - log_init(conf, rdev, false);
3261 + ret = log_init(conf, rdev, false);
3262 + if (ret)
3263 + return ret;
3264 +
3265 + ret = r5l_start(conf->log);
3266 + if (ret)
3267 + return ret;
3268 +
3269 return 0;
3270 }
3271 if (mddev->recovery_disabled == conf->recovery_disabled)
3272 diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
3273 index 9226dca44e90..93d250db0b6f 100644
3274 --- a/drivers/media/common/videobuf2/videobuf2-core.c
3275 +++ b/drivers/media/common/videobuf2/videobuf2-core.c
3276 @@ -207,6 +207,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
3277 for (plane = 0; plane < vb->num_planes; ++plane) {
3278 unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
3279
3280 + /* Did it wrap around? */
3281 + if (size < vb->planes[plane].length)
3282 + goto free;
3283 +
3284 mem_priv = call_ptr_memop(vb, alloc,
3285 q->alloc_devs[plane] ? : q->dev,
3286 q->dma_attrs, size, q->dma_dir, q->gfp_flags);
3287 diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
3288 index 015e737095cd..e9bfea986cc4 100644
3289 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
3290 +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
3291 @@ -59,7 +59,7 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
3292 gfp_t gfp_flags)
3293 {
3294 unsigned int last_page = 0;
3295 - int size = buf->size;
3296 + unsigned long size = buf->size;
3297
3298 while (size > 0) {
3299 struct page *pages;
3300 diff --git a/drivers/media/dvb-frontends/tua6100.c b/drivers/media/dvb-frontends/tua6100.c
3301 index b233b7be0b84..e6aaf4973aef 100644
3302 --- a/drivers/media/dvb-frontends/tua6100.c
3303 +++ b/drivers/media/dvb-frontends/tua6100.c
3304 @@ -75,8 +75,8 @@ static int tua6100_set_params(struct dvb_frontend *fe)
3305 struct i2c_msg msg1 = { .addr = priv->i2c_address, .flags = 0, .buf = reg1, .len = 4 };
3306 struct i2c_msg msg2 = { .addr = priv->i2c_address, .flags = 0, .buf = reg2, .len = 3 };
3307
3308 -#define _R 4
3309 -#define _P 32
3310 +#define _R_VAL 4
3311 +#define _P_VAL 32
3312 #define _ri 4000000
3313
3314 // setup register 0
3315 @@ -91,14 +91,14 @@ static int tua6100_set_params(struct dvb_frontend *fe)
3316 else
3317 reg1[1] = 0x0c;
3318
3319 - if (_P == 64)
3320 + if (_P_VAL == 64)
3321 reg1[1] |= 0x40;
3322 if (c->frequency >= 1525000)
3323 reg1[1] |= 0x80;
3324
3325 // register 2
3326 - reg2[1] = (_R >> 8) & 0x03;
3327 - reg2[2] = _R;
3328 + reg2[1] = (_R_VAL >> 8) & 0x03;
3329 + reg2[2] = _R_VAL;
3330 if (c->frequency < 1455000)
3331 reg2[1] |= 0x1c;
3332 else if (c->frequency < 1630000)
3333 @@ -110,18 +110,18 @@ static int tua6100_set_params(struct dvb_frontend *fe)
3334 * The N divisor ratio (note: c->frequency is in kHz, but we
3335 * need it in Hz)
3336 */
3337 - prediv = (c->frequency * _R) / (_ri / 1000);
3338 - div = prediv / _P;
3339 + prediv = (c->frequency * _R_VAL) / (_ri / 1000);
3340 + div = prediv / _P_VAL;
3341 reg1[1] |= (div >> 9) & 0x03;
3342 reg1[2] = div >> 1;
3343 reg1[3] = (div << 7);
3344 - priv->frequency = ((div * _P) * (_ri / 1000)) / _R;
3345 + priv->frequency = ((div * _P_VAL) * (_ri / 1000)) / _R_VAL;
3346
3347 // Finally, calculate and store the value for A
3348 - reg1[3] |= (prediv - (div*_P)) & 0x7f;
3349 + reg1[3] |= (prediv - (div*_P_VAL)) & 0x7f;
3350
3351 -#undef _R
3352 -#undef _P
3353 +#undef _R_VAL
3354 +#undef _P_VAL
3355 #undef _ri
3356
3357 if (fe->ops.i2c_gate_ctrl)
3358 diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
3359 index a94eb03d10d4..520b3c3bf48c 100644
3360 --- a/drivers/media/i2c/Makefile
3361 +++ b/drivers/media/i2c/Makefile
3362 @@ -36,7 +36,7 @@ obj-$(CONFIG_VIDEO_ADV748X) += adv748x/
3363 obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
3364 obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
3365 obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
3366 -obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
3367 +obj-$(CONFIG_VIDEO_ADV7511) += adv7511-v4l2.o
3368 obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
3369 obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
3370 obj-$(CONFIG_VIDEO_BT819) += bt819.o
3371 diff --git a/drivers/media/i2c/adv7511-v4l2.c b/drivers/media/i2c/adv7511-v4l2.c
3372 new file mode 100644
3373 index 000000000000..6869bb593a68
3374 --- /dev/null
3375 +++ b/drivers/media/i2c/adv7511-v4l2.c
3376 @@ -0,0 +1,1997 @@
3377 +// SPDX-License-Identifier: GPL-2.0-only
3378 +/*
3379 + * Analog Devices ADV7511 HDMI Transmitter Device Driver
3380 + *
3381 + * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
3382 + */
3383 +
3384 +/*
3385 + * This file is named adv7511-v4l2.c so it doesn't conflict with the Analog
3386 + * Device ADV7511 (config fragment CONFIG_DRM_I2C_ADV7511).
3387 + */
3388 +
3389 +
3390 +#include <linux/kernel.h>
3391 +#include <linux/module.h>
3392 +#include <linux/slab.h>
3393 +#include <linux/i2c.h>
3394 +#include <linux/delay.h>
3395 +#include <linux/videodev2.h>
3396 +#include <linux/gpio.h>
3397 +#include <linux/workqueue.h>
3398 +#include <linux/hdmi.h>
3399 +#include <linux/v4l2-dv-timings.h>
3400 +#include <media/v4l2-device.h>
3401 +#include <media/v4l2-common.h>
3402 +#include <media/v4l2-ctrls.h>
3403 +#include <media/v4l2-dv-timings.h>
3404 +#include <media/i2c/adv7511.h>
3405 +#include <media/cec.h>
3406 +
3407 +static int debug;
3408 +module_param(debug, int, 0644);
3409 +MODULE_PARM_DESC(debug, "debug level (0-2)");
3410 +
3411 +MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
3412 +MODULE_AUTHOR("Hans Verkuil");
3413 +MODULE_LICENSE("GPL v2");
3414 +
3415 +#define MASK_ADV7511_EDID_RDY_INT 0x04
3416 +#define MASK_ADV7511_MSEN_INT 0x40
3417 +#define MASK_ADV7511_HPD_INT 0x80
3418 +
3419 +#define MASK_ADV7511_HPD_DETECT 0x40
3420 +#define MASK_ADV7511_MSEN_DETECT 0x20
3421 +#define MASK_ADV7511_EDID_RDY 0x10
3422 +
3423 +#define EDID_MAX_RETRIES (8)
3424 +#define EDID_DELAY 250
3425 +#define EDID_MAX_SEGM 8
3426 +
3427 +#define ADV7511_MAX_WIDTH 1920
3428 +#define ADV7511_MAX_HEIGHT 1200
3429 +#define ADV7511_MIN_PIXELCLOCK 20000000
3430 +#define ADV7511_MAX_PIXELCLOCK 225000000
3431 +
3432 +#define ADV7511_MAX_ADDRS (3)
3433 +
3434 +/*
3435 +**********************************************************************
3436 +*
3437 +* Arrays with configuration parameters for the ADV7511
3438 +*
3439 +**********************************************************************
3440 +*/
3441 +
3442 +struct i2c_reg_value {
3443 + unsigned char reg;
3444 + unsigned char value;
3445 +};
3446 +
3447 +struct adv7511_state_edid {
3448 + /* total number of blocks */
3449 + u32 blocks;
3450 + /* Number of segments read */
3451 + u32 segments;
3452 + u8 data[EDID_MAX_SEGM * 256];
3453 + /* Number of EDID read retries left */
3454 + unsigned read_retries;
3455 + bool complete;
3456 +};
3457 +
3458 +struct adv7511_state {
3459 + struct adv7511_platform_data pdata;
3460 + struct v4l2_subdev sd;
3461 + struct media_pad pad;
3462 + struct v4l2_ctrl_handler hdl;
3463 + int chip_revision;
3464 + u8 i2c_edid_addr;
3465 + u8 i2c_pktmem_addr;
3466 + u8 i2c_cec_addr;
3467 +
3468 + struct i2c_client *i2c_cec;
3469 + struct cec_adapter *cec_adap;
3470 + u8 cec_addr[ADV7511_MAX_ADDRS];
3471 + u8 cec_valid_addrs;
3472 + bool cec_enabled_adap;
3473 +
3474 + /* Is the adv7511 powered on? */
3475 + bool power_on;
3476 + /* Did we receive hotplug and rx-sense signals? */
3477 + bool have_monitor;
3478 + bool enabled_irq;
3479 + /* timings from s_dv_timings */
3480 + struct v4l2_dv_timings dv_timings;
3481 + u32 fmt_code;
3482 + u32 colorspace;
3483 + u32 ycbcr_enc;
3484 + u32 quantization;
3485 + u32 xfer_func;
3486 + u32 content_type;
3487 + /* controls */
3488 + struct v4l2_ctrl *hdmi_mode_ctrl;
3489 + struct v4l2_ctrl *hotplug_ctrl;
3490 + struct v4l2_ctrl *rx_sense_ctrl;
3491 + struct v4l2_ctrl *have_edid0_ctrl;
3492 + struct v4l2_ctrl *rgb_quantization_range_ctrl;
3493 + struct v4l2_ctrl *content_type_ctrl;
3494 + struct i2c_client *i2c_edid;
3495 + struct i2c_client *i2c_pktmem;
3496 + struct adv7511_state_edid edid;
3497 + /* Running counter of the number of detected EDIDs (for debugging) */
3498 + unsigned edid_detect_counter;
3499 + struct workqueue_struct *work_queue;
3500 + struct delayed_work edid_handler; /* work entry */
3501 +};
3502 +
3503 +static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
3504 +static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
3505 +static void adv7511_setup(struct v4l2_subdev *sd);
3506 +static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
3507 +static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
3508 +
3509 +
3510 +static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
3511 + .type = V4L2_DV_BT_656_1120,
3512 + /* keep this initialization for compatibility with GCC < 4.4.6 */
3513 + .reserved = { 0 },
3514 + V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
3515 + ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
3516 + V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
3517 + V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
3518 + V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
3519 + V4L2_DV_BT_CAP_CUSTOM)
3520 +};
3521 +
3522 +static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
3523 +{
3524 + return container_of(sd, struct adv7511_state, sd);
3525 +}
3526 +
3527 +static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
3528 +{
3529 + return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
3530 +}
3531 +
3532 +/* ------------------------ I2C ----------------------------------------------- */
3533 +
3534 +static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
3535 + u8 command, bool check)
3536 +{
3537 + union i2c_smbus_data data;
3538 +
3539 + if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
3540 + I2C_SMBUS_READ, command,
3541 + I2C_SMBUS_BYTE_DATA, &data))
3542 + return data.byte;
3543 + if (check)
3544 + v4l_err(client, "error reading %02x, %02x\n",
3545 + client->addr, command);
3546 + return -1;
3547 +}
3548 +
3549 +static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
3550 +{
3551 + int i;
3552 + for (i = 0; i < 3; i++) {
3553 + int ret = adv_smbus_read_byte_data_check(client, command, true);
3554 + if (ret >= 0) {
3555 + if (i)
3556 + v4l_err(client, "read ok after %d retries\n", i);
3557 + return ret;
3558 + }
3559 + }
3560 + v4l_err(client, "read failed\n");
3561 + return -1;
3562 +}
3563 +
3564 +static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
3565 +{
3566 + struct i2c_client *client = v4l2_get_subdevdata(sd);
3567 +
3568 + return adv_smbus_read_byte_data(client, reg);
3569 +}
3570 +
3571 +static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
3572 +{
3573 + struct i2c_client *client = v4l2_get_subdevdata(sd);
3574 + int ret;
3575 + int i;
3576 +
3577 + for (i = 0; i < 3; i++) {
3578 + ret = i2c_smbus_write_byte_data(client, reg, val);
3579 + if (ret == 0)
3580 + return 0;
3581 + }
3582 + v4l2_err(sd, "%s: i2c write error\n", __func__);
3583 + return ret;
3584 +}
3585 +
3586 +/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
3587 + and then the value-mask (to be OR-ed). */
3588 +static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
3589 +{
3590 + adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
3591 +}
3592 +
3593 +static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
3594 + u8 command, unsigned length, u8 *values)
3595 +{
3596 + union i2c_smbus_data data;
3597 + int ret;
3598 +
3599 + if (length > I2C_SMBUS_BLOCK_MAX)
3600 + length = I2C_SMBUS_BLOCK_MAX;
3601 + data.block[0] = length;
3602 +
3603 + ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
3604 + I2C_SMBUS_READ, command,
3605 + I2C_SMBUS_I2C_BLOCK_DATA, &data);
3606 + memcpy(values, data.block + 1, length);
3607 + return ret;
3608 +}
3609 +
3610 +static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
3611 +{
3612 + struct adv7511_state *state = get_adv7511_state(sd);
3613 + int i;
3614 + int err = 0;
3615 +
3616 + v4l2_dbg(1, debug, sd, "%s:\n", __func__);
3617 +
3618 + for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
3619 + err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
3620 + I2C_SMBUS_BLOCK_MAX, buf + i);
3621 + if (err)
3622 + v4l2_err(sd, "%s: i2c read error\n", __func__);
3623 +}
3624 +
3625 +static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
3626 +{
3627 + struct adv7511_state *state = get_adv7511_state(sd);
3628 +
3629 + return i2c_smbus_read_byte_data(state->i2c_cec, reg);
3630 +}
3631 +
3632 +static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
3633 +{
3634 + struct adv7511_state *state = get_adv7511_state(sd);
3635 + int ret;
3636 + int i;
3637 +
3638 + for (i = 0; i < 3; i++) {
3639 + ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
3640 + if (ret == 0)
3641 + return 0;
3642 + }
3643 + v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
3644 + return ret;
3645 +}
3646 +
3647 +static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
3648 + u8 val)
3649 +{
3650 + return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
3651 +}
3652 +
3653 +static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
3654 +{
3655 + struct adv7511_state *state = get_adv7511_state(sd);
3656 +
3657 + return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
3658 +}
3659 +
3660 +static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
3661 +{
3662 + struct adv7511_state *state = get_adv7511_state(sd);
3663 + int ret;
3664 + int i;
3665 +
3666 + for (i = 0; i < 3; i++) {
3667 + ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
3668 + if (ret == 0)
3669 + return 0;
3670 + }
3671 + v4l2_err(sd, "%s: i2c write error\n", __func__);
3672 + return ret;
3673 +}
3674 +
3675 +/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
3676 + and then the value-mask (to be OR-ed). */
3677 +static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
3678 +{
3679 + adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
3680 +}
3681 +
3682 +static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
3683 +{
3684 + return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
3685 +}
3686 +
3687 +static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
3688 +{
3689 + return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
3690 +}
3691 +
3692 +static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
3693 +{
3694 + adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
3695 +}
3696 +
3697 +static void adv7511_csc_coeff(struct v4l2_subdev *sd,
3698 + u16 A1, u16 A2, u16 A3, u16 A4,
3699 + u16 B1, u16 B2, u16 B3, u16 B4,
3700 + u16 C1, u16 C2, u16 C3, u16 C4)
3701 +{
3702 + /* A */
3703 + adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
3704 + adv7511_wr(sd, 0x19, A1);
3705 + adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
3706 + adv7511_wr(sd, 0x1B, A2);
3707 + adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
3708 + adv7511_wr(sd, 0x1d, A3);
3709 + adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
3710 + adv7511_wr(sd, 0x1f, A4);
3711 +
3712 + /* B */
3713 + adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
3714 + adv7511_wr(sd, 0x21, B1);
3715 + adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
3716 + adv7511_wr(sd, 0x23, B2);
3717 + adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
3718 + adv7511_wr(sd, 0x25, B3);
3719 + adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
3720 + adv7511_wr(sd, 0x27, B4);
3721 +
3722 + /* C */
3723 + adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
3724 + adv7511_wr(sd, 0x29, C1);
3725 + adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
3726 + adv7511_wr(sd, 0x2B, C2);
3727 + adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
3728 + adv7511_wr(sd, 0x2D, C3);
3729 + adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
3730 + adv7511_wr(sd, 0x2F, C4);
3731 +}
3732 +
3733 +static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
3734 +{
3735 + if (enable) {
3736 + u8 csc_mode = 0;
3737 + adv7511_csc_conversion_mode(sd, csc_mode);
3738 + adv7511_csc_coeff(sd,
3739 + 4096-564, 0, 0, 256,
3740 + 0, 4096-564, 0, 256,
3741 + 0, 0, 4096-564, 256);
3742 + /* enable CSC */
3743 + adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
3744 + /* AVI infoframe: Limited range RGB (16-235) */
3745 + adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
3746 + } else {
3747 + /* disable CSC */
3748 + adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
3749 + /* AVI infoframe: Full range RGB (0-255) */
3750 + adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
3751 + }
3752 +}
3753 +
3754 +static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
3755 +{
3756 + struct adv7511_state *state = get_adv7511_state(sd);
3757 +
3758 + /* Only makes sense for RGB formats */
3759 + if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
3760 + /* so just keep quantization */
3761 + adv7511_csc_rgb_full2limit(sd, false);
3762 + return;
3763 + }
3764 +
3765 + switch (ctrl->val) {
3766 + case V4L2_DV_RGB_RANGE_AUTO:
3767 + /* automatic */
3768 + if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
3769 + /* CE format, RGB limited range (16-235) */
3770 + adv7511_csc_rgb_full2limit(sd, true);
3771 + } else {
3772 + /* not CE format, RGB full range (0-255) */
3773 + adv7511_csc_rgb_full2limit(sd, false);
3774 + }
3775 + break;
3776 + case V4L2_DV_RGB_RANGE_LIMITED:
3777 + /* RGB limited range (16-235) */
3778 + adv7511_csc_rgb_full2limit(sd, true);
3779 + break;
3780 + case V4L2_DV_RGB_RANGE_FULL:
3781 + /* RGB full range (0-255) */
3782 + adv7511_csc_rgb_full2limit(sd, false);
3783 + break;
3784 + }
3785 +}
3786 +
3787 +/* ------------------------------ CTRL OPS ------------------------------ */
3788 +
3789 +static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
3790 +{
3791 + struct v4l2_subdev *sd = to_sd(ctrl);
3792 + struct adv7511_state *state = get_adv7511_state(sd);
3793 +
3794 + v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
3795 +
3796 + if (state->hdmi_mode_ctrl == ctrl) {
3797 + /* Set HDMI or DVI-D */
3798 + adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
3799 + return 0;
3800 + }
3801 + if (state->rgb_quantization_range_ctrl == ctrl) {
3802 + adv7511_set_rgb_quantization_mode(sd, ctrl);
3803 + return 0;
3804 + }
3805 + if (state->content_type_ctrl == ctrl) {
3806 + u8 itc, cn;
3807 +
3808 + state->content_type = ctrl->val;
3809 + itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
3810 + cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
3811 + adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
3812 + adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
3813 + return 0;
3814 + }
3815 +
3816 + return -EINVAL;
3817 +}
3818 +
3819 +static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
3820 + .s_ctrl = adv7511_s_ctrl,
3821 +};
3822 +
3823 +/* ---------------------------- CORE OPS ------------------------------------------- */
3824 +
3825 +#ifdef CONFIG_VIDEO_ADV_DEBUG
3826 +static void adv7511_inv_register(struct v4l2_subdev *sd)
3827 +{
3828 + struct adv7511_state *state = get_adv7511_state(sd);
3829 +
3830 + v4l2_info(sd, "0x000-0x0ff: Main Map\n");
3831 + if (state->i2c_cec)
3832 + v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
3833 +}
3834 +
3835 +static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
3836 +{
3837 + struct adv7511_state *state = get_adv7511_state(sd);
3838 +
3839 + reg->size = 1;
3840 + switch (reg->reg >> 8) {
3841 + case 0:
3842 + reg->val = adv7511_rd(sd, reg->reg & 0xff);
3843 + break;
3844 + case 1:
3845 + if (state->i2c_cec) {
3846 + reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
3847 + break;
3848 + }
3849 + /* fall through */
3850 + default:
3851 + v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
3852 + adv7511_inv_register(sd);
3853 + break;
3854 + }
3855 + return 0;
3856 +}
3857 +
3858 +static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
3859 +{
3860 + struct adv7511_state *state = get_adv7511_state(sd);
3861 +
3862 + switch (reg->reg >> 8) {
3863 + case 0:
3864 + adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
3865 + break;
3866 + case 1:
3867 + if (state->i2c_cec) {
3868 + adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
3869 + break;
3870 + }
3871 + /* fall through */
3872 + default:
3873 + v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
3874 + adv7511_inv_register(sd);
3875 + break;
3876 + }
3877 + return 0;
3878 +}
3879 +#endif
3880 +
3881 +struct adv7511_cfg_read_infoframe {
3882 + const char *desc;
3883 + u8 present_reg;
3884 + u8 present_mask;
3885 + u8 header[3];
3886 + u16 payload_addr;
3887 +};
3888 +
3889 +static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
3890 +{
3891 + u8 csum = 0;
3892 + size_t i;
3893 +
3894 + /* compute checksum */
3895 + for (i = 0; i < size; i++)
3896 + csum += ptr[i];
3897 +
3898 + return 256 - csum;
3899 +}
3900 +
3901 +static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
3902 +{
3903 + struct i2c_client *client = v4l2_get_subdevdata(sd);
3904 + struct device *dev = &client->dev;
3905 + union hdmi_infoframe frame;
3906 + u8 buffer[32];
3907 + u8 len;
3908 + int i;
3909 +
3910 + if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
3911 + v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
3912 + return;
3913 + }
3914 +
3915 + memcpy(buffer, cri->header, sizeof(cri->header));
3916 +
3917 + len = buffer[2];
3918 +
3919 + if (len + 4 > sizeof(buffer)) {
3920 + v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
3921 + return;
3922 + }
3923 +
3924 + if (cri->payload_addr >= 0x100) {
3925 + for (i = 0; i < len; i++)
3926 + buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
3927 + } else {
3928 + for (i = 0; i < len; i++)
3929 + buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
3930 + }
3931 + buffer[3] = 0;
3932 + buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
3933 +
3934 + if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
3935 + v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
3936 + return;
3937 + }
3938 +
3939 + hdmi_infoframe_log(KERN_INFO, dev, &frame);
3940 +}
3941 +
3942 +static void adv7511_log_infoframes(struct v4l2_subdev *sd)
3943 +{
3944 + static const struct adv7511_cfg_read_infoframe cri[] = {
3945 + { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
3946 + { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
3947 + { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
3948 + };
3949 + int i;
3950 +
3951 + for (i = 0; i < ARRAY_SIZE(cri); i++)
3952 + log_infoframe(sd, &cri[i]);
3953 +}
3954 +
3955 +static int adv7511_log_status(struct v4l2_subdev *sd)
3956 +{
3957 + struct adv7511_state *state = get_adv7511_state(sd);
3958 + struct adv7511_state_edid *edid = &state->edid;
3959 + int i;
3960 +
3961 + static const char * const states[] = {
3962 + "in reset",
3963 + "reading EDID",
3964 + "idle",
3965 + "initializing HDCP",
3966 + "HDCP enabled",
3967 + "initializing HDCP repeater",
3968 + "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
3969 + };
3970 + static const char * const errors[] = {
3971 + "no error",
3972 + "bad receiver BKSV",
3973 + "Ri mismatch",
3974 + "Pj mismatch",
3975 + "i2c error",
3976 + "timed out",
3977 + "max repeater cascade exceeded",
3978 + "hash check failed",
3979 + "too many devices",
3980 + "9", "A", "B", "C", "D", "E", "F"
3981 + };
3982 +
3983 + v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
3984 + v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
3985 + (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
3986 + (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
3987 + edid->segments ? "found" : "no",
3988 + edid->blocks);
3989 + v4l2_info(sd, "%s output %s\n",
3990 + (adv7511_rd(sd, 0xaf) & 0x02) ?
3991 + "HDMI" : "DVI-D",
3992 + (adv7511_rd(sd, 0xa1) & 0x3c) ?
3993 + "disabled" : "enabled");
3994 + v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
3995 + states[adv7511_rd(sd, 0xc8) & 0xf],
3996 + errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
3997 + adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
3998 + v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
3999 + if (adv7511_rd(sd, 0xaf) & 0x02) {
4000 + /* HDMI only */
4001 + u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
4002 + u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
4003 + adv7511_rd(sd, 0x02) << 8 |
4004 + adv7511_rd(sd, 0x03);
4005 + u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
4006 + u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
4007 + u32 CTS;
4008 +
4009 + if (manual_cts)
4010 + CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
4011 + adv7511_rd(sd, 0x08) << 8 |
4012 + adv7511_rd(sd, 0x09);
4013 + else
4014 + CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
4015 + adv7511_rd(sd, 0x05) << 8 |
4016 + adv7511_rd(sd, 0x06);
4017 + v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
4018 + manual_cts ? "manual" : "automatic", N, CTS);
4019 + v4l2_info(sd, "VIC: detected %d, sent %d\n",
4020 + vic_detect, vic_sent);
4021 + adv7511_log_infoframes(sd);
4022 + }
4023 + if (state->dv_timings.type == V4L2_DV_BT_656_1120)
4024 + v4l2_print_dv_timings(sd->name, "timings: ",
4025 + &state->dv_timings, false);
4026 + else
4027 + v4l2_info(sd, "no timings set\n");
4028 + v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
4029 +
4030 + if (state->i2c_cec == NULL)
4031 + return 0;
4032 +
4033 + v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
4034 +
4035 + v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
4036 + "enabled" : "disabled");
4037 + if (state->cec_enabled_adap) {
4038 + for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
4039 + bool is_valid = state->cec_valid_addrs & (1 << i);
4040 +
4041 + if (is_valid)
4042 + v4l2_info(sd, "CEC Logical Address: 0x%x\n",
4043 + state->cec_addr[i]);
4044 + }
4045 + }
4046 + v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
4047 + return 0;
4048 +}
4049 +
4050 +/* Power up/down adv7511 */
4051 +static int adv7511_s_power(struct v4l2_subdev *sd, int on)
4052 +{
4053 + struct adv7511_state *state = get_adv7511_state(sd);
4054 + const int retries = 20;
4055 + int i;
4056 +
4057 + v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
4058 +
4059 + state->power_on = on;
4060 +
4061 + if (!on) {
4062 + /* Power down */
4063 + adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
4064 + return true;
4065 + }
4066 +
4067 + /* Power up */
4068 + /* The adv7511 does not always come up immediately.
4069 + Retry multiple times. */
4070 + for (i = 0; i < retries; i++) {
4071 + adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
4072 + if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
4073 + break;
4074 + adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
4075 + msleep(10);
4076 + }
4077 + if (i == retries) {
4078 + v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
4079 + adv7511_s_power(sd, 0);
4080 + return false;
4081 + }
4082 + if (i > 1)
4083 + v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
4084 +
4085 + /* Reserved registers that must be set */
4086 + adv7511_wr(sd, 0x98, 0x03);
4087 + adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
4088 + adv7511_wr(sd, 0x9c, 0x30);
4089 + adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
4090 + adv7511_wr(sd, 0xa2, 0xa4);
4091 + adv7511_wr(sd, 0xa3, 0xa4);
4092 + adv7511_wr(sd, 0xe0, 0xd0);
4093 + adv7511_wr(sd, 0xf9, 0x00);
4094 +
4095 + adv7511_wr(sd, 0x43, state->i2c_edid_addr);
4096 + adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
4097 +
4098 + /* Set number of attempts to read the EDID */
4099 + adv7511_wr(sd, 0xc9, 0xf);
4100 + return true;
4101 +}
4102 +
4103 +#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
4104 +static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
4105 +{
4106 + struct adv7511_state *state = cec_get_drvdata(adap);
4107 + struct v4l2_subdev *sd = &state->sd;
4108 +
4109 + if (state->i2c_cec == NULL)
4110 + return -EIO;
4111 +
4112 + if (!state->cec_enabled_adap && enable) {
4113 + /* power up cec section */
4114 + adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
4115 + /* legacy mode and clear all rx buffers */
4116 + adv7511_cec_write(sd, 0x4a, 0x00);
4117 + adv7511_cec_write(sd, 0x4a, 0x07);
4118 + adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
4119 + /* enabled irqs: */
4120 + /* tx: ready */
4121 + /* tx: arbitration lost */
4122 + /* tx: retry timeout */
4123 + /* rx: ready 1 */
4124 + if (state->enabled_irq)
4125 + adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
4126 + } else if (state->cec_enabled_adap && !enable) {
4127 + if (state->enabled_irq)
4128 + adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
4129 + /* disable address mask 1-3 */
4130 + adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
4131 + /* power down cec section */
4132 + adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
4133 + state->cec_valid_addrs = 0;
4134 + }
4135 + state->cec_enabled_adap = enable;
4136 + return 0;
4137 +}
4138 +
4139 +static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
4140 +{
4141 + struct adv7511_state *state = cec_get_drvdata(adap);
4142 + struct v4l2_subdev *sd = &state->sd;
4143 + unsigned int i, free_idx = ADV7511_MAX_ADDRS;
4144 +
4145 + if (!state->cec_enabled_adap)
4146 + return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
4147 +
4148 + if (addr == CEC_LOG_ADDR_INVALID) {
4149 + adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
4150 + state->cec_valid_addrs = 0;
4151 + return 0;
4152 + }
4153 +
4154 + for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
4155 + bool is_valid = state->cec_valid_addrs & (1 << i);
4156 +
4157 + if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
4158 + free_idx = i;
4159 + if (is_valid && state->cec_addr[i] == addr)
4160 + return 0;
4161 + }
4162 + if (i == ADV7511_MAX_ADDRS) {
4163 + i = free_idx;
4164 + if (i == ADV7511_MAX_ADDRS)
4165 + return -ENXIO;
4166 + }
4167 + state->cec_addr[i] = addr;
4168 + state->cec_valid_addrs |= 1 << i;
4169 +
4170 + switch (i) {
4171 + case 0:
4172 + /* enable address mask 0 */
4173 + adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
4174 + /* set address for mask 0 */
4175 + adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
4176 + break;
4177 + case 1:
4178 + /* enable address mask 1 */
4179 + adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
4180 + /* set address for mask 1 */
4181 + adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
4182 + break;
4183 + case 2:
4184 + /* enable address mask 2 */
4185 + adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
4186 + /* set address for mask 1 */
4187 + adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
4188 + break;
4189 + }
4190 + return 0;
4191 +}
4192 +
4193 +static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
4194 + u32 signal_free_time, struct cec_msg *msg)
4195 +{
4196 + struct adv7511_state *state = cec_get_drvdata(adap);
4197 + struct v4l2_subdev *sd = &state->sd;
4198 + u8 len = msg->len;
4199 + unsigned int i;
4200 +
4201 + v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
4202 +
4203 + if (len > 16) {
4204 + v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
4205 + return -EINVAL;
4206 + }
4207 +
4208 + /*
4209 + * The number of retries is the number of attempts - 1, but retry
4210 + * at least once. It's not clear if a value of 0 is allowed, so
4211 + * let's do at least one retry.
4212 + */
4213 + adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
4214 +
4215 + /* clear cec tx irq status */
4216 + adv7511_wr(sd, 0x97, 0x38);
4217 +
4218 + /* write data */
4219 + for (i = 0; i < len; i++)
4220 + adv7511_cec_write(sd, i, msg->msg[i]);
4221 +
4222 + /* set length (data + header) */
4223 + adv7511_cec_write(sd, 0x10, len);
4224 + /* start transmit, enable tx */
4225 + adv7511_cec_write(sd, 0x11, 0x01);
4226 + return 0;
4227 +}
4228 +
4229 +static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
4230 +{
4231 + struct adv7511_state *state = get_adv7511_state(sd);
4232 +
4233 + if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
4234 + v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
4235 + return;
4236 + }
4237 +
4238 + if (tx_raw_status & 0x10) {
4239 + v4l2_dbg(1, debug, sd,
4240 + "%s: tx raw: arbitration lost\n", __func__);
4241 + cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
4242 + 1, 0, 0, 0);
4243 + return;
4244 + }
4245 + if (tx_raw_status & 0x08) {
4246 + u8 status;
4247 + u8 nack_cnt;
4248 + u8 low_drive_cnt;
4249 +
4250 + v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
4251 + /*
4252 + * We set this status bit since this hardware performs
4253 + * retransmissions.
4254 + */
4255 + status = CEC_TX_STATUS_MAX_RETRIES;
4256 + nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
4257 + if (nack_cnt)
4258 + status |= CEC_TX_STATUS_NACK;
4259 + low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
4260 + if (low_drive_cnt)
4261 + status |= CEC_TX_STATUS_LOW_DRIVE;
4262 + cec_transmit_done(state->cec_adap, status,
4263 + 0, nack_cnt, low_drive_cnt, 0);
4264 + return;
4265 + }
4266 + if (tx_raw_status & 0x20) {
4267 + v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
4268 + cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
4269 + return;
4270 + }
4271 +}
4272 +
4273 +static const struct cec_adap_ops adv7511_cec_adap_ops = {
4274 + .adap_enable = adv7511_cec_adap_enable,
4275 + .adap_log_addr = adv7511_cec_adap_log_addr,
4276 + .adap_transmit = adv7511_cec_adap_transmit,
4277 +};
4278 +#endif
4279 +
4280 +/* Enable interrupts */
4281 +static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
4282 +{
4283 + struct adv7511_state *state = get_adv7511_state(sd);
4284 + u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
4285 + u8 irqs_rd;
4286 + int retries = 100;
4287 +
4288 + v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
4289 +
4290 + if (state->enabled_irq == enable)
4291 + return;
4292 + state->enabled_irq = enable;
4293 +
4294 + /* The datasheet says that the EDID ready interrupt should be
4295 + disabled if there is no hotplug. */
4296 + if (!enable)
4297 + irqs = 0;
4298 + else if (adv7511_have_hotplug(sd))
4299 + irqs |= MASK_ADV7511_EDID_RDY_INT;
4300 +
4301 + /*
4302 + * This i2c write can fail (approx. 1 in 1000 writes). But it
4303 + * is essential that this register is correct, so retry it
4304 + * multiple times.
4305 + *
4306 + * Note that the i2c write does not report an error, but the readback
4307 + * clearly shows the wrong value.
4308 + */
4309 + do {
4310 + adv7511_wr(sd, 0x94, irqs);
4311 + irqs_rd = adv7511_rd(sd, 0x94);
4312 + } while (retries-- && irqs_rd != irqs);
4313 +
4314 + if (irqs_rd != irqs)
4315 + v4l2_err(sd, "Could not set interrupts: hw failure?\n");
4316 +
4317 + adv7511_wr_and_or(sd, 0x95, 0xc0,
4318 + (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
4319 +}
4320 +
4321 +/* Interrupt handler */
4322 +static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
4323 +{
4324 + u8 irq_status;
4325 + u8 cec_irq;
4326 +
4327 + /* disable interrupts to prevent a race condition */
4328 + adv7511_set_isr(sd, false);
4329 + irq_status = adv7511_rd(sd, 0x96);
4330 + cec_irq = adv7511_rd(sd, 0x97);
4331 + /* clear detected interrupts */
4332 + adv7511_wr(sd, 0x96, irq_status);
4333 + adv7511_wr(sd, 0x97, cec_irq);
4334 +
4335 + v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
4336 + irq_status, cec_irq);
4337 +
4338 + if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
4339 + adv7511_check_monitor_present_status(sd);
4340 + if (irq_status & MASK_ADV7511_EDID_RDY_INT)
4341 + adv7511_check_edid_status(sd);
4342 +
4343 +#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
4344 + if (cec_irq & 0x38)
4345 + adv_cec_tx_raw_status(sd, cec_irq);
4346 +
4347 + if (cec_irq & 1) {
4348 + struct adv7511_state *state = get_adv7511_state(sd);
4349 + struct cec_msg msg;
4350 +
4351 + msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
4352 +
4353 + v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
4354 + msg.len);
4355 +
4356 + if (msg.len > 16)
4357 + msg.len = 16;
4358 +
4359 + if (msg.len) {
4360 + u8 i;
4361 +
4362 + for (i = 0; i < msg.len; i++)
4363 + msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
4364 +
4365 + adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
4366 + adv7511_cec_write(sd, 0x4a, 1);
4367 + cec_received_msg(state->cec_adap, &msg);
4368 + }
4369 + }
4370 +#endif
4371 +
4372 + /* enable interrupts */
4373 + adv7511_set_isr(sd, true);
4374 +
4375 + if (handled)
4376 + *handled = true;
4377 + return 0;
4378 +}
4379 +
4380 +static const struct v4l2_subdev_core_ops adv7511_core_ops = {
4381 + .log_status = adv7511_log_status,
4382 +#ifdef CONFIG_VIDEO_ADV_DEBUG
4383 + .g_register = adv7511_g_register,
4384 + .s_register = adv7511_s_register,
4385 +#endif
4386 + .s_power = adv7511_s_power,
4387 + .interrupt_service_routine = adv7511_isr,
4388 +};
4389 +
4390 +/* ------------------------------ VIDEO OPS ------------------------------ */
4391 +
4392 +/* Enable/disable adv7511 output */
4393 +static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
4394 +{
4395 + struct adv7511_state *state = get_adv7511_state(sd);
4396 +
4397 + v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
4398 + adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
4399 + if (enable) {
4400 + adv7511_check_monitor_present_status(sd);
4401 + } else {
4402 + adv7511_s_power(sd, 0);
4403 + state->have_monitor = false;
4404 + }
4405 + return 0;
4406 +}
4407 +
4408 +static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
4409 + struct v4l2_dv_timings *timings)
4410 +{
4411 + struct adv7511_state *state = get_adv7511_state(sd);
4412 + struct v4l2_bt_timings *bt = &timings->bt;
4413 + u32 fps;
4414 +
4415 + v4l2_dbg(1, debug, sd, "%s:\n", __func__);
4416 +
4417 + /* quick sanity check */
4418 + if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
4419 + return -EINVAL;
4420 +
4421 + /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
4422 + if the format is one of the CEA or DMT timings. */
4423 + v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
4424 +
4425 + /* save timings */
4426 + state->dv_timings = *timings;
4427 +
4428 + /* set h/vsync polarities */
4429 + adv7511_wr_and_or(sd, 0x17, 0x9f,
4430 + ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
4431 + ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
4432 +
4433 + fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
4434 + switch (fps) {
4435 + case 24:
4436 + adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
4437 + break;
4438 + case 25:
4439 + adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
4440 + break;
4441 + case 30:
4442 + adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
4443 + break;
4444 + default:
4445 + adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
4446 + break;
4447 + }
4448 +
4449 + /* update quantization range based on new dv_timings */
4450 + adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
4451 +
4452 + return 0;
4453 +}
4454 +
4455 +static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
4456 + struct v4l2_dv_timings *timings)
4457 +{
4458 + struct adv7511_state *state = get_adv7511_state(sd);
4459 +
4460 + v4l2_dbg(1, debug, sd, "%s:\n", __func__);
4461 +
4462 + if (!timings)
4463 + return -EINVAL;
4464 +
4465 + *timings = state->dv_timings;
4466 +
4467 + return 0;
4468 +}
4469 +
4470 +static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
4471 + struct v4l2_enum_dv_timings *timings)
4472 +{
4473 + if (timings->pad != 0)
4474 + return -EINVAL;
4475 +
4476 + return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
4477 +}
4478 +
4479 +static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
4480 + struct v4l2_dv_timings_cap *cap)
4481 +{
4482 + if (cap->pad != 0)
4483 + return -EINVAL;
4484 +
4485 + *cap = adv7511_timings_cap;
4486 + return 0;
4487 +}
4488 +
4489 +static const struct v4l2_subdev_video_ops adv7511_video_ops = {
4490 + .s_stream = adv7511_s_stream,
4491 + .s_dv_timings = adv7511_s_dv_timings,
4492 + .g_dv_timings = adv7511_g_dv_timings,
4493 +};
4494 +
4495 +/* ------------------------------ AUDIO OPS ------------------------------ */
4496 +static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
4497 +{
4498 + v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
4499 +
4500 + if (enable)
4501 + adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
4502 + else
4503 + adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
4504 +
4505 + return 0;
4506 +}
4507 +
4508 +static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
4509 +{
4510 + u32 N;
4511 +
4512 + switch (freq) {
4513 + case 32000: N = 4096; break;
4514 + case 44100: N = 6272; break;
4515 + case 48000: N = 6144; break;
4516 + case 88200: N = 12544; break;
4517 + case 96000: N = 12288; break;
4518 + case 176400: N = 25088; break;
4519 + case 192000: N = 24576; break;
4520 + default:
4521 + return -EINVAL;
4522 + }
4523 +
4524 + /* Set N (used with CTS to regenerate the audio clock) */
4525 + adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
4526 + adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
4527 + adv7511_wr(sd, 0x03, N & 0xff);
4528 +
4529 + return 0;
4530 +}
4531 +
4532 +static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
4533 +{
4534 + u32 i2s_sf;
4535 +
4536 + switch (freq) {
4537 + case 32000: i2s_sf = 0x30; break;
4538 + case 44100: i2s_sf = 0x00; break;
4539 + case 48000: i2s_sf = 0x20; break;
4540 + case 88200: i2s_sf = 0x80; break;
4541 + case 96000: i2s_sf = 0xa0; break;
4542 + case 176400: i2s_sf = 0xc0; break;
4543 + case 192000: i2s_sf = 0xe0; break;
4544 + default:
4545 + return -EINVAL;
4546 + }
4547 +
4548 + /* Set sampling frequency for I2S audio to 48 kHz */
4549 + adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
4550 +
4551 + return 0;
4552 +}
4553 +
4554 +static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
4555 +{
4556 + /* Only 2 channels in use for application */
4557 + adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
4558 + /* Speaker mapping */
4559 + adv7511_wr(sd, 0x76, 0x00);
4560 +
4561 + /* 16 bit audio word length */
4562 + adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
4563 +
4564 + return 0;
4565 +}
4566 +
4567 +static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
4568 + .s_stream = adv7511_s_audio_stream,
4569 + .s_clock_freq = adv7511_s_clock_freq,
4570 + .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
4571 + .s_routing = adv7511_s_routing,
4572 +};
4573 +
4574 +/* ---------------------------- PAD OPS ------------------------------------- */
4575 +
4576 +static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
4577 +{
4578 + struct adv7511_state *state = get_adv7511_state(sd);
4579 +
4580 + memset(edid->reserved, 0, sizeof(edid->reserved));
4581 +
4582 + if (edid->pad != 0)
4583 + return -EINVAL;
4584 +
4585 + if (edid->start_block == 0 && edid->blocks == 0) {
4586 + edid->blocks = state->edid.segments * 2;
4587 + return 0;
4588 + }
4589 +
4590 + if (state->edid.segments == 0)
4591 + return -ENODATA;
4592 +
4593 + if (edid->start_block >= state->edid.segments * 2)
4594 + return -EINVAL;
4595 +
4596 + if (edid->start_block + edid->blocks > state->edid.segments * 2)
4597 + edid->blocks = state->edid.segments * 2 - edid->start_block;
4598 +
4599 + memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
4600 + 128 * edid->blocks);
4601 +
4602 + return 0;
4603 +}
4604 +
4605 +static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
4606 + struct v4l2_subdev_pad_config *cfg,
4607 + struct v4l2_subdev_mbus_code_enum *code)
4608 +{
4609 + if (code->pad != 0)
4610 + return -EINVAL;
4611 +
4612 + switch (code->index) {
4613 + case 0:
4614 + code->code = MEDIA_BUS_FMT_RGB888_1X24;
4615 + break;
4616 + case 1:
4617 + code->code = MEDIA_BUS_FMT_YUYV8_1X16;
4618 + break;
4619 + case 2:
4620 + code->code = MEDIA_BUS_FMT_UYVY8_1X16;
4621 + break;
4622 + default:
4623 + return -EINVAL;
4624 + }
4625 + return 0;
4626 +}
4627 +
4628 +static void adv7511_fill_format(struct adv7511_state *state,
4629 + struct v4l2_mbus_framefmt *format)
4630 +{
4631 + format->width = state->dv_timings.bt.width;
4632 + format->height = state->dv_timings.bt.height;
4633 + format->field = V4L2_FIELD_NONE;
4634 +}
4635 +
4636 +static int adv7511_get_fmt(struct v4l2_subdev *sd,
4637 + struct v4l2_subdev_pad_config *cfg,
4638 + struct v4l2_subdev_format *format)
4639 +{
4640 + struct adv7511_state *state = get_adv7511_state(sd);
4641 +
4642 + if (format->pad != 0)
4643 + return -EINVAL;
4644 +
4645 + memset(&format->format, 0, sizeof(format->format));
4646 + adv7511_fill_format(state, &format->format);
4647 +
4648 + if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
4649 + struct v4l2_mbus_framefmt *fmt;
4650 +
4651 + fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
4652 + format->format.code = fmt->code;
4653 + format->format.colorspace = fmt->colorspace;
4654 + format->format.ycbcr_enc = fmt->ycbcr_enc;
4655 + format->format.quantization = fmt->quantization;
4656 + format->format.xfer_func = fmt->xfer_func;
4657 + } else {
4658 + format->format.code = state->fmt_code;
4659 + format->format.colorspace = state->colorspace;
4660 + format->format.ycbcr_enc = state->ycbcr_enc;
4661 + format->format.quantization = state->quantization;
4662 + format->format.xfer_func = state->xfer_func;
4663 + }
4664 +
4665 + return 0;
4666 +}
4667 +
4668 +static int adv7511_set_fmt(struct v4l2_subdev *sd,
4669 + struct v4l2_subdev_pad_config *cfg,
4670 + struct v4l2_subdev_format *format)
4671 +{
4672 + struct adv7511_state *state = get_adv7511_state(sd);
4673 + /*
4674 + * Bitfield namings come the CEA-861-F standard, table 8 "Auxiliary
4675 + * Video Information (AVI) InfoFrame Format"
4676 + *
4677 + * c = Colorimetry
4678 + * ec = Extended Colorimetry
4679 + * y = RGB or YCbCr
4680 + * q = RGB Quantization Range
4681 + * yq = YCC Quantization Range
4682 + */
4683 + u8 c = HDMI_COLORIMETRY_NONE;
4684 + u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
4685 + u8 y = HDMI_COLORSPACE_RGB;
4686 + u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
4687 + u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
4688 + u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
4689 + u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
4690 +
4691 + if (format->pad != 0)
4692 + return -EINVAL;
4693 + switch (format->format.code) {
4694 + case MEDIA_BUS_FMT_UYVY8_1X16:
4695 + case MEDIA_BUS_FMT_YUYV8_1X16:
4696 + case MEDIA_BUS_FMT_RGB888_1X24:
4697 + break;
4698 + default:
4699 + return -EINVAL;
4700 + }
4701 +
4702 + adv7511_fill_format(state, &format->format);
4703 + if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
4704 + struct v4l2_mbus_framefmt *fmt;
4705 +
4706 + fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
4707 + fmt->code = format->format.code;
4708 + fmt->colorspace = format->format.colorspace;
4709 + fmt->ycbcr_enc = format->format.ycbcr_enc;
4710 + fmt->quantization = format->format.quantization;
4711 + fmt->xfer_func = format->format.xfer_func;
4712 + return 0;
4713 + }
4714 +
4715 + switch (format->format.code) {
4716 + case MEDIA_BUS_FMT_UYVY8_1X16:
4717 + adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
4718 + adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
4719 + y = HDMI_COLORSPACE_YUV422;
4720 + break;
4721 + case MEDIA_BUS_FMT_YUYV8_1X16:
4722 + adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
4723 + adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
4724 + y = HDMI_COLORSPACE_YUV422;
4725 + break;
4726 + case MEDIA_BUS_FMT_RGB888_1X24:
4727 + default:
4728 + adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
4729 + adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
4730 + break;
4731 + }
4732 + state->fmt_code = format->format.code;
4733 + state->colorspace = format->format.colorspace;
4734 + state->ycbcr_enc = format->format.ycbcr_enc;
4735 + state->quantization = format->format.quantization;
4736 + state->xfer_func = format->format.xfer_func;
4737 +
4738 + switch (format->format.colorspace) {
4739 + case V4L2_COLORSPACE_OPRGB:
4740 + c = HDMI_COLORIMETRY_EXTENDED;
4741 + ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
4742 + HDMI_EXTENDED_COLORIMETRY_OPRGB;
4743 + break;
4744 + case V4L2_COLORSPACE_SMPTE170M:
4745 + c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
4746 + if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
4747 + c = HDMI_COLORIMETRY_EXTENDED;
4748 + ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
4749 + }
4750 + break;
4751 + case V4L2_COLORSPACE_REC709:
4752 + c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
4753 + if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
4754 + c = HDMI_COLORIMETRY_EXTENDED;
4755 + ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
4756 + }
4757 + break;
4758 + case V4L2_COLORSPACE_SRGB:
4759 + c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
4760 + ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
4761 + HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
4762 + break;
4763 + case V4L2_COLORSPACE_BT2020:
4764 + c = HDMI_COLORIMETRY_EXTENDED;
4765 + if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
4766 + ec = 5; /* Not yet available in hdmi.h */
4767 + else
4768 + ec = 6; /* Not yet available in hdmi.h */
4769 + break;
4770 + default:
4771 + break;
4772 + }
4773 +
4774 + /*
4775 + * CEA-861-F says that for RGB formats the YCC range must match the
4776 + * RGB range, although sources should ignore the YCC range.
4777 + *
4778 + * The RGB quantization range shouldn't be non-zero if the EDID doesn't
4779 + * have the Q bit set in the Video Capabilities Data Block, however this
4780 + * isn't checked at the moment. The assumption is that the application
4781 + * knows the EDID and can detect this.
4782 + *
4783 + * The same is true for the YCC quantization range: non-standard YCC
4784 + * quantization ranges should only be sent if the EDID has the YQ bit
4785 + * set in the Video Capabilities Data Block.
4786 + */
4787 + switch (format->format.quantization) {
4788 + case V4L2_QUANTIZATION_FULL_RANGE:
4789 + q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
4790 + HDMI_QUANTIZATION_RANGE_FULL;
4791 + yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
4792 + break;
4793 + case V4L2_QUANTIZATION_LIM_RANGE:
4794 + q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
4795 + HDMI_QUANTIZATION_RANGE_LIMITED;
4796 + yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
4797 + break;
4798 + }
4799 +
4800 + adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
4801 + adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
4802 + adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
4803 + adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
4804 + adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
4805 + adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
4806 + adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
4807 +
4808 + return 0;
4809 +}
4810 +
4811 +static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
4812 + .get_edid = adv7511_get_edid,
4813 + .enum_mbus_code = adv7511_enum_mbus_code,
4814 + .get_fmt = adv7511_get_fmt,
4815 + .set_fmt = adv7511_set_fmt,
4816 + .enum_dv_timings = adv7511_enum_dv_timings,
4817 + .dv_timings_cap = adv7511_dv_timings_cap,
4818 +};
4819 +
4820 +/* --------------------- SUBDEV OPS --------------------------------------- */
4821 +
4822 +static const struct v4l2_subdev_ops adv7511_ops = {
4823 + .core = &adv7511_core_ops,
4824 + .pad = &adv7511_pad_ops,
4825 + .video = &adv7511_video_ops,
4826 + .audio = &adv7511_audio_ops,
4827 +};
4828 +
4829 +/* ----------------------------------------------------------------------- */
4830 +static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
4831 +{
4832 + if (debug >= lvl) {
4833 + int i, j;
4834 + v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
4835 + for (i = 0; i < 256; i += 16) {
4836 + u8 b[128];
4837 + u8 *bp = b;
4838 + if (i == 128)
4839 + v4l2_dbg(lvl, debug, sd, "\n");
4840 + for (j = i; j < i + 16; j++) {
4841 + sprintf(bp, "0x%02x, ", buf[j]);
4842 + bp += 6;
4843 + }
4844 + bp[0] = '\0';
4845 + v4l2_dbg(lvl, debug, sd, "%s\n", b);
4846 + }
4847 + }
4848 +}
4849 +
4850 +static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
4851 +{
4852 + struct adv7511_state *state = get_adv7511_state(sd);
4853 + struct adv7511_edid_detect ed;
4854 +
4855 + /* We failed to read the EDID, so send an event for this. */
4856 + ed.present = false;
4857 + ed.segment = adv7511_rd(sd, 0xc4);
4858 + ed.phys_addr = CEC_PHYS_ADDR_INVALID;
4859 + cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
4860 + v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
4861 + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
4862 +}
4863 +
4864 +static void adv7511_edid_handler(struct work_struct *work)
4865 +{
4866 + struct delayed_work *dwork = to_delayed_work(work);
4867 + struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
4868 + struct v4l2_subdev *sd = &state->sd;
4869 +
4870 + v4l2_dbg(1, debug, sd, "%s:\n", __func__);
4871 +
4872 + if (adv7511_check_edid_status(sd)) {
4873 + /* Return if we received the EDID. */
4874 + return;
4875 + }
4876 +
4877 + if (adv7511_have_hotplug(sd)) {
4878 + /* We must retry reading the EDID several times, it is possible
4879 + * that initially the EDID couldn't be read due to i2c errors
4880 + * (DVI connectors are particularly prone to this problem). */
4881 + if (state->edid.read_retries) {
4882 + state->edid.read_retries--;
4883 + v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
4884 + state->have_monitor = false;
4885 + adv7511_s_power(sd, false);
4886 + adv7511_s_power(sd, true);
4887 + queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
4888 + return;
4889 + }
4890 + }
4891 +
4892 + /* We failed to read the EDID, so send an event for this. */
4893 + adv7511_notify_no_edid(sd);
4894 + v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
4895 +}
4896 +
4897 +static void adv7511_audio_setup(struct v4l2_subdev *sd)
4898 +{
4899 + v4l2_dbg(1, debug, sd, "%s\n", __func__);
4900 +
4901 + adv7511_s_i2s_clock_freq(sd, 48000);
4902 + adv7511_s_clock_freq(sd, 48000);
4903 + adv7511_s_routing(sd, 0, 0, 0);
4904 +}
4905 +
4906 +/* Configure hdmi transmitter. */
4907 +static void adv7511_setup(struct v4l2_subdev *sd)
4908 +{
4909 + struct adv7511_state *state = get_adv7511_state(sd);
4910 + v4l2_dbg(1, debug, sd, "%s\n", __func__);
4911 +
4912 + /* Input format: RGB 4:4:4 */
4913 + adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
4914 + /* Output format: RGB 4:4:4 */
4915 + adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
4916 + /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
4917 + adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
4918 + /* Disable pixel repetition */
4919 + adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
4920 + /* Disable CSC */
4921 + adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
4922 + /* Output format: RGB 4:4:4, Active Format Information is valid,
4923 + * underscanned */
4924 + adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
4925 + /* AVI Info frame packet enable, Audio Info frame disable */
4926 + adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
4927 + /* Colorimetry, Active format aspect ratio: same as picure. */
4928 + adv7511_wr(sd, 0x56, 0xa8);
4929 + /* No encryption */
4930 + adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
4931 +
4932 + /* Positive clk edge capture for input video clock */
4933 + adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
4934 +
4935 + adv7511_audio_setup(sd);
4936 +
4937 + v4l2_ctrl_handler_setup(&state->hdl);
4938 +}
4939 +
4940 +static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
4941 +{
4942 + struct adv7511_monitor_detect mdt;
4943 + struct adv7511_state *state = get_adv7511_state(sd);
4944 +
4945 + mdt.present = state->have_monitor;
4946 + v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
4947 +}
4948 +
4949 +static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
4950 +{
4951 + struct adv7511_state *state = get_adv7511_state(sd);
4952 + /* read hotplug and rx-sense state */
4953 + u8 status = adv7511_rd(sd, 0x42);
4954 +
4955 + v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
4956 + __func__,
4957 + status,
4958 + status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
4959 + status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
4960 +
4961 + /* update read only ctrls */
4962 + v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
4963 + v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
4964 +
4965 + if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
4966 + v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
4967 + if (!state->have_monitor) {
4968 + v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
4969 + state->have_monitor = true;
4970 + adv7511_set_isr(sd, true);
4971 + if (!adv7511_s_power(sd, true)) {
4972 + v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
4973 + return;
4974 + }
4975 + adv7511_setup(sd);
4976 + adv7511_notify_monitor_detect(sd);
4977 + state->edid.read_retries = EDID_MAX_RETRIES;
4978 + queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
4979 + }
4980 + } else if (status & MASK_ADV7511_HPD_DETECT) {
4981 + v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
4982 + state->edid.read_retries = EDID_MAX_RETRIES;
4983 + queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
4984 + } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
4985 + v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
4986 + if (state->have_monitor) {
4987 + v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
4988 + state->have_monitor = false;
4989 + adv7511_notify_monitor_detect(sd);
4990 + }
4991 + adv7511_s_power(sd, false);
4992 + memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
4993 + adv7511_notify_no_edid(sd);
4994 + }
4995 +}
4996 +
4997 +static bool edid_block_verify_crc(u8 *edid_block)
4998 +{
4999 + u8 sum = 0;
5000 + int i;
5001 +
5002 + for (i = 0; i < 128; i++)
5003 + sum += edid_block[i];
5004 + return sum == 0;
5005 +}
5006 +
5007 +static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
5008 +{
5009 + struct adv7511_state *state = get_adv7511_state(sd);
5010 + u32 blocks = state->edid.blocks;
5011 + u8 *data = state->edid.data;
5012 +
5013 + if (!edid_block_verify_crc(&data[segment * 256]))
5014 + return false;
5015 + if ((segment + 1) * 2 <= blocks)
5016 + return edid_block_verify_crc(&data[segment * 256 + 128]);
5017 + return true;
5018 +}
5019 +
5020 +static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
5021 +{
5022 + static const u8 hdmi_header[] = {
5023 + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
5024 + };
5025 + struct adv7511_state *state = get_adv7511_state(sd);
5026 + u8 *data = state->edid.data;
5027 +
5028 + if (segment != 0)
5029 + return true;
5030 + return !memcmp(data, hdmi_header, sizeof(hdmi_header));
5031 +}
5032 +
5033 +static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
5034 +{
5035 + struct adv7511_state *state = get_adv7511_state(sd);
5036 + u8 edidRdy = adv7511_rd(sd, 0xc5);
5037 +
5038 + v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
5039 + __func__, EDID_MAX_RETRIES - state->edid.read_retries);
5040 +
5041 + if (state->edid.complete)
5042 + return true;
5043 +
5044 + if (edidRdy & MASK_ADV7511_EDID_RDY) {
5045 + int segment = adv7511_rd(sd, 0xc4);
5046 + struct adv7511_edid_detect ed;
5047 +
5048 + if (segment >= EDID_MAX_SEGM) {
5049 + v4l2_err(sd, "edid segment number too big\n");
5050 + return false;
5051 + }
5052 + v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
5053 + adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
5054 + adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
5055 + if (segment == 0) {
5056 + state->edid.blocks = state->edid.data[0x7e] + 1;
5057 + v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
5058 + }
5059 + if (!edid_verify_crc(sd, segment) ||
5060 + !edid_verify_header(sd, segment)) {
5061 + /* edid crc error, force reread of edid segment */
5062 + v4l2_err(sd, "%s: edid crc or header error\n", __func__);
5063 + state->have_monitor = false;
5064 + adv7511_s_power(sd, false);
5065 + adv7511_s_power(sd, true);
5066 + return false;
5067 + }
5068 + /* one more segment read ok */
5069 + state->edid.segments = segment + 1;
5070 + v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
5071 + if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
5072 + /* Request next EDID segment */
5073 + v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
5074 + adv7511_wr(sd, 0xc9, 0xf);
5075 + adv7511_wr(sd, 0xc4, state->edid.segments);
5076 + state->edid.read_retries = EDID_MAX_RETRIES;
5077 + queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
5078 + return false;
5079 + }
5080 +
5081 + v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
5082 + state->edid.complete = true;
5083 + ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
5084 + state->edid.segments * 256,
5085 + NULL);
5086 + /* report when we have all segments
5087 + but report only for segment 0
5088 + */
5089 + ed.present = true;
5090 + ed.segment = 0;
5091 + state->edid_detect_counter++;
5092 + cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
5093 + v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
5094 + return ed.present;
5095 + }
5096 +
5097 + return false;
5098 +}
5099 +
5100 +static int adv7511_registered(struct v4l2_subdev *sd)
5101 +{
5102 + struct adv7511_state *state = get_adv7511_state(sd);
5103 + struct i2c_client *client = v4l2_get_subdevdata(sd);
5104 + int err;
5105 +
5106 + err = cec_register_adapter(state->cec_adap, &client->dev);
5107 + if (err)
5108 + cec_delete_adapter(state->cec_adap);
5109 + return err;
5110 +}
5111 +
5112 +static void adv7511_unregistered(struct v4l2_subdev *sd)
5113 +{
5114 + struct adv7511_state *state = get_adv7511_state(sd);
5115 +
5116 + cec_unregister_adapter(state->cec_adap);
5117 +}
5118 +
5119 +static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
5120 + .registered = adv7511_registered,
5121 + .unregistered = adv7511_unregistered,
5122 +};
5123 +
5124 +/* ----------------------------------------------------------------------- */
5125 +/* Setup ADV7511 */
5126 +static void adv7511_init_setup(struct v4l2_subdev *sd)
5127 +{
5128 + struct adv7511_state *state = get_adv7511_state(sd);
5129 + struct adv7511_state_edid *edid = &state->edid;
5130 + u32 cec_clk = state->pdata.cec_clk;
5131 + u8 ratio;
5132 +
5133 + v4l2_dbg(1, debug, sd, "%s\n", __func__);
5134 +
5135 + /* clear all interrupts */
5136 + adv7511_wr(sd, 0x96, 0xff);
5137 + adv7511_wr(sd, 0x97, 0xff);
5138 + /*
5139 + * Stop HPD from resetting a lot of registers.
5140 + * It might leave the chip in a partly un-initialized state,
5141 + * in particular with regards to hotplug bounces.
5142 + */
5143 + adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
5144 + memset(edid, 0, sizeof(struct adv7511_state_edid));
5145 + state->have_monitor = false;
5146 + adv7511_set_isr(sd, false);
5147 + adv7511_s_stream(sd, false);
5148 + adv7511_s_audio_stream(sd, false);
5149 +
5150 + if (state->i2c_cec == NULL)
5151 + return;
5152 +
5153 + v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
5154 +
5155 + /* cec soft reset */
5156 + adv7511_cec_write(sd, 0x50, 0x01);
5157 + adv7511_cec_write(sd, 0x50, 0x00);
5158 +
5159 + /* legacy mode */
5160 + adv7511_cec_write(sd, 0x4a, 0x00);
5161 + adv7511_cec_write(sd, 0x4a, 0x07);
5162 +
5163 + if (cec_clk % 750000 != 0)
5164 + v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
5165 + __func__, cec_clk);
5166 +
5167 + ratio = (cec_clk / 750000) - 1;
5168 + adv7511_cec_write(sd, 0x4e, ratio << 2);
5169 +}
5170 +
5171 +static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
5172 +{
5173 + struct adv7511_state *state;
5174 + struct adv7511_platform_data *pdata = client->dev.platform_data;
5175 + struct v4l2_ctrl_handler *hdl;
5176 + struct v4l2_subdev *sd;
5177 + u8 chip_id[2];
5178 + int err = -EIO;
5179 +
5180 + /* Check if the adapter supports the needed features */
5181 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
5182 + return -EIO;
5183 +
5184 + state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
5185 + if (!state)
5186 + return -ENOMEM;
5187 +
5188 + /* Platform data */
5189 + if (!pdata) {
5190 + v4l_err(client, "No platform data!\n");
5191 + return -ENODEV;
5192 + }
5193 + memcpy(&state->pdata, pdata, sizeof(state->pdata));
5194 + state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
5195 + state->colorspace = V4L2_COLORSPACE_SRGB;
5196 +
5197 + sd = &state->sd;
5198 +
5199 + v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
5200 + client->addr << 1);
5201 +
5202 + v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
5203 + sd->internal_ops = &adv7511_int_ops;
5204 +
5205 + hdl = &state->hdl;
5206 + v4l2_ctrl_handler_init(hdl, 10);
5207 + /* add in ascending ID order */
5208 + state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
5209 + V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
5210 + 0, V4L2_DV_TX_MODE_DVI_D);
5211 + state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
5212 + V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
5213 + state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
5214 + V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
5215 + state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
5216 + V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
5217 + state->rgb_quantization_range_ctrl =
5218 + v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
5219 + V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
5220 + 0, V4L2_DV_RGB_RANGE_AUTO);
5221 + state->content_type_ctrl =
5222 + v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
5223 + V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
5224 + 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
5225 + sd->ctrl_handler = hdl;
5226 + if (hdl->error) {
5227 + err = hdl->error;
5228 + goto err_hdl;
5229 + }
5230 + state->pad.flags = MEDIA_PAD_FL_SINK;
5231 + sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
5232 + err = media_entity_pads_init(&sd->entity, 1, &state->pad);
5233 + if (err)
5234 + goto err_hdl;
5235 +
5236 + /* EDID and CEC i2c addr */
5237 + state->i2c_edid_addr = state->pdata.i2c_edid << 1;
5238 + state->i2c_cec_addr = state->pdata.i2c_cec << 1;
5239 + state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
5240 +
5241 + state->chip_revision = adv7511_rd(sd, 0x0);
5242 + chip_id[0] = adv7511_rd(sd, 0xf5);
5243 + chip_id[1] = adv7511_rd(sd, 0xf6);
5244 + if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
5245 + v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
5246 + chip_id[1]);
5247 + err = -EIO;
5248 + goto err_entity;
5249 + }
5250 +
5251 + state->i2c_edid = i2c_new_dummy(client->adapter,
5252 + state->i2c_edid_addr >> 1);
5253 + if (state->i2c_edid == NULL) {
5254 + v4l2_err(sd, "failed to register edid i2c client\n");
5255 + err = -ENOMEM;
5256 + goto err_entity;
5257 + }
5258 +
5259 + adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
5260 + if (state->pdata.cec_clk < 3000000 ||
5261 + state->pdata.cec_clk > 100000000) {
5262 + v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
5263 + __func__, state->pdata.cec_clk);
5264 + state->pdata.cec_clk = 0;
5265 + }
5266 +
5267 + if (state->pdata.cec_clk) {
5268 + state->i2c_cec = i2c_new_dummy(client->adapter,
5269 + state->i2c_cec_addr >> 1);
5270 + if (state->i2c_cec == NULL) {
5271 + v4l2_err(sd, "failed to register cec i2c client\n");
5272 + err = -ENOMEM;
5273 + goto err_unreg_edid;
5274 + }
5275 + adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
5276 + } else {
5277 + adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
5278 + }
5279 +
5280 + state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
5281 + if (state->i2c_pktmem == NULL) {
5282 + v4l2_err(sd, "failed to register pktmem i2c client\n");
5283 + err = -ENOMEM;
5284 + goto err_unreg_cec;
5285 + }
5286 +
5287 + state->work_queue = create_singlethread_workqueue(sd->name);
5288 + if (state->work_queue == NULL) {
5289 + v4l2_err(sd, "could not create workqueue\n");
5290 + err = -ENOMEM;
5291 + goto err_unreg_pktmem;
5292 + }
5293 +
5294 + INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
5295 +
5296 + adv7511_init_setup(sd);
5297 +
5298 +#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
5299 + state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
5300 + state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
5301 + ADV7511_MAX_ADDRS);
5302 + err = PTR_ERR_OR_ZERO(state->cec_adap);
5303 + if (err) {
5304 + destroy_workqueue(state->work_queue);
5305 + goto err_unreg_pktmem;
5306 + }
5307 +#endif
5308 +
5309 + adv7511_set_isr(sd, true);
5310 + adv7511_check_monitor_present_status(sd);
5311 +
5312 + v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
5313 + client->addr << 1, client->adapter->name);
5314 + return 0;
5315 +
5316 +err_unreg_pktmem:
5317 + i2c_unregister_device(state->i2c_pktmem);
5318 +err_unreg_cec:
5319 + if (state->i2c_cec)
5320 + i2c_unregister_device(state->i2c_cec);
5321 +err_unreg_edid:
5322 + i2c_unregister_device(state->i2c_edid);
5323 +err_entity:
5324 + media_entity_cleanup(&sd->entity);
5325 +err_hdl:
5326 + v4l2_ctrl_handler_free(&state->hdl);
5327 + return err;
5328 +}
5329 +
5330 +/* ----------------------------------------------------------------------- */
5331 +
5332 +static int adv7511_remove(struct i2c_client *client)
5333 +{
5334 + struct v4l2_subdev *sd = i2c_get_clientdata(client);
5335 + struct adv7511_state *state = get_adv7511_state(sd);
5336 +
5337 + state->chip_revision = -1;
5338 +
5339 + v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
5340 + client->addr << 1, client->adapter->name);
5341 +
5342 + adv7511_set_isr(sd, false);
5343 + adv7511_init_setup(sd);
5344 + cancel_delayed_work(&state->edid_handler);
5345 + i2c_unregister_device(state->i2c_edid);
5346 + if (state->i2c_cec)
5347 + i2c_unregister_device(state->i2c_cec);
5348 + i2c_unregister_device(state->i2c_pktmem);
5349 + destroy_workqueue(state->work_queue);
5350 + v4l2_device_unregister_subdev(sd);
5351 + media_entity_cleanup(&sd->entity);
5352 + v4l2_ctrl_handler_free(sd->ctrl_handler);
5353 + return 0;
5354 +}
5355 +
5356 +/* ----------------------------------------------------------------------- */
5357 +
5358 +static const struct i2c_device_id adv7511_id[] = {
5359 + { "adv7511", 0 },
5360 + { }
5361 +};
5362 +MODULE_DEVICE_TABLE(i2c, adv7511_id);
5363 +
5364 +static struct i2c_driver adv7511_driver = {
5365 + .driver = {
5366 + .name = "adv7511",
5367 + },
5368 + .probe = adv7511_probe,
5369 + .remove = adv7511_remove,
5370 + .id_table = adv7511_id,
5371 +};
5372 +
5373 +module_i2c_driver(adv7511_driver);
5374 diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
5375 deleted file mode 100644
5376 index 88349b5053cc..000000000000
5377 --- a/drivers/media/i2c/adv7511.c
5378 +++ /dev/null
5379 @@ -1,1992 +0,0 @@
5380 -// SPDX-License-Identifier: GPL-2.0-only
5381 -/*
5382 - * Analog Devices ADV7511 HDMI Transmitter Device Driver
5383 - *
5384 - * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
5385 - */
5386 -
5387 -
5388 -#include <linux/kernel.h>
5389 -#include <linux/module.h>
5390 -#include <linux/slab.h>
5391 -#include <linux/i2c.h>
5392 -#include <linux/delay.h>
5393 -#include <linux/videodev2.h>
5394 -#include <linux/gpio.h>
5395 -#include <linux/workqueue.h>
5396 -#include <linux/hdmi.h>
5397 -#include <linux/v4l2-dv-timings.h>
5398 -#include <media/v4l2-device.h>
5399 -#include <media/v4l2-common.h>
5400 -#include <media/v4l2-ctrls.h>
5401 -#include <media/v4l2-dv-timings.h>
5402 -#include <media/i2c/adv7511.h>
5403 -#include <media/cec.h>
5404 -
5405 -static int debug;
5406 -module_param(debug, int, 0644);
5407 -MODULE_PARM_DESC(debug, "debug level (0-2)");
5408 -
5409 -MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
5410 -MODULE_AUTHOR("Hans Verkuil");
5411 -MODULE_LICENSE("GPL v2");
5412 -
5413 -#define MASK_ADV7511_EDID_RDY_INT 0x04
5414 -#define MASK_ADV7511_MSEN_INT 0x40
5415 -#define MASK_ADV7511_HPD_INT 0x80
5416 -
5417 -#define MASK_ADV7511_HPD_DETECT 0x40
5418 -#define MASK_ADV7511_MSEN_DETECT 0x20
5419 -#define MASK_ADV7511_EDID_RDY 0x10
5420 -
5421 -#define EDID_MAX_RETRIES (8)
5422 -#define EDID_DELAY 250
5423 -#define EDID_MAX_SEGM 8
5424 -
5425 -#define ADV7511_MAX_WIDTH 1920
5426 -#define ADV7511_MAX_HEIGHT 1200
5427 -#define ADV7511_MIN_PIXELCLOCK 20000000
5428 -#define ADV7511_MAX_PIXELCLOCK 225000000
5429 -
5430 -#define ADV7511_MAX_ADDRS (3)
5431 -
5432 -/*
5433 -**********************************************************************
5434 -*
5435 -* Arrays with configuration parameters for the ADV7511
5436 -*
5437 -**********************************************************************
5438 -*/
5439 -
5440 -struct i2c_reg_value {
5441 - unsigned char reg;
5442 - unsigned char value;
5443 -};
5444 -
5445 -struct adv7511_state_edid {
5446 - /* total number of blocks */
5447 - u32 blocks;
5448 - /* Number of segments read */
5449 - u32 segments;
5450 - u8 data[EDID_MAX_SEGM * 256];
5451 - /* Number of EDID read retries left */
5452 - unsigned read_retries;
5453 - bool complete;
5454 -};
5455 -
5456 -struct adv7511_state {
5457 - struct adv7511_platform_data pdata;
5458 - struct v4l2_subdev sd;
5459 - struct media_pad pad;
5460 - struct v4l2_ctrl_handler hdl;
5461 - int chip_revision;
5462 - u8 i2c_edid_addr;
5463 - u8 i2c_pktmem_addr;
5464 - u8 i2c_cec_addr;
5465 -
5466 - struct i2c_client *i2c_cec;
5467 - struct cec_adapter *cec_adap;
5468 - u8 cec_addr[ADV7511_MAX_ADDRS];
5469 - u8 cec_valid_addrs;
5470 - bool cec_enabled_adap;
5471 -
5472 - /* Is the adv7511 powered on? */
5473 - bool power_on;
5474 - /* Did we receive hotplug and rx-sense signals? */
5475 - bool have_monitor;
5476 - bool enabled_irq;
5477 - /* timings from s_dv_timings */
5478 - struct v4l2_dv_timings dv_timings;
5479 - u32 fmt_code;
5480 - u32 colorspace;
5481 - u32 ycbcr_enc;
5482 - u32 quantization;
5483 - u32 xfer_func;
5484 - u32 content_type;
5485 - /* controls */
5486 - struct v4l2_ctrl *hdmi_mode_ctrl;
5487 - struct v4l2_ctrl *hotplug_ctrl;
5488 - struct v4l2_ctrl *rx_sense_ctrl;
5489 - struct v4l2_ctrl *have_edid0_ctrl;
5490 - struct v4l2_ctrl *rgb_quantization_range_ctrl;
5491 - struct v4l2_ctrl *content_type_ctrl;
5492 - struct i2c_client *i2c_edid;
5493 - struct i2c_client *i2c_pktmem;
5494 - struct adv7511_state_edid edid;
5495 - /* Running counter of the number of detected EDIDs (for debugging) */
5496 - unsigned edid_detect_counter;
5497 - struct workqueue_struct *work_queue;
5498 - struct delayed_work edid_handler; /* work entry */
5499 -};
5500 -
5501 -static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
5502 -static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
5503 -static void adv7511_setup(struct v4l2_subdev *sd);
5504 -static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
5505 -static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
5506 -
5507 -
5508 -static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
5509 - .type = V4L2_DV_BT_656_1120,
5510 - /* keep this initialization for compatibility with GCC < 4.4.6 */
5511 - .reserved = { 0 },
5512 - V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
5513 - ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
5514 - V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
5515 - V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
5516 - V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
5517 - V4L2_DV_BT_CAP_CUSTOM)
5518 -};
5519 -
5520 -static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
5521 -{
5522 - return container_of(sd, struct adv7511_state, sd);
5523 -}
5524 -
5525 -static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
5526 -{
5527 - return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
5528 -}
5529 -
5530 -/* ------------------------ I2C ----------------------------------------------- */
5531 -
5532 -static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
5533 - u8 command, bool check)
5534 -{
5535 - union i2c_smbus_data data;
5536 -
5537 - if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
5538 - I2C_SMBUS_READ, command,
5539 - I2C_SMBUS_BYTE_DATA, &data))
5540 - return data.byte;
5541 - if (check)
5542 - v4l_err(client, "error reading %02x, %02x\n",
5543 - client->addr, command);
5544 - return -1;
5545 -}
5546 -
5547 -static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
5548 -{
5549 - int i;
5550 - for (i = 0; i < 3; i++) {
5551 - int ret = adv_smbus_read_byte_data_check(client, command, true);
5552 - if (ret >= 0) {
5553 - if (i)
5554 - v4l_err(client, "read ok after %d retries\n", i);
5555 - return ret;
5556 - }
5557 - }
5558 - v4l_err(client, "read failed\n");
5559 - return -1;
5560 -}
5561 -
5562 -static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
5563 -{
5564 - struct i2c_client *client = v4l2_get_subdevdata(sd);
5565 -
5566 - return adv_smbus_read_byte_data(client, reg);
5567 -}
5568 -
5569 -static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
5570 -{
5571 - struct i2c_client *client = v4l2_get_subdevdata(sd);
5572 - int ret;
5573 - int i;
5574 -
5575 - for (i = 0; i < 3; i++) {
5576 - ret = i2c_smbus_write_byte_data(client, reg, val);
5577 - if (ret == 0)
5578 - return 0;
5579 - }
5580 - v4l2_err(sd, "%s: i2c write error\n", __func__);
5581 - return ret;
5582 -}
5583 -
5584 -/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
5585 - and then the value-mask (to be OR-ed). */
5586 -static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
5587 -{
5588 - adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
5589 -}
5590 -
5591 -static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
5592 - u8 command, unsigned length, u8 *values)
5593 -{
5594 - union i2c_smbus_data data;
5595 - int ret;
5596 -
5597 - if (length > I2C_SMBUS_BLOCK_MAX)
5598 - length = I2C_SMBUS_BLOCK_MAX;
5599 - data.block[0] = length;
5600 -
5601 - ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
5602 - I2C_SMBUS_READ, command,
5603 - I2C_SMBUS_I2C_BLOCK_DATA, &data);
5604 - memcpy(values, data.block + 1, length);
5605 - return ret;
5606 -}
5607 -
5608 -static void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
5609 -{
5610 - struct adv7511_state *state = get_adv7511_state(sd);
5611 - int i;
5612 - int err = 0;
5613 -
5614 - v4l2_dbg(1, debug, sd, "%s:\n", __func__);
5615 -
5616 - for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
5617 - err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
5618 - I2C_SMBUS_BLOCK_MAX, buf + i);
5619 - if (err)
5620 - v4l2_err(sd, "%s: i2c read error\n", __func__);
5621 -}
5622 -
5623 -static inline int adv7511_cec_read(struct v4l2_subdev *sd, u8 reg)
5624 -{
5625 - struct adv7511_state *state = get_adv7511_state(sd);
5626 -
5627 - return i2c_smbus_read_byte_data(state->i2c_cec, reg);
5628 -}
5629 -
5630 -static int adv7511_cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
5631 -{
5632 - struct adv7511_state *state = get_adv7511_state(sd);
5633 - int ret;
5634 - int i;
5635 -
5636 - for (i = 0; i < 3; i++) {
5637 - ret = i2c_smbus_write_byte_data(state->i2c_cec, reg, val);
5638 - if (ret == 0)
5639 - return 0;
5640 - }
5641 - v4l2_err(sd, "%s: I2C Write Problem\n", __func__);
5642 - return ret;
5643 -}
5644 -
5645 -static inline int adv7511_cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask,
5646 - u8 val)
5647 -{
5648 - return adv7511_cec_write(sd, reg, (adv7511_cec_read(sd, reg) & mask) | val);
5649 -}
5650 -
5651 -static int adv7511_pktmem_rd(struct v4l2_subdev *sd, u8 reg)
5652 -{
5653 - struct adv7511_state *state = get_adv7511_state(sd);
5654 -
5655 - return adv_smbus_read_byte_data(state->i2c_pktmem, reg);
5656 -}
5657 -
5658 -static int adv7511_pktmem_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
5659 -{
5660 - struct adv7511_state *state = get_adv7511_state(sd);
5661 - int ret;
5662 - int i;
5663 -
5664 - for (i = 0; i < 3; i++) {
5665 - ret = i2c_smbus_write_byte_data(state->i2c_pktmem, reg, val);
5666 - if (ret == 0)
5667 - return 0;
5668 - }
5669 - v4l2_err(sd, "%s: i2c write error\n", __func__);
5670 - return ret;
5671 -}
5672 -
5673 -/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
5674 - and then the value-mask (to be OR-ed). */
5675 -static inline void adv7511_pktmem_wr_and_or(struct v4l2_subdev *sd, u8 reg, u8 clr_mask, u8 val_mask)
5676 -{
5677 - adv7511_pktmem_wr(sd, reg, (adv7511_pktmem_rd(sd, reg) & clr_mask) | val_mask);
5678 -}
5679 -
5680 -static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
5681 -{
5682 - return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
5683 -}
5684 -
5685 -static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
5686 -{
5687 - return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
5688 -}
5689 -
5690 -static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, u8 mode)
5691 -{
5692 - adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
5693 -}
5694 -
5695 -static void adv7511_csc_coeff(struct v4l2_subdev *sd,
5696 - u16 A1, u16 A2, u16 A3, u16 A4,
5697 - u16 B1, u16 B2, u16 B3, u16 B4,
5698 - u16 C1, u16 C2, u16 C3, u16 C4)
5699 -{
5700 - /* A */
5701 - adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
5702 - adv7511_wr(sd, 0x19, A1);
5703 - adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
5704 - adv7511_wr(sd, 0x1B, A2);
5705 - adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
5706 - adv7511_wr(sd, 0x1d, A3);
5707 - adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
5708 - adv7511_wr(sd, 0x1f, A4);
5709 -
5710 - /* B */
5711 - adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
5712 - adv7511_wr(sd, 0x21, B1);
5713 - adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
5714 - adv7511_wr(sd, 0x23, B2);
5715 - adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
5716 - adv7511_wr(sd, 0x25, B3);
5717 - adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
5718 - adv7511_wr(sd, 0x27, B4);
5719 -
5720 - /* C */
5721 - adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
5722 - adv7511_wr(sd, 0x29, C1);
5723 - adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
5724 - adv7511_wr(sd, 0x2B, C2);
5725 - adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
5726 - adv7511_wr(sd, 0x2D, C3);
5727 - adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
5728 - adv7511_wr(sd, 0x2F, C4);
5729 -}
5730 -
5731 -static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
5732 -{
5733 - if (enable) {
5734 - u8 csc_mode = 0;
5735 - adv7511_csc_conversion_mode(sd, csc_mode);
5736 - adv7511_csc_coeff(sd,
5737 - 4096-564, 0, 0, 256,
5738 - 0, 4096-564, 0, 256,
5739 - 0, 0, 4096-564, 256);
5740 - /* enable CSC */
5741 - adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
5742 - /* AVI infoframe: Limited range RGB (16-235) */
5743 - adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
5744 - } else {
5745 - /* disable CSC */
5746 - adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
5747 - /* AVI infoframe: Full range RGB (0-255) */
5748 - adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
5749 - }
5750 -}
5751 -
5752 -static void adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
5753 -{
5754 - struct adv7511_state *state = get_adv7511_state(sd);
5755 -
5756 - /* Only makes sense for RGB formats */
5757 - if (state->fmt_code != MEDIA_BUS_FMT_RGB888_1X24) {
5758 - /* so just keep quantization */
5759 - adv7511_csc_rgb_full2limit(sd, false);
5760 - return;
5761 - }
5762 -
5763 - switch (ctrl->val) {
5764 - case V4L2_DV_RGB_RANGE_AUTO:
5765 - /* automatic */
5766 - if (state->dv_timings.bt.flags & V4L2_DV_FL_IS_CE_VIDEO) {
5767 - /* CE format, RGB limited range (16-235) */
5768 - adv7511_csc_rgb_full2limit(sd, true);
5769 - } else {
5770 - /* not CE format, RGB full range (0-255) */
5771 - adv7511_csc_rgb_full2limit(sd, false);
5772 - }
5773 - break;
5774 - case V4L2_DV_RGB_RANGE_LIMITED:
5775 - /* RGB limited range (16-235) */
5776 - adv7511_csc_rgb_full2limit(sd, true);
5777 - break;
5778 - case V4L2_DV_RGB_RANGE_FULL:
5779 - /* RGB full range (0-255) */
5780 - adv7511_csc_rgb_full2limit(sd, false);
5781 - break;
5782 - }
5783 -}
5784 -
5785 -/* ------------------------------ CTRL OPS ------------------------------ */
5786 -
5787 -static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
5788 -{
5789 - struct v4l2_subdev *sd = to_sd(ctrl);
5790 - struct adv7511_state *state = get_adv7511_state(sd);
5791 -
5792 - v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
5793 -
5794 - if (state->hdmi_mode_ctrl == ctrl) {
5795 - /* Set HDMI or DVI-D */
5796 - adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
5797 - return 0;
5798 - }
5799 - if (state->rgb_quantization_range_ctrl == ctrl) {
5800 - adv7511_set_rgb_quantization_mode(sd, ctrl);
5801 - return 0;
5802 - }
5803 - if (state->content_type_ctrl == ctrl) {
5804 - u8 itc, cn;
5805 -
5806 - state->content_type = ctrl->val;
5807 - itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
5808 - cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
5809 - adv7511_wr_and_or(sd, 0x57, 0x7f, itc << 7);
5810 - adv7511_wr_and_or(sd, 0x59, 0xcf, cn << 4);
5811 - return 0;
5812 - }
5813 -
5814 - return -EINVAL;
5815 -}
5816 -
5817 -static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
5818 - .s_ctrl = adv7511_s_ctrl,
5819 -};
5820 -
5821 -/* ---------------------------- CORE OPS ------------------------------------------- */
5822 -
5823 -#ifdef CONFIG_VIDEO_ADV_DEBUG
5824 -static void adv7511_inv_register(struct v4l2_subdev *sd)
5825 -{
5826 - struct adv7511_state *state = get_adv7511_state(sd);
5827 -
5828 - v4l2_info(sd, "0x000-0x0ff: Main Map\n");
5829 - if (state->i2c_cec)
5830 - v4l2_info(sd, "0x100-0x1ff: CEC Map\n");
5831 -}
5832 -
5833 -static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
5834 -{
5835 - struct adv7511_state *state = get_adv7511_state(sd);
5836 -
5837 - reg->size = 1;
5838 - switch (reg->reg >> 8) {
5839 - case 0:
5840 - reg->val = adv7511_rd(sd, reg->reg & 0xff);
5841 - break;
5842 - case 1:
5843 - if (state->i2c_cec) {
5844 - reg->val = adv7511_cec_read(sd, reg->reg & 0xff);
5845 - break;
5846 - }
5847 - /* fall through */
5848 - default:
5849 - v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
5850 - adv7511_inv_register(sd);
5851 - break;
5852 - }
5853 - return 0;
5854 -}
5855 -
5856 -static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
5857 -{
5858 - struct adv7511_state *state = get_adv7511_state(sd);
5859 -
5860 - switch (reg->reg >> 8) {
5861 - case 0:
5862 - adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
5863 - break;
5864 - case 1:
5865 - if (state->i2c_cec) {
5866 - adv7511_cec_write(sd, reg->reg & 0xff, reg->val & 0xff);
5867 - break;
5868 - }
5869 - /* fall through */
5870 - default:
5871 - v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
5872 - adv7511_inv_register(sd);
5873 - break;
5874 - }
5875 - return 0;
5876 -}
5877 -#endif
5878 -
5879 -struct adv7511_cfg_read_infoframe {
5880 - const char *desc;
5881 - u8 present_reg;
5882 - u8 present_mask;
5883 - u8 header[3];
5884 - u16 payload_addr;
5885 -};
5886 -
5887 -static u8 hdmi_infoframe_checksum(u8 *ptr, size_t size)
5888 -{
5889 - u8 csum = 0;
5890 - size_t i;
5891 -
5892 - /* compute checksum */
5893 - for (i = 0; i < size; i++)
5894 - csum += ptr[i];
5895 -
5896 - return 256 - csum;
5897 -}
5898 -
5899 -static void log_infoframe(struct v4l2_subdev *sd, const struct adv7511_cfg_read_infoframe *cri)
5900 -{
5901 - struct i2c_client *client = v4l2_get_subdevdata(sd);
5902 - struct device *dev = &client->dev;
5903 - union hdmi_infoframe frame;
5904 - u8 buffer[32];
5905 - u8 len;
5906 - int i;
5907 -
5908 - if (!(adv7511_rd(sd, cri->present_reg) & cri->present_mask)) {
5909 - v4l2_info(sd, "%s infoframe not transmitted\n", cri->desc);
5910 - return;
5911 - }
5912 -
5913 - memcpy(buffer, cri->header, sizeof(cri->header));
5914 -
5915 - len = buffer[2];
5916 -
5917 - if (len + 4 > sizeof(buffer)) {
5918 - v4l2_err(sd, "%s: invalid %s infoframe length %d\n", __func__, cri->desc, len);
5919 - return;
5920 - }
5921 -
5922 - if (cri->payload_addr >= 0x100) {
5923 - for (i = 0; i < len; i++)
5924 - buffer[i + 4] = adv7511_pktmem_rd(sd, cri->payload_addr + i - 0x100);
5925 - } else {
5926 - for (i = 0; i < len; i++)
5927 - buffer[i + 4] = adv7511_rd(sd, cri->payload_addr + i);
5928 - }
5929 - buffer[3] = 0;
5930 - buffer[3] = hdmi_infoframe_checksum(buffer, len + 4);
5931 -
5932 - if (hdmi_infoframe_unpack(&frame, buffer) < 0) {
5933 - v4l2_err(sd, "%s: unpack of %s infoframe failed\n", __func__, cri->desc);
5934 - return;
5935 - }
5936 -
5937 - hdmi_infoframe_log(KERN_INFO, dev, &frame);
5938 -}
5939 -
5940 -static void adv7511_log_infoframes(struct v4l2_subdev *sd)
5941 -{
5942 - static const struct adv7511_cfg_read_infoframe cri[] = {
5943 - { "AVI", 0x44, 0x10, { 0x82, 2, 13 }, 0x55 },
5944 - { "Audio", 0x44, 0x08, { 0x84, 1, 10 }, 0x73 },
5945 - { "SDP", 0x40, 0x40, { 0x83, 1, 25 }, 0x103 },
5946 - };
5947 - int i;
5948 -
5949 - for (i = 0; i < ARRAY_SIZE(cri); i++)
5950 - log_infoframe(sd, &cri[i]);
5951 -}
5952 -
5953 -static int adv7511_log_status(struct v4l2_subdev *sd)
5954 -{
5955 - struct adv7511_state *state = get_adv7511_state(sd);
5956 - struct adv7511_state_edid *edid = &state->edid;
5957 - int i;
5958 -
5959 - static const char * const states[] = {
5960 - "in reset",
5961 - "reading EDID",
5962 - "idle",
5963 - "initializing HDCP",
5964 - "HDCP enabled",
5965 - "initializing HDCP repeater",
5966 - "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
5967 - };
5968 - static const char * const errors[] = {
5969 - "no error",
5970 - "bad receiver BKSV",
5971 - "Ri mismatch",
5972 - "Pj mismatch",
5973 - "i2c error",
5974 - "timed out",
5975 - "max repeater cascade exceeded",
5976 - "hash check failed",
5977 - "too many devices",
5978 - "9", "A", "B", "C", "D", "E", "F"
5979 - };
5980 -
5981 - v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
5982 - v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
5983 - (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
5984 - (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
5985 - edid->segments ? "found" : "no",
5986 - edid->blocks);
5987 - v4l2_info(sd, "%s output %s\n",
5988 - (adv7511_rd(sd, 0xaf) & 0x02) ?
5989 - "HDMI" : "DVI-D",
5990 - (adv7511_rd(sd, 0xa1) & 0x3c) ?
5991 - "disabled" : "enabled");
5992 - v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
5993 - states[adv7511_rd(sd, 0xc8) & 0xf],
5994 - errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
5995 - adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
5996 - v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
5997 - if (adv7511_rd(sd, 0xaf) & 0x02) {
5998 - /* HDMI only */
5999 - u8 manual_cts = adv7511_rd(sd, 0x0a) & 0x80;
6000 - u32 N = (adv7511_rd(sd, 0x01) & 0xf) << 16 |
6001 - adv7511_rd(sd, 0x02) << 8 |
6002 - adv7511_rd(sd, 0x03);
6003 - u8 vic_detect = adv7511_rd(sd, 0x3e) >> 2;
6004 - u8 vic_sent = adv7511_rd(sd, 0x3d) & 0x3f;
6005 - u32 CTS;
6006 -
6007 - if (manual_cts)
6008 - CTS = (adv7511_rd(sd, 0x07) & 0xf) << 16 |
6009 - adv7511_rd(sd, 0x08) << 8 |
6010 - adv7511_rd(sd, 0x09);
6011 - else
6012 - CTS = (adv7511_rd(sd, 0x04) & 0xf) << 16 |
6013 - adv7511_rd(sd, 0x05) << 8 |
6014 - adv7511_rd(sd, 0x06);
6015 - v4l2_info(sd, "CTS %s mode: N %d, CTS %d\n",
6016 - manual_cts ? "manual" : "automatic", N, CTS);
6017 - v4l2_info(sd, "VIC: detected %d, sent %d\n",
6018 - vic_detect, vic_sent);
6019 - adv7511_log_infoframes(sd);
6020 - }
6021 - if (state->dv_timings.type == V4L2_DV_BT_656_1120)
6022 - v4l2_print_dv_timings(sd->name, "timings: ",
6023 - &state->dv_timings, false);
6024 - else
6025 - v4l2_info(sd, "no timings set\n");
6026 - v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
6027 -
6028 - if (state->i2c_cec == NULL)
6029 - return 0;
6030 -
6031 - v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
6032 -
6033 - v4l2_info(sd, "CEC: %s\n", state->cec_enabled_adap ?
6034 - "enabled" : "disabled");
6035 - if (state->cec_enabled_adap) {
6036 - for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
6037 - bool is_valid = state->cec_valid_addrs & (1 << i);
6038 -
6039 - if (is_valid)
6040 - v4l2_info(sd, "CEC Logical Address: 0x%x\n",
6041 - state->cec_addr[i]);
6042 - }
6043 - }
6044 - v4l2_info(sd, "i2c pktmem addr: 0x%x\n", state->i2c_pktmem_addr);
6045 - return 0;
6046 -}
6047 -
6048 -/* Power up/down adv7511 */
6049 -static int adv7511_s_power(struct v4l2_subdev *sd, int on)
6050 -{
6051 - struct adv7511_state *state = get_adv7511_state(sd);
6052 - const int retries = 20;
6053 - int i;
6054 -
6055 - v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
6056 -
6057 - state->power_on = on;
6058 -
6059 - if (!on) {
6060 - /* Power down */
6061 - adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
6062 - return true;
6063 - }
6064 -
6065 - /* Power up */
6066 - /* The adv7511 does not always come up immediately.
6067 - Retry multiple times. */
6068 - for (i = 0; i < retries; i++) {
6069 - adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
6070 - if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
6071 - break;
6072 - adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
6073 - msleep(10);
6074 - }
6075 - if (i == retries) {
6076 - v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
6077 - adv7511_s_power(sd, 0);
6078 - return false;
6079 - }
6080 - if (i > 1)
6081 - v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
6082 -
6083 - /* Reserved registers that must be set */
6084 - adv7511_wr(sd, 0x98, 0x03);
6085 - adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
6086 - adv7511_wr(sd, 0x9c, 0x30);
6087 - adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
6088 - adv7511_wr(sd, 0xa2, 0xa4);
6089 - adv7511_wr(sd, 0xa3, 0xa4);
6090 - adv7511_wr(sd, 0xe0, 0xd0);
6091 - adv7511_wr(sd, 0xf9, 0x00);
6092 -
6093 - adv7511_wr(sd, 0x43, state->i2c_edid_addr);
6094 - adv7511_wr(sd, 0x45, state->i2c_pktmem_addr);
6095 -
6096 - /* Set number of attempts to read the EDID */
6097 - adv7511_wr(sd, 0xc9, 0xf);
6098 - return true;
6099 -}
6100 -
6101 -#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
6102 -static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
6103 -{
6104 - struct adv7511_state *state = cec_get_drvdata(adap);
6105 - struct v4l2_subdev *sd = &state->sd;
6106 -
6107 - if (state->i2c_cec == NULL)
6108 - return -EIO;
6109 -
6110 - if (!state->cec_enabled_adap && enable) {
6111 - /* power up cec section */
6112 - adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x01);
6113 - /* legacy mode and clear all rx buffers */
6114 - adv7511_cec_write(sd, 0x4a, 0x00);
6115 - adv7511_cec_write(sd, 0x4a, 0x07);
6116 - adv7511_cec_write_and_or(sd, 0x11, 0xfe, 0); /* initially disable tx */
6117 - /* enabled irqs: */
6118 - /* tx: ready */
6119 - /* tx: arbitration lost */
6120 - /* tx: retry timeout */
6121 - /* rx: ready 1 */
6122 - if (state->enabled_irq)
6123 - adv7511_wr_and_or(sd, 0x95, 0xc0, 0x39);
6124 - } else if (state->cec_enabled_adap && !enable) {
6125 - if (state->enabled_irq)
6126 - adv7511_wr_and_or(sd, 0x95, 0xc0, 0x00);
6127 - /* disable address mask 1-3 */
6128 - adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0x00);
6129 - /* power down cec section */
6130 - adv7511_cec_write_and_or(sd, 0x4e, 0xfc, 0x00);
6131 - state->cec_valid_addrs = 0;
6132 - }
6133 - state->cec_enabled_adap = enable;
6134 - return 0;
6135 -}
6136 -
6137 -static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
6138 -{
6139 - struct adv7511_state *state = cec_get_drvdata(adap);
6140 - struct v4l2_subdev *sd = &state->sd;
6141 - unsigned int i, free_idx = ADV7511_MAX_ADDRS;
6142 -
6143 - if (!state->cec_enabled_adap)
6144 - return addr == CEC_LOG_ADDR_INVALID ? 0 : -EIO;
6145 -
6146 - if (addr == CEC_LOG_ADDR_INVALID) {
6147 - adv7511_cec_write_and_or(sd, 0x4b, 0x8f, 0);
6148 - state->cec_valid_addrs = 0;
6149 - return 0;
6150 - }
6151 -
6152 - for (i = 0; i < ADV7511_MAX_ADDRS; i++) {
6153 - bool is_valid = state->cec_valid_addrs & (1 << i);
6154 -
6155 - if (free_idx == ADV7511_MAX_ADDRS && !is_valid)
6156 - free_idx = i;
6157 - if (is_valid && state->cec_addr[i] == addr)
6158 - return 0;
6159 - }
6160 - if (i == ADV7511_MAX_ADDRS) {
6161 - i = free_idx;
6162 - if (i == ADV7511_MAX_ADDRS)
6163 - return -ENXIO;
6164 - }
6165 - state->cec_addr[i] = addr;
6166 - state->cec_valid_addrs |= 1 << i;
6167 -
6168 - switch (i) {
6169 - case 0:
6170 - /* enable address mask 0 */
6171 - adv7511_cec_write_and_or(sd, 0x4b, 0xef, 0x10);
6172 - /* set address for mask 0 */
6173 - adv7511_cec_write_and_or(sd, 0x4c, 0xf0, addr);
6174 - break;
6175 - case 1:
6176 - /* enable address mask 1 */
6177 - adv7511_cec_write_and_or(sd, 0x4b, 0xdf, 0x20);
6178 - /* set address for mask 1 */
6179 - adv7511_cec_write_and_or(sd, 0x4c, 0x0f, addr << 4);
6180 - break;
6181 - case 2:
6182 - /* enable address mask 2 */
6183 - adv7511_cec_write_and_or(sd, 0x4b, 0xbf, 0x40);
6184 - /* set address for mask 1 */
6185 - adv7511_cec_write_and_or(sd, 0x4d, 0xf0, addr);
6186 - break;
6187 - }
6188 - return 0;
6189 -}
6190 -
6191 -static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
6192 - u32 signal_free_time, struct cec_msg *msg)
6193 -{
6194 - struct adv7511_state *state = cec_get_drvdata(adap);
6195 - struct v4l2_subdev *sd = &state->sd;
6196 - u8 len = msg->len;
6197 - unsigned int i;
6198 -
6199 - v4l2_dbg(1, debug, sd, "%s: len %d\n", __func__, len);
6200 -
6201 - if (len > 16) {
6202 - v4l2_err(sd, "%s: len exceeded 16 (%d)\n", __func__, len);
6203 - return -EINVAL;
6204 - }
6205 -
6206 - /*
6207 - * The number of retries is the number of attempts - 1, but retry
6208 - * at least once. It's not clear if a value of 0 is allowed, so
6209 - * let's do at least one retry.
6210 - */
6211 - adv7511_cec_write_and_or(sd, 0x12, ~0x70, max(1, attempts - 1) << 4);
6212 -
6213 - /* clear cec tx irq status */
6214 - adv7511_wr(sd, 0x97, 0x38);
6215 -
6216 - /* write data */
6217 - for (i = 0; i < len; i++)
6218 - adv7511_cec_write(sd, i, msg->msg[i]);
6219 -
6220 - /* set length (data + header) */
6221 - adv7511_cec_write(sd, 0x10, len);
6222 - /* start transmit, enable tx */
6223 - adv7511_cec_write(sd, 0x11, 0x01);
6224 - return 0;
6225 -}
6226 -
6227 -static void adv_cec_tx_raw_status(struct v4l2_subdev *sd, u8 tx_raw_status)
6228 -{
6229 - struct adv7511_state *state = get_adv7511_state(sd);
6230 -
6231 - if ((adv7511_cec_read(sd, 0x11) & 0x01) == 0) {
6232 - v4l2_dbg(1, debug, sd, "%s: tx raw: tx disabled\n", __func__);
6233 - return;
6234 - }
6235 -
6236 - if (tx_raw_status & 0x10) {
6237 - v4l2_dbg(1, debug, sd,
6238 - "%s: tx raw: arbitration lost\n", __func__);
6239 - cec_transmit_done(state->cec_adap, CEC_TX_STATUS_ARB_LOST,
6240 - 1, 0, 0, 0);
6241 - return;
6242 - }
6243 - if (tx_raw_status & 0x08) {
6244 - u8 status;
6245 - u8 nack_cnt;
6246 - u8 low_drive_cnt;
6247 -
6248 - v4l2_dbg(1, debug, sd, "%s: tx raw: retry failed\n", __func__);
6249 - /*
6250 - * We set this status bit since this hardware performs
6251 - * retransmissions.
6252 - */
6253 - status = CEC_TX_STATUS_MAX_RETRIES;
6254 - nack_cnt = adv7511_cec_read(sd, 0x14) & 0xf;
6255 - if (nack_cnt)
6256 - status |= CEC_TX_STATUS_NACK;
6257 - low_drive_cnt = adv7511_cec_read(sd, 0x14) >> 4;
6258 - if (low_drive_cnt)
6259 - status |= CEC_TX_STATUS_LOW_DRIVE;
6260 - cec_transmit_done(state->cec_adap, status,
6261 - 0, nack_cnt, low_drive_cnt, 0);
6262 - return;
6263 - }
6264 - if (tx_raw_status & 0x20) {
6265 - v4l2_dbg(1, debug, sd, "%s: tx raw: ready ok\n", __func__);
6266 - cec_transmit_done(state->cec_adap, CEC_TX_STATUS_OK, 0, 0, 0, 0);
6267 - return;
6268 - }
6269 -}
6270 -
6271 -static const struct cec_adap_ops adv7511_cec_adap_ops = {
6272 - .adap_enable = adv7511_cec_adap_enable,
6273 - .adap_log_addr = adv7511_cec_adap_log_addr,
6274 - .adap_transmit = adv7511_cec_adap_transmit,
6275 -};
6276 -#endif
6277 -
6278 -/* Enable interrupts */
6279 -static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
6280 -{
6281 - struct adv7511_state *state = get_adv7511_state(sd);
6282 - u8 irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
6283 - u8 irqs_rd;
6284 - int retries = 100;
6285 -
6286 - v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
6287 -
6288 - if (state->enabled_irq == enable)
6289 - return;
6290 - state->enabled_irq = enable;
6291 -
6292 - /* The datasheet says that the EDID ready interrupt should be
6293 - disabled if there is no hotplug. */
6294 - if (!enable)
6295 - irqs = 0;
6296 - else if (adv7511_have_hotplug(sd))
6297 - irqs |= MASK_ADV7511_EDID_RDY_INT;
6298 -
6299 - /*
6300 - * This i2c write can fail (approx. 1 in 1000 writes). But it
6301 - * is essential that this register is correct, so retry it
6302 - * multiple times.
6303 - *
6304 - * Note that the i2c write does not report an error, but the readback
6305 - * clearly shows the wrong value.
6306 - */
6307 - do {
6308 - adv7511_wr(sd, 0x94, irqs);
6309 - irqs_rd = adv7511_rd(sd, 0x94);
6310 - } while (retries-- && irqs_rd != irqs);
6311 -
6312 - if (irqs_rd != irqs)
6313 - v4l2_err(sd, "Could not set interrupts: hw failure?\n");
6314 -
6315 - adv7511_wr_and_or(sd, 0x95, 0xc0,
6316 - (state->cec_enabled_adap && enable) ? 0x39 : 0x00);
6317 -}
6318 -
6319 -/* Interrupt handler */
6320 -static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
6321 -{
6322 - u8 irq_status;
6323 - u8 cec_irq;
6324 -
6325 - /* disable interrupts to prevent a race condition */
6326 - adv7511_set_isr(sd, false);
6327 - irq_status = adv7511_rd(sd, 0x96);
6328 - cec_irq = adv7511_rd(sd, 0x97);
6329 - /* clear detected interrupts */
6330 - adv7511_wr(sd, 0x96, irq_status);
6331 - adv7511_wr(sd, 0x97, cec_irq);
6332 -
6333 - v4l2_dbg(1, debug, sd, "%s: irq 0x%x, cec-irq 0x%x\n", __func__,
6334 - irq_status, cec_irq);
6335 -
6336 - if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
6337 - adv7511_check_monitor_present_status(sd);
6338 - if (irq_status & MASK_ADV7511_EDID_RDY_INT)
6339 - adv7511_check_edid_status(sd);
6340 -
6341 -#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
6342 - if (cec_irq & 0x38)
6343 - adv_cec_tx_raw_status(sd, cec_irq);
6344 -
6345 - if (cec_irq & 1) {
6346 - struct adv7511_state *state = get_adv7511_state(sd);
6347 - struct cec_msg msg;
6348 -
6349 - msg.len = adv7511_cec_read(sd, 0x25) & 0x1f;
6350 -
6351 - v4l2_dbg(1, debug, sd, "%s: cec msg len %d\n", __func__,
6352 - msg.len);
6353 -
6354 - if (msg.len > 16)
6355 - msg.len = 16;
6356 -
6357 - if (msg.len) {
6358 - u8 i;
6359 -
6360 - for (i = 0; i < msg.len; i++)
6361 - msg.msg[i] = adv7511_cec_read(sd, i + 0x15);
6362 -
6363 - adv7511_cec_write(sd, 0x4a, 0); /* toggle to re-enable rx 1 */
6364 - adv7511_cec_write(sd, 0x4a, 1);
6365 - cec_received_msg(state->cec_adap, &msg);
6366 - }
6367 - }
6368 -#endif
6369 -
6370 - /* enable interrupts */
6371 - adv7511_set_isr(sd, true);
6372 -
6373 - if (handled)
6374 - *handled = true;
6375 - return 0;
6376 -}
6377 -
6378 -static const struct v4l2_subdev_core_ops adv7511_core_ops = {
6379 - .log_status = adv7511_log_status,
6380 -#ifdef CONFIG_VIDEO_ADV_DEBUG
6381 - .g_register = adv7511_g_register,
6382 - .s_register = adv7511_s_register,
6383 -#endif
6384 - .s_power = adv7511_s_power,
6385 - .interrupt_service_routine = adv7511_isr,
6386 -};
6387 -
6388 -/* ------------------------------ VIDEO OPS ------------------------------ */
6389 -
6390 -/* Enable/disable adv7511 output */
6391 -static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
6392 -{
6393 - struct adv7511_state *state = get_adv7511_state(sd);
6394 -
6395 - v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
6396 - adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
6397 - if (enable) {
6398 - adv7511_check_monitor_present_status(sd);
6399 - } else {
6400 - adv7511_s_power(sd, 0);
6401 - state->have_monitor = false;
6402 - }
6403 - return 0;
6404 -}
6405 -
6406 -static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
6407 - struct v4l2_dv_timings *timings)
6408 -{
6409 - struct adv7511_state *state = get_adv7511_state(sd);
6410 - struct v4l2_bt_timings *bt = &timings->bt;
6411 - u32 fps;
6412 -
6413 - v4l2_dbg(1, debug, sd, "%s:\n", __func__);
6414 -
6415 - /* quick sanity check */
6416 - if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
6417 - return -EINVAL;
6418 -
6419 - /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
6420 - if the format is one of the CEA or DMT timings. */
6421 - v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
6422 -
6423 - /* save timings */
6424 - state->dv_timings = *timings;
6425 -
6426 - /* set h/vsync polarities */
6427 - adv7511_wr_and_or(sd, 0x17, 0x9f,
6428 - ((bt->polarities & V4L2_DV_VSYNC_POS_POL) ? 0 : 0x40) |
6429 - ((bt->polarities & V4L2_DV_HSYNC_POS_POL) ? 0 : 0x20));
6430 -
6431 - fps = (u32)bt->pixelclock / (V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt));
6432 - switch (fps) {
6433 - case 24:
6434 - adv7511_wr_and_or(sd, 0xfb, 0xf9, 1 << 1);
6435 - break;
6436 - case 25:
6437 - adv7511_wr_and_or(sd, 0xfb, 0xf9, 2 << 1);
6438 - break;
6439 - case 30:
6440 - adv7511_wr_and_or(sd, 0xfb, 0xf9, 3 << 1);
6441 - break;
6442 - default:
6443 - adv7511_wr_and_or(sd, 0xfb, 0xf9, 0);
6444 - break;
6445 - }
6446 -
6447 - /* update quantization range based on new dv_timings */
6448 - adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
6449 -
6450 - return 0;
6451 -}
6452 -
6453 -static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
6454 - struct v4l2_dv_timings *timings)
6455 -{
6456 - struct adv7511_state *state = get_adv7511_state(sd);
6457 -
6458 - v4l2_dbg(1, debug, sd, "%s:\n", __func__);
6459 -
6460 - if (!timings)
6461 - return -EINVAL;
6462 -
6463 - *timings = state->dv_timings;
6464 -
6465 - return 0;
6466 -}
6467 -
6468 -static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
6469 - struct v4l2_enum_dv_timings *timings)
6470 -{
6471 - if (timings->pad != 0)
6472 - return -EINVAL;
6473 -
6474 - return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
6475 -}
6476 -
6477 -static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
6478 - struct v4l2_dv_timings_cap *cap)
6479 -{
6480 - if (cap->pad != 0)
6481 - return -EINVAL;
6482 -
6483 - *cap = adv7511_timings_cap;
6484 - return 0;
6485 -}
6486 -
6487 -static const struct v4l2_subdev_video_ops adv7511_video_ops = {
6488 - .s_stream = adv7511_s_stream,
6489 - .s_dv_timings = adv7511_s_dv_timings,
6490 - .g_dv_timings = adv7511_g_dv_timings,
6491 -};
6492 -
6493 -/* ------------------------------ AUDIO OPS ------------------------------ */
6494 -static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
6495 -{
6496 - v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
6497 -
6498 - if (enable)
6499 - adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
6500 - else
6501 - adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
6502 -
6503 - return 0;
6504 -}
6505 -
6506 -static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
6507 -{
6508 - u32 N;
6509 -
6510 - switch (freq) {
6511 - case 32000: N = 4096; break;
6512 - case 44100: N = 6272; break;
6513 - case 48000: N = 6144; break;
6514 - case 88200: N = 12544; break;
6515 - case 96000: N = 12288; break;
6516 - case 176400: N = 25088; break;
6517 - case 192000: N = 24576; break;
6518 - default:
6519 - return -EINVAL;
6520 - }
6521 -
6522 - /* Set N (used with CTS to regenerate the audio clock) */
6523 - adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
6524 - adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
6525 - adv7511_wr(sd, 0x03, N & 0xff);
6526 -
6527 - return 0;
6528 -}
6529 -
6530 -static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
6531 -{
6532 - u32 i2s_sf;
6533 -
6534 - switch (freq) {
6535 - case 32000: i2s_sf = 0x30; break;
6536 - case 44100: i2s_sf = 0x00; break;
6537 - case 48000: i2s_sf = 0x20; break;
6538 - case 88200: i2s_sf = 0x80; break;
6539 - case 96000: i2s_sf = 0xa0; break;
6540 - case 176400: i2s_sf = 0xc0; break;
6541 - case 192000: i2s_sf = 0xe0; break;
6542 - default:
6543 - return -EINVAL;
6544 - }
6545 -
6546 - /* Set sampling frequency for I2S audio to 48 kHz */
6547 - adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
6548 -
6549 - return 0;
6550 -}
6551 -
6552 -static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
6553 -{
6554 - /* Only 2 channels in use for application */
6555 - adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
6556 - /* Speaker mapping */
6557 - adv7511_wr(sd, 0x76, 0x00);
6558 -
6559 - /* 16 bit audio word length */
6560 - adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
6561 -
6562 - return 0;
6563 -}
6564 -
6565 -static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
6566 - .s_stream = adv7511_s_audio_stream,
6567 - .s_clock_freq = adv7511_s_clock_freq,
6568 - .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
6569 - .s_routing = adv7511_s_routing,
6570 -};
6571 -
6572 -/* ---------------------------- PAD OPS ------------------------------------- */
6573 -
6574 -static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
6575 -{
6576 - struct adv7511_state *state = get_adv7511_state(sd);
6577 -
6578 - memset(edid->reserved, 0, sizeof(edid->reserved));
6579 -
6580 - if (edid->pad != 0)
6581 - return -EINVAL;
6582 -
6583 - if (edid->start_block == 0 && edid->blocks == 0) {
6584 - edid->blocks = state->edid.segments * 2;
6585 - return 0;
6586 - }
6587 -
6588 - if (state->edid.segments == 0)
6589 - return -ENODATA;
6590 -
6591 - if (edid->start_block >= state->edid.segments * 2)
6592 - return -EINVAL;
6593 -
6594 - if (edid->start_block + edid->blocks > state->edid.segments * 2)
6595 - edid->blocks = state->edid.segments * 2 - edid->start_block;
6596 -
6597 - memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
6598 - 128 * edid->blocks);
6599 -
6600 - return 0;
6601 -}
6602 -
6603 -static int adv7511_enum_mbus_code(struct v4l2_subdev *sd,
6604 - struct v4l2_subdev_pad_config *cfg,
6605 - struct v4l2_subdev_mbus_code_enum *code)
6606 -{
6607 - if (code->pad != 0)
6608 - return -EINVAL;
6609 -
6610 - switch (code->index) {
6611 - case 0:
6612 - code->code = MEDIA_BUS_FMT_RGB888_1X24;
6613 - break;
6614 - case 1:
6615 - code->code = MEDIA_BUS_FMT_YUYV8_1X16;
6616 - break;
6617 - case 2:
6618 - code->code = MEDIA_BUS_FMT_UYVY8_1X16;
6619 - break;
6620 - default:
6621 - return -EINVAL;
6622 - }
6623 - return 0;
6624 -}
6625 -
6626 -static void adv7511_fill_format(struct adv7511_state *state,
6627 - struct v4l2_mbus_framefmt *format)
6628 -{
6629 - format->width = state->dv_timings.bt.width;
6630 - format->height = state->dv_timings.bt.height;
6631 - format->field = V4L2_FIELD_NONE;
6632 -}
6633 -
6634 -static int adv7511_get_fmt(struct v4l2_subdev *sd,
6635 - struct v4l2_subdev_pad_config *cfg,
6636 - struct v4l2_subdev_format *format)
6637 -{
6638 - struct adv7511_state *state = get_adv7511_state(sd);
6639 -
6640 - if (format->pad != 0)
6641 - return -EINVAL;
6642 -
6643 - memset(&format->format, 0, sizeof(format->format));
6644 - adv7511_fill_format(state, &format->format);
6645 -
6646 - if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
6647 - struct v4l2_mbus_framefmt *fmt;
6648 -
6649 - fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
6650 - format->format.code = fmt->code;
6651 - format->format.colorspace = fmt->colorspace;
6652 - format->format.ycbcr_enc = fmt->ycbcr_enc;
6653 - format->format.quantization = fmt->quantization;
6654 - format->format.xfer_func = fmt->xfer_func;
6655 - } else {
6656 - format->format.code = state->fmt_code;
6657 - format->format.colorspace = state->colorspace;
6658 - format->format.ycbcr_enc = state->ycbcr_enc;
6659 - format->format.quantization = state->quantization;
6660 - format->format.xfer_func = state->xfer_func;
6661 - }
6662 -
6663 - return 0;
6664 -}
6665 -
6666 -static int adv7511_set_fmt(struct v4l2_subdev *sd,
6667 - struct v4l2_subdev_pad_config *cfg,
6668 - struct v4l2_subdev_format *format)
6669 -{
6670 - struct adv7511_state *state = get_adv7511_state(sd);
6671 - /*
6672 - * Bitfield namings come the CEA-861-F standard, table 8 "Auxiliary
6673 - * Video Information (AVI) InfoFrame Format"
6674 - *
6675 - * c = Colorimetry
6676 - * ec = Extended Colorimetry
6677 - * y = RGB or YCbCr
6678 - * q = RGB Quantization Range
6679 - * yq = YCC Quantization Range
6680 - */
6681 - u8 c = HDMI_COLORIMETRY_NONE;
6682 - u8 ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
6683 - u8 y = HDMI_COLORSPACE_RGB;
6684 - u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
6685 - u8 yq = HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
6686 - u8 itc = state->content_type != V4L2_DV_IT_CONTENT_TYPE_NO_ITC;
6687 - u8 cn = itc ? state->content_type : V4L2_DV_IT_CONTENT_TYPE_GRAPHICS;
6688 -
6689 - if (format->pad != 0)
6690 - return -EINVAL;
6691 - switch (format->format.code) {
6692 - case MEDIA_BUS_FMT_UYVY8_1X16:
6693 - case MEDIA_BUS_FMT_YUYV8_1X16:
6694 - case MEDIA_BUS_FMT_RGB888_1X24:
6695 - break;
6696 - default:
6697 - return -EINVAL;
6698 - }
6699 -
6700 - adv7511_fill_format(state, &format->format);
6701 - if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
6702 - struct v4l2_mbus_framefmt *fmt;
6703 -
6704 - fmt = v4l2_subdev_get_try_format(sd, cfg, format->pad);
6705 - fmt->code = format->format.code;
6706 - fmt->colorspace = format->format.colorspace;
6707 - fmt->ycbcr_enc = format->format.ycbcr_enc;
6708 - fmt->quantization = format->format.quantization;
6709 - fmt->xfer_func = format->format.xfer_func;
6710 - return 0;
6711 - }
6712 -
6713 - switch (format->format.code) {
6714 - case MEDIA_BUS_FMT_UYVY8_1X16:
6715 - adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
6716 - adv7511_wr_and_or(sd, 0x16, 0x03, 0xb8);
6717 - y = HDMI_COLORSPACE_YUV422;
6718 - break;
6719 - case MEDIA_BUS_FMT_YUYV8_1X16:
6720 - adv7511_wr_and_or(sd, 0x15, 0xf0, 0x01);
6721 - adv7511_wr_and_or(sd, 0x16, 0x03, 0xbc);
6722 - y = HDMI_COLORSPACE_YUV422;
6723 - break;
6724 - case MEDIA_BUS_FMT_RGB888_1X24:
6725 - default:
6726 - adv7511_wr_and_or(sd, 0x15, 0xf0, 0x00);
6727 - adv7511_wr_and_or(sd, 0x16, 0x03, 0x00);
6728 - break;
6729 - }
6730 - state->fmt_code = format->format.code;
6731 - state->colorspace = format->format.colorspace;
6732 - state->ycbcr_enc = format->format.ycbcr_enc;
6733 - state->quantization = format->format.quantization;
6734 - state->xfer_func = format->format.xfer_func;
6735 -
6736 - switch (format->format.colorspace) {
6737 - case V4L2_COLORSPACE_OPRGB:
6738 - c = HDMI_COLORIMETRY_EXTENDED;
6739 - ec = y ? HDMI_EXTENDED_COLORIMETRY_OPYCC_601 :
6740 - HDMI_EXTENDED_COLORIMETRY_OPRGB;
6741 - break;
6742 - case V4L2_COLORSPACE_SMPTE170M:
6743 - c = y ? HDMI_COLORIMETRY_ITU_601 : HDMI_COLORIMETRY_NONE;
6744 - if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV601) {
6745 - c = HDMI_COLORIMETRY_EXTENDED;
6746 - ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
6747 - }
6748 - break;
6749 - case V4L2_COLORSPACE_REC709:
6750 - c = y ? HDMI_COLORIMETRY_ITU_709 : HDMI_COLORIMETRY_NONE;
6751 - if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_XV709) {
6752 - c = HDMI_COLORIMETRY_EXTENDED;
6753 - ec = HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
6754 - }
6755 - break;
6756 - case V4L2_COLORSPACE_SRGB:
6757 - c = y ? HDMI_COLORIMETRY_EXTENDED : HDMI_COLORIMETRY_NONE;
6758 - ec = y ? HDMI_EXTENDED_COLORIMETRY_S_YCC_601 :
6759 - HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
6760 - break;
6761 - case V4L2_COLORSPACE_BT2020:
6762 - c = HDMI_COLORIMETRY_EXTENDED;
6763 - if (y && format->format.ycbcr_enc == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
6764 - ec = 5; /* Not yet available in hdmi.h */
6765 - else
6766 - ec = 6; /* Not yet available in hdmi.h */
6767 - break;
6768 - default:
6769 - break;
6770 - }
6771 -
6772 - /*
6773 - * CEA-861-F says that for RGB formats the YCC range must match the
6774 - * RGB range, although sources should ignore the YCC range.
6775 - *
6776 - * The RGB quantization range shouldn't be non-zero if the EDID doesn't
6777 - * have the Q bit set in the Video Capabilities Data Block, however this
6778 - * isn't checked at the moment. The assumption is that the application
6779 - * knows the EDID and can detect this.
6780 - *
6781 - * The same is true for the YCC quantization range: non-standard YCC
6782 - * quantization ranges should only be sent if the EDID has the YQ bit
6783 - * set in the Video Capabilities Data Block.
6784 - */
6785 - switch (format->format.quantization) {
6786 - case V4L2_QUANTIZATION_FULL_RANGE:
6787 - q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
6788 - HDMI_QUANTIZATION_RANGE_FULL;
6789 - yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
6790 - break;
6791 - case V4L2_QUANTIZATION_LIM_RANGE:
6792 - q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
6793 - HDMI_QUANTIZATION_RANGE_LIMITED;
6794 - yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
6795 - break;
6796 - }
6797 -
6798 - adv7511_wr_and_or(sd, 0x4a, 0xbf, 0);
6799 - adv7511_wr_and_or(sd, 0x55, 0x9f, y << 5);
6800 - adv7511_wr_and_or(sd, 0x56, 0x3f, c << 6);
6801 - adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
6802 - adv7511_wr_and_or(sd, 0x59, 0x0f, (yq << 6) | (cn << 4));
6803 - adv7511_wr_and_or(sd, 0x4a, 0xff, 1);
6804 - adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
6805 -
6806 - return 0;
6807 -}
6808 -
6809 -static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
6810 - .get_edid = adv7511_get_edid,
6811 - .enum_mbus_code = adv7511_enum_mbus_code,
6812 - .get_fmt = adv7511_get_fmt,
6813 - .set_fmt = adv7511_set_fmt,
6814 - .enum_dv_timings = adv7511_enum_dv_timings,
6815 - .dv_timings_cap = adv7511_dv_timings_cap,
6816 -};
6817 -
6818 -/* --------------------- SUBDEV OPS --------------------------------------- */
6819 -
6820 -static const struct v4l2_subdev_ops adv7511_ops = {
6821 - .core = &adv7511_core_ops,
6822 - .pad = &adv7511_pad_ops,
6823 - .video = &adv7511_video_ops,
6824 - .audio = &adv7511_audio_ops,
6825 -};
6826 -
6827 -/* ----------------------------------------------------------------------- */
6828 -static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, u8 *buf)
6829 -{
6830 - if (debug >= lvl) {
6831 - int i, j;
6832 - v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
6833 - for (i = 0; i < 256; i += 16) {
6834 - u8 b[128];
6835 - u8 *bp = b;
6836 - if (i == 128)
6837 - v4l2_dbg(lvl, debug, sd, "\n");
6838 - for (j = i; j < i + 16; j++) {
6839 - sprintf(bp, "0x%02x, ", buf[j]);
6840 - bp += 6;
6841 - }
6842 - bp[0] = '\0';
6843 - v4l2_dbg(lvl, debug, sd, "%s\n", b);
6844 - }
6845 - }
6846 -}
6847 -
6848 -static void adv7511_notify_no_edid(struct v4l2_subdev *sd)
6849 -{
6850 - struct adv7511_state *state = get_adv7511_state(sd);
6851 - struct adv7511_edid_detect ed;
6852 -
6853 - /* We failed to read the EDID, so send an event for this. */
6854 - ed.present = false;
6855 - ed.segment = adv7511_rd(sd, 0xc4);
6856 - ed.phys_addr = CEC_PHYS_ADDR_INVALID;
6857 - cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
6858 - v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
6859 - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x0);
6860 -}
6861 -
6862 -static void adv7511_edid_handler(struct work_struct *work)
6863 -{
6864 - struct delayed_work *dwork = to_delayed_work(work);
6865 - struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
6866 - struct v4l2_subdev *sd = &state->sd;
6867 -
6868 - v4l2_dbg(1, debug, sd, "%s:\n", __func__);
6869 -
6870 - if (adv7511_check_edid_status(sd)) {
6871 - /* Return if we received the EDID. */
6872 - return;
6873 - }
6874 -
6875 - if (adv7511_have_hotplug(sd)) {
6876 - /* We must retry reading the EDID several times, it is possible
6877 - * that initially the EDID couldn't be read due to i2c errors
6878 - * (DVI connectors are particularly prone to this problem). */
6879 - if (state->edid.read_retries) {
6880 - state->edid.read_retries--;
6881 - v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
6882 - state->have_monitor = false;
6883 - adv7511_s_power(sd, false);
6884 - adv7511_s_power(sd, true);
6885 - queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
6886 - return;
6887 - }
6888 - }
6889 -
6890 - /* We failed to read the EDID, so send an event for this. */
6891 - adv7511_notify_no_edid(sd);
6892 - v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
6893 -}
6894 -
6895 -static void adv7511_audio_setup(struct v4l2_subdev *sd)
6896 -{
6897 - v4l2_dbg(1, debug, sd, "%s\n", __func__);
6898 -
6899 - adv7511_s_i2s_clock_freq(sd, 48000);
6900 - adv7511_s_clock_freq(sd, 48000);
6901 - adv7511_s_routing(sd, 0, 0, 0);
6902 -}
6903 -
6904 -/* Configure hdmi transmitter. */
6905 -static void adv7511_setup(struct v4l2_subdev *sd)
6906 -{
6907 - struct adv7511_state *state = get_adv7511_state(sd);
6908 - v4l2_dbg(1, debug, sd, "%s\n", __func__);
6909 -
6910 - /* Input format: RGB 4:4:4 */
6911 - adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
6912 - /* Output format: RGB 4:4:4 */
6913 - adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
6914 - /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
6915 - adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
6916 - /* Disable pixel repetition */
6917 - adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
6918 - /* Disable CSC */
6919 - adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
6920 - /* Output format: RGB 4:4:4, Active Format Information is valid,
6921 - * underscanned */
6922 - adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
6923 - /* AVI Info frame packet enable, Audio Info frame disable */
6924 - adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
6925 - /* Colorimetry, Active format aspect ratio: same as picure. */
6926 - adv7511_wr(sd, 0x56, 0xa8);
6927 - /* No encryption */
6928 - adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
6929 -
6930 - /* Positive clk edge capture for input video clock */
6931 - adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
6932 -
6933 - adv7511_audio_setup(sd);
6934 -
6935 - v4l2_ctrl_handler_setup(&state->hdl);
6936 -}
6937 -
6938 -static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
6939 -{
6940 - struct adv7511_monitor_detect mdt;
6941 - struct adv7511_state *state = get_adv7511_state(sd);
6942 -
6943 - mdt.present = state->have_monitor;
6944 - v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
6945 -}
6946 -
6947 -static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
6948 -{
6949 - struct adv7511_state *state = get_adv7511_state(sd);
6950 - /* read hotplug and rx-sense state */
6951 - u8 status = adv7511_rd(sd, 0x42);
6952 -
6953 - v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
6954 - __func__,
6955 - status,
6956 - status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
6957 - status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
6958 -
6959 - /* update read only ctrls */
6960 - v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
6961 - v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
6962 -
6963 - if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
6964 - v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
6965 - if (!state->have_monitor) {
6966 - v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
6967 - state->have_monitor = true;
6968 - adv7511_set_isr(sd, true);
6969 - if (!adv7511_s_power(sd, true)) {
6970 - v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
6971 - return;
6972 - }
6973 - adv7511_setup(sd);
6974 - adv7511_notify_monitor_detect(sd);
6975 - state->edid.read_retries = EDID_MAX_RETRIES;
6976 - queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
6977 - }
6978 - } else if (status & MASK_ADV7511_HPD_DETECT) {
6979 - v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
6980 - state->edid.read_retries = EDID_MAX_RETRIES;
6981 - queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
6982 - } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
6983 - v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
6984 - if (state->have_monitor) {
6985 - v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
6986 - state->have_monitor = false;
6987 - adv7511_notify_monitor_detect(sd);
6988 - }
6989 - adv7511_s_power(sd, false);
6990 - memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
6991 - adv7511_notify_no_edid(sd);
6992 - }
6993 -}
6994 -
6995 -static bool edid_block_verify_crc(u8 *edid_block)
6996 -{
6997 - u8 sum = 0;
6998 - int i;
6999 -
7000 - for (i = 0; i < 128; i++)
7001 - sum += edid_block[i];
7002 - return sum == 0;
7003 -}
7004 -
7005 -static bool edid_verify_crc(struct v4l2_subdev *sd, u32 segment)
7006 -{
7007 - struct adv7511_state *state = get_adv7511_state(sd);
7008 - u32 blocks = state->edid.blocks;
7009 - u8 *data = state->edid.data;
7010 -
7011 - if (!edid_block_verify_crc(&data[segment * 256]))
7012 - return false;
7013 - if ((segment + 1) * 2 <= blocks)
7014 - return edid_block_verify_crc(&data[segment * 256 + 128]);
7015 - return true;
7016 -}
7017 -
7018 -static bool edid_verify_header(struct v4l2_subdev *sd, u32 segment)
7019 -{
7020 - static const u8 hdmi_header[] = {
7021 - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
7022 - };
7023 - struct adv7511_state *state = get_adv7511_state(sd);
7024 - u8 *data = state->edid.data;
7025 -
7026 - if (segment != 0)
7027 - return true;
7028 - return !memcmp(data, hdmi_header, sizeof(hdmi_header));
7029 -}
7030 -
7031 -static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
7032 -{
7033 - struct adv7511_state *state = get_adv7511_state(sd);
7034 - u8 edidRdy = adv7511_rd(sd, 0xc5);
7035 -
7036 - v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
7037 - __func__, EDID_MAX_RETRIES - state->edid.read_retries);
7038 -
7039 - if (state->edid.complete)
7040 - return true;
7041 -
7042 - if (edidRdy & MASK_ADV7511_EDID_RDY) {
7043 - int segment = adv7511_rd(sd, 0xc4);
7044 - struct adv7511_edid_detect ed;
7045 -
7046 - if (segment >= EDID_MAX_SEGM) {
7047 - v4l2_err(sd, "edid segment number too big\n");
7048 - return false;
7049 - }
7050 - v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
7051 - adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
7052 - adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
7053 - if (segment == 0) {
7054 - state->edid.blocks = state->edid.data[0x7e] + 1;
7055 - v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
7056 - }
7057 - if (!edid_verify_crc(sd, segment) ||
7058 - !edid_verify_header(sd, segment)) {
7059 - /* edid crc error, force reread of edid segment */
7060 - v4l2_err(sd, "%s: edid crc or header error\n", __func__);
7061 - state->have_monitor = false;
7062 - adv7511_s_power(sd, false);
7063 - adv7511_s_power(sd, true);
7064 - return false;
7065 - }
7066 - /* one more segment read ok */
7067 - state->edid.segments = segment + 1;
7068 - v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, 0x1);
7069 - if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
7070 - /* Request next EDID segment */
7071 - v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
7072 - adv7511_wr(sd, 0xc9, 0xf);
7073 - adv7511_wr(sd, 0xc4, state->edid.segments);
7074 - state->edid.read_retries = EDID_MAX_RETRIES;
7075 - queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
7076 - return false;
7077 - }
7078 -
7079 - v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
7080 - state->edid.complete = true;
7081 - ed.phys_addr = cec_get_edid_phys_addr(state->edid.data,
7082 - state->edid.segments * 256,
7083 - NULL);
7084 - /* report when we have all segments
7085 - but report only for segment 0
7086 - */
7087 - ed.present = true;
7088 - ed.segment = 0;
7089 - state->edid_detect_counter++;
7090 - cec_s_phys_addr(state->cec_adap, ed.phys_addr, false);
7091 - v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
7092 - return ed.present;
7093 - }
7094 -
7095 - return false;
7096 -}
7097 -
7098 -static int adv7511_registered(struct v4l2_subdev *sd)
7099 -{
7100 - struct adv7511_state *state = get_adv7511_state(sd);
7101 - struct i2c_client *client = v4l2_get_subdevdata(sd);
7102 - int err;
7103 -
7104 - err = cec_register_adapter(state->cec_adap, &client->dev);
7105 - if (err)
7106 - cec_delete_adapter(state->cec_adap);
7107 - return err;
7108 -}
7109 -
7110 -static void adv7511_unregistered(struct v4l2_subdev *sd)
7111 -{
7112 - struct adv7511_state *state = get_adv7511_state(sd);
7113 -
7114 - cec_unregister_adapter(state->cec_adap);
7115 -}
7116 -
7117 -static const struct v4l2_subdev_internal_ops adv7511_int_ops = {
7118 - .registered = adv7511_registered,
7119 - .unregistered = adv7511_unregistered,
7120 -};
7121 -
7122 -/* ----------------------------------------------------------------------- */
7123 -/* Setup ADV7511 */
7124 -static void adv7511_init_setup(struct v4l2_subdev *sd)
7125 -{
7126 - struct adv7511_state *state = get_adv7511_state(sd);
7127 - struct adv7511_state_edid *edid = &state->edid;
7128 - u32 cec_clk = state->pdata.cec_clk;
7129 - u8 ratio;
7130 -
7131 - v4l2_dbg(1, debug, sd, "%s\n", __func__);
7132 -
7133 - /* clear all interrupts */
7134 - adv7511_wr(sd, 0x96, 0xff);
7135 - adv7511_wr(sd, 0x97, 0xff);
7136 - /*
7137 - * Stop HPD from resetting a lot of registers.
7138 - * It might leave the chip in a partly un-initialized state,
7139 - * in particular with regards to hotplug bounces.
7140 - */
7141 - adv7511_wr_and_or(sd, 0xd6, 0x3f, 0xc0);
7142 - memset(edid, 0, sizeof(struct adv7511_state_edid));
7143 - state->have_monitor = false;
7144 - adv7511_set_isr(sd, false);
7145 - adv7511_s_stream(sd, false);
7146 - adv7511_s_audio_stream(sd, false);
7147 -
7148 - if (state->i2c_cec == NULL)
7149 - return;
7150 -
7151 - v4l2_dbg(1, debug, sd, "%s: cec_clk %d\n", __func__, cec_clk);
7152 -
7153 - /* cec soft reset */
7154 - adv7511_cec_write(sd, 0x50, 0x01);
7155 - adv7511_cec_write(sd, 0x50, 0x00);
7156 -
7157 - /* legacy mode */
7158 - adv7511_cec_write(sd, 0x4a, 0x00);
7159 - adv7511_cec_write(sd, 0x4a, 0x07);
7160 -
7161 - if (cec_clk % 750000 != 0)
7162 - v4l2_err(sd, "%s: cec_clk %d, not multiple of 750 Khz\n",
7163 - __func__, cec_clk);
7164 -
7165 - ratio = (cec_clk / 750000) - 1;
7166 - adv7511_cec_write(sd, 0x4e, ratio << 2);
7167 -}
7168 -
7169 -static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
7170 -{
7171 - struct adv7511_state *state;
7172 - struct adv7511_platform_data *pdata = client->dev.platform_data;
7173 - struct v4l2_ctrl_handler *hdl;
7174 - struct v4l2_subdev *sd;
7175 - u8 chip_id[2];
7176 - int err = -EIO;
7177 -
7178 - /* Check if the adapter supports the needed features */
7179 - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
7180 - return -EIO;
7181 -
7182 - state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
7183 - if (!state)
7184 - return -ENOMEM;
7185 -
7186 - /* Platform data */
7187 - if (!pdata) {
7188 - v4l_err(client, "No platform data!\n");
7189 - return -ENODEV;
7190 - }
7191 - memcpy(&state->pdata, pdata, sizeof(state->pdata));
7192 - state->fmt_code = MEDIA_BUS_FMT_RGB888_1X24;
7193 - state->colorspace = V4L2_COLORSPACE_SRGB;
7194 -
7195 - sd = &state->sd;
7196 -
7197 - v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
7198 - client->addr << 1);
7199 -
7200 - v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
7201 - sd->internal_ops = &adv7511_int_ops;
7202 -
7203 - hdl = &state->hdl;
7204 - v4l2_ctrl_handler_init(hdl, 10);
7205 - /* add in ascending ID order */
7206 - state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
7207 - V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
7208 - 0, V4L2_DV_TX_MODE_DVI_D);
7209 - state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
7210 - V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
7211 - state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
7212 - V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
7213 - state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
7214 - V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
7215 - state->rgb_quantization_range_ctrl =
7216 - v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
7217 - V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
7218 - 0, V4L2_DV_RGB_RANGE_AUTO);
7219 - state->content_type_ctrl =
7220 - v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
7221 - V4L2_CID_DV_TX_IT_CONTENT_TYPE, V4L2_DV_IT_CONTENT_TYPE_NO_ITC,
7222 - 0, V4L2_DV_IT_CONTENT_TYPE_NO_ITC);
7223 - sd->ctrl_handler = hdl;
7224 - if (hdl->error) {
7225 - err = hdl->error;
7226 - goto err_hdl;
7227 - }
7228 - state->pad.flags = MEDIA_PAD_FL_SINK;
7229 - sd->entity.function = MEDIA_ENT_F_DV_ENCODER;
7230 - err = media_entity_pads_init(&sd->entity, 1, &state->pad);
7231 - if (err)
7232 - goto err_hdl;
7233 -
7234 - /* EDID and CEC i2c addr */
7235 - state->i2c_edid_addr = state->pdata.i2c_edid << 1;
7236 - state->i2c_cec_addr = state->pdata.i2c_cec << 1;
7237 - state->i2c_pktmem_addr = state->pdata.i2c_pktmem << 1;
7238 -
7239 - state->chip_revision = adv7511_rd(sd, 0x0);
7240 - chip_id[0] = adv7511_rd(sd, 0xf5);
7241 - chip_id[1] = adv7511_rd(sd, 0xf6);
7242 - if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
7243 - v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0],
7244 - chip_id[1]);
7245 - err = -EIO;
7246 - goto err_entity;
7247 - }
7248 -
7249 - state->i2c_edid = i2c_new_dummy(client->adapter,
7250 - state->i2c_edid_addr >> 1);
7251 - if (state->i2c_edid == NULL) {
7252 - v4l2_err(sd, "failed to register edid i2c client\n");
7253 - err = -ENOMEM;
7254 - goto err_entity;
7255 - }
7256 -
7257 - adv7511_wr(sd, 0xe1, state->i2c_cec_addr);
7258 - if (state->pdata.cec_clk < 3000000 ||
7259 - state->pdata.cec_clk > 100000000) {
7260 - v4l2_err(sd, "%s: cec_clk %u outside range, disabling cec\n",
7261 - __func__, state->pdata.cec_clk);
7262 - state->pdata.cec_clk = 0;
7263 - }
7264 -
7265 - if (state->pdata.cec_clk) {
7266 - state->i2c_cec = i2c_new_dummy(client->adapter,
7267 - state->i2c_cec_addr >> 1);
7268 - if (state->i2c_cec == NULL) {
7269 - v4l2_err(sd, "failed to register cec i2c client\n");
7270 - err = -ENOMEM;
7271 - goto err_unreg_edid;
7272 - }
7273 - adv7511_wr(sd, 0xe2, 0x00); /* power up cec section */
7274 - } else {
7275 - adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
7276 - }
7277 -
7278 - state->i2c_pktmem = i2c_new_dummy(client->adapter, state->i2c_pktmem_addr >> 1);
7279 - if (state->i2c_pktmem == NULL) {
7280 - v4l2_err(sd, "failed to register pktmem i2c client\n");
7281 - err = -ENOMEM;
7282 - goto err_unreg_cec;
7283 - }
7284 -
7285 - state->work_queue = create_singlethread_workqueue(sd->name);
7286 - if (state->work_queue == NULL) {
7287 - v4l2_err(sd, "could not create workqueue\n");
7288 - err = -ENOMEM;
7289 - goto err_unreg_pktmem;
7290 - }
7291 -
7292 - INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
7293 -
7294 - adv7511_init_setup(sd);
7295 -
7296 -#if IS_ENABLED(CONFIG_VIDEO_ADV7511_CEC)
7297 - state->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
7298 - state, dev_name(&client->dev), CEC_CAP_DEFAULTS,
7299 - ADV7511_MAX_ADDRS);
7300 - err = PTR_ERR_OR_ZERO(state->cec_adap);
7301 - if (err) {
7302 - destroy_workqueue(state->work_queue);
7303 - goto err_unreg_pktmem;
7304 - }
7305 -#endif
7306 -
7307 - adv7511_set_isr(sd, true);
7308 - adv7511_check_monitor_present_status(sd);
7309 -
7310 - v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
7311 - client->addr << 1, client->adapter->name);
7312 - return 0;
7313 -
7314 -err_unreg_pktmem:
7315 - i2c_unregister_device(state->i2c_pktmem);
7316 -err_unreg_cec:
7317 - if (state->i2c_cec)
7318 - i2c_unregister_device(state->i2c_cec);
7319 -err_unreg_edid:
7320 - i2c_unregister_device(state->i2c_edid);
7321 -err_entity:
7322 - media_entity_cleanup(&sd->entity);
7323 -err_hdl:
7324 - v4l2_ctrl_handler_free(&state->hdl);
7325 - return err;
7326 -}
7327 -
7328 -/* ----------------------------------------------------------------------- */
7329 -
7330 -static int adv7511_remove(struct i2c_client *client)
7331 -{
7332 - struct v4l2_subdev *sd = i2c_get_clientdata(client);
7333 - struct adv7511_state *state = get_adv7511_state(sd);
7334 -
7335 - state->chip_revision = -1;
7336 -
7337 - v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
7338 - client->addr << 1, client->adapter->name);
7339 -
7340 - adv7511_set_isr(sd, false);
7341 - adv7511_init_setup(sd);
7342 - cancel_delayed_work(&state->edid_handler);
7343 - i2c_unregister_device(state->i2c_edid);
7344 - if (state->i2c_cec)
7345 - i2c_unregister_device(state->i2c_cec);
7346 - i2c_unregister_device(state->i2c_pktmem);
7347 - destroy_workqueue(state->work_queue);
7348 - v4l2_device_unregister_subdev(sd);
7349 - media_entity_cleanup(&sd->entity);
7350 - v4l2_ctrl_handler_free(sd->ctrl_handler);
7351 - return 0;
7352 -}
7353 -
7354 -/* ----------------------------------------------------------------------- */
7355 -
7356 -static const struct i2c_device_id adv7511_id[] = {
7357 - { "adv7511", 0 },
7358 - { }
7359 -};
7360 -MODULE_DEVICE_TABLE(i2c, adv7511_id);
7361 -
7362 -static struct i2c_driver adv7511_driver = {
7363 - .driver = {
7364 - .name = "adv7511",
7365 - },
7366 - .probe = adv7511_probe,
7367 - .remove = adv7511_remove,
7368 - .id_table = adv7511_id,
7369 -};
7370 -
7371 -module_i2c_driver(adv7511_driver);
7372 diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
7373 index f5a1ee90a6c5..8a6a7a5929aa 100644
7374 --- a/drivers/media/i2c/ov7740.c
7375 +++ b/drivers/media/i2c/ov7740.c
7376 @@ -761,7 +761,11 @@ static int ov7740_try_fmt_internal(struct v4l2_subdev *sd,
7377
7378 fsize++;
7379 }
7380 -
7381 + if (i >= ARRAY_SIZE(ov7740_framesizes)) {
7382 + fsize = &ov7740_framesizes[0];
7383 + fmt->width = fsize->width;
7384 + fmt->height = fsize->height;
7385 + }
7386 if (ret_frmsize != NULL)
7387 *ret_frmsize = fsize;
7388
7389 diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
7390 index 3bae24b15eaa..ed518b1f82e4 100644
7391 --- a/drivers/media/media-device.c
7392 +++ b/drivers/media/media-device.c
7393 @@ -487,6 +487,7 @@ static long media_device_enum_links32(struct media_device *mdev,
7394 {
7395 struct media_links_enum links;
7396 compat_uptr_t pads_ptr, links_ptr;
7397 + int ret;
7398
7399 memset(&links, 0, sizeof(links));
7400
7401 @@ -498,7 +499,14 @@ static long media_device_enum_links32(struct media_device *mdev,
7402 links.pads = compat_ptr(pads_ptr);
7403 links.links = compat_ptr(links_ptr);
7404
7405 - return media_device_enum_links(mdev, &links);
7406 + ret = media_device_enum_links(mdev, &links);
7407 + if (ret)
7408 + return ret;
7409 +
7410 + if (copy_to_user(ulinks->reserved, links.reserved,
7411 + sizeof(ulinks->reserved)))
7412 + return -EFAULT;
7413 + return 0;
7414 }
7415
7416 #define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
7417 diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
7418 index d697e1ad929c..5102519df108 100644
7419 --- a/drivers/media/pci/saa7164/saa7164-core.c
7420 +++ b/drivers/media/pci/saa7164/saa7164-core.c
7421 @@ -1122,16 +1122,25 @@ static int saa7164_proc_show(struct seq_file *m, void *v)
7422 return 0;
7423 }
7424
7425 +static struct proc_dir_entry *saa7164_pe;
7426 +
7427 static int saa7164_proc_create(void)
7428 {
7429 - struct proc_dir_entry *pe;
7430 -
7431 - pe = proc_create_single("saa7164", S_IRUGO, NULL, saa7164_proc_show);
7432 - if (!pe)
7433 + saa7164_pe = proc_create_single("saa7164", 0444, NULL, saa7164_proc_show);
7434 + if (!saa7164_pe)
7435 return -ENOMEM;
7436
7437 return 0;
7438 }
7439 +
7440 +static void saa7164_proc_destroy(void)
7441 +{
7442 + if (saa7164_pe)
7443 + remove_proc_entry("saa7164", NULL);
7444 +}
7445 +#else
7446 +static int saa7164_proc_create(void) { return 0; }
7447 +static void saa7164_proc_destroy(void) {}
7448 #endif
7449
7450 static int saa7164_thread_function(void *data)
7451 @@ -1503,19 +1512,21 @@ static struct pci_driver saa7164_pci_driver = {
7452
7453 static int __init saa7164_init(void)
7454 {
7455 - printk(KERN_INFO "saa7164 driver loaded\n");
7456 + int ret = pci_register_driver(&saa7164_pci_driver);
7457 +
7458 + if (ret)
7459 + return ret;
7460
7461 -#ifdef CONFIG_PROC_FS
7462 saa7164_proc_create();
7463 -#endif
7464 - return pci_register_driver(&saa7164_pci_driver);
7465 +
7466 + pr_info("saa7164 driver loaded\n");
7467 +
7468 + return 0;
7469 }
7470
7471 static void __exit saa7164_fini(void)
7472 {
7473 -#ifdef CONFIG_PROC_FS
7474 - remove_proc_entry("saa7164", NULL);
7475 -#endif
7476 + saa7164_proc_destroy();
7477 pci_unregister_driver(&saa7164_pci_driver);
7478 }
7479
7480 diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
7481 index a3cfefdbee12..c3eaddced721 100644
7482 --- a/drivers/media/platform/coda/coda-bit.c
7483 +++ b/drivers/media/platform/coda/coda-bit.c
7484 @@ -1728,6 +1728,7 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
7485 v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
7486 return ret;
7487 }
7488 + ctx->sequence_offset = ~0U;
7489 ctx->initialized = 1;
7490
7491 /* Update kfifo out pointer from coda bitstream read pointer */
7492 @@ -2142,12 +2143,17 @@ static void coda_finish_decode(struct coda_ctx *ctx)
7493 else if (ctx->display_idx < 0)
7494 ctx->hold = true;
7495 } else if (decoded_idx == -2) {
7496 + if (ctx->display_idx >= 0 &&
7497 + ctx->display_idx < ctx->num_internal_frames)
7498 + ctx->sequence_offset++;
7499 /* no frame was decoded, we still return remaining buffers */
7500 } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
7501 v4l2_err(&dev->v4l2_dev,
7502 "decoded frame index out of range: %d\n", decoded_idx);
7503 } else {
7504 - val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM) - 1;
7505 + val = coda_read(dev, CODA_RET_DEC_PIC_FRAME_NUM);
7506 + if (ctx->sequence_offset == -1)
7507 + ctx->sequence_offset = val;
7508 val -= ctx->sequence_offset;
7509 spin_lock_irqsave(&ctx->buffer_meta_lock, flags);
7510 if (!list_empty(&ctx->buffer_meta_list)) {
7511 @@ -2303,7 +2309,6 @@ irqreturn_t coda_irq_handler(int irq, void *data)
7512 if (ctx == NULL) {
7513 v4l2_err(&dev->v4l2_dev,
7514 "Instance released before the end of transaction\n");
7515 - mutex_unlock(&dev->coda_mutex);
7516 return IRQ_HANDLED;
7517 }
7518
7519 diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
7520 index 19d92edcc981..4b0220f40b42 100644
7521 --- a/drivers/media/platform/coda/coda-common.c
7522 +++ b/drivers/media/platform/coda/coda-common.c
7523 @@ -997,6 +997,8 @@ static int coda_encoder_cmd(struct file *file, void *fh,
7524 /* Set the stream-end flag on this context */
7525 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
7526
7527 + flush_work(&ctx->pic_run_work);
7528 +
7529 /* If there is no buffer in flight, wake up */
7530 if (!ctx->streamon_out || ctx->qsequence == ctx->osequence) {
7531 dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
7532 diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
7533 index 19cf6853411e..89a86c19579b 100644
7534 --- a/drivers/media/platform/davinci/vpss.c
7535 +++ b/drivers/media/platform/davinci/vpss.c
7536 @@ -518,6 +518,11 @@ static int __init vpss_init(void)
7537 return -EBUSY;
7538
7539 oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
7540 + if (unlikely(!oper_cfg.vpss_regs_base2)) {
7541 + release_mem_region(VPSS_CLK_CTRL, 4);
7542 + return -ENOMEM;
7543 + }
7544 +
7545 writel(VPSS_CLK_CTRL_VENCCLKEN |
7546 VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
7547
7548 diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
7549 index dfdbd4354b74..eeee15ff007d 100644
7550 --- a/drivers/media/platform/marvell-ccic/mcam-core.c
7551 +++ b/drivers/media/platform/marvell-ccic/mcam-core.c
7552 @@ -200,7 +200,6 @@ struct mcam_vb_buffer {
7553 struct list_head queue;
7554 struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
7555 dma_addr_t dma_desc_pa; /* Descriptor physical address */
7556 - int dma_desc_nent; /* Number of mapped descriptors */
7557 };
7558
7559 static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_v4l2_buffer *vb)
7560 @@ -608,9 +607,11 @@ static void mcam_dma_contig_done(struct mcam_camera *cam, int frame)
7561 static void mcam_sg_next_buffer(struct mcam_camera *cam)
7562 {
7563 struct mcam_vb_buffer *buf;
7564 + struct sg_table *sg_table;
7565
7566 buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
7567 list_del_init(&buf->queue);
7568 + sg_table = vb2_dma_sg_plane_desc(&buf->vb_buf.vb2_buf, 0);
7569 /*
7570 * Very Bad Not Good Things happen if you don't clear
7571 * C1_DESC_ENA before making any descriptor changes.
7572 @@ -618,7 +619,7 @@ static void mcam_sg_next_buffer(struct mcam_camera *cam)
7573 mcam_reg_clear_bit(cam, REG_CTRL1, C1_DESC_ENA);
7574 mcam_reg_write(cam, REG_DMA_DESC_Y, buf->dma_desc_pa);
7575 mcam_reg_write(cam, REG_DESC_LEN_Y,
7576 - buf->dma_desc_nent*sizeof(struct mcam_dma_desc));
7577 + sg_table->nents * sizeof(struct mcam_dma_desc));
7578 mcam_reg_write(cam, REG_DESC_LEN_U, 0);
7579 mcam_reg_write(cam, REG_DESC_LEN_V, 0);
7580 mcam_reg_set_bit(cam, REG_CTRL1, C1_DESC_ENA);
7581 diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
7582 index 2a15b7cca338..0d1467028811 100644
7583 --- a/drivers/media/platform/rcar_fdp1.c
7584 +++ b/drivers/media/platform/rcar_fdp1.c
7585 @@ -257,6 +257,8 @@ MODULE_PARM_DESC(debug, "activate debug info");
7586 #define FD1_IP_H3_ES1 0x02010101
7587 #define FD1_IP_M3W 0x02010202
7588 #define FD1_IP_H3 0x02010203
7589 +#define FD1_IP_M3N 0x02010204
7590 +#define FD1_IP_E3 0x02010205
7591
7592 /* LUTs */
7593 #define FD1_LUT_DIF_ADJ 0x1000
7594 @@ -2365,6 +2367,12 @@ static int fdp1_probe(struct platform_device *pdev)
7595 case FD1_IP_H3:
7596 dprintk(fdp1, "FDP1 Version R-Car H3\n");
7597 break;
7598 + case FD1_IP_M3N:
7599 + dprintk(fdp1, "FDP1 Version R-Car M3N\n");
7600 + break;
7601 + case FD1_IP_E3:
7602 + dprintk(fdp1, "FDP1 Version R-Car E3\n");
7603 + break;
7604 default:
7605 dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
7606 hw_version);
7607 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
7608 index ca11f8a7569d..4b8516c35bc2 100644
7609 --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
7610 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
7611 @@ -527,7 +527,8 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
7612 dev);
7613 ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
7614 dev);
7615 - ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
7616 + if (FW_HAS_E_MIN_SCRATCH_BUF(dev))
7617 + ctx->scratch_buf_size = s5p_mfc_hw_call(dev->mfc_ops,
7618 get_min_scratch_buf_size, dev);
7619 if (ctx->img_width == 0 || ctx->img_height == 0)
7620 ctx->state = MFCINST_ERROR;
7621 diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
7622 index eb85cedc5ef3..5e080f32b0e8 100644
7623 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
7624 +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
7625 @@ -38,6 +38,11 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
7626 for (i = 0; i < pm->num_clocks; i++) {
7627 pm->clocks[i] = devm_clk_get(pm->device, pm->clk_names[i]);
7628 if (IS_ERR(pm->clocks[i])) {
7629 + /* additional clocks are optional */
7630 + if (i && PTR_ERR(pm->clocks[i]) == -ENOENT) {
7631 + pm->clocks[i] = NULL;
7632 + continue;
7633 + }
7634 mfc_err("Failed to get clock: %s\n",
7635 pm->clk_names[i]);
7636 return PTR_ERR(pm->clocks[i]);
7637 diff --git a/drivers/media/platform/vimc/vimc-capture.c b/drivers/media/platform/vimc/vimc-capture.c
7638 index 65d657daf66f..8e014cc485f0 100644
7639 --- a/drivers/media/platform/vimc/vimc-capture.c
7640 +++ b/drivers/media/platform/vimc/vimc-capture.c
7641 @@ -132,12 +132,15 @@ static int vimc_cap_s_fmt_vid_cap(struct file *file, void *priv,
7642 struct v4l2_format *f)
7643 {
7644 struct vimc_cap_device *vcap = video_drvdata(file);
7645 + int ret;
7646
7647 /* Do not change the format while stream is on */
7648 if (vb2_is_busy(&vcap->queue))
7649 return -EBUSY;
7650
7651 - vimc_cap_try_fmt_vid_cap(file, priv, f);
7652 + ret = vimc_cap_try_fmt_vid_cap(file, priv, f);
7653 + if (ret)
7654 + return ret;
7655
7656 dev_dbg(vcap->dev, "%s: format update: "
7657 "old:%dx%d (0x%x, %d, %d, %d, %d) "
7658 diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c
7659 index dccdf6558e6a..33abc8616ecb 100644
7660 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c
7661 +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c
7662 @@ -549,6 +549,7 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
7663
7664 /* Register with V4L2 subsystem as RADIO device */
7665 if (video_register_device(&gradio_dev, VFL_TYPE_RADIO, radio_nr)) {
7666 + v4l2_device_unregister(&fmdev->v4l2_dev);
7667 fmerr("Could not register video device\n");
7668 return -ENOMEM;
7669 }
7670 @@ -562,6 +563,8 @@ int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)
7671 if (ret < 0) {
7672 fmerr("(fmdev): Can't init ctrl handler\n");
7673 v4l2_ctrl_handler_free(&fmdev->ctrl_handler);
7674 + video_unregister_device(fmdev->radio_dev);
7675 + v4l2_device_unregister(&fmdev->v4l2_dev);
7676 return -EBUSY;
7677 }
7678
7679 diff --git a/drivers/media/rc/ir-spi.c b/drivers/media/rc/ir-spi.c
7680 index 66334e8d63ba..c58f2d38a458 100644
7681 --- a/drivers/media/rc/ir-spi.c
7682 +++ b/drivers/media/rc/ir-spi.c
7683 @@ -161,6 +161,7 @@ static const struct of_device_id ir_spi_of_match[] = {
7684 { .compatible = "ir-spi-led" },
7685 {},
7686 };
7687 +MODULE_DEVICE_TABLE(of, ir_spi_of_match);
7688
7689 static struct spi_driver ir_spi_driver = {
7690 .probe = ir_spi_probe,
7691 diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
7692 index 40ca4eafb137..39ac22486bcd 100644
7693 --- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
7694 +++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
7695 @@ -287,12 +287,15 @@ EXPORT_SYMBOL(dvb_usb_device_init);
7696 void dvb_usb_device_exit(struct usb_interface *intf)
7697 {
7698 struct dvb_usb_device *d = usb_get_intfdata(intf);
7699 - const char *name = "generic DVB-USB module";
7700 + const char *default_name = "generic DVB-USB module";
7701 + char name[40];
7702
7703 usb_set_intfdata(intf, NULL);
7704 if (d != NULL && d->desc != NULL) {
7705 - name = d->desc->name;
7706 + strscpy(name, d->desc->name, sizeof(name));
7707 dvb_usb_exit(d);
7708 + } else {
7709 + strscpy(name, default_name, sizeof(name));
7710 }
7711 info("%s successfully deinitialized and disconnected.", name);
7712
7713 diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
7714 index 1b89c77bad66..0615996572e4 100644
7715 --- a/drivers/media/usb/hdpvr/hdpvr-video.c
7716 +++ b/drivers/media/usb/hdpvr/hdpvr-video.c
7717 @@ -439,7 +439,7 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
7718 /* wait for the first buffer */
7719 if (!(file->f_flags & O_NONBLOCK)) {
7720 if (wait_event_interruptible(dev->wait_data,
7721 - hdpvr_get_next_buffer(dev)))
7722 + !list_empty_careful(&dev->rec_buff_list)))
7723 return -ERESTARTSYS;
7724 }
7725
7726 @@ -465,10 +465,17 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
7727 goto err;
7728 }
7729 if (!err) {
7730 - v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev,
7731 - "timeout: restart streaming\n");
7732 + v4l2_info(&dev->v4l2_dev,
7733 + "timeout: restart streaming\n");
7734 + mutex_lock(&dev->io_mutex);
7735 hdpvr_stop_streaming(dev);
7736 - msecs_to_jiffies(4000);
7737 + mutex_unlock(&dev->io_mutex);
7738 + /*
7739 + * The FW needs about 4 seconds after streaming
7740 + * stopped before it is ready to restart
7741 + * streaming.
7742 + */
7743 + msleep(4000);
7744 err = hdpvr_start_streaming(dev);
7745 if (err) {
7746 ret = err;
7747 @@ -1133,9 +1140,7 @@ static void hdpvr_device_release(struct video_device *vdev)
7748 struct hdpvr_device *dev = video_get_drvdata(vdev);
7749
7750 hdpvr_delete(dev);
7751 - mutex_lock(&dev->io_mutex);
7752 flush_work(&dev->worker);
7753 - mutex_unlock(&dev->io_mutex);
7754
7755 v4l2_device_unregister(&dev->v4l2_dev);
7756 v4l2_ctrl_handler_free(&dev->hdl);
7757 diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
7758 index 467b1ddaf4e7..f2854337cdca 100644
7759 --- a/drivers/media/usb/uvc/uvc_ctrl.c
7760 +++ b/drivers/media/usb/uvc/uvc_ctrl.c
7761 @@ -2350,7 +2350,9 @@ void uvc_ctrl_cleanup_device(struct uvc_device *dev)
7762 struct uvc_entity *entity;
7763 unsigned int i;
7764
7765 - cancel_work_sync(&dev->async_ctrl.work);
7766 + /* Can be uninitialized if we are aborting on probe error. */
7767 + if (dev->async_ctrl.work.func)
7768 + cancel_work_sync(&dev->async_ctrl.work);
7769
7770 /* Free controls and control mappings for all entities. */
7771 list_for_each_entry(entity, &dev->entities, list) {
7772 diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
7773 index 6ac5f5d42615..0986572bbe88 100644
7774 --- a/drivers/media/v4l2-core/v4l2-ctrls.c
7775 +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
7776 @@ -2249,16 +2249,15 @@ struct v4l2_ctrl *v4l2_ctrl_new_custom(struct v4l2_ctrl_handler *hdl,
7777 v4l2_ctrl_fill(cfg->id, &name, &type, &min, &max, &step,
7778 &def, &flags);
7779
7780 - is_menu = (cfg->type == V4L2_CTRL_TYPE_MENU ||
7781 - cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU);
7782 + is_menu = (type == V4L2_CTRL_TYPE_MENU ||
7783 + type == V4L2_CTRL_TYPE_INTEGER_MENU);
7784 if (is_menu)
7785 WARN_ON(step);
7786 else
7787 WARN_ON(cfg->menu_skip_mask);
7788 - if (cfg->type == V4L2_CTRL_TYPE_MENU && qmenu == NULL)
7789 + if (type == V4L2_CTRL_TYPE_MENU && !qmenu) {
7790 qmenu = v4l2_ctrl_get_menu(cfg->id);
7791 - else if (cfg->type == V4L2_CTRL_TYPE_INTEGER_MENU &&
7792 - qmenu_int == NULL) {
7793 + } else if (type == V4L2_CTRL_TYPE_INTEGER_MENU && !qmenu_int) {
7794 handler_set_err(hdl, -EINVAL);
7795 return NULL;
7796 }
7797 diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
7798 index 8594659cb592..ad0275191d91 100644
7799 --- a/drivers/mmc/host/sdhci-msm.c
7800 +++ b/drivers/mmc/host/sdhci-msm.c
7801 @@ -582,11 +582,14 @@ static int msm_init_cm_dll(struct sdhci_host *host)
7802 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
7803 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
7804 int wait_cnt = 50;
7805 - unsigned long flags;
7806 + unsigned long flags, xo_clk = 0;
7807 u32 config;
7808 const struct sdhci_msm_offset *msm_offset =
7809 msm_host->offset;
7810
7811 + if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
7812 + xo_clk = clk_get_rate(msm_host->xo_clk);
7813 +
7814 spin_lock_irqsave(&host->lock, flags);
7815
7816 /*
7817 @@ -634,10 +637,10 @@ static int msm_init_cm_dll(struct sdhci_host *host)
7818 config &= CORE_FLL_CYCLE_CNT;
7819 if (config)
7820 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
7821 - clk_get_rate(msm_host->xo_clk));
7822 + xo_clk);
7823 else
7824 mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
7825 - clk_get_rate(msm_host->xo_clk));
7826 + xo_clk);
7827
7828 config = readl_relaxed(host->ioaddr +
7829 msm_offset->core_dll_config_2);
7830 diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c
7831 index 57b5ed1699e3..dce5b7e44e7a 100644
7832 --- a/drivers/mtd/nand/raw/mtk_nand.c
7833 +++ b/drivers/mtd/nand/raw/mtk_nand.c
7834 @@ -509,7 +509,8 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
7835 {
7836 struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
7837 const struct nand_sdr_timings *timings;
7838 - u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt;
7839 + u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst = 0, trlt = 0;
7840 + u32 thold;
7841
7842 timings = nand_get_sdr_timings(conf);
7843 if (IS_ERR(timings))
7844 @@ -545,11 +546,28 @@ static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline,
7845 twh = DIV_ROUND_UP(twh * rate, 1000000) - 1;
7846 twh &= 0xf;
7847
7848 - twst = timings->tWP_min / 1000;
7849 + /* Calculate real WE#/RE# hold time in nanosecond */
7850 + thold = (twh + 1) * 1000000 / rate;
7851 + /* nanosecond to picosecond */
7852 + thold *= 1000;
7853 +
7854 + /*
7855 + * WE# low level time should be expaned to meet WE# pulse time
7856 + * and WE# cycle time at the same time.
7857 + */
7858 + if (thold < timings->tWC_min)
7859 + twst = timings->tWC_min - thold;
7860 + twst = max(timings->tWP_min, twst) / 1000;
7861 twst = DIV_ROUND_UP(twst * rate, 1000000) - 1;
7862 twst &= 0xf;
7863
7864 - trlt = max(timings->tREA_max, timings->tRP_min) / 1000;
7865 + /*
7866 + * RE# low level time should be expaned to meet RE# pulse time,
7867 + * RE# access time and RE# cycle time at the same time.
7868 + */
7869 + if (thold < timings->tRC_min)
7870 + trlt = timings->tRC_min - thold;
7871 + trlt = max3(trlt, timings->tREA_max, timings->tRP_min) / 1000;
7872 trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1;
7873 trlt &= 0xf;
7874
7875 diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
7876 index 8c7bf91ce4e1..48b3ab26b124 100644
7877 --- a/drivers/mtd/nand/spi/core.c
7878 +++ b/drivers/mtd/nand/spi/core.c
7879 @@ -572,12 +572,12 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
7880 if (ret == -EBADMSG) {
7881 ecc_failed = true;
7882 mtd->ecc_stats.failed++;
7883 - ret = 0;
7884 } else {
7885 mtd->ecc_stats.corrected += ret;
7886 max_bitflips = max_t(unsigned int, max_bitflips, ret);
7887 }
7888
7889 + ret = 0;
7890 ops->retlen += iter.req.datalen;
7891 ops->oobretlen += iter.req.ooblen;
7892 }
7893 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
7894 index 7e162fff01ab..be0b785becd0 100644
7895 --- a/drivers/net/bonding/bond_main.c
7896 +++ b/drivers/net/bonding/bond_main.c
7897 @@ -3852,8 +3852,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
7898 struct net_device *bond_dev)
7899 {
7900 struct bonding *bond = netdev_priv(bond_dev);
7901 - struct iphdr *iph = ip_hdr(skb);
7902 struct slave *slave;
7903 + int slave_cnt;
7904 u32 slave_id;
7905
7906 /* Start with the curr_active_slave that joined the bond as the
7907 @@ -3862,23 +3862,32 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
7908 * send the join/membership reports. The curr_active_slave found
7909 * will send all of this type of traffic.
7910 */
7911 - if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
7912 - slave = rcu_dereference(bond->curr_active_slave);
7913 - if (slave)
7914 - bond_dev_queue_xmit(bond, skb, slave->dev);
7915 - else
7916 - bond_xmit_slave_id(bond, skb, 0);
7917 - } else {
7918 - int slave_cnt = READ_ONCE(bond->slave_cnt);
7919 + if (skb->protocol == htons(ETH_P_IP)) {
7920 + int noff = skb_network_offset(skb);
7921 + struct iphdr *iph;
7922
7923 - if (likely(slave_cnt)) {
7924 - slave_id = bond_rr_gen_slave_id(bond);
7925 - bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
7926 - } else {
7927 - bond_tx_drop(bond_dev, skb);
7928 + if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
7929 + goto non_igmp;
7930 +
7931 + iph = ip_hdr(skb);
7932 + if (iph->protocol == IPPROTO_IGMP) {
7933 + slave = rcu_dereference(bond->curr_active_slave);
7934 + if (slave)
7935 + bond_dev_queue_xmit(bond, skb, slave->dev);
7936 + else
7937 + bond_xmit_slave_id(bond, skb, 0);
7938 + return NETDEV_TX_OK;
7939 }
7940 }
7941
7942 +non_igmp:
7943 + slave_cnt = READ_ONCE(bond->slave_cnt);
7944 + if (likely(slave_cnt)) {
7945 + slave_id = bond_rr_gen_slave_id(bond);
7946 + bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
7947 + } else {
7948 + bond_tx_drop(bond_dev, skb);
7949 + }
7950 return NETDEV_TX_OK;
7951 }
7952
7953 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
7954 index 5a727d4729da..e3ce29951c5e 100644
7955 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
7956 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
7957 @@ -3858,9 +3858,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
7958
7959 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
7960 if (!(bp->flags & TX_TIMESTAMPING_EN)) {
7961 + bp->eth_stats.ptp_skip_tx_ts++;
7962 BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
7963 } else if (bp->ptp_tx_skb) {
7964 - BNX2X_ERR("The device supports only a single outstanding packet to timestamp, this packet will not be timestamped\n");
7965 + bp->eth_stats.ptp_skip_tx_ts++;
7966 + netdev_err_once(bp->dev,
7967 + "Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
7968 } else {
7969 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7970 /* schedule check for Tx timestamp */
7971 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
7972 index c428b0655c26..00f9ed93360c 100644
7973 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
7974 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
7975 @@ -182,7 +182,9 @@ static const struct {
7976 { STATS_OFFSET32(driver_filtered_tx_pkt),
7977 4, false, "driver_filtered_tx_pkt" },
7978 { STATS_OFFSET32(eee_tx_lpi),
7979 - 4, true, "Tx LPI entry count"}
7980 + 4, true, "Tx LPI entry count"},
7981 + { STATS_OFFSET32(ptp_skip_tx_ts),
7982 + 4, false, "ptp_skipped_tx_tstamp" },
7983 };
7984
7985 #define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
7986 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
7987 index a585f1025a58..2c9af0f420e5 100644
7988 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
7989 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
7990 @@ -15244,11 +15244,24 @@ static void bnx2x_ptp_task(struct work_struct *work)
7991 u32 val_seq;
7992 u64 timestamp, ns;
7993 struct skb_shared_hwtstamps shhwtstamps;
7994 + bool bail = true;
7995 + int i;
7996 +
7997 + /* FW may take a while to complete timestamping; try a bit and if it's
7998 + * still not complete, may indicate an error state - bail out then.
7999 + */
8000 + for (i = 0; i < 10; i++) {
8001 + /* Read Tx timestamp registers */
8002 + val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
8003 + NIG_REG_P0_TLLH_PTP_BUF_SEQID);
8004 + if (val_seq & 0x10000) {
8005 + bail = false;
8006 + break;
8007 + }
8008 + msleep(1 << i);
8009 + }
8010
8011 - /* Read Tx timestamp registers */
8012 - val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
8013 - NIG_REG_P0_TLLH_PTP_BUF_SEQID);
8014 - if (val_seq & 0x10000) {
8015 + if (!bail) {
8016 /* There is a valid timestamp value */
8017 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
8018 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
8019 @@ -15263,16 +15276,18 @@ static void bnx2x_ptp_task(struct work_struct *work)
8020 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
8021 shhwtstamps.hwtstamp = ns_to_ktime(ns);
8022 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
8023 - dev_kfree_skb_any(bp->ptp_tx_skb);
8024 - bp->ptp_tx_skb = NULL;
8025
8026 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
8027 timestamp, ns);
8028 } else {
8029 - DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
8030 - /* Reschedule to keep checking for a valid timestamp value */
8031 - schedule_work(&bp->ptp_task);
8032 + DP(BNX2X_MSG_PTP,
8033 + "Tx timestamp is not recorded (register read=%u)\n",
8034 + val_seq);
8035 + bp->eth_stats.ptp_skip_tx_ts++;
8036 }
8037 +
8038 + dev_kfree_skb_any(bp->ptp_tx_skb);
8039 + bp->ptp_tx_skb = NULL;
8040 }
8041
8042 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
8043 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
8044 index b2644ed13d06..d55e63692cf3 100644
8045 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
8046 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
8047 @@ -207,6 +207,9 @@ struct bnx2x_eth_stats {
8048 u32 driver_filtered_tx_pkt;
8049 /* src: Clear-on-Read register; Will not survive PMF Migration */
8050 u32 eee_tx_lpi;
8051 +
8052 + /* PTP */
8053 + u32 ptp_skip_tx_ts;
8054 };
8055
8056 struct bnx2x_eth_q_stats {
8057 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
8058 index bf715a367273..4cf80de4c471 100644
8059 --- a/drivers/net/ethernet/freescale/fec_main.c
8060 +++ b/drivers/net/ethernet/freescale/fec_main.c
8061 @@ -1689,10 +1689,10 @@ static void fec_get_mac(struct net_device *ndev)
8062 */
8063 if (!is_valid_ether_addr(iap)) {
8064 /* Report it and use a random ethernet address instead */
8065 - netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
8066 + dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
8067 eth_hw_addr_random(ndev);
8068 - netdev_info(ndev, "Using random MAC address: %pM\n",
8069 - ndev->dev_addr);
8070 + dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
8071 + ndev->dev_addr);
8072 return;
8073 }
8074
8075 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
8076 index fff5be8078ac..0594a6c3dccd 100644
8077 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
8078 +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
8079 @@ -229,6 +229,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
8080
8081 ae_algo->ops->uninit_ae_dev(ae_dev);
8082 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
8083 + ae_dev->ops = NULL;
8084 }
8085
8086 list_del(&ae_algo->node);
8087 @@ -316,6 +317,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
8088
8089 ae_algo->ops->uninit_ae_dev(ae_dev);
8090 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
8091 + ae_dev->ops = NULL;
8092 }
8093
8094 list_del(&ae_dev->node);
8095 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8096 index 9684ad015c42..6a3c6b02a77c 100644
8097 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8098 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
8099 @@ -245,11 +245,13 @@ static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
8100
8101 skb_get(skb);
8102 tx_ret = hns3_nic_net_xmit(skb, ndev);
8103 - if (tx_ret == NETDEV_TX_OK)
8104 + if (tx_ret == NETDEV_TX_OK) {
8105 good_cnt++;
8106 - else
8107 + } else {
8108 + kfree_skb(skb);
8109 netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
8110 tx_ret);
8111 + }
8112 }
8113 if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
8114 ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
8115 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8116 index 4648c6a9d9e8..89ca69fa2b97 100644
8117 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8118 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
8119 @@ -663,8 +663,7 @@ static u8 *hclge_comm_get_strings(u32 stringset,
8120 return buff;
8121
8122 for (i = 0; i < size; i++) {
8123 - snprintf(buff, ETH_GSTRING_LEN,
8124 - strs[i].desc);
8125 + snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
8126 buff = buff + ETH_GSTRING_LEN;
8127 }
8128
8129 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
8130 index 48235dc2dd56..11e9259ca040 100644
8131 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
8132 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
8133 @@ -54,7 +54,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
8134 u32 tick;
8135
8136 /* Calc tick */
8137 - if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
8138 + if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
8139 + ir > HCLGE_ETHER_MAX_RATE)
8140 return -EINVAL;
8141
8142 tick = tick_array[shaper_level];
8143 @@ -1057,6 +1058,9 @@ static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
8144 int ret;
8145 u8 i;
8146
8147 + if (vport->vport_id >= HNAE3_MAX_TC)
8148 + return -EINVAL;
8149 +
8150 ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
8151 if (ret)
8152 return ret;
8153 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
8154 index a9730711e257..b56d22b530a7 100644
8155 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
8156 +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
8157 @@ -1291,7 +1291,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
8158 struct i40e_rx_buffer *rx_buffer,
8159 unsigned int size)
8160 {
8161 - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8162 + void *va;
8163 #if (PAGE_SIZE < 8192)
8164 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
8165 #else
8166 @@ -1301,6 +1301,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
8167 struct sk_buff *skb;
8168
8169 /* prefetch first cache line of first page */
8170 + va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8171 prefetch(va);
8172 #if L1_CACHE_BYTES < 128
8173 prefetch(va + L1_CACHE_BYTES);
8174 @@ -1355,7 +1356,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
8175 struct i40e_rx_buffer *rx_buffer,
8176 unsigned int size)
8177 {
8178 - void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8179 + void *va;
8180 #if (PAGE_SIZE < 8192)
8181 unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
8182 #else
8183 @@ -1365,6 +1366,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
8184 struct sk_buff *skb;
8185
8186 /* prefetch first cache line of first page */
8187 + va = page_address(rx_buffer->page) + rx_buffer->page_offset;
8188 prefetch(va);
8189 #if L1_CACHE_BYTES < 128
8190 prefetch(va + L1_CACHE_BYTES);
8191 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
8192 index 5aa083d9a6c9..ab76a5f77cd0 100644
8193 --- a/drivers/net/ethernet/intel/igb/igb_main.c
8194 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
8195 @@ -5703,6 +5703,7 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
8196 */
8197 if (tx_ring->launchtime_enable) {
8198 ts = ns_to_timespec64(first->skb->tstamp);
8199 + first->skb->tstamp = 0;
8200 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
8201 } else {
8202 context_desc->seqnum_seed = 0;
8203 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
8204 index e5a8461fe6a9..8829bd95d0d3 100644
8205 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
8206 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
8207 @@ -3223,7 +3223,8 @@ static int ixgbe_get_module_info(struct net_device *dev,
8208 page_swap = true;
8209 }
8210
8211 - if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
8212 + if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
8213 + !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
8214 /* We have a SFP, but it does not support SFF-8472 */
8215 modinfo->type = ETH_MODULE_SFF_8079;
8216 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
8217 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
8218 index 64e44e01c973..c56baad04ee6 100644
8219 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
8220 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
8221 @@ -45,6 +45,7 @@
8222 #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
8223 #define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
8224 #define IXGBE_SFF_ADDRESSING_MODE 0x4
8225 +#define IXGBE_SFF_DDM_IMPLEMENTED 0x40
8226 #define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
8227 #define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
8228 #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
8229 diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
8230 index c5dac6bd2be4..ee7857298361 100644
8231 --- a/drivers/net/ethernet/marvell/mvmdio.c
8232 +++ b/drivers/net/ethernet/marvell/mvmdio.c
8233 @@ -64,7 +64,7 @@
8234
8235 struct orion_mdio_dev {
8236 void __iomem *regs;
8237 - struct clk *clk[3];
8238 + struct clk *clk[4];
8239 /*
8240 * If we have access to the error interrupt pin (which is
8241 * somewhat misnamed as it not only reflects internal errors
8242 @@ -321,6 +321,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
8243
8244 for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
8245 dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
8246 + if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
8247 + ret = -EPROBE_DEFER;
8248 + goto out_clk;
8249 + }
8250 if (IS_ERR(dev->clk[i]))
8251 break;
8252 clk_prepare_enable(dev->clk[i]);
8253 @@ -362,6 +366,7 @@ out_mdio:
8254 if (dev->err_interrupt > 0)
8255 writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
8256
8257 +out_clk:
8258 for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
8259 if (IS_ERR(dev->clk[i]))
8260 break;
8261 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
8262 index ae2240074d8e..5692c6087bbb 100644
8263 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
8264 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
8265 @@ -312,7 +312,8 @@ static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
8266 }
8267
8268 /* Set value */
8269 - pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] = shift & MVPP2_PRS_SRAM_SHIFT_MASK;
8270 + pe->sram[MVPP2_BIT_TO_WORD(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
8271 + shift & MVPP2_PRS_SRAM_SHIFT_MASK;
8272
8273 /* Reset and set operation */
8274 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
8275 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
8276 index 4dd82a1612aa..a6a9688db307 100644
8277 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
8278 +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
8279 @@ -3096,6 +3096,7 @@ static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
8280 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
8281 void __iomem *p_regview,
8282 void __iomem *p_doorbells,
8283 + u64 db_phys_addr,
8284 enum qed_pci_personality personality)
8285 {
8286 int rc = 0;
8287 @@ -3103,6 +3104,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
8288 /* Split PCI bars evenly between hwfns */
8289 p_hwfn->regview = p_regview;
8290 p_hwfn->doorbells = p_doorbells;
8291 + p_hwfn->db_phys_addr = db_phys_addr;
8292
8293 if (IS_VF(p_hwfn->cdev))
8294 return qed_vf_hw_prepare(p_hwfn);
8295 @@ -3198,7 +3200,9 @@ int qed_hw_prepare(struct qed_dev *cdev,
8296 /* Initialize the first hwfn - will learn number of hwfns */
8297 rc = qed_hw_prepare_single(p_hwfn,
8298 cdev->regview,
8299 - cdev->doorbells, personality);
8300 + cdev->doorbells,
8301 + cdev->db_phys_addr,
8302 + personality);
8303 if (rc)
8304 return rc;
8305
8306 @@ -3207,22 +3211,25 @@ int qed_hw_prepare(struct qed_dev *cdev,
8307 /* Initialize the rest of the hwfns */
8308 if (cdev->num_hwfns > 1) {
8309 void __iomem *p_regview, *p_doorbell;
8310 - u8 __iomem *addr;
8311 + u64 db_phys_addr;
8312 + u32 offset;
8313
8314 /* adjust bar offset for second engine */
8315 - addr = cdev->regview +
8316 - qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
8317 - BAR_ID_0) / 2;
8318 - p_regview = addr;
8319 + offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
8320 + BAR_ID_0) / 2;
8321 + p_regview = cdev->regview + offset;
8322
8323 - addr = cdev->doorbells +
8324 - qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
8325 - BAR_ID_1) / 2;
8326 - p_doorbell = addr;
8327 + offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
8328 + BAR_ID_1) / 2;
8329 +
8330 + p_doorbell = cdev->doorbells + offset;
8331 +
8332 + db_phys_addr = cdev->db_phys_addr + offset;
8333
8334 /* prepare second hw function */
8335 rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
8336 - p_doorbell, personality);
8337 + p_doorbell, db_phys_addr,
8338 + personality);
8339
8340 /* in case of error, need to free the previously
8341 * initiliazed hwfn 0.
8342 diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
8343 index b7471e48db7b..7002a660b6b4 100644
8344 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
8345 +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
8346 @@ -2709,6 +2709,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
8347 data.input.rx_num_desc = n_ooo_bufs * 2;
8348 data.input.tx_num_desc = data.input.rx_num_desc;
8349 data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
8350 + data.input.tx_tc = PKT_LB_TC;
8351 + data.input.tx_dest = QED_LL2_TX_DEST_LB;
8352 data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
8353 data.input.secondary_queue = true;
8354 data.cbs = &cbs;
8355 diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8356 index 7873d6dfd91f..13802b825d65 100644
8357 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8358 +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
8359 @@ -803,7 +803,7 @@ static int qed_rdma_add_user(void *rdma_cxt,
8360 dpi_start_offset +
8361 ((out_params->dpi) * p_hwfn->dpi_size));
8362
8363 - out_params->dpi_phys_addr = p_hwfn->cdev->db_phys_addr +
8364 + out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
8365 dpi_start_offset +
8366 ((out_params->dpi) * p_hwfn->dpi_size);
8367
8368 diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
8369 index 272b9ca66314..b069b3a2453b 100644
8370 --- a/drivers/net/ethernet/stmicro/stmmac/common.h
8371 +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
8372 @@ -261,7 +261,7 @@ struct stmmac_safety_stats {
8373 #define STMMAC_COAL_TX_TIMER 1000
8374 #define STMMAC_MAX_COAL_TX_TICK 100000
8375 #define STMMAC_TX_MAX_FRAMES 256
8376 -#define STMMAC_TX_FRAMES 25
8377 +#define STMMAC_TX_FRAMES 1
8378
8379 /* Packets types */
8380 enum packets_types {
8381 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8382 index 49a896a16391..79c91526f3ec 100644
8383 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8384 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
8385 @@ -893,6 +893,11 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
8386 * address. No need to mask it again.
8387 */
8388 reg |= 1 << H3_EPHY_ADDR_SHIFT;
8389 + } else {
8390 + /* For SoCs without internal PHY the PHY selection bit should be
8391 + * set to 0 (external PHY).
8392 + */
8393 + reg &= ~H3_EPHY_SELECT;
8394 }
8395
8396 if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
8397 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
8398 index 0877bde6e860..21d131347e2e 100644
8399 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
8400 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
8401 @@ -216,6 +216,12 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
8402 GMAC_ADDR_LOW(reg));
8403 reg++;
8404 }
8405 +
8406 + while (reg <= perfect_addr_number) {
8407 + writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
8408 + writel(0, ioaddr + GMAC_ADDR_LOW(reg));
8409 + reg++;
8410 + }
8411 }
8412
8413 #ifdef FRAME_FILTER_DEBUG
8414 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
8415 index 7e5d5db0d516..d0e6e1503581 100644
8416 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
8417 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
8418 @@ -444,14 +444,20 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
8419 * are required
8420 */
8421 value |= GMAC_PACKET_FILTER_PR;
8422 - } else if (!netdev_uc_empty(dev)) {
8423 - int reg = 1;
8424 + } else {
8425 struct netdev_hw_addr *ha;
8426 + int reg = 1;
8427
8428 netdev_for_each_uc_addr(ha, dev) {
8429 dwmac4_set_umac_addr(hw, ha->addr, reg);
8430 reg++;
8431 }
8432 +
8433 + while (reg <= GMAC_MAX_PERFECT_ADDRESSES) {
8434 + writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
8435 + writel(0, ioaddr + GMAC_ADDR_LOW(reg));
8436 + reg++;
8437 + }
8438 }
8439
8440 writel(value, ioaddr + GMAC_PACKET_FILTER);
8441 @@ -469,8 +475,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
8442 if (fc & FLOW_RX) {
8443 pr_debug("\tReceive Flow-Control ON\n");
8444 flow |= GMAC_RX_FLOW_CTRL_RFE;
8445 - writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
8446 }
8447 + writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
8448 +
8449 if (fc & FLOW_TX) {
8450 pr_debug("\tTransmit Flow-Control ON\n");
8451
8452 @@ -478,7 +485,7 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
8453 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
8454
8455 for (queue = 0; queue < tx_cnt; queue++) {
8456 - flow |= GMAC_TX_FLOW_CTRL_TFE;
8457 + flow = GMAC_TX_FLOW_CTRL_TFE;
8458
8459 if (duplex)
8460 flow |=
8461 @@ -486,6 +493,9 @@ static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
8462
8463 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
8464 }
8465 + } else {
8466 + for (queue = 0; queue < tx_cnt; queue++)
8467 + writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
8468 }
8469 }
8470
8471 diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
8472 index 7cfd7ff38e86..66b30ebd45ee 100644
8473 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
8474 +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
8475 @@ -614,6 +614,10 @@ static void axienet_start_xmit_done(struct net_device *ndev)
8476
8477 ndev->stats.tx_packets += packets;
8478 ndev->stats.tx_bytes += size;
8479 +
8480 + /* Matches barrier in axienet_start_xmit */
8481 + smp_mb();
8482 +
8483 netif_wake_queue(ndev);
8484 }
8485
8486 @@ -668,9 +672,19 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
8487 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
8488
8489 if (axienet_check_tx_bd_space(lp, num_frag)) {
8490 - if (!netif_queue_stopped(ndev))
8491 - netif_stop_queue(ndev);
8492 - return NETDEV_TX_BUSY;
8493 + if (netif_queue_stopped(ndev))
8494 + return NETDEV_TX_BUSY;
8495 +
8496 + netif_stop_queue(ndev);
8497 +
8498 + /* Matches barrier in axienet_start_xmit_done */
8499 + smp_mb();
8500 +
8501 + /* Space might have just been freed - check again */
8502 + if (axienet_check_tx_bd_space(lp, num_frag))
8503 + return NETDEV_TX_BUSY;
8504 +
8505 + netif_wake_queue(ndev);
8506 }
8507
8508 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8509 diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
8510 index 7a145172d503..d178d5bad7e4 100644
8511 --- a/drivers/net/gtp.c
8512 +++ b/drivers/net/gtp.c
8513 @@ -289,16 +289,29 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
8514 return gtp_rx(pctx, skb, hdrlen, gtp->role);
8515 }
8516
8517 -static void gtp_encap_destroy(struct sock *sk)
8518 +static void __gtp_encap_destroy(struct sock *sk)
8519 {
8520 struct gtp_dev *gtp;
8521
8522 - gtp = rcu_dereference_sk_user_data(sk);
8523 + lock_sock(sk);
8524 + gtp = sk->sk_user_data;
8525 if (gtp) {
8526 + if (gtp->sk0 == sk)
8527 + gtp->sk0 = NULL;
8528 + else
8529 + gtp->sk1u = NULL;
8530 udp_sk(sk)->encap_type = 0;
8531 rcu_assign_sk_user_data(sk, NULL);
8532 sock_put(sk);
8533 }
8534 + release_sock(sk);
8535 +}
8536 +
8537 +static void gtp_encap_destroy(struct sock *sk)
8538 +{
8539 + rtnl_lock();
8540 + __gtp_encap_destroy(sk);
8541 + rtnl_unlock();
8542 }
8543
8544 static void gtp_encap_disable_sock(struct sock *sk)
8545 @@ -306,7 +319,7 @@ static void gtp_encap_disable_sock(struct sock *sk)
8546 if (!sk)
8547 return;
8548
8549 - gtp_encap_destroy(sk);
8550 + __gtp_encap_destroy(sk);
8551 }
8552
8553 static void gtp_encap_disable(struct gtp_dev *gtp)
8554 @@ -800,7 +813,8 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
8555 goto out_sock;
8556 }
8557
8558 - if (rcu_dereference_sk_user_data(sock->sk)) {
8559 + lock_sock(sock->sk);
8560 + if (sock->sk->sk_user_data) {
8561 sk = ERR_PTR(-EBUSY);
8562 goto out_sock;
8563 }
8564 @@ -816,6 +830,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
8565 setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
8566
8567 out_sock:
8568 + release_sock(sock->sk);
8569 sockfd_put(sock);
8570 return sk;
8571 }
8572 @@ -847,8 +862,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
8573
8574 if (data[IFLA_GTP_ROLE]) {
8575 role = nla_get_u32(data[IFLA_GTP_ROLE]);
8576 - if (role > GTP_ROLE_SGSN)
8577 + if (role > GTP_ROLE_SGSN) {
8578 + if (sk0)
8579 + gtp_encap_disable_sock(sk0);
8580 + if (sk1u)
8581 + gtp_encap_disable_sock(sk1u);
8582 return -EINVAL;
8583 + }
8584 }
8585
8586 gtp->sk0 = sk0;
8587 @@ -949,7 +969,7 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
8588
8589 }
8590
8591 - pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
8592 + pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
8593 if (pctx == NULL)
8594 return -ENOMEM;
8595
8596 @@ -1038,6 +1058,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
8597 return -EINVAL;
8598 }
8599
8600 + rtnl_lock();
8601 rcu_read_lock();
8602
8603 gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
8604 @@ -1062,6 +1083,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
8605
8606 out_unlock:
8607 rcu_read_unlock();
8608 + rtnl_unlock();
8609 return err;
8610 }
8611
8612 @@ -1363,9 +1385,9 @@ late_initcall(gtp_init);
8613
8614 static void __exit gtp_fini(void)
8615 {
8616 - unregister_pernet_subsys(&gtp_net_ops);
8617 genl_unregister_family(&gtp_genl_family);
8618 rtnl_link_unregister(&gtp_link_ops);
8619 + unregister_pernet_subsys(&gtp_net_ops);
8620
8621 pr_info("GTP module unloaded\n");
8622 }
8623 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
8624 index 8a96d985a52f..6144146aec29 100644
8625 --- a/drivers/net/phy/phy_device.c
8626 +++ b/drivers/net/phy/phy_device.c
8627 @@ -757,6 +757,9 @@ int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
8628 {
8629 int rc;
8630
8631 + if (!dev)
8632 + return -EINVAL;
8633 +
8634 rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface);
8635 if (rc)
8636 return rc;
8637 @@ -1098,6 +1101,9 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
8638 struct device *d;
8639 int rc;
8640
8641 + if (!dev)
8642 + return ERR_PTR(-EINVAL);
8643 +
8644 /* Search the list of PHY devices on the mdio bus for the
8645 * PHY with the requested name
8646 */
8647 diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
8648 index 8807a806cc47..418522aa2f71 100644
8649 --- a/drivers/net/phy/sfp.c
8650 +++ b/drivers/net/phy/sfp.c
8651 @@ -185,10 +185,11 @@ struct sfp {
8652 struct gpio_desc *gpio[GPIO_MAX];
8653
8654 bool attached;
8655 + struct mutex st_mutex; /* Protects state */
8656 unsigned int state;
8657 struct delayed_work poll;
8658 struct delayed_work timeout;
8659 - struct mutex sm_mutex;
8660 + struct mutex sm_mutex; /* Protects state machine */
8661 unsigned char sm_mod_state;
8662 unsigned char sm_dev_state;
8663 unsigned short sm_state;
8664 @@ -1718,6 +1719,7 @@ static void sfp_check_state(struct sfp *sfp)
8665 {
8666 unsigned int state, i, changed;
8667
8668 + mutex_lock(&sfp->st_mutex);
8669 state = sfp_get_state(sfp);
8670 changed = state ^ sfp->state;
8671 changed &= SFP_F_PRESENT | SFP_F_LOS | SFP_F_TX_FAULT;
8672 @@ -1743,6 +1745,7 @@ static void sfp_check_state(struct sfp *sfp)
8673 sfp_sm_event(sfp, state & SFP_F_LOS ?
8674 SFP_E_LOS_HIGH : SFP_E_LOS_LOW);
8675 rtnl_unlock();
8676 + mutex_unlock(&sfp->st_mutex);
8677 }
8678
8679 static irqreturn_t sfp_irq(int irq, void *data)
8680 @@ -1773,6 +1776,7 @@ static struct sfp *sfp_alloc(struct device *dev)
8681 sfp->dev = dev;
8682
8683 mutex_init(&sfp->sm_mutex);
8684 + mutex_init(&sfp->st_mutex);
8685 INIT_DELAYED_WORK(&sfp->poll, sfp_poll);
8686 INIT_DELAYED_WORK(&sfp->timeout, sfp_timeout);
8687
8688 diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
8689 index 3d93993e74da..2eca4168af2f 100644
8690 --- a/drivers/net/usb/asix_devices.c
8691 +++ b/drivers/net/usb/asix_devices.c
8692 @@ -238,7 +238,7 @@ static void asix_phy_reset(struct usbnet *dev, unsigned int reset_bits)
8693 static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
8694 {
8695 int ret = 0;
8696 - u8 buf[ETH_ALEN];
8697 + u8 buf[ETH_ALEN] = {0};
8698 int i;
8699 unsigned long gpio_bits = dev->driver_info->data;
8700
8701 @@ -689,7 +689,7 @@ static int asix_resume(struct usb_interface *intf)
8702 static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
8703 {
8704 int ret, i;
8705 - u8 buf[ETH_ALEN], chipcode = 0;
8706 + u8 buf[ETH_ALEN] = {0}, chipcode = 0;
8707 u32 phyid;
8708 struct asix_common_private *priv;
8709
8710 @@ -1073,7 +1073,7 @@ static const struct net_device_ops ax88178_netdev_ops = {
8711 static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
8712 {
8713 int ret;
8714 - u8 buf[ETH_ALEN];
8715 + u8 buf[ETH_ALEN] = {0};
8716
8717 usbnet_get_endpoints(dev,intf);
8718
8719 diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
8720 index 677535b3d207..476e0535f06f 100644
8721 --- a/drivers/net/wireless/ath/ath10k/hw.c
8722 +++ b/drivers/net/wireless/ath/ath10k/hw.c
8723 @@ -168,7 +168,7 @@ const struct ath10k_hw_values qca6174_values = {
8724 };
8725
8726 const struct ath10k_hw_values qca99x0_values = {
8727 - .rtc_state_val_on = 5,
8728 + .rtc_state_val_on = 7,
8729 .ce_count = 12,
8730 .msi_assign_ce_max = 12,
8731 .num_target_ce_config_wlan = 10,
8732 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
8733 index f3b1cfacfe9d..1419f9d1505f 100644
8734 --- a/drivers/net/wireless/ath/ath10k/mac.c
8735 +++ b/drivers/net/wireless/ath/ath10k/mac.c
8736 @@ -1624,6 +1624,10 @@ static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
8737 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
8738 return 0;
8739
8740 + /* For mesh, probe response and beacon share the same template */
8741 + if (ieee80211_vif_is_mesh(vif))
8742 + return 0;
8743 +
8744 prb = ieee80211_proberesp_get(hw, vif);
8745 if (!prb) {
8746 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
8747 diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
8748 index 7f61591ce0de..686759b5613f 100644
8749 --- a/drivers/net/wireless/ath/ath10k/sdio.c
8750 +++ b/drivers/net/wireless/ath/ath10k/sdio.c
8751 @@ -613,6 +613,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
8752 full_len,
8753 last_in_bundle,
8754 last_in_bundle);
8755 + if (ret) {
8756 + ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
8757 + goto err;
8758 + }
8759 }
8760
8761 ar_sdio->n_rx_pkts = i;
8762 @@ -2069,6 +2073,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
8763 cancel_work_sync(&ar_sdio->wr_async_work);
8764 ath10k_core_unregister(ar);
8765 ath10k_core_destroy(ar);
8766 +
8767 + flush_workqueue(ar_sdio->workqueue);
8768 + destroy_workqueue(ar_sdio->workqueue);
8769 }
8770
8771 static const struct sdio_device_id ath10k_sdio_devices[] = {
8772 diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
8773 index cda164f6e9f6..6f62ddc0494c 100644
8774 --- a/drivers/net/wireless/ath/ath10k/txrx.c
8775 +++ b/drivers/net/wireless/ath/ath10k/txrx.c
8776 @@ -156,6 +156,9 @@ struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
8777 {
8778 struct ath10k_peer *peer;
8779
8780 + if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
8781 + return NULL;
8782 +
8783 lockdep_assert_held(&ar->data_lock);
8784
8785 list_for_each_entry(peer, &ar->peers, list)
8786 diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
8787 index 777acc564ac9..bc7916f2add0 100644
8788 --- a/drivers/net/wireless/ath/ath6kl/wmi.c
8789 +++ b/drivers/net/wireless/ath/ath6kl/wmi.c
8790 @@ -1178,6 +1178,10 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
8791 return -EINVAL;
8792
8793 ev = (struct wmi_pstream_timeout_event *) datap;
8794 + if (ev->traffic_class >= WMM_NUM_AC) {
8795 + ath6kl_err("invalid traffic class: %d\n", ev->traffic_class);
8796 + return -EINVAL;
8797 + }
8798
8799 /*
8800 * When the pstream (fat pipe == AC) timesout, it means there were
8801 @@ -1519,6 +1523,10 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
8802 return -EINVAL;
8803
8804 reply = (struct wmi_cac_event *) datap;
8805 + if (reply->ac >= WMM_NUM_AC) {
8806 + ath6kl_err("invalid AC: %d\n", reply->ac);
8807 + return -EINVAL;
8808 + }
8809
8810 if ((reply->cac_indication == CAC_INDICATION_ADMISSION_RESP) &&
8811 (reply->status_code != IEEE80211_TSPEC_STATUS_ADMISS_ACCEPTED)) {
8812 @@ -2635,7 +2643,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
8813 u16 active_tsids = 0;
8814 int ret;
8815
8816 - if (traffic_class > 3) {
8817 + if (traffic_class >= WMM_NUM_AC) {
8818 ath6kl_err("invalid traffic class: %d\n", traffic_class);
8819 return -EINVAL;
8820 }
8821 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
8822 index bb319f22761f..b4f7ee423d40 100644
8823 --- a/drivers/net/wireless/ath/ath9k/hw.c
8824 +++ b/drivers/net/wireless/ath/ath9k/hw.c
8825 @@ -252,8 +252,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
8826 /* Chip Revisions */
8827 /******************/
8828
8829 -static void ath9k_hw_read_revisions(struct ath_hw *ah)
8830 +static bool ath9k_hw_read_revisions(struct ath_hw *ah)
8831 {
8832 + u32 srev;
8833 u32 val;
8834
8835 if (ah->get_mac_revision)
8836 @@ -269,25 +270,33 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
8837 val = REG_READ(ah, AR_SREV);
8838 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
8839 }
8840 - return;
8841 + return true;
8842 case AR9300_DEVID_AR9340:
8843 ah->hw_version.macVersion = AR_SREV_VERSION_9340;
8844 - return;
8845 + return true;
8846 case AR9300_DEVID_QCA955X:
8847 ah->hw_version.macVersion = AR_SREV_VERSION_9550;
8848 - return;
8849 + return true;
8850 case AR9300_DEVID_AR953X:
8851 ah->hw_version.macVersion = AR_SREV_VERSION_9531;
8852 - return;
8853 + return true;
8854 case AR9300_DEVID_QCA956X:
8855 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
8856 - return;
8857 + return true;
8858 }
8859
8860 - val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
8861 + srev = REG_READ(ah, AR_SREV);
8862 +
8863 + if (srev == -EIO) {
8864 + ath_err(ath9k_hw_common(ah),
8865 + "Failed to read SREV register");
8866 + return false;
8867 + }
8868 +
8869 + val = srev & AR_SREV_ID;
8870
8871 if (val == 0xFF) {
8872 - val = REG_READ(ah, AR_SREV);
8873 + val = srev;
8874 ah->hw_version.macVersion =
8875 (val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
8876 ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
8877 @@ -306,6 +315,8 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
8878 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
8879 ah->is_pciexpress = true;
8880 }
8881 +
8882 + return true;
8883 }
8884
8885 /************************************/
8886 @@ -559,7 +570,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
8887 struct ath_common *common = ath9k_hw_common(ah);
8888 int r = 0;
8889
8890 - ath9k_hw_read_revisions(ah);
8891 + if (!ath9k_hw_read_revisions(ah)) {
8892 + ath_err(common, "Could not read hardware revisions");
8893 + return -EOPNOTSUPP;
8894 + }
8895
8896 switch (ah->hw_version.macVersion) {
8897 case AR_SREV_VERSION_5416_PCI:
8898 diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
8899 index d52b31b45df7..a274eb0d1968 100644
8900 --- a/drivers/net/wireless/ath/dfs_pattern_detector.c
8901 +++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
8902 @@ -111,7 +111,7 @@ static const struct radar_detector_specs jp_radar_ref_types[] = {
8903 JP_PATTERN(0, 0, 1, 1428, 1428, 1, 18, 29, false),
8904 JP_PATTERN(1, 2, 3, 3846, 3846, 1, 18, 29, false),
8905 JP_PATTERN(2, 0, 1, 1388, 1388, 1, 18, 50, false),
8906 - JP_PATTERN(3, 1, 2, 4000, 4000, 1, 18, 50, false),
8907 + JP_PATTERN(3, 0, 4, 4000, 4000, 1, 18, 50, false),
8908 JP_PATTERN(4, 0, 5, 150, 230, 1, 23, 50, false),
8909 JP_PATTERN(5, 6, 10, 200, 500, 1, 16, 50, false),
8910 JP_PATTERN(6, 11, 20, 200, 500, 1, 12, 50, false),
8911 diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
8912 index 5d287a8e1b45..0655cd884514 100644
8913 --- a/drivers/net/wireless/ath/wil6210/interrupt.c
8914 +++ b/drivers/net/wireless/ath/wil6210/interrupt.c
8915 @@ -296,21 +296,24 @@ void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
8916 static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
8917 {
8918 struct wil6210_priv *wil = cookie;
8919 - u32 isr = wil_ioread32_and_clear(wil->csr +
8920 - HOSTADDR(RGF_DMA_EP_RX_ICR) +
8921 - offsetof(struct RGF_ICR, ICR));
8922 + u32 isr;
8923 bool need_unmask = true;
8924
8925 + wil6210_mask_irq_rx(wil);
8926 +
8927 + isr = wil_ioread32_and_clear(wil->csr +
8928 + HOSTADDR(RGF_DMA_EP_RX_ICR) +
8929 + offsetof(struct RGF_ICR, ICR));
8930 +
8931 trace_wil6210_irq_rx(isr);
8932 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
8933
8934 if (unlikely(!isr)) {
8935 wil_err_ratelimited(wil, "spurious IRQ: RX\n");
8936 + wil6210_unmask_irq_rx(wil);
8937 return IRQ_NONE;
8938 }
8939
8940 - wil6210_mask_irq_rx(wil);
8941 -
8942 /* RX_DONE and RX_HTRSH interrupts are the same if interrupt
8943 * moderation is not used. Interrupt moderation may cause RX
8944 * buffer overflow while RX_DONE is delayed. The required
8945 @@ -355,21 +358,24 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
8946 static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
8947 {
8948 struct wil6210_priv *wil = cookie;
8949 - u32 isr = wil_ioread32_and_clear(wil->csr +
8950 - HOSTADDR(RGF_INT_GEN_RX_ICR) +
8951 - offsetof(struct RGF_ICR, ICR));
8952 + u32 isr;
8953 bool need_unmask = true;
8954
8955 + wil6210_mask_irq_rx_edma(wil);
8956 +
8957 + isr = wil_ioread32_and_clear(wil->csr +
8958 + HOSTADDR(RGF_INT_GEN_RX_ICR) +
8959 + offsetof(struct RGF_ICR, ICR));
8960 +
8961 trace_wil6210_irq_rx(isr);
8962 wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);
8963
8964 if (unlikely(!isr)) {
8965 wil_err(wil, "spurious IRQ: RX\n");
8966 + wil6210_unmask_irq_rx_edma(wil);
8967 return IRQ_NONE;
8968 }
8969
8970 - wil6210_mask_irq_rx_edma(wil);
8971 -
8972 if (likely(isr & BIT_RX_STATUS_IRQ)) {
8973 wil_dbg_irq(wil, "RX status ring\n");
8974 isr &= ~BIT_RX_STATUS_IRQ;
8975 @@ -403,21 +409,24 @@ static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
8976 static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
8977 {
8978 struct wil6210_priv *wil = cookie;
8979 - u32 isr = wil_ioread32_and_clear(wil->csr +
8980 - HOSTADDR(RGF_INT_GEN_TX_ICR) +
8981 - offsetof(struct RGF_ICR, ICR));
8982 + u32 isr;
8983 bool need_unmask = true;
8984
8985 + wil6210_mask_irq_tx_edma(wil);
8986 +
8987 + isr = wil_ioread32_and_clear(wil->csr +
8988 + HOSTADDR(RGF_INT_GEN_TX_ICR) +
8989 + offsetof(struct RGF_ICR, ICR));
8990 +
8991 trace_wil6210_irq_tx(isr);
8992 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
8993
8994 if (unlikely(!isr)) {
8995 wil_err(wil, "spurious IRQ: TX\n");
8996 + wil6210_unmask_irq_tx_edma(wil);
8997 return IRQ_NONE;
8998 }
8999
9000 - wil6210_mask_irq_tx_edma(wil);
9001 -
9002 if (likely(isr & BIT_TX_STATUS_IRQ)) {
9003 wil_dbg_irq(wil, "TX status ring\n");
9004 isr &= ~BIT_TX_STATUS_IRQ;
9005 @@ -446,21 +455,24 @@ static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
9006 static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
9007 {
9008 struct wil6210_priv *wil = cookie;
9009 - u32 isr = wil_ioread32_and_clear(wil->csr +
9010 - HOSTADDR(RGF_DMA_EP_TX_ICR) +
9011 - offsetof(struct RGF_ICR, ICR));
9012 + u32 isr;
9013 bool need_unmask = true;
9014
9015 + wil6210_mask_irq_tx(wil);
9016 +
9017 + isr = wil_ioread32_and_clear(wil->csr +
9018 + HOSTADDR(RGF_DMA_EP_TX_ICR) +
9019 + offsetof(struct RGF_ICR, ICR));
9020 +
9021 trace_wil6210_irq_tx(isr);
9022 wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);
9023
9024 if (unlikely(!isr)) {
9025 wil_err_ratelimited(wil, "spurious IRQ: TX\n");
9026 + wil6210_unmask_irq_tx(wil);
9027 return IRQ_NONE;
9028 }
9029
9030 - wil6210_mask_irq_tx(wil);
9031 -
9032 if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
9033 wil_dbg_irq(wil, "TX done\n");
9034 isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
9035 @@ -532,20 +544,23 @@ static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
9036 static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
9037 {
9038 struct wil6210_priv *wil = cookie;
9039 - u32 isr = wil_ioread32_and_clear(wil->csr +
9040 - HOSTADDR(RGF_DMA_EP_MISC_ICR) +
9041 - offsetof(struct RGF_ICR, ICR));
9042 + u32 isr;
9043 +
9044 + wil6210_mask_irq_misc(wil, false);
9045 +
9046 + isr = wil_ioread32_and_clear(wil->csr +
9047 + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
9048 + offsetof(struct RGF_ICR, ICR));
9049
9050 trace_wil6210_irq_misc(isr);
9051 wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);
9052
9053 if (!isr) {
9054 wil_err(wil, "spurious IRQ: MISC\n");
9055 + wil6210_unmask_irq_misc(wil, false);
9056 return IRQ_NONE;
9057 }
9058
9059 - wil6210_mask_irq_misc(wil, false);
9060 -
9061 if (isr & ISR_MISC_FW_ERROR) {
9062 u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
9063 u32 ucode_assert_code =
9064 diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
9065 index 75c8aa297107..1b1b58e0129a 100644
9066 --- a/drivers/net/wireless/ath/wil6210/txrx.c
9067 +++ b/drivers/net/wireless/ath/wil6210/txrx.c
9068 @@ -736,6 +736,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
9069 [GRO_HELD] = "GRO_HELD",
9070 [GRO_NORMAL] = "GRO_NORMAL",
9071 [GRO_DROP] = "GRO_DROP",
9072 + [GRO_CONSUMED] = "GRO_CONSUMED",
9073 };
9074
9075 wil->txrx_ops.get_netif_rx_params(skb, &cid, &security);
9076 diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
9077 index 6e3b3031f29b..2010f771478d 100644
9078 --- a/drivers/net/wireless/ath/wil6210/wmi.c
9079 +++ b/drivers/net/wireless/ath/wil6210/wmi.c
9080 @@ -2816,7 +2816,18 @@ static void wmi_event_handle(struct wil6210_priv *wil,
9081 /* check if someone waits for this event */
9082 if (wil->reply_id && wil->reply_id == id &&
9083 wil->reply_mid == mid) {
9084 - WARN_ON(wil->reply_buf);
9085 + if (wil->reply_buf) {
9086 + /* event received while wmi_call is waiting
9087 + * with a buffer. Such event should be handled
9088 + * in wmi_recv_cmd function. Handling the event
9089 + * here means a previous wmi_call was timeout.
9090 + * Drop the event and do not handle it.
9091 + */
9092 + wil_err(wil,
9093 + "Old event (%d, %s) while wmi_call is waiting. Drop it and Continue waiting\n",
9094 + id, eventid2name(id));
9095 + return;
9096 + }
9097
9098 wmi_evt_call_handler(vif, id, evt_data,
9099 len - sizeof(*wmi));
9100 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
9101 index ff85d69c2a8c..557ee47bffd8 100644
9102 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c
9103 +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
9104 @@ -8,7 +8,7 @@
9105 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9106 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9107 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9108 - * Copyright(c) 2018 Intel Corporation
9109 + * Copyright(c) 2018 - 2019 Intel Corporation
9110 *
9111 * This program is free software; you can redistribute it and/or modify
9112 * it under the terms of version 2 of the GNU General Public License as
9113 @@ -31,7 +31,7 @@
9114 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9115 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9116 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
9117 - * Copyright(c) 2018 Intel Corporation
9118 + * Copyright(c) 2018 - 2019 Intel Corporation
9119 * All rights reserved.
9120 *
9121 * Redistribution and use in source and binary forms, with or without
9122 @@ -134,6 +134,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
9123 .len = { 0, },
9124 };
9125 struct iwl_rx_packet *pkt;
9126 + int ret;
9127
9128 if (fw_has_capa(&fwrt->fw->ucode_capa,
9129 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
9130 @@ -141,8 +142,13 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
9131 else
9132 cmd.id = SHARED_MEM_CFG;
9133
9134 - if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
9135 + ret = iwl_trans_send_cmd(fwrt->trans, &cmd);
9136 +
9137 + if (ret) {
9138 + WARN(ret != -ERFKILL,
9139 + "Could not send the SMEM command: %d\n", ret);
9140 return;
9141 + }
9142
9143 pkt = cmd.resp_pkt;
9144 if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
9145 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9146 index 2d21f0a1fa00..ffae299c3492 100644
9147 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9148 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
9149 @@ -641,6 +641,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
9150
9151 memcpy(&info, skb->cb, sizeof(info));
9152
9153 + if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
9154 + return -1;
9155 +
9156 if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
9157 return -1;
9158
9159 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
9160 index 2146fda8da2f..64d976d872b8 100644
9161 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
9162 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
9163 @@ -164,7 +164,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
9164
9165 memcpy(iml_img, trans->iml, trans->iml_len);
9166
9167 - iwl_enable_interrupts(trans);
9168 + iwl_enable_fw_load_int_ctx_info(trans);
9169
9170 /* kick FW self load */
9171 iwl_write64(trans, CSR_CTXT_INFO_ADDR,
9172 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
9173 index b2cd7ef5fc3a..6f25fd1bbd8f 100644
9174 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
9175 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
9176 @@ -206,7 +206,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
9177
9178 trans_pcie->ctxt_info = ctxt_info;
9179
9180 - iwl_enable_interrupts(trans);
9181 + iwl_enable_fw_load_int_ctx_info(trans);
9182
9183 /* Configure debug, if exists */
9184 if (trans->dbg_dest_tlv)
9185 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
9186 index b63d44b7cd7c..00f9566bcc21 100644
9187 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
9188 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
9189 @@ -896,6 +896,33 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
9190 }
9191 }
9192
9193 +static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
9194 +{
9195 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
9196 +
9197 + IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");
9198 +
9199 + if (!trans_pcie->msix_enabled) {
9200 + /*
9201 + * When we'll receive the ALIVE interrupt, the ISR will call
9202 + * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
9203 + * interrupt (which is not really needed anymore) but also the
9204 + * RX interrupt which will allow us to receive the ALIVE
9205 + * notification (which is Rx) and continue the flow.
9206 + */
9207 + trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
9208 + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
9209 + } else {
9210 + iwl_enable_hw_int_msk_msix(trans,
9211 + MSIX_HW_INT_CAUSES_REG_ALIVE);
9212 + /*
9213 + * Leave all the FH causes enabled to get the ALIVE
9214 + * notification.
9215 + */
9216 + iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
9217 + }
9218 +}
9219 +
9220 static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
9221 {
9222 return index & (q->n_window - 1);
9223 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9224 index 6dcd5374d9b4..1d144985ea58 100644
9225 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9226 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
9227 @@ -1778,26 +1778,26 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
9228 goto out;
9229 }
9230
9231 - if (iwl_have_debug_level(IWL_DL_ISR)) {
9232 - /* NIC fires this, but we don't use it, redundant with WAKEUP */
9233 - if (inta & CSR_INT_BIT_SCD) {
9234 - IWL_DEBUG_ISR(trans,
9235 - "Scheduler finished to transmit the frame/frames.\n");
9236 - isr_stats->sch++;
9237 - }
9238 + /* NIC fires this, but we don't use it, redundant with WAKEUP */
9239 + if (inta & CSR_INT_BIT_SCD) {
9240 + IWL_DEBUG_ISR(trans,
9241 + "Scheduler finished to transmit the frame/frames.\n");
9242 + isr_stats->sch++;
9243 + }
9244
9245 - /* Alive notification via Rx interrupt will do the real work */
9246 - if (inta & CSR_INT_BIT_ALIVE) {
9247 - IWL_DEBUG_ISR(trans, "Alive interrupt\n");
9248 - isr_stats->alive++;
9249 - if (trans->cfg->gen2) {
9250 - /*
9251 - * We can restock, since firmware configured
9252 - * the RFH
9253 - */
9254 - iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
9255 - }
9256 + /* Alive notification via Rx interrupt will do the real work */
9257 + if (inta & CSR_INT_BIT_ALIVE) {
9258 + IWL_DEBUG_ISR(trans, "Alive interrupt\n");
9259 + isr_stats->alive++;
9260 + if (trans->cfg->gen2) {
9261 + /*
9262 + * We can restock, since firmware configured
9263 + * the RFH
9264 + */
9265 + iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
9266 }
9267 +
9268 + handled |= CSR_INT_BIT_ALIVE;
9269 }
9270
9271 /* Safely ignore these bits for debug checks below */
9272 @@ -1916,6 +1916,9 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
9273 /* Re-enable RF_KILL if it occurred */
9274 else if (handled & CSR_INT_BIT_RF_KILL)
9275 iwl_enable_rfkill_int(trans);
9276 + /* Re-enable the ALIVE / Rx interrupt if it occurred */
9277 + else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
9278 + iwl_enable_fw_load_int_ctx_info(trans);
9279 spin_unlock(&trans_pcie->irq_lock);
9280
9281 out:
9282 @@ -2060,10 +2063,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
9283 return IRQ_NONE;
9284 }
9285
9286 - if (iwl_have_debug_level(IWL_DL_ISR))
9287 - IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
9288 - inta_fh,
9289 + if (iwl_have_debug_level(IWL_DL_ISR)) {
9290 + IWL_DEBUG_ISR(trans,
9291 + "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
9292 + inta_fh, trans_pcie->fh_mask,
9293 iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
9294 + if (inta_fh & ~trans_pcie->fh_mask)
9295 + IWL_DEBUG_ISR(trans,
9296 + "We got a masked interrupt (0x%08x)\n",
9297 + inta_fh & ~trans_pcie->fh_mask);
9298 + }
9299 +
9300 + inta_fh &= trans_pcie->fh_mask;
9301
9302 if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
9303 inta_fh & MSIX_FH_INT_CAUSES_Q0) {
9304 @@ -2103,11 +2114,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
9305 }
9306
9307 /* After checking FH register check HW register */
9308 - if (iwl_have_debug_level(IWL_DL_ISR))
9309 + if (iwl_have_debug_level(IWL_DL_ISR)) {
9310 IWL_DEBUG_ISR(trans,
9311 - "ISR inta_hw 0x%08x, enabled 0x%08x\n",
9312 - inta_hw,
9313 + "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
9314 + inta_hw, trans_pcie->hw_mask,
9315 iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
9316 + if (inta_hw & ~trans_pcie->hw_mask)
9317 + IWL_DEBUG_ISR(trans,
9318 + "We got a masked interrupt 0x%08x\n",
9319 + inta_hw & ~trans_pcie->hw_mask);
9320 + }
9321 +
9322 + inta_hw &= trans_pcie->hw_mask;
9323
9324 /* Alive notification via Rx interrupt will do the real work */
9325 if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
9326 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
9327 index 2bc67219ed3e..31e72e1ff1e2 100644
9328 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
9329 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
9330 @@ -289,6 +289,15 @@ void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
9331 * paging memory cannot be freed included since FW will still use it
9332 */
9333 iwl_pcie_ctxt_info_free(trans);
9334 +
9335 + /*
9336 + * Re-enable all the interrupts, including the RF-Kill one, now that
9337 + * the firmware is alive.
9338 + */
9339 + iwl_enable_interrupts(trans);
9340 + mutex_lock(&trans_pcie->mutex);
9341 + iwl_pcie_check_hw_rf_kill(trans);
9342 + mutex_unlock(&trans_pcie->mutex);
9343 }
9344
9345 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
9346 diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
9347 index 7f3e3983b781..47cebb2ec05c 100644
9348 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c
9349 +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
9350 @@ -193,10 +193,23 @@ static void mt7601u_complete_rx(struct urb *urb)
9351 struct mt7601u_rx_queue *q = &dev->rx_q;
9352 unsigned long flags;
9353
9354 - spin_lock_irqsave(&dev->rx_lock, flags);
9355 + /* do no schedule rx tasklet if urb has been unlinked
9356 + * or the device has been removed
9357 + */
9358 + switch (urb->status) {
9359 + case -ECONNRESET:
9360 + case -ESHUTDOWN:
9361 + case -ENOENT:
9362 + return;
9363 + default:
9364 + dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
9365 + urb->status);
9366 + /* fall through */
9367 + case 0:
9368 + break;
9369 + }
9370
9371 - if (mt7601u_urb_has_error(urb))
9372 - dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
9373 + spin_lock_irqsave(&dev->rx_lock, flags);
9374 if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
9375 goto out;
9376
9377 @@ -228,14 +241,25 @@ static void mt7601u_complete_tx(struct urb *urb)
9378 struct sk_buff *skb;
9379 unsigned long flags;
9380
9381 - spin_lock_irqsave(&dev->tx_lock, flags);
9382 + switch (urb->status) {
9383 + case -ECONNRESET:
9384 + case -ESHUTDOWN:
9385 + case -ENOENT:
9386 + return;
9387 + default:
9388 + dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
9389 + urb->status);
9390 + /* fall through */
9391 + case 0:
9392 + break;
9393 + }
9394
9395 - if (mt7601u_urb_has_error(urb))
9396 - dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
9397 + spin_lock_irqsave(&dev->tx_lock, flags);
9398 if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
9399 goto out;
9400
9401 skb = q->e[q->start].skb;
9402 + q->e[q->start].skb = NULL;
9403 trace_mt_tx_dma_done(dev, skb);
9404
9405 __skb_queue_tail(&dev->tx_skb_done, skb);
9406 @@ -363,19 +387,9 @@ int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
9407 static void mt7601u_kill_rx(struct mt7601u_dev *dev)
9408 {
9409 int i;
9410 - unsigned long flags;
9411 -
9412 - spin_lock_irqsave(&dev->rx_lock, flags);
9413 -
9414 - for (i = 0; i < dev->rx_q.entries; i++) {
9415 - int next = dev->rx_q.end;
9416
9417 - spin_unlock_irqrestore(&dev->rx_lock, flags);
9418 - usb_poison_urb(dev->rx_q.e[next].urb);
9419 - spin_lock_irqsave(&dev->rx_lock, flags);
9420 - }
9421 -
9422 - spin_unlock_irqrestore(&dev->rx_lock, flags);
9423 + for (i = 0; i < dev->rx_q.entries; i++)
9424 + usb_poison_urb(dev->rx_q.e[i].urb);
9425 }
9426
9427 static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
9428 @@ -445,10 +459,10 @@ static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
9429 {
9430 int i;
9431
9432 - WARN_ON(q->used);
9433 -
9434 for (i = 0; i < q->entries; i++) {
9435 usb_poison_urb(q->e[i].urb);
9436 + if (q->e[i].skb)
9437 + mt7601u_tx_status(q->dev, q->e[i].skb);
9438 usb_free_urb(q->e[i].urb);
9439 }
9440 }
9441 diff --git a/drivers/net/wireless/mediatek/mt7601u/tx.c b/drivers/net/wireless/mediatek/mt7601u/tx.c
9442 index 3600e911a63e..4d81c45722fb 100644
9443 --- a/drivers/net/wireless/mediatek/mt7601u/tx.c
9444 +++ b/drivers/net/wireless/mediatek/mt7601u/tx.c
9445 @@ -117,9 +117,9 @@ void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb)
9446 info->status.rates[0].idx = -1;
9447 info->flags |= IEEE80211_TX_STAT_ACK;
9448
9449 - spin_lock(&dev->mac_lock);
9450 + spin_lock_bh(&dev->mac_lock);
9451 ieee80211_tx_status(dev->hw, skb);
9452 - spin_unlock(&dev->mac_lock);
9453 + spin_unlock_bh(&dev->mac_lock);
9454 }
9455
9456 static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb)
9457 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
9458 index 2ac5004d7a40..5adb939afee8 100644
9459 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
9460 +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
9461 @@ -1081,13 +1081,13 @@ int rtl_usb_probe(struct usb_interface *intf,
9462 rtlpriv->cfg->ops->read_eeprom_info(hw);
9463 err = _rtl_usb_init(hw);
9464 if (err)
9465 - goto error_out;
9466 + goto error_out2;
9467 rtl_usb_init_sw(hw);
9468 /* Init mac80211 sw */
9469 err = rtl_init_core(hw);
9470 if (err) {
9471 pr_err("Can't allocate sw for mac80211\n");
9472 - goto error_out;
9473 + goto error_out2;
9474 }
9475 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
9476 pr_err("Can't init_sw_vars\n");
9477 @@ -1108,6 +1108,7 @@ int rtl_usb_probe(struct usb_interface *intf,
9478
9479 error_out:
9480 rtl_deinit_core(hw);
9481 +error_out2:
9482 _rtl_usb_io_handler_release(hw);
9483 usb_put_dev(udev);
9484 complete(&rtlpriv->firmware_loading_complete);
9485 diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
9486 index 0453f49dc708..326f02ffca81 100644
9487 --- a/drivers/nvdimm/dax_devs.c
9488 +++ b/drivers/nvdimm/dax_devs.c
9489 @@ -126,7 +126,7 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
9490 nvdimm_bus_unlock(&ndns->dev);
9491 if (!dax_dev)
9492 return -ENOMEM;
9493 - pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
9494 + pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
9495 nd_pfn->pfn_sb = pfn_sb;
9496 rc = nd_pfn_validate(nd_pfn, DAX_SIG);
9497 dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : "<none>");
9498 diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
9499 index dde9853453d3..e901e3a3b04c 100644
9500 --- a/drivers/nvdimm/pfn.h
9501 +++ b/drivers/nvdimm/pfn.h
9502 @@ -36,6 +36,7 @@ struct nd_pfn_sb {
9503 __le32 end_trunc;
9504 /* minor-version-2 record the base alignment of the mapping */
9505 __le32 align;
9506 + /* minor-version-3 guarantee the padding and flags are zero */
9507 u8 padding[4000];
9508 __le64 checksum;
9509 };
9510 diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
9511 index 3ee995a3bfc9..86ed09b2a192 100644
9512 --- a/drivers/nvdimm/pfn_devs.c
9513 +++ b/drivers/nvdimm/pfn_devs.c
9514 @@ -361,6 +361,15 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
9515 return dev;
9516 }
9517
9518 +/**
9519 + * nd_pfn_validate - read and validate info-block
9520 + * @nd_pfn: fsdax namespace runtime state / properties
9521 + * @sig: 'devdax' or 'fsdax' signature
9522 + *
9523 + * Upon return the info-block buffer contents (->pfn_sb) are
9524 + * indeterminate when validation fails, and a coherent info-block
9525 + * otherwise.
9526 + */
9527 int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
9528 {
9529 u64 checksum, offset;
9530 @@ -506,7 +515,7 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
9531 nvdimm_bus_unlock(&ndns->dev);
9532 if (!pfn_dev)
9533 return -ENOMEM;
9534 - pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
9535 + pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
9536 nd_pfn = to_nd_pfn(pfn_dev);
9537 nd_pfn->pfn_sb = pfn_sb;
9538 rc = nd_pfn_validate(nd_pfn, PFN_SIG);
9539 @@ -638,7 +647,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
9540 u64 checksum;
9541 int rc;
9542
9543 - pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
9544 + pfn_sb = devm_kmalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
9545 if (!pfn_sb)
9546 return -ENOMEM;
9547
9548 @@ -647,11 +656,14 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
9549 sig = DAX_SIG;
9550 else
9551 sig = PFN_SIG;
9552 +
9553 rc = nd_pfn_validate(nd_pfn, sig);
9554 if (rc != -ENODEV)
9555 return rc;
9556
9557 /* no info block, do init */;
9558 + memset(pfn_sb, 0, sizeof(*pfn_sb));
9559 +
9560 nd_region = to_nd_region(nd_pfn->dev.parent);
9561 if (nd_region->ro) {
9562 dev_info(&nd_pfn->dev,
9563 @@ -705,7 +717,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
9564 memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
9565 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
9566 pfn_sb->version_major = cpu_to_le16(1);
9567 - pfn_sb->version_minor = cpu_to_le16(2);
9568 + pfn_sb->version_minor = cpu_to_le16(3);
9569 pfn_sb->start_pad = cpu_to_le32(start_pad);
9570 pfn_sb->end_trunc = cpu_to_le32(end_trunc);
9571 pfn_sb->align = cpu_to_le32(nd_pfn->align);
9572 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
9573 index d8869d978c34..e26d1191c5ad 100644
9574 --- a/drivers/nvme/host/core.c
9575 +++ b/drivers/nvme/host/core.c
9576 @@ -3168,6 +3168,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
9577 return;
9578
9579 nvme_fault_inject_fini(ns);
9580 +
9581 + mutex_lock(&ns->ctrl->subsys->lock);
9582 + list_del_rcu(&ns->siblings);
9583 + mutex_unlock(&ns->ctrl->subsys->lock);
9584 + synchronize_rcu(); /* guarantee not available in head->list */
9585 + nvme_mpath_clear_current_path(ns);
9586 + synchronize_srcu(&ns->head->srcu); /* wait for concurrent submissions */
9587 +
9588 if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
9589 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
9590 &nvme_ns_id_attr_group);
9591 @@ -3179,16 +3187,10 @@ static void nvme_ns_remove(struct nvme_ns *ns)
9592 blk_integrity_unregister(ns->disk);
9593 }
9594
9595 - mutex_lock(&ns->ctrl->subsys->lock);
9596 - list_del_rcu(&ns->siblings);
9597 - nvme_mpath_clear_current_path(ns);
9598 - mutex_unlock(&ns->ctrl->subsys->lock);
9599 -
9600 down_write(&ns->ctrl->namespaces_rwsem);
9601 list_del_init(&ns->list);
9602 up_write(&ns->ctrl->namespaces_rwsem);
9603
9604 - synchronize_srcu(&ns->head->srcu);
9605 nvme_mpath_check_last_path(ns);
9606 nvme_put_ns(ns);
9607 }
9608 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
9609 index c8eeecc58115..0a5d064f82ca 100644
9610 --- a/drivers/nvme/host/pci.c
9611 +++ b/drivers/nvme/host/pci.c
9612 @@ -2253,11 +2253,13 @@ static void nvme_reset_work(struct work_struct *work)
9613 struct nvme_dev *dev =
9614 container_of(work, struct nvme_dev, ctrl.reset_work);
9615 bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
9616 - int result = -ENODEV;
9617 + int result;
9618 enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
9619
9620 - if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING))
9621 + if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
9622 + result = -ENODEV;
9623 goto out;
9624 + }
9625
9626 /*
9627 * If we're called to reset a live controller first shut it down before
9628 @@ -2294,6 +2296,7 @@ static void nvme_reset_work(struct work_struct *work)
9629 if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
9630 dev_warn(dev->ctrl.device,
9631 "failed to mark controller CONNECTING\n");
9632 + result = -EBUSY;
9633 goto out;
9634 }
9635
9636 @@ -2354,6 +2357,7 @@ static void nvme_reset_work(struct work_struct *work)
9637 if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
9638 dev_warn(dev->ctrl.device,
9639 "failed to mark controller state %d\n", new_state);
9640 + result = -ENODEV;
9641 goto out;
9642 }
9643
9644 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
9645 index 4352c1cb926d..87a8887fd4d3 100644
9646 --- a/drivers/pci/controller/dwc/pcie-qcom.c
9647 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
9648 @@ -178,6 +178,8 @@ static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
9649
9650 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
9651 {
9652 + /* Ensure that PERST has been asserted for at least 100 ms */
9653 + msleep(100);
9654 gpiod_set_value_cansleep(pcie->reset, 0);
9655 usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
9656 }
9657 diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
9658 index 808a182830e5..5dadc964ad3b 100644
9659 --- a/drivers/pci/controller/pci-hyperv.c
9660 +++ b/drivers/pci/controller/pci-hyperv.c
9661 @@ -1880,6 +1880,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
9662 static void hv_eject_device_work(struct work_struct *work)
9663 {
9664 struct pci_eject_response *ejct_pkt;
9665 + struct hv_pcibus_device *hbus;
9666 struct hv_pci_dev *hpdev;
9667 struct pci_dev *pdev;
9668 unsigned long flags;
9669 @@ -1890,6 +1891,7 @@ static void hv_eject_device_work(struct work_struct *work)
9670 } ctxt;
9671
9672 hpdev = container_of(work, struct hv_pci_dev, wrk);
9673 + hbus = hpdev->hbus;
9674
9675 WARN_ON(hpdev->state != hv_pcichild_ejecting);
9676
9677 @@ -1900,8 +1902,7 @@ static void hv_eject_device_work(struct work_struct *work)
9678 * because hbus->pci_bus may not exist yet.
9679 */
9680 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
9681 - pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0,
9682 - wslot);
9683 + pdev = pci_get_domain_bus_and_slot(hbus->sysdata.domain, 0, wslot);
9684 if (pdev) {
9685 pci_lock_rescan_remove();
9686 pci_stop_and_remove_bus_device(pdev);
9687 @@ -1909,9 +1910,9 @@ static void hv_eject_device_work(struct work_struct *work)
9688 pci_unlock_rescan_remove();
9689 }
9690
9691 - spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags);
9692 + spin_lock_irqsave(&hbus->device_list_lock, flags);
9693 list_del(&hpdev->list_entry);
9694 - spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
9695 + spin_unlock_irqrestore(&hbus->device_list_lock, flags);
9696
9697 if (hpdev->pci_slot)
9698 pci_destroy_slot(hpdev->pci_slot);
9699 @@ -1920,7 +1921,7 @@ static void hv_eject_device_work(struct work_struct *work)
9700 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
9701 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
9702 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
9703 - vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt,
9704 + vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
9705 sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt,
9706 VM_PKT_DATA_INBAND, 0);
9707
9708 @@ -1929,7 +1930,9 @@ static void hv_eject_device_work(struct work_struct *work)
9709 /* For the two refs got in new_pcichild_device() */
9710 put_pcichild(hpdev);
9711 put_pcichild(hpdev);
9712 - put_hvpcibus(hpdev->hbus);
9713 + /* hpdev has been freed. Do not use it any more. */
9714 +
9715 + put_hvpcibus(hbus);
9716 }
9717
9718 /**
9719 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
9720 index 61f2ef28ea1c..c65465385d8c 100644
9721 --- a/drivers/pci/pci.c
9722 +++ b/drivers/pci/pci.c
9723 @@ -2004,6 +2004,13 @@ static void pci_pme_list_scan(struct work_struct *work)
9724 */
9725 if (bridge && bridge->current_state != PCI_D0)
9726 continue;
9727 + /*
9728 + * If the device is in D3cold it should not be
9729 + * polled either.
9730 + */
9731 + if (pme_dev->dev->current_state == PCI_D3cold)
9732 + continue;
9733 +
9734 pci_pme_wakeup(pme_dev->dev, NULL);
9735 } else {
9736 list_del(&pme_dev->list);
9737 diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c
9738 index f85d6b7a1984..5d2b2c02cbbe 100644
9739 --- a/drivers/ras/cec.c
9740 +++ b/drivers/ras/cec.c
9741 @@ -369,7 +369,9 @@ static int pfn_set(void *data, u64 val)
9742 {
9743 *(u64 *)data = val;
9744
9745 - return cec_add_elem(val);
9746 + cec_add_elem(val);
9747 +
9748 + return 0;
9749 }
9750
9751 DEFINE_DEBUGFS_ATTRIBUTE(pfn_ops, u64_get, pfn_set, "0x%llx\n");
9752 diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
9753 index c584bd1ffa9c..7c598c156d9e 100644
9754 --- a/drivers/regulator/s2mps11.c
9755 +++ b/drivers/regulator/s2mps11.c
9756 @@ -373,8 +373,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
9757 regulator_desc_s2mps11_buck1_4(4),
9758 regulator_desc_s2mps11_buck5,
9759 regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
9760 - regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
9761 - regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
9762 + regulator_desc_s2mps11_buck67810(7, MIN_750_MV, STEP_12_5_MV),
9763 + regulator_desc_s2mps11_buck67810(8, MIN_750_MV, STEP_12_5_MV),
9764 regulator_desc_s2mps11_buck9,
9765 regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
9766 };
9767 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
9768 index 9c7d9da42ba0..4ac4a73037f5 100644
9769 --- a/drivers/s390/cio/qdio_main.c
9770 +++ b/drivers/s390/cio/qdio_main.c
9771 @@ -749,6 +749,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
9772
9773 switch (state) {
9774 case SLSB_P_OUTPUT_EMPTY:
9775 + case SLSB_P_OUTPUT_PENDING:
9776 /* the adapter got it */
9777 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
9778 "out empty:%1d %02x", q->nr, count);
9779 diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
9780 index 90ea0f5d9bdb..5160d6214a36 100644
9781 --- a/drivers/scsi/NCR5380.c
9782 +++ b/drivers/scsi/NCR5380.c
9783 @@ -710,6 +710,8 @@ static void NCR5380_main(struct work_struct *work)
9784 NCR5380_information_transfer(instance);
9785 done = 0;
9786 }
9787 + if (!hostdata->connected)
9788 + NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9789 spin_unlock_irq(&hostdata->lock);
9790 if (!done)
9791 cond_resched();
9792 @@ -984,7 +986,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
9793 if (!hostdata->selecting) {
9794 /* Command was aborted */
9795 NCR5380_write(MODE_REG, MR_BASE);
9796 - goto out;
9797 + return NULL;
9798 }
9799 if (err < 0) {
9800 NCR5380_write(MODE_REG, MR_BASE);
9801 @@ -1033,7 +1035,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
9802 if (!hostdata->selecting) {
9803 NCR5380_write(MODE_REG, MR_BASE);
9804 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
9805 - goto out;
9806 + return NULL;
9807 }
9808
9809 dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
9810 @@ -1106,8 +1108,6 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
9811 spin_lock_irq(&hostdata->lock);
9812 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
9813 NCR5380_reselect(instance);
9814 - if (!hostdata->connected)
9815 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9816 shost_printk(KERN_ERR, instance, "reselection after won arbitration?\n");
9817 goto out;
9818 }
9819 @@ -1115,14 +1115,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
9820 if (err < 0) {
9821 spin_lock_irq(&hostdata->lock);
9822 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
9823 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9824 +
9825 /* Can't touch cmd if it has been reclaimed by the scsi ML */
9826 - if (hostdata->selecting) {
9827 - cmd->result = DID_BAD_TARGET << 16;
9828 - complete_cmd(instance, cmd);
9829 - dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
9830 - cmd = NULL;
9831 - }
9832 + if (!hostdata->selecting)
9833 + return NULL;
9834 +
9835 + cmd->result = DID_BAD_TARGET << 16;
9836 + complete_cmd(instance, cmd);
9837 + dsprintk(NDEBUG_SELECTION, instance,
9838 + "target did not respond within 250ms\n");
9839 + cmd = NULL;
9840 goto out;
9841 }
9842
9843 @@ -1150,12 +1152,11 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
9844 if (err < 0) {
9845 shost_printk(KERN_ERR, instance, "select: REQ timeout\n");
9846 NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
9847 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9848 goto out;
9849 }
9850 if (!hostdata->selecting) {
9851 do_abort(instance);
9852 - goto out;
9853 + return NULL;
9854 }
9855
9856 dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
9857 @@ -1817,9 +1818,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
9858 */
9859 NCR5380_write(TARGET_COMMAND_REG, 0);
9860
9861 - /* Enable reselect interrupts */
9862 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9863 -
9864 maybe_release_dma_irq(instance);
9865 return;
9866 case MESSAGE_REJECT:
9867 @@ -1851,8 +1849,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
9868 */
9869 NCR5380_write(TARGET_COMMAND_REG, 0);
9870
9871 - /* Enable reselect interrupts */
9872 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9873 #ifdef SUN3_SCSI_VME
9874 dregs->csr |= CSR_DMA_ENABLE;
9875 #endif
9876 @@ -1954,7 +1950,6 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
9877 cmd->result = DID_ERROR << 16;
9878 complete_cmd(instance, cmd);
9879 maybe_release_dma_irq(instance);
9880 - NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
9881 return;
9882 }
9883 msgout = NOP;
9884 diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
9885 index 31096a0b0fdd..8a6d002e6789 100644
9886 --- a/drivers/scsi/NCR5380.h
9887 +++ b/drivers/scsi/NCR5380.h
9888 @@ -235,7 +235,7 @@ struct NCR5380_cmd {
9889 #define NCR5380_PIO_CHUNK_SIZE 256
9890
9891 /* Time limit (ms) to poll registers when IRQs are disabled, e.g. during PDMA */
9892 -#define NCR5380_REG_POLL_TIME 15
9893 +#define NCR5380_REG_POLL_TIME 10
9894
9895 static inline struct scsi_cmnd *NCR5380_to_scmd(struct NCR5380_cmd *ncmd_ptr)
9896 {
9897 diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
9898 index dd6057359d7c..643321fc152d 100644
9899 --- a/drivers/scsi/mac_scsi.c
9900 +++ b/drivers/scsi/mac_scsi.c
9901 @@ -3,6 +3,8 @@
9902 *
9903 * Copyright 1998, Michael Schmitz <mschmitz@lbl.gov>
9904 *
9905 + * Copyright 2019 Finn Thain
9906 + *
9907 * derived in part from:
9908 */
9909 /*
9910 @@ -11,6 +13,7 @@
9911 * Copyright 1995, Russell King
9912 */
9913
9914 +#include <linux/delay.h>
9915 #include <linux/types.h>
9916 #include <linux/module.h>
9917 #include <linux/ioport.h>
9918 @@ -52,7 +55,7 @@ static int setup_cmd_per_lun = -1;
9919 module_param(setup_cmd_per_lun, int, 0);
9920 static int setup_sg_tablesize = -1;
9921 module_param(setup_sg_tablesize, int, 0);
9922 -static int setup_use_pdma = -1;
9923 +static int setup_use_pdma = 512;
9924 module_param(setup_use_pdma, int, 0);
9925 static int setup_hostid = -1;
9926 module_param(setup_hostid, int, 0);
9927 @@ -89,101 +92,217 @@ static int __init mac_scsi_setup(char *str)
9928 __setup("mac5380=", mac_scsi_setup);
9929 #endif /* !MODULE */
9930
9931 -/* Pseudo DMA asm originally by Ove Edlund */
9932 -
9933 -#define CP_IO_TO_MEM(s,d,n) \
9934 -__asm__ __volatile__ \
9935 - (" cmp.w #4,%2\n" \
9936 - " bls 8f\n" \
9937 - " move.w %1,%%d0\n" \
9938 - " neg.b %%d0\n" \
9939 - " and.w #3,%%d0\n" \
9940 - " sub.w %%d0,%2\n" \
9941 - " bra 2f\n" \
9942 - " 1: move.b (%0),(%1)+\n" \
9943 - " 2: dbf %%d0,1b\n" \
9944 - " move.w %2,%%d0\n" \
9945 - " lsr.w #5,%%d0\n" \
9946 - " bra 4f\n" \
9947 - " 3: move.l (%0),(%1)+\n" \
9948 - "31: move.l (%0),(%1)+\n" \
9949 - "32: move.l (%0),(%1)+\n" \
9950 - "33: move.l (%0),(%1)+\n" \
9951 - "34: move.l (%0),(%1)+\n" \
9952 - "35: move.l (%0),(%1)+\n" \
9953 - "36: move.l (%0),(%1)+\n" \
9954 - "37: move.l (%0),(%1)+\n" \
9955 - " 4: dbf %%d0,3b\n" \
9956 - " move.w %2,%%d0\n" \
9957 - " lsr.w #2,%%d0\n" \
9958 - " and.w #7,%%d0\n" \
9959 - " bra 6f\n" \
9960 - " 5: move.l (%0),(%1)+\n" \
9961 - " 6: dbf %%d0,5b\n" \
9962 - " and.w #3,%2\n" \
9963 - " bra 8f\n" \
9964 - " 7: move.b (%0),(%1)+\n" \
9965 - " 8: dbf %2,7b\n" \
9966 - " moveq.l #0, %2\n" \
9967 - " 9: \n" \
9968 - ".section .fixup,\"ax\"\n" \
9969 - " .even\n" \
9970 - "91: moveq.l #1, %2\n" \
9971 - " jra 9b\n" \
9972 - "94: moveq.l #4, %2\n" \
9973 - " jra 9b\n" \
9974 - ".previous\n" \
9975 - ".section __ex_table,\"a\"\n" \
9976 - " .align 4\n" \
9977 - " .long 1b,91b\n" \
9978 - " .long 3b,94b\n" \
9979 - " .long 31b,94b\n" \
9980 - " .long 32b,94b\n" \
9981 - " .long 33b,94b\n" \
9982 - " .long 34b,94b\n" \
9983 - " .long 35b,94b\n" \
9984 - " .long 36b,94b\n" \
9985 - " .long 37b,94b\n" \
9986 - " .long 5b,94b\n" \
9987 - " .long 7b,91b\n" \
9988 - ".previous" \
9989 - : "=a"(s), "=a"(d), "=d"(n) \
9990 - : "0"(s), "1"(d), "2"(n) \
9991 - : "d0")
9992 +/*
9993 + * According to "Inside Macintosh: Devices", Mac OS requires disk drivers to
9994 + * specify the number of bytes between the delays expected from a SCSI target.
9995 + * This allows the operating system to "prevent bus errors when a target fails
9996 + * to deliver the next byte within the processor bus error timeout period."
9997 + * Linux SCSI drivers lack knowledge of the timing behaviour of SCSI targets
9998 + * so bus errors are unavoidable.
9999 + *
10000 + * If a MOVE.B instruction faults, we assume that zero bytes were transferred
10001 + * and simply retry. That assumption probably depends on target behaviour but
10002 + * seems to hold up okay. The NOP provides synchronization: without it the
10003 + * fault can sometimes occur after the program counter has moved past the
10004 + * offending instruction. Post-increment addressing can't be used.
10005 + */
10006 +
10007 +#define MOVE_BYTE(operands) \
10008 + asm volatile ( \
10009 + "1: moveb " operands " \n" \
10010 + "11: nop \n" \
10011 + " addq #1,%0 \n" \
10012 + " subq #1,%1 \n" \
10013 + "40: \n" \
10014 + " \n" \
10015 + ".section .fixup,\"ax\" \n" \
10016 + ".even \n" \
10017 + "90: movel #1, %2 \n" \
10018 + " jra 40b \n" \
10019 + ".previous \n" \
10020 + " \n" \
10021 + ".section __ex_table,\"a\" \n" \
10022 + ".align 4 \n" \
10023 + ".long 1b,90b \n" \
10024 + ".long 11b,90b \n" \
10025 + ".previous \n" \
10026 + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
10027 +
10028 +/*
10029 + * If a MOVE.W (or MOVE.L) instruction faults, it cannot be retried because
10030 + * the residual byte count would be uncertain. In that situation the MOVE_WORD
10031 + * macro clears n in the fixup section to abort the transfer.
10032 + */
10033 +
10034 +#define MOVE_WORD(operands) \
10035 + asm volatile ( \
10036 + "1: movew " operands " \n" \
10037 + "11: nop \n" \
10038 + " subq #2,%1 \n" \
10039 + "40: \n" \
10040 + " \n" \
10041 + ".section .fixup,\"ax\" \n" \
10042 + ".even \n" \
10043 + "90: movel #0, %1 \n" \
10044 + " movel #2, %2 \n" \
10045 + " jra 40b \n" \
10046 + ".previous \n" \
10047 + " \n" \
10048 + ".section __ex_table,\"a\" \n" \
10049 + ".align 4 \n" \
10050 + ".long 1b,90b \n" \
10051 + ".long 11b,90b \n" \
10052 + ".previous \n" \
10053 + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
10054 +
10055 +#define MOVE_16_WORDS(operands) \
10056 + asm volatile ( \
10057 + "1: movew " operands " \n" \
10058 + "2: movew " operands " \n" \
10059 + "3: movew " operands " \n" \
10060 + "4: movew " operands " \n" \
10061 + "5: movew " operands " \n" \
10062 + "6: movew " operands " \n" \
10063 + "7: movew " operands " \n" \
10064 + "8: movew " operands " \n" \
10065 + "9: movew " operands " \n" \
10066 + "10: movew " operands " \n" \
10067 + "11: movew " operands " \n" \
10068 + "12: movew " operands " \n" \
10069 + "13: movew " operands " \n" \
10070 + "14: movew " operands " \n" \
10071 + "15: movew " operands " \n" \
10072 + "16: movew " operands " \n" \
10073 + "17: nop \n" \
10074 + " subl #32,%1 \n" \
10075 + "40: \n" \
10076 + " \n" \
10077 + ".section .fixup,\"ax\" \n" \
10078 + ".even \n" \
10079 + "90: movel #0, %1 \n" \
10080 + " movel #2, %2 \n" \
10081 + " jra 40b \n" \
10082 + ".previous \n" \
10083 + " \n" \
10084 + ".section __ex_table,\"a\" \n" \
10085 + ".align 4 \n" \
10086 + ".long 1b,90b \n" \
10087 + ".long 2b,90b \n" \
10088 + ".long 3b,90b \n" \
10089 + ".long 4b,90b \n" \
10090 + ".long 5b,90b \n" \
10091 + ".long 6b,90b \n" \
10092 + ".long 7b,90b \n" \
10093 + ".long 8b,90b \n" \
10094 + ".long 9b,90b \n" \
10095 + ".long 10b,90b \n" \
10096 + ".long 11b,90b \n" \
10097 + ".long 12b,90b \n" \
10098 + ".long 13b,90b \n" \
10099 + ".long 14b,90b \n" \
10100 + ".long 15b,90b \n" \
10101 + ".long 16b,90b \n" \
10102 + ".long 17b,90b \n" \
10103 + ".previous \n" \
10104 + : "+a" (addr), "+r" (n), "+r" (result) : "a" (io))
10105 +
10106 +#define MAC_PDMA_DELAY 32
10107 +
10108 +static inline int mac_pdma_recv(void __iomem *io, unsigned char *start, int n)
10109 +{
10110 + unsigned char *addr = start;
10111 + int result = 0;
10112 +
10113 + if (n >= 1) {
10114 + MOVE_BYTE("%3@,%0@");
10115 + if (result)
10116 + goto out;
10117 + }
10118 + if (n >= 1 && ((unsigned long)addr & 1)) {
10119 + MOVE_BYTE("%3@,%0@");
10120 + if (result)
10121 + goto out;
10122 + }
10123 + while (n >= 32)
10124 + MOVE_16_WORDS("%3@,%0@+");
10125 + while (n >= 2)
10126 + MOVE_WORD("%3@,%0@+");
10127 + if (result)
10128 + return start - addr; /* Negated to indicate uncertain length */
10129 + if (n == 1)
10130 + MOVE_BYTE("%3@,%0@");
10131 +out:
10132 + return addr - start;
10133 +}
10134 +
10135 +static inline int mac_pdma_send(unsigned char *start, void __iomem *io, int n)
10136 +{
10137 + unsigned char *addr = start;
10138 + int result = 0;
10139 +
10140 + if (n >= 1) {
10141 + MOVE_BYTE("%0@,%3@");
10142 + if (result)
10143 + goto out;
10144 + }
10145 + if (n >= 1 && ((unsigned long)addr & 1)) {
10146 + MOVE_BYTE("%0@,%3@");
10147 + if (result)
10148 + goto out;
10149 + }
10150 + while (n >= 32)
10151 + MOVE_16_WORDS("%0@+,%3@");
10152 + while (n >= 2)
10153 + MOVE_WORD("%0@+,%3@");
10154 + if (result)
10155 + return start - addr; /* Negated to indicate uncertain length */
10156 + if (n == 1)
10157 + MOVE_BYTE("%0@,%3@");
10158 +out:
10159 + return addr - start;
10160 +}
10161
10162 static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
10163 unsigned char *dst, int len)
10164 {
10165 u8 __iomem *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
10166 unsigned char *d = dst;
10167 - int n = len;
10168 - int transferred;
10169 +
10170 + hostdata->pdma_residual = len;
10171
10172 while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
10173 BASR_DRQ | BASR_PHASE_MATCH,
10174 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
10175 - CP_IO_TO_MEM(s, d, n);
10176 + int bytes;
10177
10178 - transferred = d - dst - n;
10179 - hostdata->pdma_residual = len - transferred;
10180 + bytes = mac_pdma_recv(s, d, min(hostdata->pdma_residual, 512));
10181
10182 - /* No bus error. */
10183 - if (n == 0)
10184 + if (bytes > 0) {
10185 + d += bytes;
10186 + hostdata->pdma_residual -= bytes;
10187 + }
10188 +
10189 + if (hostdata->pdma_residual == 0)
10190 return 0;
10191
10192 - /* Target changed phase early? */
10193 if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
10194 - BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
10195 - scmd_printk(KERN_ERR, hostdata->connected,
10196 + BUS_AND_STATUS_REG, BASR_ACK,
10197 + BASR_ACK, HZ / 64) < 0)
10198 + scmd_printk(KERN_DEBUG, hostdata->connected,
10199 "%s: !REQ and !ACK\n", __func__);
10200 if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
10201 return 0;
10202
10203 + if (bytes == 0)
10204 + udelay(MAC_PDMA_DELAY);
10205 +
10206 + if (bytes >= 0)
10207 + continue;
10208 +
10209 dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
10210 - "%s: bus error (%d/%d)\n", __func__, transferred, len);
10211 + "%s: bus error (%d/%d)\n", __func__, d - dst, len);
10212 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
10213 - d = dst + transferred;
10214 - n = len - transferred;
10215 + return -1;
10216 }
10217
10218 scmd_printk(KERN_ERR, hostdata->connected,
10219 @@ -192,93 +311,27 @@ static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
10220 return -1;
10221 }
10222
10223 -
10224 -#define CP_MEM_TO_IO(s,d,n) \
10225 -__asm__ __volatile__ \
10226 - (" cmp.w #4,%2\n" \
10227 - " bls 8f\n" \
10228 - " move.w %0,%%d0\n" \
10229 - " neg.b %%d0\n" \
10230 - " and.w #3,%%d0\n" \
10231 - " sub.w %%d0,%2\n" \
10232 - " bra 2f\n" \
10233 - " 1: move.b (%0)+,(%1)\n" \
10234 - " 2: dbf %%d0,1b\n" \
10235 - " move.w %2,%%d0\n" \
10236 - " lsr.w #5,%%d0\n" \
10237 - " bra 4f\n" \
10238 - " 3: move.l (%0)+,(%1)\n" \
10239 - "31: move.l (%0)+,(%1)\n" \
10240 - "32: move.l (%0)+,(%1)\n" \
10241 - "33: move.l (%0)+,(%1)\n" \
10242 - "34: move.l (%0)+,(%1)\n" \
10243 - "35: move.l (%0)+,(%1)\n" \
10244 - "36: move.l (%0)+,(%1)\n" \
10245 - "37: move.l (%0)+,(%1)\n" \
10246 - " 4: dbf %%d0,3b\n" \
10247 - " move.w %2,%%d0\n" \
10248 - " lsr.w #2,%%d0\n" \
10249 - " and.w #7,%%d0\n" \
10250 - " bra 6f\n" \
10251 - " 5: move.l (%0)+,(%1)\n" \
10252 - " 6: dbf %%d0,5b\n" \
10253 - " and.w #3,%2\n" \
10254 - " bra 8f\n" \
10255 - " 7: move.b (%0)+,(%1)\n" \
10256 - " 8: dbf %2,7b\n" \
10257 - " moveq.l #0, %2\n" \
10258 - " 9: \n" \
10259 - ".section .fixup,\"ax\"\n" \
10260 - " .even\n" \
10261 - "91: moveq.l #1, %2\n" \
10262 - " jra 9b\n" \
10263 - "94: moveq.l #4, %2\n" \
10264 - " jra 9b\n" \
10265 - ".previous\n" \
10266 - ".section __ex_table,\"a\"\n" \
10267 - " .align 4\n" \
10268 - " .long 1b,91b\n" \
10269 - " .long 3b,94b\n" \
10270 - " .long 31b,94b\n" \
10271 - " .long 32b,94b\n" \
10272 - " .long 33b,94b\n" \
10273 - " .long 34b,94b\n" \
10274 - " .long 35b,94b\n" \
10275 - " .long 36b,94b\n" \
10276 - " .long 37b,94b\n" \
10277 - " .long 5b,94b\n" \
10278 - " .long 7b,91b\n" \
10279 - ".previous" \
10280 - : "=a"(s), "=a"(d), "=d"(n) \
10281 - : "0"(s), "1"(d), "2"(n) \
10282 - : "d0")
10283 -
10284 static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
10285 unsigned char *src, int len)
10286 {
10287 unsigned char *s = src;
10288 u8 __iomem *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
10289 - int n = len;
10290 - int transferred;
10291 +
10292 + hostdata->pdma_residual = len;
10293
10294 while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
10295 BASR_DRQ | BASR_PHASE_MATCH,
10296 BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
10297 - CP_MEM_TO_IO(s, d, n);
10298 + int bytes;
10299
10300 - transferred = s - src - n;
10301 - hostdata->pdma_residual = len - transferred;
10302 + bytes = mac_pdma_send(s, d, min(hostdata->pdma_residual, 512));
10303
10304 - /* Target changed phase early? */
10305 - if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
10306 - BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
10307 - scmd_printk(KERN_ERR, hostdata->connected,
10308 - "%s: !REQ and !ACK\n", __func__);
10309 - if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
10310 - return 0;
10311 + if (bytes > 0) {
10312 + s += bytes;
10313 + hostdata->pdma_residual -= bytes;
10314 + }
10315
10316 - /* No bus error. */
10317 - if (n == 0) {
10318 + if (hostdata->pdma_residual == 0) {
10319 if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
10320 TCR_LAST_BYTE_SENT,
10321 TCR_LAST_BYTE_SENT, HZ / 64) < 0)
10322 @@ -287,17 +340,29 @@ static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
10323 return 0;
10324 }
10325
10326 + if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
10327 + BUS_AND_STATUS_REG, BASR_ACK,
10328 + BASR_ACK, HZ / 64) < 0)
10329 + scmd_printk(KERN_DEBUG, hostdata->connected,
10330 + "%s: !REQ and !ACK\n", __func__);
10331 + if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
10332 + return 0;
10333 +
10334 + if (bytes == 0)
10335 + udelay(MAC_PDMA_DELAY);
10336 +
10337 + if (bytes >= 0)
10338 + continue;
10339 +
10340 dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
10341 - "%s: bus error (%d/%d)\n", __func__, transferred, len);
10342 + "%s: bus error (%d/%d)\n", __func__, s - src, len);
10343 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
10344 - s = src + transferred;
10345 - n = len - transferred;
10346 + return -1;
10347 }
10348
10349 scmd_printk(KERN_ERR, hostdata->connected,
10350 "%s: phase mismatch or !DRQ\n", __func__);
10351 NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
10352 -
10353 return -1;
10354 }
10355
10356 @@ -305,7 +370,7 @@ static int macscsi_dma_xfer_len(struct NCR5380_hostdata *hostdata,
10357 struct scsi_cmnd *cmd)
10358 {
10359 if (hostdata->flags & FLAG_NO_PSEUDO_DMA ||
10360 - cmd->SCp.this_residual < 16)
10361 + cmd->SCp.this_residual < setup_use_pdma)
10362 return 0;
10363
10364 return cmd->SCp.this_residual;
10365 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
10366 index acb503ea8f0c..e0c87228438d 100644
10367 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
10368 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
10369 @@ -5862,7 +5862,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
10370 int ret;
10371 struct megasas_cmd *cmd;
10372 struct megasas_dcmd_frame *dcmd;
10373 - u16 targetId = (sdev->channel % 2) + sdev->id;
10374 + u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
10375 + sdev->id;
10376
10377 cmd = megasas_get_cmd(instance);
10378
10379 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
10380 index 1fc832751a4f..32652b2c5e7c 100644
10381 --- a/drivers/scsi/scsi_lib.c
10382 +++ b/drivers/scsi/scsi_lib.c
10383 @@ -71,11 +71,11 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
10384 struct kmem_cache *cache;
10385 int ret = 0;
10386
10387 + mutex_lock(&scsi_sense_cache_mutex);
10388 cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
10389 if (cache)
10390 - return 0;
10391 + goto exit;
10392
10393 - mutex_lock(&scsi_sense_cache_mutex);
10394 if (shost->unchecked_isa_dma) {
10395 scsi_sense_isadma_cache =
10396 kmem_cache_create("scsi_sense_cache(DMA)",
10397 @@ -91,7 +91,7 @@ int scsi_init_sense_cache(struct Scsi_Host *shost)
10398 if (!scsi_sense_cache)
10399 ret = -ENOMEM;
10400 }
10401 -
10402 + exit:
10403 mutex_unlock(&scsi_sense_cache_mutex);
10404 return ret;
10405 }
10406 diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
10407 index 1269a983455e..13b890b9ef18 100644
10408 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
10409 +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
10410 @@ -422,6 +422,9 @@ static int vpfe_open(struct file *file)
10411 /* If decoder is not initialized. initialize it */
10412 if (!video->initialized && vpfe_update_pipe_state(video)) {
10413 mutex_unlock(&video->lock);
10414 + v4l2_fh_del(&handle->vfh);
10415 + v4l2_fh_exit(&handle->vfh);
10416 + kfree(handle);
10417 return -ENODEV;
10418 }
10419 /* Increment device users counter */
10420 diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
10421 index 4e680d753941..e2fa3a3bc81d 100644
10422 --- a/drivers/target/iscsi/iscsi_target_auth.c
10423 +++ b/drivers/target/iscsi/iscsi_target_auth.c
10424 @@ -89,6 +89,12 @@ out:
10425 return CHAP_DIGEST_UNKNOWN;
10426 }
10427
10428 +static void chap_close(struct iscsi_conn *conn)
10429 +{
10430 + kfree(conn->auth_protocol);
10431 + conn->auth_protocol = NULL;
10432 +}
10433 +
10434 static struct iscsi_chap *chap_server_open(
10435 struct iscsi_conn *conn,
10436 struct iscsi_node_auth *auth,
10437 @@ -126,7 +132,7 @@ static struct iscsi_chap *chap_server_open(
10438 case CHAP_DIGEST_UNKNOWN:
10439 default:
10440 pr_err("Unsupported CHAP_A value\n");
10441 - kfree(conn->auth_protocol);
10442 + chap_close(conn);
10443 return NULL;
10444 }
10445
10446 @@ -141,19 +147,13 @@ static struct iscsi_chap *chap_server_open(
10447 * Generate Challenge.
10448 */
10449 if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
10450 - kfree(conn->auth_protocol);
10451 + chap_close(conn);
10452 return NULL;
10453 }
10454
10455 return chap;
10456 }
10457
10458 -static void chap_close(struct iscsi_conn *conn)
10459 -{
10460 - kfree(conn->auth_protocol);
10461 - conn->auth_protocol = NULL;
10462 -}
10463 -
10464 static int chap_server_compute_md5(
10465 struct iscsi_conn *conn,
10466 struct iscsi_node_auth *auth,
10467 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
10468 index eb24ec0e160d..f4e8e869649a 100644
10469 --- a/drivers/usb/core/hub.c
10470 +++ b/drivers/usb/core/hub.c
10471 @@ -3575,6 +3575,7 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
10472 struct usb_device *hdev;
10473 struct usb_device *udev;
10474 int connect_change = 0;
10475 + u16 link_state;
10476 int ret;
10477
10478 hdev = hub->hdev;
10479 @@ -3584,9 +3585,11 @@ static int hub_handle_remote_wakeup(struct usb_hub *hub, unsigned int port,
10480 return 0;
10481 usb_clear_port_feature(hdev, port, USB_PORT_FEAT_C_SUSPEND);
10482 } else {
10483 + link_state = portstatus & USB_PORT_STAT_LINK_STATE;
10484 if (!udev || udev->state != USB_STATE_SUSPENDED ||
10485 - (portstatus & USB_PORT_STAT_LINK_STATE) !=
10486 - USB_SS_PORT_LS_U0)
10487 + (link_state != USB_SS_PORT_LS_U0 &&
10488 + link_state != USB_SS_PORT_LS_U1 &&
10489 + link_state != USB_SS_PORT_LS_U2))
10490 return 0;
10491 }
10492
10493 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
10494 index 39155d7cc894..ae704658b528 100644
10495 --- a/drivers/vhost/net.c
10496 +++ b/drivers/vhost/net.c
10497 @@ -36,7 +36,7 @@
10498
10499 #include "vhost.h"
10500
10501 -static int experimental_zcopytx = 1;
10502 +static int experimental_zcopytx = 0;
10503 module_param(experimental_zcopytx, int, 0444);
10504 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
10505 " 1 -Enable; 0 - Disable");
10506 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
10507 index 7ab6caef599c..d4e8b717ce2b 100644
10508 --- a/drivers/xen/balloon.c
10509 +++ b/drivers/xen/balloon.c
10510 @@ -527,8 +527,15 @@ static void balloon_process(struct work_struct *work)
10511 state = reserve_additional_memory();
10512 }
10513
10514 - if (credit < 0)
10515 - state = decrease_reservation(-credit, GFP_BALLOON);
10516 + if (credit < 0) {
10517 + long n_pages;
10518 +
10519 + n_pages = min(-credit, si_mem_available());
10520 + state = decrease_reservation(n_pages, GFP_BALLOON);
10521 + if (state == BP_DONE && n_pages != -credit &&
10522 + n_pages < totalreserve_pages)
10523 + state = BP_EAGAIN;
10524 + }
10525
10526 state = update_schedule(state);
10527
10528 @@ -567,6 +574,9 @@ static int add_ballooned_pages(int nr_pages)
10529 }
10530 }
10531
10532 + if (si_mem_available() < nr_pages)
10533 + return -ENOMEM;
10534 +
10535 st = decrease_reservation(nr_pages, GFP_USER);
10536 if (st != BP_DONE)
10537 return -ENOMEM;
10538 @@ -696,7 +706,7 @@ static int __init balloon_init(void)
10539 balloon_stats.schedule_delay = 1;
10540 balloon_stats.max_schedule_delay = 32;
10541 balloon_stats.retry_count = 1;
10542 - balloon_stats.max_retry_count = RETRY_UNLIMITED;
10543 + balloon_stats.max_retry_count = 4;
10544
10545 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
10546 set_online_page_callback(&xen_online_page);
10547 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
10548 index fe1f16351f94..8d49b91d92cd 100644
10549 --- a/drivers/xen/events/events_base.c
10550 +++ b/drivers/xen/events/events_base.c
10551 @@ -1293,7 +1293,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
10552 }
10553
10554 /* Rebind an evtchn so that it gets delivered to a specific cpu */
10555 -int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
10556 +static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
10557 {
10558 struct evtchn_bind_vcpu bind_vcpu;
10559 int masked;
10560 @@ -1327,7 +1327,6 @@ int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu)
10561
10562 return 0;
10563 }
10564 -EXPORT_SYMBOL_GPL(xen_rebind_evtchn_to_cpu);
10565
10566 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
10567 bool force)
10568 @@ -1341,6 +1340,15 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
10569 return ret;
10570 }
10571
10572 +/* To be called with desc->lock held. */
10573 +int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
10574 +{
10575 + struct irq_data *d = irq_desc_get_irq_data(desc);
10576 +
10577 + return set_affinity_irq(d, cpumask_of(tcpu), false);
10578 +}
10579 +EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
10580 +
10581 static void enable_dynirq(struct irq_data *data)
10582 {
10583 int evtchn = evtchn_from_irq(data->irq);
10584 diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
10585 index 6d1a5e58968f..47c70b826a6a 100644
10586 --- a/drivers/xen/evtchn.c
10587 +++ b/drivers/xen/evtchn.c
10588 @@ -447,7 +447,7 @@ static void evtchn_bind_interdom_next_vcpu(int evtchn)
10589 this_cpu_write(bind_last_selected_cpu, selected_cpu);
10590
10591 /* unmask expects irqs to be disabled */
10592 - xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
10593 + xen_set_affinity_evtchn(desc, selected_cpu);
10594 raw_spin_unlock_irqrestore(&desc->lock, flags);
10595 }
10596
10597 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
10598 index e24c0a69ff5d..c84186563c31 100644
10599 --- a/fs/btrfs/file.c
10600 +++ b/fs/btrfs/file.c
10601 @@ -2732,6 +2732,11 @@ out_only_mutex:
10602 * for detecting, at fsync time, if the inode isn't yet in the
10603 * log tree or it's there but not up to date.
10604 */
10605 + struct timespec64 now = current_time(inode);
10606 +
10607 + inode_inc_iversion(inode);
10608 + inode->i_mtime = now;
10609 + inode->i_ctime = now;
10610 trans = btrfs_start_transaction(root, 1);
10611 if (IS_ERR(trans)) {
10612 err = PTR_ERR(trans);
10613 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
10614 index 0d5840d20efc..08c5afa06aee 100644
10615 --- a/fs/btrfs/tree-log.c
10616 +++ b/fs/btrfs/tree-log.c
10617 @@ -3262,6 +3262,30 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
10618 return 0;
10619 }
10620
10621 +/*
10622 + * Check if an inode was logged in the current transaction. We can't always rely
10623 + * on an inode's logged_trans value, because it's an in-memory only field and
10624 + * therefore not persisted. This means that its value is lost if the inode gets
10625 + * evicted and loaded again from disk (in which case it has a value of 0, and
10626 + * certainly it is smaller then any possible transaction ID), when that happens
10627 + * the full_sync flag is set in the inode's runtime flags, so on that case we
10628 + * assume eviction happened and ignore the logged_trans value, assuming the
10629 + * worst case, that the inode was logged before in the current transaction.
10630 + */
10631 +static bool inode_logged(struct btrfs_trans_handle *trans,
10632 + struct btrfs_inode *inode)
10633 +{
10634 + if (inode->logged_trans == trans->transid)
10635 + return true;
10636 +
10637 + if (inode->last_trans == trans->transid &&
10638 + test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) &&
10639 + !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags))
10640 + return true;
10641 +
10642 + return false;
10643 +}
10644 +
10645 /*
10646 * If both a file and directory are logged, and unlinks or renames are
10647 * mixed in, we have a few interesting corners:
10648 @@ -3296,7 +3320,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
10649 int bytes_del = 0;
10650 u64 dir_ino = btrfs_ino(dir);
10651
10652 - if (dir->logged_trans < trans->transid)
10653 + if (!inode_logged(trans, dir))
10654 return 0;
10655
10656 ret = join_running_log_trans(root);
10657 @@ -3401,7 +3425,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
10658 u64 index;
10659 int ret;
10660
10661 - if (inode->logged_trans < trans->transid)
10662 + if (!inode_logged(trans, inode))
10663 return 0;
10664
10665 ret = join_running_log_trans(root);
10666 @@ -5250,9 +5274,19 @@ log_extents:
10667 }
10668 }
10669
10670 + /*
10671 + * Don't update last_log_commit if we logged that an inode exists after
10672 + * it was loaded to memory (full_sync bit set).
10673 + * This is to prevent data loss when we do a write to the inode, then
10674 + * the inode gets evicted after all delalloc was flushed, then we log
10675 + * it exists (due to a rename for example) and then fsync it. This last
10676 + * fsync would do nothing (not logging the extents previously written).
10677 + */
10678 spin_lock(&inode->lock);
10679 inode->logged_trans = trans->transid;
10680 - inode->last_log_commit = inode->last_sub_trans;
10681 + if (inode_only != LOG_INODE_EXISTS ||
10682 + !test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
10683 + inode->last_log_commit = inode->last_sub_trans;
10684 spin_unlock(&inode->lock);
10685 out_unlock:
10686 mutex_unlock(&inode->log_mutex);
10687 diff --git a/fs/coda/file.c b/fs/coda/file.c
10688 index 1cbc1f2298ee..43d371551d2b 100644
10689 --- a/fs/coda/file.c
10690 +++ b/fs/coda/file.c
10691 @@ -27,6 +27,13 @@
10692 #include "coda_linux.h"
10693 #include "coda_int.h"
10694
10695 +struct coda_vm_ops {
10696 + atomic_t refcnt;
10697 + struct file *coda_file;
10698 + const struct vm_operations_struct *host_vm_ops;
10699 + struct vm_operations_struct vm_ops;
10700 +};
10701 +
10702 static ssize_t
10703 coda_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
10704 {
10705 @@ -61,6 +68,34 @@ coda_file_write_iter(struct kiocb *iocb, struct iov_iter *to)
10706 return ret;
10707 }
10708
10709 +static void
10710 +coda_vm_open(struct vm_area_struct *vma)
10711 +{
10712 + struct coda_vm_ops *cvm_ops =
10713 + container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
10714 +
10715 + atomic_inc(&cvm_ops->refcnt);
10716 +
10717 + if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->open)
10718 + cvm_ops->host_vm_ops->open(vma);
10719 +}
10720 +
10721 +static void
10722 +coda_vm_close(struct vm_area_struct *vma)
10723 +{
10724 + struct coda_vm_ops *cvm_ops =
10725 + container_of(vma->vm_ops, struct coda_vm_ops, vm_ops);
10726 +
10727 + if (cvm_ops->host_vm_ops && cvm_ops->host_vm_ops->close)
10728 + cvm_ops->host_vm_ops->close(vma);
10729 +
10730 + if (atomic_dec_and_test(&cvm_ops->refcnt)) {
10731 + vma->vm_ops = cvm_ops->host_vm_ops;
10732 + fput(cvm_ops->coda_file);
10733 + kfree(cvm_ops);
10734 + }
10735 +}
10736 +
10737 static int
10738 coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
10739 {
10740 @@ -68,6 +103,8 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
10741 struct coda_inode_info *cii;
10742 struct file *host_file;
10743 struct inode *coda_inode, *host_inode;
10744 + struct coda_vm_ops *cvm_ops;
10745 + int ret;
10746
10747 cfi = CODA_FTOC(coda_file);
10748 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC);
10749 @@ -76,6 +113,13 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
10750 if (!host_file->f_op->mmap)
10751 return -ENODEV;
10752
10753 + if (WARN_ON(coda_file != vma->vm_file))
10754 + return -EIO;
10755 +
10756 + cvm_ops = kmalloc(sizeof(struct coda_vm_ops), GFP_KERNEL);
10757 + if (!cvm_ops)
10758 + return -ENOMEM;
10759 +
10760 coda_inode = file_inode(coda_file);
10761 host_inode = file_inode(host_file);
10762
10763 @@ -89,6 +133,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
10764 * the container file on us! */
10765 else if (coda_inode->i_mapping != host_inode->i_mapping) {
10766 spin_unlock(&cii->c_lock);
10767 + kfree(cvm_ops);
10768 return -EBUSY;
10769 }
10770
10771 @@ -97,7 +142,29 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma)
10772 cfi->cfi_mapcount++;
10773 spin_unlock(&cii->c_lock);
10774
10775 - return call_mmap(host_file, vma);
10776 + vma->vm_file = get_file(host_file);
10777 + ret = call_mmap(vma->vm_file, vma);
10778 +
10779 + if (ret) {
10780 + /* if call_mmap fails, our caller will put coda_file so we
10781 + * should drop the reference to the host_file that we got.
10782 + */
10783 + fput(host_file);
10784 + kfree(cvm_ops);
10785 + } else {
10786 + /* here we add redirects for the open/close vm_operations */
10787 + cvm_ops->host_vm_ops = vma->vm_ops;
10788 + if (vma->vm_ops)
10789 + cvm_ops->vm_ops = *vma->vm_ops;
10790 +
10791 + cvm_ops->vm_ops.open = coda_vm_open;
10792 + cvm_ops->vm_ops.close = coda_vm_close;
10793 + cvm_ops->coda_file = coda_file;
10794 + atomic_set(&cvm_ops->refcnt, 1);
10795 +
10796 + vma->vm_ops = &cvm_ops->vm_ops;
10797 + }
10798 + return ret;
10799 }
10800
10801 int coda_open(struct inode *coda_inode, struct file *coda_file)
10802 @@ -207,4 +274,3 @@ const struct file_operations coda_file_operations = {
10803 .fsync = coda_fsync,
10804 .splice_read = generic_file_splice_read,
10805 };
10806 -
10807 diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
10808 index 0f46cf550907..c83ddff3ff4a 100644
10809 --- a/fs/crypto/crypto.c
10810 +++ b/fs/crypto/crypto.c
10811 @@ -149,7 +149,10 @@ int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
10812 struct crypto_skcipher *tfm = ci->ci_ctfm;
10813 int res = 0;
10814
10815 - BUG_ON(len == 0);
10816 + if (WARN_ON_ONCE(len <= 0))
10817 + return -EINVAL;
10818 + if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
10819 + return -EINVAL;
10820
10821 BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
10822 BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
10823 @@ -241,8 +244,6 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
10824 struct page *ciphertext_page = page;
10825 int err;
10826
10827 - BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
10828 -
10829 if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
10830 /* with inplace-encryption we just encrypt the page */
10831 err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
10832 @@ -254,7 +255,8 @@ struct page *fscrypt_encrypt_page(const struct inode *inode,
10833 return ciphertext_page;
10834 }
10835
10836 - BUG_ON(!PageLocked(page));
10837 + if (WARN_ON_ONCE(!PageLocked(page)))
10838 + return ERR_PTR(-EINVAL);
10839
10840 ctx = fscrypt_get_ctx(inode, gfp_flags);
10841 if (IS_ERR(ctx))
10842 @@ -302,8 +304,9 @@ EXPORT_SYMBOL(fscrypt_encrypt_page);
10843 int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
10844 unsigned int len, unsigned int offs, u64 lblk_num)
10845 {
10846 - if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
10847 - BUG_ON(!PageLocked(page));
10848 + if (WARN_ON_ONCE(!PageLocked(page) &&
10849 + !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
10850 + return -EINVAL;
10851
10852 return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
10853 len, offs, GFP_NOFS);
10854 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
10855 index 4dd842f72846..708f931c36f1 100644
10856 --- a/fs/ecryptfs/crypto.c
10857 +++ b/fs/ecryptfs/crypto.c
10858 @@ -1018,8 +1018,10 @@ int ecryptfs_read_and_validate_header_region(struct inode *inode)
10859
10860 rc = ecryptfs_read_lower(file_size, 0, ECRYPTFS_SIZE_AND_MARKER_BYTES,
10861 inode);
10862 - if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
10863 - return rc >= 0 ? -EINVAL : rc;
10864 + if (rc < 0)
10865 + return rc;
10866 + else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
10867 + return -EINVAL;
10868 rc = ecryptfs_validate_marker(marker);
10869 if (!rc)
10870 ecryptfs_i_size_init(file_size, inode);
10871 @@ -1381,8 +1383,10 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
10872 ecryptfs_inode_to_lower(inode),
10873 ECRYPTFS_XATTR_NAME, file_size,
10874 ECRYPTFS_SIZE_AND_MARKER_BYTES);
10875 - if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
10876 - return rc >= 0 ? -EINVAL : rc;
10877 + if (rc < 0)
10878 + return rc;
10879 + else if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES)
10880 + return -EINVAL;
10881 rc = ecryptfs_validate_marker(marker);
10882 if (!rc)
10883 ecryptfs_i_size_init(file_size, inode);
10884 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
10885 index 9544e2f8b79f..7ee86d8f313d 100644
10886 --- a/fs/fs-writeback.c
10887 +++ b/fs/fs-writeback.c
10888 @@ -721,6 +721,7 @@ void wbc_detach_inode(struct writeback_control *wbc)
10889 void wbc_account_io(struct writeback_control *wbc, struct page *page,
10890 size_t bytes)
10891 {
10892 + struct cgroup_subsys_state *css;
10893 int id;
10894
10895 /*
10896 @@ -732,7 +733,12 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
10897 if (!wbc->wb)
10898 return;
10899
10900 - id = mem_cgroup_css_from_page(page)->id;
10901 + css = mem_cgroup_css_from_page(page);
10902 + /* dead cgroups shouldn't contribute to inode ownership arbitration */
10903 + if (!(css->flags & CSS_ONLINE))
10904 + return;
10905 +
10906 + id = css->id;
10907
10908 if (id == wbc->wb_id) {
10909 wbc->wb_bytes += bytes;
10910 diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
10911 index 364028c710a8..8da239b6cc16 100644
10912 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
10913 +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
10914 @@ -307,7 +307,7 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
10915 if (status == 0)
10916 return 0;
10917
10918 - if (mirror->mirror_ds == NULL)
10919 + if (IS_ERR_OR_NULL(mirror->mirror_ds))
10920 return -EINVAL;
10921
10922 dserr = kmalloc(sizeof(*dserr), gfp_flags);
10923 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
10924 index b65aee481d13..e4cd3a2fe698 100644
10925 --- a/fs/nfs/inode.c
10926 +++ b/fs/nfs/inode.c
10927 @@ -1100,6 +1100,7 @@ int nfs_open(struct inode *inode, struct file *filp)
10928 nfs_fscache_open_file(inode, filp);
10929 return 0;
10930 }
10931 +EXPORT_SYMBOL_GPL(nfs_open);
10932
10933 /*
10934 * This function is called whenever some part of NFS notices that
10935 diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
10936 index 134858507268..61abbb087ed1 100644
10937 --- a/fs/nfs/nfs4file.c
10938 +++ b/fs/nfs/nfs4file.c
10939 @@ -49,7 +49,7 @@ nfs4_file_open(struct inode *inode, struct file *filp)
10940 return err;
10941
10942 if ((openflags & O_ACCMODE) == 3)
10943 - openflags--;
10944 + return nfs_open(inode, filp);
10945
10946 /* We can't create new files here */
10947 openflags &= ~(O_CREAT|O_EXCL);
10948 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
10949 index 7d9a51e6b847..4931c3a75f03 100644
10950 --- a/fs/nfs/pnfs.c
10951 +++ b/fs/nfs/pnfs.c
10952 @@ -1866,8 +1866,8 @@ lookup_again:
10953 atomic_read(&lo->plh_outstanding) != 0) {
10954 spin_unlock(&ino->i_lock);
10955 lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
10956 - atomic_read(&lo->plh_outstanding)));
10957 - if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
10958 + !atomic_read(&lo->plh_outstanding)));
10959 + if (IS_ERR(lseg))
10960 goto out_put_layout_hdr;
10961 pnfs_put_layout_hdr(lo);
10962 goto lookup_again;
10963 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
10964 index 7325baa8f9d4..c95f32b83a94 100644
10965 --- a/fs/proc/proc_sysctl.c
10966 +++ b/fs/proc/proc_sysctl.c
10967 @@ -498,6 +498,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
10968
10969 if (root->set_ownership)
10970 root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
10971 + else {
10972 + inode->i_uid = GLOBAL_ROOT_UID;
10973 + inode->i_gid = GLOBAL_ROOT_GID;
10974 + }
10975
10976 return inode;
10977 }
10978 diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
10979 index e701ebc36c06..e2ba2a3b63b2 100644
10980 --- a/fs/xfs/libxfs/xfs_ag_resv.c
10981 +++ b/fs/xfs/libxfs/xfs_ag_resv.c
10982 @@ -281,7 +281,7 @@ xfs_ag_resv_init(
10983 */
10984 ask = used = 0;
10985
10986 - mp->m_inotbt_nores = true;
10987 + mp->m_finobt_nores = true;
10988
10989 error = xfs_refcountbt_calc_reserves(mp, tp, agno, &ask,
10990 &used);
10991 diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
10992 index 86c50208a143..adb2f6df5a11 100644
10993 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c
10994 +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
10995 @@ -124,7 +124,7 @@ xfs_finobt_alloc_block(
10996 union xfs_btree_ptr *new,
10997 int *stat)
10998 {
10999 - if (cur->bc_mp->m_inotbt_nores)
11000 + if (cur->bc_mp->m_finobt_nores)
11001 return xfs_inobt_alloc_block(cur, start, new, stat);
11002 return __xfs_inobt_alloc_block(cur, start, new, stat,
11003 XFS_AG_RESV_METADATA);
11004 @@ -157,7 +157,7 @@ xfs_finobt_free_block(
11005 struct xfs_btree_cur *cur,
11006 struct xfs_buf *bp)
11007 {
11008 - if (cur->bc_mp->m_inotbt_nores)
11009 + if (cur->bc_mp->m_finobt_nores)
11010 return xfs_inobt_free_block(cur, bp);
11011 return __xfs_inobt_free_block(cur, bp, XFS_AG_RESV_METADATA);
11012 }
11013 diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
11014 index a58034049995..3d213a7394c5 100644
11015 --- a/fs/xfs/xfs_attr_list.c
11016 +++ b/fs/xfs/xfs_attr_list.c
11017 @@ -555,6 +555,7 @@ xfs_attr_put_listent(
11018 attrlist_ent_t *aep;
11019 int arraytop;
11020
11021 + ASSERT(!context->seen_enough);
11022 ASSERT(!(context->flags & ATTR_KERNOVAL));
11023 ASSERT(context->count >= 0);
11024 ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
11025 diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
11026 index 211b06e4702e..41ad9eaab6ce 100644
11027 --- a/fs/xfs/xfs_bmap_util.c
11028 +++ b/fs/xfs/xfs_bmap_util.c
11029 @@ -1080,7 +1080,7 @@ xfs_adjust_extent_unmap_boundaries(
11030 return 0;
11031 }
11032
11033 -static int
11034 +int
11035 xfs_flush_unmap_range(
11036 struct xfs_inode *ip,
11037 xfs_off_t offset,
11038 diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
11039 index 87363d136bb6..9c73d012f56a 100644
11040 --- a/fs/xfs/xfs_bmap_util.h
11041 +++ b/fs/xfs/xfs_bmap_util.h
11042 @@ -76,6 +76,8 @@ int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
11043 xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
11044
11045 xfs_extnum_t xfs_bmap_count_leaves(struct xfs_ifork *ifp, xfs_filblks_t *count);
11046 +int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
11047 + xfs_off_t len);
11048 int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
11049 int whichfork, xfs_extnum_t *nextents,
11050 xfs_filblks_t *count);
11051 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
11052 index 61a5ad2600e8..259549698ba7 100644
11053 --- a/fs/xfs/xfs_file.c
11054 +++ b/fs/xfs/xfs_file.c
11055 @@ -517,6 +517,9 @@ xfs_file_dio_aio_write(
11056 }
11057
11058 if (iocb->ki_flags & IOCB_NOWAIT) {
11059 + /* unaligned dio always waits, bail */
11060 + if (unaligned_io)
11061 + return -EAGAIN;
11062 if (!xfs_ilock_nowait(ip, iolock))
11063 return -EAGAIN;
11064 } else {
11065 @@ -529,18 +532,14 @@ xfs_file_dio_aio_write(
11066 count = iov_iter_count(from);
11067
11068 /*
11069 - * If we are doing unaligned IO, wait for all other IO to drain,
11070 - * otherwise demote the lock if we had to take the exclusive lock
11071 - * for other reasons in xfs_file_aio_write_checks.
11072 + * If we are doing unaligned IO, we can't allow any other overlapping IO
11073 + * in-flight at the same time or we risk data corruption. Wait for all
11074 + * other IO to drain before we submit. If the IO is aligned, demote the
11075 + * iolock if we had to take the exclusive lock in
11076 + * xfs_file_aio_write_checks() for other reasons.
11077 */
11078 if (unaligned_io) {
11079 - /* If we are going to wait for other DIO to finish, bail */
11080 - if (iocb->ki_flags & IOCB_NOWAIT) {
11081 - if (atomic_read(&inode->i_dio_count))
11082 - return -EAGAIN;
11083 - } else {
11084 - inode_dio_wait(inode);
11085 - }
11086 + inode_dio_wait(inode);
11087 } else if (iolock == XFS_IOLOCK_EXCL) {
11088 xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
11089 iolock = XFS_IOLOCK_SHARED;
11090 @@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
11091
11092 trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
11093 ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
11094 +
11095 + /*
11096 + * If unaligned, this is the only IO in-flight. If it has not yet
11097 + * completed, wait on it before we release the iolock to prevent
11098 + * subsequent overlapping IO.
11099 + */
11100 + if (ret == -EIOCBQUEUED && unaligned_io)
11101 + inode_dio_wait(inode);
11102 out:
11103 xfs_iunlock(ip, iolock);
11104
11105 diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
11106 index 7c00b8bedfe3..09fd602507ef 100644
11107 --- a/fs/xfs/xfs_fsops.c
11108 +++ b/fs/xfs/xfs_fsops.c
11109 @@ -534,6 +534,7 @@ xfs_fs_reserve_ag_blocks(
11110 int error = 0;
11111 int err2;
11112
11113 + mp->m_finobt_nores = false;
11114 for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
11115 pag = xfs_perag_get(mp, agno);
11116 err2 = xfs_ag_resv_init(pag, NULL);
11117 diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
11118 index 05db9540e459..5ed84d6c7059 100644
11119 --- a/fs/xfs/xfs_inode.c
11120 +++ b/fs/xfs/xfs_inode.c
11121 @@ -1332,7 +1332,7 @@ xfs_create_tmpfile(
11122 if (error)
11123 goto out_trans_cancel;
11124
11125 - error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
11126 + error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
11127 if (error)
11128 goto out_trans_cancel;
11129
11130 @@ -1754,7 +1754,7 @@ xfs_inactive_ifree(
11131 * now remains allocated and sits on the unlinked list until the fs is
11132 * repaired.
11133 */
11134 - if (unlikely(mp->m_inotbt_nores)) {
11135 + if (unlikely(mp->m_finobt_nores)) {
11136 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
11137 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
11138 &tp);
11139 @@ -1907,11 +1907,8 @@ xfs_inactive(
11140 }
11141
11142 /*
11143 - * This is called when the inode's link count goes to 0 or we are creating a
11144 - * tmpfile via O_TMPFILE. In the case of a tmpfile, @ignore_linkcount will be
11145 - * set to true as the link count is dropped to zero by the VFS after we've
11146 - * created the file successfully, so we have to add it to the unlinked list
11147 - * while the link count is non-zero.
11148 + * This is called when the inode's link count has gone to 0 or we are creating
11149 + * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
11150 *
11151 * We place the on-disk inode on a list in the AGI. It will be pulled from this
11152 * list when the inode is freed.
11153 @@ -1931,6 +1928,7 @@ xfs_iunlink(
11154 int offset;
11155 int error;
11156
11157 + ASSERT(VFS_I(ip)->i_nlink == 0);
11158 ASSERT(VFS_I(ip)->i_mode != 0);
11159
11160 /*
11161 @@ -2837,11 +2835,9 @@ xfs_rename_alloc_whiteout(
11162
11163 /*
11164 * Prepare the tmpfile inode as if it were created through the VFS.
11165 - * Otherwise, the link increment paths will complain about nlink 0->1.
11166 - * Drop the link count as done by d_tmpfile(), complete the inode setup
11167 - * and flag it as linkable.
11168 + * Complete the inode setup and flag it as linkable. nlink is already
11169 + * zero, so we can skip the drop_nlink.
11170 */
11171 - drop_nlink(VFS_I(tmpfile));
11172 xfs_setup_iops(tmpfile);
11173 xfs_finish_inode_setup(tmpfile);
11174 VFS_I(tmpfile)->i_state |= I_LINKABLE;
11175 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
11176 index f48ffd7a8d3e..74047bd0c1ae 100644
11177 --- a/fs/xfs/xfs_iops.c
11178 +++ b/fs/xfs/xfs_iops.c
11179 @@ -191,9 +191,18 @@ xfs_generic_create(
11180
11181 xfs_setup_iops(ip);
11182
11183 - if (tmpfile)
11184 + if (tmpfile) {
11185 + /*
11186 + * The VFS requires that any inode fed to d_tmpfile must have
11187 + * nlink == 1 so that it can decrement the nlink in d_tmpfile.
11188 + * However, we created the temp file with nlink == 0 because
11189 + * we're not allowed to put an inode with nlink > 0 on the
11190 + * unlinked list. Therefore we have to set nlink to 1 so that
11191 + * d_tmpfile can immediately set it back to zero.
11192 + */
11193 + set_nlink(inode, 1);
11194 d_tmpfile(dentry, inode);
11195 - else
11196 + } else
11197 d_instantiate(dentry, inode);
11198
11199 xfs_finish_inode_setup(ip);
11200 @@ -522,6 +531,10 @@ xfs_vn_getattr(
11201 }
11202 }
11203
11204 + /*
11205 + * Note: If you add another clause to set an attribute flag, please
11206 + * update attributes_mask below.
11207 + */
11208 if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
11209 stat->attributes |= STATX_ATTR_IMMUTABLE;
11210 if (ip->i_d.di_flags & XFS_DIFLAG_APPEND)
11211 @@ -529,6 +542,10 @@ xfs_vn_getattr(
11212 if (ip->i_d.di_flags & XFS_DIFLAG_NODUMP)
11213 stat->attributes |= STATX_ATTR_NODUMP;
11214
11215 + stat->attributes_mask |= (STATX_ATTR_IMMUTABLE |
11216 + STATX_ATTR_APPEND |
11217 + STATX_ATTR_NODUMP);
11218 +
11219 switch (inode->i_mode & S_IFMT) {
11220 case S_IFBLK:
11221 case S_IFCHR:
11222 diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
11223 index 7964513c3128..7e0bf952e087 100644
11224 --- a/fs/xfs/xfs_mount.h
11225 +++ b/fs/xfs/xfs_mount.h
11226 @@ -127,7 +127,7 @@ typedef struct xfs_mount {
11227 struct mutex m_growlock; /* growfs mutex */
11228 int m_fixedfsid[2]; /* unchanged for life of FS */
11229 uint64_t m_flags; /* global mount flags */
11230 - bool m_inotbt_nores; /* no per-AG finobt resv. */
11231 + bool m_finobt_nores; /* no per-AG finobt resv. */
11232 int m_ialloc_inos; /* inodes in inode allocation */
11233 int m_ialloc_blks; /* blocks in inode allocation */
11234 int m_ialloc_min_blks;/* min blocks in sparse inode
11235 diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
11236 index 7088f44c0c59..f3c393f309e1 100644
11237 --- a/fs/xfs/xfs_reflink.c
11238 +++ b/fs/xfs/xfs_reflink.c
11239 @@ -1368,9 +1368,19 @@ xfs_reflink_remap_prep(
11240 if (ret)
11241 goto out_unlock;
11242
11243 - /* Zap any page cache for the destination file's range. */
11244 - truncate_inode_pages_range(&inode_out->i_data, pos_out,
11245 - PAGE_ALIGN(pos_out + *len) - 1);
11246 + /*
11247 + * If pos_out > EOF, we may have dirtied blocks between EOF and
11248 + * pos_out. In that case, we need to extend the flush and unmap to cover
11249 + * from EOF to the end of the copy length.
11250 + */
11251 + if (pos_out > XFS_ISIZE(dest)) {
11252 + loff_t flen = *len + (pos_out - XFS_ISIZE(dest));
11253 + ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
11254 + } else {
11255 + ret = xfs_flush_unmap_range(dest, pos_out, *len);
11256 + }
11257 + if (ret)
11258 + goto out_unlock;
11259
11260 /* If we're altering the file contents... */
11261 if (!is_dedupe) {
11262 diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
11263 index 207ee302b1bb..dce8114e3198 100644
11264 --- a/fs/xfs/xfs_super.c
11265 +++ b/fs/xfs/xfs_super.c
11266 @@ -1561,6 +1561,13 @@ xfs_mount_alloc(
11267 INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
11268 INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
11269 mp->m_kobj.kobject.kset = xfs_kset;
11270 + /*
11271 + * We don't create the finobt per-ag space reservation until after log
11272 + * recovery, so we must set this to true so that an ifree transaction
11273 + * started during log recovery will not depend on space reservations
11274 + * for finobt expansion.
11275 + */
11276 + mp->m_finobt_nores = true;
11277 return mp;
11278 }
11279
11280 diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
11281 index 63ee1d5bf1d7..9a63016009a1 100644
11282 --- a/fs/xfs/xfs_xattr.c
11283 +++ b/fs/xfs/xfs_xattr.c
11284 @@ -129,6 +129,9 @@ __xfs_xattr_put_listent(
11285 char *offset;
11286 int arraytop;
11287
11288 + if (context->count < 0 || context->seen_enough)
11289 + return;
11290 +
11291 if (!context->alist)
11292 goto compute_size;
11293
11294 diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
11295 index 20561a60db9c..d4fb510a4fbe 100644
11296 --- a/include/asm-generic/bug.h
11297 +++ b/include/asm-generic/bug.h
11298 @@ -104,8 +104,10 @@ extern void warn_slowpath_null(const char *file, const int line);
11299 warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
11300 #else
11301 extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
11302 -#define __WARN() __WARN_TAINT(TAINT_WARN)
11303 -#define __WARN_printf(arg...) do { __warn_printk(arg); __WARN(); } while (0)
11304 +#define __WARN() do { \
11305 + printk(KERN_WARNING CUT_HERE); __WARN_TAINT(TAINT_WARN); \
11306 +} while (0)
11307 +#define __WARN_printf(arg...) __WARN_printf_taint(TAINT_WARN, arg)
11308 #define __WARN_printf_taint(taint, arg...) \
11309 do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
11310 #endif
11311 diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
11312 index c0d4df6a606f..9d3b745c3107 100644
11313 --- a/include/drm/drm_displayid.h
11314 +++ b/include/drm/drm_displayid.h
11315 @@ -40,6 +40,7 @@
11316 #define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
11317 #define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
11318 #define DATA_BLOCK_TILED_DISPLAY 0x12
11319 +#define DATA_BLOCK_CTA 0x81
11320
11321 #define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
11322
11323 @@ -90,4 +91,13 @@ struct displayid_detailed_timing_block {
11324 struct displayid_block base;
11325 struct displayid_detailed_timings_1 timings[0];
11326 };
11327 +
11328 +#define for_each_displayid_db(displayid, block, idx, length) \
11329 + for ((block) = (struct displayid_block *)&(displayid)[idx]; \
11330 + (idx) + sizeof(struct displayid_block) <= (length) && \
11331 + (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
11332 + (block)->num_bytes > 0; \
11333 + (idx) += (block)->num_bytes + sizeof(struct displayid_block), \
11334 + (block) = (struct displayid_block *)&(displayid)[idx])
11335 +
11336 #endif
11337 diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
11338 index dec0372efe2e..d67c0035165c 100644
11339 --- a/include/linux/cpuhotplug.h
11340 +++ b/include/linux/cpuhotplug.h
11341 @@ -116,10 +116,10 @@ enum cpuhp_state {
11342 CPUHP_AP_PERF_ARM_ACPI_STARTING,
11343 CPUHP_AP_PERF_ARM_STARTING,
11344 CPUHP_AP_ARM_L2X0_STARTING,
11345 + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
11346 CPUHP_AP_ARM_ARCH_TIMER_STARTING,
11347 CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
11348 CPUHP_AP_JCORE_TIMER_STARTING,
11349 - CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
11350 CPUHP_AP_ARM_TWD_STARTING,
11351 CPUHP_AP_QCOM_TIMER_STARTING,
11352 CPUHP_AP_ARMADA_TIMER_STARTING,
11353 diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
11354 index e102c5bccbb9..68cbe111420b 100644
11355 --- a/include/linux/rcupdate.h
11356 +++ b/include/linux/rcupdate.h
11357 @@ -620,7 +620,7 @@ static inline void rcu_preempt_sleep_check(void) { }
11358 * read-side critical sections may be preempted and they may also block, but
11359 * only when acquiring spinlocks that are subject to priority inheritance.
11360 */
11361 -static inline void rcu_read_lock(void)
11362 +static __always_inline void rcu_read_lock(void)
11363 {
11364 __rcu_read_lock();
11365 __acquire(RCU);
11366 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
11367 index a0d2e0bb9a94..0e3c0d83bd99 100644
11368 --- a/include/net/ip_vs.h
11369 +++ b/include/net/ip_vs.h
11370 @@ -806,11 +806,12 @@ struct ipvs_master_sync_state {
11371 struct ip_vs_sync_buff *sync_buff;
11372 unsigned long sync_queue_len;
11373 unsigned int sync_queue_delay;
11374 - struct task_struct *master_thread;
11375 struct delayed_work master_wakeup_work;
11376 struct netns_ipvs *ipvs;
11377 };
11378
11379 +struct ip_vs_sync_thread_data;
11380 +
11381 /* How much time to keep dests in trash */
11382 #define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
11383
11384 @@ -941,7 +942,8 @@ struct netns_ipvs {
11385 spinlock_t sync_lock;
11386 struct ipvs_master_sync_state *ms;
11387 spinlock_t sync_buff_lock;
11388 - struct task_struct **backup_threads;
11389 + struct ip_vs_sync_thread_data *master_tinfo;
11390 + struct ip_vs_sync_thread_data *backup_tinfo;
11391 int threads_mask;
11392 volatile int sync_state;
11393 struct mutex sync_mutex;
11394 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
11395 index ec299fcf55f7..412c2820626d 100644
11396 --- a/include/rdma/ib_verbs.h
11397 +++ b/include/rdma/ib_verbs.h
11398 @@ -290,8 +290,8 @@ struct ib_rss_caps {
11399 };
11400
11401 enum ib_tm_cap_flags {
11402 - /* Support tag matching on RC transport */
11403 - IB_TM_CAP_RC = 1 << 0,
11404 + /* Support tag matching with rendezvous offload for RC transport */
11405 + IB_TM_CAP_RNDV_RC = 1 << 0,
11406 };
11407
11408 struct ib_tm_caps {
11409 diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
11410 index 6d182746afab..147546e0c11b 100644
11411 --- a/include/trace/events/rxrpc.h
11412 +++ b/include/trace/events/rxrpc.h
11413 @@ -1381,7 +1381,7 @@ TRACE_EVENT(rxrpc_rx_eproto,
11414 ),
11415
11416 TP_fast_assign(
11417 - __entry->call = call->debug_id;
11418 + __entry->call = call ? call->debug_id : 0;
11419 __entry->serial = serial;
11420 __entry->why = why;
11421 ),
11422 diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
11423 index 2932600ce271..d143e277cdaf 100644
11424 --- a/include/uapi/linux/bpf.h
11425 +++ b/include/uapi/linux/bpf.h
11426 @@ -2486,6 +2486,7 @@ struct bpf_prog_info {
11427 char name[BPF_OBJ_NAME_LEN];
11428 __u32 ifindex;
11429 __u32 gpl_compatible:1;
11430 + __u32 :31; /* alignment pad */
11431 __u64 netns_dev;
11432 __u64 netns_ino;
11433 __u32 nr_jited_ksyms;
11434 diff --git a/include/xen/events.h b/include/xen/events.h
11435 index c3e6bc643a7b..1650d39decae 100644
11436 --- a/include/xen/events.h
11437 +++ b/include/xen/events.h
11438 @@ -3,6 +3,7 @@
11439 #define _XEN_EVENTS_H
11440
11441 #include <linux/interrupt.h>
11442 +#include <linux/irq.h>
11443 #ifdef CONFIG_PCI_MSI
11444 #include <linux/msi.h>
11445 #endif
11446 @@ -59,7 +60,7 @@ void evtchn_put(unsigned int evtchn);
11447
11448 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector);
11449 void rebind_evtchn_irq(int evtchn, int irq);
11450 -int xen_rebind_evtchn_to_cpu(int evtchn, unsigned tcpu);
11451 +int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu);
11452
11453 static inline void notify_remote_via_evtchn(int port)
11454 {
11455 diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
11456 index 0488b8258321..ffc39a7e028d 100644
11457 --- a/kernel/bpf/Makefile
11458 +++ b/kernel/bpf/Makefile
11459 @@ -1,5 +1,6 @@
11460 # SPDX-License-Identifier: GPL-2.0
11461 obj-y := core.o
11462 +CFLAGS_core.o += $(call cc-disable-warning, override-init)
11463
11464 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o
11465 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
11466 diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
11467 index 26b57e24476f..e810e8cb17e1 100644
11468 --- a/kernel/locking/lockdep.c
11469 +++ b/kernel/locking/lockdep.c
11470 @@ -3326,17 +3326,17 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
11471 if (depth) {
11472 hlock = curr->held_locks + depth - 1;
11473 if (hlock->class_idx == class_idx && nest_lock) {
11474 - if (hlock->references) {
11475 - /*
11476 - * Check: unsigned int references:12, overflow.
11477 - */
11478 - if (DEBUG_LOCKS_WARN_ON(hlock->references == (1 << 12)-1))
11479 - return 0;
11480 + if (!references)
11481 + references++;
11482
11483 + if (!hlock->references)
11484 hlock->references++;
11485 - } else {
11486 - hlock->references = 2;
11487 - }
11488 +
11489 + hlock->references += references;
11490 +
11491 + /* Overflow */
11492 + if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
11493 + return 0;
11494
11495 return 1;
11496 }
11497 diff --git a/kernel/padata.c b/kernel/padata.c
11498 index d568cc56405f..6c06b3039fae 100644
11499 --- a/kernel/padata.c
11500 +++ b/kernel/padata.c
11501 @@ -267,7 +267,12 @@ static void padata_reorder(struct parallel_data *pd)
11502 * The next object that needs serialization might have arrived to
11503 * the reorder queues in the meantime, we will be called again
11504 * from the timer function if no one else cares for it.
11505 + *
11506 + * Ensure reorder_objects is read after pd->lock is dropped so we see
11507 + * an increment from another task in padata_do_serial. Pairs with
11508 + * smp_mb__after_atomic in padata_do_serial.
11509 */
11510 + smp_mb();
11511 if (atomic_read(&pd->reorder_objects)
11512 && !(pinst->flags & PADATA_RESET))
11513 mod_timer(&pd->timer, jiffies + HZ);
11514 @@ -387,6 +392,13 @@ void padata_do_serial(struct padata_priv *padata)
11515 list_add_tail(&padata->list, &pqueue->reorder.list);
11516 spin_unlock(&pqueue->reorder.lock);
11517
11518 + /*
11519 + * Ensure the atomic_inc of reorder_objects above is ordered correctly
11520 + * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
11521 + * in padata_reorder.
11522 + */
11523 + smp_mb__after_atomic();
11524 +
11525 put_cpu();
11526
11527 /* If we're running on the wrong CPU, call padata_reorder() via a
11528 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
11529 index 2a2ac53d8b8b..95271f180687 100644
11530 --- a/kernel/pid_namespace.c
11531 +++ b/kernel/pid_namespace.c
11532 @@ -325,7 +325,7 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
11533 }
11534
11535 read_lock(&tasklist_lock);
11536 - force_sig(SIGKILL, pid_ns->child_reaper);
11537 + send_sig(SIGKILL, pid_ns->child_reaper, 1);
11538 read_unlock(&tasklist_lock);
11539
11540 do_exit(0);
11541 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
11542 index 6859ea1d5c04..795c63ca44a9 100644
11543 --- a/kernel/sched/core.c
11544 +++ b/kernel/sched/core.c
11545 @@ -5133,7 +5133,7 @@ long __sched io_schedule_timeout(long timeout)
11546 }
11547 EXPORT_SYMBOL(io_schedule_timeout);
11548
11549 -void io_schedule(void)
11550 +void __sched io_schedule(void)
11551 {
11552 int token;
11553
11554 diff --git a/kernel/sched/sched-pelt.h b/kernel/sched/sched-pelt.h
11555 index a26473674fb7..c529706bed11 100644
11556 --- a/kernel/sched/sched-pelt.h
11557 +++ b/kernel/sched/sched-pelt.h
11558 @@ -1,7 +1,7 @@
11559 /* SPDX-License-Identifier: GPL-2.0 */
11560 /* Generated by Documentation/scheduler/sched-pelt; do not modify. */
11561
11562 -static const u32 runnable_avg_yN_inv[] = {
11563 +static const u32 runnable_avg_yN_inv[] __maybe_unused = {
11564 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
11565 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
11566 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
11567 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
11568 index 6b23cd584295..e1110a7bd3e6 100644
11569 --- a/kernel/time/ntp.c
11570 +++ b/kernel/time/ntp.c
11571 @@ -43,6 +43,7 @@ static u64 tick_length_base;
11572 #define MAX_TICKADJ 500LL /* usecs */
11573 #define MAX_TICKADJ_SCALED \
11574 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
11575 +#define MAX_TAI_OFFSET 100000
11576
11577 /*
11578 * phase-lock loop variables
11579 @@ -698,7 +699,8 @@ static inline void process_adjtimex_modes(const struct timex *txc, s32 *time_tai
11580 time_constant = max(time_constant, 0l);
11581 }
11582
11583 - if (txc->modes & ADJ_TAI && txc->constant >= 0)
11584 + if (txc->modes & ADJ_TAI &&
11585 + txc->constant >= 0 && txc->constant <= MAX_TAI_OFFSET)
11586 *time_tai = txc->constant;
11587
11588 if (txc->modes & ADJ_OFFSET)
11589 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
11590 index d647dabdac97..07afcfe2a61b 100644
11591 --- a/kernel/time/timer_list.c
11592 +++ b/kernel/time/timer_list.c
11593 @@ -287,23 +287,6 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
11594 SEQ_printf(m, "\n");
11595 }
11596
11597 -static int timer_list_show(struct seq_file *m, void *v)
11598 -{
11599 - struct timer_list_iter *iter = v;
11600 -
11601 - if (iter->cpu == -1 && !iter->second_pass)
11602 - timer_list_header(m, iter->now);
11603 - else if (!iter->second_pass)
11604 - print_cpu(m, iter->cpu, iter->now);
11605 -#ifdef CONFIG_GENERIC_CLOCKEVENTS
11606 - else if (iter->cpu == -1 && iter->second_pass)
11607 - timer_list_show_tickdevices_header(m);
11608 - else
11609 - print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
11610 -#endif
11611 - return 0;
11612 -}
11613 -
11614 void sysrq_timer_list_show(void)
11615 {
11616 u64 now = ktime_to_ns(ktime_get());
11617 @@ -322,6 +305,24 @@ void sysrq_timer_list_show(void)
11618 return;
11619 }
11620
11621 +#ifdef CONFIG_PROC_FS
11622 +static int timer_list_show(struct seq_file *m, void *v)
11623 +{
11624 + struct timer_list_iter *iter = v;
11625 +
11626 + if (iter->cpu == -1 && !iter->second_pass)
11627 + timer_list_header(m, iter->now);
11628 + else if (!iter->second_pass)
11629 + print_cpu(m, iter->cpu, iter->now);
11630 +#ifdef CONFIG_GENERIC_CLOCKEVENTS
11631 + else if (iter->cpu == -1 && iter->second_pass)
11632 + timer_list_show_tickdevices_header(m);
11633 + else
11634 + print_tickdevice(m, tick_get_device(iter->cpu), iter->cpu);
11635 +#endif
11636 + return 0;
11637 +}
11638 +
11639 static void *move_iter(struct timer_list_iter *iter, loff_t offset)
11640 {
11641 for (; offset; offset--) {
11642 @@ -381,3 +382,4 @@ static int __init init_timer_list_procfs(void)
11643 return 0;
11644 }
11645 __initcall(init_timer_list_procfs);
11646 +#endif
11647 diff --git a/lib/reed_solomon/decode_rs.c b/lib/reed_solomon/decode_rs.c
11648 index 1db74eb098d0..121beb2f0930 100644
11649 --- a/lib/reed_solomon/decode_rs.c
11650 +++ b/lib/reed_solomon/decode_rs.c
11651 @@ -42,8 +42,18 @@
11652 BUG_ON(pad < 0 || pad >= nn);
11653
11654 /* Does the caller provide the syndrome ? */
11655 - if (s != NULL)
11656 - goto decode;
11657 + if (s != NULL) {
11658 + for (i = 0; i < nroots; i++) {
11659 + /* The syndrome is in index form,
11660 + * so nn represents zero
11661 + */
11662 + if (s[i] != nn)
11663 + goto decode;
11664 + }
11665 +
11666 + /* syndrome is zero, no errors to correct */
11667 + return 0;
11668 + }
11669
11670 /* form the syndromes; i.e., evaluate data(x) at roots of
11671 * g(x) */
11672 @@ -99,9 +109,9 @@
11673 if (no_eras > 0) {
11674 /* Init lambda to be the erasure locator polynomial */
11675 lambda[1] = alpha_to[rs_modnn(rs,
11676 - prim * (nn - 1 - eras_pos[0]))];
11677 + prim * (nn - 1 - (eras_pos[0] + pad)))];
11678 for (i = 1; i < no_eras; i++) {
11679 - u = rs_modnn(rs, prim * (nn - 1 - eras_pos[i]));
11680 + u = rs_modnn(rs, prim * (nn - 1 - (eras_pos[i] + pad)));
11681 for (j = i + 1; j > 0; j--) {
11682 tmp = index_of[lambda[j - 1]];
11683 if (tmp != nn) {
11684 diff --git a/lib/scatterlist.c b/lib/scatterlist.c
11685 index 7c6096a71704..8c3036c37ba0 100644
11686 --- a/lib/scatterlist.c
11687 +++ b/lib/scatterlist.c
11688 @@ -652,17 +652,18 @@ static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
11689 {
11690 if (!miter->__remaining) {
11691 struct scatterlist *sg;
11692 - unsigned long pgoffset;
11693
11694 if (!__sg_page_iter_next(&miter->piter))
11695 return false;
11696
11697 sg = miter->piter.sg;
11698 - pgoffset = miter->piter.sg_pgoffset;
11699
11700 - miter->__offset = pgoffset ? 0 : sg->offset;
11701 + miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
11702 + miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
11703 + miter->__offset &= PAGE_SIZE - 1;
11704 miter->__remaining = sg->offset + sg->length -
11705 - (pgoffset << PAGE_SHIFT) - miter->__offset;
11706 + (miter->piter.sg_pgoffset << PAGE_SHIFT) -
11707 + miter->__offset;
11708 miter->__remaining = min_t(unsigned long, miter->__remaining,
11709 PAGE_SIZE - miter->__offset);
11710 }
11711 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
11712 index eb596c2ed546..849336211c79 100644
11713 --- a/net/9p/trans_virtio.c
11714 +++ b/net/9p/trans_virtio.c
11715 @@ -782,10 +782,16 @@ static struct p9_trans_module p9_virtio_trans = {
11716 /* The standard init function */
11717 static int __init p9_virtio_init(void)
11718 {
11719 + int rc;
11720 +
11721 INIT_LIST_HEAD(&virtio_chan_list);
11722
11723 v9fs_register_trans(&p9_virtio_trans);
11724 - return register_virtio_driver(&p9_virtio_drv);
11725 + rc = register_virtio_driver(&p9_virtio_drv);
11726 + if (rc)
11727 + v9fs_unregister_trans(&p9_virtio_trans);
11728 +
11729 + return rc;
11730 }
11731
11732 static void __exit p9_virtio_cleanup(void)
11733 diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
11734 index e2fbf3677b9b..9daab0dd833b 100644
11735 --- a/net/9p/trans_xen.c
11736 +++ b/net/9p/trans_xen.c
11737 @@ -530,13 +530,19 @@ static struct xenbus_driver xen_9pfs_front_driver = {
11738
11739 static int p9_trans_xen_init(void)
11740 {
11741 + int rc;
11742 +
11743 if (!xen_domain())
11744 return -ENODEV;
11745
11746 pr_info("Initialising Xen transport for 9pfs\n");
11747
11748 v9fs_register_trans(&p9_xen_trans);
11749 - return xenbus_register_frontend(&xen_9pfs_front_driver);
11750 + rc = xenbus_register_frontend(&xen_9pfs_front_driver);
11751 + if (rc)
11752 + v9fs_unregister_trans(&p9_xen_trans);
11753 +
11754 + return rc;
11755 }
11756 module_init(p9_trans_xen_init);
11757
11758 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
11759 index 73bf6a93a3cf..0b7b36fa0d5c 100644
11760 --- a/net/batman-adv/bat_iv_ogm.c
11761 +++ b/net/batman-adv/bat_iv_ogm.c
11762 @@ -2485,7 +2485,7 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
11763 return ret;
11764 }
11765
11766 -static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
11767 +static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
11768 {
11769 /* begin scheduling originator messages on that interface */
11770 batadv_iv_ogm_schedule(hard_iface);
11771 @@ -2825,8 +2825,8 @@ unlock:
11772 static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
11773 .name = "BATMAN_IV",
11774 .iface = {
11775 - .activate = batadv_iv_iface_activate,
11776 .enable = batadv_iv_ogm_iface_enable,
11777 + .enabled = batadv_iv_iface_enabled,
11778 .disable = batadv_iv_ogm_iface_disable,
11779 .update_mac = batadv_iv_ogm_iface_update_mac,
11780 .primary_set = batadv_iv_ogm_primary_iface_set,
11781 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
11782 index 08690d06b7be..36f0962040d1 100644
11783 --- a/net/batman-adv/hard-interface.c
11784 +++ b/net/batman-adv/hard-interface.c
11785 @@ -821,6 +821,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
11786
11787 batadv_hardif_recalc_extra_skbroom(soft_iface);
11788
11789 + if (bat_priv->algo_ops->iface.enabled)
11790 + bat_priv->algo_ops->iface.enabled(hard_iface);
11791 +
11792 out:
11793 return 0;
11794
11795 diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
11796 index 359ec1a6e822..9fa5389ea244 100644
11797 --- a/net/batman-adv/translation-table.c
11798 +++ b/net/batman-adv/translation-table.c
11799 @@ -3821,6 +3821,8 @@ static void batadv_tt_purge(struct work_struct *work)
11800 */
11801 void batadv_tt_free(struct batadv_priv *bat_priv)
11802 {
11803 + batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_ROAM, 1);
11804 +
11805 batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_TT, 1);
11806 batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_TT, 1);
11807
11808 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
11809 index eeee3e61c625..fdba8a144d73 100644
11810 --- a/net/batman-adv/types.h
11811 +++ b/net/batman-adv/types.h
11812 @@ -2130,6 +2130,9 @@ struct batadv_algo_iface_ops {
11813 /** @enable: init routing info when hard-interface is enabled */
11814 int (*enable)(struct batadv_hard_iface *hard_iface);
11815
11816 + /** @enabled: notification when hard-interface was enabled (optional) */
11817 + void (*enabled)(struct batadv_hard_iface *hard_iface);
11818 +
11819 /** @disable: de-init routing info when hard-interface is disabled */
11820 void (*disable)(struct batadv_hard_iface *hard_iface);
11821
11822 diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
11823 index 4e2576fc0c59..357475cceec6 100644
11824 --- a/net/bluetooth/6lowpan.c
11825 +++ b/net/bluetooth/6lowpan.c
11826 @@ -187,10 +187,16 @@ static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev,
11827 }
11828
11829 if (!rt) {
11830 - nexthop = &lowpan_cb(skb)->gw;
11831 -
11832 - if (ipv6_addr_any(nexthop))
11833 - return NULL;
11834 + if (ipv6_addr_any(&lowpan_cb(skb)->gw)) {
11835 + /* There is neither route nor gateway,
11836 + * probably the destination is a direct peer.
11837 + */
11838 + nexthop = daddr;
11839 + } else {
11840 + /* There is a known gateway
11841 + */
11842 + nexthop = &lowpan_cb(skb)->gw;
11843 + }
11844 } else {
11845 nexthop = rt6_nexthop(rt, daddr);
11846
11847 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
11848 index 3e7badb3ac2d..0adcddb211fa 100644
11849 --- a/net/bluetooth/hci_event.c
11850 +++ b/net/bluetooth/hci_event.c
11851 @@ -5545,6 +5545,11 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
11852 return send_conn_param_neg_reply(hdev, handle,
11853 HCI_ERROR_UNKNOWN_CONN_ID);
11854
11855 + if (min < hcon->le_conn_min_interval ||
11856 + max > hcon->le_conn_max_interval)
11857 + return send_conn_param_neg_reply(hdev, handle,
11858 + HCI_ERROR_INVALID_LL_PARAMS);
11859 +
11860 if (hci_check_conn_params(min, max, latency, timeout))
11861 return send_conn_param_neg_reply(hdev, handle,
11862 HCI_ERROR_INVALID_LL_PARAMS);
11863 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
11864 index 879d5432bf77..a54dadf4a6ca 100644
11865 --- a/net/bluetooth/l2cap_core.c
11866 +++ b/net/bluetooth/l2cap_core.c
11867 @@ -4384,6 +4384,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
11868
11869 l2cap_chan_lock(chan);
11870
11871 + if (chan->state != BT_DISCONN) {
11872 + l2cap_chan_unlock(chan);
11873 + mutex_unlock(&conn->chan_lock);
11874 + return 0;
11875 + }
11876 +
11877 l2cap_chan_hold(chan);
11878 l2cap_chan_del(chan, 0);
11879
11880 @@ -5281,7 +5287,14 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
11881
11882 memset(&rsp, 0, sizeof(rsp));
11883
11884 - err = hci_check_conn_params(min, max, latency, to_multiplier);
11885 + if (min < hcon->le_conn_min_interval ||
11886 + max > hcon->le_conn_max_interval) {
11887 + BT_DBG("requested connection interval exceeds current bounds.");
11888 + err = -EINVAL;
11889 + } else {
11890 + err = hci_check_conn_params(min, max, latency, to_multiplier);
11891 + }
11892 +
11893 if (err)
11894 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
11895 else
11896 diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
11897 index a1c1b7e8a45c..cc2f7ca91ccd 100644
11898 --- a/net/bluetooth/smp.c
11899 +++ b/net/bluetooth/smp.c
11900 @@ -2580,6 +2580,19 @@ static int smp_cmd_ident_addr_info(struct l2cap_conn *conn,
11901 goto distribute;
11902 }
11903
11904 + /* Drop IRK if peer is using identity address during pairing but is
11905 + * providing different address as identity information.
11906 + *
11907 + * Microsoft Surface Precision Mouse is known to have this bug.
11908 + */
11909 + if (hci_is_identity_address(&hcon->dst, hcon->dst_type) &&
11910 + (bacmp(&info->bdaddr, &hcon->dst) ||
11911 + info->addr_type != hcon->dst_type)) {
11912 + bt_dev_err(hcon->hdev,
11913 + "ignoring IRK with invalid identity address");
11914 + goto distribute;
11915 + }
11916 +
11917 bacpy(&smp->id_addr, &info->bdaddr);
11918 smp->id_addr_type = info->addr_type;
11919
11920 diff --git a/net/key/af_key.c b/net/key/af_key.c
11921 index 0b79c9aa8eb1..1982f9f31deb 100644
11922 --- a/net/key/af_key.c
11923 +++ b/net/key/af_key.c
11924 @@ -2442,8 +2442,10 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
11925 goto out;
11926 }
11927 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
11928 - if (err < 0)
11929 + if (err < 0) {
11930 + kfree_skb(out_skb);
11931 goto out;
11932 + }
11933
11934 out_hdr = (struct sadb_msg *) out_skb->data;
11935 out_hdr->sadb_msg_version = hdr->sadb_msg_version;
11936 @@ -2694,8 +2696,10 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
11937 return PTR_ERR(out_skb);
11938
11939 err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
11940 - if (err < 0)
11941 + if (err < 0) {
11942 + kfree_skb(out_skb);
11943 return err;
11944 + }
11945
11946 out_hdr = (struct sadb_msg *) out_skb->data;
11947 out_hdr->sadb_msg_version = pfk->dump.msg_version;
11948 diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
11949 index 8a33dac4e805..ddfe06d7530b 100644
11950 --- a/net/netfilter/ipset/ip_set_hash_gen.h
11951 +++ b/net/netfilter/ipset/ip_set_hash_gen.h
11952 @@ -625,7 +625,7 @@ retry:
11953 goto cleanup;
11954 }
11955 m->size = AHASH_INIT_SIZE;
11956 - extsize = ext_size(AHASH_INIT_SIZE, dsize);
11957 + extsize += ext_size(AHASH_INIT_SIZE, dsize);
11958 RCU_INIT_POINTER(hbucket(t, key), m);
11959 } else if (m->pos >= m->size) {
11960 struct hbucket *ht;
11961 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
11962 index 62c0e80dcd71..a71f777d1353 100644
11963 --- a/net/netfilter/ipvs/ip_vs_core.c
11964 +++ b/net/netfilter/ipvs/ip_vs_core.c
11965 @@ -2218,7 +2218,6 @@ static const struct nf_hook_ops ip_vs_ops[] = {
11966 static int __net_init __ip_vs_init(struct net *net)
11967 {
11968 struct netns_ipvs *ipvs;
11969 - int ret;
11970
11971 ipvs = net_generic(net, ip_vs_net_id);
11972 if (ipvs == NULL)
11973 @@ -2250,17 +2249,11 @@ static int __net_init __ip_vs_init(struct net *net)
11974 if (ip_vs_sync_net_init(ipvs) < 0)
11975 goto sync_fail;
11976
11977 - ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
11978 - if (ret < 0)
11979 - goto hook_fail;
11980 -
11981 return 0;
11982 /*
11983 * Error handling
11984 */
11985
11986 -hook_fail:
11987 - ip_vs_sync_net_cleanup(ipvs);
11988 sync_fail:
11989 ip_vs_conn_net_cleanup(ipvs);
11990 conn_fail:
11991 @@ -2290,6 +2283,19 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
11992 net->ipvs = NULL;
11993 }
11994
11995 +static int __net_init __ip_vs_dev_init(struct net *net)
11996 +{
11997 + int ret;
11998 +
11999 + ret = nf_register_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
12000 + if (ret < 0)
12001 + goto hook_fail;
12002 + return 0;
12003 +
12004 +hook_fail:
12005 + return ret;
12006 +}
12007 +
12008 static void __net_exit __ip_vs_dev_cleanup(struct net *net)
12009 {
12010 struct netns_ipvs *ipvs = net_ipvs(net);
12011 @@ -2309,6 +2315,7 @@ static struct pernet_operations ipvs_core_ops = {
12012 };
12013
12014 static struct pernet_operations ipvs_core_dev_ops = {
12015 + .init = __ip_vs_dev_init,
12016 .exit = __ip_vs_dev_cleanup,
12017 };
12018
12019 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
12020 index 2d4e048762f6..3df94a499126 100644
12021 --- a/net/netfilter/ipvs/ip_vs_ctl.c
12022 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
12023 @@ -2382,9 +2382,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
12024 cfg.syncid = dm->syncid;
12025 ret = start_sync_thread(ipvs, &cfg, dm->state);
12026 } else {
12027 - mutex_lock(&ipvs->sync_mutex);
12028 ret = stop_sync_thread(ipvs, dm->state);
12029 - mutex_unlock(&ipvs->sync_mutex);
12030 }
12031 goto out_dec;
12032 }
12033 @@ -3492,10 +3490,8 @@ static int ip_vs_genl_del_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
12034 if (!attrs[IPVS_DAEMON_ATTR_STATE])
12035 return -EINVAL;
12036
12037 - mutex_lock(&ipvs->sync_mutex);
12038 ret = stop_sync_thread(ipvs,
12039 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
12040 - mutex_unlock(&ipvs->sync_mutex);
12041 return ret;
12042 }
12043
12044 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
12045 index d4020c5e831d..ecb71062fcb3 100644
12046 --- a/net/netfilter/ipvs/ip_vs_sync.c
12047 +++ b/net/netfilter/ipvs/ip_vs_sync.c
12048 @@ -195,6 +195,7 @@ union ip_vs_sync_conn {
12049 #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1))
12050
12051 struct ip_vs_sync_thread_data {
12052 + struct task_struct *task;
12053 struct netns_ipvs *ipvs;
12054 struct socket *sock;
12055 char *buf;
12056 @@ -374,8 +375,11 @@ static inline void sb_queue_tail(struct netns_ipvs *ipvs,
12057 max(IPVS_SYNC_SEND_DELAY, 1));
12058 ms->sync_queue_len++;
12059 list_add_tail(&sb->list, &ms->sync_queue);
12060 - if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE)
12061 - wake_up_process(ms->master_thread);
12062 + if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) {
12063 + int id = (int)(ms - ipvs->ms);
12064 +
12065 + wake_up_process(ipvs->master_tinfo[id].task);
12066 + }
12067 } else
12068 ip_vs_sync_buff_release(sb);
12069 spin_unlock(&ipvs->sync_lock);
12070 @@ -1636,8 +1640,10 @@ static void master_wakeup_work_handler(struct work_struct *work)
12071 spin_lock_bh(&ipvs->sync_lock);
12072 if (ms->sync_queue_len &&
12073 ms->sync_queue_delay < IPVS_SYNC_WAKEUP_RATE) {
12074 + int id = (int)(ms - ipvs->ms);
12075 +
12076 ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE;
12077 - wake_up_process(ms->master_thread);
12078 + wake_up_process(ipvs->master_tinfo[id].task);
12079 }
12080 spin_unlock_bh(&ipvs->sync_lock);
12081 }
12082 @@ -1703,10 +1709,6 @@ done:
12083 if (sb)
12084 ip_vs_sync_buff_release(sb);
12085
12086 - /* release the sending multicast socket */
12087 - sock_release(tinfo->sock);
12088 - kfree(tinfo);
12089 -
12090 return 0;
12091 }
12092
12093 @@ -1740,11 +1742,6 @@ static int sync_thread_backup(void *data)
12094 }
12095 }
12096
12097 - /* release the sending multicast socket */
12098 - sock_release(tinfo->sock);
12099 - kfree(tinfo->buf);
12100 - kfree(tinfo);
12101 -
12102 return 0;
12103 }
12104
12105 @@ -1752,8 +1749,8 @@ static int sync_thread_backup(void *data)
12106 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
12107 int state)
12108 {
12109 - struct ip_vs_sync_thread_data *tinfo = NULL;
12110 - struct task_struct **array = NULL, *task;
12111 + struct ip_vs_sync_thread_data *ti = NULL, *tinfo;
12112 + struct task_struct *task;
12113 struct net_device *dev;
12114 char *name;
12115 int (*threadfn)(void *data);
12116 @@ -1822,7 +1819,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
12117 threadfn = sync_thread_master;
12118 } else if (state == IP_VS_STATE_BACKUP) {
12119 result = -EEXIST;
12120 - if (ipvs->backup_threads)
12121 + if (ipvs->backup_tinfo)
12122 goto out_early;
12123
12124 ipvs->bcfg = *c;
12125 @@ -1849,28 +1846,22 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
12126 master_wakeup_work_handler);
12127 ms->ipvs = ipvs;
12128 }
12129 - } else {
12130 - array = kcalloc(count, sizeof(struct task_struct *),
12131 - GFP_KERNEL);
12132 - result = -ENOMEM;
12133 - if (!array)
12134 - goto out;
12135 }
12136 + result = -ENOMEM;
12137 + ti = kcalloc(count, sizeof(struct ip_vs_sync_thread_data),
12138 + GFP_KERNEL);
12139 + if (!ti)
12140 + goto out;
12141
12142 for (id = 0; id < count; id++) {
12143 - result = -ENOMEM;
12144 - tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
12145 - if (!tinfo)
12146 - goto out;
12147 + tinfo = &ti[id];
12148 tinfo->ipvs = ipvs;
12149 - tinfo->sock = NULL;
12150 if (state == IP_VS_STATE_BACKUP) {
12151 + result = -ENOMEM;
12152 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
12153 GFP_KERNEL);
12154 if (!tinfo->buf)
12155 goto out;
12156 - } else {
12157 - tinfo->buf = NULL;
12158 }
12159 tinfo->id = id;
12160 if (state == IP_VS_STATE_MASTER)
12161 @@ -1885,17 +1876,15 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
12162 result = PTR_ERR(task);
12163 goto out;
12164 }
12165 - tinfo = NULL;
12166 - if (state == IP_VS_STATE_MASTER)
12167 - ipvs->ms[id].master_thread = task;
12168 - else
12169 - array[id] = task;
12170 + tinfo->task = task;
12171 }
12172
12173 /* mark as active */
12174
12175 - if (state == IP_VS_STATE_BACKUP)
12176 - ipvs->backup_threads = array;
12177 + if (state == IP_VS_STATE_MASTER)
12178 + ipvs->master_tinfo = ti;
12179 + else
12180 + ipvs->backup_tinfo = ti;
12181 spin_lock_bh(&ipvs->sync_buff_lock);
12182 ipvs->sync_state |= state;
12183 spin_unlock_bh(&ipvs->sync_buff_lock);
12184 @@ -1910,29 +1899,31 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
12185
12186 out:
12187 /* We do not need RTNL lock anymore, release it here so that
12188 - * sock_release below and in the kthreads can use rtnl_lock
12189 - * to leave the mcast group.
12190 + * sock_release below can use rtnl_lock to leave the mcast group.
12191 */
12192 rtnl_unlock();
12193 - count = id;
12194 - while (count-- > 0) {
12195 - if (state == IP_VS_STATE_MASTER)
12196 - kthread_stop(ipvs->ms[count].master_thread);
12197 - else
12198 - kthread_stop(array[count]);
12199 + id = min(id, count - 1);
12200 + if (ti) {
12201 + for (tinfo = ti + id; tinfo >= ti; tinfo--) {
12202 + if (tinfo->task)
12203 + kthread_stop(tinfo->task);
12204 + }
12205 }
12206 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
12207 kfree(ipvs->ms);
12208 ipvs->ms = NULL;
12209 }
12210 mutex_unlock(&ipvs->sync_mutex);
12211 - if (tinfo) {
12212 - if (tinfo->sock)
12213 - sock_release(tinfo->sock);
12214 - kfree(tinfo->buf);
12215 - kfree(tinfo);
12216 +
12217 + /* No more mutexes, release socks */
12218 + if (ti) {
12219 + for (tinfo = ti + id; tinfo >= ti; tinfo--) {
12220 + if (tinfo->sock)
12221 + sock_release(tinfo->sock);
12222 + kfree(tinfo->buf);
12223 + }
12224 + kfree(ti);
12225 }
12226 - kfree(array);
12227 return result;
12228
12229 out_early:
12230 @@ -1944,15 +1935,18 @@ out_early:
12231
12232 int stop_sync_thread(struct netns_ipvs *ipvs, int state)
12233 {
12234 - struct task_struct **array;
12235 + struct ip_vs_sync_thread_data *ti, *tinfo;
12236 int id;
12237 int retc = -EINVAL;
12238
12239 IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
12240
12241 + mutex_lock(&ipvs->sync_mutex);
12242 if (state == IP_VS_STATE_MASTER) {
12243 + retc = -ESRCH;
12244 if (!ipvs->ms)
12245 - return -ESRCH;
12246 + goto err;
12247 + ti = ipvs->master_tinfo;
12248
12249 /*
12250 * The lock synchronizes with sb_queue_tail(), so that we don't
12251 @@ -1971,38 +1965,56 @@ int stop_sync_thread(struct netns_ipvs *ipvs, int state)
12252 struct ipvs_master_sync_state *ms = &ipvs->ms[id];
12253 int ret;
12254
12255 + tinfo = &ti[id];
12256 pr_info("stopping master sync thread %d ...\n",
12257 - task_pid_nr(ms->master_thread));
12258 + task_pid_nr(tinfo->task));
12259 cancel_delayed_work_sync(&ms->master_wakeup_work);
12260 - ret = kthread_stop(ms->master_thread);
12261 + ret = kthread_stop(tinfo->task);
12262 if (retc >= 0)
12263 retc = ret;
12264 }
12265 kfree(ipvs->ms);
12266 ipvs->ms = NULL;
12267 + ipvs->master_tinfo = NULL;
12268 } else if (state == IP_VS_STATE_BACKUP) {
12269 - if (!ipvs->backup_threads)
12270 - return -ESRCH;
12271 + retc = -ESRCH;
12272 + if (!ipvs->backup_tinfo)
12273 + goto err;
12274 + ti = ipvs->backup_tinfo;
12275
12276 ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
12277 - array = ipvs->backup_threads;
12278 retc = 0;
12279 for (id = ipvs->threads_mask; id >= 0; id--) {
12280 int ret;
12281
12282 + tinfo = &ti[id];
12283 pr_info("stopping backup sync thread %d ...\n",
12284 - task_pid_nr(array[id]));
12285 - ret = kthread_stop(array[id]);
12286 + task_pid_nr(tinfo->task));
12287 + ret = kthread_stop(tinfo->task);
12288 if (retc >= 0)
12289 retc = ret;
12290 }
12291 - kfree(array);
12292 - ipvs->backup_threads = NULL;
12293 + ipvs->backup_tinfo = NULL;
12294 + } else {
12295 + goto err;
12296 }
12297 + id = ipvs->threads_mask;
12298 + mutex_unlock(&ipvs->sync_mutex);
12299 +
12300 + /* No more mutexes, release socks */
12301 + for (tinfo = ti + id; tinfo >= ti; tinfo--) {
12302 + if (tinfo->sock)
12303 + sock_release(tinfo->sock);
12304 + kfree(tinfo->buf);
12305 + }
12306 + kfree(ti);
12307
12308 /* decrease the module use count */
12309 ip_vs_use_count_dec();
12310 + return retc;
12311
12312 +err:
12313 + mutex_unlock(&ipvs->sync_mutex);
12314 return retc;
12315 }
12316
12317 @@ -2021,7 +2033,6 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
12318 {
12319 int retc;
12320
12321 - mutex_lock(&ipvs->sync_mutex);
12322 retc = stop_sync_thread(ipvs, IP_VS_STATE_MASTER);
12323 if (retc && retc != -ESRCH)
12324 pr_err("Failed to stop Master Daemon\n");
12325 @@ -2029,5 +2040,4 @@ void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs)
12326 retc = stop_sync_thread(ipvs, IP_VS_STATE_BACKUP);
12327 if (retc && retc != -ESRCH)
12328 pr_err("Failed to stop Backup Daemon\n");
12329 - mutex_unlock(&ipvs->sync_mutex);
12330 }
12331 diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
12332 index 8a64b150be54..fe96c0d039f2 100644
12333 --- a/net/xdp/xsk_queue.h
12334 +++ b/net/xdp/xsk_queue.h
12335 @@ -239,7 +239,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
12336 /* Order producer and data */
12337 smp_wmb();
12338
12339 - q->prod_tail = q->prod_head,
12340 + q->prod_tail = q->prod_head;
12341 WRITE_ONCE(q->ring->producer, q->prod_tail);
12342 }
12343
12344 diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
12345 index 4a9ee2d83158..372c91faa283 100644
12346 --- a/net/xfrm/Kconfig
12347 +++ b/net/xfrm/Kconfig
12348 @@ -14,6 +14,8 @@ config XFRM_ALGO
12349 tristate
12350 select XFRM
12351 select CRYPTO
12352 + select CRYPTO_HASH
12353 + select CRYPTO_BLKCIPHER
12354
12355 config XFRM_USER
12356 tristate "Transformation user configuration interface"
12357 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
12358 index 2122f89f6155..1484bc99a537 100644
12359 --- a/net/xfrm/xfrm_user.c
12360 +++ b/net/xfrm/xfrm_user.c
12361 @@ -150,6 +150,25 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
12362
12363 err = -EINVAL;
12364 switch (p->family) {
12365 + case AF_INET:
12366 + break;
12367 +
12368 + case AF_INET6:
12369 +#if IS_ENABLED(CONFIG_IPV6)
12370 + break;
12371 +#else
12372 + err = -EAFNOSUPPORT;
12373 + goto out;
12374 +#endif
12375 +
12376 + default:
12377 + goto out;
12378 + }
12379 +
12380 + switch (p->sel.family) {
12381 + case AF_UNSPEC:
12382 + break;
12383 +
12384 case AF_INET:
12385 if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
12386 goto out;
12387 diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
12388 index 91d0a5c014ac..fd99ae90a618 100644
12389 --- a/scripts/kconfig/confdata.c
12390 +++ b/scripts/kconfig/confdata.c
12391 @@ -834,11 +834,12 @@ int conf_write(const char *name)
12392 "#\n"
12393 "# %s\n"
12394 "#\n", str);
12395 - } else if (!(sym->flags & SYMBOL_CHOICE)) {
12396 + } else if (!(sym->flags & SYMBOL_CHOICE) &&
12397 + !(sym->flags & SYMBOL_WRITTEN)) {
12398 sym_calc_value(sym);
12399 if (!(sym->flags & SYMBOL_WRITE))
12400 goto next;
12401 - sym->flags &= ~SYMBOL_WRITE;
12402 + sym->flags |= SYMBOL_WRITTEN;
12403
12404 conf_write_symbol(out, sym, &kconfig_printer_cb, NULL);
12405 }
12406 @@ -1024,8 +1025,6 @@ int conf_write_autoconf(int overwrite)
12407 if (!overwrite && is_present(autoconf_name))
12408 return 0;
12409
12410 - sym_clear_all_valid();
12411 -
12412 conf_write_dep("include/config/auto.conf.cmd");
12413
12414 if (conf_split_config())
12415 diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
12416 index 7c329e179007..43a87f8ea738 100644
12417 --- a/scripts/kconfig/expr.h
12418 +++ b/scripts/kconfig/expr.h
12419 @@ -141,6 +141,7 @@ struct symbol {
12420 #define SYMBOL_OPTIONAL 0x0100 /* choice is optional - values can be 'n' */
12421 #define SYMBOL_WRITE 0x0200 /* write symbol to file (KCONFIG_CONFIG) */
12422 #define SYMBOL_CHANGED 0x0400 /* ? */
12423 +#define SYMBOL_WRITTEN 0x0800 /* track info to avoid double-write to .config */
12424 #define SYMBOL_NO_WRITE 0x1000 /* Symbol for internal use only; it will not be written */
12425 #define SYMBOL_CHECKED 0x2000 /* used during dependency checking */
12426 #define SYMBOL_WARNED 0x8000 /* warning has been issued */
12427 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
12428 index 70bad15ed7a0..109ab510bdb1 100644
12429 --- a/security/selinux/hooks.c
12430 +++ b/security/selinux/hooks.c
12431 @@ -6550,11 +6550,12 @@ static int selinux_setprocattr(const char *name, void *value, size_t size)
12432 } else if (!strcmp(name, "fscreate")) {
12433 tsec->create_sid = sid;
12434 } else if (!strcmp(name, "keycreate")) {
12435 - error = avc_has_perm(&selinux_state,
12436 - mysid, sid, SECCLASS_KEY, KEY__CREATE,
12437 - NULL);
12438 - if (error)
12439 - goto abort_change;
12440 + if (sid) {
12441 + error = avc_has_perm(&selinux_state, mysid, sid,
12442 + SECCLASS_KEY, KEY__CREATE, NULL);
12443 + if (error)
12444 + goto abort_change;
12445 + }
12446 tsec->keycreate_sid = sid;
12447 } else if (!strcmp(name, "sockcreate")) {
12448 tsec->sockcreate_sid = sid;
12449 diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
12450 index f59e13c1d84a..bbf91a5a938b 100644
12451 --- a/sound/core/seq/seq_clientmgr.c
12452 +++ b/sound/core/seq/seq_clientmgr.c
12453 @@ -1004,7 +1004,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
12454 {
12455 struct snd_seq_client *client = file->private_data;
12456 int written = 0, len;
12457 - int err;
12458 + int err, handled;
12459 struct snd_seq_event event;
12460
12461 if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
12462 @@ -1017,6 +1017,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
12463 if (!client->accept_output || client->pool == NULL)
12464 return -ENXIO;
12465
12466 + repeat:
12467 + handled = 0;
12468 /* allocate the pool now if the pool is not allocated yet */
12469 mutex_lock(&client->ioctl_mutex);
12470 if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
12471 @@ -1076,12 +1078,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
12472 0, 0, &client->ioctl_mutex);
12473 if (err < 0)
12474 break;
12475 + handled++;
12476
12477 __skip_event:
12478 /* Update pointers and counts */
12479 count -= len;
12480 buf += len;
12481 written += len;
12482 +
12483 + /* let's have a coffee break if too many events are queued */
12484 + if (++handled >= 200) {
12485 + mutex_unlock(&client->ioctl_mutex);
12486 + goto repeat;
12487 + }
12488 }
12489
12490 out:
12491 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12492 index 98cfdcfce5b3..dc1989686f09 100644
12493 --- a/sound/pci/hda/patch_realtek.c
12494 +++ b/sound/pci/hda/patch_realtek.c
12495 @@ -7518,9 +7518,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
12496 {0x12, 0x90a60130},
12497 {0x17, 0x90170110},
12498 {0x21, 0x03211020}),
12499 - SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
12500 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
12501 {0x14, 0x90170110},
12502 {0x21, 0x04211020}),
12503 + SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
12504 + {0x14, 0x90170110},
12505 + {0x21, 0x04211030}),
12506 SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
12507 ALC295_STANDARD_PINS,
12508 {0x17, 0x21014020},
12509 @@ -8654,6 +8657,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
12510 {0x18, 0x01a19030},
12511 {0x1a, 0x01813040},
12512 {0x21, 0x01014020}),
12513 + SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
12514 + {0x16, 0x01813030},
12515 + {0x17, 0x02211010},
12516 + {0x18, 0x01a19040},
12517 + {0x21, 0x01014020}),
12518 SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
12519 {0x14, 0x01014010},
12520 {0x18, 0x01a19020},
12521 diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
12522 index 63487240b61e..098196610542 100644
12523 --- a/sound/soc/codecs/hdac_hdmi.c
12524 +++ b/sound/soc/codecs/hdac_hdmi.c
12525 @@ -1854,6 +1854,12 @@ static void hdmi_codec_remove(struct snd_soc_component *component)
12526 {
12527 struct hdac_hdmi_priv *hdmi = snd_soc_component_get_drvdata(component);
12528 struct hdac_device *hdev = hdmi->hdev;
12529 + int ret;
12530 +
12531 + ret = snd_hdac_acomp_register_notifier(hdev->bus, NULL);
12532 + if (ret < 0)
12533 + dev_err(&hdev->dev, "notifier unregister failed: err: %d\n",
12534 + ret);
12535
12536 pm_runtime_disable(&hdev->dev);
12537 }
12538 diff --git a/sound/soc/meson/axg-tdm.h b/sound/soc/meson/axg-tdm.h
12539 index e578b6f40a07..5774ce0916d4 100644
12540 --- a/sound/soc/meson/axg-tdm.h
12541 +++ b/sound/soc/meson/axg-tdm.h
12542 @@ -40,7 +40,7 @@ struct axg_tdm_iface {
12543
12544 static inline bool axg_tdm_lrclk_invert(unsigned int fmt)
12545 {
12546 - return (fmt & SND_SOC_DAIFMT_I2S) ^
12547 + return ((fmt & SND_SOC_DAIFMT_FORMAT_MASK) == SND_SOC_DAIFMT_I2S) ^
12548 !!(fmt & (SND_SOC_DAIFMT_IB_IF | SND_SOC_DAIFMT_NB_IF));
12549 }
12550
12551 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
12552 index 2257b1b0151c..3bfc788372f3 100644
12553 --- a/sound/soc/soc-dapm.c
12554 +++ b/sound/soc/soc-dapm.c
12555 @@ -2139,23 +2139,25 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
12556 {
12557 struct dentry *d;
12558
12559 - if (!parent)
12560 + if (!parent || IS_ERR(parent))
12561 return;
12562
12563 dapm->debugfs_dapm = debugfs_create_dir("dapm", parent);
12564
12565 - if (!dapm->debugfs_dapm) {
12566 + if (IS_ERR(dapm->debugfs_dapm)) {
12567 dev_warn(dapm->dev,
12568 - "ASoC: Failed to create DAPM debugfs directory\n");
12569 + "ASoC: Failed to create DAPM debugfs directory %ld\n",
12570 + PTR_ERR(dapm->debugfs_dapm));
12571 return;
12572 }
12573
12574 d = debugfs_create_file("bias_level", 0444,
12575 dapm->debugfs_dapm, dapm,
12576 &dapm_bias_fops);
12577 - if (!d)
12578 + if (IS_ERR(d))
12579 dev_warn(dapm->dev,
12580 - "ASoC: Failed to create bias level debugfs file\n");
12581 + "ASoC: Failed to create bias level debugfs file: %ld\n",
12582 + PTR_ERR(d));
12583 }
12584
12585 static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
12586 @@ -2169,10 +2171,10 @@ static void dapm_debugfs_add_widget(struct snd_soc_dapm_widget *w)
12587 d = debugfs_create_file(w->name, 0444,
12588 dapm->debugfs_dapm, w,
12589 &dapm_widget_power_fops);
12590 - if (!d)
12591 + if (IS_ERR(d))
12592 dev_warn(w->dapm->dev,
12593 - "ASoC: Failed to create %s debugfs file\n",
12594 - w->name);
12595 + "ASoC: Failed to create %s debugfs file: %ld\n",
12596 + w->name, PTR_ERR(d));
12597 }
12598
12599 static void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
12600 diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
12601 index 87439320ef70..73d7252729fa 100644
12602 --- a/tools/bpf/bpftool/jit_disasm.c
12603 +++ b/tools/bpf/bpftool/jit_disasm.c
12604 @@ -10,6 +10,8 @@
12605 * Licensed under the GNU General Public License, version 2.0 (GPLv2)
12606 */
12607
12608 +#define _GNU_SOURCE
12609 +#include <stdio.h>
12610 #include <stdarg.h>
12611 #include <stdint.h>
12612 #include <stdio.h>
12613 @@ -51,11 +53,13 @@ static int fprintf_json(void *out, const char *fmt, ...)
12614 char *s;
12615
12616 va_start(ap, fmt);
12617 + if (vasprintf(&s, fmt, ap) < 0)
12618 + return -1;
12619 + va_end(ap);
12620 +
12621 if (!oper_count) {
12622 int i;
12623
12624 - s = va_arg(ap, char *);
12625 -
12626 /* Strip trailing spaces */
12627 i = strlen(s) - 1;
12628 while (s[i] == ' ')
12629 @@ -68,11 +72,10 @@ static int fprintf_json(void *out, const char *fmt, ...)
12630 } else if (!strcmp(fmt, ",")) {
12631 /* Skip */
12632 } else {
12633 - s = va_arg(ap, char *);
12634 jsonw_string(json_wtr, s);
12635 oper_count++;
12636 }
12637 - va_end(ap);
12638 + free(s);
12639 return 0;
12640 }
12641
12642 diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
12643 index 66917a4eba27..bf4cd924aed5 100644
12644 --- a/tools/include/uapi/linux/bpf.h
12645 +++ b/tools/include/uapi/linux/bpf.h
12646 @@ -2484,6 +2484,7 @@ struct bpf_prog_info {
12647 char name[BPF_OBJ_NAME_LEN];
12648 __u32 ifindex;
12649 __u32 gpl_compatible:1;
12650 + __u32 :31; /* alignment pad */
12651 __u64 netns_dev;
12652 __u64 netns_ino;
12653 __u32 nr_jited_ksyms;
12654 diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
12655 index bdb94939fd60..a350f97e3a1a 100644
12656 --- a/tools/lib/bpf/libbpf.c
12657 +++ b/tools/lib/bpf/libbpf.c
12658 @@ -2293,10 +2293,7 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type,
12659 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
12660 struct bpf_object **pobj, int *prog_fd)
12661 {
12662 - struct bpf_object_open_attr open_attr = {
12663 - .file = attr->file,
12664 - .prog_type = attr->prog_type,
12665 - };
12666 + struct bpf_object_open_attr open_attr = {};
12667 struct bpf_program *prog, *first_prog = NULL;
12668 enum bpf_attach_type expected_attach_type;
12669 enum bpf_prog_type prog_type;
12670 @@ -2309,6 +2306,9 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
12671 if (!attr->file)
12672 return -EINVAL;
12673
12674 + open_attr.file = attr->file;
12675 + open_attr.prog_type = attr->prog_type;
12676 +
12677 obj = bpf_object__open_xattr(&open_attr);
12678 if (IS_ERR_OR_NULL(obj))
12679 return -ENOENT;
12680 diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
12681 index 2f595cd73da6..16af6c3b1365 100644
12682 --- a/tools/perf/arch/arm/util/cs-etm.c
12683 +++ b/tools/perf/arch/arm/util/cs-etm.c
12684 @@ -32,6 +32,8 @@ struct cs_etm_recording {
12685 struct auxtrace_record itr;
12686 struct perf_pmu *cs_etm_pmu;
12687 struct perf_evlist *evlist;
12688 + int wrapped_cnt;
12689 + bool *wrapped;
12690 bool snapshot_mode;
12691 size_t snapshot_size;
12692 };
12693 @@ -495,16 +497,131 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
12694 return 0;
12695 }
12696
12697 -static int cs_etm_find_snapshot(struct auxtrace_record *itr __maybe_unused,
12698 +static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
12699 +{
12700 + bool *wrapped;
12701 + int cnt = ptr->wrapped_cnt;
12702 +
12703 + /* Make @ptr->wrapped as big as @idx */
12704 + while (cnt <= idx)
12705 + cnt++;
12706 +
12707 + /*
12708 + * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
12709 + * cross compilation problems where the host's system supports
12710 + * reallocarray() but not the target.
12711 + */
12712 + wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
12713 + if (!wrapped)
12714 + return -ENOMEM;
12715 +
12716 + wrapped[cnt - 1] = false;
12717 + ptr->wrapped_cnt = cnt;
12718 + ptr->wrapped = wrapped;
12719 +
12720 + return 0;
12721 +}
12722 +
12723 +static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
12724 + size_t buffer_size, u64 head)
12725 +{
12726 + u64 i, watermark;
12727 + u64 *buf = (u64 *)buffer;
12728 + size_t buf_size = buffer_size;
12729 +
12730 + /*
12731 + * We want to look the very last 512 byte (chosen arbitrarily) in
12732 + * the ring buffer.
12733 + */
12734 + watermark = buf_size - 512;
12735 +
12736 + /*
12737 + * @head is continuously increasing - if its value is equal or greater
12738 + * than the size of the ring buffer, it has wrapped around.
12739 + */
12740 + if (head >= buffer_size)
12741 + return true;
12742 +
12743 + /*
12744 + * The value of @head is somewhere within the size of the ring buffer.
12745 + * This can be that there hasn't been enough data to fill the ring
12746 + * buffer yet or the trace time was so long that @head has numerically
12747 + * wrapped around. To find we need to check if we have data at the very
12748 + * end of the ring buffer. We can reliably do this because mmap'ed
12749 + * pages are zeroed out and there is a fresh mapping with every new
12750 + * session.
12751 + */
12752 +
12753 + /* @head is less than 512 byte from the end of the ring buffer */
12754 + if (head > watermark)
12755 + watermark = head;
12756 +
12757 + /*
12758 + * Speed things up by using 64 bit transactions (see "u64 *buf" above)
12759 + */
12760 + watermark >>= 3;
12761 + buf_size >>= 3;
12762 +
12763 + /*
12764 + * If we find trace data at the end of the ring buffer, @head has
12765 + * been there and has numerically wrapped around at least once.
12766 + */
12767 + for (i = watermark; i < buf_size; i++)
12768 + if (buf[i])
12769 + return true;
12770 +
12771 + return false;
12772 +}
12773 +
12774 +static int cs_etm_find_snapshot(struct auxtrace_record *itr,
12775 int idx, struct auxtrace_mmap *mm,
12776 - unsigned char *data __maybe_unused,
12777 + unsigned char *data,
12778 u64 *head, u64 *old)
12779 {
12780 + int err;
12781 + bool wrapped;
12782 + struct cs_etm_recording *ptr =
12783 + container_of(itr, struct cs_etm_recording, itr);
12784 +
12785 + /*
12786 + * Allocate memory to keep track of wrapping if this is the first
12787 + * time we deal with this *mm.
12788 + */
12789 + if (idx >= ptr->wrapped_cnt) {
12790 + err = cs_etm_alloc_wrapped_array(ptr, idx);
12791 + if (err)
12792 + return err;
12793 + }
12794 +
12795 + /*
12796 + * Check to see if *head has wrapped around. If it hasn't only the
12797 + * amount of data between *head and *old is snapshot'ed to avoid
12798 + * bloating the perf.data file with zeros. But as soon as *head has
12799 + * wrapped around the entire size of the AUX ring buffer it taken.
12800 + */
12801 + wrapped = ptr->wrapped[idx];
12802 + if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
12803 + wrapped = true;
12804 + ptr->wrapped[idx] = true;
12805 + }
12806 +
12807 pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
12808 __func__, idx, (size_t)*old, (size_t)*head, mm->len);
12809
12810 - *old = *head;
12811 - *head += mm->len;
12812 + /* No wrap has occurred, we can just use *head and *old. */
12813 + if (!wrapped)
12814 + return 0;
12815 +
12816 + /*
12817 + * *head has wrapped around - adjust *head and *old to pickup the
12818 + * entire content of the AUX buffer.
12819 + */
12820 + if (*head >= mm->len) {
12821 + *old = *head - mm->len;
12822 + } else {
12823 + *head += mm->len;
12824 + *old = *head - mm->len;
12825 + }
12826
12827 return 0;
12828 }
12829 @@ -545,6 +662,8 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
12830 {
12831 struct cs_etm_recording *ptr =
12832 container_of(itr, struct cs_etm_recording, itr);
12833 +
12834 + zfree(&ptr->wrapped);
12835 free(ptr);
12836 }
12837
12838 diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
12839 index 6add3e982614..3361d98a4edd 100644
12840 --- a/tools/perf/jvmti/libjvmti.c
12841 +++ b/tools/perf/jvmti/libjvmti.c
12842 @@ -1,5 +1,6 @@
12843 // SPDX-License-Identifier: GPL-2.0
12844 #include <linux/compiler.h>
12845 +#include <linux/string.h>
12846 #include <sys/types.h>
12847 #include <stdio.h>
12848 #include <string.h>
12849 @@ -150,8 +151,7 @@ copy_class_filename(const char * class_sign, const char * file_name, char * resu
12850 result[i] = '\0';
12851 } else {
12852 /* fallback case */
12853 - size_t file_name_len = strlen(file_name);
12854 - strncpy(result, file_name, file_name_len < max_length ? file_name_len : max_length);
12855 + strlcpy(result, file_name, max_length);
12856 }
12857 }
12858
12859 diff --git a/tools/perf/perf.h b/tools/perf/perf.h
12860 index 21bf7f5a3cf5..19d435a9623b 100644
12861 --- a/tools/perf/perf.h
12862 +++ b/tools/perf/perf.h
12863 @@ -26,7 +26,7 @@ static inline unsigned long long rdclock(void)
12864 }
12865
12866 #ifndef MAX_NR_CPUS
12867 -#define MAX_NR_CPUS 1024
12868 +#define MAX_NR_CPUS 2048
12869 #endif
12870
12871 extern const char *input_name;
12872 diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
12873 index 3b97ac018d5a..532c95e8fa6b 100644
12874 --- a/tools/perf/tests/parse-events.c
12875 +++ b/tools/perf/tests/parse-events.c
12876 @@ -18,6 +18,32 @@
12877 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
12878 PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
12879
12880 +#if defined(__s390x__)
12881 +/* Return true if kvm module is available and loaded. Test this
12882 + * and retun success when trace point kvm_s390_create_vm
12883 + * exists. Otherwise this test always fails.
12884 + */
12885 +static bool kvm_s390_create_vm_valid(void)
12886 +{
12887 + char *eventfile;
12888 + bool rc = false;
12889 +
12890 + eventfile = get_events_file("kvm-s390");
12891 +
12892 + if (eventfile) {
12893 + DIR *mydir = opendir(eventfile);
12894 +
12895 + if (mydir) {
12896 + rc = true;
12897 + closedir(mydir);
12898 + }
12899 + put_events_file(eventfile);
12900 + }
12901 +
12902 + return rc;
12903 +}
12904 +#endif
12905 +
12906 static int test__checkevent_tracepoint(struct perf_evlist *evlist)
12907 {
12908 struct perf_evsel *evsel = perf_evlist__first(evlist);
12909 @@ -1622,6 +1648,7 @@ static struct evlist_test test__events[] = {
12910 {
12911 .name = "kvm-s390:kvm_s390_create_vm",
12912 .check = test__checkevent_tracepoint,
12913 + .valid = kvm_s390_create_vm_valid,
12914 .id = 100,
12915 },
12916 #endif
12917 diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
12918 index cab7b0aea6ea..f5837f28f3af 100755
12919 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
12920 +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
12921 @@ -43,7 +43,7 @@ trace_libc_inet_pton_backtrace() {
12922 eventattr='max-stack=4'
12923 echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
12924 echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
12925 - echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
12926 + echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected
12927 ;;
12928 *)
12929 eventattr='max-stack=3'
12930 diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
12931 index 1d00e5ec7906..a3c255228d62 100644
12932 --- a/tools/perf/ui/browsers/annotate.c
12933 +++ b/tools/perf/ui/browsers/annotate.c
12934 @@ -96,11 +96,12 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
12935 struct annotate_browser *ab = container_of(browser, struct annotate_browser, b);
12936 struct annotation *notes = browser__annotation(browser);
12937 struct annotation_line *al = list_entry(entry, struct annotation_line, node);
12938 + const bool is_current_entry = ui_browser__is_current_entry(browser, row);
12939 struct annotation_write_ops ops = {
12940 .first_line = row == 0,
12941 - .current_entry = ui_browser__is_current_entry(browser, row),
12942 + .current_entry = is_current_entry,
12943 .change_color = (!notes->options->hide_src_code &&
12944 - (!ops.current_entry ||
12945 + (!is_current_entry ||
12946 (browser->use_navkeypressed &&
12947 !browser->navkeypressed))),
12948 .width = browser->width,
12949 diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
12950 index dfee110b3a58..c357051dd2b6 100644
12951 --- a/tools/perf/util/annotate.c
12952 +++ b/tools/perf/util/annotate.c
12953 @@ -911,9 +911,8 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
12954 if (sym == NULL)
12955 return 0;
12956 src = symbol__hists(sym, evsel->evlist->nr_entries);
12957 - if (src == NULL)
12958 - return -ENOMEM;
12959 - return __symbol__inc_addr_samples(sym, map, src, evsel->idx, addr, sample);
12960 + return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx,
12961 + addr, sample) : 0;
12962 }
12963
12964 static int symbol__account_cycles(u64 addr, u64 start,
12965 diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
12966 index b65ad5a273eb..4fad92213609 100644
12967 --- a/tools/perf/util/evsel.c
12968 +++ b/tools/perf/util/evsel.c
12969 @@ -590,6 +590,9 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
12970 {
12971 char bf[128];
12972
12973 + if (!evsel)
12974 + goto out_unknown;
12975 +
12976 if (evsel->name)
12977 return evsel->name;
12978
12979 @@ -626,7 +629,10 @@ const char *perf_evsel__name(struct perf_evsel *evsel)
12980
12981 evsel->name = strdup(bf);
12982
12983 - return evsel->name ?: "unknown";
12984 + if (evsel->name)
12985 + return evsel->name;
12986 +out_unknown:
12987 + return "unknown";
12988 }
12989
12990 const char *perf_evsel__group_name(struct perf_evsel *evsel)
12991 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
12992 index b9a82598e2ac..7f2e3b1c746c 100644
12993 --- a/tools/perf/util/header.c
12994 +++ b/tools/perf/util/header.c
12995 @@ -1173,7 +1173,7 @@ static int build_caches(struct cpu_cache_level caches[], u32 size, u32 *cntp)
12996 return 0;
12997 }
12998
12999 -#define MAX_CACHES 2000
13000 +#define MAX_CACHES (MAX_NR_CPUS * 4)
13001
13002 static int write_cache(struct feat_fd *ff,
13003 struct perf_evlist *evlist __maybe_unused)
13004 diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
13005 index a28f9b5cc4ff..8b3dafe3fac3 100644
13006 --- a/tools/perf/util/metricgroup.c
13007 +++ b/tools/perf/util/metricgroup.c
13008 @@ -94,26 +94,49 @@ struct egroup {
13009 const char *metric_expr;
13010 };
13011
13012 -static struct perf_evsel *find_evsel(struct perf_evlist *perf_evlist,
13013 - const char **ids,
13014 - int idnum,
13015 - struct perf_evsel **metric_events)
13016 +static bool record_evsel(int *ind, struct perf_evsel **start,
13017 + int idnum,
13018 + struct perf_evsel **metric_events,
13019 + struct perf_evsel *ev)
13020 +{
13021 + metric_events[*ind] = ev;
13022 + if (*ind == 0)
13023 + *start = ev;
13024 + if (++*ind == idnum) {
13025 + metric_events[*ind] = NULL;
13026 + return true;
13027 + }
13028 + return false;
13029 +}
13030 +
13031 +static struct perf_evsel *find_evsel_group(struct perf_evlist *perf_evlist,
13032 + const char **ids,
13033 + int idnum,
13034 + struct perf_evsel **metric_events)
13035 {
13036 struct perf_evsel *ev, *start = NULL;
13037 int ind = 0;
13038
13039 evlist__for_each_entry (perf_evlist, ev) {
13040 + if (ev->collect_stat)
13041 + continue;
13042 if (!strcmp(ev->name, ids[ind])) {
13043 - metric_events[ind] = ev;
13044 - if (ind == 0)
13045 - start = ev;
13046 - if (++ind == idnum) {
13047 - metric_events[ind] = NULL;
13048 + if (record_evsel(&ind, &start, idnum,
13049 + metric_events, ev))
13050 return start;
13051 - }
13052 } else {
13053 + /*
13054 + * We saw some other event that is not
13055 + * in our list of events. Discard
13056 + * the whole match and start again.
13057 + */
13058 ind = 0;
13059 start = NULL;
13060 + if (!strcmp(ev->name, ids[ind])) {
13061 + if (record_evsel(&ind, &start, idnum,
13062 + metric_events, ev))
13063 + return start;
13064 + }
13065 }
13066 }
13067 /*
13068 @@ -143,8 +166,8 @@ static int metricgroup__setup_events(struct list_head *groups,
13069 ret = -ENOMEM;
13070 break;
13071 }
13072 - evsel = find_evsel(perf_evlist, eg->ids, eg->idnum,
13073 - metric_events);
13074 + evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
13075 + metric_events);
13076 if (!evsel) {
13077 pr_debug("Cannot resolve %s: %s\n",
13078 eg->metric_name, eg->metric_expr);
13079 diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
13080 index 99990f5f2512..bbb0e042d8e5 100644
13081 --- a/tools/perf/util/stat-shadow.c
13082 +++ b/tools/perf/util/stat-shadow.c
13083 @@ -303,7 +303,7 @@ static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
13084 struct perf_evsel *c2;
13085
13086 evlist__for_each_entry (evsel_list, c2) {
13087 - if (!strcasecmp(c2->name, name))
13088 + if (!strcasecmp(c2->name, name) && !c2->collect_stat)
13089 return c2;
13090 }
13091 return NULL;
13092 @@ -342,7 +342,8 @@ void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
13093 if (leader) {
13094 /* Search in group */
13095 for_each_group_member (oc, leader) {
13096 - if (!strcasecmp(oc->name, metric_names[i])) {
13097 + if (!strcasecmp(oc->name, metric_names[i]) &&
13098 + !oc->collect_stat) {
13099 found = true;
13100 break;
13101 }
13102 diff --git a/tools/power/cpupower/utils/cpufreq-set.c b/tools/power/cpupower/utils/cpufreq-set.c
13103 index 1eef0aed6423..08a405593a79 100644
13104 --- a/tools/power/cpupower/utils/cpufreq-set.c
13105 +++ b/tools/power/cpupower/utils/cpufreq-set.c
13106 @@ -306,6 +306,8 @@ int cmd_freq_set(int argc, char **argv)
13107 bitmask_setbit(cpus_chosen, cpus->cpu);
13108 cpus = cpus->next;
13109 }
13110 + /* Set the last cpu in related cpus list */
13111 + bitmask_setbit(cpus_chosen, cpus->cpu);
13112 cpufreq_put_related_cpus(cpus);
13113 }
13114 }
13115 diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.c b/tools/testing/selftests/bpf/test_lwt_seg6local.c
13116 index 0575751bc1bc..e2f6ed0a583d 100644
13117 --- a/tools/testing/selftests/bpf/test_lwt_seg6local.c
13118 +++ b/tools/testing/selftests/bpf/test_lwt_seg6local.c
13119 @@ -61,7 +61,7 @@ struct sr6_tlv_t {
13120 unsigned char value[0];
13121 } BPF_PACKET_HEADER;
13122
13123 -__attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
13124 +static __always_inline struct ip6_srh_t *get_srh(struct __sk_buff *skb)
13125 {
13126 void *cursor, *data_end;
13127 struct ip6_srh_t *srh;
13128 @@ -95,7 +95,7 @@ __attribute__((always_inline)) struct ip6_srh_t *get_srh(struct __sk_buff *skb)
13129 return srh;
13130 }
13131
13132 -__attribute__((always_inline))
13133 +static __always_inline
13134 int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
13135 uint32_t old_pad, uint32_t pad_off)
13136 {
13137 @@ -125,7 +125,7 @@ int update_tlv_pad(struct __sk_buff *skb, uint32_t new_pad,
13138 return 0;
13139 }
13140
13141 -__attribute__((always_inline))
13142 +static __always_inline
13143 int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
13144 uint32_t *tlv_off, uint32_t *pad_size,
13145 uint32_t *pad_off)
13146 @@ -184,7 +184,7 @@ int is_valid_tlv_boundary(struct __sk_buff *skb, struct ip6_srh_t *srh,
13147 return 0;
13148 }
13149
13150 -__attribute__((always_inline))
13151 +static __always_inline
13152 int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
13153 struct sr6_tlv_t *itlv, uint8_t tlv_size)
13154 {
13155 @@ -228,7 +228,7 @@ int add_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh, uint32_t tlv_off,
13156 return update_tlv_pad(skb, new_pad, pad_size, pad_off);
13157 }
13158
13159 -__attribute__((always_inline))
13160 +static __always_inline
13161 int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
13162 uint32_t tlv_off)
13163 {
13164 @@ -266,7 +266,7 @@ int delete_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh,
13165 return update_tlv_pad(skb, new_pad, pad_size, pad_off);
13166 }
13167
13168 -__attribute__((always_inline))
13169 +static __always_inline
13170 int has_egr_tlv(struct __sk_buff *skb, struct ip6_srh_t *srh)
13171 {
13172 int tlv_offset = sizeof(struct ip6_t) + sizeof(struct ip6_srh_t) +